| column | dtype | min | max |
|---|---|---|---|
| hexsha | stringlengths | 40 | 40 |
| size | int64 | 6 | 14.9M |
| ext | stringclasses | 1 value | |
| lang | stringclasses | 1 value | |
| max_stars_repo_path | stringlengths | 6 | 260 |
| max_stars_repo_name | stringlengths | 6 | 119 |
| max_stars_repo_head_hexsha | stringlengths | 40 | 41 |
| max_stars_repo_licenses | list | | |
| max_stars_count | int64 | 1 | 191k |
| max_stars_repo_stars_event_min_datetime | stringlengths | 24 | 24 |
| max_stars_repo_stars_event_max_datetime | stringlengths | 24 | 24 |
| max_issues_repo_path | stringlengths | 6 | 260 |
| max_issues_repo_name | stringlengths | 6 | 119 |
| max_issues_repo_head_hexsha | stringlengths | 40 | 41 |
| max_issues_repo_licenses | list | | |
| max_issues_count | int64 | 1 | 67k |
| max_issues_repo_issues_event_min_datetime | stringlengths | 24 | 24 |
| max_issues_repo_issues_event_max_datetime | stringlengths | 24 | 24 |
| max_forks_repo_path | stringlengths | 6 | 260 |
| max_forks_repo_name | stringlengths | 6 | 119 |
| max_forks_repo_head_hexsha | stringlengths | 40 | 41 |
| max_forks_repo_licenses | list | | |
| max_forks_count | int64 | 1 | 105k |
| max_forks_repo_forks_event_min_datetime | stringlengths | 24 | 24 |
| max_forks_repo_forks_event_max_datetime | stringlengths | 24 | 24 |
| avg_line_length | float64 | 2 | 1.04M |
| max_line_length | int64 | 2 | 11.2M |
| alphanum_fraction | float64 | 0 | 1 |
| cells | list | | |
| cell_types | list | | |
| cell_type_groups | list | | |
cb75ea77cbb1213a17d2d1581f42dd7398cc3e89
254,771
ipynb
Jupyter Notebook
notebooks_Pk/Pk_inference_rodriguezpuebla_pivot1.ipynb
dangilman/lenslikelihood
1490ee9756b4d2ed108a2478977609bbe0ba2e17
[ "MIT" ]
null
null
null
notebooks_Pk/Pk_inference_rodriguezpuebla_pivot1.ipynb
dangilman/lenslikelihood
1490ee9756b4d2ed108a2478977609bbe0ba2e17
[ "MIT" ]
null
null
null
notebooks_Pk/Pk_inference_rodriguezpuebla_pivot1.ipynb
dangilman/lenslikelihood
1490ee9756b4d2ed108a2478977609bbe0ba2e17
[ "MIT" ]
null
null
null
464.910584
99,764
0.933509
[ [ [ "from lenslikelihood.power_spectra import *\nmass_function_model = 'rodriguezPuebla2016'\nnormalization = 'As'\npivot_string = '1'\npivot = 1.0\n\nstructure_formation_interp_As = load_interpolated_mapping(mass_function_model, pivot_string)\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport os\n\nplt.rcParams['axes.linewidth'] = 2.5\nplt.rcParams['xtick.major.width'] = 2.5\nplt.rcParams['xtick.major.size'] = 8\nplt.rcParams['xtick.minor.size'] = 5\nplt.rcParams['ytick.major.width'] = 2.5\nplt.rcParams['ytick.major.size'] = 8\nplt.rcParams['ytick.minor.size'] = 4\nplt.rcParams['ytick.labelsize'] = 15\nplt.rcParams['xtick.labelsize'] = 15", "_____no_output_____" ], [ "from lenslikelihood.measurements import *\nfrom lenslikelihood.sampling import InterpolatedLikelihood\nimport dill as pickle\nfrom trikde.pdfs import DensitySamples, IndepdendentLikelihoods, MultivariateNormalPriorHyperCube, CustomPriorHyperCube\n\nnbins = 20\nparam_names = ['LOS_normalization', 'beta', 'log10c0', 'delta_power_law_index', 'sigma_sub']\nparam_ranges = [all_param_ranges_version2[name] for name in param_names]\nload_from_pickle = True \nsave_to_pickle = False \n\nfilename_extension = '_joint_logprior'\nbase_path = './../lenslikelihood/precomputed_likelihoods/'\nlikelihoods = []\nfor lens in all_lens_names:\n fname = base_path + lens + filename_extension\n print('loading joint likelihoods for lens '+lens+' ...')\n f = open(fname, 'rb')\n single_lens_likelihood = pickle.load(f)\n f.close()\n likelihoods.append(single_lens_likelihood)\n \nlikelihood_noprior = IndepdendentLikelihoods(likelihoods)", "loading joint likelihoods for lens HE0435 ...\nloading joint likelihoods for lens WGD2038 ...\nloading joint likelihoods for lens B1422 ...\nloading joint likelihoods for lens WFI2033 ...\nloading joint likelihoods for lens PSJ1606 ...\nloading joint likelihoods for lens WFI2026 ...\nloading joint likelihoods for lens RXJ0911 ...\nloading joint likelihoods for lens MG0414 ...\nloading joint likelihoods for lens PG1115 ...\nloading joint likelihoods for lens RXJ1131 ...\nloading joint likelihoods for lens WGDJ0405 ...\n" ] ], [ [ "## Priors on the subhalo and field halo mass functions\n\nA reasonable assumption to impose on the inference is that the number of subhalos varies proportionally with the number of field halos, since subhalos are accreted from the field. We can enforce this by choosing an expected amplitude for the subhalo mass function in $\\Lambda$CDM, and then coupling variations to $\\Sigma_{\\rm{sub}}$ around this value to $\\delta_{\\rm{LOS}}$. 
", "_____no_output_____" ] ], [ [ "def couple_mass_functions(samples, sigma_sub_theory=0.025, coupling_strength=0.2):\n \n delta_los_samples = samples[:, 0]\n sigma_sub_samples = samples[:, -1]\n delta_sigma_sub = sigma_sub_samples/sigma_sub_theory\n chi2 = (delta_sigma_sub - delta_los_samples)**2/coupling_strength**2 \n return chi2\n\nextrapolate_likelihood = True\nsigma_sub_theory = 0.05\nkwargs_prior = {'sigma_sub_theory': sigma_sub_theory}\nprior_on_mass_functions = CustomPriorHyperCube(couple_mass_functions, param_names, param_ranges, nbins, kwargs_prior)\n\nlikelihood = IndepdendentLikelihoods(likelihoods + [prior_on_mass_functions])\ninterpolated_lens_likelihood = InterpolatedLikelihood(likelihood, param_names, param_ranges, extrapolate=extrapolate_likelihood)", "_____no_output_____" ] ], [ [ "### Plot the likelihood\n\nFirst we show the likelihood as inferred from the lenses with no additional modeling assumptions", "_____no_output_____" ] ], [ [ "from trikde.triangleplot import TrianglePlot\nfig = plt.figure()\ncmap = 'jet'\ntriangle_plot = TrianglePlot([likelihood_noprior])\ntriangle_plot.set_cmap(cmap, marginal_col='k')\ntriangle_plot.truth_color = 'k'\ntruths = {'sigma_sub': 1.05, 'LOS_normalization': 1., 'beta': 0.85, 'log10c0': np.log10(18.5), 'delta_power_law_index': 0.}\naxes = triangle_plot.make_triplot(filled_contours=False, show_intervals=False, contour_alpha=1.,\n contour_colors=['k', 'k'],\n show_contours=True, contour_levels=[0.32], truths=truths)\n\nbeta = r'$\\beta$'\nbeta_ticks = [-0.2, 3, 6, 9, 12, 15]\nc0 = r'$\\log_{10} c_8$'\nc0_ticks = [0., 1.0, 2.0, 3.0, 4.0]\ndelta_power_law_index = r'$\\Delta \\alpha$'\ndpli_ticks = [-0.6, -0.3, 0., 0.3, 0.6, 0.9]\nsigma_sub = r'$\\Sigma_{\\rm{sub}} \\ \\left[\\rm{kpc^{-2}}\\right]$'\nsigma_sub_ticks = [0., 0.025, 0.05, 0.075, 0.1]\ndelta_LOS = r'$\\delta_{\\rm{LOS}}$'\ndlos_ticks = [0.0, 0.5, 1., 1.5, 2., 2.5]\nticksize = 14\nlabelsize = 18\nrotation = 40\n\naxes[5].set_ylabel(beta, fontsize=labelsize)\naxes[5].set_yticks(beta_ticks)\naxes[5].set_yticklabels(beta_ticks, fontsize=ticksize)\n\naxes[10].set_ylabel(c0, fontsize=labelsize)\naxes[10].set_yticks(c0_ticks)\naxes[10].set_yticklabels(c0_ticks, fontsize=ticksize)\n\naxes[15].set_ylabel(delta_power_law_index, fontsize=labelsize)\naxes[15].set_yticks(dpli_ticks)\naxes[15].set_yticklabels(dpli_ticks, fontsize=ticksize)\n\naxes[20].set_ylabel(sigma_sub, fontsize=labelsize)\naxes[20].set_yticks(sigma_sub_ticks)\naxes[20].set_yticklabels(sigma_sub_ticks, fontsize=ticksize)\n\naxes[20].set_xlabel(delta_LOS, fontsize=labelsize)\naxes[20].set_xticks(dlos_ticks)\naxes[20].set_xticklabels(dlos_ticks, fontsize=ticksize, rotation=rotation)\n\naxes[21].set_xlabel(beta, fontsize=labelsize)\naxes[21].set_xticks(beta_ticks)\naxes[21].set_xticklabels(beta_ticks, fontsize=ticksize, rotation=rotation)\n\naxes[22].set_xlabel(c0, fontsize=labelsize)\naxes[22].set_xticks(c0_ticks)\naxes[22].set_xticklabels(c0_ticks, fontsize=ticksize, rotation=rotation)\n\n\naxes[23].set_xlabel(delta_power_law_index, fontsize=labelsize)\naxes[23].set_xticks(dpli_ticks)\naxes[23].set_xticklabels(dpli_ticks, fontsize=ticksize, rotation=rotation)\n\naxes[24].set_xlabel(sigma_sub, fontsize=labelsize)\naxes[24].set_xticks(sigma_sub_ticks)\naxes[24].set_xticklabels(sigma_sub_ticks, fontsize=ticksize, rotation=rotation)\n\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nfrom mpl_toolkits.axes_grid1.inset_locator import inset_axes\n\nax_idx = 9\naxins1 = inset_axes(axes[ax_idx],\n width=\"300%\", # width 
= 50% of parent_bbox width\n height=\"15%\", # height : 5%\n loc='upper right')\nempty = np.zeros((20, 20))\nempty[0,0] = 1\n\nim1 = axes[ax_idx].imshow(empty, interpolation='None', cmap=cmap)\ncb = fig.colorbar(im1, cax=axins1, orientation=\"horizontal\", ticks=[0, 0.25, 0.5, 0.75, 1])\naxes[ax_idx].set_visible(False)\ncb.set_label('probability', fontsize=15)\n#plt.savefig('./figures/lensing_likelihood.pdf')\n", "/Users/danielgilman/.local/lib/python3.6/site-packages/matplotlib/contour.py:1173: UserWarning: No contour levels were found within the data range.\n warnings.warn(\"No contour levels were found\"\n" ] ], [ [ "### Likelihood with a prior\n\nNow we show the likelihood after adding the prior coupling $\\Sigma_{\\rm{sub}}$ to $\\delta_{LOS}$, assuming $\\Sigma_{\\rm{sub}} = 0.05 \\rm{kpc^{-1}}$ in $\\Lambda$CDM, corresponding to doubly efficient tidal disruption of halos between in the Milky Way relative to massive ellipticals", "_____no_output_____" ] ], [ [ "fig = plt.figure()\ntriangle_plot = TrianglePlot([likelihood])\ntriangle_plot.set_cmap(cmap, marginal_col='k')\ntriangle_plot.truth_color = 'k'\ntruths= {'sigma_sub': 1.05, 'LOS_normalization': 1., 'beta': 0.85, 'log10c0': np.log10(18.5), 'delta_power_law_index': 0.}\naxes = triangle_plot.make_triplot(filled_contours=False, show_intervals=False, show_contours=True,\n contour_levels=[0.32], contour_colors=['k', 'k'],\n display_params=['LOS_normalization', 'beta', 'log10c0', 'delta_power_law_index'],\n truths=truths)\n\naxes[4].set_ylabel(beta, fontsize=labelsize)\naxes[4].set_yticks(beta_ticks)\naxes[4].set_yticklabels(beta_ticks, fontsize=ticksize)\n\naxes[8].set_ylabel(c0, fontsize=labelsize)\naxes[8].set_yticks(c0_ticks)\naxes[8].set_yticklabels(c0_ticks, fontsize=ticksize)\n\naxes[12].set_ylabel(delta_power_law_index, fontsize=labelsize)\naxes[12].set_yticks(dpli_ticks)\naxes[12].set_yticklabels(dpli_ticks, fontsize=ticksize)\n\naxes[12].set_xlabel(delta_LOS, fontsize=labelsize)\naxes[12].set_xticks(dlos_ticks)\naxes[12].set_xticklabels(dlos_ticks, fontsize=ticksize, rotation=rotation)\n\naxes[13].set_xlabel(beta, fontsize=labelsize)\naxes[13].set_xticks(beta_ticks)\naxes[13].set_xticklabels(beta_ticks, fontsize=ticksize, rotation=rotation)\n\naxes[14].set_xlabel(c0, fontsize=labelsize)\naxes[14].set_xticks(c0_ticks)\naxes[14].set_xticklabels(c0_ticks, fontsize=ticksize, rotation=rotation)\n\naxes[15].set_xlabel(delta_power_law_index, fontsize=labelsize)\naxes[15].set_xticks(dpli_ticks)\naxes[15].set_xticklabels(dpli_ticks, fontsize=ticksize, rotation=rotation)\n\naxes[2].annotate(r'$\\Sigma_{\\rm{sub(predicted)}} = 0.05 \\rm{kpc^{-2}}$', fontsize=22,\n xy=(0.26, 0.1), xycoords='axes fraction')\nax_idx = 7\naxins1 = inset_axes(axes[ax_idx],\n width=\"200%\", # width = 50% of parent_bbox width\n height=\"10%\", # height : 5%\n loc='upper right')\nempty = np.zeros((20, 20))\nempty[0,0] = 1\n\nim1 = axes[ax_idx].imshow(empty, interpolation='None', cmap=cmap)\ncb = fig.colorbar(im1, cax=axins1, orientation=\"horizontal\", ticks=[0, 0.25, 0.5, 0.75, 1])\naxes[ax_idx].set_visible(False)\ncb.set_label('probability', fontsize=15)\n\n#plt.savefig('./figures/lensing_likelihood_w.pdf')", "_____no_output_____" ] ], [ [ "## Systematic modeling errors\n\nWe allow for systematic errors in the model by changing the internal mapping between the parameters describing the mass function and concentration-mass relation", "_____no_output_____" ] ], [ [ "error_type = 'INTERPOLATED_GRID'\n\nif error_type == 'INTERPOLATED_GRID':\n \n f = 
open('./systematic_error_interpolations/systematic_error_interpolation_lowfit_'+mass_function_model+'_pivot'+pivot_string+'_3D', 'rb')\n systematic_interp_lowfit = pickle.load(f)\n f.close()\n\n f = open('./systematic_error_interpolations/systematic_error_interpolation_highfit_'+mass_function_model+'_pivot'+pivot_string+'_3D', 'rb')\n systematic_interp_highfit = pickle.load(f)\n f.close()\n \nelif error_type == 'RELATIVE':\n delta_delta_los = 0.1\n delta_beta = 0.2\n delta_c8 = 0.2\n delta_delta_alpha = 0.05", "_____no_output_____" ] ], [ [ "## Final setup", "_____no_output_____" ] ], [ [ "delta_los_range = [0., 2.5]\nbeta_range = [-0.2, 15.]\nlog10c0_range = [0., 4.]\ndelta_alpha_range = [-0.6, 0.9]\nsigma_sub_range = [0., 0.125]\nparam_ranges_lensing = [delta_los_range, beta_range, log10c0_range, delta_alpha_range, sigma_sub_range]\nn_draw = 50000\nextrapolate_ranges = [[0., 2.5], \n [-0.2, 15.],\n [0., 4.0], \n delta_alpha_range,\n sigma_sub_range]\n\nparam_ranges_pk = [[0.4645, 1.4645], [-0.2, 0.2], [-0.018, 0.018]]\narun_ticks = [-0.16, -0.08, 0.00, 0.08, 0.16]\nbrun_ticks = [-0.014, -0.007, 0.000, 0.007, 0.014]\nns_ticks = [0.5645, 0.9645, 1.3645]", "_____no_output_____" ] ], [ [ "## Compute the likelihood of the power spectrum parameters\n\nWe can compute the likelihood the parameters describing $P\\left(k\\right)$, adding systematic models errors by hand", "_____no_output_____" ] ], [ [ "if error_type == 'INTERPOLATED_GRID':\n \n samples_no_sys, like_no_sys = sample_power_spectra_with_systematic_interp(n_draw, param_ranges_pk, param_ranges_lensing, structure_formation_interp_As, interpolated_lens_likelihood,\n systematic_interp_highfit, extrapolate=extrapolate_likelihood, extrapolate_ranges=extrapolate_ranges, log10c8_sys=False, delta_los_sys=False,\n delta_alpha_sys=False, beta_sys=False, three_D=True)\n \n samples_sys1, like_sys1 = sample_power_spectra_with_systematic_interp(n_draw, param_ranges_pk, param_ranges_lensing, structure_formation_interp_As, interpolated_lens_likelihood,\n systematic_interp_lowfit, extrapolate=extrapolate_likelihood, extrapolate_ranges=extrapolate_ranges, three_D=True)\n\n samples_sys2, like_sys2 = sample_power_spectra_with_systematic_interp(n_draw, param_ranges_pk, param_ranges_lensing, structure_formation_interp_As, interpolated_lens_likelihood,\n systematic_interp_highfit, extrapolate=extrapolate_likelihood, extrapolate_ranges=extrapolate_ranges, three_D=True)\n\n\n samples_sys_noamp_1, like_sys_noamp_1 = sample_power_spectra_with_systematic_interp(n_draw, param_ranges_pk, param_ranges_lensing, structure_formation_interp_As, interpolated_lens_likelihood,\n systematic_interp_lowfit, extrapolate=extrapolate_likelihood, extrapolate_ranges=extrapolate_ranges, log10c8_sys=False, delta_los_sys=False, three_D=True)\n\n samples_sys_noamp_2, like_sys_noamp_2 = sample_power_spectra_with_systematic_interp(n_draw, param_ranges_pk, param_ranges_lensing, structure_formation_interp_As, interpolated_lens_likelihood,\n systematic_interp_highfit, extrapolate=extrapolate_likelihood, extrapolate_ranges=extrapolate_ranges, log10c8_sys=False, delta_los_sys=False, three_D=True)\n\n samples_sys_noslope, like_sys_noslope = sample_power_spectra_with_systematic_interp(n_draw, param_ranges_pk, param_ranges_lensing, structure_formation_interp_As, interpolated_lens_likelihood,\n systematic_interp_lowfit, extrapolate=extrapolate_likelihood, extrapolate_ranges=extrapolate_ranges, delta_alpha_sys=False, beta_sys=False, three_D=True)\n \nelif error_type == 'RELATIVE':\n \n 
samples_sys1, like_sys1 = sample_power_spectra(n_draw, param_ranges_pk, param_ranges_lensing, structure_formation_interp_As, interpolated_lens_likelihood,\n delta_c8, delta_beta, delta_delta_los, delta_delta_alpha, extrapolate=extrapolate_likelihood, extrapolate_ranges=extrapolate_ranges)\n\n samples_sys2, like_sys2 = sample_power_spectra(n_draw, param_ranges_pk, param_ranges_lensing, structure_formation_interp_As, interpolated_lens_likelihood,\n -delta_c8, -delta_beta, -delta_delta_los, -delta_delta_alpha, extrapolate=extrapolate_likelihood, extrapolate_ranges=extrapolate_ranges)\n\n samples_no_sys, like_no_sys = sample_power_spectra(n_draw, param_ranges_pk, param_ranges_lensing, structure_formation_interp_As, interpolated_lens_likelihood,\n 0., 0., 0., 0., extrapolate=extrapolate_likelihood, extrapolate_ranges=extrapolate_ranges)\n\n samples_sys_noamp_1, like_sys_noamp_1 = sample_power_spectra(n_draw, param_ranges_pk, param_ranges_lensing, structure_formation_interp_As, interpolated_lens_likelihood,\n 0., delta_beta, 0., delta_delta_alpha, extrapolate=extrapolate_likelihood, extrapolate_ranges=extrapolate_ranges)\n\n samples_sys_noamp_2, like_sys_noamp_2 = sample_power_spectra(n_draw, param_ranges_pk, param_ranges_lensing, structure_formation_interp_As, interpolated_lens_likelihood,\n 0., -delta_beta, 0., delta_delta_alpha, extrapolate=extrapolate_likelihood, extrapolate_ranges=extrapolate_ranges)\n\n samples_sys_noslope, like_sys_noslope = sample_power_spectra(n_draw, param_ranges_pk, param_ranges_lensing, structure_formation_interp_As, interpolated_lens_likelihood,\n -delta_c8, 0., 0., 0., extrapolate=extrapolate_likelihood, extrapolate_ranges=extrapolate_ranges)\n \n samples_sys_noslope_2, like_sys_noslope_2 = sample_power_spectra(n_draw, param_ranges_pk, param_ranges_lensing, structure_formation_interp_As, interpolated_lens_likelihood,\n delta_c8, 0., 0., 0., extrapolate=extrapolate_likelihood, extrapolate_ranges=extrapolate_ranges)\n \n \n ", "_____no_output_____" ] ], [ [ "## Plot the likelihood of the parameters describing the power spectrum", "_____no_output_____" ] ], [ [ "nbins = 20\nparam_names_pk = [r'$n_s$', r'$a_{\\rm{run}}$', r'$b_{\\rm{run}}$'] \n\nsamples_marginalized = np.vstack((np.vstack((np.vstack((np.vstack((np.vstack((samples_no_sys, samples_sys1)), samples_sys2)), samples_sys_noamp_1)), samples_sys_noamp_2)), samples_sys_noslope))\nlikelihood_marginalized = np.append(np.append(np.append(np.append(np.append(like_no_sys, like_sys1), like_sys2), like_sys_noamp_1), like_sys_noamp_2), like_sys_noslope)\n# samples_marginalized = samples_no_sys\n# likelihood_marginalized = like_no_sys\ndensity_marginalized = DensitySamples(samples_marginalized, param_names_pk, likelihood_marginalized, \n param_ranges_pk, nbins=nbins, use_kde=False, bandwidth_scale=1.)\npk_likelihood_marginalized = IndepdendentLikelihoods([density_marginalized])\n\ntriplot = TrianglePlot([pk_likelihood_marginalized])\ncmap = 'jet'\ntriplot.set_cmap(cmap, marginal_col='k')\ntriplot.truth_color = 'k'\ntruths= {r'$n_s$': 0.9645, r'$a_{\\rm{run}}$': 0., r'$b_{\\rm{run}}$': 0.}\naxes = triplot.make_triplot(filled_contours=False, show_intervals=False, show_contours=True,\n contour_levels=[0.32], contour_colors=['k', 'k'])\n\naxes[3].set_yticks(arun_ticks)\naxes[3].set_yticklabels(arun_ticks, fontsize=ticksize)\n\naxes[6].set_yticks(brun_ticks)\naxes[6].set_yticklabels(brun_ticks, fontsize=ticksize)\n\naxes[6].set_xticks(ns_ticks)\naxes[6].set_xticklabels(ns_ticks, 
fontsize=ticksize)\n\naxes[7].set_xticks(arun_ticks)\naxes[7].set_xticklabels(arun_ticks, fontsize=ticksize)\n\naxes[8].set_xticks(brun_ticks)\naxes[8].set_xticklabels(brun_ticks, fontsize=ticksize)\n\nax_idx = 1\naxins1 = inset_axes(axes[ax_idx],\n width=\"200%\", # width = 50% of parent_bbox width\n height=\"10%\", # height : 5%\n loc=6)\nempty = np.zeros((20, 20))\nempty[0,0] = 1\n\nim1 = axes[ax_idx].imshow(empty, interpolation='None', cmap=cmap)\ncb = fig.colorbar(im1, cax=axins1, orientation=\"horizontal\", ticks=[0, 0.25, 0.5, 0.75, 1])\naxes[ax_idx].set_visible(False)\ncb.set_label('probability', fontsize=15)\nplt.savefig('./figures/qP_likelihood_'+mass_function_model+'_pivot'+pivot_string+'.pdf')\n\nimport pickle\nf = open('./interpolated_pq_likelihoods/Pk_likelihood_'+mass_function_model+'_pivot'+pivot_string, 'wb')\npk_likelihood_marginalized_interp = InterpolatedLikelihood(pk_likelihood_marginalized, param_names_pk, param_ranges_pk)\npickle.dump(pk_likelihood_marginalized_interp, f)\n", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb75fcf5f4b678d95d055f00fab791a35494f0a5
10,860
ipynb
Jupyter Notebook
Lectures/Lecture 12.ipynb
JuliaTagBot/ES313.jl
3601743ca05bdb2562a26efd8b809c1a4f78c7b1
[ "MIT" ]
null
null
null
Lectures/Lecture 12.ipynb
JuliaTagBot/ES313.jl
3601743ca05bdb2562a26efd8b809c1a4f78c7b1
[ "MIT" ]
null
null
null
Lectures/Lecture 12.ipynb
JuliaTagBot/ES313.jl
3601743ca05bdb2562a26efd8b809c1a4f78c7b1
[ "MIT" ]
null
null
null
29.193548
167
0.474586
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
cb75fd386af52650d97b5d44f2753e1cd65475b7
161,069
ipynb
Jupyter Notebook
notebooks/020_joyplot.ipynb
AltamarMx/mas20problemas
795b1589e10927f8bdbedb44d0e587a0ac4dc049
[ "MIT" ]
5
2022-01-25T06:06:24.000Z
2022-02-14T04:12:50.000Z
notebooks/020_joyplot.ipynb
AltamarMx/mas20problemas
795b1589e10927f8bdbedb44d0e587a0ac4dc049
[ "MIT" ]
null
null
null
notebooks/020_joyplot.ipynb
AltamarMx/mas20problemas
795b1589e10927f8bdbedb44d0e587a0ac4dc049
[ "MIT" ]
2
2022-01-24T19:40:27.000Z
2022-01-24T21:09:59.000Z
469.588921
95,524
0.927292
[ [ [ "import joypy\nimport seaborn as sns\nimport pandas as pd\nfrom matplotlib import cm\n", "_____no_output_____" ], [ "df = pd.read_csv('../data/001_raw/temixco.csv',index_col=0,parse_dates=True)", "_____no_output_____" ] ], [ [ "![image-2.png](attachment:image-2.png)", "_____no_output_____" ] ], [ [ "df['Year'] = df.index.year\ndf['Month'] = df.index.month\ndf['Day'] = df.index.day_of_year", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "fig, axes = joypy.joyplot(df, by=\"Month\", column=\"To\", range_style='own', \n grid=\"y\", linewidth=1, legend=False, figsize=(12,5),colormap=cm.autumn_r)\n", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
cb75ff1c0b3ef09b259698143d504c39e5f87af8
2,835
ipynb
Jupyter Notebook
ML/Face recognition/Digital_makeup/Digital Makeup.ipynb
richeyphu/ITE-425
4210b692609fa04cdd00b76a45d9e1e5baacd6e3
[ "MIT" ]
null
null
null
ML/Face recognition/Digital_makeup/Digital Makeup.ipynb
richeyphu/ITE-425
4210b692609fa04cdd00b76a45d9e1e5baacd6e3
[ "MIT" ]
null
null
null
ML/Face recognition/Digital_makeup/Digital Makeup.ipynb
richeyphu/ITE-425
4210b692609fa04cdd00b76a45d9e1e5baacd6e3
[ "MIT" ]
null
null
null
23.625
116
0.560494
[ [ [ "# pip install face_recognition", "_____no_output_____" ], [ "from PIL import Image, ImageDraw\nimport face_recognition", "_____no_output_____" ], [ "# Load the jpg file into a numpy array\nimage = face_recognition.load_image_file(\"people1.jpg\")", "_____no_output_____" ], [ "# Find all facial features in all the faces in the image\nface_landmarks_list = face_recognition.face_landmarks(image)", "_____no_output_____" ], [ "# Load the image into a Python Image Library object so that we can draw on top of it and display it\npil_image = Image.fromarray(image)", "_____no_output_____" ], [ "# Create a PIL drawing object to be able to draw lines later\nd = ImageDraw.Draw(pil_image, 'RGBA')", "_____no_output_____" ], [ "for face_landmarks in face_landmarks_list:\n # The face landmark detection model returns these features:\n # - chin, left_eyebrow, right_eyebrow, nose_bridge, nose_tip, left_eye, right_eye, top_lip, bottom_lip\n\n # Draw a line over the eyebrows\n d.line(face_landmarks['left_eyebrow'], fill=(128, 0, 128, 100), width=3)\n d.line(face_landmarks['right_eyebrow'], fill=(128, 0, 128, 100), width=3)\n\n # Draw over the lips\n d.polygon(face_landmarks['top_lip'], fill=(128, 0, 128, 100))\n d.polygon(face_landmarks['bottom_lip'], fill=(128, 0, 128, 100))", "_____no_output_____" ], [ "# Show the final image\npil_image.show()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb7600a9707718a0ae380a7f12e5444bf7e64b71
31,319
ipynb
Jupyter Notebook
ICP1/Part 3 - Training Neural Networks (Exercises).ipynb
night2wolf/CS490
b019cf2e76468d0b6be33a3c003aa82fa5c73e27
[ "Apache-2.0" ]
null
null
null
ICP1/Part 3 - Training Neural Networks (Exercises).ipynb
night2wolf/CS490
b019cf2e76468d0b6be33a3c003aa82fa5c73e27
[ "Apache-2.0" ]
null
null
null
ICP1/Part 3 - Training Neural Networks (Exercises).ipynb
night2wolf/CS490
b019cf2e76468d0b6be33a3c003aa82fa5c73e27
[ "Apache-2.0" ]
null
null
null
53.905336
7,908
0.702609
[ [ [ "# Training Neural Networks\n\nThe network we built in the previous part isn't so smart, it doesn't know anything about our handwritten digits. Neural networks with non-linear activations work like universal function approximators. There is some function that maps your input to the output. For example, images of handwritten digits to class probabilities. The power of neural networks is that we can train them to approximate this function, and basically any function given enough data and compute time.\n\n<img src=\"assets/function_approx.png\" width=500px>\n\nAt first the network is naive, it doesn't know the function mapping the inputs to the outputs. We train the network by showing it examples of real data, then adjusting the network parameters such that it approximates this function.\n\nTo find these parameters, we need to know how poorly the network is predicting the real outputs. For this we calculate a **loss function** (also called the cost), a measure of our prediction error. For example, the mean squared loss is often used in regression and binary classification problems\n\n$$\n\\large \\ell = \\frac{1}{2n}\\sum_i^n{\\left(y_i - \\hat{y}_i\\right)^2}\n$$\n\nwhere $n$ is the number of training examples, $y_i$ are the true labels, and $\\hat{y}_i$ are the predicted labels.\n\nBy minimizing this loss with respect to the network parameters, we can find configurations where the loss is at a minimum and the network is able to predict the correct labels with high accuracy. We find this minimum using a process called **gradient descent**. The gradient is the slope of the loss function and points in the direction of fastest change. To get to the minimum in the least amount of time, we then want to follow the gradient (downwards). You can think of this like descending a mountain by following the steepest slope to the base.\n\n<img src='assets/gradient_descent.png' width=350px>", "_____no_output_____" ], [ "## Backpropagation\n\nFor single layer networks, gradient descent is straightforward to implement. However, it's more complicated for deeper, multilayer neural networks like the one we've built. Complicated enough that it took about 30 years before researchers figured out how to train multilayer networks.\n\nTraining multilayer networks is done through **backpropagation** which is really just an application of the chain rule from calculus. It's easiest to understand if we convert a two layer network into a graph representation.\n\n<img src='assets/backprop_diagram.png' width=550px>\n\nIn the forward pass through the network, our data and operations go from bottom to top here. We pass the input $x$ through a linear transformation $L_1$ with weights $W_1$ and biases $b_1$. The output then goes through the sigmoid operation $S$ and another linear transformation $L_2$. Finally we calculate the loss $\\ell$. We use the loss as a measure of how bad the network's predictions are. The goal then is to adjust the weights and biases to minimize the loss.\n\nTo train the weights with gradient descent, we propagate the gradient of the loss backwards through the network. Each operation has some gradient between the inputs and outputs. As we send the gradients backwards, we multiply the incoming gradient with the gradient for the operation. 
Mathematically, this is really just calculating the gradient of the loss with respect to the weights using the chain rule.\n\n$$\n\\large \\frac{\\partial \\ell}{\\partial W_1} = \\frac{\\partial L_1}{\\partial W_1} \\frac{\\partial S}{\\partial L_1} \\frac{\\partial L_2}{\\partial S} \\frac{\\partial \\ell}{\\partial L_2}\n$$\n\n**Note:** I'm glossing over a few details here that require some knowledge of vector calculus, but they aren't necessary to understand what's going on.\n\nWe update our weights using this gradient with some learning rate $\\alpha$. \n\n$$\n\\large W^\\prime_1 = W_1 - \\alpha \\frac{\\partial \\ell}{\\partial W_1}\n$$\n\nThe learning rate $\\alpha$ is set such that the weight update steps are small enough that the iterative method settles in a minimum.", "_____no_output_____" ], [ "## Losses in PyTorch\n\nLet's start by seeing how we calculate the loss with PyTorch. Through the `nn` module, PyTorch provides losses such as the cross-entropy loss (`nn.CrossEntropyLoss`). You'll usually see the loss assigned to `criterion`. As noted in the last part, with a classification problem such as MNIST, we're using the softmax function to predict class probabilities. With a softmax output, you want to use cross-entropy as the loss. To actually calculate the loss, you first define the criterion then pass in the output of your network and the correct labels.\n\nSomething really important to note here. Looking at [the documentation for `nn.CrossEntropyLoss`](https://pytorch.org/docs/stable/nn.html#torch.nn.CrossEntropyLoss),\n\n> This criterion combines `nn.LogSoftmax()` and `nn.NLLLoss()` in one single class.\n>\n> The input is expected to contain scores for each class.\n\nThis means we need to pass in the raw output of our network into the loss, not the output of the softmax function. This raw output is usually called the *logits* or *scores*. We use the logits because softmax gives you probabilities which will often be very close to zero or one but floating-point numbers can't accurately represent values near zero or one ([read more here](https://docs.python.org/3/tutorial/floatingpoint.html)). 
It's usually best to avoid doing calculations with probabilities, typically we use log-probabilities.", "_____no_output_____" ] ], [ [ "import torch\nfrom torch import nn\nimport torch.nn.functional as F\nfrom torchvision import datasets, transforms\n\n# Define a transform to normalize the data\ntransform = transforms.Compose([transforms.ToTensor(),\n transforms.Normalize((0.5,), (0.5,)),\n ])\n# Download and load the training data\ntrainset = datasets.MNIST('~/.pytorch/MNIST_data/', download=True, train=True, transform=transform)\ntrainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)", "_____no_output_____" ] ], [ [ "### Note\nIf you haven't seen `nn.Sequential` yet, please finish the end of the Part 2 notebook.", "_____no_output_____" ] ], [ [ "# Build a feed-forward network\nmodel = nn.Sequential(nn.Linear(784, 128),\n nn.ReLU(),\n nn.Linear(128, 64),\n nn.ReLU(),\n nn.Linear(64, 10))\n\n# Define the loss\ncriterion = nn.CrossEntropyLoss()\n\n# Get our data\nimages, labels = next(iter(trainloader))\n# Flatten images\nimages = images.view(images.shape[0], -1)\n\n# Forward pass, get our logits\nlogits = model(images)\n# Calculate the loss with the logits and the labels\nloss = criterion(logits, labels)\n\nprint(loss)", "tensor(2.3058, grad_fn=<NllLossBackward>)\n" ] ], [ [ "In my experience it's more convenient to build the model with a log-softmax output using `nn.LogSoftmax` or `F.log_softmax` ([documentation](https://pytorch.org/docs/stable/nn.html#torch.nn.LogSoftmax)). Then you can get the actual probabilities by taking the exponential `torch.exp(output)`. With a log-softmax output, you want to use the negative log likelihood loss, `nn.NLLLoss` ([documentation](https://pytorch.org/docs/stable/nn.html#torch.nn.NLLLoss)).\n\n>**Exercise:** Build a model that returns the log-softmax as the output and calculate the loss using the negative log likelihood loss. Note that for `nn.LogSoftmax` and `F.log_softmax` you'll need to set the `dim` keyword argument appropriately. `dim=0` calculates softmax across the rows, so each column sums to 1, while `dim=1` calculates across the columns so each row sums to 1. Think about what you want the output to be and choose `dim` appropriately.", "_____no_output_____" ] ], [ [ "### Import needed modules\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nfrom torchvision import datasets, transforms\n\n# Define a transform to normalize the data\ntransform = transforms.Compose([transforms.ToTensor(),\n transforms.Normalize((0.5,), (0.5,)),\n ])\n# Download and load the training data\ntrainset = datasets.MNIST('~/.pytorch/MNIST_data/', download=True, train=True, transform=transform)\ntrainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)\n################################################\n\n# TODO: Build a feed-forward network\nmodel = nn.Sequential(nn.Linear(784, 128),\n nn.LogSoftmax(dim=1),\n nn.Linear(128, 64),\n nn.LogSoftmax(dim=1),\n nn.Linear(64, 10))\n\n# TODO: Define the loss\ncriterion = nn.NLLLoss()\n\n### Run this to check your work\n# Get our data\nimages, labels = next(iter(trainloader))\n# Flatten images\nimages = images.view(images.shape[0], -1)\n\n# Forward pass, get our logits\nlogits = model(images)\n# Calculate the loss with the logits and the labels\nloss = criterion(logits, labels)\n\nprint(loss)", "tensor(2.6158, grad_fn=<NllLossBackward>)\n" ] ], [ [ "## Autograd\n\nNow that we know how to calculate a loss, how do we use it to perform backpropagation? 
Torch provides a module, `autograd`, for automatically calculating the gradients of tensors. We can use it to calculate the gradients of all our parameters with respect to the loss. Autograd works by keeping track of operations performed on tensors, then going backwards through those operations, calculating gradients along the way. To make sure PyTorch keeps track of operations on a tensor and calculates the gradients, you need to set `requires_grad = True` on a tensor. You can do this at creation with the `requires_grad` keyword, or at any time with `x.requires_grad_(True)`.\n\nYou can turn off gradients for a block of code with the `torch.no_grad()` content:\n```python\nx = torch.zeros(1, requires_grad=True)\n>>> with torch.no_grad():\n... y = x * 2\n>>> y.requires_grad\nFalse\n```\n\nAlso, you can turn on or off gradients altogether with `torch.set_grad_enabled(True|False)`.\n\nThe gradients are computed with respect to some variable `z` with `z.backward()`. This does a backward pass through the operations that created `z`.", "_____no_output_____" ] ], [ [ "x = torch.randn(2,2, requires_grad=True)\nprint(x)", "_____no_output_____" ], [ "y = x**2\nprint(y)", "_____no_output_____" ] ], [ [ "Below we can see the operation that created `y`, a power operation `PowBackward0`.", "_____no_output_____" ] ], [ [ "## grad_fn shows the function that generated this variable\nprint(y.grad_fn)", "_____no_output_____" ] ], [ [ "The autograd module keeps track of these operations and knows how to calculate the gradient for each one. In this way, it's able to calculate the gradients for a chain of operations, with respect to any one tensor. Let's reduce the tensor `y` to a scalar value, the mean.", "_____no_output_____" ] ], [ [ "z = y.mean()\nprint(z)", "_____no_output_____" ] ], [ [ "You can check the gradients for `x` and `y` but they are empty currently.", "_____no_output_____" ] ], [ [ "print(x.grad)", "_____no_output_____" ] ], [ [ "To calculate the gradients, you need to run the `.backward` method on a Variable, `z` for example. This will calculate the gradient for `z` with respect to `x`\n\n$$\n\\frac{\\partial z}{\\partial x} = \\frac{\\partial}{\\partial x}\\left[\\frac{1}{n}\\sum_i^n x_i^2\\right] = \\frac{x}{2}\n$$", "_____no_output_____" ] ], [ [ "z.backward()\nprint(x.grad)\nprint(x/2)", "_____no_output_____" ] ], [ [ "These gradients calculations are particularly useful for neural networks. For training we need the gradients of the cost with respect to the weights. With PyTorch, we run data forward through the network to calculate the loss, then, go backwards to calculate the gradients with respect to the loss. Once we have the gradients we can make a gradient descent step. ", "_____no_output_____" ], [ "## Loss and Autograd together\n\nWhen we create a network with PyTorch, all of the parameters are initialized with `requires_grad = True`. This means that when we calculate the loss and call `loss.backward()`, the gradients for the parameters are calculated. These gradients are used to update the weights with gradient descent. 
Below you can see an example of calculating the gradients using a backwards pass.", "_____no_output_____" ] ], [ [ "# Build a feed-forward network\nmodel = nn.Sequential(nn.Linear(784, 128),\n nn.ReLU(),\n nn.Linear(128, 64),\n nn.ReLU(),\n nn.Linear(64, 10),\n nn.LogSoftmax(dim=1))\n\ncriterion = nn.NLLLoss()\nimages, labels = next(iter(trainloader))\nimages = images.view(images.shape[0], -1)\n\nlogits = model(images)\nloss = criterion(logits, labels)", "_____no_output_____" ], [ "print('Before backward pass: \\n', model[0].weight.grad)\n\nloss.backward()\n\nprint('After backward pass: \\n', model[0].weight.grad)", "_____no_output_____" ] ], [ [ "## Training the network!\n\nThere's one last piece we need to start training, an optimizer that we'll use to update the weights with the gradients. We get these from PyTorch's [`optim` package](https://pytorch.org/docs/stable/optim.html). For example we can use stochastic gradient descent with `optim.SGD`. You can see how to define an optimizer below.", "_____no_output_____" ] ], [ [ "from torch import optim\n\n# Optimizers require the parameters to optimize and a learning rate\noptimizer = optim.SGD(model.parameters(), lr=0.01)", "_____no_output_____" ] ], [ [ "Now we know how to use all the individual parts so it's time to see how they work together. Let's consider just one learning step before looping through all the data. The general process with PyTorch:\n\n* Make a forward pass through the network \n* Use the network output to calculate the loss\n* Perform a backward pass through the network with `loss.backward()` to calculate the gradients\n* Take a step with the optimizer to update the weights\n\nBelow I'll go through one training step and print out the weights and gradients so you can see how it changes. Note that I have a line of code `optimizer.zero_grad()`. When you do multiple backwards passes with the same parameters, the gradients are accumulated. This means that you need to zero the gradients on each training pass or you'll retain gradients from previous training batches.", "_____no_output_____" ] ], [ [ "print('Initial weights - ', model[0].weight)\n\nimages, labels = next(iter(trainloader))\nimages.resize_(64, 784)\n\n# Clear the gradients, do this because gradients are accumulated\noptimizer.zero_grad()\n\n# Forward pass, then backward pass, then update weights\noutput = model(images)\nloss = criterion(output, labels)\nloss.backward()\nprint('Gradient -', model[0].weight.grad)", "_____no_output_____" ], [ "# Take an update step and few the new weights\noptimizer.step()\nprint('Updated weights - ', model[0].weight)", "_____no_output_____" ] ], [ [ "### Training for real\n\nNow we'll put this algorithm into a loop so we can go through all the images. Some nomenclature, one pass through the entire dataset is called an *epoch*. So here we're going to loop through `trainloader` to get our training batches. For each batch, we'll doing a training pass where we calculate the loss, do a backwards pass, and update the weights.\n\n>**Exercise:** Implement the training pass for our network. 
If you implemented it correctly, you should see the training loss drop with each epoch.", "_____no_output_____" ] ], [ [ "# Import needed modules\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nfrom torchvision import datasets, transforms\nfrom torch import optim\n\n# Define a transform to normalize the data\ntransform = transforms.Compose([transforms.ToTensor(),\n transforms.Normalize((0.5,), (0.5,)),\n ])\n# Download and load the training data\ntrainset = datasets.MNIST('~/.pytorch/MNIST_data/',\n download=True, train=True, transform=transform)\ntrainloader = torch.utils.data.DataLoader(\n trainset, batch_size=64, shuffle=True)\n################################################\n## Your solution here\n\nmodel = nn.Sequential(nn.Linear(784, 128),\n nn.ReLU(),\n nn.Linear(128, 64),\n nn.ReLU(),\n nn.Linear(64, 10),\n nn.LogSoftmax(dim=1))\n\ncriterion = nn.NLLLoss()\noptimizer = optim.SGD(model.parameters(), lr=0.003)\n\nepochs = 5\nfor e in range(epochs):\n running_loss = 0\n for images, labels in trainloader:\n # Flatten MNIST images into a 784 long vector\n images = images.view(images.shape[0], -1)\n optimizer.zero_grad()\n # TODO: Training pass\n logits = model(images)\n loss = criterion(logits,labels)\n loss.backward()\n running_loss += loss.item()\n optimizer.step()\n else:\n print(f\"Training loss: {running_loss/len(trainloader)}\")", "Training loss: 1.8140400818416051\nTraining loss: 0.7986363140123485\nTraining loss: 0.5134363588430225\nTraining loss: 0.42579773433808327\nTraining loss: 0.3839154780578257\n" ] ], [ [ "With the network trained, we can check out it's predictions.", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport helper\n\nimages, labels = next(iter(trainloader))\n\nimg = images[0].view(1, 784)\n# Turn off gradients to speed up this part\nwith torch.no_grad():\n logps = model(img)\n\n# Output of the network are log-probabilities, need to take exponential for probabilities\nps = torch.exp(logps)\nhelper.view_classify(img.view(1, 28, 28), ps)", "_____no_output_____" ] ], [ [ "Now our network is brilliant. It can accurately predict the digits in our images. Next up you'll write the code for training a neural network on a more complex dataset.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
cb762b3543006157d2af5cef649df5a654b7954c
3,796
ipynb
Jupyter Notebook
Module2/Module2 - Lab3.ipynb
xoox/DAT210x
8d75eb7ae9d1d1dc1562bf36efbdc44aaabcd3c3
[ "MIT" ]
null
null
null
Module2/Module2 - Lab3.ipynb
xoox/DAT210x
8d75eb7ae9d1d1dc1562bf36efbdc44aaabcd3c3
[ "MIT" ]
null
null
null
Module2/Module2 - Lab3.ipynb
xoox/DAT210x
8d75eb7ae9d1d1dc1562bf36efbdc44aaabcd3c3
[ "MIT" ]
null
null
null
23.432099
371
0.567439
[ [ [ "# DAT210x - Programming with Python for DS", "_____no_output_____" ], [ "## Module2 - Lab3", "_____no_output_____" ] ], [ [ "# Import and alias Pandas\nimport pandas as pd", "_____no_output_____" ] ], [ [ "Often, you will want to load a dataset that is missing explicit header labels. You won't know if your data lacks headers or not unless you load it up and examine the headers to see if they make sense. Pandas by default reads in the first row of data as the header. If that isn't the case for your specific data set, you will lose your first data row. Be careful!\n\nLoad up the `Servo.data` dataset. Examine the headers, and adjust them as necessary, if need be.", "_____no_output_____" ] ], [ [ "df = pd.read_csv('Datasets/servo.data', header = None, names = ['motor', 'screw', 'pgain', 'vgain', 'class'])", "_____no_output_____" ] ], [ [ "Let's try experimenting with some slicing. Create a slice that contains all entries that have a vgain equal to 5. Then print the length of (# of samples in) that slice:", "_____no_output_____" ] ], [ [ "df1 = df[df.vgain == 5]\ndf1.count()", "_____no_output_____" ] ], [ [ "Create a slice that contains all entries having a motor equal to E and screw equal to E. Then print the length of (# of samples in) that slice:", "_____no_output_____" ] ], [ [ "df2 = df[(df.motor == 'E') & (df.screw == 'E')]\ndf2.count()", "_____no_output_____" ] ], [ [ "Create a slice that contains all entries having a pgain equal to 4. Use one of the various methods of finding the mean vgain value for the samples in that slice. Once you've found it, print it:", "_____no_output_____" ] ], [ [ "df3 = df[df.pgain == 4]\ndf3.describe()", "_____no_output_____" ] ], [ [ "Here's a bonus activity for you. See what happens when you display the `.dtypes` property of your dataframe!", "_____no_output_____" ] ], [ [ "df.dtypes", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb76338d67aef95a02e662fb486986ac572abc30
99,283
ipynb
Jupyter Notebook
lstm_xsin.ipynb
claytantor/tensorflow-lstm-regression
905193d43d6f44027293f97035ebdb5237844d78
[ "MIT" ]
2
2017-02-09T17:04:09.000Z
2017-05-24T07:41:39.000Z
lstm_xsin.ipynb
claytantor/tensorflow-lstm-regression
905193d43d6f44027293f97035ebdb5237844d78
[ "MIT" ]
null
null
null
lstm_xsin.ipynb
claytantor/tensorflow-lstm-regression
905193d43d6f44027293f97035ebdb5237844d78
[ "MIT" ]
4
2017-05-24T07:41:40.000Z
2022-01-06T07:05:56.000Z
380.394636
57,288
0.920198
[ [ [ "%matplotlib inline\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nfrom matplotlib import pyplot as plt\n\nfrom tensorflow.contrib import learn\nfrom sklearn.metrics import mean_squared_error\n\nfrom lstm import x_sin, generate_data, lstm_model", "_____no_output_____" ], [ "LOG_DIR = './ops_logs/x_sin'\nTIMESTEPS = 10\nRNN_LAYERS = [{'num_units': 10}, {'num_units': 5}]\nDENSE_LAYERS = None\nTRAINING_STEPS = 100000\nPRINT_STEPS = TRAINING_STEPS / 10\nBATCH_SIZE = 100", "_____no_output_____" ], [ "regressor = learn.Estimator(model_fn=lstm_model(TIMESTEPS, RNN_LAYERS, DENSE_LAYERS),\n model_dir=LOG_DIR)", "WARNING:tensorflow:Using default config.\n" ], [ "X, y = generate_data(x_sin, np.linspace(0, 100, 10000, dtype=np.float32), TIMESTEPS, seperate=False)\n# create a lstm instance and validation monitor\nvalidation_monitor = learn.monitors.ValidationMonitor(X['val'], y['val'],\n every_n_steps=PRINT_STEPS,\n early_stopping_rounds=10000)\nregressor.fit(X['train'], y['train'], \n monitors=[validation_monitor], \n batch_size=BATCH_SIZE,\n steps=TRAINING_STEPS)", "WARNING:tensorflow:Setting feature info to TensorSignature(dtype=tf.float32, shape=TensorShape([Dimension(None), Dimension(10), Dimension(1)]), is_sparse=False)\nWARNING:tensorflow:Setting targets info to TensorSignature(dtype=tf.float32, shape=TensorShape([Dimension(None)]), is_sparse=False)\nWARNING:tensorflow:Given features: Tensor(\"input:0\", shape=(?, 10, 1), dtype=float32), required signatures: TensorSignature(dtype=tf.float32, shape=TensorShape([Dimension(None), Dimension(10), Dimension(1)]), is_sparse=False).\nWARNING:tensorflow:Given targets: Tensor(\"output:0\", shape=(?,), dtype=float32), required signatures: TensorSignature(dtype=tf.float32, shape=TensorShape([Dimension(None)]), is_sparse=False).\nWARNING:tensorflow:Given features: Tensor(\"input:0\", shape=(?, 10, 1), dtype=float32), required signatures: TensorSignature(dtype=tf.float32, shape=TensorShape([Dimension(None), Dimension(10), Dimension(1)]), is_sparse=False).\nWARNING:tensorflow:Given targets: Tensor(\"output:0\", shape=(?,), dtype=float32), required signatures: TensorSignature(dtype=tf.float32, shape=TensorShape([Dimension(None)]), is_sparse=False).\nWARNING:tensorflow:Given features: Tensor(\"input:0\", shape=(?, 10, 1), dtype=float32), required signatures: TensorSignature(dtype=tf.float32, shape=TensorShape([Dimension(None), Dimension(10), Dimension(1)]), is_sparse=False).\nWARNING:tensorflow:Given targets: Tensor(\"output:0\", shape=(?,), dtype=float32), required signatures: TensorSignature(dtype=tf.float32, shape=TensorShape([Dimension(None)]), is_sparse=False).\nWARNING:tensorflow:Given features: Tensor(\"input:0\", shape=(?, 10, 1), dtype=float32), required signatures: TensorSignature(dtype=tf.float32, shape=TensorShape([Dimension(None), Dimension(10), Dimension(1)]), is_sparse=False).\nWARNING:tensorflow:Given targets: Tensor(\"output:0\", shape=(?,), dtype=float32), required signatures: TensorSignature(dtype=tf.float32, shape=TensorShape([Dimension(None)]), is_sparse=False).\nWARNING:tensorflow:Given features: Tensor(\"input:0\", shape=(?, 10, 1), dtype=float32), required signatures: TensorSignature(dtype=tf.float32, shape=TensorShape([Dimension(None), Dimension(10), Dimension(1)]), is_sparse=False).\nWARNING:tensorflow:Given targets: Tensor(\"output:0\", shape=(?,), dtype=float32), required signatures: TensorSignature(dtype=tf.float32, shape=TensorShape([Dimension(None)]), is_sparse=False).\nWARNING:tensorflow:Given features: 
Tensor(\"input:0\", shape=(?, 10, 1), dtype=float32), required signatures: TensorSignature(dtype=tf.float32, shape=TensorShape([Dimension(None), Dimension(10), Dimension(1)]), is_sparse=False).\nWARNING:tensorflow:Given targets: Tensor(\"output:0\", shape=(?,), dtype=float32), required signatures: TensorSignature(dtype=tf.float32, shape=TensorShape([Dimension(None)]), is_sparse=False).\nWARNING:tensorflow:Given features: Tensor(\"input:0\", shape=(?, 10, 1), dtype=float32), required signatures: TensorSignature(dtype=tf.float32, shape=TensorShape([Dimension(None), Dimension(10), Dimension(1)]), is_sparse=False).\nWARNING:tensorflow:Given targets: Tensor(\"output:0\", shape=(?,), dtype=float32), required signatures: TensorSignature(dtype=tf.float32, shape=TensorShape([Dimension(None)]), is_sparse=False).\nWARNING:tensorflow:Given features: Tensor(\"input:0\", shape=(?, 10, 1), dtype=float32), required signatures: TensorSignature(dtype=tf.float32, shape=TensorShape([Dimension(None), Dimension(10), Dimension(1)]), is_sparse=False).\nWARNING:tensorflow:Given targets: Tensor(\"output:0\", shape=(?,), dtype=float32), required signatures: TensorSignature(dtype=tf.float32, shape=TensorShape([Dimension(None)]), is_sparse=False).\nWARNING:tensorflow:Given features: Tensor(\"input:0\", shape=(?, 10, 1), dtype=float32), required signatures: TensorSignature(dtype=tf.float32, shape=TensorShape([Dimension(None), Dimension(10), Dimension(1)]), is_sparse=False).\nWARNING:tensorflow:Given targets: Tensor(\"output:0\", shape=(?,), dtype=float32), required signatures: TensorSignature(dtype=tf.float32, shape=TensorShape([Dimension(None)]), is_sparse=False).\nWARNING:tensorflow:Given features: Tensor(\"input:0\", shape=(?, 10, 1), dtype=float32), required signatures: TensorSignature(dtype=tf.float32, shape=TensorShape([Dimension(None), Dimension(10), Dimension(1)]), is_sparse=False).\nWARNING:tensorflow:Given targets: Tensor(\"output:0\", shape=(?,), dtype=float32), required signatures: TensorSignature(dtype=tf.float32, shape=TensorShape([Dimension(None)]), is_sparse=False).\n" ], [ "predicted = regressor.predict(X['test'])\nrmse = np.sqrt(((predicted - y['test']) ** 2).mean(axis=0))\nscore = mean_squared_error(predicted, y['test'])\nprint (\"MSE: %f\" % score)", "WARNING:tensorflow:Calling predict (from tensorflow.contrib.learn.python.learn.estimators.estimator) with as_iterable=False is deprecated and will be removed after 2016-09-15.\nInstructions for updating:\nThe default behavior of predict() is changing. The default value for\nas_iterable will change to True, and then the flag will be removed\naltogether. The behavior of this flag is described below.\n" ], [ "plot_predicted, = plt.plot(predicted, label='predicted')\nplot_test, = plt.plot(y['test'], label='test')\nplt.legend(handles=[plot_predicted, plot_test])", "_____no_output_____" ], [ "plt.plot(regressor.predict(X['train']))\nplt.plot(y['train'])", "WARNING:tensorflow:Calling predict (from tensorflow.contrib.learn.python.learn.estimators.estimator) with as_iterable=False is deprecated and will be removed after 2016-09-15.\nInstructions for updating:\nThe default behavior of predict() is changing. The default value for\nas_iterable will change to True, and then the flag will be removed\naltogether. The behavior of this flag is described below.\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code" ] ]
cb763ef6a448e56f024e2bdb59ba886d674256bb
329,410
ipynb
Jupyter Notebook
scripts/d21-en/mxnet/chapter_computer-vision/anchor.ipynb
lucmertins/CapDeepLearningBook
e5959b552c8716e7fc65a21ae9c13c58509544c1
[ "MIT" ]
null
null
null
scripts/d21-en/mxnet/chapter_computer-vision/anchor.ipynb
lucmertins/CapDeepLearningBook
e5959b552c8716e7fc65a21ae9c13c58509544c1
[ "MIT" ]
null
null
null
scripts/d21-en/mxnet/chapter_computer-vision/anchor.ipynb
lucmertins/CapDeepLearningBook
e5959b552c8716e7fc65a21ae9c13c58509544c1
[ "MIT" ]
null
null
null
86.961457
41,041
0.736368
[ [ [ "# Anchor Boxes\n:label:`sec_anchor`\n\nObject detection algorithms usually sample a large number of regions in the input image, determine whether these regions contain objects of interest, and adjust the edges of the regions so as to predict the ground-truth bounding box of the target more accurately. Different models may use different region sampling methods. Here, we introduce one such method: it generates multiple bounding boxes with different sizes and aspect ratios while centering on each pixel. These bounding boxes are called anchor boxes. We will practice object detection based on anchor boxes in the following sections.\n", "_____no_output_____" ], [ "First, import the packages or modules required for this section. Here, we have modified the printing accuracy of NumPy. Because printing tensors actually calls the print function of NumPy, the floating-point numbers in tensors printed in this section are more concise.\n", "_____no_output_____" ] ], [ [ "%matplotlib inline\nfrom mxnet import gluon, image, np, npx\nfrom d2l import mxnet as d2l\n\nnp.set_printoptions(2)\nnpx.set_np()", "_____no_output_____" ] ], [ [ "## Generating Multiple Anchor Boxes\n\nAssume that the input image has a height of $h$ and width of $w$. We generate anchor boxes with different shapes centered on each pixel of the image. Assume the size is $s\\in (0, 1]$, the aspect ratio is $r > 0$, and the width and height of the anchor box are $ws\\sqrt{r}$ and $hs/\\sqrt{r}$, respectively. When the center position is given, an anchor box with known width and height is determined.\n\nBelow we set a set of sizes $s_1,\\ldots, s_n$ and a set of aspect ratios $r_1,\\ldots, r_m$. If we use a combination of all sizes and aspect ratios with each pixel as the center, the input image will have a total of $whnm$ anchor boxes. Although these anchor boxes may cover all ground-truth bounding boxes, the computational complexity is often excessive. Therefore, we are usually only interested in a combination containing $s_1$ or $r_1$ sizes and aspect ratios, that is:\n\n$$(s_1, r_1), (s_1, r_2), \\ldots, (s_1, r_m), (s_2, r_1), (s_3, r_1), \\ldots, (s_n, r_1).$$\n\nThat is, the number of anchor boxes centered on the same pixel is $n+m-1$. For the entire input image, we will generate a total of $wh(n+m-1)$ anchor boxes.\n\nThe above method of generating anchor boxes has been implemented in the `multibox_prior` function. 
We specify the input, a set of sizes, and a set of aspect ratios, and this function will return all the anchor boxes entered.\n", "_____no_output_____" ] ], [ [ "#@save\ndef multibox_prior(data, sizes, ratios):\n in_height, in_width = data.shape[-2:]\n device, num_sizes, num_ratios = data.ctx, len(sizes), len(ratios)\n boxes_per_pixel = (num_sizes + num_ratios - 1)\n size_tensor = np.array(sizes, ctx=device)\n ratio_tensor = np.array(ratios, ctx=device)\n # Offsets are required to move the anchor to center of a pixel\n # Since pixel (height=1, width=1), we choose to offset our centers by 0.5\n offset_h, offset_w = 0.5, 0.5\n steps_h = 1.0 / in_height # Scaled steps in y axis\n steps_w = 1.0 / in_width # Scaled steps in x axis\n\n # Generate all center points for the anchor boxes\n center_h = (np.arange(in_height, ctx=device) + offset_h) * steps_h\n center_w = (np.arange(in_width, ctx=device) + offset_w) * steps_w\n shift_x, shift_y = np.meshgrid(center_w, center_h)\n shift_x, shift_y = shift_x.reshape(-1), shift_y.reshape(-1)\n\n # Generate boxes_per_pixel number of heights and widths which are later\n # used to create anchor box corner coordinates (xmin, xmax, ymin, ymax)\n # concat (various sizes, first ratio) and (first size, various ratios)\n w = np.concatenate((size_tensor * np.sqrt(ratio_tensor[0]),\n sizes[0] * np.sqrt(ratio_tensor[1:])))\\\n * in_height / in_width # handle rectangular inputs\n h = np.concatenate((size_tensor / np.sqrt(ratio_tensor[0]),\n sizes[0] / np.sqrt(ratio_tensor[1:])))\n # Divide by 2 to get half height and half width\n anchor_manipulations = np.tile(\n np.stack((-w, -h, w, h)).T, (in_height * in_width, 1)) / 2\n\n # Each center point will have boxes_per_pixel number of anchor boxes, so\n # generate grid of all anchor box centers with boxes_per_pixel repeats\n out_grid = np.stack([shift_x, shift_y, shift_x, shift_y],\n axis=1).repeat(boxes_per_pixel, axis=0)\n\n output = out_grid + anchor_manipulations\n return np.expand_dims(output, axis=0)", "_____no_output_____" ] ], [ [ "We can see that the shape of the returned anchor box variable `y` is\n(batch size, number of anchor boxes, 4).\n", "_____no_output_____" ] ], [ [ "img = image.imread('../img/catdog.jpg').asnumpy()\nh, w = img.shape[0:2]\n\nprint(h, w)\nX = np.random.uniform(size=(1, 3, h, w)) # Construct input data\nY = multibox_prior(X, sizes=[0.75, 0.5, 0.25], ratios=[1, 2, 0.5])\nY.shape", "561 728\n" ] ], [ [ "After changing the shape of the anchor box variable `y` to (image height, image width, number of anchor boxes centered on the same pixel, 4), we can obtain all the anchor boxes centered on a specified pixel position. In the following example, we access the first anchor box centered on (250, 250). It has four elements: the $x, y$ axis coordinates in the upper-left corner and the $x, y$ axis coordinates in the lower-right corner of the anchor box. 
The coordinate values of the $x$ and $y$ axis are divided by the width and height of the image, respectively, so the value range is between 0 and 1.\n", "_____no_output_____" ] ], [ [ "boxes = Y.reshape(h, w, 5, 4)\nboxes[250, 250, 0, :]", "_____no_output_____" ] ], [ [ "In order to describe all anchor boxes centered on one pixel in the image, we first define the `show_bboxes` function to draw multiple bounding boxes on the image.\n", "_____no_output_____" ] ], [ [ "#@save\ndef show_bboxes(axes, bboxes, labels=None, colors=None):\n \"\"\"Show bounding boxes.\"\"\"\n def _make_list(obj, default_values=None):\n if obj is None:\n obj = default_values\n elif not isinstance(obj, (list, tuple)):\n obj = [obj]\n return obj\n\n labels = _make_list(labels)\n colors = _make_list(colors, ['b', 'g', 'r', 'm', 'c'])\n for i, bbox in enumerate(bboxes):\n color = colors[i % len(colors)]\n rect = d2l.bbox_to_rect(bbox.asnumpy(), color)\n axes.add_patch(rect)\n if labels and len(labels) > i:\n text_color = 'k' if color == 'w' else 'w'\n axes.text(rect.xy[0], rect.xy[1], labels[i], va='center',\n ha='center', fontsize=9, color=text_color,\n bbox=dict(facecolor=color, lw=0))", "_____no_output_____" ] ], [ [ "As we just saw, the coordinate values of the $x$ and $y$ axis in the variable `boxes` have been divided by the width and height of the image, respectively. When drawing images, we need to restore the original coordinate values of the anchor boxes and therefore define the variable `bbox_scale`. Now, we can draw all the anchor boxes centered on (250, 250) in the image. As you can see, the blue anchor box with a size of 0.75 and an aspect ratio of 1 covers the dog in the image well.\n", "_____no_output_____" ] ], [ [ "d2l.set_figsize()\nbbox_scale = np.array((w, h, w, h))\nfig = d2l.plt.imshow(img)\nshow_bboxes(fig.axes, boxes[250, 250, :, :] * bbox_scale, [\n 's=0.75, r=1', 's=0.5, r=1', 's=0.25, r=1', 's=0.75, r=2', 's=0.75, r=0.5'\n])", "_____no_output_____" ] ], [ [ "## Intersection over Union\n\nWe just mentioned that the anchor box covers the dog in the image well. If the ground-truth bounding box of the target is known, how can \"well\" here be quantified? An intuitive method is to measure the similarity between anchor boxes and the ground-truth bounding box. We know that the Jaccard index can measure the similarity between two sets. Given sets $\\mathcal{A}$ and $\\mathcal{B}$, their Jaccard index is the size of their intersection divided by the size of their union:\n\n$$J(\\mathcal{A},\\mathcal{B}) = \\frac{\\left|\\mathcal{A} \\cap \\mathcal{B}\\right|}{\\left| \\mathcal{A} \\cup \\mathcal{B}\\right|}.$$\n\n\nIn fact, we can consider the pixel area of a bounding box as a collection of pixels. In this way, we can measure the similarity of the two bounding boxes by the Jaccard index of their pixel sets. When we measure the similarity of two bounding boxes, we usually refer the Jaccard index as intersection over union (IoU), which is the ratio of the intersecting area to the union area of the two bounding boxes, as shown in :numref:`fig_iou`. The value range of IoU is between 0 and 1: 0 means that there are no overlapping pixels between the two bounding boxes, while 1 indicates that the two bounding boxes are equal.\n\n![IoU is the ratio of the intersecting area to the union area of two bounding boxes. 
](../img/iou.svg)\n:label:`fig_iou`\n\n\nFor the remainder of this section, we will use IoU to measure the similarity between anchor boxes and ground-truth bounding boxes, and between different anchor boxes.\n", "_____no_output_____" ] ], [ [ "#@save\ndef box_iou(boxes1, boxes2):\n \"\"\"Compute IOU between two sets of boxes of shape (N,4) and (M,4).\"\"\"\n # Compute box areas\n box_area = lambda boxes: ((boxes[:, 2] - boxes[:, 0]) *\n (boxes[:, 3] - boxes[:, 1]))\n area1 = box_area(boxes1)\n area2 = box_area(boxes2)\n lt = np.maximum(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2]\n rb = np.minimum(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2]\n wh = (rb - lt).clip(min=0) # [N,M,2]\n inter = wh[:, :, 0] * wh[:, :, 1] # [N,M]\n unioun = area1[:, None] + area2 - inter\n return inter / unioun", "_____no_output_____" ] ], [ [ "## Labeling Training Set Anchor Boxes\n\n\nIn the training set, we consider each anchor box as a training example. In order to train the object detection model, we need to mark two types of labels for each anchor box: first, the category of the target contained in the anchor box (category) and, second, the offset of the ground-truth bounding box relative to the anchor box (offset). In object detection, we first generate multiple anchor boxes, predict the categories and offsets for each anchor box, adjust the anchor box position according to the predicted offset to obtain the bounding boxes to be used for prediction, and finally filter out the prediction bounding boxes that need to be output.\n\n\nWe know that, in the object detection training set, each image is labelled with the location of the ground-truth bounding box and the category of the target contained. After the anchor boxes are generated, we primarily label anchor boxes based on the location and category information of the ground-truth bounding boxes similar to the anchor boxes. So how do we assign ground-truth bounding boxes to anchor boxes similar to them?\n\n\nAssume that the anchor boxes in the image are $A_1, A_2, \\ldots, A_{n_a}$ and the ground-truth bounding boxes are $B_1, B_2, \\ldots, B_{n_b}$ and $n_a \\geq n_b$. Define matrix $\\mathbf{X} \\in \\mathbb{R}^{n_a \\times n_b}$, where element $x_{ij}$ in the $i^\\mathrm{th}$ row and $j^\\mathrm{th}$ column is the IoU of the anchor box $A_i$ to the ground-truth bounding box $B_j$.\nFirst, we find the largest element in the matrix $\\mathbf{X}$ and record the row index and column index of the element as $i_1,j_1$. We assign the ground-truth bounding box $B_{j_1}$ to the anchor box $A_{i_1}$. Obviously, anchor box $A_{i_1}$ and ground-truth bounding box $B_{j_1}$ have the highest similarity among all the \"anchor box--ground-truth bounding box\" pairings. Next, discard all elements in the $i_1$th row and the $j_1$th column in the matrix $\\mathbf{X}$. Find the largest remaining element in the matrix $\\mathbf{X}$ and record the row index and column index of the element as $i_2,j_2$. We assign ground-truth bounding box $B_{j_2}$ to anchor box $A_{i_2}$ and then discard all elements in the $i_2$th row and the $j_2$th column in the matrix $\\mathbf{X}$. At this point, elements in two rows and two columns in the matrix $\\mathbf{X}$ have been discarded.\n\n\nWe proceed until all elements in the $n_b$ column in the matrix $\\mathbf{X}$ are discarded. At this time, we have assigned a ground-truth bounding box to each of the $n_b$ anchor boxes.\nNext, we only traverse the remaining $n_a - n_b$ anchor boxes. 
Given anchor box $A_i$, find the bounding box $B_j$ with the largest IoU with $A_i$ according to the $i^\\mathrm{th}$ row of the matrix $\\mathbf{X}$, and only assign ground-truth bounding box $B_j$ to anchor box $A_i$ when the IoU is greater than the predetermined threshold.\n\n\nAs shown in :numref:`fig_anchor_label` (left), assuming that the maximum value in the matrix $\\mathbf{X}$ is $x_{23}$, we will assign ground-truth bounding box $B_3$ to anchor box $A_2$. Then, we discard all the elements in row 2 and column 3 of the matrix, find the largest element $x_{71}$ of the remaining shaded area, and assign ground-truth bounding box $B_1$ to anchor box $A_7$. Then, as shown in :numref:`fig_anchor_label` (middle), discard all the elements in row 7 and column 1 of the matrix, find the largest element $x_{54}$ of the remaining shaded area, and assign ground-truth bounding box $B_4$ to anchor box $A_5$. Finally, as shown in :numref:`fig_anchor_label` (right), discard all the elements in row 5 and column 4 of the matrix, find the largest element $x_{92}$ of the remaining shaded area, and assign ground-truth bounding box $B_2$ to anchor box $A_9$. After that, we only need to traverse the remaining anchor boxes of $A_1, A_3, A_4, A_6, A_8$ and determine whether to assign ground-truth bounding boxes to the remaining anchor boxes according to the threshold.\n\n![Assign ground-truth bounding boxes to anchor boxes. ](../img/anchor-label.svg)\n:label:`fig_anchor_label`\n", "_____no_output_____" ] ], [ [ "#@save\ndef match_anchor_to_bbox(ground_truth, anchors, device, iou_threshold=0.5):\n \"\"\"Assign ground-truth bounding boxes to anchor boxes similar to them.\"\"\"\n num_anchors, num_gt_boxes = anchors.shape[0], ground_truth.shape[0]\n # Element `x_ij` in the `i^th` row and `j^th` column is the IoU\n # of the anchor box `anc_i` to the ground-truth bounding box `box_j`\n jaccard = box_iou(anchors, ground_truth)\n # Initialize the tensor to hold assigned ground truth bbox for each anchor\n anchors_bbox_map = np.full((num_anchors,), -1, dtype=np.int32, ctx=device)\n # Assign ground truth bounding box according to the threshold\n max_ious, indices = np.max(jaccard, axis=1), np.argmax(jaccard, axis=1)\n anc_i = np.nonzero(max_ious >= 0.5)[0]\n box_j = indices[max_ious >= 0.5]\n anchors_bbox_map[anc_i] = box_j\n # Find the largest iou for each bbox\n col_discard = np.full((num_anchors,), -1)\n row_discard = np.full((num_gt_boxes,), -1)\n for _ in range(num_gt_boxes):\n max_idx = np.argmax(jaccard)\n box_idx = (max_idx % num_gt_boxes).astype('int32')\n anc_idx = (max_idx / num_gt_boxes).astype('int32')\n anchors_bbox_map[anc_idx] = box_idx\n jaccard[:, box_idx] = col_discard\n jaccard[anc_idx, :] = row_discard\n return anchors_bbox_map", "_____no_output_____" ] ], [ [ "Now we can label the categories and offsets of the anchor boxes. If an anchor box $A$ is assigned ground-truth bounding box $B$, the category of the anchor box $A$ is set to the category of $B$. And the offset of the anchor box $A$ is set according to the relative position of the central coordinates of $B$ and $A$ and the relative sizes of the two boxes. Because the positions and sizes of various boxes in the dataset may vary, these relative positions and relative sizes usually require some special transformations to make the offset distribution more uniform and easier to fit. 
Assume the center coordinates of anchor box $A$ and its assigned ground-truth bounding box $B$ are $(x_a, y_a), (x_b, y_b)$, the widths of $A$ and $B$ are $w_a, w_b$, and their heights are $h_a, h_b$, respectively. In this case, a common technique is to label the offset of $A$ as\n\n$$\\left( \\frac{ \\frac{x_b - x_a}{w_a} - \\mu_x }{\\sigma_x},\n\\frac{ \\frac{y_b - y_a}{h_a} - \\mu_y }{\\sigma_y},\n\\frac{ \\log \\frac{w_b}{w_a} - \\mu_w }{\\sigma_w},\n\\frac{ \\log \\frac{h_b}{h_a} - \\mu_h }{\\sigma_h}\\right),$$\n\nThe default values of the constant are $\\mu_x = \\mu_y = \\mu_w = \\mu_h = 0, \\sigma_x=\\sigma_y=0.1, and \\sigma_w=\\sigma_h=0.2$.\nThis transformation is implemented below in the `offset_boxes` function.\nIf an anchor box is not assigned a ground-truth bounding box, we only need to set the category of the anchor box to background. Anchor boxes whose category is background are often referred to as negative anchor boxes, and the rest are referred to as positive anchor boxes.\n", "_____no_output_____" ] ], [ [ "#@save\ndef offset_boxes(anchors, assigned_bb, eps=1e-6):\n c_anc = d2l.box_corner_to_center(anchors)\n c_assigned_bb = d2l.box_corner_to_center(assigned_bb)\n offset_xy = 10 * (c_assigned_bb[:, :2] - c_anc[:, :2]) / c_anc[:, 2:]\n offset_wh = 5 * np.log(eps + c_assigned_bb[:, 2:] / c_anc[:, 2:])\n offset = np.concatenate([offset_xy, offset_wh], axis=1)\n return offset", "_____no_output_____" ], [ "#@save\ndef multibox_target(anchors, labels):\n batch_size, anchors = labels.shape[0], anchors.squeeze(0)\n batch_offset, batch_mask, batch_class_labels = [], [], []\n device, num_anchors = anchors.ctx, anchors.shape[0]\n for i in range(batch_size):\n label = labels[i, :, :]\n anchors_bbox_map = match_anchor_to_bbox(label[:, 1:], anchors, device)\n bbox_mask = np.tile((np.expand_dims(\n (anchors_bbox_map >= 0), axis=-1)), (1, 4)).astype('int32')\n # Initialize class_labels and assigned bbox coordinates with zeros\n class_labels = np.zeros(num_anchors, dtype=np.int32, ctx=device)\n assigned_bb = np.zeros((num_anchors, 4), dtype=np.float32, ctx=device)\n # Assign class labels to the anchor boxes using matched gt bbox labels\n # If no gt bbox is assigned to an anchor box, then let the\n # class_labels and assigned_bb remain zero, i.e the background class\n indices_true = np.nonzero(anchors_bbox_map >= 0)[0]\n bb_idx = anchors_bbox_map[indices_true]\n class_labels[indices_true] = label[bb_idx, 0].astype('int32') + 1\n assigned_bb[indices_true] = label[bb_idx, 1:]\n # offset transformations\n offset = offset_boxes(anchors, assigned_bb) * bbox_mask\n batch_offset.append(offset.reshape(-1))\n batch_mask.append(bbox_mask.reshape(-1))\n batch_class_labels.append(class_labels)\n bbox_offset = np.stack(batch_offset)\n bbox_mask = np.stack(batch_mask)\n class_labels = np.stack(batch_class_labels)\n return (bbox_offset, bbox_mask, class_labels)", "_____no_output_____" ] ], [ [ "Below we demonstrate a detailed example. We define ground-truth bounding boxes for the cat and dog in the read image, where the first element is category (0 for dog, 1 for cat) and the remaining four elements are the $x, y$ axis coordinates at top-left corner and $x, y$ axis coordinates at lower-right corner (the value range is between 0 and 1). Here, we construct five anchor boxes to be labeled by the coordinates of the upper-left corner and the lower-right corner, which are recorded as $A_0, \\ldots, A_4$, respectively (the index in the program starts from 0). 
First, draw the positions of these anchor boxes and the ground-truth bounding boxes in the image.\n", "_____no_output_____" ] ], [ [ "ground_truth = np.array([[0, 0.1, 0.08, 0.52, 0.92],\n [1, 0.55, 0.2, 0.9, 0.88]])\nanchors = np.array([[0, 0.1, 0.2, 0.3], [0.15, 0.2, 0.4, 0.4],\n [0.63, 0.05, 0.88, 0.98], [0.66, 0.45, 0.8, 0.8],\n [0.57, 0.3, 0.92, 0.9]])\n\nfig = d2l.plt.imshow(img)\nshow_bboxes(fig.axes, ground_truth[:, 1:] * bbox_scale, ['dog', 'cat'], 'k')\nshow_bboxes(fig.axes, anchors * bbox_scale, ['0', '1', '2', '3', '4']);", "_____no_output_____" ] ], [ [ "We can label categories and offsets for anchor boxes by using the `multibox_target` function. This function sets the background category to 0 and increments the integer index of the target category from zero by 1 (1 for dog and 2 for cat).\n", "_____no_output_____" ], [ "We add example dimensions to the anchor boxes and ground-truth bounding boxes and construct random predicted results with a shape of (batch size, number of categories including background, number of anchor boxes) by using the `expand_dims` function.\n", "_____no_output_____" ] ], [ [ "labels = multibox_target(np.expand_dims(anchors, axis=0),\n np.expand_dims(ground_truth, axis=0))", "_____no_output_____" ] ], [ [ "There are three items in the returned result, all of which are in the tensor format. The third item is represented by the category labeled for the anchor box.\n", "_____no_output_____" ] ], [ [ "labels[2]", "_____no_output_____" ] ], [ [ "We analyze these labelled categories based on positions of anchor boxes and ground-truth bounding boxes in the image. First, in all \"anchor box--ground-truth bounding box\" pairs, the IoU of anchor box $A_4$ to the ground-truth bounding box of the cat is the largest, so the category of anchor box $A_4$ is labeled as cat. Without considering anchor box $A_4$ or the ground-truth bounding box of the cat, in the remaining \"anchor box--ground-truth bounding box\" pairs, the pair with the largest IoU is anchor box $A_1$ and the ground-truth bounding box of the dog, so the category of anchor box $A_1$ is labeled as dog. Next, traverse the remaining three unlabeled anchor boxes. The category of the ground-truth bounding box with the largest IoU with anchor box $A_0$ is dog, but the IoU is smaller than the threshold (the default is 0.5), so the category is labeled as background; the category of the ground-truth bounding box with the largest IoU with anchor box $A_2$ is cat and the IoU is greater than the threshold, so the category is labeled as cat; the category of the ground-truth bounding box with the largest IoU with anchor box $A_3$ is cat, but the IoU is smaller than the threshold, so the category is labeled as background.\n\n\nThe second item of the return value is a mask variable, with the shape of (batch size, four times the number of anchor boxes). The elements in the mask variable correspond one-to-one with the four offset values of each anchor box.\nBecause we do not care about background detection, offsets of the negative class should not affect the target function. 
By multiplying by element, the 0 in the mask variable can filter out negative class offsets before calculating target function.\n", "_____no_output_____" ] ], [ [ "labels[1]", "_____no_output_____" ] ], [ [ "The first item returned is the four offset values labeled for each anchor box, with the offsets of negative class anchor boxes labeled as 0.\n", "_____no_output_____" ] ], [ [ "labels[0]", "_____no_output_____" ] ], [ [ "## Bounding Boxes for Prediction\n\nDuring model prediction phase, we first generate multiple anchor boxes for the image and then predict categories and offsets for these anchor boxes one by one. Then, we obtain prediction bounding boxes based on anchor boxes and their predicted offsets.\n\nBelow we implement function `offset_inverse` which takes in anchors and\noffset predictions as inputs and applies inverse offset transformations to\nreturn the predicted bounding box coordinates.\n", "_____no_output_____" ] ], [ [ "#@save\ndef offset_inverse(anchors, offset_preds):\n c_anc = d2l.box_corner_to_center(anchors)\n c_pred_bb_xy = (offset_preds[:, :2] * c_anc[:, 2:] / 10) + c_anc[:, :2]\n c_pred_bb_wh = np.exp(offset_preds[:, 2:] / 5) * c_anc[:, 2:]\n c_pred_bb = np.concatenate((c_pred_bb_xy, c_pred_bb_wh), axis=1)\n predicted_bb = d2l.box_center_to_corner(c_pred_bb)\n return predicted_bb", "_____no_output_____" ] ], [ [ "When there are many anchor boxes, many similar prediction bounding boxes may be output for the same target. To simplify the results, we can remove similar prediction bounding boxes. A commonly used method is called non-maximum suppression (NMS).\n\nLet us take a look at how NMS works. For a prediction bounding box $B$, the model calculates the predicted probability for each category. Assume the largest predicted probability is $p$, the category corresponding to this probability is the predicted category of $B$. We also refer to $p$ as the confidence level of prediction bounding box $B$. On the same image, we sort the prediction bounding boxes with predicted categories other than background by confidence level from high to low, and obtain the list $L$. Select the prediction bounding box $B_1$ with highest confidence level from $L$ as a baseline and remove all non-benchmark prediction bounding boxes with an IoU with $B_1$ greater than a certain threshold from $L$. The threshold here is a preset hyperparameter. At this point, $L$ retains the prediction bounding box with the highest confidence level and removes other prediction bounding boxes similar to it.\nNext, select the prediction bounding box $B_2$ with the second highest confidence level from $L$ as a baseline, and remove all non-benchmark prediction bounding boxes with an IoU with $B_2$ greater than a certain threshold from $L$. Repeat this process until all prediction bounding boxes in $L$ have been used as a baseline. At this time, the IoU of any pair of prediction bounding boxes in $L$ is less than the threshold. 
Finally, output all prediction bounding boxes in the list $L$.\n", "_____no_output_____" ] ], [ [ "#@save\ndef nms(boxes, scores, iou_threshold):\n # sorting scores by the descending order and return their indices\n B = scores.argsort()[::-1]\n keep = [] # boxes indices that will be kept\n while B.size > 0:\n i = B[0]\n keep.append(i)\n if B.size == 1: break\n iou = box_iou(boxes[i, :].reshape(-1, 4),\n boxes[B[1:], :].reshape(-1, 4)).reshape(-1)\n inds = np.nonzero(iou <= iou_threshold)[0]\n B = B[inds + 1]\n return np.array(keep, dtype=np.int32, ctx=boxes.ctx)\n\n#@save\ndef multibox_detection(cls_probs, offset_preds, anchors, nms_threshold=0.5,\n pos_threshold=0.00999999978):\n device, batch_size = cls_probs.ctx, cls_probs.shape[0]\n anchors = np.squeeze(anchors, axis=0)\n num_classes, num_anchors = cls_probs.shape[1], cls_probs.shape[2]\n out = []\n for i in range(batch_size):\n cls_prob, offset_pred = cls_probs[i], offset_preds[i].reshape(-1, 4)\n conf, class_id = np.max(cls_prob[1:], 0), np.argmax(cls_prob[1:], 0)\n predicted_bb = offset_inverse(anchors, offset_pred)\n keep = nms(predicted_bb, conf, nms_threshold)\n # Find all non_keep indices and set the class_id to background\n all_idx = np.arange(num_anchors, dtype=np.int32, ctx=device)\n combined = np.concatenate((keep, all_idx))\n unique, counts = np.unique(combined, return_counts=True)\n non_keep = unique[counts == 1]\n all_id_sorted = np.concatenate((keep, non_keep))\n class_id[non_keep] = -1\n class_id = class_id[all_id_sorted].astype('float32')\n conf, predicted_bb = conf[all_id_sorted], predicted_bb[all_id_sorted]\n # threshold to be a positive prediction\n below_min_idx = (conf < pos_threshold)\n class_id[below_min_idx] = -1\n conf[below_min_idx] = 1 - conf[below_min_idx]\n pred_info = np.concatenate((np.expand_dims(\n class_id, axis=1), np.expand_dims(conf, axis=1), predicted_bb),\n axis=1)\n out.append(pred_info)\n return np.stack(out)", "_____no_output_____" ] ], [ [ "Next, we will look at a detailed example. First, construct four anchor boxes. For the sake of simplicity, we assume that predicted offsets are all 0. This means that the prediction bounding boxes are anchor boxes. Finally, we construct a predicted probability for each category.\n", "_____no_output_____" ] ], [ [ "anchors = np.array([[0.1, 0.08, 0.52, 0.92], [0.08, 0.2, 0.56, 0.95],\n [0.15, 0.3, 0.62, 0.91], [0.55, 0.2, 0.9, 0.88]])\noffset_preds = np.array([0] * anchors.size)\ncls_probs = np.array([[0] * 4, # Predicted probability for background\n [0.9, 0.8, 0.7, 0.1], # Predicted probability for dog\n [0.1, 0.2, 0.3, 0.9]]) # Predicted probability for cat", "_____no_output_____" ] ], [ [ "Print prediction bounding boxes and their confidence levels on the image.\n", "_____no_output_____" ] ], [ [ "fig = d2l.plt.imshow(img)\nshow_bboxes(fig.axes, anchors * bbox_scale,\n ['dog=0.9', 'dog=0.8', 'dog=0.7', 'cat=0.9'])", "_____no_output_____" ] ], [ [ "We use the `multibox_detection` function to perform NMS and set the threshold to 0.5. This adds an example dimension to the tensor input. We can see that the shape of the returned result is (batch size, number of anchor boxes, 6). The 6 elements of each row represent the output information for the same prediction bounding box. The first element is the predicted category index, which starts from 0 (0 is dog, 1 is cat). The value -1 indicates background or removal in NMS. The second element is the confidence level of prediction bounding box. 
The remaining four elements are the $x, y$ axis coordinates of the upper-left corner and the $x, y$ axis coordinates of the lower-right corner of the prediction bounding box (the value range is between 0 and 1).\n", "_____no_output_____" ] ], [ [ "output = multibox_detection(np.expand_dims(cls_probs, axis=0),\n np.expand_dims(offset_preds, axis=0),\n np.expand_dims(anchors, axis=0),\n nms_threshold=0.5)\noutput", "_____no_output_____" ] ], [ [ "We remove the prediction bounding boxes of category -1 and visualize the results retained by NMS.\n", "_____no_output_____" ] ], [ [ "fig = d2l.plt.imshow(img)\nfor i in output[0].asnumpy():\n if i[0] == -1:\n continue\n label = ('dog=', 'cat=')[int(i[0])] + str(i[1])\n show_bboxes(fig.axes, [np.array(i[2:]) * bbox_scale], label)", "_____no_output_____" ] ], [ [ "In practice, we can remove prediction bounding boxes with lower confidence levels before performing NMS, thereby reducing the amount of computation for NMS. We can also filter the output of NMS, for example, by only retaining results with higher confidence levels as the final output.\n\n\n## Summary\n\n* We generate multiple anchor boxes with different sizes and aspect ratios, centered on each pixel.\n* IoU, also called Jaccard index, measures the similarity of two bounding boxes. It is the ratio of the intersecting area to the union area of two bounding boxes.\n* In the training set, we mark two types of labels for each anchor box: one is the category of the target contained in the anchor box and the other is the offset of the ground-truth bounding box relative to the anchor box.\n* When predicting, we can use non-maximum suppression (NMS) to remove similar prediction bounding boxes, thereby simplifying the results.\n\n## Exercises\n\n1. Change the `sizes` and `ratios` values in the `multibox_prior` function and observe the changes to the generated anchor boxes.\n1. Construct two bounding boxes with an IoU of 0.5, and observe their coincidence.\n1. Verify the output of offset `labels[0]` by marking the anchor box offsets as defined in this section (the constant is the default value).\n1. Modify the variable `anchors` in the \"Labeling Training Set Anchor Boxes\" and \"Output Bounding Boxes for Prediction\" sections. How do the results change?\n", "_____no_output_____" ], [ "[Discussions](https://discuss.d2l.ai/t/370)\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
cb7648f0e834c822bc08e3d76be6c83dad38faed
950,663
ipynb
Jupyter Notebook
_notebooks/2021-05-16-criancas.ipynb
dados-covid/srag
2dac95fe4fc53f34cb4b9a016a5d305622c52f41
[ "Apache-2.0" ]
null
null
null
_notebooks/2021-05-16-criancas.ipynb
dados-covid/srag
2dac95fe4fc53f34cb4b9a016a5d305622c52f41
[ "Apache-2.0" ]
1
2021-05-12T15:50:41.000Z
2021-05-18T03:20:25.000Z
_notebooks/2021-05-16-criancas.ipynb
dados-covid/srag
2dac95fe4fc53f34cb4b9a016a5d305622c52f41
[ "Apache-2.0" ]
null
null
null
161.238636
223,332
0.572064
[ [ [ "# \"Analise casos de SRAG em crianças e adolecentes\"\n> \"Dados dos casos de hospitalizações por SRAG do opendatasus\"\n\n- toc: true\n- branch: master\n- badges: false\n- comments: false\n- numbersections: true\n- categories: [srag]\n- image: images/some_folder/your_image.png\n- hide:false\n- search_exclude: true\n- metadata_key1: metadata_value1\n- metadata_key2: metadata_value2", "_____no_output_____" ], [ "## Objetivos\n- Analisar comportamento dos casos e letalidade em crianças e adolecentes\n- Analisar ao longo do tempo e em diferentes estados", "_____no_output_____" ] ], [ [ "#hide\n\nimport sqlite3 as sql\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom srag_functions import *\n\ndb_name = 'srag'\ndb_path = f'data/opendatasus/{db_name}.db'\nconn = sql.connect(db_path)\ndf_srag = pd.read_sql(f'SELECT * FROM {db_name} WHERE ano >= 2019', conn)", "_____no_output_____" ], [ "#hide\n\ndef get_proportion_cases(df,index_cols,categories_cols):\n df_categories = pd.DataFrame(df.groupby(by=index_cols + categories_cols).size(),columns=['casos']).reset_index()\n df_categories = df_categories.pivot(index=index_cols,columns=categories_cols,values='casos')\n \n # ex: se index_cols = ['ano','SEM_PRI'], cada linha terá total por ano e semana\n df_subtotal = pd.DataFrame(df.groupby(by=index_cols).size(),columns=['total'])\n \n # ex: calcula proporção de cada categoria na \"semana\", entre 0.0 e 1.0\n df_rel = df_categories.div(df_subtotal.values,axis=0)\n \n # a princípio considera apenas a primeira categoria\n selected_category = categories_cols[0]\n \n df1 = pd.melt(df_rel,ignore_index=False,value_name='proporção').set_index(selected_category,append=True)\n df2 = pd.melt(df_categories,ignore_index=False,value_name='casos').set_index(selected_category,append=True)\n\n return pd.concat([df1,df2],axis=1).reset_index()\n\n\ndef highlight_max(s):\n '''\n highlight the maximum in a Series yellow.\n '''\n is_max = s == s.max()\n return ['background-color: yellow' if v else '' for v in is_max]\n ", "_____no_output_____" ] ], [ [ "## Proporção de casos de SRAG em crianças e adolecentes (até 19 anos)", "_____no_output_____" ] ], [ [ "#hide-input\n\nindex_cols = ['ano','SEM_PRI']\ncategories_cols =['faixa_etaria']\n\n# df_covid = df_srag.query('CLASSI_FIN == \"COVID-19\"')\n# df_casos_faixas = get_proportion_cases(df_covid,index_cols,categories_cols)\ndf_casos_faixas = get_proportion_cases(df_srag,index_cols,categories_cols)\n\ndf_chart = df_casos_faixas.query('faixa_etaria == \"00-20\"')\n\nalt.Chart(df_chart).mark_line(point=True).encode(\n x='SEM_PRI',\n y='proporção',\n color='ano:N',\n tooltip=['SEM_PRI','proporção','casos','ano']\n)", "_____no_output_____" ] ], [ [ "## Total de casos de SRAG em crianças e adolecentes (até 19 anos)", "_____no_output_____" ] ], [ [ "#hide-input\n\nalt.Chart(df_chart).mark_line(point=True).encode(\n x='SEM_PRI',\n y='casos',\n color='ano:N',\n tooltip=['SEM_PRI','proporção','casos','ano']\n)", "_____no_output_____" ] ], [ [ "> Tabela com dados", "_____no_output_____" ] ], [ [ "#collapse-output\n#hide-input\n\ndf_chart.style.apply(highlight_max,subset=['casos','proporção']).format({'casos':'{:.0f}','proporção':'{:.2%}'})", "_____no_output_____" ] ], [ [ "## Proporção de casos de SRAG-COVID em crianças e adolecentes (até 19 anos)", "_____no_output_____" ] ], [ [ "#hide-input\n\nindex_cols = ['ano','SEM_PRI']\ncategories_cols =['faixa_etaria']\n\ndf_covid = df_srag.query('CLASSI_FIN == \"COVID-19\" & ano >= 2020')\ndf_casos_faixas = 
get_proportion_cases(df_covid,index_cols,categories_cols)\n\ndf_chart = df_casos_faixas.query('faixa_etaria == \"00-20\"')\n\nalt.Chart(df_chart).mark_line(point=True).encode(\n x='SEM_PRI',\n y='proporção',\n color='ano:N',\n tooltip=['SEM_PRI','proporção','casos','ano']\n)", "_____no_output_____" ] ], [ [ "## Total de casos de SRAG-COVID em crianças e adolecentes (até 19 anos)", "_____no_output_____" ] ], [ [ "#hide-input\n\nalt.Chart(df_chart).mark_line(point=True).encode(\n x='SEM_PRI',\n y='casos',\n color='ano:N',\n tooltip=['SEM_PRI','proporção','casos','ano']\n)", "_____no_output_____" ] ], [ [ "> Tabela com dados", "_____no_output_____" ] ], [ [ "#collapse-output\n#hide-input\n\ndf_chart.style.apply(highlight_max,subset=['casos','proporção']).format({'casos':'{:.0f}','proporção':'{:.2%}'})", "_____no_output_____" ] ], [ [ "## Proporção de óbitos de SRAG em crianças e adolecentes (até 19 anos)", "_____no_output_____" ] ], [ [ "#hide-input\n\nindex_cols = ['ano','SEM_PRI']\ncategories_cols =['faixa_etaria']\n\n# df_covid = df_srag.query('CLASSI_FIN == \"COVID-19\" and EVOLUCAO == \"obito\"')\n# df_covid = df_srag.query('EVOLUCAO == \"obito\" and UF_RES == \"29_Bahia\" and idade_anos <= 5')\ndf_covid = df_srag.query('EVOLUCAO == \"obito\"')\n\n\ndf_obitos_faixas = get_proportion_cases(df_covid,index_cols,categories_cols).fillna(0)\ndf_chart = df_obitos_faixas.query('faixa_etaria == \"00-20\"')\n\nalt.Chart(df_chart).mark_line(point=True).encode(\n x='SEM_PRI',\n y='proporção',\n color='ano:N',\n tooltip=['SEM_PRI','proporção','casos','ano']\n)", "_____no_output_____" ] ], [ [ "## Total de óbitos de SRAG-COVID em crianças e adolecentes (até 19 anos)", "_____no_output_____" ] ], [ [ "#hide-input\n\nalt.Chart(df_chart).mark_line(point=True).encode(\n x='SEM_PRI',\n y='casos',\n color='ano:N',\n tooltip=['SEM_PRI','proporção','casos','ano']\n)", "_____no_output_____" ] ], [ [ "> Tabela com dados", "_____no_output_____" ] ], [ [ "#hide-input\n#collapse-output\n\ndf_chart.style.apply(highlight_max,subset=['casos','proporção']).format({'casos':'{:.0f}','proporção':'{:.2%}'})", "_____no_output_____" ] ], [ [ "## Letalidade dos casos de SRAG em crianças e adolecentes (até 19 anos)\n\n> obs: Para o calculo da letalidade considerei tanto os casos de \"obito\" quanto \"obito_outras_causas\"", "_____no_output_____" ] ], [ [ "#hide-input\n\n# df_casos_concluidos = df_srag.query('CLASSI_FIN == \"COVID-19\" and EVOLUCAO in (\"obito\",\"cura\",\"obito_outras_causas\")')\ndf_casos_concluidos = df_srag.query('EVOLUCAO in (\"obito\",\"cura\",\"obito_outras_causas\")')\n\nindex_cols = ['ano','SEM_PRI','faixa_etaria']\n\ndf_casos_concluidos_faixas = pd.DataFrame(df_casos_concluidos.groupby(by=index_cols).size(),columns=['casos'])\n\ndf_obitos = df_casos_concluidos.query('EVOLUCAO in (\"obito\",\"obito_outras_causas\")')\ndf_obitos_faixas = pd.DataFrame(df_obitos.groupby(by=index_cols).size(),columns=['obitos'])\n\ndf_casos_concluidos_faixas\ndf_letalidate = pd.concat([df_casos_concluidos_faixas,df_obitos_faixas],axis=1).fillna(0) \ndf_letalidate['letalidade'] = df_letalidate['obitos'] / df_letalidate['casos']\n\n\ndf_chart = df_letalidate.reset_index().query('faixa_etaria == \"00-20\"')\n# outra forma de fazer a mesma coisa, não sei se seria mais eficiente.\n# df_chart = df_letalidate.sort_index().loc[(slice(None),slice(None),slice('00-20')),:].reset_index()\n\nalt.Chart(df_chart).mark_line(point=True).encode(\n x='SEM_PRI',\n y='letalidade',\n color='ano:N',\n tooltip=df_chart.columns.to_list()\n)", 
"_____no_output_____" ] ], [ [ "> Tabela com dados", "_____no_output_____" ] ], [ [ "#collapse-output\n#hide-input\n\n\ndf_chart.style.apply(highlight_max,subset=['casos','obitos','letalidade']).format({'obitos':'{:.0f}','letalidade':'{:.2%}'})", "_____no_output_____" ] ], [ [ "## Proporção de casos de SRAG em crianças e adolecentes (até 19 anos) por UF", "_____no_output_____" ] ], [ [ "#hide-input\n\nindex_cols = ['UF_RES']\ncategories_cols =['faixa_etaria']\n\n# df_covid = df_srag.query('CLASSI_FIN == \"COVID-19\"')\n# df = df_covid.query('UF_RES != \"nan_nd\"')\ndf = df_srag.query('UF_RES != \"nan_nd\"')#in (\"29_Bahia\",\"33_Rio de Janeiro\",\"35_São Paulo\")')\n# df = df.query('SEM_PRI_ABS >= 15 and SEM_PRI_ABS <= 69')\ndf_casos_faixas = get_proportion_cases(df,index_cols,categories_cols)\ndf_chart = df_casos_faixas.query('faixa_etaria == \"00-20\"')\n\n\ncategory = 'UF_RES'\nchart = alt.Chart(df_chart).mark_bar().encode(\n# x='UF_RES',\n y='proporção:Q',\n x=alt.X('UF_RES:N',sort='y'),\n color= category,\n tooltip=df_chart.columns.to_list()\n)\n\nns_opacity = 0.01\nselection = alt.selection_multi(empty='all', fields=[category], bind='legend')\nchart = chart.add_selection(\n selection\n).encode(\n opacity=alt.condition(selection, alt.value(1.0), alt.value(ns_opacity))\n)\nchart", "_____no_output_____" ] ], [ [ "## Proporção de óbitos por SRAG em crianças e adolecentes (até 19 anos) por UF", "_____no_output_____" ] ], [ [ "#hide-input\n\nindex_cols = ['UF_RES']\ncategories_cols =['faixa_etaria']\n\n# df_covid = df_srag.query('CLASSI_FIN == \"COVID-19\"')\n# df = df_covid.query('UF_RES != \"nan_nd\"')\ndf = df_srag.query('UF_RES != \"nan_nd\"')#in (\"29_Bahia\",\"33_Rio de Janeiro\",\"35_São Paulo\")')\n# df = df.query('SEM_PRI_ABS >= 15 and SEM_PRI_ABS <= 69')\ndf = df.query('EVOLUCAO == \"obito\"')\ndf_casos_faixas = get_proportion_cases(df,index_cols,categories_cols)\ndf_chart = df_casos_faixas.query('faixa_etaria == \"00-20\"')\n\n\ncategory = 'UF_RES'\nchart = alt.Chart(df_chart).mark_bar().encode(\n# x='UF_RES',\n y='proporção:Q',\n x=alt.X('UF_RES:N',sort='y'),\n color= category,\n tooltip=df_chart.columns.to_list()\n)\n\nns_opacity = 0.01\nselection = alt.selection_multi(empty='all', fields=[category], bind='legend')\nchart = chart.add_selection(\n selection\n).encode(\n opacity=alt.condition(selection, alt.value(1.0), alt.value(ns_opacity))\n)\nchart", "_____no_output_____" ] ], [ [ "> Análise por semana - diferentes UF", "_____no_output_____" ] ], [ [ "#hide-input\n\nindex_cols = ['UF_RES','SEM_PRI_ABS']\ncategories_cols =['faixa_etaria']\n\n# df_covid = df_srag.query('CLASSI_FIN == \"COVID-19\"')\n# df = df_covid.query('UF_RES != \"nan_nd\"')\ndf = df_srag.query('UF_RES != \"nan_nd\"')#in (\"29_Bahia\",\"33_Rio de Janeiro\",\"35_São Paulo\")')\ndf = df.query('SEM_PRI_ABS >= 15 and SEM_PRI_ABS <= 69')\ndf_casos_faixas = get_proportion_cases(df,index_cols,categories_cols)\ndf_chart = df_casos_faixas.query('faixa_etaria == \"00-20\"')\n\ncategory = 'UF_RES'\nchart = alt.Chart(df_chart).mark_line(point=True).encode(\n x='SEM_PRI_ABS',\n y='proporção',\n color= category,\n tooltip=df_chart.columns.to_list()\n)\n\nns_opacity = 0.01\nselection = alt.selection_multi(empty='all', fields=[category], bind='legend')\nchart = chart.add_selection(\n selection\n).encode(\n opacity=alt.condition(selection, alt.value(1.0), alt.value(ns_opacity))\n)\nchart", "_____no_output_____" ] ], [ [ "> Análise por semana - seleciona 1 UF", "_____no_output_____" ] ], [ [ "#hide-input\n\nchart = 
chart.transform_filter(\n selection\n)\nchart", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb764beca86263dca83d801b05ca23ed6685546c
12,981
ipynb
Jupyter Notebook
Big-Data-Clusters/CU8/Public/content/log-analyzers/tsg034-get-livy-logs.ipynb
meenal-gupta141/tigertoolbox
5c432392f7cab091121a8879ea886b39c54f519b
[ "MIT" ]
541
2019-05-07T11:41:25.000Z
2022-03-29T17:33:19.000Z
Big-Data-Clusters/CU8/Public/content/log-analyzers/tsg034-get-livy-logs.ipynb
sqlworldwide/tigertoolbox
2abcb62a09daf0116ab1ab9c9dd9317319b23297
[ "MIT" ]
89
2019-05-09T14:23:52.000Z
2022-01-13T20:21:04.000Z
Big-Data-Clusters/CU8/Public/content/log-analyzers/tsg034-get-livy-logs.ipynb
sqlworldwide/tigertoolbox
2abcb62a09daf0116ab1ab9c9dd9317319b23297
[ "MIT" ]
338
2019-05-08T05:45:16.000Z
2022-03-28T15:35:03.000Z
44.608247
232
0.400894
[ [ [ "TSG034 - Livy logs\n==================\n\nDescription\n-----------\n\nSteps\n-----\n\n### Parameters", "_____no_output_____" ] ], [ [ "import re\n\ntail_lines = 500\n\npod = None # All\ncontainer = 'hadoop-livy-sparkhistory'\nlog_files = [ '/var/log/supervisor/log/livy*' ]\n\nexpressions_to_analyze = [\n re.compile(\".{17} WARN \"),\n re.compile(\".{17} ERROR \")\n]", "_____no_output_____" ] ], [ [ "### Instantiate Kubernetes client", "_____no_output_____" ] ], [ [ "# Instantiate the Python Kubernetes client into 'api' variable\n\nimport os\nfrom IPython.display import Markdown\n\ntry:\n from kubernetes import client, config\n from kubernetes.stream import stream\n\n if \"KUBERNETES_SERVICE_PORT\" in os.environ and \"KUBERNETES_SERVICE_HOST\" in os.environ:\n config.load_incluster_config()\n else:\n try:\n config.load_kube_config()\n except:\n display(Markdown(f'HINT: Use [TSG118 - Configure Kubernetes config](../repair/tsg118-configure-kube-config.ipynb) to resolve this issue.'))\n raise\n api = client.CoreV1Api()\n\n print('Kubernetes client instantiated')\nexcept ImportError:\n display(Markdown(f'HINT: Use [SOP059 - Install Kubernetes Python module](../install/sop059-install-kubernetes-module.ipynb) to resolve this issue.'))\n raise", "_____no_output_____" ] ], [ [ "### Get the namespace for the big data cluster\n\nGet the namespace of the Big Data Cluster from the Kuberenetes API.\n\n**NOTE:**\n\nIf there is more than one Big Data Cluster in the target Kubernetes\ncluster, then either:\n\n- set \\[0\\] to the correct value for the big data cluster.\n- set the environment variable AZDATA\\_NAMESPACE, before starting\n Azure Data Studio.", "_____no_output_____" ] ], [ [ "# Place Kubernetes namespace name for BDC into 'namespace' variable\n\nif \"AZDATA_NAMESPACE\" in os.environ:\n namespace = os.environ[\"AZDATA_NAMESPACE\"]\nelse:\n try:\n namespace = api.list_namespace(label_selector='MSSQL_CLUSTER').items[0].metadata.name\n except IndexError:\n from IPython.display import Markdown\n display(Markdown(f'HINT: Use [TSG081 - Get namespaces (Kubernetes)](../monitor-k8s/tsg081-get-kubernetes-namespaces.ipynb) to resolve this issue.'))\n display(Markdown(f'HINT: Use [TSG010 - Get configuration contexts](../monitor-k8s/tsg010-get-kubernetes-contexts.ipynb) to resolve this issue.'))\n display(Markdown(f'HINT: Use [SOP011 - Set kubernetes configuration context](../common/sop011-set-kubernetes-context.ipynb) to resolve this issue.'))\n raise\n\nprint('The kubernetes namespace for your big data cluster is: ' + namespace)", "_____no_output_____" ] ], [ [ "### Get tail for log", "_____no_output_____" ] ], [ [ "# Display the last 'tail_lines' of files in 'log_files' list\n\npods = api.list_namespaced_pod(namespace)\n\nentries_for_analysis = []\n\nfor p in pods.items:\n if pod is None or p.metadata.name == pod:\n for c in p.spec.containers:\n if container is None or c.name == container:\n for log_file in log_files:\n print (f\"- LOGS: '{log_file}' for CONTAINER: '{c.name}' in POD: '{p.metadata.name}'\")\n try:\n output = stream(api.connect_get_namespaced_pod_exec, p.metadata.name, namespace, command=['/bin/sh', '-c', f'tail -n {tail_lines} {log_file}'], container=c.name, stderr=True, stdout=True)\n except Exception:\n print (f\"FAILED to get LOGS for CONTAINER: {c.name} in POD: {p.metadata.name}\")\n else:\n for line in output.split('\\n'):\n for expression in expressions_to_analyze:\n if expression.match(line):\n entries_for_analysis.append(line)\n 
print(line)\nprint(\"\")\nprint(f\"{len(entries_for_analysis)} log entries found for further analysis.\")", "_____no_output_____" ] ], [ [ "### Analyze log entries and suggest relevant Troubleshooting Guides", "_____no_output_____" ] ], [ [ "# Analyze log entries and suggest further relevant troubleshooting guides\nfrom IPython.display import Markdown\nimport os\nimport json\nimport requests\nimport ipykernel\nimport datetime\n\nfrom urllib.parse import urljoin\nfrom notebook import notebookapp\n\ndef get_notebook_name():\n \"\"\"Return the full path of the jupyter notebook. Some runtimes (e.g. ADS) \n have the kernel_id in the filename of the connection file. If so, the \n notebook name at runtime can be determined using `list_running_servers`.\n Other runtimes (e.g. azdata) do not have the kernel_id in the filename of\n the connection file, therefore we are unable to establish the filename\n \"\"\"\n connection_file = os.path.basename(ipykernel.get_connection_file())\n \n # If the runtime has the kernel_id in the connection filename, use it to\n # get the real notebook name at runtime, otherwise, use the notebook \n # filename from build time.\n try: \n kernel_id = connection_file.split('-', 1)[1].split('.')[0]\n except:\n pass\n else:\n for servers in list(notebookapp.list_running_servers()):\n try:\n response = requests.get(urljoin(servers['url'], 'api/sessions'), params={'token': servers.get('token', '')}, timeout=.01)\n except:\n pass\n else:\n for nn in json.loads(response.text):\n if nn['kernel']['id'] == kernel_id:\n return nn['path']\n\ndef load_json(filename):\n with open(filename, encoding=\"utf8\") as json_file:\n return json.load(json_file)\n\ndef get_notebook_rules():\n \"\"\"Load the notebook rules from the metadata of this notebook (in the .ipynb file)\"\"\"\n file_name = get_notebook_name()\n\n if file_name == None:\n return None\n else:\n j = load_json(file_name)\n\n if \"azdata\" not in j[\"metadata\"] or \\\n \"expert\" not in j[\"metadata\"][\"azdata\"] or \\\n \"log_analyzer_rules\" not in j[\"metadata\"][\"azdata\"][\"expert\"]:\n return []\n else:\n return j[\"metadata\"][\"azdata\"][\"expert\"][\"log_analyzer_rules\"]\n\nrules = get_notebook_rules()\n\nif rules == None:\n print(\"\")\n print(f\"Log Analysis only available when run in Azure Data Studio. Not available when run in azdata.\")\nelse:\n print(f\"Applying the following {len(rules)} rules to {len(entries_for_analysis)} log entries for analysis, looking for HINTs to further troubleshooting.\")\n print(rules)\n hints = 0\n if len(rules) > 0:\n for entry in entries_for_analysis:\n for rule in rules:\n if entry.find(rule[0]) != -1:\n print (entry)\n\n display(Markdown(f'HINT: Use [{rule[2]}]({rule[3]}) to resolve this issue.'))\n hints = hints + 1\n\n print(\"\")\n print(f\"{len(entries_for_analysis)} log entries analyzed (using {len(rules)} rules). {hints} further troubleshooting hints made inline.\")", "_____no_output_____" ], [ "print('Notebook execution complete.')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
cb7663101fc6217afc413fc16228a8f1ed323d60
4,558
ipynb
Jupyter Notebook
NBA-Champion-Predictions.ipynb
vtekur/nba-champion-predictions
f0bf9e1808cda3ea3a8192528bc1c3594acc32bf
[ "MIT" ]
null
null
null
NBA-Champion-Predictions.ipynb
vtekur/nba-champion-predictions
f0bf9e1808cda3ea3a8192528bc1c3594acc32bf
[ "MIT" ]
null
null
null
NBA-Champion-Predictions.ipynb
vtekur/nba-champion-predictions
f0bf9e1808cda3ea3a8192528bc1c3594acc32bf
[ "MIT" ]
null
null
null
35.061538
305
0.517332
[ [ [ "from bs4 import BeautifulSoup\nimport requests\nfrom urllib.parse import urljoin\nimport re\nimport numpy as np\nimport pandas as pd\nimport json", "_____no_output_____" ] ], [ [ "## Dataset Generation\n\nStats scraped from basketball reference", "_____no_output_____" ] ], [ [ "nba_champion_url = 'https://www.basketball-reference.com/playoffs/'\nnba_team_stats_url = 'https://www.basketball-reference.com/play-index/tsl_finder.cgi?request=1&match=single&type=advanced&year_min=1980&year_max=&lg_id=NBA&franch_id=&c1stat=&c1comp=&c1val=&c2stat=&c2comp=&c2val=&c3stat=&c3comp=&c3val=&c4stat=&c4comp=&c4val=&order_by=wins&order_by_asc=&offset=0'\nbase_url = requests.get(nba_team_stats_url).url\ndef get_soup_from_url(url):\n return BeautifulSoup(requests.get(url).text, 'html.parser')\ndef create_champion_dict(soup): \n champions = {}\n for row in soup.find_all('tr'):\n if row.find('th') and row.find_all('th')[0].get('data-stat') == 'year_id' and row.find('td') and row.find('a'): \n year = int(row.find('a').text)\n if year < 1980: \n break\n year_str = f'{year - 1}-{year % 100}'\n champ = [stat.text for stat in row.find_all('td') if stat.get('data-stat') == 'champion'][0]\n champions[year_str] = champ\n return champions\ndef create_team_dataset(soup, champions):\n searching = True\n rows_list = []\n while searching: \n for row in soup.find_all('tr'): \n if row.find_all('th')[0].get('data-stat') == 'ranker' and row.find('td') and row.find('a'): \n current_row = {}\n current_row['Team'] = row.find('a').get('title')\n for stat in row.find_all('td'): \n if stat.get('data-stat') == 'season':\n season = stat.text\n current_row['Champion'] = current_row['Team'] == champions.get(season)\n current_row['Team'] += ' ' + season\n elif stat.get('data-stat') == 'win_loss_pct':\n current_row['win_loss_pct'] = float(stat.text)\n elif stat.get('data-stat') == 'efg_pct':\n current_row['efg_pct'] = float(stat.text)\n elif stat.get('data-stat') == 'off_rtg':\n current_row['off_rtg'] = float(stat.text) \n elif stat.get('data-stat') == 'def_rtg':\n current_row['def_rtg'] = float(stat.text)\n rows_list.append(current_row)\n searching = False\n for link in soup.find_all('a'): \n if link.text == 'Next page': \n soup = get_soup_from_url(urljoin(base_url, link.get('href')))\n searching = True\n dataset = pd.DataFrame(rows_list)\n dataset.set_index('Team')\n return dataset", "_____no_output_____" ], [ "champions = create_champion_dict(get_soup_from_url(nba_champion_url))\ndataset = create_team_dataset(get_soup_from_url(nba_team_stats_url), champions)", "_____no_output_____" ], [ "dataset.to_csv('nba_team_data.csv')", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
cb766454f0d8f719e6301d64e74040b5ceb75e97
302,762
ipynb
Jupyter Notebook
Stage 0.ipynb
AdityaSidharta/ST4240_data_mining_competition
514e7f0e2c80cacf4b15de5ed0d0fb2bc697bc11
[ "MIT" ]
null
null
null
Stage 0.ipynb
AdityaSidharta/ST4240_data_mining_competition
514e7f0e2c80cacf4b15de5ed0d0fb2bc697bc11
[ "MIT" ]
null
null
null
Stage 0.ipynb
AdityaSidharta/ST4240_data_mining_competition
514e7f0e2c80cacf4b15de5ed0d0fb2bc697bc11
[ "MIT" ]
null
null
null
123.174125
156,584
0.784164
[ [ [ "# Taxi Price Prediction Competition - Team 40 - Aditya Sidharta", "_____no_output_____" ], [ "## Overall Pipeline", "_____no_output_____" ], [ "In this Taxi price prediction competition, we were asked to build a model which is able to predict the price of a taxi ride, by predicting the duration and the trajectory length of the taxi ride. Then, we will sum the values of the two prediction to get the predicted price values. This prediction will be evaluated using RMPSE method.\n\nIn tackling this problem, I have divided the pipeline into 5 stages - Stage 0, Stage 1, Stage 2, Stage 3, and Stage 4. The input of this pipeline is the training and test dataset which contain the timestamp, location, and taxi ID for each taxi ride. In the training dataset, we have the trajectories as well as the true Duration / Trajectory length values. however, in this prediction, I will not use the Trajectory information. \n\nI will provide a brief summary for each Stage\n\n- Stage 0\n\n In this stage, we will process the raw data for the train and test dataset to add more features so that our model consider more information for the duration/trajlength prediction.\n \n - INPUT : RAW TRAIN DATA, RAW TEST DATA\n - OUTPUT : TRAIN DATA STAGE 0, TEST DATA STAGE 0, LOG DURATION, LOG TRAJLENGTH\n\n - Perform Basic feature engineering for Training Dataset\n - Perform Advanced feature engineering for Training Dataset\n - Perform Basic feature engineering for Test Dataset\n - Perform Advanced feature engineering for Test Dataset\n - Perform Transformation on Training Duration & Training Trajectory length values\n - Perform One hot Encoding on Training Dataset\n - Perform One hot Encoding on Test Dataset\n ", "_____no_output_____" ], [ "\n- Stage 1\n\n In this stage, our primary goal is to detect outliers in the dataset. We will perform this outlier detection by fitting a simple model and predicting our training dataset, and we will remove all observations that have extremely bad predictions using our simple model. \n \n - INPUT : TRAIN DATA STAGE 0, LOG DURATION, LOG TRAJLENGTH\n - MODEL : Random Forest , XGBoost\n - OUTPUT : NON-OUTLIER INDEX STAGE 1\n \n \n- Stage 2\n\n In this stage, our goal is to predict duration and trajlength given the TRAIN DATA STAGE 0. In this case, we will create an ensemble model using Random Forest and XGboost. We will fit a Random Forest and XGBoost model to predict the duration and trajlength, and then we will fit a Lasso linear model to fit the prediction from Random Forest and XGBoost to get the final prediction for duration and trajlength\n \n - INPUT : TRAIN DATA STAGE 0, TEST DATA STAGE 0, LOG DURATION, LOG TRAJLENGTH\n - Model : Random Forest + XGBoost (Ensemble - Lasso)\n - OUTPUT : PREDICTED LOG DURATION - TRAIN STAGE 2, PREDICTED LOG TRAJLENGTH - TRAIN STAGE 2, PREDICTED LOG DURATION - TEST STAGE 2, PREDICTED LOG TRAJLENGTH - TEST STAGE 2\n \n \n- Stage 3\n\n In this stage, we would like to refine our duration and trajlength prediction given that we know the trajlength when we would like to predict duration, and vice versa. We perform this because we believe that trajlength and duration is highly correlated, and it is a useful information to have to improve our model. 
We will fit another ensemble model, using Random Forest, XGboost, Lasso and Elastic Net using Lasso Model to get our final prediction for duration and length, using training data + trajlength and training data + duration respectively.\n \n - INPUT : TRAIN DATA STAGE 0, TEST DATA STAGE 0, NON-OUTLIER INDEX STAGE 1, PREDICTED LOG DURATION - TRAIN STAGE 2, PREDICTED LOG TRAJLENGTH - TRAIN STAGE 2, PREDICTED LOG DURATION - TEST STAGE 2, PREDICTED LOG TRAJLENGTH - TEST STAGE 2\n - Model : Random Forest + XGBoost + Lasso + Elastic Net (Ensemble - Lasso)\n - OUTPUT : PREDICTED LOG DURATION - TEST STAGE 3, PREDICTED LOG TRAJLENGTH - TEST STAGE 3\n \n \n- Stage 4\n\n In this stage, we would like to perform postprocessing to our prediction from Stage 3. As we have not used information about the coordinates from the train data, we would like to consider this information by manually refining our prediction if we have observations within the same location in the test data as compared to the training data..\n \n - INPUT : PREDICTED LOG DURATION - TEST STAGE 3, PREDICTED LOG TRAJLENGTH - TEST STAGE 3, TRAIN DATA, TEST DATA\n - OUTPUT : PREDICTED LOG DURATION - TEST STAGE 4, PREDICTED LOG TRAJLENGTH - TEST STAGE 4", "_____no_output_____" ], [ " \n![pipeline.png](pipeline.png)", "_____no_output_____" ], [ "The first thing that we would like to do is Data Exploration. Here, we want to understand the general structure about our dataset so that we are able to come out with the accurate pipeline model for our duration and the trajectory length prediction", "_____no_output_____" ] ], [ [ "%matplotlib inline", "_____no_output_____" ], [ "import pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport datetime\nimport re\nimport math\nfrom pandas_summary import DataFrameSummary", "_____no_output_____" ], [ "train_file = \"train_data.csv\"\ntest_file = \"test.csv\"", "_____no_output_____" ], [ "def straight_dist(x_start, x_end,\n y_start, y_end):\n return np.sqrt((x_end - x_start)**2\\\n + (y_end - y_start)**2)\n\ndef calc_azt(x_start, x_end, \n y_start, y_end ):\n return math.degrees(math.atan2\\\n (y_end - y_start,\n x_end - x_start)) // 45\n\ndef coordinates_bin(coor):\n return coor // 50 + 21\n\ndef convert_ts_to_datetime(ts):\n return datetime.datetime.\\\nstrptime(ts, '%Y-%m-%d %H:%M:%S')\n\ndef get_weekday(ts):\n dt = convert_ts_to_datetime(ts)\n return dt.weekday()\n\ndef is_weekend(ts):\n dt = convert_ts_to_datetime(ts)\n return dt.weekday() >= 5\n\ndef get_day(ts):\n dt = convert_ts_to_datetime(ts)\n return dt.day\n\ndef get_month(ts):\n dt = convert_ts_to_datetime(ts)\n return dt.month\n\ndef get_year(ts):\n dt = convert_ts_to_datetime(ts)\n return dt.year\n\ndef get_hour(ts):\n dt = convert_ts_to_datetime(ts)\n return dt.hour\n\ndef get_minute(ts):\n dt = convert_ts_to_datetime(ts)\n return dt.minute // 10\n\ndef time_classification(ts):\n hour = get_hour(ts)\n if hour <= 5:\n return \"Midnight\"\n if hour <= 8:\n return \"Morning\"\n if hour <= 11:\n return \"Noon\"\n if hour <= 18:\n return \"Afternoon\"\n if hour <= 20:\n return \"Night\"\n else:\n return \"LateNight\"", "_____no_output_____" ], [ "vec_straight_dist = np.vectorize(straight_dist)\nvec_calc_azt = np.vectorize(calc_azt)\nvec_coordinates_bin = np.vectorize(coordinates_bin)\nvec_get_weekday = np.vectorize(get_weekday)\nvec_is_weekend = np.vectorize(is_weekend)\nvec_get_day = np.vectorize(get_day)\nvec_get_month = np.vectorize(get_month)\nvec_get_year = 
np.vectorize(get_year)\nvec_get_hour = np.vectorize(get_hour)\nvec_get_minute = np.vectorize(get_minute)\nvec_time_classification = np.vectorize\\\n(time_classification)", "_____no_output_____" ], [ "df_train = pd.read_csv(train_file)\ndf_test = pd.read_csv(test_file)\ndf_train_simple = df_train[[u'ID', \n u'TAXI_ID', \n u'TIMESTAMP', \n u'X_START', \n u'Y_START', \n u'X_END',\n u'Y_END']]\ndf_all = pd.concat([df_train_simple, df_test])", "_____no_output_____" ], [ "n_train = df_train.shape[0]\nn_test = df_test.shape[0]\nprint df_train.shape\nprint df_test.shape\nprint df_train.columns\nprint df_test.columns", "(465172, 11)\n(465172, 7)\nIndex([u'ID', u'TAXI_ID', u'TIMESTAMP', u'DURATION', u'X_START', u'Y_START',\n u'X_END', u'Y_END', u'X_TRAJECTORY', u'Y_TRAJECTORY', u'TRAJ_LENGTH'],\n dtype='object')\nIndex([u'ID', u'TAXI_ID', u'TIMESTAMP', u'X_START', u'Y_START', u'X_END',\n u'Y_END'],\n dtype='object')\n" ] ], [ [ "One of the good things about our dataset is that there are no missing values at all. However, we need to make sure that our features are somewhat normally distributed to improve the performance of our linear model prediction. As stated in the basic exploration pdf, we will perform log transformation for both `TRAJ_LENGTH` column and `DURATION` column", "_____no_output_____" ] ], [ [ "DataFrameSummary(df_train).summary()", "_____no_output_____" ], [ "DataFrameSummary(df_test).summary()", "_____no_output_____" ] ], [ [ "One interesting fact about this dataset is that for some of the observation, even though the starting point and ending point is close to each other, the `TRAJ_LENGTH` in the training dataset is extremely large. As we can see from the plot below, for a small value of `STRAIGHT_DIST`, its possible that the `TRAJ_LENGTH` is extremely large. There are few possibilities why this might happen\n\n - The road within this specific geographical area is not highly connected -> the taxi driver needs to make huge round just to get to the destination.\n - The taxi driver is not familiar with the geographical area, thats why the routes taken by the taxi driver is highly ineffective\n - Outlier in the dataset. This might came from various sources.\n \nThis outlier is highly dangerous for our model as we might learn about wrong information when we fit the model. Therefore, we will first try to remove all the outliers from the training data so that this outliers might not affect our true model.", "_____no_output_____" ] ], [ [ "fig, ax = plt.subplots(ncols=1, nrows=1)\nax.scatter(df_train['STRAIGHT_DIST'], \n df_train['TRAJ_LENGTH'], alpha=0.2)\nplt.show()", "_____no_output_____" ] ], [ [ "The second important information is that the training and test data contains information between 3 March 2008 and 25 May 2009. The training data contain all dates between the starting date and testing date, and thus we can be sure that all the dates in the test set are contained in the training set. 
We can then use the date as one of our features for the training and test data", "_____no_output_____" ] ], [ [ "from datetime import date, timedelta\ntrain_date = pd.to_datetime\\\n(np.unique(df_train['TIMESTAMP'])).normalize()\ntest_date = pd.to_datetime\\\n(np.unique(df_test['TIMESTAMP'])).normalize()\ndate_set = set(train_date[0] \\\n + timedelta(x) \\\n for x in range((train_date[-1] \\\n - train_date[0]).days))\nprint \"Start-date : \" + str(train_date[0])\nprint \"End-date : \" + str(train_date[-1])\nmissing = sorted(date_set - set(train_date))\nmissing", "Start-date : 2008-03-03 00:00:00\nEnd-date : 2009-05-25 00:00:00\n" ] ], [ [ "We will use K-Means to aggregate the latitude and longitude information for both starting points and ending points. Using this algorithm, we will assign every coordinate in the training and test data to one of 750 clusters. We can then use the cluster label as a feature for the training and test datasets", "_____no_output_____" ] ], [ [ "from sklearn.cluster import MiniBatchKMeans, KMeans", "_____no_output_____" ], [ "x_coors = np.concatenate([df_all['X_START'],\n df_all['X_END']])\ny_coors = np.concatenate([df_all['Y_START'],\\\n df_all['Y_END']])\nall_coors = np.vstack((x_coors, y_coors)).T\nk_means_model = KMeans(init='k-means++',\n n_clusters=750,\n n_init=3, \n n_jobs=-1,\n verbose = 2).fit(all_coors)", "_____no_output_____" ] ], [ [ "The 750 clusters are plotted in the figure below", "_____no_output_____" ] ], [ [ "fig, ax = plt.subplots(ncols=1, nrows=1)\nax.scatter(all_coors[:,0], all_coors[:,1], \n s=10, lw=0, cmap='tab20', \n alpha=0.2, c = k_means_model.\\\n predict(all_coors))\nax.set_xlabel('Longitude')\nax.set_ylabel('Latitude')\nplt.show()", "_____no_output_____" ] ], [ [ "The basic feature engineering captures all the information that we can extract from the starting/ending location and timestamp in the training and test datasets. 
The features engineered in the basic features are as follows:\n\n- `LOG_STRAIGHT_DIST` : the log of the euclidian distance between the starting point and the ending point.\n- `AZT` : bearing between the starting point and ending point.\n- `DAYOFWEEK`: The day of week of the taxi ride\n- `DATE` : The date of the taxi ride\n- `DAY` : The day in the month of the taxi ride\n- `MONTH` : The month of the taxi ride\n- `YEAR` : The year of the taxi ride\n- `HOUR` : The hour of the taxi ride\n- `MINUTE` : The minute of the taxi ride\n- `TIME_CLASS` : The time-class of the taxi ride\n- `START_BIN` : The cluster where the starting point of the taxi ride belongs to\n- `END_BIN` : The cluster where the ending point of the taxi ride belongs to", "_____no_output_____" ] ], [ [ "def get_basic_features_train(df):\n duration_train = df['DURATION'].values\n traj_train = df['TRAJ_LENGTH'].values\n price_train = duration_train + traj_train\n df['LOG_DURATION'] = np.log(duration_train)\n df['LOG_TRAJLENGTH'] = np.log(traj_train)\n df['LOG_PRICE'] = np.log(price_train)\n df['LOG_STRAIGHT_DIST'] = \\\n np.log(vec_straight_dist(df['X_START'], \n df['X_END'],\n df['Y_START'],\n df['Y_END']))\n df['AZT'] = vec_calc_azt(df['X_START'],\n df['X_END'],\n df['Y_START'],\n df['Y_END'])\n df['DAYOFWEEK'] = vec_get_weekday(df['TIMESTAMP'])\n df['DATE'] = pd.to_datetime(df['TIMESTAMP']\\\n .values).normalize()\\\n .astype(str)\n df['DAY'] = vec_get_day(df['TIMESTAMP'])\n df['MONTH'] = vec_get_month(df['TIMESTAMP'])\n df['YEAR'] = vec_get_year(df['TIMESTAMP'])\n df['HOUR'] = vec_get_hour(df['TIMESTAMP'])\n df['MINUTE'] = vec_get_minute(df['TIMESTAMP'])\n df['TIME_CLASS'] = vec_time_classification(df['TIMESTAMP'])\n start_coors = np.vstack((df['X_START'], df['Y_START'])).T\n end_coors = np.vstack((df['X_END'], df['Y_END'])).T\n df['START_BIN'] = k_means_model.predict(start_coors)\n df['END_BIN'] = k_means_model.predict(end_coors)\n return df\n\ndef get_basic_features_test(df):\n df['LOG_STRAIGHT_DIST'] = \\\n np.log(vec_straight_dist(df['X_START'], \n df['X_END'], \n df['Y_START'],\n df['Y_END']))\n df['AZT'] = vec_calc_azt(df['X_START'], \n df['X_END'], \n df['Y_START'],\n df['Y_END'])\n df['DAYOFWEEK'] = vec_get_weekday(df['TIMESTAMP'])\n df['DATE'] = pd.to_datetime(df['TIMESTAMP']\\\n .values).normalize().astype(str)\n df['DAY'] = vec_get_day(df['TIMESTAMP'])\n df['MONTH'] = vec_get_month(df['TIMESTAMP'])\n df['YEAR'] = vec_get_year(df['TIMESTAMP'])\n df['HOUR'] = vec_get_hour(df['TIMESTAMP'])\n df['MINUTE'] = vec_get_minute(df['TIMESTAMP'])\n df['TIME_CLASS'] = vec_time_classification(df['TIMESTAMP'])\n start_coors = np.vstack((df['X_START'], df['Y_START'])).T\n end_coors = np.vstack((df['X_END'], df['Y_END'])).T\n df['START_BIN'] = k_means_model.predict(start_coors)\n df['END_BIN'] = k_means_model.predict(end_coors)\n return df", "_____no_output_____" ], [ "df_train_basic = get_basic_features_train(df_train)\ndf_test_basic = get_basic_features_test(df_test)", "_____no_output_____" ], [ "log_duration = df_train_basic['LOG_DURATION'].values\nlog_trajlength = df_train_basic['LOG_TRAJLENGTH'].values\nlog_price = df_train_basic['LOG_PRICE'].values", "_____no_output_____" ], [ "print df_train_basic.shape\nprint df_test_basic.shape\nprint df_train_basic.columns\nprint df_test_basic.columns", "(465172, 27)\n(465172, 19)\nIndex([u'ID', u'TAXI_ID', u'TIMESTAMP', u'DURATION', u'X_START', u'Y_START',\n u'X_END', u'Y_END', u'X_TRAJECTORY', u'Y_TRAJECTORY', u'TRAJ_LENGTH',\n u'STRAIGHT_DIST', u'LOG_DURATION', u'LOG_TRAJLENGTH', 
u'LOG_PRICE',\n u'LOG_STRAIGHT_DIST', u'AZT', u'DAYOFWEEK', u'DATE', u'DAY', u'MONTH',\n u'YEAR', u'HOUR', u'MINUTE', u'TIME_CLASS', u'START_BIN', u'END_BIN'],\n dtype='object')\nIndex([u'ID', u'TAXI_ID', u'TIMESTAMP', u'X_START', u'Y_START', u'X_END',\n u'Y_END', u'LOG_STRAIGHT_DIST', u'AZT', u'DAYOFWEEK', u'DATE', u'DAY',\n u'MONTH', u'YEAR', u'HOUR', u'MINUTE', u'TIME_CLASS', u'START_BIN',\n u'END_BIN'],\n dtype='object')\n" ] ], [ [ "We will then only retain all the columns that we will use as the features in the training dataset", "_____no_output_____" ] ], [ [ "df_train_basic_simple = df_train_basic[[u'ID', \n u'TAXI_ID', u'TIMESTAMP', u'X_START', u'Y_START', u'X_END',\n u'Y_END', u'LOG_STRAIGHT_DIST', u'AZT', u'DAYOFWEEK', u'DATE', u'DAY',\n u'MONTH', u'YEAR', u'HOUR', u'MINUTE', u'TIME_CLASS', u'START_BIN',\n u'END_BIN']]\ndf_all_basic = pd.concat((df_train_basic_simple, df_test_basic))\n\nprint df_all_basic.shape\nprint df_all_basic.columns", "(930344, 19)\nIndex([u'ID', u'TAXI_ID', u'TIMESTAMP', u'X_START', u'Y_START', u'X_END',\n u'Y_END', u'LOG_STRAIGHT_DIST', u'AZT', u'DAYOFWEEK', u'DATE', u'DAY',\n u'MONTH', u'YEAR', u'HOUR', u'MINUTE', u'TIME_CLASS', u'START_BIN',\n u'END_BIN'],\n dtype='object')\n" ] ], [ [ "The next feature that we would like to engineer is the advanced features. This takes in the observation within the whole training dataset. The features extracted is as follows:\n\nFor each unique `DATE`, `TAXI_ID`, `MONTH`, `YEAR`, `DAYOFWEEK`, `TIME_CLASS`, `START_BIN` and `END_BIN`, we would like to get the following information\n \n - The mean of the log duration for the specific unique value within the training dataset\n - The mean of the log price for the specific unique value within the training dataset\n - The mean of the log trajlength for the specific value within the training dataset\n - Number of observations with the particular value within the training dataset\n \nFor example, if we observe `TAXI_ID` = 656, we will try to find all taxi rides with `TAXI_ID` = 656 in our training dataset, and we will find the mean log duration, log price, log trajlength, as well as the number of rides within the training dataset. 
This will be the values for `LOGDURATION_BY_TAXI_ID`, `LOGPRICE_BY_TAXI_ID`, `LOGTRAJLENGTH_BY_TAXI_ID`, and `COUNT_BY_TAXI_ID` respectively\n\nThe main idea behind this advanced feature engineering is to extract information about the price/duration/trajlength of a specific taxi driver / specific day / specific time / specific location and so on", "_____no_output_____" ] ], [ [ "def create_dict_date(df_train, df_all):\n result_dict = {}\n column = ['TAXI_ID', 'DATE', 'MONTH', 'YEAR',\n 'DAYOFWEEK', 'TIME_CLASS', 'START_BIN', 'END_BIN']\n for column_names in column:\n indiv_dict = {}\n duration = df_train.groupby(column_names)\\\n ['LOG_DURATION'].mean()\n mean_duration = duration.mean()\n price = df_train.groupby(column_names)\\\n ['LOG_PRICE'].mean()\n mean_price = price.mean()\n trajlength = df_train.groupby(column_names)\\\n ['LOG_TRAJLENGTH'].mean()\n mean_trajlength = trajlength.mean()\n count = df_all.groupby(column_names)\\\n [column_names].count()\n mean_count = count.mean()\n for index in duration.index:\n indiv_dict[str(index)] = {\n 'duration' : duration[index],\n 'price' : price[index],\n 'trajlength' : trajlength[index],\n 'count' : count[index]\n }\n indiv_dict['avg'] = {\n 'duration' : mean_duration,\n 'price' : mean_price,\n 'trajlength' : mean_trajlength,\n 'count' : mean_count\n }\n result_dict[column_names] = indiv_dict\n return result_dict", "_____no_output_____" ], [ "def get_mean_values(array_column, result_dict, column_name):\n n_obs = len(array_column)\n column_dict = result_dict[column_name]\n column_dict_index = column_dict.keys()\n result_duration = np.zeros(n_obs)\n result_price = np.zeros(n_obs)\n result_trajlength = np.zeros(n_obs)\n result_count = np.zeros(n_obs)\n for idx in range(n_obs):\n target = str(array_column[idx])\n if target not in column_dict_index:\n print str(target) + \" is not found\"\n result_duration[idx] = \\\n column_dict['avg']['duration']\n result_price[idx] = \\\n column_dict['avg']['price']\n result_trajlength[idx] = \\\n column_dict['avg']['trajlength']\n result_count[idx] = \\\n column_dict['avg']['count']\n else:\n result_duration[idx] = \\\n column_dict[target]['duration']\n result_price[idx] = \\\n column_dict[target]['price']\n result_trajlength[idx] = \\\n column_dict[target]['trajlength']\n result_count[idx] = \\\n column_dict[target]['count']\n return result_duration, result_price,\nresult_trajlength, result_count", "_____no_output_____" ], [ "def get_advanced_features(df, result_dict):\n column = ['DATE', 'TAXI_ID', 'DATE', 'MONTH', 'YEAR', \n 'DAYOFWEEK', 'TIME_CLASS', 'START_BIN', 'END_BIN']\n for column_names in column: \n result_duration, result_price, \n result_trajlength, result_count = \\\n get_mean_values(df[column_names].values, \n result_dict, column_names)\n df['LOGDURATION_BY_' + column_names] = \\\n result_duration\n df['LOGPRICE_BY_' + column_names] = \\\n result_price\n df['LOGTRAJLENGTH_BY_' + column_names] \\\n = result_trajlength\n df['COUNT_BY_' + column_names] = \\\n result_count\n return df", "_____no_output_____" ] ], [ [ "If we are unable to get information about specific values, we will take the mean of the whole dataset instead. 
Luckily, the only value that we couldnt find in our training data is the `TAXI_ID` = 439", "_____no_output_____" ] ], [ [ "result_dict = create_dict_date(df_train_basic, df_all_basic)\ndf_all_advanced = get_advanced_features(df_all_basic, result_dict)", "439 is not found\n" ], [ "print df_all_advanced.shape\nprint df_all_advanced.columns", "(930344, 51)\nIndex([u'ID', u'TAXI_ID', u'TIMESTAMP', u'X_START', u'Y_START', u'X_END',\n u'Y_END', u'LOG_STRAIGHT_DIST', u'AZT', u'DAYOFWEEK', u'DATE', u'DAY',\n u'MONTH', u'YEAR', u'HOUR', u'MINUTE', u'TIME_CLASS', u'START_BIN',\n u'END_BIN', u'LOGDURATION_BY_DATE', u'LOGPRICE_BY_DATE',\n u'LOGTRAJLENGTH_BY_DATE', u'COUNT_BY_DATE', u'LOGDURATION_BY_TAXI_ID',\n u'LOGPRICE_BY_TAXI_ID', u'LOGTRAJLENGTH_BY_TAXI_ID',\n u'COUNT_BY_TAXI_ID', u'LOGDURATION_BY_MONTH', u'LOGPRICE_BY_MONTH',\n u'LOGTRAJLENGTH_BY_MONTH', u'COUNT_BY_MONTH', u'LOGDURATION_BY_YEAR',\n u'LOGPRICE_BY_YEAR', u'LOGTRAJLENGTH_BY_YEAR', u'COUNT_BY_YEAR',\n u'LOGDURATION_BY_DAYOFWEEK', u'LOGPRICE_BY_DAYOFWEEK',\n u'LOGTRAJLENGTH_BY_DAYOFWEEK', u'COUNT_BY_DAYOFWEEK',\n u'LOGDURATION_BY_TIME_CLASS', u'LOGPRICE_BY_TIME_CLASS',\n u'LOGTRAJLENGTH_BY_TIME_CLASS', u'COUNT_BY_TIME_CLASS',\n u'LOGDURATION_BY_START_BIN', u'LOGPRICE_BY_START_BIN',\n u'LOGTRAJLENGTH_BY_START_BIN', u'COUNT_BY_START_BIN',\n u'LOGDURATION_BY_END_BIN', u'LOGPRICE_BY_END_BIN',\n u'LOGTRAJLENGTH_BY_END_BIN', u'COUNT_BY_END_BIN'],\n dtype='object')\n" ], [ "pd.options.display.max_columns = 70\nDataFrameSummary(df_all_advanced).summary()", "_____no_output_____" ], [ "all_features = [u'TAXI_ID', u'LOG_STRAIGHT_DIST', \n u'AZT', u'DAYOFWEEK', u'DATE', u'DAY',\n u'MONTH', u'YEAR', u'HOUR', u'MINUTE',\n u'TIME_CLASS', u'START_BIN',\n u'END_BIN', u'LOGDURATION_BY_DATE', u'LOGPRICE_BY_DATE',\n u'LOGTRAJLENGTH_BY_DATE', u'COUNT_BY_DATE', u'LOGDURATION_BY_TAXI_ID',\n u'LOGPRICE_BY_TAXI_ID', u'LOGTRAJLENGTH_BY_TAXI_ID',\n u'COUNT_BY_TAXI_ID', u'LOGDURATION_BY_MONTH', u'LOGPRICE_BY_MONTH',\n u'LOGTRAJLENGTH_BY_MONTH', u'COUNT_BY_MONTH', u'LOGDURATION_BY_YEAR',\n u'LOGPRICE_BY_YEAR', u'LOGTRAJLENGTH_BY_YEAR', u'COUNT_BY_YEAR',\n u'LOGDURATION_BY_DAYOFWEEK', u'LOGPRICE_BY_DAYOFWEEK',\n u'LOGTRAJLENGTH_BY_DAYOFWEEK', u'COUNT_BY_DAYOFWEEK',\n u'LOGDURATION_BY_TIME_CLASS', u'LOGPRICE_BY_TIME_CLASS',\n u'LOGTRAJLENGTH_BY_TIME_CLASS', u'COUNT_BY_TIME_CLASS',\n u'LOGDURATION_BY_START_BIN', u'LOGPRICE_BY_START_BIN',\n u'LOGTRAJLENGTH_BY_START_BIN', u'COUNT_BY_START_BIN',\n u'LOGDURATION_BY_END_BIN', u'LOGPRICE_BY_END_BIN',\n u'LOGTRAJLENGTH_BY_END_BIN', u'COUNT_BY_END_BIN']\n\ncat_features = [u'TAXI_ID', u'AZT', u'DAYOFWEEK', u'DATE', u'DAY',\n u'MONTH', u'YEAR', u'HOUR', u'MINUTE', u'TIME_CLASS', u'START_BIN',\n u'END_BIN']", "_____no_output_____" ] ], [ [ "Lastly, we will perform one-hot encoding on all categorical variables for both training and test dataset", "_____no_output_____" ] ], [ [ "def get_dummify(df):\n df_final = df[all_features]\n return pd.get_dummies(df_final, columns=cat_features, prefix=cat_features)", "_____no_output_____" ], [ "df_all_advanced_dummy = get_dummify(df_all_advanced)\ndf_train_advanced_dummy = df_all_advanced_dummy.iloc[:n_train, :]\ndf_test_advanced_dummy = df_all_advanced_dummy.iloc[n_train:, :]", "_____no_output_____" ], [ "X_train_stage0 = df_train_advanced_dummy.values\nX_test_stage0 = df_test_advanced_dummy.values\nY_train_duration = log_duration\nY_train_trajlength = log_trajlength\nY_train_price = log_price", "_____no_output_____" ], [ "print X_train_stage0.shape\nprint X_test_stage0.shape\nprint 
Y_train_duration.shape\nprint Y_train_trajlength.shape\nprint Y_train_price.shape", "(465172, 2701)\n(465172, 2701)\n(465172,)\n(465172,)\n(465172,)\n" ], [ "from scipy import sparse\nsX_train_stage0 = sparse.csc_matrix(X_train_stage0)\nsX_test_stage0 = sparse.csc_matrix(X_test_stage0)", "_____no_output_____" ], [ "from sklearn.externals import joblib\njoblib.dump(sX_train_stage0 , 'sX_train_stage0.pkl')\njoblib.dump(sX_test_stage0, 'sX_test_stage0.pkl')\njoblib.dump(Y_train_duration, 'Y_train_duration.pkl')\njoblib.dump(Y_train_trajlength, 'Y_train_trajlength.pkl')\njoblib.dump(Y_train_price, 'Y_train_price.pkl')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
cb7671ae89de48fffa41341aee339c4de0dc41dc
13,181
ipynb
Jupyter Notebook
docs/source/estimator_intro.ipynb
wphicks/cuml
3c0132c4aa64095ce1610f5a37cb7f707082a26b
[ "Apache-2.0" ]
2
2020-12-19T23:34:37.000Z
2022-01-13T21:08:51.000Z
docs/source/estimator_intro.ipynb
wphicks/cuml
3c0132c4aa64095ce1610f5a37cb7f707082a26b
[ "Apache-2.0" ]
null
null
null
docs/source/estimator_intro.ipynb
wphicks/cuml
3c0132c4aa64095ce1610f5a37cb7f707082a26b
[ "Apache-2.0" ]
null
null
null
41.062305
427
0.608831
[ [ [ "# Training and Evaluating Machine Learning Models in cuML", "_____no_output_____" ], [ "This notebook explores several basic machine learning estimators in cuML, demonstrating how to train them and evaluate them with built-in metrics functions. All of the models are trained on synthetic data, generated by cuML's dataset utilities.\n\n1. Random Forest Classifier\n2. UMAP\n3. DBSCAN\n4. Linear Regression\n\n\n[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/rapidsai/cuml/blob/branch-0.15/docs/source/estimator_intro.ipynb)", "_____no_output_____" ], [ "### Shared Library Imports", "_____no_output_____" ] ], [ [ "import cuml\nfrom cupy import asnumpy \nfrom joblib import dump, load", "_____no_output_____" ] ], [ [ "## 1. Classification", "_____no_output_____" ], [ "### Random Forest Classification and Accuracy metrics\n\nThe Random Forest algorithm classification model builds several decision trees, and aggregates each of their outputs to make a prediction. For more information on cuML's implementation of the Random Forest Classification model please refer to : \nhttps://docs.rapids.ai/api/cuml/stable/api.html#cuml.ensemble.RandomForestClassifier\n\nAccuracy score is the ratio of correct predictions to the total number of predictions. It is used to measure the performance of classification models. \nFor more information on the accuracy score metric please refer to: https://en.wikipedia.org/wiki/Accuracy_and_precision\n\nFor more information on cuML's implementation of accuracy score metrics please refer to: https://docs.rapids.ai/api/cuml/stable/api.html#cuml.metrics.accuracy.accuracy_score\n\nThe cell below shows an end to end pipeline of the Random Forest Classification model. Here the dataset was generated by using sklearn's make_classification dataset. The generated dataset was used to train and run predict on the model. Random forest's performance is evaluated and then compared between the values obtained from the cuML and sklearn accuracy metrics.", "_____no_output_____" ] ], [ [ "from cuml.datasets.classification import make_classification\nfrom cuml.preprocessing.model_selection import train_test_split\nfrom cuml.ensemble import RandomForestClassifier as cuRF\nfrom sklearn.metrics import accuracy_score\n\n# synthetic dataset dimensions\nn_samples = 1000\nn_features = 10\nn_classes = 2\n\n# random forest depth and size\nn_estimators = 25\nmax_depth = 10\n\n# generate synthetic data [ binary classification task ]\nX, y = make_classification ( n_classes = n_classes,\n n_features = n_features,\n n_samples = n_samples,\n random_state = 0 )\n\nX_train, X_test, y_train, y_test = train_test_split( X, y, random_state = 0 )\n\nmodel = cuRF( max_depth = max_depth, \n n_estimators = n_estimators,\n seed = 0 )\n\ntrained_RF = model.fit ( X_train, y_train )\n\npredictions = model.predict ( X_test )\n\ncu_score = cuml.metrics.accuracy_score( y_test, predictions )\nsk_score = accuracy_score( asnumpy( y_test ), asnumpy( predictions ) )\n\nprint( \" cuml accuracy: \", cu_score )\nprint( \" sklearn accuracy : \", sk_score )\n\n# save \ndump( trained_RF, 'RF.model')\n\n# to reload the model uncomment the line below \nloaded_model = load('RF.model')", "_____no_output_____" ] ], [ [ "## Clustering", "_____no_output_____" ], [ "### UMAP and Trustworthiness metrics\nUMAP is a dimensionality reduction algorithm which performs non-linear dimension reduction. 
It can also be used for visualization.\nFor additional information on the UMAP model please refer to the documentation on https://docs.rapids.ai/api/cuml/stable/api.html#cuml.UMAP\n\nTrustworthiness is a measure of the extent to which the local structure is retained in the embedding of the model. Therefore, if a sample predicted by the model lies within an unexpected region of its nearest neighbors, that sample is penalized. For more information on the trustworthiness metric please refer to: https://scikit-learn.org/dev/modules/generated/sklearn.manifold.t_sne.trustworthiness.html\n\nThe documentation for cuML's implementation of the trustworthiness metric is: https://docs.rapids.ai/api/cuml/stable/api.html#cuml.metrics.trustworthiness.trustworthiness\n\nThe cell below shows an end-to-end pipeline of the UMAP model. Here, the blobs dataset is created by cuML's equivalent of the make_blobs function to be used as the input. The output of UMAP's fit_transform is evaluated using the trustworthiness function. The trustworthiness values obtained from sklearn and cuML are compared below.\n", "_____no_output_____" ] ], [ [ "from cuml.datasets import make_blobs\nfrom cuml.manifold.umap import UMAP as cuUMAP\nfrom sklearn.manifold import trustworthiness\nimport numpy as np\n\nn_samples = 1000\nn_features = 100\ncluster_std = 0.1\n\nX_blobs, y_blobs = make_blobs( n_samples = n_samples,\n cluster_std = cluster_std,\n n_features = n_features,\n random_state = 0,\n dtype=np.float32 )\n\ntrained_UMAP = cuUMAP( n_neighbors = 10 ).fit( X_blobs )\nX_embedded = trained_UMAP.transform( X_blobs )\n \ncu_score = cuml.metrics.trustworthiness( X_blobs, X_embedded )\nsk_score = trustworthiness( asnumpy( X_blobs ), asnumpy( X_embedded ) )\n\nprint(\" cuml's trustworthiness score : \", cu_score )\nprint(\" sklearn's trustworthiness score : \", sk_score )\n\n# save\ndump( trained_UMAP, 'UMAP.model')\n\n# to reload the model uncomment the line below \n# loaded_model = load('UMAP.model')", "_____no_output_____" ] ], [ [ "### DBSCAN and Adjusted Rand Index\nDBSCAN is a popular and powerful clustering algorithm. For additional information on the DBSCAN model please refer to the documentation on https://docs.rapids.ai/api/cuml/stable/api.html#cuml.DBSCAN\n\nWe create the blobs dataset using the cuML equivalent of the make_blobs function.\n\nThe adjusted Rand index is a metric used to measure the similarity between two clusterings, and it is adjusted to take into consideration the chance grouping of elements.\nFor more information on the adjusted Rand index please refer to: https://en.wikipedia.org/wiki/Rand_index\n\nThe cell below shows an end-to-end DBSCAN model. The output of DBSCAN's fit_predict is evaluated using the adjusted Rand index function. 
The values obtained by sklearn and cuml's adjusted random metric are compared below.", "_____no_output_____" ] ], [ [ "from cuml.datasets import make_blobs\nfrom cuml import DBSCAN as cumlDBSCAN\nfrom sklearn.metrics import adjusted_rand_score\nimport numpy as np\n\nn_samples = 1000\nn_features = 100\ncluster_std = 0.1\n\nX_blobs, y_blobs = make_blobs( n_samples = n_samples, \n n_features = n_features, \n cluster_std = cluster_std, \n random_state = 0,\n dtype=np.float32 )\n\ncuml_dbscan = cumlDBSCAN( eps = 3, \n min_samples = 2)\n\ntrained_DBSCAN = cuml_dbscan.fit( X_blobs )\n\ncu_y_pred = trained_DBSCAN.fit_predict ( X_blobs )\n\ncu_adjusted_rand_index = cuml.metrics.cluster.adjusted_rand_score( y_blobs, cu_y_pred )\nsk_adjusted_rand_index = adjusted_rand_score( asnumpy(y_blobs), asnumpy(cu_y_pred) )\n\nprint(\" cuml's adjusted random index score : \", cu_adjusted_rand_index)\nprint(\" sklearn's adjusted random index score : \", sk_adjusted_rand_index)\n\n# save and optionally reload\ndump( trained_DBSCAN, 'DBSCAN.model')\n\n# to reload the model uncomment the line below \n# loaded_model = load('DBSCAN.model')", "_____no_output_____" ] ], [ [ "## Regression", "_____no_output_____" ], [ "### Linear regression and R^2 score\nLinear Regression is a simple machine learning model where the response y is modelled by a linear combination of the predictors in X.\n\nR^2 score is also known as the coefficient of determination. It is used as a metric for scoring regression models. It scores the output of the model based on the proportion of total variation of the model.\nFor more information on the R^2 score metrics please refer to: https://en.wikipedia.org/wiki/Coefficient_of_determination\n\nFor more information on cuML's implementation of the r2 score metrics please refer to : https://docs.rapids.ai/api/cuml/stable/api.html#cuml.metrics.regression.r2_score\n\nThe cell below uses the Linear Regression model to compare the results between cuML and sklearn trustworthiness metric. For more information on cuML's implementation of the Linear Regression model please refer to : \nhttps://docs.rapids.ai/api/cuml/stable/api.html#linear-regression", "_____no_output_____" ] ], [ [ "from cuml.datasets import make_regression\nfrom cuml.preprocessing.model_selection import train_test_split\nfrom cuml.linear_model import LinearRegression as cuLR\nfrom sklearn.metrics import r2_score\n\nn_samples = 2**10\nn_features = 100\nn_info = 70\n\nX_reg, y_reg = make_regression( n_samples = n_samples, \n n_features = n_features,\n n_informative = n_info, \n random_state = 123 )\n\nX_reg_train, X_reg_test, y_reg_train, y_reg_test = train_test_split( X_reg,\n y_reg, \n train_size = 0.8,\n random_state = 10 )\ncuml_reg_model = cuLR( fit_intercept = True,\n normalize = True,\n algorithm = 'eig' )\n\ntrained_LR = cuml_reg_model.fit( X_reg_train, y_reg_train )\ncu_preds = trained_LR.predict( X_reg_test )\n\ncu_r2 = cuml.metrics.r2_score( y_reg_test, cu_preds )\nsk_r2 = r2_score( asnumpy( y_reg_test ), asnumpy( cu_preds ) )\n\nprint(\"cuml's r2 score : \", cu_r2)\nprint(\"sklearn's r2 score : \", sk_r2)\n\n# save and reload \ndump( trained_LR, 'LR.model') \n\n# to reload the model uncomment the line below \n# loaded_model = load('LR.model')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ] ]
cb7672ebe59ce31b26e75d58fc1aa0c6f1fdb472
127,108
ipynb
Jupyter Notebook
notes/.ipynb_checkpoints/convective_structure-checkpoint.ipynb
gfeiden/MagneticUpperSco
c7727cbeabe7c9bbe17a0f32c6cc9faa35445fb7
[ "MIT" ]
2
2016-07-25T22:28:42.000Z
2016-08-07T18:45:24.000Z
notes/.ipynb_checkpoints/convective_structure-checkpoint.ipynb
gfeiden/MagneticUpperSco
c7727cbeabe7c9bbe17a0f32c6cc9faa35445fb7
[ "MIT" ]
null
null
null
notes/.ipynb_checkpoints/convective_structure-checkpoint.ipynb
gfeiden/MagneticUpperSco
c7727cbeabe7c9bbe17a0f32c6cc9faa35445fb7
[ "MIT" ]
2
2016-04-29T10:45:41.000Z
2019-07-14T15:21:40.000Z
327.597938
24,178
0.91973
[ [ [ "# Radiative Cores & Convective Envelopes\n\nAnalysis of how magnetic fields influence the extent of radiative cores and convective envelopes in young, pre-main-sequence stars.\n\nBegin with some preliminaries.", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.interpolate import interp1d", "_____no_output_____" ] ], [ [ "Load a standard and magnetic isochrone with equivalent ages. Here, the adopted age is 10 Myr to look specifically at the predicted internal structure of stars in Upper Scorpius.", "_____no_output_____" ] ], [ [ "# read standard 10 Myr isochrone\niso_std = np.genfromtxt('../models/iso/std/dmestar_00010.0myr_z+0.00_a+0.00_phx.iso')\n\n# read standard 5 Myr isochrone\niso_5my = np.genfromtxt('../models/iso/std/dmestar_00005.0myr_z+0.00_a+0.00_phx.iso')\n\n# read magnetic isochrone\niso_mag = np.genfromtxt('../models/iso/mag/dmestar_00010.0myr_z+0.00_a+0.00_phx_magBeq.iso')", "_____no_output_____" ] ], [ [ "The magnetic isochrone is known to begin at a lower mass than the standard isochrone and both isochrones have gaps where individual models failed to converge. Gaps need not occur at the same masses along each isochrone. To overcome these inconsistencies, we can interpolate both isochrones onto a pre-defined mass domain.", "_____no_output_____" ] ], [ [ "masses = np.arange(0.09, 1.70, 0.01) # new mass domain\n\n# create an interpolation curve for a standard isochrone \nicurve = interp1d(iso_std[:,0], iso_std, axis=0, kind='cubic')\n\n# and transform to new mass domain\niso_std_eq = icurve(masses)\n\n# create interpolation curve for standard 5 Myr isochrone\nicurve = interp1d(iso_5my[:,0], iso_5my, axis=0, kind='linear')\n\n# and transform to a new mass domain\niso_5my_eq = icurve(masses)\n\n# create an interpolation curve for a magnetic isochrone \nicurve = interp1d(iso_mag[:,0], iso_mag, axis=0, kind='cubic')\n\n# and transform to new mass domain\niso_mag_eq = icurve(masses)", "_____no_output_____" ] ], [ [ "Let's compare the interpolated isochrones to the original, just to be sure that the resulting isochrones are smooth.", "_____no_output_____" ] ], [ [ "plt.plot(10**iso_std[:, 1], iso_std[:, 3], '-', lw=4, color='red')\nplt.plot(10**iso_std_eq[:, 1], iso_std_eq[:, 3], '--', lw=4, color='black')\n\nplt.plot(10**iso_mag[:, 1], iso_mag[:, 3], '-', lw=4, color='blue')\nplt.plot(10**iso_mag_eq[:, 1], iso_mag_eq[:, 3], '--', lw=4, color='black')\n\nplt.grid()\nplt.xlim(2500., 8000.)\nplt.ylim(-2, 1.1)\nplt.xlabel('$T_{\\\\rm eff}\\ [K]$', fontsize=20)\nplt.ylabel('$\\\\log(L / L_{\\\\odot})$', fontsize=20)", "_____no_output_____" ] ], [ [ "The interpolation appears to have worked well as there are no egregious discrepancies between the real and interpolated isochrones.\n\nWe can now analyze the properties of the radiative cores and the convective envelopes. 
Beginning with the radiative core, we can look as a function of stellar properties, how much of the total stellar mass is contained in the radiative core.", "_____no_output_____" ] ], [ [ "# as a function of stellar mass\nplt.plot(iso_std_eq[:, 0], 1.0 - iso_std_eq[:, -1]/iso_std_eq[:, 0], \n '--', lw=3, color='#333333')\nplt.plot(iso_5my_eq[:, 0], 1.0 - iso_5my_eq[:, -1]/iso_5my_eq[:, 0], \n '-.', lw=3, color='#333333')\nplt.plot(iso_mag_eq[:, 0], 1.0 - iso_mag_eq[:, -1]/iso_mag_eq[:, 0], \n '-' , lw=4, color='#01a9db')\n\nplt.grid()\nplt.xlabel('${\\\\rm Stellar Mass}\\ [M_{\\\\odot}]$', fontsize=20)\nplt.ylabel('$M_{\\\\rm rad\\ core}\\ /\\ M_{\\\\star}$', fontsize=20)", "_____no_output_____" ], [ "# as a function of effective temperature\nplt.plot(10**iso_std_eq[:, 1], 1.0 - iso_std_eq[:, -1]/iso_std_eq[:, 0], \n '--', lw=3, color='#333333')\nplt.plot(10**iso_5my_eq[:, 1], 1.0 - iso_5my_eq[:, -1]/iso_5my_eq[:, 0], \n '-.', lw=3, color='#333333')\nplt.plot(10**iso_mag_eq[:, 1], 1.0 - iso_mag_eq[:, -1]/iso_mag_eq[:, 0], \n '-' , lw=4, color='#01a9db')\n\nplt.grid()\nplt.xlim(3000., 7000.)\nplt.xlabel('${\\\\rm Effective Temperature}\\ [K]$', fontsize=20)\nplt.ylabel('$M_{\\\\rm rad\\ core}\\ /\\ M_{\\\\star}$', fontsize=20)", "_____no_output_____" ] ], [ [ "Now let's look at the relative difference in radiative core mass as a function of these stellar properties.", "_____no_output_____" ] ], [ [ "# as a function of stellar mass (note, there is a minus sign switch b/c we tabulate \n# convective envelope mass)\nplt.plot(iso_mag_eq[:, 0], (iso_mag_eq[:, -1] - iso_std_eq[:, -1]), \n '-' , lw=4, color='#01a9db')\nplt.plot(iso_mag_eq[:, 0], (iso_mag_eq[:, -1] - iso_5my_eq[:, -1]), \n '--' , lw=4, color='#01a9db')\n\nplt.grid()\nplt.xlabel('${\\\\rm Stellar Mass}\\ [M_{\\\\odot}]$', fontsize=20)\nplt.ylabel('$\\\\Delta M_{\\\\rm rad\\ core}\\ [M_{\\\\odot}]$', fontsize=20)", "_____no_output_____" ] ], [ [ "Analysis", "_____no_output_____" ] ], [ [ "# interpolate into the temperature domain\nTeffs = np.log10(np.arange(3050., 7000., 50.))\n\nicurve = interp1d(iso_std[:, 1], iso_std, axis=0, kind='linear')\niso_std_te = icurve(Teffs)\n\nicurve = interp1d(iso_5my[:, 1], iso_5my, axis=0, kind='linear')\niso_5my_te = icurve(Teffs)\n\nicurve = interp1d(iso_mag[:, 1], iso_mag, axis=0, kind='linear')\niso_mag_te = icurve(Teffs)\n\n# as a function of stellar mass \n# (note, there is a minus sign switch b/c we tabulate convective envelope mass)\n#\n# plotting: standard - magnetic where + implies \nplt.plot(10**Teffs, (iso_mag_te[:, 0] - iso_mag_te[:, -1] - \n iso_std_te[:, 0] + iso_std_te[:, -1]), \n '-' , lw=4, color='#01a9db')\nplt.plot(10**Teffs, (iso_mag_te[:, 0] - iso_mag_te[:, -1] - \n iso_5my_te[:, 0] + iso_5my_te[:, -1]), \n '--' , lw=4, color='#01a9db')\n\nnp.savetxt('../models/rad_core_comp.txt', \n np.column_stack((iso_std_te, iso_mag_te)), \n fmt=\"%10.6f\")\n\nnp.savetxt('../models/rad_core_comp_dage.txt', \n np.column_stack((iso_5my_te, iso_mag_te)), \n fmt=\"%10.6f\")\n\nplt.grid()\nplt.xlim(3000., 7000.)\nplt.xlabel('${\\\\rm Effective Temperature}\\ [K]$', fontsize=20)\nplt.ylabel('$\\\\Delta M_{\\\\rm rad\\ core}\\ [M_{\\\\odot}]$', fontsize=20)", "_____no_output_____" ] ], [ [ "Stars are fully convective below 3500 K, regardless of whether there is magnetic inhibition of convection. On the other extreme, stars hotter than about 6500 K are approaching ignition of the CN-cycle, which coincides with the disappearnce of the outer convective envelope. 
However, delayed contraction means that stars of a given effective temperature have a higher mass in the magnetic case, which leads to a slight mass offset once the radiative core comprises nearly 100% of the star. Note that our use of the term \"radiative core\" is technically invalid in this regime due to the presence of a convective core. ", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
cb767aa507d94fdd96429591999e4605341f544b
19,345
ipynb
Jupyter Notebook
pages/gallery/Upperair_Obs.ipynb
rrbuchholz/python-training
6bade362a17b174f44a63d3474e54e0e6402b954
[ "BSD-3-Clause" ]
87
2019-08-29T06:54:06.000Z
2022-03-14T12:52:59.000Z
pages/gallery/Upperair_Obs.ipynb
rrbuchholz/python-training
6bade362a17b174f44a63d3474e54e0e6402b954
[ "BSD-3-Clause" ]
100
2019-08-30T16:52:36.000Z
2022-02-10T12:12:05.000Z
pages/gallery/Upperair_Obs.ipynb
rrbuchholz/python-training
6bade362a17b174f44a63d3474e54e0e6402b954
[ "BSD-3-Clause" ]
58
2019-07-19T20:39:18.000Z
2022-03-07T13:47:32.000Z
38.535857
104
0.531507
[ [ [ "DIFAX Replication\n=================\n\nThis example replicates the traditional DIFAX images for upper-level\nobservations.\n\nBy: Kevin Goebbert\n\nObservation data comes from Iowa State Archive, accessed through the\nSiphon package. Contour data comes from the GFS 0.5 degree analysis.\nClassic upper-level data of Geopotential Height and Temperature are\nplotted.", "_____no_output_____" ] ], [ [ "import urllib.request\n\nfrom datetime import datetime, timedelta\n\nimport cartopy.crs as ccrs\nimport cartopy.feature as cfeature\nimport matplotlib.pyplot as plt\nimport metpy.calc as mpcalc\nimport numpy as np\nimport xarray as xr\n\nfrom metpy.plots import StationPlot\nfrom metpy.units import units\nfrom siphon.simplewebservice.iastate import IAStateUpperAir", "_____no_output_____" ] ], [ [ "Plotting High/Low Symbols\n-------------------------\n\nA helper function to plot a text symbol (e.g., H, L) for relative\nmaximum/minimum for a given field (e.g., geopotential height).", "_____no_output_____" ] ], [ [ "def plot_maxmin_points(lon, lat, data, extrema, nsize, symbol, color='k',\n plotValue=True, transform=None):\n \"\"\"\n This function will find and plot relative maximum and minimum for a 2D grid. The function\n can be used to plot an H for maximum values (e.g., High pressure) and an L for minimum\n values (e.g., low pressue). It is best to used filetered data to obtain a synoptic scale\n max/min value. The symbol text can be set to a string value and optionally the color of the\n symbol and any plotted value can be set with the parameter color.\n\n Parameters\n ----------\n lon : 2D array\n Plotting longitude values\n lat : 2D array\n Plotting latitude values\n data : 2D array\n Data that you wish to plot the max/min symbol placement\n extrema : str\n Either a value of max for Maximum Values or min for Minimum Values\n nsize : int\n Size of the grid box to filter the max and min values to plot a reasonable number\n symbol : str\n Text to be placed at location of max/min value\n color : str\n Name of matplotlib colorname to plot the symbol (and numerical value, if plotted)\n plot_value : Boolean (True/False)\n Whether to plot the numeric value of max/min point\n\n Return\n ------\n The max/min symbol will be plotted on the current axes within the bounding frame\n (e.g., clip_on=True)\n \"\"\"\n from scipy.ndimage.filters import maximum_filter, minimum_filter\n\n if (extrema == 'max'):\n data_ext = maximum_filter(data, nsize, mode='nearest')\n elif (extrema == 'min'):\n data_ext = minimum_filter(data, nsize, mode='nearest')\n else:\n raise ValueError('Value for hilo must be either max or min')\n\n if lon.ndim == 1:\n lon, lat = np.meshgrid(lon, lat)\n\n mxx, mxy = np.where(data_ext == data)\n\n for i in range(len(mxy)):\n ax.text(lon[mxx[i], mxy[i]], lat[mxx[i], mxy[i]], symbol, color=color, size=36,\n clip_on=True, horizontalalignment='center', verticalalignment='center',\n transform=transform)\n ax.text(lon[mxx[i], mxy[i]], lat[mxx[i], mxy[i]],\n '\\n' + str(np.int(data[mxx[i], mxy[i]])),\n color=color, size=12, clip_on=True, fontweight='bold',\n horizontalalignment='center', verticalalignment='top', transform=transform)\n ax.plot(lon[mxx[i], mxy[i]], lat[mxx[i], mxy[i]], marker='o', markeredgecolor='black',\n markerfacecolor='white', transform=transform)\n ax.plot(lon[mxx[i], mxy[i]], lat[mxx[i], mxy[i]],\n marker='x', color='black', transform=transform)", "_____no_output_____" ] ], [ [ "Station Information\n-------------------\n\nA helper function for obtaining radiosonde 
station information (e.g.,\nlatitude/longitude) requried to plot data obtained from each station.\nOriginal code by github user sgdecker.", "_____no_output_____" ] ], [ [ "def station_info(stid):\n r\"\"\"Provide information about weather stations.\n\n Parameters\n ----------\n stid: str or iterable object containing strs\n The ICAO or IATA code(s) for which station information is requested.\n with_units: bool\n Whether to include units for values that have them. Default True.\n\n Returns\n -------\n info: dict\n Information about the station(s) within a dictionary with these keys:\n 'state': Two-character ID of the state/province where the station is located,\n if applicable\n 'name': The name of the station\n 'lat': The latitude of the station [deg]\n 'lon': The longitude of the station [deg]\n 'elevation': The elevation of the station [m]\n 'country': Two-character ID of the country where the station is located\n\n Modified code from Steven Decker, Rutgers University\n\n \"\"\"\n # Provide a helper function for later usage\n def str2latlon(s):\n deg = float(s[:3])\n mn = float(s[-3:-1])\n if s[-1] == 'S' or s[-1] == 'W':\n deg = -deg\n mn = -mn\n return deg + mn / 60.\n\n # Various constants describing the underlying data\n url = 'https://www.aviationweather.gov/docs/metar/stations.txt'\n # file = 'stations.txt'\n state_bnds = slice(0, 2)\n name_bnds = slice(3, 19)\n icao_bnds = slice(20, 24)\n iata_bnds = slice(26, 29)\n lat_bnds = slice(39, 45)\n lon_bnds = slice(47, 54)\n z_bnds = slice(55, 59)\n cntry_bnds = slice(81, 83)\n\n # Generalize to any number of IDs\n if isinstance(stid, str):\n stid = [stid]\n\n # Get the station dataset\n infile = urllib.request.urlopen(url)\n data = infile.readlines()\n# infile = open(file, 'rb')\n# data = infile.readlines()\n\n state = []\n name = []\n lat = []\n lon = []\n z = []\n cntry = []\n\n for s in stid:\n s = s.upper()\n for line_bytes in data:\n line = line_bytes.decode('UTF-8')\n icao = line[icao_bnds]\n iata = line[iata_bnds]\n if len(s) == 3 and s in iata or len(s) == 4 and s in icao:\n state.append(line[state_bnds].strip())\n name.append(line[name_bnds].strip())\n lat.append(str2latlon(line[lat_bnds]))\n lon.append(str2latlon(line[lon_bnds]))\n z.append(float(line[z_bnds]))\n cntry.append(line[cntry_bnds])\n\n break\n else:\n state.append('NA')\n name.append('NA')\n lat.append(np.nan)\n lon.append(np.nan)\n z.append(np.nan)\n cntry.append('NA')\n\n infile.close()\n\n return {'state': np.array(state), 'name': np.array(name), 'lat': np.array(lat),\n 'lon': np.array(lon), 'elevation': np.array(z), 'country': np.array(cntry),\n 'units': {'lat': 'deg', 'lon': 'deg', 'z': 'm'}}", "_____no_output_____" ] ], [ [ "Observation Data\n----------------\n\nSet a date and time for upper-air observations (should only be 00 or 12\nUTC for the hour).\n\nRequest all data from Iowa State using the Siphon package. The result is\na pandas DataFrame containing all of the sounding data from all\navailable stations.\n", "_____no_output_____" ] ], [ [ "# Set date for desired UPA data\ntoday = datetime.utcnow()\n\n# Go back one day to ensure data availability\ndate = datetime(today.year, today.month, today.day, 0) - timedelta(days=1)\n\n# Request data using Siphon request for data from Iowa State Archive\ndata = IAStateUpperAir.request_all_data(date)", "_____no_output_____" ] ], [ [ "Subset Observational Data\n-------------------------\n\nFrom the request above will give all levels from all radisonde sites\navailable through the service. 
For plotting a pressure surface map there\nis only need to have the data from that level. Below the data is subset\nand a few parameters set based on the level chosen. Additionally, the\nstation information is obtained and latitude and longitude data is added\nto the DataFrame.\n", "_____no_output_____" ] ], [ [ "level = 500\n\nif (level == 925) | (level == 850) | (level == 700):\n cint = 30\n def hght_format(v): return format(v, '.0f')[1:]\nelif level == 500:\n cint = 60\n def hght_format(v): return format(v, '.0f')[:3]\nelif level == 300:\n cint = 120\n def hght_format(v): return format(v, '.0f')[:3]\nelif level < 300:\n cint = 120\n def hght_format(v): return format(v, '.0f')[1:4]\n\n# Create subset of all data for a given level\ndata_subset = data.pressure == level\ndf = data[data_subset]\n\n# Get station lat/lon from look-up file; add to Dataframe\nstn_info = station_info(list(df.station.values))\ndf.insert(10, 'latitude', stn_info['lat'])\ndf.insert(11, 'longitude', stn_info['lon'])", "_____no_output_____" ] ], [ [ "Gridded Data\n------------\n\nObtain GFS gridded output for contour plotting. Specifically,\ngeopotential height and temperature data for the given level and subset\nfor over North America. Data are smoothed for aesthetic reasons.\n", "_____no_output_____" ] ], [ [ "# Get GFS data and subset to North America for Geopotential Height and Temperature\nds = xr.open_dataset('https://thredds.ucar.edu/thredds/dodsC/grib/NCEP/GFS/Global_0p5deg_ana/'\n 'GFS_Global_0p5deg_ana_{0:%Y%m%d}_{0:%H}00.grib2'.format(\n date)).metpy.parse_cf()\n\n# Geopotential height and smooth\nhght = ds.Geopotential_height_isobaric.metpy.sel(\n vertical=level*units.hPa, time=date, lat=slice(70, 15), lon=slice(360-145, 360-50))\nsmooth_hght = mpcalc.smooth_n_point(hght, 9, 10)\n\n# Temperature, smooth, and convert to Celsius\ntmpk = ds.Temperature_isobaric.metpy.sel(\n vertical=level*units.hPa, time=date, lat=slice(70, 15), lon=slice(360-145, 360-50))\nsmooth_tmpc = (mpcalc.smooth_n_point(tmpk, 9, 10)).to('degC')", "_____no_output_____" ] ], [ [ "Create DIFAX Replication\n------------------------\n\nPlot the observational data and contours on a Lambert Conformal map and\nadd features that resemble the historic DIFAX maps.\n", "_____no_output_____" ] ], [ [ "# Set up map coordinate reference system\nmapcrs = ccrs.LambertConformal(\n central_latitude=45, central_longitude=-100, standard_parallels=(30, 60))\n\n# Set up station locations for plotting observations\npoint_locs = mapcrs.transform_points(\n ccrs.PlateCarree(), df['longitude'].values, df['latitude'].values)\n\n# Start figure and set graphics extent\nfig = plt.figure(1, figsize=(17, 15))\nax = plt.subplot(111, projection=mapcrs)\nax.set_extent([-125, -70, 20, 55])\n\n# Add map features for geographic reference\nax.add_feature(cfeature.COASTLINE.with_scale('50m'), edgecolor='grey')\nax.add_feature(cfeature.LAND.with_scale('50m'), facecolor='white')\nax.add_feature(cfeature.STATES.with_scale('50m'), edgecolor='grey')\n\n# Plot plus signs every degree lat/lon\nplus_lat = []\nplus_lon = []\nother_lat = []\nother_lon = []\n\nfor x in hght.lon.values[::2]:\n for y in hght.lat.values[::2]:\n if (x % 5 == 0) | (y % 5 == 0):\n plus_lon.append(x)\n plus_lat.append(y)\n else:\n other_lon.append(x)\n other_lat.append(y)\nax.scatter(other_lon, other_lat, s=5, marker='o',\n transform=ccrs.PlateCarree(), color='lightgrey', zorder=-1)\nax.scatter(plus_lon, plus_lat, s=30, marker='+',\n transform=ccrs.PlateCarree(), color='lightgrey', zorder=-1)\n\n# Add gridlines 
for every 5 degree lat/lon\nax.gridlines(linestyle='solid', ylocs=range(15, 71, 5), xlocs=range(-150, -49, 5))\n\n# Start the station plot by specifying the axes to draw on, as well as the\n# lon/lat of the stations (with transform). We also the fontsize to 10 pt.\nstationplot = StationPlot(ax, df['longitude'].values, df['latitude'].values, clip_on=True,\n transform=ccrs.PlateCarree(), fontsize=10)\n\n# Plot the temperature and dew point to the upper and lower left, respectively, of\n# the center point.\nstationplot.plot_parameter('NW', df['temperature'], color='black')\nstationplot.plot_parameter('SW', df['dewpoint'], color='black')\n\n# A more complex example uses a custom formatter to control how the geopotential height\n# values are plotted. This is set in an earlier if-statement to work appropriate for\n# different levels.\nstationplot.plot_parameter('NE', df['height'], formatter=hght_format)\n\n# Add wind barbs\nstationplot.plot_barb(df['u_wind'], df['v_wind'], length=7, pivot='tip')\n\n# Plot Solid Contours of Geopotential Height\ncs = ax.contour(hght.lon, hght.lat, smooth_hght,\n range(0, 20000, cint), colors='black', transform=ccrs.PlateCarree())\nclabels = plt.clabel(cs, fmt='%d', colors='white', inline_spacing=5, use_clabeltext=True)\n\n# Contour labels with black boxes and white text\nfor t in cs.labelTexts:\n t.set_bbox({'facecolor': 'black', 'pad': 4})\n t.set_fontweight('heavy')\n\n# Plot Dashed Contours of Temperature\ncs2 = ax.contour(hght.lon, hght.lat, smooth_tmpc, range(-60, 51, 5),\n colors='black', transform=ccrs.PlateCarree())\nclabels = plt.clabel(cs2, fmt='%d', colors='white', inline_spacing=5, use_clabeltext=True)\n\n# Set longer dashes than default\nfor c in cs2.collections:\n c.set_dashes([(0, (5.0, 3.0))])\n\n# Contour labels with black boxes and white text\nfor t in cs.labelTexts:\n t.set_bbox({'facecolor': 'black', 'pad': 4})\n t.set_fontweight('heavy')\n\n# Plot filled circles for Radiosonde Obs\nax.scatter(df['longitude'].values, df['latitude'].values, s=12,\n marker='o', color='black', transform=ccrs.PlateCarree())\n\n# Use definition to plot H/L symbols\nplot_maxmin_points(hght.lon, hght.lat, smooth_hght.m, 'max', 50,\n symbol='H', color='black', transform=ccrs.PlateCarree())\nplot_maxmin_points(hght.lon, hght.lat, smooth_hght.m, 'min', 25,\n symbol='L', color='black', transform=ccrs.PlateCarree())\n\n# Add titles\nplt.title('Upper-air Observations at {}-hPa Analysis Heights/Temperature'.format(level),\n loc='left')\nplt.title(f'Valid: {date}', loc='right');", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb768586995e84eadd37aa9261269fe3278f8d12
68,364
ipynb
Jupyter Notebook
Copy_of_LS_DS_233_assignment.ipynb
Cknowles11/DS-Unit-2-Applied-Modeling
e8ef51748211bcf4606d83b97d7afaba7411b8f6
[ "MIT" ]
null
null
null
Copy_of_LS_DS_233_assignment.ipynb
Cknowles11/DS-Unit-2-Applied-Modeling
e8ef51748211bcf4606d83b97d7afaba7411b8f6
[ "MIT" ]
null
null
null
Copy_of_LS_DS_233_assignment.ipynb
Cknowles11/DS-Unit-2-Applied-Modeling
e8ef51748211bcf4606d83b97d7afaba7411b8f6
[ "MIT" ]
null
null
null
39.042833
264
0.37135
[ [ [ "<a href=\"https://colab.research.google.com/github/Cknowles11/DS-Unit-2-Applied-Modeling/blob/master/Copy_of_LS_DS_233_assignment.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "Lambda School Data Science\n\n*Unit 2, Sprint 3, Module 3*\n\n---\n\n\n# Permutation & Boosting\n\nYou will use your portfolio project dataset for all assignments this sprint.\n\n## Assignment\n\nComplete these tasks for your project, and document your work.\n\n- [ ] If you haven't completed assignment #1, please do so first.\n- [ ] Continue to clean and explore your data. Make exploratory visualizations.\n- [ ] Fit a model. Does it beat your baseline? \n- [ ] Try xgboost.\n- [ ] Get your model's permutation importances.\n\nYou should try to complete an initial model today, because the rest of the week, we're making model interpretation visualizations.\n\nBut, if you aren't ready to try xgboost and permutation importances with your dataset today, that's okay. You can practice with another dataset instead. You may choose any dataset you've worked with previously.\n\nThe data subdirectory includes the Titanic dataset for classification and the NYC apartments dataset for regression. You may want to choose one of these datasets, because example solutions will be available for each.\n\n\n## Reading\n\nTop recommendations in _**bold italic:**_\n\n#### Permutation Importances\n- _**[Kaggle / Dan Becker: Machine Learning Explainability](https://www.kaggle.com/dansbecker/permutation-importance)**_\n- [Christoph Molnar: Interpretable Machine Learning](https://christophm.github.io/interpretable-ml-book/feature-importance.html)\n\n#### (Default) Feature Importances\n - [Ando Saabas: Selecting good features, Part 3, Random Forests](https://blog.datadive.net/selecting-good-features-part-iii-random-forests/)\n - [Terence Parr, et al: Beware Default Random Forest Importances](https://explained.ai/rf-importance/index.html)\n\n#### Gradient Boosting\n - [A Gentle Introduction to the Gradient Boosting Algorithm for Machine Learning](https://machinelearningmastery.com/gentle-introduction-gradient-boosting-algorithm-machine-learning/)\n - [An Introduction to Statistical Learning](http://www-bcf.usc.edu/~gareth/ISL/ISLR%20Seventh%20Printing.pdf), Chapter 8\n - _**[Gradient Boosting Explained](https://www.gormanalysis.com/blog/gradient-boosting-explained/)**_ — Ben Gorman\n - [Gradient Boosting Explained](http://arogozhnikov.github.io/2016/06/24/gradient_boosting_explained.html) — Alex Rogozhnikov\n - [How to explain gradient boosting](https://explained.ai/gradient-boosting/) — Terence Parr & Jeremy Howard", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n!pip install category_encoders==2.*\n!pip install eli5", "_____no_output_____" ], [ "from google.colab import drive\ndrive.mount('/content/drive')", "Mounted at /content/drive\n" ], [ "df = pd.read_csv('/content/drive/My Drive/Local Repo/wineQualityWhites.csv')", "_____no_output_____" ], [ "df.sample(5)", "_____no_output_____" ], [ "df = df.drop('Unnamed: 0', axis = 1)", "_____no_output_____" ], [ "df.dtypes", "_____no_output_____" ] ], [ [ "# Feature Engineering", "_____no_output_____" ] ], [ [ "# Free Sulfur Dioxide in comparison to Total Sulfur Dioxide\ndf['fsd_perc'] = df['free.sulfur.dioxide'] / df['total.sulfur.dioxide']", "_____no_output_____" ], [ "df['fsd_perc'] = 
df['fsd_perc'].round(3)", "_____no_output_____" ] ], [ [ "# Exploration", "_____no_output_____" ] ], [ [ "above_avg_sub = df[df['quality'] >= 5 ]", "_____no_output_____" ], [ "above_avg_sub.head()", "_____no_output_____" ], [ "above_avg_sub.describe()", "_____no_output_____" ], [ "below_avg_sub = df[df['quality'] <= 5 ]", "_____no_output_____" ], [ "below_avg_sub.describe()", "_____no_output_____" ] ], [ [ "# Fit Model", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import train_test_split\nimport category_encoders as ce\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.pipeline import make_pipeline\nimport eli5\nfrom eli5.sklearn import PermutationImportance\nfrom sklearn.metrics import accuracy_score", "_____no_output_____" ], [ "train,test = train_test_split(df, train_size = .8, test_size = .2, stratify = df['quality'], random_state = 21)", "_____no_output_____" ], [ "train, val = train_test_split(train, train_size = .80, test_size = .20, stratify = train['quality'], random_state = 21)", "_____no_output_____" ], [ "print(train.shape)\nprint(val.shape)\ntest.shape", "(3134, 13)\n(784, 13)\n" ], [ "target = 'quality'\nX_train = train.drop(columns=target)\ny_train = train[target]\nX_val = val.drop(columns=target)\ny_val = val[target]\nX_test = test", "_____no_output_____" ], [ "tfs = make_pipeline(\n ce.OrdinalEncoder(), \n SimpleImputer(strategy='median')\n)\n\nX_train_transformed = tfs.fit_transform(X_train)\nX_val_transformed = tfs.transform(X_val)\n\nmodel = RandomForestClassifier(random_state=42)\nmodel.fit(X_train_transformed, y_train)", "_____no_output_____" ], [ "pipeline = make_pipeline(\n ce.OrdinalEncoder(),\n SimpleImputer(strategy = 'median'),\n RandomForestClassifier()\n)", "_____no_output_____" ] ], [ [ "# Permutation Importances / Model Fit", "_____no_output_____" ] ], [ [ "permuter = PermutationImportance(model, scoring = 'accuracy', n_iter = 5, random_state=42)\npermuter.fit(X_val_transformed, y_val)", "_____no_output_____" ], [ "feature_names = X_val.columns.tolist()", "_____no_output_____" ], [ "eli5.show_weights(permuter, top = None, feature_names = feature_names)", "_____no_output_____" ], [ "pipeline.fit(X_train,y_train)\npipeline.score(X_val, y_val)", "/usr/local/lib/python3.6/dist-packages/category_encoders/utils.py:21: FutureWarning: is_categorical is deprecated and will be removed in a future version. 
Use is_categorical_dtype instead\n elif pd.api.types.is_categorical(cols):\n" ], [ "from xgboost import XGBClassifier\n\nxgb_pipeline = make_pipeline(\n ce.OrdinalEncoder(),\n XGBClassifier()\n)\n\nxgb_pipeline.fit(X_train, y_train)", "_____no_output_____" ], [ "y_pred = pipeline.predict(X_val)\naccuracy_score(y_val, y_pred)", "_____no_output_____" ] ], [ [ "# Parameter Tuning", "_____no_output_____" ] ], [ [ "encoder = ce.OrdinalEncoder()\nX_train_encoded = encoder.fit_transform(X_train)\nX_val_encoded = encoder.transform(X_val)\n\nx_model = XGBClassifier(\n n_estimators = 1000,\n max_depth = 10,\n learning_rate = 0.5,\n)\n\neval_set = [(X_train_encoded, y_train),\n (X_val_encoded, y_val)]\n\nx_model.fit(X_train_encoded, y_train,\n eval_set = eval_set,\n eval_metric = 'merror', \n early_stopping_rounds = 50)", "[0]\tvalidation_0-merror:0.22559\tvalidation_1-merror:0.432398\nMultiple eval metrics have been passed: 'validation_1-merror' will be used for early stopping.\n\nWill train until validation_1-merror hasn't improved in 50 rounds.\n[1]\tvalidation_0-merror:0.158583\tvalidation_1-merror:0.422194\n[2]\tvalidation_0-merror:0.115507\tvalidation_1-merror:0.399235\n[3]\tvalidation_0-merror:0.093172\tvalidation_1-merror:0.394133\n[4]\tvalidation_0-merror:0.071793\tvalidation_1-merror:0.380102\n[5]\tvalidation_0-merror:0.047543\tvalidation_1-merror:0.373724\n[6]\tvalidation_0-merror:0.0418\tvalidation_1-merror:0.376276\n[7]\tvalidation_0-merror:0.02776\tvalidation_1-merror:0.371173\n[8]\tvalidation_0-merror:0.022974\tvalidation_1-merror:0.377551\n[9]\tvalidation_0-merror:0.019464\tvalidation_1-merror:0.364796\n[10]\tvalidation_0-merror:0.014997\tvalidation_1-merror:0.367347\n[11]\tvalidation_0-merror:0.013082\tvalidation_1-merror:0.362245\n[12]\tvalidation_0-merror:0.006701\tvalidation_1-merror:0.360969\n[13]\tvalidation_0-merror:0.005105\tvalidation_1-merror:0.36352\n[14]\tvalidation_0-merror:0.002234\tvalidation_1-merror:0.362245\n[15]\tvalidation_0-merror:0.001914\tvalidation_1-merror:0.358418\n[16]\tvalidation_0-merror:0.001276\tvalidation_1-merror:0.358418\n[17]\tvalidation_0-merror:0.000957\tvalidation_1-merror:0.358418\n[18]\tvalidation_0-merror:0.000638\tvalidation_1-merror:0.355867\n[19]\tvalidation_0-merror:0.000638\tvalidation_1-merror:0.360969\n[20]\tvalidation_0-merror:0.000319\tvalidation_1-merror:0.354592\n[21]\tvalidation_0-merror:0.000319\tvalidation_1-merror:0.354592\n[22]\tvalidation_0-merror:0.000319\tvalidation_1-merror:0.355867\n[23]\tvalidation_0-merror:0\tvalidation_1-merror:0.359694\n[24]\tvalidation_0-merror:0\tvalidation_1-merror:0.362245\n[25]\tvalidation_0-merror:0\tvalidation_1-merror:0.364796\n[26]\tvalidation_0-merror:0\tvalidation_1-merror:0.364796\n[27]\tvalidation_0-merror:0\tvalidation_1-merror:0.36352\n[28]\tvalidation_0-merror:0\tvalidation_1-merror:0.360969\n[29]\tvalidation_0-merror:0\tvalidation_1-merror:0.360969\n[30]\tvalidation_0-merror:0\tvalidation_1-merror:0.358418\n[31]\tvalidation_0-merror:0\tvalidation_1-merror:0.355867\n[32]\tvalidation_0-merror:0\tvalidation_1-merror:0.357143\n[33]\tvalidation_0-merror:0\tvalidation_1-merror:0.358418\n[34]\tvalidation_0-merror:0\tvalidation_1-merror:0.355867\n[35]\tvalidation_0-merror:0\tvalidation_1-merror:0.353316\n[36]\tvalidation_0-merror:0\tvalidation_1-merror:0.354592\n[37]\tvalidation_0-merror:0\tvalidation_1-merror:0.354592\n[38]\tvalidation_0-merror:0\tvalidation_1-merror:0.34949\n[39]\tvalidation_0-merror:0\tvalidation_1-merror:0.34949\n[40]\tvalidation_0-merror:0\tvalidation_1-merror:0.353316\n[41
]\tvalidation_0-merror:0\tvalidation_1-merror:0.355867\n[42]\tvalidation_0-merror:0\tvalidation_1-merror:0.34949\n[43]\tvalidation_0-merror:0\tvalidation_1-merror:0.34949\n[44]\tvalidation_0-merror:0\tvalidation_1-merror:0.34949\n[45]\tvalidation_0-merror:0\tvalidation_1-merror:0.348214\n[46]\tvalidation_0-merror:0\tvalidation_1-merror:0.346939\n[47]\tvalidation_0-merror:0\tvalidation_1-merror:0.353316\n[48]\tvalidation_0-merror:0\tvalidation_1-merror:0.353316\n[49]\tvalidation_0-merror:0\tvalidation_1-merror:0.34949\n[50]\tvalidation_0-merror:0\tvalidation_1-merror:0.353316\n[51]\tvalidation_0-merror:0\tvalidation_1-merror:0.346939\n[52]\tvalidation_0-merror:0\tvalidation_1-merror:0.352041\n[53]\tvalidation_0-merror:0\tvalidation_1-merror:0.352041\n[54]\tvalidation_0-merror:0\tvalidation_1-merror:0.350765\n[55]\tvalidation_0-merror:0\tvalidation_1-merror:0.350765\n[56]\tvalidation_0-merror:0\tvalidation_1-merror:0.350765\n[57]\tvalidation_0-merror:0\tvalidation_1-merror:0.354592\n[58]\tvalidation_0-merror:0\tvalidation_1-merror:0.353316\n[59]\tvalidation_0-merror:0\tvalidation_1-merror:0.354592\n[60]\tvalidation_0-merror:0\tvalidation_1-merror:0.357143\n[61]\tvalidation_0-merror:0\tvalidation_1-merror:0.355867\n[62]\tvalidation_0-merror:0\tvalidation_1-merror:0.355867\n[63]\tvalidation_0-merror:0\tvalidation_1-merror:0.357143\n[64]\tvalidation_0-merror:0\tvalidation_1-merror:0.358418\n[65]\tvalidation_0-merror:0\tvalidation_1-merror:0.359694\n[66]\tvalidation_0-merror:0\tvalidation_1-merror:0.358418\n[67]\tvalidation_0-merror:0\tvalidation_1-merror:0.355867\n[68]\tvalidation_0-merror:0\tvalidation_1-merror:0.358418\n[69]\tvalidation_0-merror:0\tvalidation_1-merror:0.359694\n[70]\tvalidation_0-merror:0\tvalidation_1-merror:0.359694\n[71]\tvalidation_0-merror:0\tvalidation_1-merror:0.358418\n[72]\tvalidation_0-merror:0\tvalidation_1-merror:0.359694\n[73]\tvalidation_0-merror:0\tvalidation_1-merror:0.36352\n[74]\tvalidation_0-merror:0\tvalidation_1-merror:0.359694\n[75]\tvalidation_0-merror:0\tvalidation_1-merror:0.358418\n[76]\tvalidation_0-merror:0\tvalidation_1-merror:0.362245\n[77]\tvalidation_0-merror:0\tvalidation_1-merror:0.355867\n[78]\tvalidation_0-merror:0\tvalidation_1-merror:0.355867\n[79]\tvalidation_0-merror:0\tvalidation_1-merror:0.359694\n[80]\tvalidation_0-merror:0\tvalidation_1-merror:0.357143\n[81]\tvalidation_0-merror:0\tvalidation_1-merror:0.357143\n[82]\tvalidation_0-merror:0\tvalidation_1-merror:0.358418\n[83]\tvalidation_0-merror:0\tvalidation_1-merror:0.352041\n[84]\tvalidation_0-merror:0\tvalidation_1-merror:0.357143\n[85]\tvalidation_0-merror:0\tvalidation_1-merror:0.355867\n[86]\tvalidation_0-merror:0\tvalidation_1-merror:0.352041\n[87]\tvalidation_0-merror:0\tvalidation_1-merror:0.352041\n[88]\tvalidation_0-merror:0\tvalidation_1-merror:0.352041\n[89]\tvalidation_0-merror:0\tvalidation_1-merror:0.352041\n[90]\tvalidation_0-merror:0\tvalidation_1-merror:0.352041\n[91]\tvalidation_0-merror:0\tvalidation_1-merror:0.350765\n[92]\tvalidation_0-merror:0\tvalidation_1-merror:0.352041\n[93]\tvalidation_0-merror:0\tvalidation_1-merror:0.350765\n[94]\tvalidation_0-merror:0\tvalidation_1-merror:0.352041\n[95]\tvalidation_0-merror:0\tvalidation_1-merror:0.350765\n[96]\tvalidation_0-merror:0\tvalidation_1-merror:0.350765\nStopping. Best iteration:\n[46]\tvalidation_0-merror:0\tvalidation_1-merror:0.346939\n\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
cb768647db0256259cbb56398b90b656bb158b56
10,276
ipynb
Jupyter Notebook
Chapter__8/decorators.ipynb
nil1729/python__noob
d82d951dc511eafa9f4315e1fdfdc749f484abf1
[ "MIT" ]
null
null
null
Chapter__8/decorators.ipynb
nil1729/python__noob
d82d951dc511eafa9f4315e1fdfdc749f484abf1
[ "MIT" ]
null
null
null
Chapter__8/decorators.ipynb
nil1729/python__noob
d82d951dc511eafa9f4315e1fdfdc749f484abf1
[ "MIT" ]
null
null
null
28.623955
117
0.55907
[ [ [ "# Decorator Introduction\ndef func():\n return 1\nprint(func())\nprint(func) # that means we can assign this function to a variable", "1\n<function func at 0x000002C2F3587790>\n" ], [ "def hello():\n return \"Hello\"\n\ngreet = hello\nprint(hello)\nprint(greet())\n\n# Delete hello\ndel hello\n\ntry:\n print(hello())\nexcept NameError:\n print(\"hello() is not defined\")\n\nprint(greet)\nprint(greet())", "<function hello at 0x000002C2F3587550>\nHello\nhello() is not defined\n<function hello at 0x000002C2F3587550>\nHello\n" ], [ "# Returning function from functions\ndef outer():\n print(\"The outer() function is executing ...\")\n\n def inner():\n return \"The inner() function is executing ...\"\n\n print(\"I am going to return a function\")\n \n return inner\n\nmy_new_func = outer()\nprint(my_new_func)\nprint(my_new_func())", "The outer() function is executing ...\nI am going to return a function\n<function outer.<locals>.inner at 0x0000028A220151F0>\nThe inner() function is executing ...\n" ], [ "# Passing function to other functions\ndef secondary(original_function):\n print(\"Some more codes which executed inside the secondary() function\")\n original_function()\n\ndef say_hello():\n print(\"Hi Nilanjan\")\n\nsay_hello()\nsecondary(say_hello)", "Hi Nilanjan\nSome more codes which executed inside the secondary() function\nHi Nilanjan\n" ], [ "# Defining Decorator function\ndef my_decorator(original_function):\n\n def wrapper(*args, **kwargs):\n print(f\"\\nSome extra code, before execution of {original_function.__name__} function\")\n\n original_function(*args, **kwargs)\n\n print(f\"Some more code, after execution of {original_function.__name__} function\")\n \n return wrapper\n\n# Using Decorator\ndef function_needs_decorator():\n print(\"This function need some decoration!\")\n\nfunction_needs_decorator()\n\n# New decorated function\ndecorated_function = my_decorator(function_needs_decorator)\ndecorated_function()\n\n# Comment and Uncomment this @my_decorator to use another_function() as decorated and normal function\n@my_decorator # ON/OFF Switch\ndef another_function():\n print(\"Another function which needs some decoraion!\")\n\nanother_function()", "This function need some decoration!\n\nSome extra code, before execution of function_needs_decorator function\nThis function need some decoration!\nSome more code, after execution of function_needs_decorator function\n\nSome extra code, before execution of another_function function\nAnother function which needs some decoraion!\nSome more code, after execution of another_function function\n" ], [ "# Decorator with no arguments\ndef decorator_one(original_function):\n\n def wrapper():\n print(f\"\\nSome extra code, before execution of {original_function.__name__} function\")\n\n original_function()\n\n print(f\"Some more code, after execution of {original_function.__name__} function\")\n \n return wrapper\n\n# Decorator which accepts arguments\ndef decorator_two(original_function):\n\n def wrapper(*args, **kwargs):\n print(f\"\\nSome extra code, before execution of {original_function.__name__} function\")\n\n original_function(*args, **kwargs)\n\n print(f\"Some more code, after execution of {original_function.__name__} function\")\n \n return wrapper\n\n@decorator_one\ndef display_info_one(name, age):\n print(f\"display_info_one function ran with arguments ({name}, {age})\")\n\n@decorator_two\ndef display_info_two(name, age):\n print(f\"display_info_two function ran with arguments ({name}, {age})\")\n\ntry:\n 
display_info_one(\"Nilanjan\", 21)\nexcept Exception as err_msg:\n print(\"\\ndecorated display_info_one function throw a Error:\", err_msg)\n\n\ntry:\n display_info_two(\"Nilanjan\", 21)\nexcept Exception as err_msg:\n print(\"\\nThis decorated display_info_decorated function throw a Type Error:\", err_msg)", "\ndecorated display_info_one function throw a Error: wrapper() takes 0 positional arguments but 2 were given\n\nSome extra code, before execution of display_info_two function\ndisplay_info_two function ran with arguments (Nilanjan, 21)\nSome more code, after execution of display_info_two function\n" ], [ "# Using Class as decorator\nclass class_decorator(object):\n def __init__(self, original_function):\n self.original_function = original_function\n \n def __call__(self, *args, **kwargs):\n print(f\"\\nSome extra code, before execution of {self.original_function.__name__} function\")\n\n self.original_function(*args, **kwargs)\n\n print(f\"Some more code, after execution of {self.original_function.__name__} function\")\n\n@class_decorator\ndef display_info(name, age):\n print(f\"display_info function ran with arguments ({name}, {age})\")\ndisplay_info('Nilanjan', 21)", "\nSome extra code, before execution of display_info function\ndisplay_info function ran with arguments (Nilanjan, 21)\nSome more code, after execution of display_info function\n" ], [ "# When we use decorators, the newly decorated functions show some unexpected results\n# Preserving the information about original_function\ndef some_decorator(original_function):\n \n def my_wrapper(*args, **kwargs):\n print(f\"Some code before {original_function.__name__}() function\")\n return original_function(*args, **kwargs)\n \n return my_wrapper\n\ndef some_func(name, country='India'):\n print(f\"{name} lives in {country}\")\n\n@some_decorator\ndef hey():\n print(\"I am in hey() function\")\n\nmy_decorated_func = some_decorator(some_func)\nprint(my_decorated_func.__name__) #output: wrapper\nmy_decorated_func = hey\nprint(my_decorated_func.__name__) #output: wrapper\n\n# Solution\nfrom functools import wraps\ndef my_new_decorator(original_function):\n \n @wraps(original_function)\n def wrapper(*args, **kwargs):\n print(f\"Some code before {original_function.__name__}() function\")\n return original_function(*args, **kwargs)\n \n return wrapper\n\n@my_new_decorator\ndef hey():\n print(\"I am in hey() function\")\n\nmy_decorated_func = my_new_decorator(some_func)\nprint(my_decorated_func.__name__) #output: some_func\nmy_decorated_func = hey\nprint(my_decorated_func.__name__) #output: hey", "my_wrapper\nmy_wrapper\nsome_func\nhey\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb769421c2105712f95becdf0d05299932b98d0f
32,645
ipynb
Jupyter Notebook
notes-code-snippets/section-2-neural-networks.ipynb
diazdc/deep-learning-udacity
03bb5afa9012bdff0de2ea0ea0616a9104c8985c
[ "MIT" ]
null
null
null
notes-code-snippets/section-2-neural-networks.ipynb
diazdc/deep-learning-udacity
03bb5afa9012bdff0de2ea0ea0616a9104c8985c
[ "MIT" ]
null
null
null
notes-code-snippets/section-2-neural-networks.ipynb
diazdc/deep-learning-udacity
03bb5afa9012bdff0de2ea0ea0616a9104c8985c
[ "MIT" ]
1
2020-10-12T05:24:47.000Z
2020-10-12T05:24:47.000Z
31.090476
357
0.533619
[ [ [ "# Section 2 - Neural Networks", "_____no_output_____" ], [ "## Lesson 1 - Introduction to Neural Networks", "_____no_output_____" ], [ "### 27. The Gradient Descent Algorithm", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\n#Some helper functions for plotting and drawing lines\n\ndef plot_points(X, y):\n admitted = X[np.argwhere(y==1)]\n rejected = X[np.argwhere(y==0)]\n plt.scatter([s[0][0] for s in rejected], [s[0][1] for s in rejected], s = 25, color = 'blue', edgecolor = 'k')\n plt.scatter([s[0][0] for s in admitted], [s[0][1] for s in admitted], s = 25, color = 'red', edgecolor = 'k')\n\ndef display(m, b, color='g--'):\n plt.xlim(-0.05,1.05)\n plt.ylim(-0.05,1.05)\n x = np.arange(-10, 10, 0.1)\n plt.plot(x, m*x+b, color)", "_____no_output_____" ] ], [ [ "#### Reading and plotting the data", "_____no_output_____" ] ], [ [ "data = pd.read_csv('data.csv', header=None)\nX = np.array(data[[0,1]])\ny = np.array(data[2])\nplot_points(X,y)\nplt.show()", "_____no_output_____" ] ], [ [ "#### TODO: Implementing the basic functions\nHere is your turn to shine. Implement the following formulas, as explained in the text.\n- Sigmoid activation function\n\n$$\\sigma(x) = \\frac{1}{1+e^{-x}}$$\n\n- Output (prediction) formula\n\n$$\\hat{y} = \\sigma(w_1 x_1 + w_2 x_2 + b)$$\n\n- Error function\n\n$$Error(y, \\hat{y}) = - y \\log(\\hat{y}) - (1-y) \\log(1-\\hat{y})$$\n\n- The function that updates the weights\n\n$$ w_i \\longrightarrow w_i + \\alpha (y - \\hat{y}) x_i$$\n\n$$ b \\longrightarrow b + \\alpha (y - \\hat{y})$$", "_____no_output_____" ] ], [ [ "# Implement the following functions\n\n# Activation (sigmoid) function\ndef sigmoid(x):\n return 1 / (1 + np.exp(-x))\n\n# Output (prediction) formula\ndef output_formula(features, weights, bias):\n return sigmoid(np.dot(features, weights) + bias)\n\n# Error (log-loss) formula\ndef error_formula(y, output):\n return - y*np.log(output) - (1 - y) * np.log(1-output)\n\n# Gradient descent step\ndef update_weights(x, y, weights, bias, learnrate):\n output = output_formula(x, weights, bias)\n d_error = y - output\n weights += learnrate * d_error * x\n bias += learnrate * d_error\n return weights, bias", "_____no_output_____" ] ], [ [ "#### Training function\nThis function will help us iterate the gradient descent algorithm through all the data, for a number of epochs. 
It will also plot the data, and some of the boundary lines obtained as we run the algorithm.", "_____no_output_____" ] ], [ [ "np.random.seed(44)\n\nepochs = 100\nlearnrate = 0.01\n\ndef train(features, targets, epochs, learnrate, graph_lines=False):\n \n errors = []\n n_records, n_features = features.shape\n last_loss = None\n weights = np.random.normal(scale=1 / n_features**.5, size=n_features)\n bias = 0\n for e in range(epochs):\n del_w = np.zeros(weights.shape)\n for x, y in zip(features, targets):\n output = output_formula(x, weights, bias)\n error = error_formula(y, output)\n weights, bias = update_weights(x, y, weights, bias, learnrate)\n \n # Printing out the log-loss error on the training set\n out = output_formula(features, weights, bias)\n loss = np.mean(error_formula(targets, out))\n errors.append(loss)\n if e % (epochs / 10) == 0:\n print(\"\\n========== Epoch\", e,\"==========\")\n if last_loss and last_loss < loss:\n print(\"Train loss: \", loss, \" WARNING - Loss Increasing\")\n else:\n print(\"Train loss: \", loss)\n last_loss = loss\n predictions = out > 0.5\n accuracy = np.mean(predictions == targets)\n print(\"Accuracy: \", accuracy)\n if graph_lines and e % (epochs / 100) == 0:\n display(-weights[0]/weights[1], -bias/weights[1])\n \n\n # Plotting the solution boundary\n plt.title(\"Solution boundary\")\n display(-weights[0]/weights[1], -bias/weights[1], 'black')\n\n # Plotting the data\n plot_points(features, targets)\n plt.show()\n\n # Plotting the error\n plt.title(\"Error Plot\")\n plt.xlabel('Number of epochs')\n plt.ylabel('Error')\n plt.plot(errors)\n plt.show()", "_____no_output_____" ] ], [ [ "#### Time to train the algorithm!\nWhen we run the function, we'll obtain the following:\n- 10 updates with the current training loss and accuracy\n- A plot of the data and some of the boundary lines obtained. The final one is in black. Notice how the lines get closer and closer to the best fit, as we go through more epochs.\n- A plot of the error function. Notice how it decreases as we go through more epochs.", "_____no_output_____" ] ], [ [ "train(X, y, epochs, learnrate, True)", "_____no_output_____" ] ], [ [ "#### 36. Predicting Student Admissions with Neural Networks\nIn this notebook, we predict student admissions to graduate school at UCLA based on three pieces of data:\n- GRE Scores (Test)\n- GPA Scores (Grades)\n- Class rank (1-4)\n\nThe dataset originally came from here: http://www.ats.ucla.edu/\n\n##### Loading the data\nTo load the data and format it nicely, we will use two very useful packages called Pandas and Numpy. You can read on the documentation here:\n- https://pandas.pydata.org/pandas-docs/stable/\n- https://docs.scipy.org/", "_____no_output_____" ] ], [ [ "# Importing pandas and numpy\nimport pandas as pd\nimport numpy as np\n\n# Reading the csv file into a pandas DataFrame\ndata = pd.read_csv('student_data.csv')\n\n# Printing out the first 10 rows of our data\ndata[:10]", "_____no_output_____" ] ], [ [ "##### Plotting the data\n\nFirst let's make a plot of our data to see how it looks. 
In order to have a 2D plot, let's ingore the rank.", "_____no_output_____" ] ], [ [ "# Importing matplotlib\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\n# Function to help us plot\ndef plot_points(data):\n X = np.array(data[[\"gre\",\"gpa\"]])\n y = np.array(data[\"admit\"])\n admitted = X[np.argwhere(y==1)]\n rejected = X[np.argwhere(y==0)]\n plt.scatter([s[0][0] for s in rejected], [s[0][1] for s in rejected], s = 25, color = 'red', edgecolor = 'k')\n plt.scatter([s[0][0] for s in admitted], [s[0][1] for s in admitted], s = 25, color = 'cyan', edgecolor = 'k')\n plt.xlabel('Test (GRE)')\n plt.ylabel('Grades (GPA)')\n \n# Plotting the points\nplot_points(data)\nplt.show()", "_____no_output_____" ] ], [ [ "Roughly, it looks like the students with high scores in the grades and test passed, while the ones with low scores didn't, but the data is not as nicely separable as we hoped it would. Maybe it would help to take the rank into account? Let's make 4 plots, each one for each rank.", "_____no_output_____" ] ], [ [ "# Separating the ranks\ndata_rank1 = data[data[\"rank\"]==1]\ndata_rank2 = data[data[\"rank\"]==2]\ndata_rank3 = data[data[\"rank\"]==3]\ndata_rank4 = data[data[\"rank\"]==4]\n\n# Plotting the graphs\nplot_points(data_rank1)\nplt.title(\"Rank 1\")\nplt.show()\nplot_points(data_rank2)\nplt.title(\"Rank 2\")\nplt.show()\nplot_points(data_rank3)\nplt.title(\"Rank 3\")\nplt.show()\nplot_points(data_rank4)\nplt.title(\"Rank 4\")\nplt.show()", "_____no_output_____" ] ], [ [ "This looks more promising, as it seems that the lower the rank, the higher the acceptance rate. Let's use the rank as one of our inputs. In order to do this, we should one-hot encode it.\n\n##### TODO: One-hot encoding the rank\nUse the `get_dummies` function in pandas in order to one-hot encode the data.\n\nHint: To drop a column, it's suggested that you use `one_hot_data`[.drop( )](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.drop.html).", "_____no_output_____" ] ], [ [ "# Make dummy variables for rank\none_hot_data = pd.concat([data, pd.get_dummies(data['rank'], prefix='rank')], axis=1)\n\n# Drop the previous rank column\none_hot_data = one_hot_data.drop('rank', axis=1)\n\n# Print the first 10 rows of our data\none_hot_data[:10]", "_____no_output_____" ] ], [ [ "##### TODO: Scaling the data\nThe next step is to scale the data. We notice that the range for grades is 1.0-4.0, whereas the range for test scores is roughly 200-800, which is much larger. This means our data is skewed, and that makes it hard for a neural network to handle. Let's fit our two features into a range of 0-1, by dividing the grades by 4.0, and the test score by 800.", "_____no_output_____" ] ], [ [ "# Copying our data\nprocessed_data = one_hot_data[:]\n\n# Scaling the columns\nprocessed_data['gre'] = processed_data['gre']/800\nprocessed_data['gpa'] = processed_data['gpa']/4.0\nprocessed_data[:10]", "_____no_output_____" ] ], [ [ "##### Splitting the data into Training and Testing", "_____no_output_____" ], [ "In order to test our algorithm, we'll split the data into a Training and a Testing set. 
The size of the testing set will be 10% of the total data.", "_____no_output_____" ] ], [ [ "sample = np.random.choice(processed_data.index, size=int(len(processed_data)*0.9), replace=False)\ntrain_data, test_data = processed_data.iloc[sample], processed_data.drop(sample)\n\nprint(\"Number of training samples is\", len(train_data))\nprint(\"Number of testing samples is\", len(test_data))\nprint(train_data[:10])\nprint(test_data[:10])", "_____no_output_____" ] ], [ [ "##### Splitting the data into features and targets (labels)\nNow, as a final step before the training, we'll split the data into features (X) and targets (y).", "_____no_output_____" ] ], [ [ "features = train_data.drop('admit', axis=1)\ntargets = train_data['admit']\nfeatures_test = test_data.drop('admit', axis=1)\ntargets_test = test_data['admit']\n\nprint(features[:10])\nprint(targets[:10])", "_____no_output_____" ] ], [ [ "##### Training the 2-layer Neural Network\nThe following function trains the 2-layer neural network. First, we'll write some helper functions.", "_____no_output_____" ] ], [ [ "# Activation (sigmoid) function\ndef sigmoid(x):\n return 1 / (1 + np.exp(-x))\ndef sigmoid_prime(x):\n return sigmoid(x) * (1-sigmoid(x))\ndef error_formula(y, output):\n return - y*np.log(output) - (1 - y) * np.log(1-output)", "_____no_output_____" ] ], [ [ "##### TODO: Backpropagate the error\nNow it's your turn to shine. Write the error term. Remember that this is given by the equation $$ (y-\\hat{y}) \\sigma'(x) $$", "_____no_output_____" ] ], [ [ "def error_term_formula(x, y, output):\n return (y - output)*sigmoid_prime(x)", "_____no_output_____" ], [ "# Neural Network hyperparameters\nepochs = 1000\nlearnrate = 0.5\n\n# Training function\ndef train_nn(features, targets, epochs, learnrate):\n \n # Use to same seed to make debugging easier\n np.random.seed(42)\n\n n_records, n_features = features.shape\n last_loss = None\n\n # Initialize weights\n weights = np.random.normal(scale=1 / n_features**.5, size=n_features)\n\n for e in range(epochs):\n del_w = np.zeros(weights.shape)\n for x, y in zip(features.values, targets):\n # Loop through all records, x is the input, y is the target\n\n # Activation of the output unit\n # Notice we multiply the inputs and the weights here \n # rather than storing h as a separate variable \n output = sigmoid(np.dot(x, weights))\n\n # The error, the target minus the network output\n error = error_formula(y, output)\n\n # The error term\n error_term = error_term_formula(x, y, output)\n\n # The gradient descent step, the error times the gradient times the inputs\n del_w += error_term * x\n\n # Update the weights here. 
The learning rate times the \n # change in weights, divided by the number of records to average\n weights += learnrate * del_w / n_records\n\n # Printing out the mean square error on the training set\n if e % (epochs / 10) == 0:\n out = sigmoid(np.dot(features, weights))\n loss = np.mean((out - targets) ** 2)\n print(\"Epoch:\", e)\n if last_loss and last_loss < loss:\n print(\"Train loss: \", loss, \" WARNING - Loss Increasing\")\n else:\n print(\"Train loss: \", loss)\n last_loss = loss\n print(\"=========\")\n print(\"Finished training!\")\n return weights\n \nweights = train_nn(features, targets, epochs, learnrate)", "_____no_output_____" ] ], [ [ "##### Calculating the Accuracy on the Test Data", "_____no_output_____" ] ], [ [ "# Calculate accuracy on test data\ntest_out = sigmoid(np.dot(features_test, weights))\npredictions = test_out > 0.5\naccuracy = np.mean(predictions == targets_test)\nprint(\"Prediction accuracy: {:.3f}\".format(accuracy))", "_____no_output_____" ] ], [ [ "## Lesson 2 - Implementing Gradient Descent", "_____no_output_____" ], [ "### 4. Gradient Descent : The Code", "_____no_output_____" ], [ "Example for a single network output", "_____no_output_____" ] ], [ [ "# Defining the sigmoid function for activations\ndef sigmoid(x):\n return 1/(1+np.exp(-x))\n\n# Derivative of the sigmoid function\ndef sigmoid_prime(x):\n return sigmoid(x) * (1 - sigmoid(x))\n\n# Input data\nx = np.array([0.1, 0.3])\n# Target\ny = 0.2\n# Input to output weights\nweights = np.array([-0.8, 0.5])\n\n# The learning rate, eta in the weight step equation\nlearnrate = 0.5\n\n# the linear combination performed by the node (h in f(h) and f'(h))\nh = x[0]*weights[0] + x[1]*weights[1]\n# or h = np.dot(x, weights)\n\n# The neural network output (y-hat)\nnn_output = sigmoid(h)\n\n# output error (y - y-hat)\nerror = y - nn_output\n\n# output gradient (f'(h))\noutput_grad = sigmoid_prime(h)\n\n# error term (lowercase delta)\nerror_term = error * output_grad\n\n# Gradient descent step \ndel_w = [ learnrate * error_term * x[0],\n learnrate * error_term * x[1]]\n# or del_w = learnrate * error_term * x", "_____no_output_____" ] ], [ [ "Quiz for part 4", "_____no_output_____" ] ], [ [ "import numpy as np\n\ndef sigmoid(x):\n \"\"\"\n Calculate sigmoid\n \"\"\"\n return 1/(1+np.exp(-x))\n\ndef sigmoid_prime(x):\n \"\"\"\n # Derivative of the sigmoid function\n \"\"\"\n return sigmoid(x) * (1 - sigmoid(x))\n\nlearnrate = 0.5\nx = np.array([1, 2, 3, 4])\ny = np.array(0.5)\n\n# Initial weights\nw = np.array([0.5, -0.5, 0.3, 0.1])\n\n### Calculate one gradient descent step for each weight\n### Note: Some steps have been consilated, so there are\n### fewer variable names than in the above sample code\n\n# TODO: Calculate the node's linear combination of inputs and weights\nh = np.dot(x, w)\n\n# TODO: Calculate output of neural network\nnn_output = sigmoid(h)\n\n# TODO: Calculate error of neural network\nerror = y - nn_output\n\n# TODO: Calculate the error term\n# Remember, this requires the output gradient, which we haven't\n# specifically added a variable for.\nerror_term = error * sigmoid_prime(h)\n# Note: The sigmoid_prime function calculates sigmoid(h) twice,\n# but you've already calculated it once. 
You can make this\n# code more efficient by calculating the derivative directly\n# rather than calling sigmoid_prime, like this:\n# error_term = error * nn_output * (1 - nn_output)\n\n# TODO: Calculate change in weights\ndel_w = learnrate * error_term * x\n\nprint('Neural Network output:')\nprint(nn_output)\nprint('Amount of Error:')\nprint(error)\nprint('Change in Weights:')\nprint(del_w)", "_____no_output_____" ] ], [ [ "### 5. Implementing Gradient Descent", "_____no_output_____" ], [ "Quiz", "_____no_output_____" ] ], [ [ "import numpy as np\nfrom data_prep import features, targets, features_test, targets_test\n\n\ndef sigmoid(x):\n \"\"\"\n Calculate sigmoid\n \"\"\"\n return 1 / (1 + np.exp(-x))\n\n# TODO: We haven't provided the sigmoid_prime function like we did in\n# the previous lesson to encourage you to come up with a more\n# efficient solution. If you need a hint, check out the comments\n# in solution.py from the previous lecture.\n\n# Use to same seed to make debugging easier\nnp.random.seed(42)\n\nn_records, n_features = features.shape\nlast_loss = None\n\n# Initialize weights\nweights = np.random.normal(scale=1 / n_features**.5, size=n_features)\n\n# Neural Network hyperparameters\nepochs = 1000\nlearnrate = 0.5\n\nfor e in range(epochs):\n del_w = np.zeros(weights.shape)\n for x, y in zip(features.values, targets):\n # Loop through all records, x is the input, y is the target\n\n # Note: We haven't included the h variable from the previous\n # lesson. You can add it if you want, or you can calculate\n # the h together with the output\n\n # TODO: Calculate the output\n output = sigmoid(np.dot(x, weights))\n\n # TODO: Calculate the error\n error = y - output\n\n # TODO: Calculate the error term\n error_term = error * output * (1 - output)\n\n # TODO: Calculate the change in weights for this sample\n # and add it to the total weight change\n del_w += error_term * x\n\n # TODO: Update weights using the learning rate and the average change in weights\n weights += learnrate * del_w / n_records\n\n # Printing out the mean square error on the training set\n if e % (epochs / 10) == 0:\n out = sigmoid(np.dot(features, weights))\n loss = np.mean((out - targets) ** 2)\n if last_loss and last_loss < loss:\n print(\"Train loss: \", loss, \" WARNING - Loss Increasing\")\n else:\n print(\"Train loss: \", loss)\n last_loss = loss\n\n\n# Calculate accuracy on test data\ntes_out = sigmoid(np.dot(features_test, weights))\npredictions = tes_out > 0.5\naccuracy = np.mean(predictions == targets_test)\nprint(\"Prediction accuracy: {:.3f}\".format(accuracy))", "_____no_output_____" ] ], [ [ "### 6. 
Multilayer Perceptrons", "_____no_output_____" ], [ "Quiz", "_____no_output_____" ] ], [ [ "import numpy as np\n\ndef sigmoid(x):\n \"\"\"\n Calculate sigmoid\n \"\"\"\n return 1/(1+np.exp(-x))\n\n# Network size\nN_input = 4\nN_hidden = 3\nN_output = 2\n\nnp.random.seed(42)\n# Make some fake data\nX = np.random.randn(4)\n\nweights_input_to_hidden = np.random.normal(0, scale=0.1, size=(N_input, N_hidden))\nweights_hidden_to_output = np.random.normal(0, scale=0.1, size=(N_hidden, N_output))\n\n\n# TODO: Make a forward pass through the network\n\n# TODO: Make a forward pass through the network\n\nhidden_layer_in = np.dot(X, weights_input_to_hidden)\nhidden_layer_out = sigmoid(hidden_layer_in)\n\nprint('Hidden-layer Output:')\nprint(hidden_layer_out)\n\noutput_layer_in = np.dot(hidden_layer_out, weights_hidden_to_output)\noutput_layer_out = sigmoid(output_layer_in)\n\nprint('Output-layer Output:')\nprint(output_layer_out)", "_____no_output_____" ] ], [ [ "### 7. Backpropagation", "_____no_output_____" ], [ "Quiz", "_____no_output_____" ] ], [ [ "import numpy as np\n\n\ndef sigmoid(x):\n \"\"\"\n Calculate sigmoid\n \"\"\"\n return 1 / (1 + np.exp(-x))\n\n\nx = np.array([0.5, 0.1, -0.2])\ntarget = 0.6\nlearnrate = 0.5\n\nweights_input_hidden = np.array([[0.5, -0.6],\n [0.1, -0.2],\n [0.1, 0.7]])\n\nweights_hidden_output = np.array([0.1, -0.3])\n\n## Forward pass\nhidden_layer_input = np.dot(x, weights_input_hidden)\nhidden_layer_output = sigmoid(hidden_layer_input)\n\noutput_layer_in = np.dot(hidden_layer_output, weights_hidden_output)\noutput = sigmoid(output_layer_in)\n\n## Backwards pass\n## TODO: Calculate output error\nerror = target - output\n\n# TODO: Calculate error term for output layer\noutput_error_term = error * output * (1 - output)\n\n# TODO: Calculate error term for hidden layer\nhidden_error_term = np.dot(output_error_term, weights_hidden_output) * \\\n hidden_layer_output * (1 - hidden_layer_output)\n\n# TODO: Calculate change in weights for hidden layer to output layer\ndelta_w_h_o = learnrate * output_error_term * hidden_layer_output\n\n# TODO: Calculate change in weights for input layer to hidden layer\ndelta_w_i_h = learnrate * hidden_error_term * x[:, None]\n\nprint('Change in weights for hidden layer to output layer:')\nprint(delta_w_h_o)\nprint('Change in weights for input layer to hidden layer:')\nprint(delta_w_i_h)", "_____no_output_____" ] ], [ [ "### 8. 
Implementing Backpropagation", "_____no_output_____" ], [ "Quiz", "_____no_output_____" ] ], [ [ "import numpy as np\nfrom data_prep import features, targets, features_test, targets_test\n\nnp.random.seed(21)\n\ndef sigmoid(x):\n \"\"\"\n Calculate sigmoid\n \"\"\"\n return 1 / (1 + np.exp(-x))\n\n\n# Hyperparameters\nn_hidden = 2 # number of hidden units\nepochs = 900\nlearnrate = 0.005\n\nn_records, n_features = features.shape\nlast_loss = None\n# Initialize weights\nweights_input_hidden = np.random.normal(scale=1 / n_features ** .5,\n size=(n_features, n_hidden))\nweights_hidden_output = np.random.normal(scale=1 / n_features ** .5,\n size=n_hidden)\n\nfor e in range(epochs):\n del_w_input_hidden = np.zeros(weights_input_hidden.shape)\n del_w_hidden_output = np.zeros(weights_hidden_output.shape)\n for x, y in zip(features.values, targets):\n ## Forward pass ##\n # TODO: Calculate the output\n hidden_input = np.dot(x, weights_input_hidden)\n hidden_output = sigmoid(hidden_input)\n\n output = sigmoid(np.dot(hidden_output,\n weights_hidden_output))\n\n ## Backward pass ##\n # TODO: Calculate the network's prediction error\n error = y - output\n\n # TODO: Calculate error term for the output unit\n output_error_term = error * output * (1 - output)\n\n ## propagate errors to hidden layer\n\n # TODO: Calculate the hidden layer's contribution to the error\n hidden_error = np.dot(output_error_term, weights_hidden_output)\n\n # TODO: Calculate the error term for the hidden layer\n hidden_error_term = hidden_error * hidden_output * (1 - hidden_output)\n\n # TODO: Update the change in weights\n del_w_hidden_output += output_error_term * hidden_output\n del_w_input_hidden += hidden_error_term * x[:, None]\n\n # TODO: Update weights\n weights_input_hidden += learnrate * del_w_input_hidden / n_records\n weights_hidden_output += learnrate * del_w_hidden_output / n_records\n\n # Printing out the mean square error on the training set\n if e % (epochs / 10) == 0:\n hidden_output = sigmoid(np.dot(x, weights_input_hidden))\n out = sigmoid(np.dot(hidden_output,\n weights_hidden_output))\n loss = np.mean((out - targets) ** 2)\n\n if last_loss and last_loss < loss:\n print(\"Train loss: \", loss, \" WARNING - Loss Increasing\")\n else:\n print(\"Train loss: \", loss)\n last_loss = loss\n\n# Calculate accuracy on test data\nhidden = sigmoid(np.dot(features_test, weights_input_hidden))\nout = sigmoid(np.dot(hidden, weights_hidden_output))\npredictions = out > 0.5\naccuracy = np.mean(predictions == targets_test)\nprint(\"Prediction accuracy: {:.3f}\".format(accuracy))\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ] ]
cb7698f1689a47cd8a867190fb45b2744338a8a5
10,003
ipynb
Jupyter Notebook
logistic_regression/indiv_task_notebooks/logistic_regression_mnist.ipynb
wengcindy/671-final
660b61465178949d825cfd2d976ffdcba1a03ad8
[ "MIT" ]
null
null
null
logistic_regression/indiv_task_notebooks/logistic_regression_mnist.ipynb
wengcindy/671-final
660b61465178949d825cfd2d976ffdcba1a03ad8
[ "MIT" ]
null
null
null
logistic_regression/indiv_task_notebooks/logistic_regression_mnist.ipynb
wengcindy/671-final
660b61465178949d825cfd2d976ffdcba1a03ad8
[ "MIT" ]
null
null
null
24.457213
107
0.518344
[ [ [ "import torch\nimport torch.nn as nn\nimport torchvision\nimport torchvision.transforms as transforms\nimport torch.optim as optim\nimport numpy as np\nimport pandas as pd", "_____no_output_____" ], [ "# Hyperparameters \ninput_size = 28 * 28 # 784\nnum_classes = 10\nnum_epochs = 5\nbatch_size = 100\nlr = 0.01", "_____no_output_____" ], [ "# MNIST dataset (images and labels)\ntrain_dataset = torchvision.datasets.MNIST(root='../../data', \n train=True, \n transform=transforms.ToTensor(),\n download=True)\n\ntest_dataset = torchvision.datasets.MNIST(root='../../data', \n train=False, \n transform=transforms.ToTensor())\n\n# Data loader (input pipeline)\ntrain_loader_mnist = torch.utils.data.DataLoader(dataset=train_dataset, \n batch_size=batch_size, \n shuffle=True)\n\ntest_loader_mnist = torch.utils.data.DataLoader(dataset=test_dataset, \n batch_size=batch_size, \n shuffle=False)", "_____no_output_____" ], [ "# Logistic regression model.\nmodel = torch.nn.Sequential(\n torch.nn.Flatten(),\n torch.nn.Linear(input_size, num_classes),\n torch.nn.LogSoftmax(dim=1) \n)", "_____no_output_____" ], [ "# Loss and optimizer\n# nn.CrossEntropyLoss() computes softmax internally\ncriterion = nn.CrossEntropyLoss() ", "_____no_output_____" ], [ "# Train and test functions.\ndef train(model, train_loader, optimizer, num_epochs, criterion, input_size, log_interval):\n model.train()\n for epoch in range(num_epochs):\n print('Epoch {}'.format(epoch+1))\n for i, (images, labels) in enumerate(train_loader):\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward + backward + optimize\n outputs = model(images)\n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n \n # Log the loss.\n if i % log_interval == 0:\n print('Current loss: {}'.format(loss))\n\ndef test(model, test_loader,criterion):\n model.eval()\n test_acc = 0\n total_data = 0\n loss = 0\n with torch.no_grad():\n for _, (images, labels) in enumerate(test_loader):\n output = model(images)\n pred = output.argmax(dim=1, keepdim=True)\n test_acc += pred.eq(labels.view_as(pred)).sum().item()\n total_data += len(images)\n loss = criterion(output, labels)\n \n print('Loss: {}'.format(loss))\n \n test_acc /= total_data\n print('Test accuracy over {} data points: {}%'.format(total_data, test_acc * 100))\n \n return loss.item()", "_____no_output_____" ], [ "test_losses = []", "_____no_output_____" ] ], [ [ "# SGD", "_____no_output_____" ] ], [ [ "optimizer = torch.optim.SGD(model.parameters(), lr=lr) \ntrain(model, train_loader_mnist, optimizer, num_epochs, criterion, input_size, 100)", "_____no_output_____" ], [ "test_loss = test(model, test_loader_mnist, criterion)\ntest_losses.append(test_loss)", "_____no_output_____" ] ], [ [ "# SGD Momentum", "_____no_output_____" ] ], [ [ "optimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum=0.9) \ntrain(model, train_loader_mnist, optimizer, num_epochs, criterion, input_size, 100)", "_____no_output_____" ], [ "test_loss = test(model, test_loader_mnist, criterion)\ntest_losses.append(test_loss)", "_____no_output_____" ] ], [ [ "# SGD Nesterov", "_____no_output_____" ] ], [ [ "optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9, nesterov=True)\ntrain(model, train_loader_mnist, optimizer, num_epochs, criterion, input_size, 100)", "_____no_output_____" ], [ "test_loss = test(model, test_loader_mnist, criterion)\ntest_losses.append(test_loss)", "_____no_output_____" ] ], [ [ "# Adagrad", "_____no_output_____" ] ], [ [ "optimizer = optim.Adagrad(model.parameters(), 
lr=0.01)\ntrain(model, train_loader_mnist, optimizer, num_epochs, criterion, input_size, 100)", "_____no_output_____" ], [ "test_loss = test(model, test_loader_mnist, criterion)\ntest_losses.append(test_loss)", "_____no_output_____" ] ], [ [ "# RMSProp", "_____no_output_____" ] ], [ [ "optimizer = optim.RMSprop(model.parameters(), lr=0.001)\ntrain(model, train_loader_mnist, optimizer, num_epochs, criterion, input_size, 100)", "_____no_output_____" ], [ "test_loss = test(model, test_loader_mnist, criterion)\ntest_losses.append(test_loss)", "_____no_output_____" ] ], [ [ "# Adam", "_____no_output_____" ] ], [ [ "optimizer = torch.optim.Adam(model.parameters(), lr=lr) \ntrain(model, train_loader_mnist, optimizer, num_epochs, criterion, input_size, 100)", "_____no_output_____" ], [ "test_loss = test(model, test_loader_mnist, criterion)\ntest_losses.append(test_loss)", "_____no_output_____" ], [ "col = ['SGD','Momentum','Nesterov','Adagrad','RMSProp','Adam']\ndf = pd.DataFrame(data=[test_losses], columns=col)\ndf", "_____no_output_____" ], [ "df.to_csv('logistic_regression_mnist_loss.csv')", "_____no_output_____" ] ], [ [ "# Normalize loss", "_____no_output_____" ] ], [ [ "test_losses = np.asarray(test_losses)\nnormalized_test_losses = []\n\nmean = np.mean(test_losses)\nminus_mean = test_losses - mean\nnormalized_test_losses.append((minus_mean)/np.linalg.norm(minus_mean))\n\nprint(normalized_test_losses)", "_____no_output_____" ], [ "col = ['SGD','Momentum','Nesterov','Adagrad','RMSProp','Adam']\ndf = pd.DataFrame(data=normalized_test_losses, columns=col, index = ['Logistic regression MNIST'])\ndf", "_____no_output_____" ], [ "df.to_csv('logistic_regression_mnist_normalized_loss.csv')", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
cb76aea4cf97dc504aba1963475038803d837614
12,566
ipynb
Jupyter Notebook
.ipynb_checkpoints/mission_to_mars-checkpoint.ipynb
Lareena/web-scraping-challenge
9a4c53145196d84ad7c074e591f2bb73a49606f6
[ "ADSL" ]
null
null
null
.ipynb_checkpoints/mission_to_mars-checkpoint.ipynb
Lareena/web-scraping-challenge
9a4c53145196d84ad7c074e591f2bb73a49606f6
[ "ADSL" ]
null
null
null
.ipynb_checkpoints/mission_to_mars-checkpoint.ipynb
Lareena/web-scraping-challenge
9a4c53145196d84ad7c074e591f2bb73a49606f6
[ "ADSL" ]
null
null
null
26.016563
263
0.576317
[ [ [ "!pip install splinter", "_____no_output_____" ], [ "#Import Dependencies\nfrom splinter import Browser\nfrom bs4 import BeautifulSoup\nimport requests\nimport re\nimport pandas as pd\nimport pymongo", "_____no_output_____" ], [ "from selenium import webdriver\nfrom selenium.webdriver.chrome.service import Service\n\n", "_____no_output_____" ] ], [ [ "\nNASA Mars News¶\nScrape the NASA Mars News Site and collect the latest News Title and Paragraph Text. Assign the text to variables that you can reference later.", "_____no_output_____" ] ], [ [ "# URL of page to be scraped\nurl = 'https://mars.nasa.gov/news/'", "_____no_output_____" ], [ "# Retrieve page with the requests module\nresponse = requests.get(url)\n#response.headers\n#response.content", "_____no_output_____" ], [ "# Create BeautifulSoup object; parse with 'html.parser'\nsoup = BeautifulSoup(response.text, 'html.parser')", "_____no_output_____" ], [ "# Examine the results, then determine element that contains sought info\n#print(soup.prettify())", "_____no_output_____" ], [ "# First paragraph result returned for first article on page\nnews_paragraph = soup.find('div', class_=\"rollover_description_inner\").text.strip()\nnews_paragraph", "_____no_output_____" ], [ "# First title result returned for first article on page\nnews_title = soup.find('div', class_=\"content_title\").a.text.strip()\nnews_title", "_____no_output_____" ] ], [ [ "JPL Mars Space Images - Featured Image\nVisit the url for JPL Featured Space Image here.\n\nUse splinter to navigate the site and find the image url for the current Featured Mars Image and assign the url string to a variable called featured_image_url.\n\nMake sure to find the image url to the full size .jpg image.\n\nMake sure to save a complete url string for this image.", "_____no_output_____" ] ], [ [ "executable_path = {'executable_path': '\\User\\reena\\OneDrive\\Desktop\\chromedriver.exe'}\nbrowser = Browser('chrome', **executable_path, headless=False)", "_____no_output_____" ], [ "url = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'\nbrowser.visit(url)", "_____no_output_____" ], [ "browser.click_link_by_partial_text('more info')\n", "_____no_output_____" ], [ "# Extra code added here to test\nhtml = browser.html\nsoup = BeautifulSoup(html, 'html.parser')\n\nfeat_img = soup.find('figure', class_='lede')", "_____no_output_____" ], [ "featured_image_url = f'https://www.jpl.nasa.gov{feat_img.a.img[\"src\"]}'\n", "_____no_output_____" ], [ "featured_image_url", "_____no_output_____" ] ], [ [ "Mars Weather\n\nVisit the Mars Weather twitter account here and scrape the latest Mars weather tweet from the page. Save the tweet text for the weather report as a variable called mars_weather.\n\nNote: Be sure you are not signed in to twitter, or scraping may become more difficult.\n\nNote: Twitter frequently changes how information is presented on their website. 
If you are having difficulty getting the correct html tag data, consider researching Regular Expression Patterns and how they can be used in combination with the .find() method.", "_____no_output_____" ] ], [ [ "# URL of page to be scraped\nurl = 'https://twitter.com/marswxreport?lang=en'", "_____no_output_____" ], [ "# Retrieve page with the requests module\nresponse = requests.get(url)\n#response.headers\n#response.content", "_____no_output_____" ], [ "# Create BeautifulSoup object; parse with 'html.parser'\nsoup = BeautifulSoup(response.text, 'html.parser')", "_____no_output_____" ], [ "# Examine the results, then determine element that contains sought info\n#print(soup.prettify())", "_____no_output_____" ], [ "# results are returned as an iterable list\ntweets = soup.find_all('p', class_=\"TweetTextSize TweetTextSize--normal js-tweet-text tweet-text\")", "_____no_output_____" ], [ "# Loop through returned results and match the first tweet that starts with 'Insight sol'\nfor tweet in tweets:\n # Error handling\n try:\n # Create a regular expression to match the first phrase of a tweet about the weather\n regex = '^InSight sol'\n # Print results only if title, price, and link are available\n if re.match(regex,tweet.text) is not None:\n weather_data = tweet.text\n break\n except AttributeError as e:\n print(e)", "_____no_output_____" ], [ "#weather_data", "_____no_output_____" ] ], [ [ "Mars Facts\n\nVisit the Mars Facts webpage here and use Pandas to scrape the table containing facts about the planet including Diameter, Mass, etc.\n\nUse Pandas to convert the data to a HTML table string.", "_____no_output_____" ] ], [ [ "url = 'https://space-facts.com/mars/'\n", "_____no_output_____" ], [ "# Use pandas to read the html table data on the page into a list of dictionaries\ntables = pd.read_html(url)\n#tables", "_____no_output_____" ], [ "\n# Read the first dictionary in the list into a pandas dataframe and name columns\ndf = tables[0]\ndf.columns = ['Parameter', 'Value']", "_____no_output_____" ], [ "df.set_index('Parameter', inplace=True)\ndf", "_____no_output_____" ], [ "\n# Convert the dataframe into an html table, strip the end of line newlines and \n# write the result to an html file to view \nfact_table = df.to_html()\nfact_table = fact_table.replace('\\n', '')\nfact_table", "_____no_output_____" ], [ "# Inspect the result in a browser\ndf.to_html('table.html')\n!explorer table.html", "_____no_output_____" ] ], [ [ "Mars Hemispheres\n\nVisit the USGS Astrogeology site here to obtain high resolution images for each of Mar's hemispheres.\n\nYou will need to click each of the links to the hemispheres in order to find the image url to the full resolution image.\n\nSave both the image url string for the full resolution hemisphere image, and the Hemisphere title containing the hemisphere name. Use a Python dictionary to store the data using the keys img_url and title.\n\nAppend the dictionary with the image url string and the hemisphere title to a list. 
This list will contain one dictionary for each hemisphere.", "_____no_output_____" ] ], [ [ "executable_path = {'executable_path': 'chromedriver.exe'}\nbrowser = Browser('chrome', **executable_path, headless=False)", "_____no_output_____" ], [ "url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'\nbrowser.visit(url)", "_____no_output_____" ], [ "# Get page html and make beautifulsoup object\nhtml = browser.html\nsoup = BeautifulSoup(html, 'html.parser')", "_____no_output_____" ], [ "# Get the html containing the titles and put into a list\ntitle_list = soup.find_all('div', class_='description')", "_____no_output_____" ], [ "# Loop through the div objects and scrape titles and urls of hires images\n# Initiate the list to store dictionaries\nhemisphere_image_urls = []\nfor title in title_list:\n # Navigate browser to page then click on title link to hires image page\n browser.visit(url)\n browser.click_link_by_partial_text(title.a.h3.text)\n\n # Grab the destination page html and make into BeautifulSoup object\n html = browser.html\n soup = BeautifulSoup(html, 'html.parser')\n\n # Parse the hires image source(src) relative url then append to domain name\n # for absolute url \n img_url_list = soup.find('img', class_='wide-image')\n img_url = f\"https://astrogeology.usgs.gov{img_url_list['src']}\"\n\n # Create dictionary with returned values and add dict to hemisphere_image_urls list\n post = {\n 'title': title.a.h3.text,\n 'image_url': img_url\n }\n hemisphere_image_urls.append(post)", "_____no_output_____" ], [ "hemisphere_image_urls", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
cb76ba8c3c7f2965ac66a1f2269873c217c663bd
1,380
ipynb
Jupyter Notebook
conditional/main_conditional_cifar_bs900_drop_0_5_run2.ipynb
minhtannguyen/ffjord
f3418249eaa4647f4339aea8d814cf2ce33be141
[ "MIT" ]
null
null
null
conditional/main_conditional_cifar_bs900_drop_0_5_run2.ipynb
minhtannguyen/ffjord
f3418249eaa4647f4339aea8d814cf2ce33be141
[ "MIT" ]
null
null
null
conditional/main_conditional_cifar_bs900_drop_0_5_run2.ipynb
minhtannguyen/ffjord
f3418249eaa4647f4339aea8d814cf2ce33be141
[ "MIT" ]
null
null
null
26.037736
510
0.608696
[ [ [ "import os\nos.environ['CUDA_VISIBLE_DEVICES']='4,5,6,7'", "_____no_output_____" ], [ "%run -p ../train_cnf_drop_cifar.py --data cifar10 --dims 64,64,64 --strides 1,1,1,1 --num_blocks 2 --layer_type concat --multiscale True --rademacher True --batch_size 900 --test_batch_size 500 --save ../experiments_published/cnf_conditional_cifar10_bs900_drop_0_5_run2 --resume ../experiments_published/cnf_conditional_cifar10_bs900_drop_0_5_run2/epoch_250_checkpt.pth --seed 2 --lr 0.0001 --conditional True --controlled_tol False --train_mode semisup --log_freq 10 --weight_y 0.5 --dropout_rate 0.5\n#", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code" ] ]
cb76c76021b7b6642b896d8e84c37bc1c0ca4526
1,455
ipynb
Jupyter Notebook
index.ipynb
olippuner/Pandas_Docu_as_Notebooks
2a496d0684ba9d6502dd065b9125719f6b9fc3e7
[ "BSD-3-Clause" ]
null
null
null
index.ipynb
olippuner/Pandas_Docu_as_Notebooks
2a496d0684ba9d6502dd065b9125719f6b9fc3e7
[ "BSD-3-Clause" ]
null
null
null
index.ipynb
olippuner/Pandas_Docu_as_Notebooks
2a496d0684ba9d6502dd065b9125719f6b9fc3e7
[ "BSD-3-Clause" ]
null
null
null
21.716418
75
0.586942
[ [ [ "# Welcome to Pandas documentation prepared as Python Notebooks’s\n\nassembled as notebooks by LPNO, Switzerland, 2021.08.\n\nPlease read LICENSE in 01_getting_started/overview", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown" ] ]
cb76c7cf83b77be02f3f0ba27ab17eca4753bdc2
92,248
ipynb
Jupyter Notebook
SageMaker Project.ipynb
haocui88/sentiment_analysis_udacity_ml_pro1
98b3293d5e6c2f9677177906f70dbd02aaf5aff1
[ "MIT" ]
null
null
null
SageMaker Project.ipynb
haocui88/sentiment_analysis_udacity_ml_pro1
98b3293d5e6c2f9677177906f70dbd02aaf5aff1
[ "MIT" ]
null
null
null
SageMaker Project.ipynb
haocui88/sentiment_analysis_udacity_ml_pro1
98b3293d5e6c2f9677177906f70dbd02aaf5aff1
[ "MIT" ]
null
null
null
49.72938
1,155
0.615742
[ [ [ "# Creating a Sentiment Analysis Web App\n## Using PyTorch and SageMaker\n\n_Deep Learning Nanodegree Program | Deployment_\n\n---\n\nNow that we have a basic understanding of how SageMaker works we will try to use it to construct a complete project from end to end. Our goal will be to have a simple web page which a user can use to enter a movie review. The web page will then send the review off to our deployed model which will predict the sentiment of the entered review.\n\n## Instructions\n\nSome template code has already been provided for you, and you will need to implement additional functionality to successfully complete this notebook. You will not need to modify the included code beyond what is requested. Sections that begin with '**TODO**' in the header indicate that you need to complete or implement some portion within them. Instructions will be provided for each section and the specifics of the implementation are marked in the code block with a `# TODO: ...` comment. Please be sure to read the instructions carefully!\n\nIn addition to implementing code, there will be questions for you to answer which relate to the task and your implementation. Each section where you will answer a question is preceded by a '**Question:**' header. Carefully read each question and provide your answer below the '**Answer:**' header by editing the Markdown cell.\n\n> **Note**: Code and Markdown cells can be executed using the **Shift+Enter** keyboard shortcut. In addition, a cell can be edited by typically clicking it (double-click for Markdown cells) or by pressing **Enter** while it is highlighted.\n\n## General Outline\n\nRecall the general outline for SageMaker projects using a notebook instance.\n\n1. Download or otherwise retrieve the data.\n2. Process / Prepare the data.\n3. Upload the processed data to S3.\n4. Train a chosen model.\n5. Test the trained model (typically using a batch transform job).\n6. Deploy the trained model.\n7. Use the deployed model.\n\nFor this project, you will be following the steps in the general outline with some modifications. \n\nFirst, you will not be testing the model in its own step. You will still be testing the model, however, you will do it by deploying your model and then using the deployed model by sending the test data to it. One of the reasons for doing this is so that you can make sure that your deployed model is working correctly before moving forward.\n\nIn addition, you will deploy and use your trained model a second time. In the second iteration you will customize the way that your trained model is deployed by including some of your own code. In addition, your newly deployed model will be used in the sentiment analysis web app.", "_____no_output_____" ], [ "## Step 1: Downloading the data\n\nAs in the XGBoost in SageMaker notebook, we will be using the [IMDb dataset](http://ai.stanford.edu/~amaas/data/sentiment/)\n\n> Maas, Andrew L., et al. [Learning Word Vectors for Sentiment Analysis](http://ai.stanford.edu/~amaas/data/sentiment/). In _Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies_. Association for Computational Linguistics, 2011.", "_____no_output_____" ] ], [ [ "%mkdir ../data\n!wget -O ../data/aclImdb_v1.tar.gz http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz\n!tar -zxf ../data/aclImdb_v1.tar.gz -C ../data", "--2020-01-27 19:21:48-- http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz\nResolving ai.stanford.edu (ai.stanford.edu)... 
171.64.68.10\nConnecting to ai.stanford.edu (ai.stanford.edu)|171.64.68.10|:80... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 84125825 (80M) [application/x-gzip]\nSaving to: ‘../data/aclImdb_v1.tar.gz’\n\n../data/aclImdb_v1. 100%[===================>] 80.23M 23.7MB/s in 4.5s \n\n2020-01-27 19:21:53 (17.9 MB/s) - ‘../data/aclImdb_v1.tar.gz’ saved [84125825/84125825]\n\n" ] ], [ [ "## Step 2: Preparing and Processing the data\n\nAlso, as in the XGBoost notebook, we will be doing some initial data processing. The first few steps are the same as in the XGBoost example. To begin with, we will read in each of the reviews and combine them into a single input structure. Then, we will split the dataset into a training set and a testing set.", "_____no_output_____" ] ], [ [ "import os\nimport glob\n\ndef read_imdb_data(data_dir='../data/aclImdb'):\n data = {}\n labels = {}\n \n for data_type in ['train', 'test']:\n data[data_type] = {}\n labels[data_type] = {}\n \n for sentiment in ['pos', 'neg']:\n data[data_type][sentiment] = []\n labels[data_type][sentiment] = []\n \n path = os.path.join(data_dir, data_type, sentiment, '*.txt')\n files = glob.glob(path)\n \n for f in files:\n with open(f) as review:\n data[data_type][sentiment].append(review.read())\n # Here we represent a positive review by '1' and a negative review by '0'\n labels[data_type][sentiment].append(1 if sentiment == 'pos' else 0)\n \n assert len(data[data_type][sentiment]) == len(labels[data_type][sentiment]), \\\n \"{}/{} data size does not match labels size\".format(data_type, sentiment)\n \n return data, labels", "_____no_output_____" ], [ "data, labels = read_imdb_data()\nprint(\"IMDB reviews: train = {} pos / {} neg, test = {} pos / {} neg\".format(\n len(data['train']['pos']), len(data['train']['neg']),\n len(data['test']['pos']), len(data['test']['neg'])))", "IMDB reviews: train = 12500 pos / 12500 neg, test = 12500 pos / 12500 neg\n" ] ], [ [ "Now that we've read the raw training and testing data from the downloaded dataset, we will combine the positive and negative reviews and shuffle the resulting records.", "_____no_output_____" ] ], [ [ "from sklearn.utils import shuffle\n\ndef prepare_imdb_data(data, labels):\n \"\"\"Prepare training and test sets from IMDb movie reviews.\"\"\"\n \n #Combine positive and negative reviews and labels\n data_train = data['train']['pos'] + data['train']['neg']\n data_test = data['test']['pos'] + data['test']['neg']\n labels_train = labels['train']['pos'] + labels['train']['neg']\n labels_test = labels['test']['pos'] + labels['test']['neg']\n \n #Shuffle reviews and corresponding labels within training and test sets\n data_train, labels_train = shuffle(data_train, labels_train)\n data_test, labels_test = shuffle(data_test, labels_test)\n \n # Return a unified training data, test data, training labels, test labets\n return data_train, data_test, labels_train, labels_test", "_____no_output_____" ], [ "train_X, test_X, train_y, test_y = prepare_imdb_data(data, labels)\nprint(\"IMDb reviews (combined): train = {}, test = {}\".format(len(train_X), len(test_X)))", "IMDb reviews (combined): train = 25000, test = 25000\n" ] ], [ [ "Now that we have our training and testing sets unified and prepared, we should do a quick check and see an example of the data our model will be trained on. 
This is generally a good idea as it allows you to see how each of the further processing steps affects the reviews and it also ensures that the data has been loaded correctly.", "_____no_output_____" ] ], [ [ "print(train_X[100])\nprint(train_y[100])", "Frank Capra's creativity must have been just about spent by the time he made this film. While it has a few charming moments, and many wonderful performers, Capra's outright recycling of not just the script but considerable footage from his first version of this story, Broadway Bill (1934), is downright shoddy. It is understandable that he would re-use footage from the climactic horse race, which is thrilling. But he uses entire dialogue scenes with minor actors, then brings back those actors and apparently expects us not to notice, for example, that Ward Bond is 14 years older! Unless you want to see one of the last appearances of Oliver Hardy, skip this one and watch Broadway Bill instead.\n0\n" ] ], [ [ "The first step in processing the reviews is to make sure that any html tags that appear are removed. In addition we wish to tokenize our input, that way words such as *entertained* and *entertaining* are considered the same with regard to sentiment analysis.", "_____no_output_____" ] ], [ [ "import nltk\nfrom nltk.corpus import stopwords\nfrom nltk.stem.porter import *\n\nimport re\nfrom bs4 import BeautifulSoup\n\ndef review_to_words(review):\n    nltk.download(\"stopwords\", quiet=True)\n    stemmer = PorterStemmer()\n    \n    text = BeautifulSoup(review, \"html.parser\").get_text() # Remove HTML tags\n    text = re.sub(r\"[^a-zA-Z0-9]\", \" \", text.lower()) # Convert to lower case\n    words = text.split() # Split string into words\n    words = [w for w in words if w not in stopwords.words(\"english\")] # Remove stopwords\n    words = [PorterStemmer().stem(w) for w in words] # stem\n    \n    return words", "_____no_output_____" ] ], [ [ "The `review_to_words` method defined above uses `BeautifulSoup` to remove any html tags that appear and uses the `nltk` package to tokenize the reviews. As a check to ensure we know how everything is working, try applying `review_to_words` to one of the reviews in the training set.", "_____no_output_____" ] ], [ [ "# TODO: Apply review_to_words to a review (train_X[100] or any other review)\nreview_to_words(train_X[100])", "_____no_output_____" ] ], [ [ "**Question:** Above we mentioned that the `review_to_words` method removes html formatting and allows us to tokenize the words found in a review, for example, converting *entertained* and *entertaining* into *entertain* so that they are treated as though they are the same word. What else, if anything, does this method do to the input?", "_____no_output_____" ], [ "**Answer:**\nBesides the functions mentioned above, this method also does the following:\n\n- Removes HTML tags\n\n- Converts all words to lowercase\n\n- Splits sentences into separate words\n\n- Removes English stopwords\n\n- Applies Porter stemming to each remaining word\n", "_____no_output_____" ], [ "The method below applies the `review_to_words` method to each of the reviews in the training and testing datasets. In addition it caches the results. This is because performing this processing step can take a long time. 
This way if you are unable to complete the notebook in the current session, you can come back without needing to process the data a second time.", "_____no_output_____" ] ], [ [ "import pickle\n\ncache_dir = os.path.join(\"../cache\", \"sentiment_analysis\") # where to store cache files\nos.makedirs(cache_dir, exist_ok=True) # ensure cache directory exists\n\ndef preprocess_data(data_train, data_test, labels_train, labels_test,\n cache_dir=cache_dir, cache_file=\"preprocessed_data.pkl\"):\n \"\"\"Convert each review to words; read from cache if available.\"\"\"\n\n # If cache_file is not None, try to read from it first\n cache_data = None\n if cache_file is not None:\n try:\n with open(os.path.join(cache_dir, cache_file), \"rb\") as f:\n cache_data = pickle.load(f)\n print(\"Read preprocessed data from cache file:\", cache_file)\n except:\n pass # unable to read from cache, but that's okay\n \n # If cache is missing, then do the heavy lifting\n if cache_data is None:\n # Preprocess training and test data to obtain words for each review\n #words_train = list(map(review_to_words, data_train))\n #words_test = list(map(review_to_words, data_test))\n words_train = [review_to_words(review) for review in data_train]\n words_test = [review_to_words(review) for review in data_test]\n \n # Write to cache file for future runs\n if cache_file is not None:\n cache_data = dict(words_train=words_train, words_test=words_test,\n labels_train=labels_train, labels_test=labels_test)\n with open(os.path.join(cache_dir, cache_file), \"wb\") as f:\n pickle.dump(cache_data, f)\n print(\"Wrote preprocessed data to cache file:\", cache_file)\n else:\n # Unpack data loaded from cache file\n words_train, words_test, labels_train, labels_test = (cache_data['words_train'],\n cache_data['words_test'], cache_data['labels_train'], cache_data['labels_test'])\n \n return words_train, words_test, labels_train, labels_test", "_____no_output_____" ], [ "# Preprocess data\ntrain_X, test_X, train_y, test_y = preprocess_data(train_X, test_X, train_y, test_y)", "Read preprocessed data from cache file: preprocessed_data.pkl\n" ] ], [ [ "## Transform the data\n\nIn the XGBoost notebook we transformed the data from its word representation to a bag-of-words feature representation. For the model we are going to construct in this notebook we will construct a feature representation which is very similar. To start, we will represent each word as an integer. Of course, some of the words that appear in the reviews occur very infrequently and so likely don't contain much information for the purposes of sentiment analysis. The way we will deal with this problem is that we will fix the size of our working vocabulary and we will only include the words that appear most frequently. We will then combine all of the infrequent words into a single category and, in our case, we will label it as `1`.\n\nSince we will be using a recurrent neural network, it will be convenient if the length of each review is the same. To do this, we will fix a size for our reviews and then pad short reviews with the category 'no word' (which we will label `0`) and truncate long reviews.", "_____no_output_____" ], [ "### (TODO) Create a word dictionary\n\nTo begin with, we need to construct a way to map words that appear in the reviews to integers. 
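To make these conventions concrete before building the real dictionary, here is a toy illustration; the words and indices below are made up and are not the actual `word_dict`:

```python
# Toy example of the target encoding: 0 = 'no word' padding, 1 = infrequent word,
# and 2 upwards = the most frequent words in the vocabulary. Values are illustrative only.
toy_dict = {'movi': 2, 'film': 3, 'great': 4}
toy_review = ['great', 'movi', 'someveryrareword']
encoded = [toy_dict.get(word, 1) for word in toy_review] + [0] * 2  # pad out to length 5
print(encoded)  # [4, 2, 1, 0, 0]
```

The real mapping is built in the same spirit below.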
Here we fix the size of our vocabulary (including the 'no word' and 'infrequent' categories) to be `5000` but you may wish to change this to see how it affects the model.\n\n> **TODO:** Complete the implementation for the `build_dict()` method below. Note that even though the vocab_size is set to `5000`, we only want to construct a mapping for the most frequently appearing `4998` words. This is because we want to reserve the special labels `0` for 'no word' and `1` for 'infrequent word'.", "_____no_output_____" ] ], [ [ "import numpy as np\n\ndef build_dict(data, vocab_size = 5000):\n    \"\"\"Construct and return a dictionary mapping each of the most frequently appearing words to a unique integer.\"\"\"\n    \n    # TODO: Determine how often each word appears in `data`. Note that `data` is a list of sentences and that a\n    # sentence is a list of words.\n    \n    word_count = {} # A dict storing the words that appear in the reviews along with how often they occur\n    \n    for review in data:\n        for word in review:\n            if word in word_count:\n                word_count[word] += 1\n            else:\n                word_count[word] = 1\n    \n    # TODO: Sort the words found in `data` so that sorted_words[0] is the most frequently appearing word and\n    # sorted_words[-1] is the least frequently appearing word.\n    \n    sorted_wordlist = sorted(word_count.items(), key=lambda x: x[1], reverse=True)\n    \n    sorted_words = [k[0] for k in sorted_wordlist]\n    \n    word_dict = {} # This is what we are building, a dictionary that translates words into integers\n    for idx, word in enumerate(sorted_words[:vocab_size - 2]): # The -2 is so that we save room for the 'no word'\n        word_dict[word] = idx + 2                              # 'infrequent' labels\n    \n    return word_dict", "_____no_output_____" ], [ "word_dict = build_dict(train_X)", "_____no_output_____" ] ], [ [ "**Question:** What are the five most frequently appearing (tokenized) words in the training set? Does it make sense that these words appear frequently in the training set?", "_____no_output_____" ], [ "**Answer:**\nThe five most frequently appearing (stemmed) words in the training set are \"movi\", \"film\", \"one\", \"like\", and \"time\".\nThis makes sense because these are exactly the words we would expect to show up in reviews of almost any film.", "_____no_output_____" ] ], [ [ "# TODO: Use this space to determine the five most frequently appearing words in the training set.\nprint(list(word_dict.keys())[0:5])", "['movi', 'film', 'one', 'like', 'time']\n" ] ], [ [ "### Save `word_dict`\n\nLater on when we construct an endpoint which processes a submitted review we will need to make use of the `word_dict` which we have created. 
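A couple of quick, optional checks before persisting it can confirm the mapping looks as expected (a small sketch; the expected values follow from how `build_dict` was written above):

```python
# The dictionary should hold vocab_size - 2 = 4998 entries, and the most frequent
# stemmed token ('movi' in this run) should map to index 2.
print(len(word_dict))
print(word_dict.get('movi'))
```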
As such, we will save it to a file now for future use.", "_____no_output_____" ] ], [ [ "data_dir = '../data/pytorch' # The folder we will use for storing data\nif not os.path.exists(data_dir): # Make sure that the folder exists\n os.makedirs(data_dir)", "_____no_output_____" ], [ "with open(os.path.join(data_dir, 'word_dict.pkl'), \"wb\") as f:\n pickle.dump(word_dict, f)", "_____no_output_____" ] ], [ [ "### Transform the reviews\n\nNow that we have our word dictionary which allows us to transform the words appearing in the reviews into integers, it is time to make use of it and convert our reviews to their integer sequence representation, making sure to pad or truncate to a fixed length, which in our case is `500`.", "_____no_output_____" ] ], [ [ "def convert_and_pad(word_dict, sentence, pad=500):\n NOWORD = 0 # We will use 0 to represent the 'no word' category\n INFREQ = 1 # and we use 1 to represent the infrequent words, i.e., words not appearing in word_dict\n \n working_sentence = [NOWORD] * pad\n \n for word_index, word in enumerate(sentence[:pad]):\n if word in word_dict:\n working_sentence[word_index] = word_dict[word]\n else:\n working_sentence[word_index] = INFREQ\n \n return working_sentence, min(len(sentence), pad)\n\ndef convert_and_pad_data(word_dict, data, pad=500):\n result = []\n lengths = []\n \n for sentence in data:\n converted, leng = convert_and_pad(word_dict, sentence, pad)\n result.append(converted)\n lengths.append(leng)\n \n return np.array(result), np.array(lengths)", "_____no_output_____" ], [ "train_X, train_X_len = convert_and_pad_data(word_dict, train_X)\ntest_X, test_X_len = convert_and_pad_data(word_dict, test_X)", "_____no_output_____" ] ], [ [ "As a quick check to make sure that things are working as intended, check to see what one of the reviews in the training set looks like after having been processeed. Does this look reasonable? What is the length of a review in the training set?", "_____no_output_____" ] ], [ [ "# Use this cell to examine one of the processed reviews to make sure everything is working as intended.\nprint(train_X[100])", "[1087 1 1116 130 1014 6 34 3 582 169 46 108 60 1\n 4877 3678 125 1968 854 28 198 13 1914 702 1 2316 4102 244\n 15 65 854 3792 1188 1074 1142 65 266 338 18 1081 43 324\n 64 43 450 148 97 707 360 2795 1249 2097 44 856 830 49\n 11 4 136 185 1885 2398 1311 4 12 1914 702 235 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0]\n" ] ], [ [ "**Question:** In the cells above we use the `preprocess_data` and `convert_and_pad_data` methods to process both the training and testing set. 
Why or why not might this be a problem?", "_____no_output_____" ], [ "**Answer:**\nAlthough `preprocess_data` removes punctuation and stopwords and `convert_and_pad_data` truncates or pads each review to a length of 500, applying these two steps to both the training and the testing set keeps the two sets consistent with each other, which is what the model requires. Thus, it won't be a problem.", "_____no_output_____" ], [ "## Step 3: Upload the data to S3\n\nAs in the XGBoost notebook, we will need to upload the training dataset to S3 in order for our training code to access it. For now we will save it locally and we will upload to S3 later on.\n\n### Save the processed training dataset locally\n\nIt is important to note the format of the data that we are saving as we will need to know it when we write the training code. In our case, each row of the dataset has the form `label`, `length`, `review[500]` where `review[500]` is a sequence of `500` integers representing the words in the review.", "_____no_output_____" ] ], [ [ "import pandas as pd\n \npd.concat([pd.DataFrame(train_y), pd.DataFrame(train_X_len), pd.DataFrame(train_X)], axis=1) \\\n        .to_csv(os.path.join(data_dir, 'train.csv'), header=False, index=False)", "_____no_output_____" ] ], [ [ "### Uploading the training data\n\n\nNext, we need to upload the training data to the SageMaker default S3 bucket so that we can provide access to it while training our model.", "_____no_output_____" ] ], [ [ "import sagemaker\n\nsagemaker_session = sagemaker.Session()\n\nbucket = sagemaker_session.default_bucket()\nprefix = 'sagemaker/sentiment_rnn'\n\nrole = sagemaker.get_execution_role()", "_____no_output_____" ], [ "input_data = sagemaker_session.upload_data(path=data_dir, bucket=bucket, key_prefix=prefix)", "_____no_output_____" ] ], [ [ "**NOTE:** The cell above uploads the entire contents of our data directory. This includes the `word_dict.pkl` file. This is fortunate as we will need this later on when we create an endpoint that accepts an arbitrary review. For now, we will just take note of the fact that it resides in the data directory (and so also in the S3 training bucket) and that we will need to make sure it gets saved in the model directory.", "_____no_output_____" ], [ "## Step 4: Build and Train the PyTorch Model\n\nIn the XGBoost notebook we discussed what a model is in the SageMaker framework. In particular, a model comprises three objects\n\n - Model Artifacts,\n - Training Code, and\n - Inference Code,\n \neach of which interacts with the others. In the XGBoost example we used training and inference code that was provided by Amazon. Here we will still be using containers provided by Amazon with the added benefit of being able to include our own custom code.\n\nWe will start by implementing our own neural network in PyTorch along with a training script. For the purposes of this project we have provided the necessary model object in the `model.py` file, inside of the `train` folder. 
You can see the provided implementation by running the cell below.", "_____no_output_____" ] ], [ [ "!pygmentize train/model.py", "\u001b[34mimport\u001b[39;49;00m \u001b[04m\u001b[36mtorch.nn\u001b[39;49;00m \u001b[34mas\u001b[39;49;00m \u001b[04m\u001b[36mnn\u001b[39;49;00m\r\n\r\n\u001b[34mclass\u001b[39;49;00m \u001b[04m\u001b[32mLSTMClassifier\u001b[39;49;00m(nn.Module):\r\n \u001b[33m\"\"\"\u001b[39;49;00m\r\n\u001b[33m This is the simple RNN model we will be using to perform Sentiment Analysis.\u001b[39;49;00m\r\n\u001b[33m \"\"\"\u001b[39;49;00m\r\n\r\n \u001b[34mdef\u001b[39;49;00m \u001b[32m__init__\u001b[39;49;00m(\u001b[36mself\u001b[39;49;00m, embedding_dim, hidden_dim, vocab_size):\r\n \u001b[33m\"\"\"\u001b[39;49;00m\r\n\u001b[33m Initialize the model by settingg up the various layers.\u001b[39;49;00m\r\n\u001b[33m \"\"\"\u001b[39;49;00m\r\n \u001b[36msuper\u001b[39;49;00m(LSTMClassifier, \u001b[36mself\u001b[39;49;00m).\u001b[32m__init__\u001b[39;49;00m()\r\n\r\n \u001b[36mself\u001b[39;49;00m.embedding = nn.Embedding(vocab_size, embedding_dim, padding_idx=\u001b[34m0\u001b[39;49;00m)\r\n \u001b[36mself\u001b[39;49;00m.lstm = nn.LSTM(embedding_dim, hidden_dim)\r\n \u001b[36mself\u001b[39;49;00m.dense = nn.Linear(in_features=hidden_dim, out_features=\u001b[34m1\u001b[39;49;00m)\r\n \u001b[36mself\u001b[39;49;00m.sig = nn.Sigmoid()\r\n \r\n \u001b[36mself\u001b[39;49;00m.word_dict = \u001b[36mNone\u001b[39;49;00m\r\n\r\n \u001b[34mdef\u001b[39;49;00m \u001b[32mforward\u001b[39;49;00m(\u001b[36mself\u001b[39;49;00m, x):\r\n \u001b[33m\"\"\"\u001b[39;49;00m\r\n\u001b[33m Perform a forward pass of our model on some input.\u001b[39;49;00m\r\n\u001b[33m \"\"\"\u001b[39;49;00m\r\n x = x.t()\r\n lengths = x[\u001b[34m0\u001b[39;49;00m,:]\r\n reviews = x[\u001b[34m1\u001b[39;49;00m:,:]\r\n embeds = \u001b[36mself\u001b[39;49;00m.embedding(reviews)\r\n lstm_out, _ = \u001b[36mself\u001b[39;49;00m.lstm(embeds)\r\n out = \u001b[36mself\u001b[39;49;00m.dense(lstm_out)\r\n out = out[lengths - \u001b[34m1\u001b[39;49;00m, \u001b[36mrange\u001b[39;49;00m(\u001b[36mlen\u001b[39;49;00m(lengths))]\r\n \u001b[34mreturn\u001b[39;49;00m \u001b[36mself\u001b[39;49;00m.sig(out.squeeze())\r\n" ] ], [ [ "The important takeaway from the implementation provided is that there are three parameters that we may wish to tweak to improve the performance of our model. These are the embedding dimension, the hidden dimension and the size of the vocabulary. We will likely want to make these parameters configurable in the training script so that if we wish to modify them we do not need to modify the script itself. We will see how to do this later on. To start we will write some of the training code in the notebook so that we can more easily diagnose any issues that arise.\n\nFirst we will load a small portion of the training data set to use as a sample. It would be very time consuming to try and train the model completely in the notebook as we do not have access to a gpu and the compute instance that we are using is not particularly powerful. 
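For a sense of scale, the network implied by the defaults used later in this notebook (embedding dimension `32`, hidden dimension `100`, vocabulary size `5000`) has roughly 200,000 trainable parameters, so the cost lies less in the model itself than in pushing 25,000 padded reviews through an LSTM on a CPU. A quick way to check this (a sketch, assuming those same dimensions):

```python
from train.model import LSTMClassifier

# Rough parameter count for the default configuration used in the next cells.
tmp_model = LSTMClassifier(32, 100, 5000)
print('Trainable parameters:', sum(p.numel() for p in tmp_model.parameters()))
```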
However, we can work on a small bit of the data to get a feel for how our training script is behaving.", "_____no_output_____" ] ], [ [ "import torch\nimport torch.utils.data\n\n# Read in only the first 250 rows\ntrain_sample = pd.read_csv(os.path.join(data_dir, 'train.csv'), header=None, names=None, nrows=250)\n\n# Turn the input pandas dataframe into tensors\ntrain_sample_y = torch.from_numpy(train_sample[[0]].values).float().squeeze()\ntrain_sample_X = torch.from_numpy(train_sample.drop([0], axis=1).values).long()\n\n# Build the dataset\ntrain_sample_ds = torch.utils.data.TensorDataset(train_sample_X, train_sample_y)\n# Build the dataloader\ntrain_sample_dl = torch.utils.data.DataLoader(train_sample_ds, batch_size=50)", "_____no_output_____" ] ], [ [ "### (TODO) Writing the training method\n\nNext we need to write the training code itself. This should be very similar to training methods that you have written before to train PyTorch models. We will leave any difficult aspects such as model saving / loading and parameter loading until a little later.", "_____no_output_____" ] ], [ [ "def train(model, train_loader, epochs, optimizer, loss_fn, device):\n for epoch in range(1, epochs + 1):\n model.train()\n total_loss = 0\n for batch in train_loader: \n batch_X, batch_y = batch\n \n batch_X = batch_X.to(device)\n batch_y = batch_y.to(device)\n \n # TODO: Complete this train method to train the model provided.\n optimizer.zero_grad()\n out = model.forward(batch_X)\n loss = loss_fn(out, batch_y)\n loss.backward()\n optimizer.step()\n \n total_loss += loss.data.item()\n print(\"Epoch: {}, BCELoss: {}\".format(epoch, total_loss / len(train_loader)))", "_____no_output_____" ] ], [ [ "Supposing we have the training method above, we will test that it is working by writing a bit of code in the notebook that executes our training method on the small sample training set that we loaded earlier. The reason for doing this in the notebook is so that we have an opportunity to fix any errors that arise early when they are easier to diagnose.", "_____no_output_____" ] ], [ [ "import torch.optim as optim\nfrom train.model import LSTMClassifier\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nmodel = LSTMClassifier(32, 100, 5000).to(device)\noptimizer = optim.Adam(model.parameters())\nloss_fn = torch.nn.BCELoss()\n\ntrain(model, train_sample_dl, 5, optimizer, loss_fn, device)", "Epoch: 1, BCELoss: 0.6916102766990662\nEpoch: 2, BCELoss: 0.6811666488647461\nEpoch: 3, BCELoss: 0.6722036361694336\nEpoch: 4, BCELoss: 0.6627201437950134\nEpoch: 5, BCELoss: 0.6516857743263245\n" ] ], [ [ "In order to construct a PyTorch model using SageMaker we must provide SageMaker with a training script. We may optionally include a directory which will be copied to the container and from which our training code will be run. When the training container is executed it will check the uploaded directory (if there is one) for a `requirements.txt` file and install any required Python libraries, after which the training script will be run.", "_____no_output_____" ], [ "### (TODO) Training the model\n\nWhen a PyTorch model is constructed in SageMaker, an entry point must be specified. This is the Python file which will be executed when the model is trained. Inside of the `train` directory is a file called `train.py` which has been provided and which contains most of the necessary code to train our model. 
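For orientation, such a script typically reads its hyperparameters and data locations from command-line arguments and the SageMaker environment variables visible in the training logs further down; a hypothetical sketch (not the exact contents of the provided `train.py`) looks like this:

```python
import argparse
import os

# Sketch of how a SageMaker training script usually receives its configuration.
parser = argparse.ArgumentParser()
parser.add_argument('--epochs', type=int, default=10)
parser.add_argument('--hidden_dim', type=int, default=100)
parser.add_argument('--model-dir', type=str, default=os.environ.get('SM_MODEL_DIR'))
parser.add_argument('--data-dir', type=str, default=os.environ.get('SM_CHANNEL_TRAINING'))
args = parser.parse_args()
```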
The only thing that is missing is the implementation of the `train()` method which you wrote earlier in this notebook.\n\n**TODO**: Copy the `train()` method written above and paste it into the `train/train.py` file where required.\n\nThe way that SageMaker passes hyperparameters to the training script is by way of arguments. These arguments can then be parsed and used in the training script. To see how this is done take a look at the provided `train/train.py` file.", "_____no_output_____" ] ], [ [ "from sagemaker.pytorch import PyTorch\n\nestimator = PyTorch(entry_point=\"train.py\",\n source_dir=\"train\",\n role=role,\n framework_version='0.4.0',\n train_instance_count=1,\n train_instance_type='ml.p2.xlarge',\n hyperparameters={\n 'epochs': 10,\n 'hidden_dim': 200,\n })", "_____no_output_____" ], [ "estimator.fit({'training': input_data})", "2020-01-28 00:38:48 Starting - Starting the training job...\n2020-01-28 00:38:50 Starting - Launching requested ML instances......\n2020-01-28 00:39:57 Starting - Preparing the instances for training......\n2020-01-28 00:41:08 Downloading - Downloading input data......\n2020-01-28 00:42:13 Training - Training image download completed. Training in progress..\u001b[34mbash: cannot set terminal process group (-1): Inappropriate ioctl for device\u001b[0m\n\u001b[34mbash: no job control in this shell\u001b[0m\n\u001b[34m2020-01-28 00:42:14,580 sagemaker-containers INFO Imported framework sagemaker_pytorch_container.training\u001b[0m\n\u001b[34m2020-01-28 00:42:14,604 sagemaker_pytorch_container.training INFO Block until all host DNS lookups succeed.\u001b[0m\n\u001b[34m2020-01-28 00:42:14,819 sagemaker_pytorch_container.training INFO Invoking user training script.\u001b[0m\n\u001b[34m2020-01-28 00:42:15,036 sagemaker-containers INFO Module train does not provide a setup.py. \u001b[0m\n\u001b[34mGenerating setup.py\u001b[0m\n\u001b[34m2020-01-28 00:42:15,036 sagemaker-containers INFO Generating setup.cfg\u001b[0m\n\u001b[34m2020-01-28 00:42:15,036 sagemaker-containers INFO Generating MANIFEST.in\u001b[0m\n\u001b[34m2020-01-28 00:42:15,037 sagemaker-containers INFO Installing module with the following command:\u001b[0m\n\u001b[34m/usr/bin/python -m pip install -U . 
-r requirements.txt\u001b[0m\n\u001b[34mProcessing /opt/ml/code\u001b[0m\n\u001b[34mCollecting pandas (from -r requirements.txt (line 1))\n Downloading https://files.pythonhosted.org/packages/74/24/0cdbf8907e1e3bc5a8da03345c23cbed7044330bb8f73bb12e711a640a00/pandas-0.24.2-cp35-cp35m-manylinux1_x86_64.whl (10.0MB)\u001b[0m\n\u001b[34mCollecting numpy (from -r requirements.txt (line 2))\u001b[0m\n\u001b[34m Downloading https://files.pythonhosted.org/packages/52/e6/1715e592ef47f28f3f50065322423bb75619ed2f7c24be86380ecc93503c/numpy-1.18.1-cp35-cp35m-manylinux1_x86_64.whl (19.9MB)\u001b[0m\n\u001b[34mCollecting nltk (from -r requirements.txt (line 3))\n Downloading https://files.pythonhosted.org/packages/f6/1d/d925cfb4f324ede997f6d47bea4d9babba51b49e87a767c170b77005889d/nltk-3.4.5.zip (1.5MB)\u001b[0m\n\u001b[34mCollecting beautifulsoup4 (from -r requirements.txt (line 4))\n Downloading https://files.pythonhosted.org/packages/cb/a1/c698cf319e9cfed6b17376281bd0efc6bfc8465698f54170ef60a485ab5d/beautifulsoup4-4.8.2-py3-none-any.whl (106kB)\u001b[0m\n\u001b[34mCollecting html5lib (from -r requirements.txt (line 5))\n Downloading https://files.pythonhosted.org/packages/a5/62/bbd2be0e7943ec8504b517e62bab011b4946e1258842bc159e5dfde15b96/html5lib-1.0.1-py2.py3-none-any.whl (117kB)\u001b[0m\n\u001b[34mRequirement already satisfied, skipping upgrade: python-dateutil>=2.5.0 in /usr/local/lib/python3.5/dist-packages (from pandas->-r requirements.txt (line 1)) (2.7.5)\u001b[0m\n\u001b[34mCollecting pytz>=2011k (from pandas->-r requirements.txt (line 1))\n Downloading https://files.pythonhosted.org/packages/e7/f9/f0b53f88060247251bf481fa6ea62cd0d25bf1b11a87888e53ce5b7c8ad2/pytz-2019.3-py2.py3-none-any.whl (509kB)\u001b[0m\n\u001b[34mRequirement already satisfied, skipping upgrade: six in /usr/local/lib/python3.5/dist-packages (from nltk->-r requirements.txt (line 3)) (1.11.0)\u001b[0m\n\u001b[34mCollecting soupsieve>=1.2 (from beautifulsoup4->-r requirements.txt (line 4))\n Downloading https://files.pythonhosted.org/packages/81/94/03c0f04471fc245d08d0a99f7946ac228ca98da4fa75796c507f61e688c2/soupsieve-1.9.5-py2.py3-none-any.whl\u001b[0m\n\u001b[34mCollecting webencodings (from html5lib->-r requirements.txt (line 5))\n Downloading https://files.pythonhosted.org/packages/f4/24/2a3e3df732393fed8b3ebf2ec078f05546de641fe1b667ee316ec1dcf3b7/webencodings-0.5.1-py2.py3-none-any.whl\u001b[0m\n\u001b[34mBuilding wheels for collected packages: nltk, train\n Running setup.py bdist_wheel for nltk: started\u001b[0m\n\u001b[34m Running setup.py bdist_wheel for nltk: finished with status 'done'\n Stored in directory: /root/.cache/pip/wheels/96/86/f6/68ab24c23f207c0077381a5e3904b2815136b879538a24b483\n Running setup.py bdist_wheel for train: started\u001b[0m\n\u001b[34m Running setup.py bdist_wheel for train: finished with status 'done'\n Stored in directory: /tmp/pip-ephem-wheel-cache-q238_691/wheels/35/24/16/37574d11bf9bde50616c67372a334f94fa8356bc7164af8ca3\u001b[0m\n\u001b[34mSuccessfully built nltk train\u001b[0m\n\u001b[34mInstalling collected packages: numpy, pytz, pandas, nltk, soupsieve, beautifulsoup4, webencodings, html5lib, train\n Found existing installation: numpy 1.15.4\n Uninstalling numpy-1.15.4:\u001b[0m\n\u001b[34m Successfully uninstalled numpy-1.15.4\u001b[0m\n\u001b[34mSuccessfully installed beautifulsoup4-4.8.2 html5lib-1.0.1 nltk-3.4.5 numpy-1.18.1 pandas-0.24.2 pytz-2019.3 soupsieve-1.9.5 train-1.0.0 webencodings-0.5.1\u001b[0m\n\u001b[34mYou are using pip version 18.1, however version 20.0.2 is 
available.\u001b[0m\n\u001b[34mYou should consider upgrading via the 'pip install --upgrade pip' command.\u001b[0m\n\u001b[34m2020-01-28 00:42:26,755 sagemaker-containers INFO Invoking user script\n\u001b[0m\n\u001b[34mTraining Env:\n\u001b[0m\n\u001b[34m{\n \"num_gpus\": 1,\n \"hosts\": [\n \"algo-1\"\n ],\n \"output_data_dir\": \"/opt/ml/output/data\",\n \"additional_framework_parameters\": {},\n \"hyperparameters\": {\n \"hidden_dim\": 200,\n \"epochs\": 10\n },\n \"output_dir\": \"/opt/ml/output\",\n \"input_data_config\": {\n \"training\": {\n \"S3DistributionType\": \"FullyReplicated\",\n \"RecordWrapperType\": \"None\",\n \"TrainingInputMode\": \"File\"\n }\n },\n \"module_dir\": \"s3://sagemaker-us-east-1-045339395970/sagemaker-pytorch-2020-01-28-00-38-47-129/source/sourcedir.tar.gz\",\n \"model_dir\": \"/opt/ml/model\",\n \"output_intermediate_dir\": \"/opt/ml/output/intermediate\",\n \"input_dir\": \"/opt/ml/input\",\n \"module_name\": \"train\",\n \"current_host\": \"algo-1\",\n \"input_config_dir\": \"/opt/ml/input/config\",\n \"channel_input_dirs\": {\n \"training\": \"/opt/ml/input/data/training\"\n },\n \"network_interface_name\": \"eth0\",\n \"framework_module\": \"sagemaker_pytorch_container.training:main\",\n \"job_name\": \"sagemaker-pytorch-2020-01-28-00-38-47-129\",\n \"user_entry_point\": \"train.py\",\n \"resource_config\": {\n \"hosts\": [\n \"algo-1\"\n ],\n \"network_interface_name\": \"eth0\",\n \"current_host\": \"algo-1\"\n },\n \"num_cpus\": 4,\n \"log_level\": 20\u001b[0m\n\u001b[34m}\n\u001b[0m\n\u001b[34mEnvironment variables:\n\u001b[0m\n\u001b[34mSM_INPUT_DATA_CONFIG={\"training\":{\"RecordWrapperType\":\"None\",\"S3DistributionType\":\"FullyReplicated\",\"TrainingInputMode\":\"File\"}}\u001b[0m\n\u001b[34mSM_INPUT_CONFIG_DIR=/opt/ml/input/config\u001b[0m\n\u001b[34mSM_FRAMEWORK_MODULE=sagemaker_pytorch_container.training:main\u001b[0m\n\u001b[34mSM_CHANNEL_TRAINING=/opt/ml/input/data/training\u001b[0m\n\u001b[34mSM_MODULE_DIR=s3://sagemaker-us-east-1-045339395970/sagemaker-pytorch-2020-01-28-00-38-47-129/source/sourcedir.tar.gz\u001b[0m\n\u001b[34mSM_NUM_GPUS=1\u001b[0m\n\u001b[34mSM_CURRENT_HOST=algo-1\u001b[0m\n\u001b[34mSM_HOSTS=[\"algo-1\"]\u001b[0m\n\u001b[34mPYTHONPATH=/usr/local/bin:/usr/lib/python35.zip:/usr/lib/python3.5:/usr/lib/python3.5/plat-x86_64-linux-gnu:/usr/lib/python3.5/lib-dynload:/usr/local/lib/python3.5/dist-packages:/usr/lib/python3/dist-packages\u001b[0m\n\u001b[34mSM_OUTPUT_INTERMEDIATE_DIR=/opt/ml/output/intermediate\u001b[0m\n\u001b[34mSM_TRAINING_ENV={\"additional_framework_parameters\":{},\"channel_input_dirs\":{\"training\":\"/opt/ml/input/data/training\"},\"current_host\":\"algo-1\",\"framework_module\":\"sagemaker_pytorch_container.training:main\",\"hosts\":[\"algo-1\"],\"hyperparameters\":{\"epochs\":10,\"hidden_dim\":200},\"input_config_dir\":\"/opt/ml/input/config\",\"input_data_config\":{\"training\":{\"RecordWrapperType\":\"None\",\"S3DistributionType\":\"FullyReplicated\",\"TrainingInputMode\":\"File\"}},\"input_dir\":\"/opt/ml/input\",\"job_name\":\"sagemaker-pytorch-2020-01-28-00-38-47-129\",\"log_level\":20,\"model_dir\":\"/opt/ml/model\",\"module_dir\":\"s3://sagemaker-us-east-1-045339395970/sagemaker-pytorch-2020-01-28-00-38-47-129/source/sourcedir.tar.gz\",\"module_name\":\"train\",\"network_interface_name\":\"eth0\",\"num_cpus\":4,\"num_gpus\":1,\"output_data_dir\":\"/opt/ml/output/data\",\"output_dir\":\"/opt/ml/output\",\"output_intermediate_dir\":\"/opt/ml/output/intermediate\",\"resource_config\":{\"current
_host\":\"algo-1\",\"hosts\":[\"algo-1\"],\"network_interface_name\":\"eth0\"},\"user_entry_point\":\"train.py\"}\u001b[0m\n\u001b[34mSM_HPS={\"epochs\":10,\"hidden_dim\":200}\u001b[0m\n\u001b[34mSM_HP_EPOCHS=10\u001b[0m\n\u001b[34mSM_MODULE_NAME=train\u001b[0m\n\u001b[34mSM_NUM_CPUS=4\u001b[0m\n\u001b[34mSM_NETWORK_INTERFACE_NAME=eth0\u001b[0m\n\u001b[34mSM_INPUT_DIR=/opt/ml/input\u001b[0m\n\u001b[34mSM_OUTPUT_DIR=/opt/ml/output\u001b[0m\n\u001b[34mSM_MODEL_DIR=/opt/ml/model\u001b[0m\n\u001b[34mSM_OUTPUT_DATA_DIR=/opt/ml/output/data\u001b[0m\n\u001b[34mSM_HP_HIDDEN_DIM=200\u001b[0m\n\u001b[34mSM_LOG_LEVEL=20\u001b[0m\n\u001b[34mSM_USER_ENTRY_POINT=train.py\u001b[0m\n\u001b[34mSM_FRAMEWORK_PARAMS={}\u001b[0m\n\u001b[34mSM_CHANNELS=[\"training\"]\u001b[0m\n\u001b[34mSM_RESOURCE_CONFIG={\"current_host\":\"algo-1\",\"hosts\":[\"algo-1\"],\"network_interface_name\":\"eth0\"}\u001b[0m\n\u001b[34mSM_USER_ARGS=[\"--epochs\",\"10\",\"--hidden_dim\",\"200\"]\n\u001b[0m\n\u001b[34mInvoking script with the following command:\n\u001b[0m\n\u001b[34m/usr/bin/python -m train --epochs 10 --hidden_dim 200\n\n\u001b[0m\n\u001b[34mUsing device cuda.\u001b[0m\n\u001b[34mGet train data loader.\u001b[0m\n" ] ], [ [ "## Step 5: Testing the model\n\nAs mentioned at the top of this notebook, we will be testing this model by first deploying it and then sending the testing data to the deployed endpoint. We will do this so that we can make sure that the deployed model is working correctly.\n\n## Step 6: Deploy the model for testing\n\nNow that we have trained our model, we would like to test it to see how it performs. Currently our model takes input of the form `review_length, review[500]` where `review[500]` is a sequence of `500` integers which describe the words present in the review, encoded using `word_dict`. Fortunately for us, SageMaker provides built-in inference code for models with simple inputs such as this.\n\nThere is one thing that we need to provide, however, and that is a function which loads the saved model. This function must be called `model_fn()` and takes as its only parameter a path to the directory where the model artifacts are stored. This function must also be present in the python file which we specified as the entry point. In our case the model loading function has been provided and so no changes need to be made.\n\n**NOTE**: When the built-in inference code is run it must import the `model_fn()` method from the `train.py` file. This is why the training code is wrapped in a main guard ( ie, `if __name__ == '__main__':` )\n\nSince we don't need to change anything in the code that was uploaded during training, we can simply deploy the current model as-is.\n\n**NOTE:** When deploying a model you are asking SageMaker to launch an compute instance that will wait for data to be sent to it. As a result, this compute instance will continue to run until *you* shut it down. This is important to know since the cost of a deployed endpoint depends on how long it has been running for.\n\nIn other words **If you are no longer using a deployed endpoint, shut it down!**\n\n**TODO:** Deploy the trained model.", "_____no_output_____" ] ], [ [ "# TODO: Deploy the trained model\nestimator_predictor = estimator.deploy(initial_instance_count=1, instance_type='ml.m4.xlarge')", "-----------------!" ] ], [ [ "## Step 7 - Use the model for testing\n\nOnce deployed, we can read in the test data and send it off to our deployed model to get some results. 
Once we collect all of the results we can determine how accurate our model is.", "_____no_output_____" ] ], [ [ "test_X = pd.concat([pd.DataFrame(test_X_len), pd.DataFrame(test_X)], axis=1)", "_____no_output_____" ], [ "# We split the data into chunks and send each chunk separately, accumulating the results.\n\ndef predict(data, rows=512):\n    split_array = np.array_split(data, int(data.shape[0] / float(rows) + 1))\n    predictions = np.array([])\n    for array in split_array:\n        predictions = np.append(predictions, estimator_predictor.predict(array))\n    \n    return predictions", "_____no_output_____" ], [ "predictions = predict(test_X.values)\npredictions = [round(num) for num in predictions]", "_____no_output_____" ], [ "from sklearn.metrics import accuracy_score\naccuracy_score(test_y, predictions)", "_____no_output_____" ] ], [ [ "**Question:** How does this model compare to the XGBoost model you created earlier? Why might these two models perform differently on this dataset? Which do *you* think is better for sentiment analysis?", "_____no_output_____" ], [ "**Answer:**\nThe accuracy is quite comparable to that of the XGBoost model. Personally, I think the RNN model is better suited to sentiment analysis because it treats a review as an ordered sequence of words rather than a bag of words, and its built-in memory is useful for tasks that are time or sequence dependent, such as sentiment analysis.", "_____no_output_____" ], [ "### (TODO) More testing\n\nWe now have a trained model which has been deployed and which we can send processed reviews to and which returns the predicted sentiment. However, ultimately we would like to be able to send our model an unprocessed review. That is, we would like to send the review itself as a string. For example, suppose we wish to send the following review to our model.", "_____no_output_____" ] ], [ [ "test_review = 'The simplest pleasures in life are the best, and this film is one of them. Combining a rather basic storyline of love and adventure this movie transcends the usual weekend fair with wit and unmitigated charm.'", "_____no_output_____" ] ], [ [ "The question we now need to answer is, how do we send this review to our model?\n\nRecall in the first section of this notebook we did a bunch of data processing to the IMDb dataset. In particular, we did two specific things to the provided reviews.\n - Removed any html tags and stemmed the input\n - Encoded the review as a sequence of integers using `word_dict`\n \nIn order to process the review we will need to repeat these two steps.\n\n**TODO**: Using the `review_to_words` and `convert_and_pad` methods from section one, convert `test_review` into a numpy array `test_data` suitable to send to our model. 
Remember that our model expects input of the form `review_length, review[500]`.", "_____no_output_____" ] ], [ [ "# TODO: Convert test_review into a form usable by the model and save the results in test_data\ndata, length = convert_and_pad(word_dict, review_to_words(test_review))\ntest_data = [[length] + data]", "_____no_output_____" ] ], [ [ "Now that we have processed the review, we can send the resulting array to our model to predict the sentiment of the review.", "_____no_output_____" ] ], [ [ "estimator_predictor.predict(test_data)", "_____no_output_____" ] ], [ [ "Since the return value of our model is close to `1`, we can be certain that the review we submitted is positive.", "_____no_output_____" ], [ "### Delete the endpoint\n\nOf course, just like in the XGBoost notebook, once we've deployed an endpoint it continues to run until we tell it to shut down. Since we are done using our endpoint for now, we can delete it.", "_____no_output_____" ] ], [ [ "estimator.delete_endpoint()", "_____no_output_____" ] ], [ [ "## Step 6 (again) - Deploy the model for the web app\n\nNow that we know that our model is working, it's time to create some custom inference code so that we can send the model a review which has not been processed and have it determine the sentiment of the review.\n\nAs we saw above, by default the estimator which we created, when deployed, will use the entry script and directory which we provided when creating the model. However, since we now wish to accept a string as input and our model expects a processed review, we need to write some custom inference code.\n\nWe will store the code that we write in the `serve` directory. Provided in this directory is the `model.py` file that we used to construct our model, a `utils.py` file which contains the `review_to_words` and `convert_and_pad` pre-processing functions which we used during the initial data processing, and `predict.py`, the file which will contain our custom inference code. Note also that `requirements.txt` is present which will tell SageMaker what Python libraries are required by our custom inference code.\n\nWhen deploying a PyTorch model in SageMaker, you are expected to provide four functions which the SageMaker inference container will use.\n - `model_fn`: This function is the same function that we used in the training script and it tells SageMaker how to load our model.\n - `input_fn`: This function receives the raw serialized input that has been sent to the model's endpoint and its job is to de-serialize and make the input available for the inference code.\n - `output_fn`: This function takes the output of the inference code and its job is to serialize this output and return it to the caller of the model's endpoint.\n - `predict_fn`: The heart of the inference script, this is where the actual prediction is done and is the function which you will need to complete.\n\nFor the simple website that we are constructing during this project, the `input_fn` and `output_fn` methods are relatively straightforward. We only require being able to accept a string as input and we expect to return a single value as output. 
You might imagine though that in a more complex application the input or output may be image data or some other binary data which would require some effort to serialize.\n\n### (TODO) Writing inference code\n\nBefore writing our custom inference code, we will begin by taking a look at the code which has been provided.", "_____no_output_____" ] ], [ [ "!pygmentize serve/predict.py", "\u001b[34mimport\u001b[39;49;00m \u001b[04m\u001b[36margparse\u001b[39;49;00m\r\n\u001b[34mimport\u001b[39;49;00m \u001b[04m\u001b[36mjson\u001b[39;49;00m\r\n\u001b[34mimport\u001b[39;49;00m \u001b[04m\u001b[36mos\u001b[39;49;00m\r\n\u001b[34mimport\u001b[39;49;00m \u001b[04m\u001b[36mpickle\u001b[39;49;00m\r\n\u001b[34mimport\u001b[39;49;00m \u001b[04m\u001b[36msys\u001b[39;49;00m\r\n\u001b[34mimport\u001b[39;49;00m \u001b[04m\u001b[36msagemaker_containers\u001b[39;49;00m\r\n\u001b[34mimport\u001b[39;49;00m \u001b[04m\u001b[36mpandas\u001b[39;49;00m \u001b[34mas\u001b[39;49;00m \u001b[04m\u001b[36mpd\u001b[39;49;00m\r\n\u001b[34mimport\u001b[39;49;00m \u001b[04m\u001b[36mnumpy\u001b[39;49;00m \u001b[34mas\u001b[39;49;00m \u001b[04m\u001b[36mnp\u001b[39;49;00m\r\n\u001b[34mimport\u001b[39;49;00m \u001b[04m\u001b[36mtorch\u001b[39;49;00m\r\n\u001b[34mimport\u001b[39;49;00m \u001b[04m\u001b[36mtorch.nn\u001b[39;49;00m \u001b[34mas\u001b[39;49;00m \u001b[04m\u001b[36mnn\u001b[39;49;00m\r\n\u001b[34mimport\u001b[39;49;00m \u001b[04m\u001b[36mtorch.optim\u001b[39;49;00m \u001b[34mas\u001b[39;49;00m \u001b[04m\u001b[36moptim\u001b[39;49;00m\r\n\u001b[34mimport\u001b[39;49;00m \u001b[04m\u001b[36mtorch.utils.data\u001b[39;49;00m\r\n\r\n\u001b[34mfrom\u001b[39;49;00m \u001b[04m\u001b[36mmodel\u001b[39;49;00m \u001b[34mimport\u001b[39;49;00m LSTMClassifier\r\n\r\n\u001b[34mfrom\u001b[39;49;00m \u001b[04m\u001b[36mutils\u001b[39;49;00m \u001b[34mimport\u001b[39;49;00m review_to_words, convert_and_pad\r\n\r\n\u001b[34mdef\u001b[39;49;00m \u001b[32mmodel_fn\u001b[39;49;00m(model_dir):\r\n \u001b[33m\"\"\"Load the PyTorch model from the `model_dir` directory.\"\"\"\u001b[39;49;00m\r\n \u001b[34mprint\u001b[39;49;00m(\u001b[33m\"\u001b[39;49;00m\u001b[33mLoading model.\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m)\r\n\r\n \u001b[37m# First, load the parameters used to create the model.\u001b[39;49;00m\r\n model_info = {}\r\n model_info_path = os.path.join(model_dir, \u001b[33m'\u001b[39;49;00m\u001b[33mmodel_info.pth\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m)\r\n \u001b[34mwith\u001b[39;49;00m \u001b[36mopen\u001b[39;49;00m(model_info_path, \u001b[33m'\u001b[39;49;00m\u001b[33mrb\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m) \u001b[34mas\u001b[39;49;00m f:\r\n model_info = torch.load(f)\r\n\r\n \u001b[34mprint\u001b[39;49;00m(\u001b[33m\"\u001b[39;49;00m\u001b[33mmodel_info: {}\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m.format(model_info))\r\n\r\n \u001b[37m# Determine the device and construct the model.\u001b[39;49;00m\r\n device = torch.device(\u001b[33m\"\u001b[39;49;00m\u001b[33mcuda\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m \u001b[34mif\u001b[39;49;00m torch.cuda.is_available() \u001b[34melse\u001b[39;49;00m \u001b[33m\"\u001b[39;49;00m\u001b[33mcpu\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m)\r\n model = LSTMClassifier(model_info[\u001b[33m'\u001b[39;49;00m\u001b[33membedding_dim\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m], model_info[\u001b[33m'\u001b[39;49;00m\u001b[33mhidden_dim\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m], 
model_info[\u001b[33m'\u001b[39;49;00m\u001b[33mvocab_size\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m])\r\n\r\n \u001b[37m# Load the store model parameters.\u001b[39;49;00m\r\n model_path = os.path.join(model_dir, \u001b[33m'\u001b[39;49;00m\u001b[33mmodel.pth\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m)\r\n \u001b[34mwith\u001b[39;49;00m \u001b[36mopen\u001b[39;49;00m(model_path, \u001b[33m'\u001b[39;49;00m\u001b[33mrb\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m) \u001b[34mas\u001b[39;49;00m f:\r\n model.load_state_dict(torch.load(f))\r\n\r\n \u001b[37m# Load the saved word_dict.\u001b[39;49;00m\r\n word_dict_path = os.path.join(model_dir, \u001b[33m'\u001b[39;49;00m\u001b[33mword_dict.pkl\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m)\r\n \u001b[34mwith\u001b[39;49;00m \u001b[36mopen\u001b[39;49;00m(word_dict_path, \u001b[33m'\u001b[39;49;00m\u001b[33mrb\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m) \u001b[34mas\u001b[39;49;00m f:\r\n model.word_dict = pickle.load(f)\r\n\r\n model.to(device).eval()\r\n\r\n \u001b[34mprint\u001b[39;49;00m(\u001b[33m\"\u001b[39;49;00m\u001b[33mDone loading model.\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m)\r\n \u001b[34mreturn\u001b[39;49;00m model\r\n\r\n\u001b[34mdef\u001b[39;49;00m \u001b[32minput_fn\u001b[39;49;00m(serialized_input_data, content_type):\r\n \u001b[34mprint\u001b[39;49;00m(\u001b[33m'\u001b[39;49;00m\u001b[33mDeserializing the input data.\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m)\r\n \u001b[34mif\u001b[39;49;00m content_type == \u001b[33m'\u001b[39;49;00m\u001b[33mtext/plain\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m:\r\n data = serialized_input_data.decode(\u001b[33m'\u001b[39;49;00m\u001b[33mutf-8\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m)\r\n \u001b[34mreturn\u001b[39;49;00m data\r\n \u001b[34mraise\u001b[39;49;00m \u001b[36mException\u001b[39;49;00m(\u001b[33m'\u001b[39;49;00m\u001b[33mRequested unsupported ContentType in content_type: \u001b[39;49;00m\u001b[33m'\u001b[39;49;00m + content_type)\r\n\r\n\u001b[34mdef\u001b[39;49;00m \u001b[32moutput_fn\u001b[39;49;00m(prediction_output, accept):\r\n \u001b[34mprint\u001b[39;49;00m(\u001b[33m'\u001b[39;49;00m\u001b[33mSerializing the generated output.\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m)\r\n \u001b[34mreturn\u001b[39;49;00m \u001b[36mstr\u001b[39;49;00m(prediction_output)\r\n\r\n\u001b[34mdef\u001b[39;49;00m \u001b[32mpredict_fn\u001b[39;49;00m(input_data, model):\r\n \u001b[34mprint\u001b[39;49;00m(\u001b[33m'\u001b[39;49;00m\u001b[33mInferring sentiment of input data.\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m)\r\n\r\n device = torch.device(\u001b[33m\"\u001b[39;49;00m\u001b[33mcuda\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m \u001b[34mif\u001b[39;49;00m torch.cuda.is_available() \u001b[34melse\u001b[39;49;00m \u001b[33m\"\u001b[39;49;00m\u001b[33mcpu\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m)\r\n \r\n \u001b[34mif\u001b[39;49;00m model.word_dict \u001b[35mis\u001b[39;49;00m \u001b[36mNone\u001b[39;49;00m:\r\n \u001b[34mraise\u001b[39;49;00m \u001b[36mException\u001b[39;49;00m(\u001b[33m'\u001b[39;49;00m\u001b[33mModel has not been loaded properly, no word_dict.\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m)\r\n \r\n \u001b[37m# TODO: Process input_data so that it is ready to be sent to our model.\u001b[39;49;00m\r\n \u001b[37m# You should produce two variables:\u001b[39;49;00m\r\n \u001b[37m# data_X - A sequence of length 500 which represents the converted review\u001b[39;49;00m\r\n \u001b[37m# data_len - The length of the review\u001b[39;49;00m\r\n\r\n data_X = 
\u001b[36mNone\u001b[39;49;00m\r\n data_len = \u001b[36mNone\u001b[39;49;00m\r\n\r\n \u001b[37m# Using data_X and data_len we construct an appropriate input tensor. Remember\u001b[39;49;00m\r\n \u001b[37m# that our model expects input data of the form 'len, review[500]'.\u001b[39;49;00m\r\n data_pack = np.hstack((data_len, data_X))\r\n data_pack = data_pack.reshape(\u001b[34m1\u001b[39;49;00m, -\u001b[34m1\u001b[39;49;00m)\r\n \r\n data = torch.from_numpy(data_pack)\r\n data = data.to(device)\r\n\r\n \u001b[37m# Make sure to put the model into evaluation mode\u001b[39;49;00m\r\n model.eval()\r\n\r\n \u001b[37m# TODO: Compute the result of applying the model to the input data. The variable `result` should\u001b[39;49;00m\r\n \u001b[37m# be a numpy array which contains a single integer which is either 1 or 0\u001b[39;49;00m\r\n\r\n result = \u001b[36mNone\u001b[39;49;00m\r\n\r\n \u001b[34mreturn\u001b[39;49;00m result\r\n" ] ], [ [ "As mentioned earlier, the `model_fn` method is the same as the one provided in the training code and the `input_fn` and `output_fn` methods are very simple and your task will be to complete the `predict_fn` method. Make sure that you save the completed file as `predict.py` in the `serve` directory.\n\n**TODO**: Complete the `predict_fn()` method in the `serve/predict.py` file.", "_____no_output_____" ], [ "### Deploying the model\n\nNow that the custom inference code has been written, we will create and deploy our model. To begin with, we need to construct a new PyTorchModel object which points to the model artifacts created during training and also points to the inference code that we wish to use. Then we can call the deploy method to launch the deployment container.\n\n**NOTE**: The default behaviour for a deployed PyTorch model is to assume that any input passed to the predictor is a `numpy` array. In our case we want to send a string so we need to construct a simple wrapper around the `RealTimePredictor` class to accomodate simple strings. In a more complicated situation you may want to provide a serialization object, for example if you wanted to sent image data.", "_____no_output_____" ] ], [ [ "from sagemaker.predictor import RealTimePredictor\nfrom sagemaker.pytorch import PyTorchModel\n\nclass StringPredictor(RealTimePredictor):\n def __init__(self, endpoint_name, sagemaker_session):\n super(StringPredictor, self).__init__(endpoint_name, sagemaker_session, content_type='text/plain')\n\nmodel = PyTorchModel(model_data=estimator.model_data,\n role = role,\n framework_version='0.4.0',\n entry_point='predict.py',\n source_dir='serve',\n predictor_cls=StringPredictor)\npredictor = model.deploy(initial_instance_count=1, instance_type='ml.m4.xlarge')", "-----------------!" ] ], [ [ "### Testing the model\n\nNow that we have deployed our model with the custom inference code, we should test to see if everything is working. Here we test our model by loading the first `250` positive and negative reviews and send them to the endpoint, then collect the results. 
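(For reference, one possible way to fill in the two TODOs in `serve/predict.py` is sketched below. This is a hypothetical completion, not necessarily the reference solution, and it relies only on the imports already present at the top of the provided file: `numpy`, `torch`, `review_to_words` and `convert_and_pad`.)

```python
# Hypothetical sketch of a completed predict_fn body for serve/predict.py.
def predict_fn(input_data, model):
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    if model.word_dict is None:
        raise Exception('Model has not been loaded properly, no word_dict.')

    # Convert the raw review string into a padded integer sequence plus its true length.
    data_X, data_len = convert_and_pad(model.word_dict, review_to_words(input_data))

    # Build the 'len, review[500]' input expected by the model (unchanged from the skeleton).
    data_pack = np.hstack((data_len, data_X)).reshape(1, -1)
    data = torch.from_numpy(data_pack).to(device)

    model.eval()

    # Run the model and round the sigmoid output to a 0/1 sentiment label.
    with torch.no_grad():
        output = model(data)
    return int(np.round(output.cpu().numpy()))
```

With the inference code in place, the cells below exercise the deployed endpoint.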
The reason for only sending some of the data is that the amount of time it takes for our model to process the input and then perform inference is quite long and so testing the entire data set would be prohibitive.", "_____no_output_____" ] ], [ [ "import glob\n\ndef test_reviews(data_dir='../data/aclImdb', stop=250):\n \n results = []\n ground = []\n \n # We make sure to test both positive and negative reviews \n for sentiment in ['pos', 'neg']:\n \n path = os.path.join(data_dir, 'test', sentiment, '*.txt')\n files = glob.glob(path)\n \n files_read = 0\n \n print('Starting ', sentiment, ' files')\n \n # Iterate through the files and send them to the predictor\n for f in files:\n with open(f) as review:\n # First, we store the ground truth (was the review positive or negative)\n if sentiment == 'pos':\n ground.append(1)\n else:\n ground.append(0)\n # Read in the review and convert to 'utf-8' for transmission via HTTP\n review_input = review.read().encode('utf-8')\n # Send the review to the predictor and store the results\n pred = predictor.predict(review_input)\n results.append(float(pred))\n # Sending reviews to our endpoint one at a time takes a while so we\n # only send a small number of reviews\n files_read += 1\n if files_read == stop:\n break\n \n return ground, results", "_____no_output_____" ], [ "ground, results = test_reviews()", "Starting pos files\nStarting neg files\n" ], [ "from sklearn.metrics import accuracy_score\naccuracy_score(ground, results)", "_____no_output_____" ] ], [ [ "As an additional test, we can try sending the `test_review` that we looked at earlier.", "_____no_output_____" ] ], [ [ "predictor.predict(test_review)", "_____no_output_____" ] ], [ [ "Now that we know our endpoint is working as expected, we can set up the web page that will interact with it. If you don't have time to finish the project now, make sure to skip down to the end of this notebook and shut down your endpoint. You can deploy it again when you come back.", "_____no_output_____" ], [ "## Step 7 (again): Use the model for the web app\n\n> **TODO:** This entire section and the next contain tasks for you to complete, mostly using the AWS console.\n\nSo far we have been accessing our model endpoint by constructing a predictor object which uses the endpoint and then just using the predictor object to perform inference. What if we wanted to create a web app which accessed our model? The way things are set up currently makes that not possible since in order to access a SageMaker endpoint the app would first have to authenticate with AWS using an IAM role which included access to SageMaker endpoints. However, there is an easier way! We just need to use some additional AWS services.\n\n<img src=\"Web App Diagram.svg\">\n\nThe diagram above gives an overview of how the various services will work together. On the far right is the model which we trained above and which is deployed using SageMaker. On the far left is our web app that collects a user's movie review, sends it off and expects a positive or negative sentiment in return.\n\nIn the middle is where some of the magic happens. We will construct a Lambda function, which you can think of as a straightforward Python function that can be executed whenever a specified event occurs. We will give this function permission to send and recieve data from a SageMaker endpoint.\n\nLastly, the method we will use to execute the Lambda function is a new endpoint that we will create using API Gateway. 
This endpoint will be a url that listens for data to be sent to it. Once it gets some data it will pass that data on to the Lambda function and then return whatever the Lambda function returns. Essentially it will act as an interface that lets our web app communicate with the Lambda function.\n\n### Setting up a Lambda function\n\nThe first thing we are going to do is set up a Lambda function. This Lambda function will be executed whenever our public API has data sent to it. When it is executed it will receive the data, perform any sort of processing that is required, send the data (the review) to the SageMaker endpoint we've created and then return the result.\n\n#### Part A: Create an IAM Role for the Lambda function\n\nSince we want the Lambda function to call a SageMaker endpoint, we need to make sure that it has permission to do so. To do this, we will construct a role that we can later give the Lambda function.\n\nUsing the AWS Console, navigate to the **IAM** page and click on **Roles**. Then, click on **Create role**. Make sure that the **AWS service** is the type of trusted entity selected and choose **Lambda** as the service that will use this role, then click **Next: Permissions**.\n\nIn the search box type `sagemaker` and select the check box next to the **AmazonSageMakerFullAccess** policy. Then, click on **Next: Review**.\n\nLastly, give this role a name. Make sure you use a name that you will remember later on, for example `LambdaSageMakerRole`. Then, click on **Create role**.\n\n#### Part B: Create a Lambda function\n\nNow it is time to actually create the Lambda function.\n\nUsing the AWS Console, navigate to the AWS Lambda page and click on **Create a function**. When you get to the next page, make sure that **Author from scratch** is selected. Now, name your Lambda function, using a name that you will remember later on, for example `sentiment_analysis_func`. Make sure that the **Python 3.6** runtime is selected and then choose the role that you created in the previous part. Then, click on **Create Function**.\n\nOn the next page you will see some information about the Lambda function you've just created. If you scroll down you should see an editor in which you can write the code that will be executed when your Lambda function is triggered. In our example, we will use the code below. \n\n```python\n# We need to use the low-level library to interact with SageMaker since the SageMaker API\n# is not available natively through Lambda.\nimport boto3\n\ndef lambda_handler(event, context):\n\n # The SageMaker runtime is what allows us to invoke the endpoint that we've created.\n runtime = boto3.Session().client('sagemaker-runtime')\n\n # Now we use the SageMaker runtime to invoke our endpoint, sending the review we were given\n response = runtime.invoke_endpoint(EndpointName = '**ENDPOINT NAME HERE**', # The name of the endpoint we created\n ContentType = 'text/plain', # The data format that is expected\n Body = event['body']) # The actual review\n\n # The response is an HTTP response whose body contains the result of our inference\n result = response['Body'].read().decode('utf-8')\n\n return {\n 'statusCode' : 200,\n 'headers' : { 'Content-Type' : 'text/plain', 'Access-Control-Allow-Origin' : '*' },\n 'body' : result\n }\n```\n\nOnce you have copy and pasted the code above into the Lambda code editor, replace the `**ENDPOINT NAME HERE**` portion with the name of the endpoint that we deployed earlier. 
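If you have lost track of which endpoints are currently running, a short `boto3` listing run from the notebook can also help (a convenience sketch):

```python
import boto3

# List the SageMaker endpoints visible to this account and region, with their status.
sm_client = boto3.client('sagemaker')
for endpoint in sm_client.list_endpoints()['Endpoints']:
    print(endpoint['EndpointName'], endpoint['EndpointStatus'])
```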
You can determine the name of the endpoint using the code cell below.", "_____no_output_____" ] ], [ [ "predictor.endpoint", "_____no_output_____" ] ], [ [ "Once you have added the endpoint name to the Lambda function, click on **Save**. Your Lambda function is now up and running. Next we need to create a way for our web app to execute the Lambda function.\n\n### Setting up API Gateway\n\nNow that our Lambda function is set up, it is time to create a new API using API Gateway that will trigger the Lambda function we have just created.\n\nUsing AWS Console, navigate to **Amazon API Gateway** and then click on **Get started**.\n\nOn the next page, make sure that **New API** is selected and give the new api a name, for example, `sentiment_analysis_api`. Then, click on **Create API**.\n\nNow we have created an API, however it doesn't currently do anything. What we want it to do is to trigger the Lambda function that we created earlier.\n\nSelect the **Actions** dropdown menu and click **Create Method**. A new blank method will be created, select its dropdown menu and select **POST**, then click on the check mark beside it.\n\nFor the integration point, make sure that **Lambda Function** is selected and click on the **Use Lambda Proxy integration**. This option makes sure that the data that is sent to the API is then sent directly to the Lambda function with no processing. It also means that the return value must be a proper response object as it will also not be processed by API Gateway.\n\nType the name of the Lambda function you created earlier into the **Lambda Function** text entry box and then click on **Save**. Click on **OK** in the pop-up box that then appears, giving permission to API Gateway to invoke the Lambda function you created.\n\nThe last step in creating the API Gateway is to select the **Actions** dropdown and click on **Deploy API**. You will need to create a new Deployment stage and name it anything you like, for example `prod`.\n\nYou have now successfully set up a public API to access your SageMaker model. Make sure to copy or write down the URL provided to invoke your newly created public API as this will be needed in the next step. This URL can be found at the top of the page, highlighted in blue next to the text **Invoke URL**.", "_____no_output_____" ], [ "## Step 4: Deploying our web app\n\nNow that we have a publicly available API, we can start using it in a web app. For our purposes, we have provided a simple static html file which can make use of the public api you created earlier.\n\nIn the `website` folder there should be a file called `index.html`. Download the file to your computer and open that file up in a text editor of your choice. There should be a line which contains **\\*\\*REPLACE WITH PUBLIC API URL\\*\\***. Replace this string with the url that you wrote down in the last step and then save the file.\n\nNow, if you open `index.html` on your local computer, your browser will behave as a local web server and you can use the provided site to interact with your SageMaker model.\n\nIf you'd like to go further, you can host this html file anywhere you'd like, for example using github or hosting a static site on Amazon's S3. Once you have done this you can share the link with anyone you'd like and have them play with it too!\n\n> **Important Note** In order for the web app to communicate with the SageMaker endpoint, the endpoint has to actually be deployed and running. This means that you are paying for it. 
Make sure that the endpoint is running when you want to use the web app but that you shut it down when you don't need it; otherwise you will end up with a surprisingly large AWS bill.\n\n**TODO:** Make sure that you include the edited `index.html` file in your project submission.", "_____no_output_____" ], [ "Now that your web app is working, try playing around with it and see how well it works.\n\n**Question**: Give an example of a review that you entered into your web app. What was the predicted sentiment of your example review?", "_____no_output_____" ], [ "**Answer:**\nReview: I really enjoy watch this movie. The plot is well developed and the characters are so vivid!\nOutput: Your review was POSITIVE!", "_____no_output_____" ], [ "### Delete the endpoint\n\nRemember to always shut down your endpoint if you are no longer using it. You are charged for the length of time that the endpoint is running, so if you forget and leave it on you could end up with an unexpectedly large bill.", "_____no_output_____" ] ], [ [ "predictor.delete_endpoint()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ] ]
cb76e3fac793271b50a5fadb1fa410520ecc7435
96,014
ipynb
Jupyter Notebook
day3_simple_model.ipynb
kuband93/dw_marix_car
4d98cbc3addb70f3913feea044a4afcc7af00bac
[ "MIT" ]
null
null
null
day3_simple_model.ipynb
kuband93/dw_marix_car
4d98cbc3addb70f3913feea044a4afcc7af00bac
[ "MIT" ]
null
null
null
day3_simple_model.ipynb
kuband93/dw_marix_car
4d98cbc3addb70f3913feea044a4afcc7af00bac
[ "MIT" ]
null
null
null
96,014
96,014
0.594028
[ [ [ "!pip install --upgrade tables\n!pip install eli5", "Collecting tables\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/ed/c3/8fd9e3bb21872f9d69eb93b3014c86479864cca94e625fd03713ccacec80/tables-3.6.1-cp36-cp36m-manylinux1_x86_64.whl (4.3MB)\n\u001b[K |████████████████████████████████| 4.3MB 2.8MB/s \n\u001b[?25hRequirement already satisfied, skipping upgrade: numpy>=1.9.3 in /usr/local/lib/python3.6/dist-packages (from tables) (1.17.5)\nRequirement already satisfied, skipping upgrade: numexpr>=2.6.2 in /usr/local/lib/python3.6/dist-packages (from tables) (2.7.1)\nInstalling collected packages: tables\n Found existing installation: tables 3.4.4\n Uninstalling tables-3.4.4:\n Successfully uninstalled tables-3.4.4\nSuccessfully installed tables-3.6.1\nCollecting eli5\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/97/2f/c85c7d8f8548e460829971785347e14e45fa5c6617da374711dec8cb38cc/eli5-0.10.1-py2.py3-none-any.whl (105kB)\n\u001b[K |████████████████████████████████| 112kB 2.8MB/s \n\u001b[?25hRequirement already satisfied: tabulate>=0.7.7 in /usr/local/lib/python3.6/dist-packages (from eli5) (0.8.6)\nRequirement already satisfied: jinja2 in /usr/local/lib/python3.6/dist-packages (from eli5) (2.11.1)\nRequirement already satisfied: attrs>16.0.0 in /usr/local/lib/python3.6/dist-packages (from eli5) (19.3.0)\nRequirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from eli5) (1.12.0)\nRequirement already satisfied: numpy>=1.9.0 in /usr/local/lib/python3.6/dist-packages (from eli5) (1.17.5)\nRequirement already satisfied: graphviz in /usr/local/lib/python3.6/dist-packages (from eli5) (0.10.1)\nRequirement already satisfied: scipy in /usr/local/lib/python3.6/dist-packages (from eli5) (1.4.1)\nRequirement already satisfied: scikit-learn>=0.18 in /usr/local/lib/python3.6/dist-packages (from eli5) (0.22.1)\nRequirement already satisfied: MarkupSafe>=0.23 in /usr/local/lib/python3.6/dist-packages (from jinja2->eli5) (1.1.1)\nRequirement already satisfied: joblib>=0.11 in /usr/local/lib/python3.6/dist-packages (from scikit-learn>=0.18->eli5) (0.14.1)\nInstalling collected packages: eli5\nSuccessfully installed eli5-0.10.1\n" ], [ "import pandas as pd\nimport numpy as np\n\nfrom sklearn.dummy import DummyRegressor\nfrom sklearn.tree import DecisionTreeRegressor\n\nfrom sklearn.metrics import mean_absolute_error\nfrom sklearn.model_selection import cross_val_score\n\nimport eli5\nfrom eli5.sklearn import PermutationImportance\n", "_____no_output_____" ] ], [ [ "#Loading Data", "_____no_output_____" ] ], [ [ "cd \"/content/drive/My Drive/Colab Notebooks/dw_matrix/matrix_two/dw_marix_car/\"", "/content/drive/My Drive/Colab Notebooks/dw_matrix/matrix_two/dw_marix_car\n" ], [ "df = pd.read_hdf('data/car.h5')\ndf.shape", "_____no_output_____" ], [ "df.columns", "_____no_output_____" ] ], [ [ "#Dummy Model", "_____no_output_____" ] ], [ [ "df.select_dtypes(np.number).columns #show int and floats feats", "_____no_output_____" ], [ "feats = ['car_id']\nX = df[feats].values\ny = df['price_value'].values\n\nmodel = DummyRegressor() #create model\nmodel.fit(X,y) #train dummy model - returns mean price_value\ny_pred = model.predict(X) #forecast\n\nmean_absolute_error(y, y_pred) #our benchamark we want to imporve, average difference between observed and predicted values is almost 40k zł", "_____no_output_____" ], [ "[x for x in df.columns if 'price' in x] #check for variables with price in the name", "_____no_output_____" ], [ 
"df['price_currency'].value_counts() #we have PLN and EURO", "_____no_output_____" ], [ "df['price_currency'].value_counts(normalize=True)", "_____no_output_____" ], [ "df = df [ df['price_currency'] != 'EUR']\ndf.shape #corrent number of PLN rows confirmed", "_____no_output_____" ], [ "", "_____no_output_____" ] ], [ [ "#Features", "_____no_output_____" ] ], [ [ "for feat in df.columns:\n \n print(feat)", "_____no_output_____" ], [ "df['param_color'].factorize()[0] #each strings got an id, [0] to show just ids", "_____no_output_____" ], [ "SUFFIX_CAT = '__cat'\nfor feat in df.columns:\n if isinstance(df[feat][0], list): continue #if the df is a list then skip it\n\n factorized_values = df[feat].factorize()[0]\n if SUFFIX_CAT in feat: #if the feature contains suffic __cat\n df[feat] = factorized_values #then assign the same value agian a__cat = a__cat\n else:\n df[feat + SUFFIX_CAT] = factorized_values #else add a suffix", "_____no_output_____" ], [ "cat_feats = [x for x in df.columns if SUFFIX_CAT in x]\ncat_feats = [x for x in cat_feats if 'price' not in x] #151 feats on which we want to learn\nlen(cat_feats)", "_____no_output_____" ], [ "X = df[cat_feats].values\ny = df['price_value'].values\n\nmodel = DecisionTreeRegressor(max_depth=5)\nscores = cross_val_score(model, X, y, cv=3, scoring=\"neg_mean_absolute_error\") #neg mean \nnp.mean(scores) #almost 2 times better result", "_____no_output_____" ], [ "m = DecisionTreeRegressor(max_depth=5)\nm.fit(X,y)\n\nimp = PermutationImportance(m, random_state=0).fit(X,y)\neli5.show_weights(m, feature_names = cat_feats)", "_____no_output_____" ], [ "!git config --global user.email \"[email protected]\"\n!git config --global user.name \"Jakub Andrzejewski\"\n!git add day3_simple_model.ipynb", "fatal: pathspec 'day3_simple_model.ipynb' did not match any files\n" ], [ "ls", "\u001b[0m\u001b[01;34mdata\u001b[0m/ day2_visualization.ipynb LICENSE README.md\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb76e9b6654ef46cd95e49cc8c72d93ba09fc1c7
7,016
ipynb
Jupyter Notebook
Chapter05/Machine Learning with Logistic Regression.ipynb
kraussc/scikit-learn-Cookbook-Second-Edition
49a59e5478f6e478fbea5e85d3a35d35607b9b70
[ "MIT" ]
74
2017-11-28T08:50:53.000Z
2022-01-25T05:52:34.000Z
Chapter05/Machine Learning with Logistic Regression.ipynb
kraussc/scikit-learn-Cookbook-Second-Edition
49a59e5478f6e478fbea5e85d3a35d35607b9b70
[ "MIT" ]
2
2019-02-25T05:57:58.000Z
2020-09-23T12:11:28.000Z
Chapter05/Machine Learning with Logistic Regression.ipynb
kraussc/scikit-learn-Cookbook-Second-Edition
49a59e5478f6e478fbea5e85d3a35d35607b9b70
[ "MIT" ]
66
2017-12-07T00:18:07.000Z
2022-03-13T11:18:21.000Z
26.081784
138
0.387258
[ [ [ "Machine Learning with Logistic Regression", "_____no_output_____" ], [ "import pandas as pd\n\ndata_web_address = \"https://archive.ics.uci.edu/ml/machine-learning-databases/pima-indians-diabetes/pima-indians-diabetes.data\"\ncolumn_names = ['pregnancy_x', \n 'plasma_con', \n 'blood_pressure', \n 'skin_mm', \n 'insulin', \n 'bmi', \n 'pedigree_func', \n 'age', \n 'target']\n\nfeature_names = column_names[:-1]\nall_data = pd.read_csv(data_web_address , names=column_names)\nall_data.head()", "_____no_output_____" ], [ "import numpy as np\nimport pandas as pd\n\nX = all_data[feature_names]\ny = all_data['target']", "_____no_output_____" ], [ "from sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=7,stratify=y)", "_____no_output_____" ], [ "from sklearn.linear_model import LogisticRegression\nlr = LogisticRegression()\nlr.fit(X_train,y_train)", "_____no_output_____" ], [ "y_pred = lr.predict(X_test)", "_____no_output_____" ], [ "from sklearn.metrics import accuracy_score\naccuracy_score(y_test,y_pred)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code" ] ]
cb76ea172f5ce294f33efcd0cb96859f8d64a755
11,749
ipynb
Jupyter Notebook
nbs/14_cluster_interpretation/interpret_cluster.run.ipynb
greenelab/phenoplier
95f04b17f0b5227560fcf32ac0a85b2c5aa9001f
[ "BSD-2-Clause-Patent" ]
3
2021-08-17T21:59:19.000Z
2022-03-08T15:46:24.000Z
nbs/14_cluster_interpretation/interpret_cluster.run.ipynb
greenelab/phenoplier
95f04b17f0b5227560fcf32ac0a85b2c5aa9001f
[ "BSD-2-Clause-Patent" ]
4
2021-08-04T13:57:24.000Z
2021-10-11T14:57:15.000Z
nbs/14_cluster_interpretation/interpret_cluster.run.ipynb
greenelab/phenoplier
95f04b17f0b5227560fcf32ac0a85b2c5aa9001f
[ "BSD-2-Clause-Patent" ]
null
null
null
22.126177
212
0.525577
[ [ [ "# Description", "_____no_output_____" ], [ "This notebook contains the interpretation of a cluster (which features/latent variables in the original data are useful to distinguish traits in the cluster).\n\nSee section [LV analysis](#lv_analysis) below", "_____no_output_____" ], [ "# Modules loading", "_____no_output_____" ] ], [ [ "%load_ext autoreload\n%autoreload 2", "_____no_output_____" ], [ "import pickle\nimport re\nfrom pathlib import Path\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom IPython.display import HTML\n\nfrom clustering.methods import ClusterInterpreter\nfrom data.recount2 import LVAnalysis\nfrom data.cache import read_data\nimport conf", "_____no_output_____" ] ], [ [ "# Settings", "_____no_output_____" ] ], [ [ "PARTITION_K = None\nPARTITION_CLUSTER_ID = None", "_____no_output_____" ] ], [ [ "# Load MultiPLIER summary", "_____no_output_____" ] ], [ [ "multiplier_model_summary = read_data(conf.MULTIPLIER[\"MODEL_SUMMARY_FILE\"])", "_____no_output_____" ], [ "multiplier_model_summary.shape", "_____no_output_____" ], [ "multiplier_model_summary.head()", "_____no_output_____" ] ], [ [ "# Load data", "_____no_output_____" ], [ "## Original data", "_____no_output_____" ] ], [ [ "INPUT_SUBSET = \"z_score_std\"", "_____no_output_____" ], [ "INPUT_STEM = \"projection-smultixcan-efo_partial-mashr-zscores\"", "_____no_output_____" ], [ "input_filepath = Path(\n conf.RESULTS[\"DATA_TRANSFORMATIONS_DIR\"],\n INPUT_SUBSET,\n f\"{INPUT_SUBSET}-{INPUT_STEM}.pkl\",\n).resolve()\ndisplay(input_filepath)\n\nassert input_filepath.exists(), \"Input file does not exist\"\n\ninput_filepath_stem = input_filepath.stem\ndisplay(input_filepath_stem)", "_____no_output_____" ], [ "data = pd.read_pickle(input_filepath)", "_____no_output_____" ], [ "data.shape", "_____no_output_____" ], [ "data.head()", "_____no_output_____" ] ], [ [ "## Clustering partitions", "_____no_output_____" ] ], [ [ "CONSENSUS_CLUSTERING_DIR = Path(\n conf.RESULTS[\"CLUSTERING_DIR\"], \"consensus_clustering\"\n).resolve()\n\ndisplay(CONSENSUS_CLUSTERING_DIR)", "_____no_output_____" ], [ "input_file = Path(CONSENSUS_CLUSTERING_DIR, \"best_partitions_by_k.pkl\").resolve()\ndisplay(input_file)", "_____no_output_____" ], [ "best_partitions = pd.read_pickle(input_file)", "_____no_output_____" ], [ "best_partitions.shape", "_____no_output_____" ], [ "best_partitions.head()", "_____no_output_____" ] ], [ [ "# Functions", "_____no_output_____" ] ], [ [ "def show_cluster_stats(data, partition, cluster):\n cluster_traits = data[partition == cluster].index\n display(f\"Cluster '{cluster}' has {len(cluster_traits)} traits\")\n display(cluster_traits)", "_____no_output_____" ] ], [ [ "# LV analysis\n<a id=\"lv_analysis\"></a>", "_____no_output_____" ], [ "## Associated traits", "_____no_output_____" ] ], [ [ "display(best_partitions.loc[PARTITION_K])\npart = best_partitions.loc[PARTITION_K, \"partition\"]", "_____no_output_____" ], [ "show_cluster_stats(data, part, PARTITION_CLUSTER_ID)", "_____no_output_____" ] ], [ [ "## Associated latent variables", "_____no_output_____" ] ], [ [ "ci = ClusterInterpreter(\n threshold=1.0,\n max_features=20,\n max_features_to_explore=100,\n)", "_____no_output_____" ], [ "ci.fit(data, part, PARTITION_CLUSTER_ID)", "_____no_output_____" ], [ "ci.features_", "_____no_output_____" ], [ "# save interpreter instance\noutput_dir = Path(\n conf.RESULTS[\"CLUSTERING_INTERPRETATION\"][\"BASE_DIR\"],\n \"cluster_lvs\",\n 
f\"part{PARTITION_K}\",\n)\noutput_dir.mkdir(exist_ok=True, parents=True)", "_____no_output_____" ], [ "output_file = Path(\n output_dir, f\"cluster_interpreter-part{PARTITION_K}_k{PARTITION_CLUSTER_ID}.pkl\"\n)\ndisplay(output_file)", "_____no_output_____" ], [ "ci.features_.to_pickle(output_file)", "_____no_output_____" ] ], [ [ "## Top attributes", "_____no_output_____" ], [ "Here we go through the list of associated latent variables and, for each, we show associated pathways (prior knowledge), top traits, top genes and the top tissues/cell types where those genes are expressed.", "_____no_output_____" ] ], [ [ "for lv_idx, lv_info in ci.features_.iterrows():\n display(HTML(f\"<h2>LV{lv_idx}</h2>\"))\n\n lv_name = lv_info[\"name\"]\n lv_obj = lv_exp = LVAnalysis(lv_name, data)\n\n # show lv prior knowledge match (pathways)\n lv_pathways = multiplier_model_summary[\n multiplier_model_summary[\"LV index\"].isin((lv_name[2:],))\n & (\n (multiplier_model_summary[\"FDR\"] < 0.05)\n | (multiplier_model_summary[\"AUC\"] >= 0.75)\n )\n ]\n display(lv_pathways)\n\n lv_data = lv_obj.get_experiments_data()\n\n display(\"\")\n display(lv_obj.lv_traits.head(20))\n display(\"\")\n display(lv_obj.lv_genes.head(10))\n\n lv_attrs = lv_obj.get_attributes_variation_score()\n _tmp = pd.Series(lv_attrs.index)\n lv_attrs = lv_attrs[\n _tmp.str.match(\n \"(?:cell.+type$)|(?:tissue$)|(?:tissue.+type$)\",\n case=False,\n flags=re.IGNORECASE,\n ).values\n ].sort_values(ascending=False)\n display(lv_attrs)\n\n for _lva in lv_attrs.index:\n display(HTML(f\"<h3>{_lva}</h3>\"))\n display(lv_data[_lva].dropna().reset_index()[\"project\"].unique())\n\n with sns.plotting_context(\"paper\", font_scale=1.0), sns.axes_style(\"whitegrid\"):\n fig, ax = plt.subplots(figsize=(14, 8))\n ax = lv_obj.plot_attribute(_lva, top_x_values=20)\n if ax is None:\n plt.close(fig)\n continue\n display(fig)\n plt.close(fig)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ] ]
cb770d039cdae6e96222f2398fc19ad75b8163ab
308,975
ipynb
Jupyter Notebook
01_introduction.ipynb
z-arabi/notebooks
b70e992a33a39f7aa8e3c2a25ac173e743b9872d
[ "Apache-2.0" ]
null
null
null
01_introduction.ipynb
z-arabi/notebooks
b70e992a33a39f7aa8e3c2a25ac173e743b9872d
[ "Apache-2.0" ]
null
null
null
01_introduction.ipynb
z-arabi/notebooks
b70e992a33a39f7aa8e3c2a25ac173e743b9872d
[ "Apache-2.0" ]
null
null
null
37.225904
1,014
0.477013
[ [ [ "<a href=\"https://colab.research.google.com/github/z-arabi/notebooks/blob/main/01_introduction.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ] ], [ [ "# Uncomment and run this cell if you're on Colab or Kaggle\n!git clone https://github.com/nlp-with-transformers/notebooks.git\n%cd notebooks\nfrom install import *\ninstall_requirements()", "Cloning into 'notebooks'...\nremote: Enumerating objects: 408, done.\u001b[K\nremote: Counting objects: 100% (408/408), done.\u001b[K\nremote: Compressing objects: 100% (303/303), done.\u001b[K\nremote: Total 408 (delta 195), reused 265 (delta 99), pack-reused 0\u001b[K\nReceiving objects: 100% (408/408), 24.35 MiB | 20.10 MiB/s, done.\nResolving deltas: 100% (195/195), done.\n/content/notebooks\n⏳ Installing base requirements ...\n✅ Base requirements installed!\n⏳ Installing Git LFS ...\n✅ Git LFS installed!\n" ], [ "#hide\nfrom utils import *\nsetup_chapter()", "No GPU was detected! This notebook can be *very* slow without a GPU 🐢\nGo to Runtime > Change runtime type and select a GPU hardware accelerator.\nUsing transformers v4.11.3\nUsing datasets v1.16.1\n" ] ], [ [ "# Hello Transformers", "_____no_output_____" ], [ "## The Encoder-Decoder Framework", "_____no_output_____" ], [ "## Attention Mechanisms", "_____no_output_____" ], [ "## Transfer Learning in NLP", "_____no_output_____" ], [ "## Hugging Face Transformers: Bridging the Gap", "_____no_output_____" ], [ "## A Tour of Transformer Applications", "_____no_output_____" ] ], [ [ "text = \"\"\"Dear Amazon, last week I ordered an Optimus Prime action figure \\\nfrom your online store in Germany. Unfortunately, when I opened the package, \\\nI discovered to my horror that I had been sent an action figure of Megatron \\\ninstead! As a lifelong enemy of the Decepticons, I hope you can understand my \\\ndilemma. To resolve the issue, I demand an exchange of Megatron for the \\\nOptimus Prime figure I ordered. Enclosed are copies of my records concerning \\\nthis purchase. I expect to hear from you soon. Sincerely, Bumblebee.\"\"\"", "_____no_output_____" ] ], [ [ "## pipelines\nhttps://huggingface.co/docs/transformers/main_classes/pipelines ", "_____no_output_____" ], [ "### Text Classification", "_____no_output_____" ] ], [ [ "#hide_output\nfrom transformers import pipeline\n\nclassifier = pipeline(\"text-classification\")", "_____no_output_____" ], [ "import pandas as pd\n\noutputs = classifier(text)\npd.DataFrame(outputs) ", "_____no_output_____" ], [ "my_text = \"\"\"Hi, I got a device from amazon, but I did not like its color. \\\nIn addition to its function, this device also had a beautiful appearance. 
\\\nMy only problem with this device was its color.\"\"\"\n\nmy_output = classifier(my_text)\npd.DataFrame(my_output) ", "_____no_output_____" ], [ "print(my_output) # the lis of dictionary\n# convert the list of dictionary to the DF\npd.DataFrame(my_output) ", "[{'label': 'POSITIVE', 'score': 0.993431031703949}]\n" ] ], [ [ "### Named Entity Recognition", "_____no_output_____" ] ], [ [ "ner_tagger = pipeline(\"ner\", aggregation_strategy=\"simple\")\noutputs = ner_tagger(text)\npd.DataFrame(outputs) ", "_____no_output_____" ], [ "ner_output = ner_tagger(my_text)\npd.DataFrame(ner_output) ", "_____no_output_____" ] ], [ [ "### Question Answering ", "_____no_output_____" ] ], [ [ "reader = pipeline(\"question-answering\")\nquestion = \"What does the customer want?\"\noutputs = reader(question=question, context=text)\npd.DataFrame([outputs]) ", "_____no_output_____" ], [ "my_question = \"What was the customer dissatisfied with?\"\nq_output = reader(question=my_question, context=my_text)\npd.DataFrame([q_output])", "_____no_output_____" ] ], [ [ "### Summarization", "_____no_output_____" ] ], [ [ "summarizer = pipeline(\"summarization\")", "_____no_output_____" ], [ "outputs = summarizer(text, max_length=45, clean_up_tokenization_spaces=True)\noutputs", "_____no_output_____" ], [ "outputs[0]['summary_text']", "_____no_output_____" ], [ "sum_output = summarizer(my_text, clean_up_tokenization_spaces=True)\nsum_output[0]['summary_text']", "_____no_output_____" ] ], [ [ "### Translation", "_____no_output_____" ] ], [ [ "translator = pipeline(\"translation_en_to_de\", \n model=\"Helsinki-NLP/opus-mt-en-de\")\noutputs = translator(text, clean_up_tokenization_spaces=True, min_length=100)\noutputs[0]['translation_text']", "_____no_output_____" ], [ "# https://huggingface.co/languages\n# ValueError: The task does not provide any default models for options ('en', 'fa')\n\nmy_translator = pipeline(\"translation_en_to_it\",model=\"Helsinki-NLP/opus-mt-en-it\")\ntranslate_output = my_translator(my_text, clean_up_tokenization_spaces=True, min_length=100)\ntranslate_output[0]['translation_text']", "_____no_output_____" ] ], [ [ "### Text Generation", "_____no_output_____" ] ], [ [ "#hide\nfrom transformers import set_seed\nset_seed(42) # Set the seed to get reproducible results", "_____no_output_____" ], [ "generator = pipeline(\"text-generation\")\nresponse = \"Dear Bumblebee, I am sorry to hear that your order was mixed up.\"\nprompt = text + \"\\n\\nCustomer service response:\\n\" + response\noutputs = generator(prompt, max_length=200)\noutputs[0]['generated_text']", "_____no_output_____" ], [ "my_response = \"Dear Customer, we will take care of your problem .\"\nprompt = my_text + \"\\n\\nCustomer service response:\\n\" + my_response\ngen_output = generator(prompt, max_length=200)\ngen_output[0]['generated_text']", "_____no_output_____" ] ], [ [ "## The Hugging Face Ecosystem", "_____no_output_____" ], [ "### The Hugging Face Hub", "_____no_output_____" ], [ "### Hugging Face Tokenizers", "_____no_output_____" ], [ "### Hugging Face Datasets", "_____no_output_____" ], [ "### Hugging Face Accelerate", "_____no_output_____" ], [ "## Main Challenges with Transformers", "_____no_output_____" ], [ "## Conclusion", "_____no_output_____" ] ], [ [ "", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ] ]
cb772e63630137d642d4de77122202f39cd5f8e0
11,296
ipynb
Jupyter Notebook
lecture55.ipynb
snehashis1997/dlvcnptel
2d05b2c0ab1131502ca2e85e1c762f46cd535877
[ "MIT" ]
125
2018-01-18T16:00:16.000Z
2022-03-21T15:47:23.000Z
lecture55.ipynb
snehashis1997/dlvcnptel
2d05b2c0ab1131502ca2e85e1c762f46cd535877
[ "MIT" ]
6
2018-01-13T14:16:24.000Z
2020-02-12T08:38:51.000Z
lecture55.ipynb
prachipalsodkar/dlvcnptel
2d05b2c0ab1131502ca2e85e1c762f46cd535877
[ "MIT" ]
162
2018-01-12T11:53:47.000Z
2022-03-17T13:34:31.000Z
28.670051
123
0.517528
[ [ [ "# Lecture 55: Adversarial Autoencoder for Classification", "_____no_output_____" ], [ "## Load Packages", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport os\nimport math\nimport torch\nimport itertools\nimport torch.nn as nn\nimport torch.optim as optim\nfrom IPython import display\nimport torch.nn.functional as F\nimport matplotlib.pyplot as plt\nimport torchvision.datasets as dsets\nimport torchvision.transforms as transforms\n\nprint(torch.__version__) # This code has been updated for PyTorch 1.0.0", "_____no_output_____" ] ], [ [ "## Load Data", "_____no_output_____" ] ], [ [ "# MNIST Dataset \ndataset = dsets.MNIST(root='./MNIST', train=True, transform=transforms.ToTensor(), download=True)\ntestset = dsets.MNIST(root='./MNIST', train=False, transform=transforms.ToTensor(), download=True)\n\n# Data Loader (Input Pipeline)\ndata_loader = torch.utils.data.DataLoader(dataset=dataset, batch_size=100, shuffle=True)\ntest_loader = torch.utils.data.DataLoader(dataset=testset, batch_size=100, shuffle=False)", "_____no_output_____" ], [ "# Check availability of GPU\n\nuse_gpu = torch.cuda.is_available()\n# use_gpu = False # Uncomment in case of GPU memory error\nif use_gpu:\n print('GPU is available!')\n device = \"cuda\"\nelse:\n print('GPU is not available!')\n device = \"cpu\"", "_____no_output_____" ] ], [ [ "## Defining network architecture", "_____no_output_____" ] ], [ [ "#Encoder\nclass Q_net(nn.Module): \n def __init__(self,X_dim,N,z_dim):\n super(Q_net, self).__init__()\n self.lin1 = nn.Linear(X_dim, N)\n self.lin2 = nn.Linear(N, N)\n self.lin3gauss = nn.Linear(N, z_dim)\n def forward(self, x):\n x = F.dropout(self.lin1(x), p=0.25, training=self.training)\n x = F.relu(x)\n x = F.dropout(self.lin2(x), p=0.25, training=self.training)\n x = F.relu(x)\n x = self.lin3gauss(x)\n return x\n\n# Decoder\nclass P_net(nn.Module): \n def __init__(self,X_dim,N,z_dim):\n super(P_net, self).__init__()\n self.lin1 = nn.Linear(z_dim, N)\n self.lin2 = nn.Linear(N, N)\n self.lin3 = nn.Linear(N, X_dim)\n def forward(self, x):\n x = F.dropout(self.lin1(x), p=0.25, training=self.training)\n x = F.relu(x)\n x = F.dropout(self.lin2(x), p=0.25, training=self.training)\n x = self.lin3(x)\n return torch.sigmoid(x)\n\n# Discriminator\nclass D_net_gauss(nn.Module): \n def __init__(self,N,z_dim):\n super(D_net_gauss, self).__init__()\n self.lin1 = nn.Linear(z_dim, N)\n self.lin2 = nn.Linear(N, N)\n self.lin3 = nn.Linear(N, 1)\n def forward(self, x):\n x = F.dropout(self.lin1(x), p=0.2, training=self.training)\n x = F.relu(x)\n x = F.dropout(self.lin2(x), p=0.2, training=self.training)\n x = F.relu(x)\n return torch.sigmoid(self.lin3(x)) ", "_____no_output_____" ] ], [ [ "## Define optimizer", "_____no_output_____" ] ], [ [ "z_red_dims = 100\nQ = Q_net(784,1000,z_red_dims).to(device)\nP = P_net(784,1000,z_red_dims).to(device)\nD_gauss = D_net_gauss(500,z_red_dims).to(device)\n\n# Set learning rates\ngen_lr = 0.0001\nreg_lr = 0.00005\n\n#encode/decode optimizers\noptim_P = optim.Adam(P.parameters(), lr=gen_lr)\noptim_Q_enc = optim.Adam(Q.parameters(), lr=gen_lr)\n#regularizing optimizers\noptim_Q_gen = optim.Adam(Q.parameters(), lr=reg_lr)\noptim_D = optim.Adam(D_gauss.parameters(), lr=reg_lr)", "_____no_output_____" ] ], [ [ "## Test Data", "_____no_output_____" ] ], [ [ "num_test_samples = 100\n\ntest_noise = torch.randn(num_test_samples,z_red_dims).to(device)", "_____no_output_____" ] ], [ [ "## Training", "_____no_output_____" ] ], [ [ "# create figure for plotting\nsize_figure_grid = 
int(math.sqrt(num_test_samples))\nfig, ax = plt.subplots(size_figure_grid, size_figure_grid, figsize=(6, 6))\nfor i, j in itertools.product(range(size_figure_grid), range(size_figure_grid)):\n ax[i,j].get_xaxis().set_visible(False)\n ax[i,j].get_yaxis().set_visible(False)\n \n \ndata_iter = iter(data_loader)\niter_per_epoch = len(data_loader)\ntotal_step = 5#5000\n\n# Start training\nfor step in range(total_step):\n\n # Reset the data_iter\n if (step+1) % iter_per_epoch == 0:\n data_iter = iter(data_loader)\n\n # Fetch the images and labels and convert them to variables\n images, labels = next(data_iter)\n images, labels = images.view(images.size(0), -1).to(device), labels.to(device)\n\n #reconstruction loss\n P.zero_grad()\n Q.zero_grad()\n D_gauss.zero_grad()\n\n z_sample = Q(images) #encode to z\n X_sample = P(z_sample) #decode to X reconstruction\n recon_loss = F.binary_cross_entropy(X_sample,images)\n\n recon_loss.backward()\n optim_P.step()\n optim_Q_enc.step()\n\n # Discriminator\n ## true prior is random normal (randn)\n ## this is constraining the Z-projection to be normal!\n Q.eval()\n z_real_gauss = torch.randn(images.size()[0], z_red_dims).to(device)\n D_real_gauss = D_gauss(z_real_gauss)\n\n z_fake_gauss = Q(images)\n D_fake_gauss = D_gauss(z_fake_gauss)\n\n D_loss = -torch.mean(torch.log(D_real_gauss) + torch.log(1 - D_fake_gauss))\n\n D_loss.backward()\n optim_D.step()\n\n # Generator\n Q.train()\n z_fake_gauss = Q(images)\n D_fake_gauss = D_gauss(z_fake_gauss)\n \n G_loss = -torch.mean(torch.log(D_fake_gauss))\n\n G_loss.backward()\n optim_Q_gen.step() \n \n P.eval()\n test_images = P(test_noise)\n P.train()\n if use_gpu:\n test_images = test_images.cpu().detach()\n \n for k in range(num_test_samples):\n i = k//10\n j = k%10\n ax[i,j].cla()\n ax[i,j].imshow(test_images[k,:].numpy().reshape(28, 28), cmap='Greys')\n display.clear_output(wait=True)\n display.display(plt.gcf())\n\n ", "_____no_output_____" ] ], [ [ "## Classifier", "_____no_output_____" ] ], [ [ "#Encoder\nclass Classifier(nn.Module): \n def __init__(self):\n super(Classifier, self).__init__()\n self.l1 = Q\n self.l2 = nn.Linear(100,10)\n def forward(self, x):\n x = self.l1(x)\n x = self.l2(x)\n return x", "_____no_output_____" ], [ "net = Classifier().to(device)\nprint(net)", "_____no_output_____" ], [ "criterion = nn.CrossEntropyLoss()\noptimizer = optim.Adam(net.parameters(), lr=1e-4)", "_____no_output_____" ] ], [ [ "## Training", "_____no_output_____" ] ], [ [ "iterations = 10\n\nfor epoch in range(iterations): # loop over the dataset multiple times\n\n runningLoss = 0.0\n for i, data in enumerate(data_loader, 0):\n # get the inputs\n inputs, labels = data\n inputs, labels = inputs.view(inputs.size(0), -1).to(device), labels.to(device)\n \n net.train()\n optimizer.zero_grad() # zeroes the gradient buffers of all parameters\n outputs = net(inputs) # forward \n loss = criterion(outputs, labels) # calculate loss\n loss.backward() # backpropagate the loss\n optimizer.step()\n correct = 0\n total = 0\n net.eval()\n with torch.no_grad():\n for data in test_loader: \n inputs, labels = data\n inputs, labels = inputs.view(inputs.size(0), -1).to(device), labels.to(device)\n outputs = net(inputs)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels.data).sum()\n print('At Iteration : %d / %d ;Test Accuracy : %f'%(epoch + 1,iterations,100 * float(correct) /float(total)))\nprint('Finished Training')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
cb7738d8b211e45410a8317380037c57c1088c7a
971,986
ipynb
Jupyter Notebook
notebooks/SHAP Values.ipynb
jchen42703/earthquake_forecasting
716f9424a340775e8eb5ffb1fa802e86c7d9469c
[ "MIT" ]
null
null
null
notebooks/SHAP Values.ipynb
jchen42703/earthquake_forecasting
716f9424a340775e8eb5ffb1fa802e86c7d9469c
[ "MIT" ]
null
null
null
notebooks/SHAP Values.ipynb
jchen42703/earthquake_forecasting
716f9424a340775e8eb5ffb1fa802e86c7d9469c
[ "MIT" ]
null
null
null
587.657799
56,056
0.693687
[ [ [ "# LightGBM", "_____no_output_____" ], [ "## Single Prediction", "_____no_output_____" ] ], [ [ "from backend.api.prediction import initialize_pipeline\nconfig_path = \"/home/joseph/Coding/ml_projects/earthquake_forecasting/backend/config.yml\"\nlgb_pipeline = initialize_pipeline(config_path, \"lightgbm\")\nlgb_pipeline", "_____no_output_____" ], [ "import pandas as pd\ndata = {\n \"building_id\": 802906,\n \"geo_level_1_id\": 6,\n \"geo_level_2_id\": 487,\n \"geo_level_3_id\": 12198,\n \"count_floors_pre_eq\": 2,\n \"age\": 30,\n \"area_percentage\": 6,\n \"height_percentage\": 5,\n \"land_surface_condition\": \"t\",\n \"foundation_type\": \"r\",\n \"roof_type\": \"n\",\n \"ground_floor_type\": \"f\",\n \"other_floor_type\": \"q\",\n \"position\": \"t\",\n \"plan_configuration\": \"d\",\n \"has_superstructure_adobe_mud\": 1,\n \"has_superstructure_mud_mortar_stone\": 1,\n \"has_superstructure_stone_flag\": 0,\n \"has_superstructure_cement_mortar_stone\": 0,\n \"has_superstructure_mud_mortar_brick\": 0,\n \"has_superstructure_cement_mortar_brick\": 0,\n \"has_superstructure_timber\": 0,\n \"has_superstructure_bamboo\": 0,\n \"has_superstructure_rc_non_engineered\": 0,\n \"has_superstructure_rc_engineered\": 0,\n \"has_superstructure_other\": 0,\n \"legal_ownership_status\": \"v\",\n \"count_families\": 1,\n \"has_secondary_use\": 0,\n \"has_secondary_use_agriculture\": 0,\n \"has_secondary_use_hotel\": 0,\n \"has_secondary_use_rental\": 0,\n \"has_secondary_use_institution\": 0,\n \"has_secondary_use_school\": 0,\n \"has_secondary_use_industry\": 0,\n \"has_secondary_use_health_post\": 0,\n \"has_secondary_use_gov_office\": 0,\n \"has_secondary_use_use_police\": 0,\n \"has_secondary_use_other\": 0,\n }\ndf = pd.DataFrame(data, index=[0])\ndf.head()", "_____no_output_____" ], [ "out = lgb_pipeline.predict(df)\nout", "_____no_output_____" ], [ "import shap\n# explain the model's predictions using SHAP\n# (same syntax works for LightGBM, CatBoost, scikit-learn, transformers, Spark, etc.)\nexplainer = shap.TreeExplainer(lgb_pipeline.model.model)", "_____no_output_____" ], [ "df: pd.DataFrame = lgb_pipeline.encoder.replace_with_new_embeds(df, batch_size=1)\nif \"building_id\" in df.columns:\n df = df.drop([\"building_id\"], axis=1)\n\nif \"Unnamed: 0\" in df.columns:\n df = df.drop([\"Unnamed: 0\"], axis=1)\n\ndf.head()", "_____no_output_____" ], [ "lgb_pipeline.model.model.params[\"objective\"] = \"multiclass\"\nshap_values = explainer(df)\nshap_values", "_____no_output_____" ], [ "shap.initjs()\nshap.force_plot(explainer.expected_value[1], shap_values.data[0,:], df.iloc[0,:])", "_____no_output_____" ], [ "shap.summary_plot(shap_values.data, df)", "_____no_output_____" ], [ "df.columns", "_____no_output_____" ], [ "feat_importances = lgb_pipeline.model.model.feature_importance()\nimportances = {col:weight for (col, weight) in zip(df.columns, feat_importances)}\nimportances", "_____no_output_____" ], [ "shap.dependence_plot(\"has_superstructure_adobe_mud\", shap_values, df)", "_____no_output_____" ], [ "shap.summary_plot(shap_values, df)", "_____no_output_____" ], [ "shap.plots.bar(shap_values)", "_____no_output_____" ], [ "# visualize the first prediction's explanation\nshap.plots.waterfall(shap_values[0])", "_____no_output_____" ] ], [ [ "## With Whole Dataset", "_____no_output_____" ] ], [ [ "from backend.api.prediction import initialize_pipeline\nconfig_path = \"/home/joseph/Coding/ml_projects/earthquake_forecasting/backend/config.yml\"\nlgb_pipeline = initialize_pipeline(config_path, 
\"lightgbm\")\nlgb_pipeline", "_____no_output_____" ], [ "import pandas as pd\ndf = pd.read_csv(\"~/datasets/earthquake_damage_forecasting/train_data_embeds.csv\").drop([\"Unnamed: 0\", \"building_id\"], axis=1)\ndf.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 260601 entries, 0 to 260600\nData columns (total 81 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 count_floors_pre_eq 260601 non-null int64 \n 1 age 260601 non-null int64 \n 2 area_percentage 260601 non-null int64 \n 3 height_percentage 260601 non-null int64 \n 4 has_superstructure_adobe_mud 260601 non-null int64 \n 5 has_superstructure_mud_mortar_stone 260601 non-null int64 \n 6 has_superstructure_stone_flag 260601 non-null int64 \n 7 has_superstructure_cement_mortar_stone 260601 non-null int64 \n 8 has_superstructure_mud_mortar_brick 260601 non-null int64 \n 9 has_superstructure_cement_mortar_brick 260601 non-null int64 \n 10 has_superstructure_timber 260601 non-null int64 \n 11 has_superstructure_bamboo 260601 non-null int64 \n 12 has_superstructure_rc_non_engineered 260601 non-null int64 \n 13 has_superstructure_rc_engineered 260601 non-null int64 \n 14 has_superstructure_other 260601 non-null int64 \n 15 count_families 260601 non-null int64 \n 16 has_secondary_use 260601 non-null int64 \n 17 has_secondary_use_agriculture 260601 non-null int64 \n 18 has_secondary_use_hotel 260601 non-null int64 \n 19 has_secondary_use_rental 260601 non-null int64 \n 20 has_secondary_use_institution 260601 non-null int64 \n 21 has_secondary_use_school 260601 non-null int64 \n 22 has_secondary_use_industry 260601 non-null int64 \n 23 has_secondary_use_health_post 260601 non-null int64 \n 24 has_secondary_use_gov_office 260601 non-null int64 \n 25 has_secondary_use_use_police 260601 non-null int64 \n 26 has_secondary_use_other 260601 non-null int64 \n 27 land_surface_condition_n 260601 non-null int64 \n 28 land_surface_condition_o 260601 non-null int64 \n 29 land_surface_condition_t 260601 non-null int64 \n 30 foundation_type_h 260601 non-null int64 \n 31 foundation_type_i 260601 non-null int64 \n 32 foundation_type_r 260601 non-null int64 \n 33 foundation_type_u 260601 non-null int64 \n 34 foundation_type_w 260601 non-null int64 \n 35 roof_type_n 260601 non-null int64 \n 36 roof_type_q 260601 non-null int64 \n 37 roof_type_x 260601 non-null int64 \n 38 ground_floor_type_f 260601 non-null int64 \n 39 ground_floor_type_m 260601 non-null int64 \n 40 ground_floor_type_v 260601 non-null int64 \n 41 ground_floor_type_x 260601 non-null int64 \n 42 ground_floor_type_z 260601 non-null int64 \n 43 other_floor_type_j 260601 non-null int64 \n 44 other_floor_type_q 260601 non-null int64 \n 45 other_floor_type_s 260601 non-null int64 \n 46 other_floor_type_x 260601 non-null int64 \n 47 position_j 260601 non-null int64 \n 48 position_o 260601 non-null int64 \n 49 position_s 260601 non-null int64 \n 50 position_t 260601 non-null int64 \n 51 plan_configuration_a 260601 non-null int64 \n 52 plan_configuration_c 260601 non-null int64 \n 53 plan_configuration_d 260601 non-null int64 \n 54 plan_configuration_f 260601 non-null int64 \n 55 plan_configuration_m 260601 non-null int64 \n 56 plan_configuration_n 260601 non-null int64 \n 57 plan_configuration_o 260601 non-null int64 \n 58 plan_configuration_q 260601 non-null int64 \n 59 plan_configuration_s 260601 non-null int64 \n 60 plan_configuration_u 260601 non-null int64 \n 61 legal_ownership_status_a 260601 non-null int64 \n 62 legal_ownership_status_r 260601 non-null 
int64 \n 63 legal_ownership_status_v 260601 non-null int64 \n 64 legal_ownership_status_w 260601 non-null int64 \n 65 geo_feat1 260601 non-null float64\n 66 geo_feat2 260601 non-null float64\n 67 geo_feat3 260601 non-null float64\n 68 geo_feat4 260601 non-null float64\n 69 geo_feat5 260601 non-null float64\n 70 geo_feat6 260601 non-null float64\n 71 geo_feat7 260601 non-null float64\n 72 geo_feat8 260601 non-null float64\n 73 geo_feat9 260601 non-null float64\n 74 geo_feat10 260601 non-null float64\n 75 geo_feat11 260601 non-null float64\n 76 geo_feat12 260601 non-null float64\n 77 geo_feat13 260601 non-null float64\n 78 geo_feat14 260601 non-null float64\n 79 geo_feat15 260601 non-null float64\n 80 geo_feat16 260601 non-null float64\ndtypes: float64(16), int64(65)\nmemory usage: 161.0 MB\n" ], [ "import shap\n# explain the model's predictions using SHAP\n# (same syntax works for LightGBM, CatBoost, scikit-learn, transformers, Spark, etc.)\nexplainer = shap.TreeExplainer(lgb_pipeline.model.model)\nexplainer", "_____no_output_____" ], [ "lgb_pipeline.model.model.params[\"objective\"] = \"multiclass\"\nshap_values = explainer(df.iloc[:50])\nshap_values", "_____no_output_____" ], [ "shap.initjs()\nshap.force_plot(explainer.expected_value[1], shap_values.data[1,:], df.iloc[1,:])", "_____no_output_____" ], [ "shap.summary_plot(shap_values.data, df.iloc[:50])", "_____no_output_____" ], [ "shap.plots.bar(shap_values[1])", "_____no_output_____" ], [ "help(explainer)", "Help on Tree in module shap.explainers._tree object:\n\nclass Tree(shap.explainers._explainer.Explainer)\n | Tree(model, data=None, model_output='raw', feature_perturbation='interventional', feature_names=None, approximate=False, **deprecated_options)\n | \n | Uses Tree SHAP algorithms to explain the output of ensemble tree models.\n | \n | Tree SHAP is a fast and exact method to estimate SHAP values for tree models and ensembles of trees,\n | under several different possible assumptions about feature dependence. It depends on fast C++\n | implementations either inside an externel model package or in the local compiled C extention.\n | \n | Method resolution order:\n | Tree\n | shap.explainers._explainer.Explainer\n | shap._serializable.Serializable\n | builtins.object\n | \n | Methods defined here:\n | \n | __call__(self, X, y=None, interactions=False, check_additivity=True)\n | Explains the output of model(*args), where args is a list of parallel iteratable datasets.\n | \n | Note this default version could be an abstract method that is implemented by each algorithm-specific\n | subclass of Explainer. Descriptions of each subclasses' __call__ arguments\n | are available in their respective doc-strings.\n | \n | __init__(self, model, data=None, model_output='raw', feature_perturbation='interventional', feature_names=None, approximate=False, **deprecated_options)\n | Build a new Tree explainer for the passed model.\n | \n | Parameters\n | ----------\n | model : model object\n | The tree based machine learning model that we want to explain. XGBoost, LightGBM, CatBoost, Pyspark\n | and most tree-based scikit-learn models are supported.\n | \n | data : numpy.array or pandas.DataFrame\n | The background dataset to use for integrating out features. 
This argument is optional when\n | feature_perturbation=\"tree_path_dependent\", since in that case we can use the number of training\n | samples that went down each tree path as our background dataset (this is recorded in the model object).\n | \n | feature_perturbation : \"interventional\" (default) or \"tree_path_dependent\" (default when data=None)\n | Since SHAP values rely on conditional expectations we need to decide how to handle correlated\n | (or otherwise dependent) input features. The \"interventional\" approach breaks the dependencies between\n | features according to the rules dictated by causal inference (Janzing et al. 2019). Note that the\n | \"interventional\" option requires a background dataset and its runtime scales linearly with the size\n | of the background dataset you use. Anywhere from 100 to 1000 random background samples are good\n | sizes to use. The \"tree_path_dependent\" approach is to just follow the trees and use the number\n | of training examples that went down each leaf to represent the background distribution. This approach\n | does not require a background dataset and so is used by default when no background dataset is provided.\n | \n | model_output : \"raw\", \"probability\", \"log_loss\", or model method name\n | What output of the model should be explained. If \"raw\" then we explain the raw output of the\n | trees, which varies by model. For regression models \"raw\" is the standard output, for binary\n | classification in XGBoost this is the log odds ratio. If model_output is the name of a supported\n | prediction method on the model object then we explain the output of that model method name.\n | For example model_output=\"predict_proba\" explains the result of calling model.predict_proba.\n | If \"probability\" then we explain the output of the model transformed into probability space\n | (note that this means the SHAP values now sum to the probability output of the model). If \"logloss\"\n | then we explain the log base e of the model loss function, so that the SHAP values sum up to the\n | log loss of the model for each sample. This is helpful for breaking down model performance by feature.\n | Currently the probability and logloss options are only supported when feature_dependence=\"independent\".\n | \n | Examples\n | --------\n | See `Tree explainer examples <https://shap.readthedocs.io/en/latest/api_examples/explainers/Tree.html>`_\n | \n | assert_additivity(self, phi, model_output)\n | \n | shap_interaction_values(self, X, y=None, tree_limit=None)\n | Estimate the SHAP interaction values for a set of samples.\n | \n | Parameters\n | ----------\n | X : numpy.array, pandas.DataFrame or catboost.Pool (for catboost)\n | A matrix of samples (# samples x # features) on which to explain the model's output.\n | \n | y : numpy.array\n | An array of label values for each sample. Used when explaining loss functions (not yet supported).\n | \n | tree_limit : None (default) or int\n | Limit the number of trees used by the model. By default None means no use the limit of the\n | original model, and -1 means no limit.\n | \n | Returns\n | -------\n | array or list\n | For models with a single output this returns a tensor of SHAP values\n | (# samples x # features x # features). The matrix (# features x # features) for each sample sums\n | to the difference between the model output for that sample and the expected value of the model output\n | (which is stored in the expected_value attribute of the explainer). 
Each row of this matrix sums to the\n | SHAP value for that feature for that sample. The diagonal entries of the matrix represent the\n | \"main effect\" of that feature on the prediction and the symmetric off-diagonal entries represent the\n | interaction effects between all pairs of features for that sample. For models with vector outputs\n | this returns a list of tensors, one for each output.\n | \n | shap_values(self, X, y=None, tree_limit=None, approximate=False, check_additivity=True, from_call=False)\n | Estimate the SHAP values for a set of samples.\n | \n | Parameters\n | ----------\n | X : numpy.array, pandas.DataFrame or catboost.Pool (for catboost)\n | A matrix of samples (# samples x # features) on which to explain the model's output.\n | \n | y : numpy.array\n | An array of label values for each sample. Used when explaining loss functions.\n | \n | tree_limit : None (default) or int\n | Limit the number of trees used by the model. By default None means no use the limit of the\n | original model, and -1 means no limit.\n | \n | approximate : bool\n | Run fast, but only roughly approximate the Tree SHAP values. This runs a method\n | previously proposed by Saabas which only considers a single feature ordering. Take care\n | since this does not have the consistency guarantees of Shapley values and places too\n | much weight on lower splits in the tree.\n | \n | check_additivity : bool\n | Run a validation check that the sum of the SHAP values equals the output of the model. This\n | check takes only a small amount of time, and will catch potential unforeseen errors.\n | Note that this check only runs right now when explaining the margin of the model.\n | \n | Returns\n | -------\n | array or list\n | For models with a single output this returns a matrix of SHAP values\n | (# samples x # features). Each row sums to the difference between the model output for that\n | sample and the expected value of the model output (which is stored in the expected_value\n | attribute of the explainer when it is constant). 
For models with vector outputs this returns\n | a list of such matrices, one for each output.\n | \n | ----------------------------------------------------------------------\n | Static methods defined here:\n | \n | supports_model_with_masker(model, masker)\n | Determines if this explainer can handle the given model.\n | \n | This is an abstract static method meant to be implemented by each subclass.\n | \n | ----------------------------------------------------------------------\n | Methods inherited from shap.explainers._explainer.Explainer:\n | \n | explain_row(self, *row_args, max_evals, main_effects, error_bounds, outputs, silent, **kwargs)\n | Explains a single row and returns the tuple (row_values, row_expected_values, row_mask_shapes, main_effects).\n | \n | This is an abstract method meant to be implemented by each subclass.\n | \n | Returns\n | -------\n | tuple\n | A tuple of (row_values, row_expected_values, row_mask_shapes), where row_values is an array of the\n | attribution values for each sample, row_expected_values is an array (or single value) representing\n | the expected value of the model for each sample (which is the same for all samples unless there\n | are fixed inputs present, like labels when explaining the loss), and row_mask_shapes is a list\n | of all the input shapes (since the row_values is always flattened),\n | \n | save(self, out_file, model_saver='.save', masker_saver='.save')\n | Write the explainer to the given file stream.\n | \n | ----------------------------------------------------------------------\n | Class methods inherited from shap.explainers._explainer.Explainer:\n | \n | load(in_file, model_loader=<bound method Model.load of <class 'shap.models._model.Model'>>, masker_loader=<bound method Serializable.load of <class 'shap.maskers._masker.Masker'>>, instantiate=True) from builtins.type\n | Load an Explainer from the given file stream.\n | \n | Parameters\n | ----------\n | in_file : The file stream to load objects from.\n | \n | ----------------------------------------------------------------------\n | Data descriptors inherited from shap._serializable.Serializable:\n | \n | __dict__\n | dictionary for instance variables (if defined)\n | \n | __weakref__\n | list of weak references to the object (if defined)\n\n" ], [ "shap_values[0][0]", "_____no_output_____" ], [ "# shap_values.values = shap_values.values[:,:,1]\n# shap_values.base_values = shap_values.base_values[:,1]\n\nshap.plots.waterfall(explainer.expected_value[0], shap_values[0], df.iloc[0])", "_____no_output_____" ] ], [ [ "# Catboost", "_____no_output_____" ] ], [ [ "from backend.api.prediction import initialize_pipeline\nconfig_path = \"/home/joseph/Coding/ml_projects/earthquake_forecasting/backend/config.yml\"\nlgb_pipeline = initialize_pipeline(config_path, \"catboost\")\nlgb_pipeline", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
cb773919064b9560026d95e1db4fd44e0e697364
13,869
ipynb
Jupyter Notebook
dataproc/notebooks/naive.ipynb
MAhsanAkhtar/BDA_TERM_PROJECT
68b0deb2b126f81924997b7d9f8c6c1ad53bbac0
[ "MIT" ]
29
2018-01-08T21:31:12.000Z
2022-02-14T01:11:01.000Z
dataproc/notebooks/naive.ipynb
MAhsanAkhtar/BDA_TERM_PROJECT
68b0deb2b126f81924997b7d9f8c6c1ad53bbac0
[ "MIT" ]
null
null
null
dataproc/notebooks/naive.ipynb
MAhsanAkhtar/BDA_TERM_PROJECT
68b0deb2b126f81924997b7d9f8c6c1ad53bbac0
[ "MIT" ]
9
2018-01-09T21:04:16.000Z
2021-10-18T15:50:35.000Z
28.654959
1,238
0.55159
[ [ [ "from collections import defaultdict\nimport pyspark.sql.types as stypes\nimport operator\nimport math", "_____no_output_____" ], [ "d = sc.textFile(\"gs://lbanor/dataproc_example/data/2017-11-01\").zipW", "_____no_output_____" ], [ "r = (sc.textFile(\"gs://lbanor/dataproc_example/data/2017-11-01\").zipWithIndex()\n .filter(lambda x: x[1] > 0)\n .map(lambda x: x[0].split(','))\n .map(lambda x: (x[0], (x[1], 0.5 if x[2] == '1' else 2 if x[2] == '2' else 6)))\n .groupByKey().mapValues(list)\n .flatMap(lambda x: aggregate_skus(x)))", "_____no_output_____" ], [ "print(r.collect()[:10])", "_____no_output_____" ], [ "print(r.collect()[:10])", "_____no_output_____" ], [ "d2 = spark.read.csv(\"gs://lbanor/dataproc_example/data/2017-11-01\", header=True)", "_____no_output_____" ], [ "t = sc.parallelize([('1', 'sku0', 1), ('2', 'sku2', 2), ('1', 'sku1', 1)])", "_____no_output_____" ], [ "t.zipWithIndex().map(lambda x: (x[0][0], (x[0][1], x[0][2]))).groupByKey().mapValues(list).collect()[:10]", "_____no_output_____" ], [ "def aggregate_skus(row):\n \"\"\"Aggregates skus from customers and their respective scores\n :type row: list\n :param row: list having values [user, (sku, score)]\n :rtype: list\n :returns: `yield` on [user, (sku, sum(score))]\n \"\"\"\n d = defaultdict(float)\n for inner_row in row[1]:\n d[inner_row[0]] += inner_row[1]\n yield (row[0], list(d.items()))", "_____no_output_____" ], [ "r = d2.rdd.collect()[:10]", "_____no_output_____" ], [ "r[0].user", "_____no_output_____" ], [ "print(r.flatMap(lambda x: aggregate_skus(x)).collect()[:10])", "_____no_output_____" ], [ "r.toDF(schema=_load_users_matrix_schema()).write.json('gs://lbanor/dataproc_example/intermediary/2017-11-01')", "_____no_output_____" ], [ "def _load_users_matrix_schema():\n \"\"\"Loads schema with data type [user, [(sku, score), (sku, score)]]\n :rtype: `pyspark.sql.type.StructType`\n :returns: schema speficiation for user -> (sku, score) data.\n \"\"\"\n return stypes.StructType(fields=[\n stypes.StructField(\"user\", stypes.StringType()),\n stypes.StructField('interactions', stypes.ArrayType(\n stypes.StructType(fields=[stypes.StructField('item', \n stypes.StringType()), stypes.StructField('score', \n stypes.FloatType())])))])", "_____no_output_____" ], [ "dir()", "_____no_output_____" ], [ "t = sc.parallelize([[0, [1, 2]], [0, [3]]])", "_____no_output_____" ], [ "print(t.collect())", "_____no_output_____" ], [ "t.write.json?", "_____no_output_____" ], [ "t = spark.read.json('gs://lbanor/dataproc_example/intermediary/2017-11-02', schema=_load_users_matrix_schema())", "_____no_output_____" ], [ "t = spark.read.json('gs://lbanor/dataproc_example/intermediary/2017-11-02/*.gz')", "_____no_output_____" ], [ "t.rdd.map(lambda x: x).collect()[:10]", "_____no_output_____" ], [ "t.head(3)", "_____no_output_____" ], [ "t.rdd.reduceByKey(operator.add).collect()[:10]", "_____no_output_____" ], [ "print(t.reduceByKey(operator.add).collect())", "_____no_output_____" ], [ " data = (t.rdd\n .reduceByKey(operator.add)\n .flatMap(lambda x: aggregate_skus(x))\n .filter(lambda x: len(x[1]) > 1 and len(x[1]) < 10))", "_____no_output_____" ], [ "def _process_scores(row):\n \"\"\"After all user -> score aggregation is done, this method loops\n through each sku for a given user and yields its squared score so\n that we can compute the norm ``||c||`` for each sku column.\n\n :type row: list\n :param row: list of type [(user, (sku, score))]\n\n :rtype: tuple\n :returns: tuple of type (sku, (score ** 2))\n \"\"\"\n for inner_row in 
row[1]:\n yield (inner_row[0], inner_row[1] ** 2)\n", "_____no_output_____" ], [ "norms = {sku: norm for sku, norm in (data.flatMap(lambda x: _process_scores(x))\n .reduceByKey(operator.add)\n .map(lambda x: (x[0], math.sqrt(x[1])))\n .collect())}\n", "_____no_output_____" ], [ "data = (data\n .flatMap(lambda x: process_intersections(x, norms))\n .reduceByKey(operator.add)\n .collect()[:20])", "_____no_output_____" ], [ "data", "_____no_output_____" ], [ "def process_intersections(row, norms):\n for i in range(len(row[1])):\n for j in range(i + 1, len(row[1])):\n #yield row[1][i]\n yield ((row[1][i][0], row[1][j][0]), row[1][i][1] * row[1][j][1] / (norms[row[1][i][0]] * norms[row[1][j][0]]))", "_____no_output_____" ], [ "re = t.flatMap(lambda x: process_intersections(x))", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
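The PySpark notebook in the record above builds an item-to-item cosine similarity from per-user sku scores: it aggregates scores per user, computes a column norm ``||c||`` for every sku, and then sums the normalized pairwise products. A minimal plain-Python sketch of that same computation follows; the `interactions` map and its scores are made-up placeholders, not values taken from the notebook's data files.

import math
from collections import defaultdict
from itertools import combinations

# user -> aggregated (sku, score) pairs; the values here are illustrative only.
interactions = {
    'user1': [('sku0', 0.5), ('sku1', 2.0)],
    'user2': [('sku0', 2.0), ('sku2', 6.0)],
    'user3': [('sku1', 0.5), ('sku2', 0.5)],
}

# Column norms ||c|| per sku, the same role the `norms` dict plays in the notebook.
norm_sq = defaultdict(float)
for pairs in interactions.values():
    for sku, score in pairs:
        norm_sq[sku] += score ** 2
norms = {sku: math.sqrt(total) for sku, total in norm_sq.items()}

# Pairwise terms score_i * score_j / (||i|| * ||j||), summed over users.
similarity = defaultdict(float)
for pairs in interactions.values():
    for (sku_i, s_i), (sku_j, s_j) in combinations(sorted(pairs), 2):
        similarity[(sku_i, sku_j)] += s_i * s_j / (norms[sku_i] * norms[sku_j])

print(dict(similarity))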
cb77401e365f944040f447d29ba42d4f1f1b268f
3,662
ipynb
Jupyter Notebook
docs/1_languages/example_code/notebook/demo.ipynb
speters-github/cs-tech-primer
d87e4b8b90464df01839653f070a30f1a91c00c0
[ "Apache-2.0" ]
4
2021-09-01T16:54:41.000Z
2021-10-01T01:05:08.000Z
docs/1_languages/example_code/notebook/demo.ipynb
speters-github/cs-tech-primer
d87e4b8b90464df01839653f070a30f1a91c00c0
[ "Apache-2.0" ]
24
2021-09-07T14:56:00.000Z
2022-02-01T14:14:44.000Z
docs/1_languages/example_code/notebook/demo.ipynb
speters-github/cs-tech-primer
d87e4b8b90464df01839653f070a30f1a91c00c0
[ "Apache-2.0" ]
4
2021-09-01T01:06:41.000Z
2022-03-18T23:39:22.000Z
17.35545
83
0.451393
[ [ [ "# Notebook demo\n\nIt can do *a lot* of things\n\nThe idea is to have documentation, code, and output all in the same place.\n\nImages, too\n\n![An image](https://i.redd.it/ubib0j1unsq71.jpg)\n\nAnd $\\LaTeX$, too!\n\n$x = x + 1$", "_____no_output_____" ] ], [ [ "# do a math\nvalues = range(10)\nfor single in values:\n print(single ** 2)\n", "0\n1\n4\n9\n16\n25\n36\n49\n64\n81\n" ], [ "# show state issues\nx = 0", "_____no_output_____" ], [ "x += 1\nx", "_____no_output_____" ], [ "# get an IP address\nimport urllib\nimport json\nurl = \"https://api.ipify.org?format=json\"\npage = urllib.request.urlopen(url)\npageText = page.read()\nasDict = json.loads(pageText)\n\nprint(\"Your ip address is {}\".format(asDict['ip']))\n\n# and whatever is last will be shown, too\nasDict", "Your ip address is 172.84.169.47\n" ], [ "v = \"words\"\n", "_____no_output_____" ], [ "v = 1", "_____no_output_____" ], [ "v + 1", "_____no_output_____" ], [ "type(v)", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
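The demo record above fetches the caller's IP address from api.ipify.org with `urllib` and parses the JSON body by hand. A slightly more defensive sketch of that same pattern is shown below, adding only a request timeout and explicit decoding; the endpoint is the one the demo already uses.

import json
import urllib.request

URL = "https://api.ipify.org?format=json"

# Read the response with a timeout so a slow endpoint cannot hang the cell.
with urllib.request.urlopen(URL, timeout=10) as response:
    payload = json.loads(response.read().decode("utf-8"))

print("Your ip address is {}".format(payload["ip"]))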
cb7741c201c47421738bcca89ca7e131d9080eeb
364,187
ipynb
Jupyter Notebook
notebooks/practice.ipynb
ylin00/seizurevista
de4f167e217b06372e97fc9ac0553e4384953305
[ "MIT" ]
null
null
null
notebooks/practice.ipynb
ylin00/seizurevista
de4f167e217b06372e97fc9ac0553e4384953305
[ "MIT" ]
null
null
null
notebooks/practice.ipynb
ylin00/seizurevista
de4f167e217b06372e97fc9ac0553e4384953305
[ "MIT" ]
2
2021-01-22T06:58:08.000Z
2021-11-27T05:11:16.000Z
31.662928
27,420
0.433439
[ [ [ "# SQL", "_____no_output_____" ] ], [ [ "import psycopg2\nimport sys, os\nimport numpy as np\nimport pandas as pd\nimport example_psql as creds\nimport pandas.io.sql as psql", "_____no_output_____" ], [ "# Create connection to postgresql\nimport example_psql as creds\nfrom sqlalchemy import create_engine\nengine = create_engine(f'postgresql://{creds.PGUSER}:{creds.PGPASSWORD}@{creds.PGHOST}:5432/{creds.PGDATABASE}')", "_____no_output_____" ], [ "# Table1: EDFID\n# Field: edfid, path, montage, ...,\nimport sys\nsys.path.append('..')", "_____no_output_____" ], [ "from src.data.file_io import listdir_edfs\n\ndf = listdir_edfs('/Users/yanxlin/github/ids/tusz_1_5_2/edf/')\n\ndf = df.rename(columns = {'path7':'train_test'})\ndf.to_sql('directory', con=engine, if_exists='replace')", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ], [ "df = pd.read_table('/Users/yanxlin/github/ids/tusz_1_5_2/_DOCS/ref_train.txt', header=None, sep=' ', \n names =['token', 'time_start', 'time_end', 'label', 'prob']).assign(train_test='train')\ndf2 = pd.read_table('/Users/yanxlin/github/ids/tusz_1_5_2/_DOCS/ref_dev.txt', header=None, sep=' ', \n names =['token', 'time_start', 'time_end', 'label', 'prob']).assign(train_test='test')\ndf.append(df2).to_sql('seiz_bckg', engine, if_exists='replace')", "_____no_output_____" ], [ "df.append(df2).head()", "_____no_output_____" ], [ "# chop edf data into pieces and compute\n \n\n# read all edf\ndf = pd.read_sql_table('directory', engine).head(4)\n\nchunk_size = 10\ntoken, token_paths = [], []\n# clear sql DB table\nfor irow, row in df.iterrows():\n if irow % chunk_size != 0:\n token.append(row['token'])\n token_paths.append(row['token_path'])\n continue\n# else:\n# # get a list of features from token_path\n# df = token_path_to_data_frame(token_paths)\n \n# # append to sql DB\n# token, token_paths = [], []\n\nprint(token)\n\n\n# compute dataset and labels \n# save table: token time_abs time_rel features", "['00010418_s018_t009', '00010418_s018_t001', '00010418_s018_t000']\n" ], [ "from src.data import file_io\nfrom src.features import dataset_funcs\n# def get_features_():\n# tokens = pd.read_sql_table('directory', engine).loc(lambda df: df['train_test']=='train').head(2).loc[:, 'token_path']\n# # ds = file_io.make_dataset(tokens, 100, 100, 100)\n# # return dataset_funcs.get_features(ds)\n# return tokens\n# get_features_().head()\n\ntks = pd.read_sql(\"select token, token_path from directory where train_test = 'train' and tcp_type = '01_tcp_ar';\", engine)\nds, _ = file_io.make_dataset(tks.loc[:,'token_path'].head(1).to_numpy(), 100, 100, 100)\n\n\n", "_____no_output_____" ], [ "dataset_funcs.get_features(ds)", "_____no_output_____" ], [ "tk = tks.loc[:, 'token_path'].sample(100, random_state = 103).head(1).to_numpy()[0]\nintvs, lbls = file_io.load_tse_bi(tk)\nf, s, l = file_io.read_1_token(tk)\nintvs, lbls, np.shape(s)/np.mean(f)", "_____no_output_____" ], [ "from src.features.to_sql import __feature_1_token\n\nfsamp = 256\n\ntks = pd.read_sql(\"select token, token_path from directory where train_test = 'train' and tcp_type = '01_tcp_ar';\",\n SQLengine)\n\npd.concat([__feature_1_token(Series['token_path'], fsamp=fsamp).assign(token = Series['token'])\n for (index, Series) in tks.head(1).iterrows()])\n\n", "_____no_output_____" ], [ "# timestamps = range(1, 500)\n# rt = intvs_[-1] - np.array(list(reversed(timestamps)))\n# rit = intvs_[-1] - np.array(list(reversed([0] + list(intvs_)[:-1])))\n# rlb = list(reversed(lbls))\n# rt, rit, rlb\n# 
list(reversed(post_sezure_s(rt, rit, rlb)))\n# pres_seizure_s(timestamps, intvs_, lbls)\n# res = post_sezure_s(rt, rit, list(reversed(lbls)))\n", "_____no_output_____" ], [ "df = dataset_funcs.get_features(ds)", "_____no_output_____" ], [ "from src.data import label\n\ndf.assign(post = lambda df: label.post_sezure_s(df.index+1, intvs_, lbls),\n pres = lambda df: label.pres_seizure_s(df.index+1, intvs_, lbls)).head(390)", "_____no_output_____" ] ], [ [ "#### backup", "_____no_output_____" ] ], [ [ "## ****** LOAD PSQL DATABASE ***** ##\n\n\n# Set up a connection to the postgres server.\nconn_string = \"host=\"+ creds.PGHOST +\" port=\"+ \"5432\" +\" dbname=\"+ creds.PGDATABASE +\" user=\" + creds.PGUSER \\\n+\" password=\"+ creds.PGPASSWORD\nconn=psycopg2.connect(conn_string)\nprint(\"Connected!\")\n\n# Create a cursor object\ncursor = conn.cursor()\n\n\ndef read_file(schema, table):\n\n sql_command = \"SELECT * FROM {}.{};\".format(str(schema), str(table))\n print (sql_command)\n\n # Load the data\n data = pd.read_sql(sql_command, conn)\n\n print(data.shape)\n return (data)", "Connected!\n" ] ], [ [ "# Numpy Scipy", "_____no_output_____" ] ], [ [ "np.array([\n [[1,2,3],\n [4,5,6],\n [7,8,9]],\n [[9.2,8.2,7.2],\n [6.2,5.2,4.2],\n [3.2,2.2,1.2]]\n ]).transpose([1,2,0]).reshape([9,2])", "_____no_output_____" ], [ "import numpy as np\nimport matplotlib.pyplot as plt\nfrom itertools import cycle\n\nfrom sklearn import svm, datasets\nfrom sklearn.metrics import roc_curve, auc\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import label_binarize\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom scipy import interp\nfrom sklearn.metrics import roc_auc_score\n\n# Import some data to play with\niris = datasets.load_iris()\nX = iris.data\ny = iris.target\n\nX.shape, y.shape", "_____no_output_____" ], [ "# Binarize the output\ny = label_binarize(y, classes=[0, 1, 2])\nn_classes = y.shape[1]", "_____no_output_____" ], [ "# Add noisy features to make the problem harder\nrandom_state = np.random.RandomState(0)\nn_samples, n_features = X.shape\nX = np.c_[X, random_state.randn(n_samples, 200 * n_features)]\n\n# shuffle and split training and test sets\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,\n random_state=0)\n\n# Learn to predict each class against the other\nclassifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,\n random_state=random_state))\ny_score = classifier.fit(X_train, y_train).decision_function(X_test)\n\ndef calc_roc(y_test, y_score):\n \"\"\"\n Args:\n y_test: 2-d np.array\n \"\"\"\n # Compute ROC curve and ROC area for each class\n fpr = dict()\n tpr = dict()\n roc_auc = dict()\n for i in range(y_test.shape[1]):\n fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])\n roc_auc[i] = auc(fpr[i], tpr[i])\n\n # Compute micro-average ROC curve and ROC area\n fpr[\"micro\"], tpr[\"micro\"], _ = roc_curve(y_test.ravel(), y_score.ravel())\n roc_auc[\"micro\"] = auc(fpr[\"micro\"], tpr[\"micro\"])\n return fpr, tpr, roc_auc", "_____no_output_____" ], [ "def plot_roc(fpr, tpr, roc_auc, title='Receiver operating characteristic example')\n \"\"\"ref: https://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html\"\"\"\n plt.figure()\n lw = 2\n plt.plot(fpr, tpr, color='darkorange',\n lw=lw, label='ROC curve (area = %0.2f)' % roc_auc)\n plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel('False Positive Rate')\n 
plt.ylabel('True Positive Rate')\n plt.title(title)\n plt.legend(loc=\"lower right\")\n plt.show()", "_____no_output_____" ], [ "import numpy as np\narr = np.arange(0, 10)\nnp.where(arr==1)[0]", "_____no_output_____" ], [ "import numpy as np\n\nimport pandas as pd\nimport glob\nimport os\nimport re\n\ntrain_path = '../tusz_1_5_2/edf/train'\ntcp_type = '01_tcp_ar'\npatient_group = '004'\npatient = '00000492'\nsession = 's003_2003_07_18'\ntoken = '00000492_s003_t001'\n\n\ndef listdir_edfs():\n \"\"\"Returns all edf filepaths in a DataFrame\n \n Returns:\n pd.DataFrame: filepaths\n \"\"\"\n columns=('path0','path1','path2','path3', 'tcp_type', 'patient_group', 'patient', 'session', 'token')\n\n filelist = glob.glob(os.path.join('../tusz_1_5_2/edf/train/01_tcp_ar', '**', '*.edf'), recursive=True)\n fparts = [re.split('/|[.]edf',filename)[:-1] for filename in filelist]\n\n df = pd.DataFrame({key:value for key, value in zip(tuple(columns), tuple(zip(*fparts)))})\n\n # A very complicated lambda function\n return df.assign(token_path = lambda x: eval(\"\"\"eval(\"+'/'+\".join([\"x.\"\"\"+'\",\"x.'.join(x.columns)+'\"]))'))\n\n\ndf = listdir_edfs()\ndf.shape\ndf.head()\n\n\n", "_____no_output_____" ], [ "import re\n[re.split('/|[.]edf',filename)[:-1] for filename in filelist]\n", "_____no_output_____" ] ], [ [ "# PostgreSQL", "_____no_output_____" ] ], [ [ "import numpy as np\narr = np.array([[1,2,3], [4,5,6], [7,8,9]])\narr[np.array((0,1)),:-1].shape", "_____no_output_____" ], [ "import matplotlib.pyplot as plt\nplt.plot(np.random.randn(1000))", "_____no_output_____" ] ], [ [ "# Panda.DataFrame", "_____no_output_____" ] ], [ [ "# panda data frame group by and aggregation\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "np.random.seed(0)\ndf = pd.DataFrame(dict(a=np.random.rand(10), b=np.random.rand(10), group=np.repeat(['A', 'B'], 5)))\n\ndf", "_____no_output_____" ], [ "# group by c=a+b\ndf.groupby('group').agg('sum')", "_____no_output_____" ], [ "res = []\nfor name, group in df.groupby('group'):\n res.append(group.assign(c = lambda x: x.a+x.b))\npd.concat(res)", "_____no_output_____" ], [ "# long to wide\npd.concat(res).assign(index=np.tile(np.arange(0,5),2)).pivot(index='index',columns='group', values=['a', 'b', 'c'])\n\n# df2.reset_index()", "_____no_output_____" ] ], [ [ "## Scikit learn", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import cross_val_score\nfrom sklearn.datasets import make_blobs\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import ExtraTreesClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nimport numpy as np\n\nX, y = make_blobs(n_samples=10000, n_features=10, centers=100,\n random_state=0)\n\nnp.shape(X), np.shape(y)", "_____no_output_____" ], [ "from sklearn import preprocessing\nX_scaled = preprocessing.scale(X)\nX[0:5], X_scaled[0:5]", "_____no_output_____" ], [ "clf = DecisionTreeClassifier(max_depth=None, min_samples_split=2,\n random_state=0)\nscores = cross_val_score(clf, X, y, cv=5)\nscores.mean()", "_____no_output_____" ], [ "clf = RandomForestClassifier(n_estimators=10, max_features='sqrt', max_depth=None,\n min_samples_split=2, random_state=0)\nscores = cross_val_score(clf, X, y, cv=5)\nscores.mean()", "_____no_output_____" ], [ "clf = ExtraTreesClassifier(n_estimators=10, max_depth=None,\n min_samples_split=2, random_state=0)\nscores = cross_val_score(clf, X, y, cv=5)\nscores.mean() > 0.999", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
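The scratch notebook in the record above builds one-vs-rest ROC curves for the iris data and drafts a `plot_roc` helper whose `def` line is missing its closing colon. Below is a self-contained, runnable sketch of that ROC workflow for a single class; it swaps the notebook's SVM one-vs-rest classifier for a plain logistic regression purely to keep the sketch short, so treat the modelling choice as illustrative rather than a reproduction of the notebook's setup.

import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import auc, roc_curve
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import label_binarize

# Iris, binarized to "class 0 vs the rest".
X, y = datasets.load_iris(return_X_y=True)
y = label_binarize(y, classes=[0, 1, 2])[:, 0]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=0)

scores = LogisticRegression(max_iter=1000).fit(X_train, y_train).decision_function(X_test)
fpr, tpr, _ = roc_curve(y_test, scores)
roc_auc = auc(fpr, tpr)

def plot_roc(fpr, tpr, roc_auc, title='Receiver operating characteristic'):
    """Plot a single ROC curve with its AUC in the legend."""
    plt.figure()
    plt.plot(fpr, tpr, color='darkorange', lw=2,
             label='ROC curve (area = %0.2f)' % roc_auc)
    plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title(title)
    plt.legend(loc='lower right')
    plt.show()

plot_roc(fpr, tpr, roc_auc)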
cb774327104690ece7a78874f486585c0f31efe3
54,798
ipynb
Jupyter Notebook
docs/tutorials/sparse_federated_learning.ipynb
teo-milea/federated
ce0707a954a531860eb38864b44d7b748fd62aa7
[ "Apache-2.0" ]
null
null
null
docs/tutorials/sparse_federated_learning.ipynb
teo-milea/federated
ce0707a954a531860eb38864b44d7b748fd62aa7
[ "Apache-2.0" ]
null
null
null
docs/tutorials/sparse_federated_learning.ipynb
teo-milea/federated
ce0707a954a531860eb38864b44d7b748fd62aa7
[ "Apache-2.0" ]
null
null
null
40.292647
498
0.556024
[ [ [ "##### Copyright 2021 The TensorFlow Federated Authors.", "_____no_output_____" ] ], [ [ "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "_____no_output_____" ] ], [ [ "# Client-efficient large-model federated learning via `federated_select` and sparse aggregation\n", "_____no_output_____" ], [ "<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://www.tensorflow.org/federated/tutorials/sparse_federated_learning\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />View on TensorFlow.org</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/federated/blob/v0.20.0/docs/tutorials/sparse_federated_learning.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/federated/blob/v0.20.0/docs/tutorials/sparse_federated_learning.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\n </td>\n <td>\n <a href=\"https://storage.googleapis.com/tensorflow_docs/federated/docs/tutorials/sparse_federated_learning.ipynb\"><img src=\"https://www.tensorflow.org/images/download_logo_32px.png\" />Download notebook</a>\n </td>\n</table>", "_____no_output_____" ], [ "\n\nThis tutorial shows how TFF can be used to train a very large model where each client device only downloads and updates a small part of the model, using\n`tff.federated_select` and sparse aggregation. While this tutorial is fairly self-contained, the [`tff.federated_select` tutorial](https://www.tensorflow.org/federated/tutorials/federated_select) and [custom FL algorithms tutorial](https://www.tensorflow.org/federated/tutorials/building_your_own_federated_learning_algorithm) provide good introductions to some of the techniques used here.\n\nConcretely, in this tutorial we consider logistic regression for multi-label classification, predicting which \"tags\" are associated with a text string based on a bag-of-words feature representation. Importantly, communication and client-side computation costs are controlled by a fixed constant (`MAX_TOKENS_SELECTED_PER_CLIENT`), and *do not* scale with the overall vocabulary size, which could be extremely large in practical settings.", "_____no_output_____" ] ], [ [ "#@test {\"skip\": true}\n!pip install --quiet --upgrade tensorflow-federated\n!pip install --quiet --upgrade nest-asyncio\n\nimport nest_asyncio\nnest_asyncio.apply()", "_____no_output_____" ], [ "import collections\nimport itertools\nimport numpy as np\n\nfrom typing import Callable, List, Tuple\n\nimport tensorflow as tf\nimport tensorflow_federated as tff\ntff.backends.native.set_local_python_execution_context()", "_____no_output_____" ] ], [ [ "Each client will `federated_select` the rows of the model weights for at most this many unique tokens. 
This upper-bounds the size of the client's local model and the amount of server -> client (`federated_select`) and client - > server `(federated_aggregate`) communication performed.\n\nThis tutorial should still run correctly even if you set this as small as 1 (ensuring not all tokens from each client are selected) or to a large value, though model convergence may be effected.", "_____no_output_____" ] ], [ [ "MAX_TOKENS_SELECTED_PER_CLIENT = 6", "_____no_output_____" ] ], [ [ "We also define a few constants for various types. For this colab, a **token** is an integer identifier for a particular word after parsing the dataset. ", "_____no_output_____" ] ], [ [ "# There are some constraints on types\n# here that will require some explicit type conversions:\n# - `tff.federated_select` requires int32\n# - `tf.SparseTensor` requires int64 indices.\nTOKEN_DTYPE = tf.int64\nSELECT_KEY_DTYPE = tf.int32\n\n# Type for counts of token occurences.\nTOKEN_COUNT_DTYPE = tf.int32\n\n# A sparse feature vector can be thought of as a map\n# from TOKEN_DTYPE to FEATURE_DTYPE. \n# Our features are {0, 1} indicators, so we could potentially\n# use tf.int8 as an optimization.\nFEATURE_DTYPE = tf.int32", "_____no_output_____" ] ], [ [ "# Setting up the problem: Dataset and Model\n\nWe construct a tiny toy dataset for easy experimentation in this tutorial. However, the format of the dataset is compatible with [Federated StackOverflow](https://www.tensorflow.org/federated/api_docs/python/tff/simulation/datasets/stackoverflow/load_data), and \nthe [pre-processing](https://github.com/google-research/federated/blob/0a558bac8a724fc38175ff4f0ce46c7af3d24be2/utils/datasets/stackoverflow_tag_prediction.py) and [model architecture](https://github.com/google-research/federated/blob/49a43456aa5eaee3e1749855eed89c0087983541/utils/models/stackoverflow_lr_models.py) are adopted from the StackOverflow \ntag prediction problem of [*Adaptive Federated Optimization*](https://arxiv.org/abs/2003.00295).", "_____no_output_____" ], [ "## Dataset parsing and pre-processing", "_____no_output_____" ] ], [ [ "NUM_OOV_BUCKETS = 1\n\nBatchType = collections.namedtuple('BatchType', ['tokens', 'tags'])\n\ndef build_to_ids_fn(word_vocab: List[str],\n tag_vocab: List[str]) -> Callable[[tf.Tensor], tf.Tensor]:\n \"\"\"Constructs a function mapping examples to sequences of token indices.\"\"\"\n word_table_values = np.arange(len(word_vocab), dtype=np.int64)\n word_table = tf.lookup.StaticVocabularyTable(\n tf.lookup.KeyValueTensorInitializer(word_vocab, word_table_values),\n num_oov_buckets=NUM_OOV_BUCKETS)\n\n tag_table_values = np.arange(len(tag_vocab), dtype=np.int64)\n tag_table = tf.lookup.StaticVocabularyTable(\n tf.lookup.KeyValueTensorInitializer(tag_vocab, tag_table_values),\n num_oov_buckets=NUM_OOV_BUCKETS)\n\n def to_ids(example):\n \"\"\"Converts a Stack Overflow example to a bag-of-words/tags format.\"\"\"\n sentence = tf.strings.join([example['tokens'], example['title']],\n separator=' ')\n\n # We represent that label (output tags) densely.\n raw_tags = example['tags']\n tags = tf.strings.split(raw_tags, sep='|')\n tags = tag_table.lookup(tags)\n tags, _ = tf.unique(tags)\n tags = tf.one_hot(tags, len(tag_vocab) + NUM_OOV_BUCKETS)\n tags = tf.reduce_max(tags, axis=0)\n\n # We represent the features as a SparseTensor of {0, 1}s.\n words = tf.strings.split(sentence)\n tokens = word_table.lookup(words)\n tokens, _ = tf.unique(tokens)\n # Note: We could choose to use the word counts as the feature vector\n # instead of just {0, 1} 
values (see tf.unique_with_counts).\n tokens = tf.reshape(tokens, shape=(tf.size(tokens), 1))\n tokens_st = tf.SparseTensor(\n tokens,\n tf.ones(tf.size(tokens), dtype=FEATURE_DTYPE),\n dense_shape=(len(word_vocab) + NUM_OOV_BUCKETS,))\n tokens_st = tf.sparse.reorder(tokens_st)\n\n return BatchType(tokens_st, tags)\n\n return to_ids", "_____no_output_____" ], [ "def build_preprocess_fn(word_vocab, tag_vocab):\n\n @tf.function\n def preprocess_fn(dataset):\n to_ids = build_to_ids_fn(word_vocab, tag_vocab)\n # We *don't* shuffle in order to make this colab deterministic for\n # easier testing and reproducibility.\n # But real-world training should use `.shuffle()`.\n return dataset.map(to_ids, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n\n return preprocess_fn", "_____no_output_____" ] ], [ [ "## A tiny toy dataset\nWe construct a tiny toy dataset with a global vocabulary of 12 words and 3 clients. This tiny example is useful for testing edge cases (for example,\nwe have two clients with less than `MAX_TOKENS_SELECTED_PER_CLIENT = 6` distinct tokens, and one with more) and developing the code.\n \nHowever, the real-world use cases of this approach would be global vocabularies of 10s of millions or more, with perhaps 1000s of distinct tokens appearing on each client. Because the format of the data is the same, the extension to more realistic testbed problems, e.g. the `tff.simulation.datasets.stackoverflow.load_data()` dataset, should be straightforward.\n\nFirst, we define our word and tag vocabularies.", "_____no_output_____" ] ], [ [ "# Features\nFRUIT_WORDS = ['apple', 'orange', 'pear', 'kiwi']\nVEGETABLE_WORDS = ['carrot', 'broccoli', 'arugula', 'peas']\nFISH_WORDS = ['trout', 'tuna', 'cod', 'salmon']\nWORD_VOCAB = FRUIT_WORDS + VEGETABLE_WORDS + FISH_WORDS\n\n# Labels\nTAG_VOCAB = ['FRUIT', 'VEGETABLE', 'FISH']", "_____no_output_____" ] ], [ [ "Now, we create 3 clients with small local datasets. 
If you are running this tutorial in colab, it may be useful to use the \"mirror cell in tab\" feature to pin this cell and its output in order to interpret/check the output of the functions developed below.", "_____no_output_____" ] ], [ [ "preprocess_fn = build_preprocess_fn(WORD_VOCAB, TAG_VOCAB)\n\n\ndef make_dataset(raw):\n d = tf.data.Dataset.from_tensor_slices(\n # Matches the StackOverflow formatting\n collections.OrderedDict(\n tokens=tf.constant([t[0] for t in raw]),\n tags=tf.constant([t[1] for t in raw]),\n title=['' for _ in raw]))\n d = preprocess_fn(d)\n return d\n\n\n# 4 distinct tokens\nCLIENT1_DATASET = make_dataset([\n ('apple orange apple orange', 'FRUIT'),\n ('carrot trout', 'VEGETABLE|FISH'),\n ('orange apple', 'FRUIT'),\n ('orange', 'ORANGE|CITRUS') # 2 OOV tag\n])\n\n# 6 distinct tokens\nCLIENT2_DATASET = make_dataset([\n ('pear cod', 'FRUIT|FISH'),\n ('arugula peas', 'VEGETABLE'),\n ('kiwi pear', 'FRUIT'),\n ('sturgeon', 'FISH'), # OOV word\n ('sturgeon bass', 'FISH') # 2 OOV words\n])\n\n# A client with all possible words & tags (13 distinct tokens).\n# With MAX_TOKENS_SELECTED_PER_CLIENT = 6, we won't download the model\n# slices for all tokens that occur on this client.\nCLIENT3_DATASET = make_dataset([\n (' '.join(WORD_VOCAB + ['oovword']), '|'.join(TAG_VOCAB)),\n # Mathe the OOV token and 'salmon' occur in the largest number\n # of examples on this client:\n ('salmon oovword', 'FISH|OOVTAG')\n])\n\nprint('Word vocab')\nfor i, word in enumerate(WORD_VOCAB):\n print(f'{i:2d} {word}')\n\nprint('\\nTag vocab')\nfor i, tag in enumerate(TAG_VOCAB):\n print(f'{i:2d} {tag}')", "Word vocab\n 0 apple\n 1 orange\n 2 pear\n 3 kiwi\n 4 carrot\n 5 broccoli\n 6 arugula\n 7 peas\n 8 trout\n 9 tuna\n10 cod\n11 salmon\n\nTag vocab\n 0 FRUIT\n 1 VEGETABLE\n 2 FISH\n" ] ], [ [ "Define constants for the raw numbers of input features (tokens/words) and labels (post tags). Our actual input/output spaces are `NUM_OOV_BUCKETS = 1` larger because we add an OOV token / tag.", "_____no_output_____" ] ], [ [ "NUM_WORDS = len(WORD_VOCAB) \nNUM_TAGS = len(TAG_VOCAB)\n\nWORD_VOCAB_SIZE = NUM_WORDS + NUM_OOV_BUCKETS\nTAG_VOCAB_SIZE = NUM_TAGS + NUM_OOV_BUCKETS", "_____no_output_____" ] ], [ [ "Create batched versions of the datasets, and individual batches, which will be useful in testing code as we go.", "_____no_output_____" ] ], [ [ "batched_dataset1 = CLIENT1_DATASET.batch(2)\nbatched_dataset2 = CLIENT2_DATASET.batch(3)\nbatched_dataset3 = CLIENT3_DATASET.batch(2)\n\nbatch1 = next(iter(batched_dataset1))\nbatch2 = next(iter(batched_dataset2))\nbatch3 = next(iter(batched_dataset3))", "_____no_output_____" ] ], [ [ "## Define a model with sparse inputs\n\nWe use a simple independent logistic regression model for each tag.", "_____no_output_____" ] ], [ [ "def create_logistic_model(word_vocab_size: int, vocab_tags_size: int):\n\n model = tf.keras.models.Sequential([\n tf.keras.layers.InputLayer(input_shape=(word_vocab_size,), sparse=True),\n tf.keras.layers.Dense(\n vocab_tags_size,\n activation='sigmoid',\n kernel_initializer=tf.keras.initializers.zeros,\n # For simplicity, don't use a bias vector; this means the model\n # is a single tensor, and we only need sparse aggregation of\n # the per-token slices of the model. 
Generalizing to also handle\n # other model weights that are fully updated \n # (non-dense broadcast and aggregate) would be a good exercise.\n use_bias=False),\n ])\n\n return model", "_____no_output_____" ] ], [ [ "Let's make sure it works, first by making predictions:", "_____no_output_____" ] ], [ [ "model = create_logistic_model(WORD_VOCAB_SIZE, TAG_VOCAB_SIZE)\np = model.predict(batch1.tokens)\nprint(p)", "[[0.5 0.5 0.5 0.5]\n [0.5 0.5 0.5 0.5]]\n" ] ], [ [ "And some simple centralized training:", "_____no_output_____" ] ], [ [ "model.compile(optimizer=tf.keras.optimizers.Adagrad(learning_rate=0.001),\n loss=tf.keras.losses.BinaryCrossentropy())\nmodel.train_on_batch(batch1.tokens, batch1.tags)", "_____no_output_____" ] ], [ [ "# Building blocks for the federated computation\n \nWe will implement a simple version of the [Federated Averaging](https://arxiv.org/abs/1602.05629) algorithm with the key difference that each device only downloads a relevant subset of the model, and only contributes updates to that subset.\n \nWe use `M` as shorthand for `MAX_TOKENS_SELECTED_PER_CLIENT`. At a high level, one round of training involves these steps:\n\n 1. Each participating client scans over its local dataset, parsing the input strings and mapping them to the correct tokens (int indexes). This requires access to the global (large) dictionary (this could potentially be avoided using [feature hashing](https://en.wikipedia.org/wiki/Feature_hashing) techniques). We then sparsely count how many times each token occurs. If `U` unique tokens occur on device, we choose the `num_actual_tokens = min(U, M)` most frequent tokens to train.\n\n 1. The clients use `federated_select` to retrieve the model coefficients for the `num_actual_tokens` selected tokens from the server. Each model slice is a tensor of shape `(TAG_VOCAB_SIZE, )`, so the total data transmitted to the client is at most of size `TAG_VOCAB_SIZE * M` (see note below).\n\n 1. The clients construct a mapping `global_token -> local_token` where the local token (int index) is the index of the global token in the list of selected tokens.\n \n 1. The clients use a \"small\" version of the global model that only has coefficients for at most `M` tokens, from the range `[0, num_actual_tokens)`. The `global -> local` mapping is used to initialize the dense parameters of this model from the selected model slices.\n \n 1. Clients train their local model using SGD on data preprocessed with the `global -> local` mapping.\n \n 1. Clients turn the parameters of their local model into `IndexedSlices` updates using the `local -> global` mapping to index the rows. The server aggregates these updates using a sparse sum aggregation.\n \n 1. The server takes the (dense) result of the above aggregation, divides it by the number of clients participating, and applies the resulting average update to the global model.\n \nIn this section we construct the building blocks for these steps, which will then be combined in a final `federated_computation` that captures the full logic of one training round.\n \n> NOTE: The above description hides one technical detail: Both `federated_select` and the construction of the local model require statically known shapes, and so we cannot use the dynamic per-client `num_actual_tokens` size. Instead, we use the static value `M`, adding padding where needed. 
This does not impact that semantics of the algorithm.", "_____no_output_____" ], [ "### Count client tokens and decide which model slices to `federated_select`\n\nEach device needs to decide which \"slices\" of the model are relevant to its local training dataset. For our problem, we do this by (sparsely!) counting how many examples contain each token in the client training data set.\n", "_____no_output_____" ] ], [ [ "@tf.function\ndef token_count_fn(token_counts, batch):\n \"\"\"Adds counts from `batch` to the running `token_counts` sum.\"\"\"\n # Sum across the batch dimension.\n flat_tokens = tf.sparse.reduce_sum(\n batch.tokens, axis=0, output_is_sparse=True)\n flat_tokens = tf.cast(flat_tokens, dtype=TOKEN_COUNT_DTYPE)\n return tf.sparse.add(token_counts, flat_tokens)", "_____no_output_____" ], [ "# Simple tests\n# Create the initial zero token counts using empty tensors.\ninitial_token_counts = tf.SparseTensor(\n indices=tf.zeros(shape=(0, 1), dtype=TOKEN_DTYPE),\n values=tf.zeros(shape=(0,), dtype=TOKEN_COUNT_DTYPE),\n dense_shape=(WORD_VOCAB_SIZE,))\n\nclient_token_counts = batched_dataset1.reduce(initial_token_counts,\n token_count_fn)\ntokens = tf.reshape(client_token_counts.indices, (-1,)).numpy()\nprint('tokens:', tokens)\nnp.testing.assert_array_equal(tokens, [0, 1, 4, 8])\n# The count is the number of *examples* in which the token/word\n# occurs, not the total number of occurences, since we still featurize\n# multiple occurences in the same example as a \"1\".\ncounts = client_token_counts.values.numpy()\nprint('counts:', counts)\nnp.testing.assert_array_equal(counts, [2, 3, 1, 1])", "tokens: [0 1 4 8]\ncounts: [2 3 1 1]\n" ] ], [ [ "We will select the model parameters corresponding to the `MAX_TOKENS_SELECTED_PER_CLIENT` most frequently occuring tokens on device. If\nfewer than this many tokens occur on device, we pad the list to enable the use\nof `federated_select`.\n \nNote that other strategies are possibly better, for example, randomly selecting tokens (perhaps based on their occurrence probability). 
This would ensure that all slices of the model (for which the client has data) have some chance of being updated.", "_____no_output_____" ] ], [ [ "@tf.function\ndef keys_for_client(client_dataset, max_tokens_per_client):\n \"\"\"Computes a set of max_tokens_per_client keys.\"\"\"\n initial_token_counts = tf.SparseTensor(\n indices=tf.zeros((0, 1), dtype=TOKEN_DTYPE),\n values=tf.zeros((0,), dtype=TOKEN_COUNT_DTYPE),\n dense_shape=(WORD_VOCAB_SIZE,))\n client_token_counts = client_dataset.reduce(initial_token_counts,\n token_count_fn)\n # Find the most-frequently occuring tokens\n tokens = tf.reshape(client_token_counts.indices, shape=(-1,))\n counts = client_token_counts.values\n perm = tf.argsort(counts, direction='DESCENDING')\n tokens = tf.gather(tokens, perm)\n counts = tf.gather(counts, perm)\n num_raw_tokens = tf.shape(tokens)[0]\n actual_num_tokens = tf.minimum(max_tokens_per_client, num_raw_tokens)\n selected_tokens = tokens[:actual_num_tokens]\n paddings = [[0, max_tokens_per_client - tf.shape(selected_tokens)[0]]]\n padded_tokens = tf.pad(selected_tokens, paddings=paddings)\n # Make sure the type is statically determined\n padded_tokens = tf.reshape(padded_tokens, shape=(max_tokens_per_client,))\n\n # We will pass these tokens as keys into `federated_select`, which\n # requires SELECT_KEY_DTYPE=tf.int32 keys.\n padded_tokens = tf.cast(padded_tokens, dtype=SELECT_KEY_DTYPE)\n return padded_tokens, actual_num_tokens", "_____no_output_____" ], [ "# Simple test\n\n# Case 1: actual_num_tokens > max_tokens_per_client\nselected_tokens, actual_num_tokens = keys_for_client(batched_dataset1, 3)\nassert tf.size(selected_tokens) == 3\nassert actual_num_tokens == 3\n\n# Case 2: actual_num_tokens < max_tokens_per_client\nselected_tokens, actual_num_tokens = keys_for_client(batched_dataset1, 10)\nassert tf.size(selected_tokens) == 10\nassert actual_num_tokens == 4", "_____no_output_____" ] ], [ [ "### Map global tokens to local tokens\nThe above selection gives us a dense set of tokens in the range `[0, actual_num_tokens)` which we will use for the on-device model. However, the dataset we read has tokens from the much larger global vocabulary range `[0, WORD_VOCAB_SIZE)`. \n\nThus, we need to map the global tokens to their corresponding local tokens. 
The\nlocal token ids are simply given by the indexes into the `selected_tokens` tensor computed in the previous step.", "_____no_output_____" ] ], [ [ "@tf.function\ndef map_to_local_token_ids(client_data, client_keys):\n global_to_local = tf.lookup.StaticHashTable(\n # Note int32 -> int64 maps are not supported\n tf.lookup.KeyValueTensorInitializer(\n keys=tf.cast(client_keys, dtype=TOKEN_DTYPE),\n # Note we need to use tf.shape, not the static \n # shape client_keys.shape[0]\n values=tf.range(0, limit=tf.shape(client_keys)[0],\n dtype=TOKEN_DTYPE)),\n # We use -1 for tokens that were not selected, which can occur for clients\n # with more than MAX_TOKENS_SELECTED_PER_CLIENT distinct tokens.\n # We will simply remove these invalid indices from the batch below.\n default_value=-1)\n\n def to_local_ids(sparse_tokens):\n indices_t = tf.transpose(sparse_tokens.indices)\n batch_indices = indices_t[0] # First column\n tokens = indices_t[1] # Second column\n tokens = tf.map_fn(\n lambda global_token_id: global_to_local.lookup(global_token_id), tokens)\n # Remove tokens that aren't actually available (looked up as -1):\n available_tokens = tokens >= 0\n tokens = tokens[available_tokens]\n batch_indices = batch_indices[available_tokens]\n\n updated_indices = tf.transpose(\n tf.concat([[batch_indices], [tokens]], axis=0))\n st = tf.sparse.SparseTensor(\n updated_indices,\n tf.ones(tf.size(tokens), dtype=FEATURE_DTYPE),\n dense_shape=sparse_tokens.dense_shape)\n st = tf.sparse.reorder(st)\n return st\n\n return client_data.map(lambda b: BatchType(to_local_ids(b.tokens), b.tags))", "_____no_output_____" ], [ "# Simple test\nclient_keys, actual_num_tokens = keys_for_client(\n batched_dataset3, MAX_TOKENS_SELECTED_PER_CLIENT)\nclient_keys = client_keys[:actual_num_tokens]\n\nd = map_to_local_token_ids(batched_dataset3, client_keys)\nbatch = next(iter(d))\nall_tokens = tf.gather(batch.tokens.indices, indices=1, axis=1)\n# Confirm we have local indices in the range [0, MAX):\nassert tf.math.reduce_max(all_tokens) < MAX_TOKENS_SELECTED_PER_CLIENT\nassert tf.math.reduce_max(all_tokens) >= 0", "_____no_output_____" ] ], [ [ "### Train the local (sub)model on each client\n\nNote `federated_select` will return the selected slices as a `tf.data.Dataset` in the same order as the selection keys. 
So, we first define a utility function to take such a Dataset and convert it to a single dense tensor which can be used as the model weights of the client model.", "_____no_output_____" ] ], [ [ "@tf.function\ndef slices_dataset_to_tensor(slices_dataset):\n \"\"\"Convert a dataset of slices to a tensor.\"\"\"\n # Use batching to gather all of the slices into a single tensor.\n d = slices_dataset.batch(MAX_TOKENS_SELECTED_PER_CLIENT,\n drop_remainder=False)\n iter_d = iter(d)\n tensor = next(iter_d)\n # Make sure we have consumed everything\n opt = iter_d.get_next_as_optional()\n tf.Assert(tf.logical_not(opt.has_value()), data=[''], name='CHECK_EMPTY')\n return tensor", "_____no_output_____" ], [ "# Simple test\nweights = np.random.random(\n size=(MAX_TOKENS_SELECTED_PER_CLIENT, TAG_VOCAB_SIZE)).astype(np.float32)\nmodel_slices_as_dataset = tf.data.Dataset.from_tensor_slices(weights)\nweights2 = slices_dataset_to_tensor(model_slices_as_dataset)\nnp.testing.assert_array_equal(weights, weights2)", "_____no_output_____" ] ], [ [ "We now have all the components we need to define a simple local training loop which will run on each client.", "_____no_output_____" ] ], [ [ "@tf.function\ndef client_train_fn(model, client_optimizer,\n model_slices_as_dataset, client_data,\n client_keys, actual_num_tokens):\n \n initial_model_weights = slices_dataset_to_tensor(model_slices_as_dataset)\n assert len(model.trainable_variables) == 1\n model.trainable_variables[0].assign(initial_model_weights)\n\n # Only keep the \"real\" (unpadded) keys.\n client_keys = client_keys[:actual_num_tokens]\n \n client_data = map_to_local_token_ids(client_data, client_keys)\n\n loss_fn = tf.keras.losses.BinaryCrossentropy()\n for features, labels in client_data:\n with tf.GradientTape() as tape:\n predictions = model(features)\n loss = loss_fn(labels, predictions)\n grads = tape.gradient(loss, model.trainable_variables)\n client_optimizer.apply_gradients(zip(grads, model.trainable_variables))\n\n model_weights_delta = model.trainable_weights[0] - initial_model_weights\n model_weights_delta = tf.slice(model_weights_delta, begin=[0, 0], \n size=[actual_num_tokens, -1])\n return client_keys, model_weights_delta", "_____no_output_____" ], [ "# Simple test\n# Note if you execute this cell a second time, you need to also re-execute\n# the preceeding cell to avoid \"tf.function-decorated function tried to \n# create variables on non-first call\" errors.\non_device_model = create_logistic_model(MAX_TOKENS_SELECTED_PER_CLIENT,\n TAG_VOCAB_SIZE)\nclient_optimizer = tf.keras.optimizers.SGD(learning_rate=0.001)\nclient_keys, actual_num_tokens = keys_for_client(\n batched_dataset2, MAX_TOKENS_SELECTED_PER_CLIENT)\n\nmodel_slices_as_dataset = tf.data.Dataset.from_tensor_slices(\n np.zeros((MAX_TOKENS_SELECTED_PER_CLIENT, TAG_VOCAB_SIZE),\n dtype=np.float32))\n\nkeys, delta = client_train_fn(\n on_device_model,\n client_optimizer,\n model_slices_as_dataset,\n client_data=batched_dataset3,\n client_keys=client_keys,\n actual_num_tokens=actual_num_tokens)\n\nprint(delta)", "_____no_output_____" ] ], [ [ "### Aggregate IndexedSlices\nWe use `tff.federated_aggregate` to construct a federated sparse sum for `IndexedSlices`. This simple implementation has the constraint that the\n`dense_shape` is known statically in advance. 
Note also that this sum is only *semi-sparse*, in the sense that the client -> server communication is sparse, but the server maintains a dense representation of the sum in `accumulate` and `merge`, and outputs this dense representation.\n", "_____no_output_____" ] ], [ [ "def federated_indexed_slices_sum(slice_indices, slice_values, dense_shape):\n \"\"\"\n Sumes IndexedSlices@CLIENTS to a dense @SERVER Tensor.\n\n Intermediate aggregation is performed by converting to a dense representation,\n which may not be suitable for all applications.\n\n Args:\n slice_indices: An IndexedSlices.indices tensor @CLIENTS.\n slice_values: An IndexedSlices.values tensor @CLIENTS.\n dense_shape: A statically known dense shape.\n\n Returns:\n A dense tensor placed @SERVER representing the sum of the client's\n IndexedSclies.\n \"\"\"\n slices_dtype = slice_values.type_signature.member.dtype\n zero = tff.tf_computation(\n lambda: tf.zeros(dense_shape, dtype=slices_dtype))()\n\n @tf.function\n def accumulate_slices(dense, client_value):\n indices, slices = client_value\n # There is no built-in way to add `IndexedSlices`, but \n # tf.convert_to_tensor is a quick way to convert to a dense representation\n # so we can add them.\n return dense + tf.convert_to_tensor(\n tf.IndexedSlices(slices, indices, dense_shape))\n\n\n return tff.federated_aggregate(\n (slice_indices, slice_values),\n zero=zero,\n accumulate=tff.tf_computation(accumulate_slices),\n merge=tff.tf_computation(lambda d1, d2: tf.add(d1, d2, name='merge')),\n report=tff.tf_computation(lambda d: d))\n", "_____no_output_____" ] ], [ [ "Construct a minimal `federated_computation` as a test", "_____no_output_____" ] ], [ [ "dense_shape = (6, 2)\nindices_type = tff.TensorType(tf.int64, (None,))\nvalues_type = tff.TensorType(tf.float32, (None, 2))\nclient_slice_type = tff.type_at_clients(\n (indices_type, values_type))\n\[email protected]_computation(client_slice_type)\ndef test_sum_indexed_slices(indices_values_at_client):\n indices, values = indices_values_at_client\n return federated_indexed_slices_sum(indices, values, dense_shape)\n\nprint(test_sum_indexed_slices.type_signature)", "({<int64[?],float32[?,2]>}@CLIENTS -> float32[6,2]@SERVER)\n" ], [ "x = tf.IndexedSlices(\n values=np.array([[2., 2.1], [0., 0.1], [1., 1.1], [5., 5.1]],\n dtype=np.float32),\n indices=[2, 0, 1, 5],\n dense_shape=dense_shape)\ny = tf.IndexedSlices(\n values=np.array([[0., 0.3], [3.1, 3.2]], dtype=np.float32),\n indices=[1, 3],\n dense_shape=dense_shape)\n\n# Sum one.\nresult = test_sum_indexed_slices([(x.indices, x.values)])\nnp.testing.assert_array_equal(tf.convert_to_tensor(x), result)\n\n# Sum two.\nexpected = [[0., 0.1], [1., 1.4], [2., 2.1], [3.1, 3.2], [0., 0.], [5., 5.1]]\nresult = test_sum_indexed_slices([(x.indices, x.values), (y.indices, y.values)])\nnp.testing.assert_array_almost_equal(expected, result)", "_____no_output_____" ] ], [ [ "# Putting it all together in a `federated_computation`\nWe now uses TFF to bind together the components into a `tff.federated_computation`.\n", "_____no_output_____" ] ], [ [ "DENSE_MODEL_SHAPE = (WORD_VOCAB_SIZE, TAG_VOCAB_SIZE)\nclient_data_type = tff.SequenceType(batched_dataset1.element_spec)\nmodel_type = tff.TensorType(tf.float32, shape=DENSE_MODEL_SHAPE)", "_____no_output_____" ] ], [ [ "We use a basic server training function based on Federated Averaging, applying the update with a server learning rate of 1.0. 
It is important that we apply an update (delta) to the model, rather than simply averaging client-supplied models, as otherwise if a given slice of the model wasn't trained on by any client on a given round its coefficients could be zeroed out.", "_____no_output_____" ] ], [ [ "@tff.tf_computation\ndef server_update(current_model_weights, update_sum, num_clients):\n average_update = update_sum / num_clients\n return current_model_weights + average_update", "_____no_output_____" ] ], [ [ "We need a couple more `tff.tf_computation` components:", "_____no_output_____" ] ], [ [ "# Function to select slices from the model weights in federated_select:\nselect_fn = tff.tf_computation(\n lambda model_weights, index: tf.gather(model_weights, index))\n\n\n# We need to wrap `client_train_fn` as a `tff.tf_computation`, making\n# sure we do any operations that might construct `tf.Variable`s outside\n# of the `tf.function` we are wrapping.\[email protected]_computation\ndef client_train_fn_tff(model_slices_as_dataset, client_data, client_keys,\n actual_num_tokens):\n # Note this is amaller than the global model, using\n # MAX_TOKENS_SELECTED_PER_CLIENT which is much smaller than WORD_VOCAB_SIZE.\n # W7e would like a model of size `actual_num_tokens`, but we\n # can't build the model dynamically, so we will slice off the padded\n # weights at the end.\n client_model = create_logistic_model(MAX_TOKENS_SELECTED_PER_CLIENT,\n TAG_VOCAB_SIZE)\n client_optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)\n return client_train_fn(client_model, client_optimizer,\n model_slices_as_dataset, client_data, client_keys,\n actual_num_tokens)\n\[email protected]_computation\ndef keys_for_client_tff(client_data):\n return keys_for_client(client_data, MAX_TOKENS_SELECTED_PER_CLIENT)", "_____no_output_____" ] ], [ [ "We're now ready to put all the pieces together!", "_____no_output_____" ] ], [ [ "@tff.federated_computation(\n tff.type_at_server(model_type), tff.type_at_clients(client_data_type))\ndef sparse_model_update(server_model, client_data):\n max_tokens = tff.federated_value(MAX_TOKENS_SELECTED_PER_CLIENT, tff.SERVER)\n keys_at_clients, actual_num_tokens = tff.federated_map(\n keys_for_client_tff, client_data)\n\n model_slices = tff.federated_select(keys_at_clients, max_tokens, server_model,\n select_fn)\n\n update_keys, update_slices = tff.federated_map(\n client_train_fn_tff,\n (model_slices, client_data, keys_at_clients, actual_num_tokens))\n\n dense_update_sum = federated_indexed_slices_sum(update_keys, update_slices,\n DENSE_MODEL_SHAPE)\n num_clients = tff.federated_sum(tff.federated_value(1.0, tff.CLIENTS))\n\n updated_server_model = tff.federated_map(\n server_update, (server_model, dense_update_sum, num_clients))\n\n return updated_server_model\n\n\nprint(sparse_model_update.type_signature)", "(<server_model=float32[13,4]@SERVER,client_data={<tokens=<indices=int64[?,2],values=int32[?],dense_shape=int64[2]>,tags=float32[?,4]>*}@CLIENTS> -> float32[13,4]@SERVER)\n" ] ], [ [ "# Let's train a model!\n\nNow that we have our training function, let's try it out.", "_____no_output_____" ] ], [ [ "server_model = create_logistic_model(WORD_VOCAB_SIZE, TAG_VOCAB_SIZE)\nserver_model.compile( # Compile to make evaluation easy.\n optimizer=tf.keras.optimizers.Adagrad(learning_rate=0.0), # Unused\n loss=tf.keras.losses.BinaryCrossentropy(),\n metrics=[ \n tf.keras.metrics.Precision(name='precision'),\n tf.keras.metrics.AUC(name='auc'),\n tf.keras.metrics.Recall(top_k=2, name='recall_at_2'),\n ])\n\ndef evaluate(model, 
dataset, name):\n metrics = model.evaluate(dataset, verbose=0)\n metrics_str = ', '.join([f'{k}={v:.2f}' for k, v in \n (zip(server_model.metrics_names, metrics))])\n print(f'{name}: {metrics_str}')", "_____no_output_____" ], [ "print('Before training')\nevaluate(server_model, batched_dataset1, 'Client 1')\nevaluate(server_model, batched_dataset2, 'Client 2')\nevaluate(server_model, batched_dataset3, 'Client 3')\n\nmodel_weights = server_model.trainable_weights[0]\n\nclient_datasets = [batched_dataset1, batched_dataset2, batched_dataset3]\nfor _ in range(10): # Run 10 rounds of FedAvg\n # We train on 1, 2, or 3 clients per round, selecting\n # randomly.\n cohort_size = np.random.randint(1, 4)\n clients = np.random.choice([0, 1, 2], cohort_size, replace=False)\n print('Training on clients', clients)\n model_weights = sparse_model_update(\n model_weights, [client_datasets[i] for i in clients])\nserver_model.set_weights([model_weights])\n\nprint('After training')\nevaluate(server_model, batched_dataset1, 'Client 1')\nevaluate(server_model, batched_dataset2, 'Client 2')\nevaluate(server_model, batched_dataset3, 'Client 3')", "Before training\nClient 1: loss=0.69, precision=0.00, auc=0.50, recall_at_2=0.60\nClient 2: loss=0.69, precision=0.00, auc=0.50, recall_at_2=0.50\nClient 3: loss=0.69, precision=0.00, auc=0.50, recall_at_2=0.40\nTraining on clients [0 1]\nTraining on clients [0 2 1]\nTraining on clients [2 0]\nTraining on clients [1 0 2]\nTraining on clients [2]\nTraining on clients [2 0]\nTraining on clients [1 2 0]\nTraining on clients [0]\nTraining on clients [2]\nTraining on clients [1 2]\nAfter training\nClient 1: loss=0.67, precision=0.80, auc=0.91, recall_at_2=0.80\nClient 2: loss=0.68, precision=0.67, auc=0.96, recall_at_2=1.00\nClient 3: loss=0.65, precision=1.00, auc=0.93, recall_at_2=0.80\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
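The aggregation step in the federated-learning notebook above hinges on the fact that a per-client update expressed as a `tf.IndexedSlices` over a statically known dense shape can be densified and summed on the server side. The sketch below checks that idea with plain TensorFlow only (no TFF), using made-up client updates over the same `(6, 2)` dense shape the notebook's test uses.

import tensorflow as tf

DENSE_SHAPE = (6, 2)

# Two fake "client" updates, each touching a different subset of rows.
client_updates = [
    tf.IndexedSlices(values=tf.constant([[2.0, 2.1], [5.0, 5.1]]),
                     indices=tf.constant([2, 5], dtype=tf.int64),
                     dense_shape=tf.constant(DENSE_SHAPE, dtype=tf.int64)),
    tf.IndexedSlices(values=tf.constant([[0.0, 0.3], [3.1, 3.2]]),
                     indices=tf.constant([1, 3], dtype=tf.int64),
                     dense_shape=tf.constant(DENSE_SHAPE, dtype=tf.int64)),
]

# Server-side accumulator: densify each sparse update and add it in.
total = tf.zeros(DENSE_SHAPE, dtype=tf.float32)
for update in client_updates:
    total += tf.convert_to_tensor(update)

print(total.numpy())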
cb774ad308befbad48490cb03840752d3f32dc3b
41,251
ipynb
Jupyter Notebook
docs/examples/jupyter-notebooks/map_titanic.ipynb
OLarionova-HORIS/lets-plot
89e30a574fe2de3da17186acdbe1cf427d66d87f
[ "MIT" ]
null
null
null
docs/examples/jupyter-notebooks/map_titanic.ipynb
OLarionova-HORIS/lets-plot
89e30a574fe2de3da17186acdbe1cf427d66d87f
[ "MIT" ]
null
null
null
docs/examples/jupyter-notebooks/map_titanic.ipynb
OLarionova-HORIS/lets-plot
89e30a574fe2de3da17186acdbe1cf427d66d87f
[ "MIT" ]
null
null
null
32.151988
306
0.439432
[ [ [ "<em><sub>This page is available as an executable or viewable <strong>Jupyter Notebook</strong>:</sub></em>\n<br/><br/>\n<a href=\"https://mybinder.org/v2/gh/JetBrains/lets-plot/v1.5.2demos1?filepath=docs%2Fexamples%2Fjupyter-notebooks%2Fmap_titanic.ipynb\"\n target=\"_parent\">\n <img align=\"left\"\n src=\"https://mybinder.org/badge_logo.svg\">\n</a>\n<a href=\"https://nbviewer.jupyter.org/github/JetBrains/lets-plot/blob/master/docs/examples/jupyter-notebooks/map_titanic.ipynb\"\n target=\"_parent\">\n <img align=\"right\"\n src=\"https://raw.githubusercontent.com/jupyter/design/master/logos/Badges/nbviewer_badge.png\"\n width=\"109\" height=\"20\">\n</a>\n<br/>\n<br/>", "_____no_output_____" ], [ "## Visualization of the Titanic's voyage.\n\nThe tasks completed in this notebook:\n- Load an interactive basemap layer.\n- Geocode Titanic's ports of of embarkation and show them as markers on the map.\n- Show the \"Titanic's site\" on the map.\n- Geocode the Titanic destination port and show on the map.\n- Connect all markers on the map with dashed lines.\n- Compute a simple statistic related to the ports of of embarkation and show the plot and the map on the same figure.\n\nWe will use the [Lets-Plot for Python](https://github.com/JetBrains/lets-plot#lets-plot-for-python) library for all charting and geocoding tasks in this notebook.\n\nThe Titanic dataset for this demo was downloaded from [\"Titanic: cleaned data\" dataset](https://www.kaggle.com/jamesleslie/titanic-cleaned-data?select=train_clean.csv) (train_clean.csv) available at [kaggle](https://www.kaggle.com).", "_____no_output_____" ] ], [ [ "from lets_plot import *\n\nLetsPlot.setup_html()", "_____no_output_____" ] ], [ [ "### The ports of embarkation.\n\nThe Titanic's ports of of embarkation were:\n- Southampton (UK)\n- Cherbourg (France)\n- Cobh (Ireland)\n\nLets find geographical coordinates of these cities using the `Lets-Plot` geocoding package.", "_____no_output_____" ] ], [ [ "from lets_plot.geo_data import *\n\nports_of_embarkation = ['Southampton', 'Cherbourg', 'Cobh']", "The geodata is provided by © OpenStreetMap contributors and is made available here under the Open Database License (ODbL).\n" ] ], [ [ "#### 1. Using the `regions` function.\n\nTo geocode our port cities we can try to call the `regions` function like this:\n\n regions(level='city', request=ports_of_embarkation)\nor its equivalent:\n\n regions_city(request=ports_of_embarkation)\n\nUnfortunately, this call results in a `ValueError`:\n\n>Multiple objects (6) were found for Southampton:\n>- Southampton (United Kingdom, England, South East)\n>- Southampton (United States of America, New York, Suffolk County)\n>- Southampton (United States of America, Massachusetts)\n>- Southampton Township (United States of America, New Jersey, Burlington County)\n>- Lower Southampton Township (United States of America, Pennsylvania, Bucks County)\n>- Upper Southampton Township (United States of America, Pennsylvania, Bucks County)\n>Multiple objects (2) were found for Cherbourg:\n>- Saint-Jean-de-Cherbourg (Canada, Québec, Bas-Saint-Laurent, La Matanie)\n>- Cherbourg-en-Cotentin (France, France métropolitaine, Normandie, Manche)\n", "_____no_output_____" ] ], [ [ "#\n# This call will fail with an error shown above.\n#\n#regions_city(ports_of_embarkation)", "_____no_output_____" ] ], [ [ "#### 2. 
Resolving geocoding ambiguity using the `within` parameter.\n\nWe can try to resolve ambiguity of the name \"Southampton\" (found in the United Kingdom and in the US)\nand the name \"Cherbourg\" (found in Canada and France) by narrowing the scope of search using \nparameter `within` and function `regions_country` like this:\n\n regions_city(ports_of_embarkation, within=regions_country(['France', 'UK']))\n\nBut this call results in another `ValueError`:\n\n>No objects were found for Cobh.", "_____no_output_____" ] ], [ [ "#\n# This call will fail with \"No objects were found for Cobh.\" error.\n#\n#regions_city(ports_of_embarkation, within=regions_country(['France', 'UK']))", "_____no_output_____" ] ], [ [ "An alternative way of using parameter `within` is to specify\nan array of names of all the countries. \n\nThe territory names must be in the same order \nas the names of the geocoded cities:", "_____no_output_____" ] ], [ [ "regions_city(ports_of_embarkation, within=['UK', 'France', 'Ireland'])", "_____no_output_____" ] ], [ [ "#### 3. Using `regions_builder` for advanced geocoding.\n\nThere are many situations where a simple call of the function `regions` \nwill not resolve all geocoding ambiguities.\n\nIn other cases, we might want to retrieve all objects matching a name and\nnot to treat names ambiguity as an error.\n\nThe `regions builder` object provides advanced capabilities in fine tuning of geocoding queries.\n\nLet's resolve ambiguity of names \"Southampton\" and \"Cherbourg\" with the help of `regions builder`.", "_____no_output_____" ] ], [ [ "ports_of_embarkation_geocoded = regions_builder(level='city', request=ports_of_embarkation) \\\n .where('Cherbourg', within='France') \\\n .where('Southampton', within='England') \\\n .build()\nports_of_embarkation_geocoded", "_____no_output_____" ] ], [ [ "### Markers on the interactive basemap.\n\nThe `Lets-Plot` API makes it easy to create an interactive basemap layer using its own vector tiles service or \nby configuring 3rd party ZXY raster tile providers.\n\nIn this notebook we will use raster tiles provided by [Wikimedia Foundation](https://foundation.wikimedia.org/wiki/Maps_Terms_of_Use).\n\nSimple markers (points) can be added to the map either via the `geom_point` layer\nor directly on the `livemap` base-layer.\n\nIn this demo we will add the ports of embarkation markers right to the `livemap` base-layer (using the `map` parameter)\nand, later, add the other markers and shapes via additional `geom` layers.", "_____no_output_____" ] ], [ [ "LetsPlot.set(maptiles_zxy(url='https://maps.wikimedia.org/osm-intl/{z}/{x}/{y}@2x.png'))\n\nbasemap = ggplot() + ggsize(800, 300) \\\n + geom_livemap(map=ports_of_embarkation_geocoded,\n size=7, \n shape=21, color='black', fill='yellow')\n\nbasemap", "_____no_output_____" ] ], [ [ "### The 'Titanic's site' marker", "_____no_output_____" ] ], [ [ "from shapely.geometry import Point, LineString\ntitanic_site = Point(-38.056641, 46.920255)\n\n# Add the marker using the `geom_point` geometry layer.\ntitanic_site_marker = geom_point(x=titanic_site.x, y = titanic_site.y, size=10, shape=9, color='red')\nbasemap + titanic_site_marker", "_____no_output_____" ] ], [ [ "### Connecting the markers on the map.\n\nThe `ports_of_embarkation_geocoded` variable in this demo is an object of type `Regions`. 
\n\nObject `Regions`, if necessary, can be tranfrormed to a `GeoDataFrame`\nby calling its `centroids()`, `boundaries()` or `limits()` method.\n\nTo create the Titanic's path we will use the `centroids()` method to obtain the points of embarkation and then append \nthe \"Titanic's site\" point to complete the polyline.", "_____no_output_____" ] ], [ [ "from geopandas import GeoSeries\nfrom geopandas import GeoDataFrame\n\n# The points of embarkation\nembarkation_points = ports_of_embarkation_geocoded.centroids().geometry\ntitanic_journey_points = embarkation_points.append(GeoSeries(titanic_site), ignore_index=True)\n\n# New GeoDataFrame containing a `LineString` geometry.\ntitanic_journey_gdf = GeoDataFrame(dict(geometry=[LineString(titanic_journey_points)]))\n\n# App the polyline using the `geom_path` layer.\ntitanic_path = geom_path(map=titanic_journey_gdf, color='dark-blue', linetype='dotted', size=1.2)\n\nbasemap + titanic_path + titanic_site_marker", "_____no_output_____" ] ], [ [ "### The last segment that Titanic didn't made.", "_____no_output_____" ] ], [ [ "# Geocoding of The New York City is a trivial task.\nNYC = regions_city(['New York']).centroids().geometry[0]\n\nmap_layers = titanic_path \\\n + geom_segment(x=titanic_site.x, y=titanic_site.y, \n xend=NYC.x, yend=NYC.y, \n color='white', linetype='dotted', size=1.2) \\\n + geom_point(x=NYC.x, y=NYC.y, size=7, shape=21, color='black', fill='white') \\\n + titanic_site_marker\n\nbasemap + map_layers", "_____no_output_____" ] ], [ [ "### The Titanic survival rates by the port of embarkation.", "_____no_output_____" ] ], [ [ "import pandas as pd", "_____no_output_____" ], [ "df = pd.read_csv(\"../data/titanic.csv\")\ndf.head()", "_____no_output_____" ] ], [ [ "In this Titanic dataset the column `Embarked`contains a single-letter codes of the Titanic's ports of embarkation:\n- S: Southampton (UK)\n- C: Cherbourg (France)\n- Q: Cobh (Ireland)\n\nLets visualize the `Survived` counts by the port of embarkation:", "_____no_output_____" ] ], [ [ "from lets_plot.mapping import as_discrete\n\nbars = ggplot(df) \\\n + geom_bar(aes('Embarked', fill=as_discrete('Survived')), position='dodge') \\\n + scale_fill_discrete(labels=['No', 'Yes']) \\\n + scale_x_discrete(labels=['Southampton', 'Cobh', 'Cherbourg'], limits=['S', 'C', 'Q'])\n\nbars + ggsize(800, 250)", "_____no_output_____" ] ], [ [ "### The final figure.", "_____no_output_____" ] ], [ [ "bars_settings = theme(axis_title='blank', \n axis_line='blank', \n axis_ticks_y='blank',\n axis_text_y='blank',\n legend_position=[1.12, 1.07],\n legend_justification=[1, 1]) + scale_x_discrete(expand=[0, 0.05])\n\n\nmap = ggplot() + ggsize(800, 300) \\\n + geom_livemap(map=ports_of_embarkation_geocoded.centroids(), \n size=8, \n shape=21, color='black', fill='yellow',\n zoom=4, location=[-12, 48])\n\nfig = GGBunch()\nfig.add_plot(map + map_layers, 0, 0)\nfig.add_plot(bars + bars_settings, 535, 135, 250, 150)\nfig", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb77575c8b885ce4002e31dd6fa37dfff0c57bad
118,495
ipynb
Jupyter Notebook
babi_visualization_UT_act_pondertime.ipynb
elliotthwang/bAbI-T2T
2667555c9b90bd1cf6bf0f112d72efd8c9aed888
[ "Apache-2.0" ]
33
2018-07-13T09:56:34.000Z
2021-04-22T14:12:55.000Z
babi_visualization_UT_act_pondertime.ipynb
elliotthwang/bAbI-T2T
2667555c9b90bd1cf6bf0f112d72efd8c9aed888
[ "Apache-2.0" ]
7
2018-07-19T09:52:46.000Z
2018-12-13T14:34:10.000Z
babi_visualization_UT_act_pondertime.ipynb
elliotthwang/bAbI-T2T
2667555c9b90bd1cf6bf0f112d72efd8c9aed888
[ "Apache-2.0" ]
10
2018-07-13T10:13:14.000Z
2022-02-22T02:25:41.000Z
172.985401
93,582
0.87667
[ [ [ "import os\nimport json\n\nimport tensorflow as tf\nimport numpy as np\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom matplotlib import cm\n\nfrom tensor2tensor import problems\nfrom tensor2tensor import models\nfrom tensor2tensor.bin import t2t_decoder # To register the hparams set\nfrom tensor2tensor.utils import registry\nfrom tensor2tensor.utils import trainer_lib\nfrom tensor2tensor.data_generators import babi_qa", "/home/mdehgha1/bin/anaconda3/envs/tf/lib/python3.6/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n from ._conv import register_converters as _register_converters\n" ] ], [ [ "## HParams", "_____no_output_____" ] ], [ [ "# HParams\nbabi_task_id = 'qa3'\nsubset = \"1k\"\nproblem_name = 'babi_qa_sentence_task' + babi_task_id.replace(\"qa\", \"\") + \"_\" + subset\nmodel_name = \"babi_r_transformer\"\nhparams_set = \"r_transformer_act_step_position_timing_tiny\"\n\ndata_dir = '~/babi/data/' + problem_name \n\n# PUT THE MODEL YOU WANT TO LOAD HERE!\nCHECKPOINT = '~/babi/output/' + problem_name+ '/' + model_name + '/' + hparams_set + '/'\nprint(CHECKPOINT)", "/cns/lu-d/home/dehghani/babi/output_visualization/babi_qa_sentence_single_task3_10k/babi_r_transformer/r_transformer_act_step_position_timing_tiny/\n" ], [ "_TASKS = {\n 'qa1': 'qa1_single-supporting-fact',\n 'qa2': 'qa2_two-supporting-facts',\n 'qa3': 'qa3_three-supporting-facts',\n 'qa4': 'qa4_two-arg-relations',\n 'qa5': 'qa5_three-arg-relations',\n 'qa6': 'qa6_yes-no-questions',\n 'qa7': 'qa7_counting',\n 'qa8': 'qa8_lists-sets',\n 'qa9': 'qa9_simple-negation',\n 'qa10': 'qa10_indefinite-knowledge',\n 'qa11': 'qa11_basic-coreference',\n 'qa12': 'qa12_conjunction',\n 'qa13': 'qa13_compound-coreference',\n 'qa14': 'qa14_time-reasoning',\n 'qa15': 'qa15_basic-deduction',\n 'qa16': 'qa16_basic-induction',\n 'qa17': 'qa17_positional-reasoning',\n 'qa18': 'qa18_size-reasoning',\n 'qa19': 'qa19_path-finding',\n 'qa20': 'qa20_agents-motivations'\n }\n\nmeta_data_filename = _TASKS[babi_task_id] + '-meta_data.json'\nmetadata_path = os.path.join(data_dir, meta_data_filename)\n\n\nFLAGS = tf.flags.FLAGS\nFLAGS.data_dir = data_dir\n\ntruncated_story_length = 130 if babi_task_id == 'qa3' else 70\n\nwith tf.gfile.GFile(metadata_path, mode='r') as f:\n metadata = json.load(f)\nmax_story_length = metadata['max_story_length']\nmax_sentence_length = metadata['max_sentence_length']\nmax_question_length = metadata['max_question_length']\n\nprint(max_story_length)\nprint(max_sentence_length)\nprint(max_question_length)", "130\n7\n8\n" ], [ "tf.reset_default_graph()\n\nclass bAbiACTVisualizer(object):\n \"\"\"Helper object for creating act visualizations.\"\"\"\n\n def __init__(\n self, hparams_set, model_name, data_dir, problem_name, beam_size=1):\n story, question, targets, samples, ponder_time = build_model(\n hparams_set, model_name, data_dir, problem_name, beam_size=beam_size)\n\n # Fetch the problem\n babi_problem = problems.problem(problem_name)\n encoders = babi_problem.feature_encoders(data_dir)\n\n self.story = story\n self.question = question\n self.targets = targets\n self.ponder_time = ponder_time\n self.samples = samples\n self.encoders = encoders\n\n def encode(self, story_str, question_str):\n \"\"\"Input str to features dict, ready for inference.\"\"\"\n \n story_str = 
babi_qa._normalize_string(story_str)\n question_str = babi_qa._normalize_string(question_str)\n story = story_str.strip().split('.')\n story = [self.encoders[babi_qa.FeatureNames.STORY].encode(sentence) \n for sentence in story[-truncated_story_length:]]\n question = self.encoders[babi_qa.FeatureNames.QUESTION].encode(question_str)\n \n for sentence in story:\n for _ in range(max_sentence_length - len(sentence)):\n sentence.append(babi_qa.PAD)\n assert len(sentence) == max_sentence_length\n\n for _ in range(max_story_length - len(story)):\n story.append([babi_qa.PAD for _ in range(max_sentence_length)])\n\n for _ in range(max_question_length - len(question)):\n question.append(babi_qa.PAD)\n\n assert len(story) == max_story_length\n assert len(question) == max_question_length \n\n story_flat = [token_id for sentence in story for token_id in sentence]\n \n batch_story = np.reshape(np.array(story_flat), \n [1, max_story_length, max_sentence_length, 1])\n batch_question = np.reshape(np.array(question), \n [1, 1, max_question_length, 1])\n return batch_story, batch_question\n\n def decode_story(self, integers):\n \"\"\"List of ints to str.\"\"\"\n integers = np.squeeze(integers).tolist()\n story = []\n for sent in integers:\n sent_decoded = self.encoders[babi_qa.FeatureNames.STORY].decode_list(sent)\n sent_decoded.append('.')\n story.append(sent_decoded)\n return story\n \n def decode_question(self, integers):\n \"\"\"List of ints to str.\"\"\"\n integers = np.squeeze(integers).tolist()\n return self.encoders[babi_qa.FeatureNames.QUESTION].decode_list(integers)\n \n def decode_targets(self, integers):\n \"\"\"List of ints to str.\"\"\"\n integers = np.squeeze(integers).tolist()\n return self.encoders[\"targets\"].decode([integers])\n\n def get_vis_data_from_string(self, sess, story_str, question_str):\n \"\"\"Constructs the data needed for visualizing ponder_time.\n\n Args:\n sess: A tf.Session object.\n input_string: The input setence to be visulized.\n\n Returns:\n Tuple of (\n output_string: The answer\n input_list: Tokenized input sentence.\n output_list: Tokenized answer.\n ponder_time: ponder_time matrices;\n )\n \"\"\"\n encoded_story, encoded_question = self.encode(story_str, question_str)\n\n # Run inference graph to get the label.\n out = sess.run(self.samples, {\n self.story: encoded_story,\n self.question: encoded_question,\n })\n\n # Run the decoded answer through the training graph to get the\n # ponder_time tensors.\n ponder_time = sess.run(self.ponder_time, {\n self.story: encoded_story,\n self.question: encoded_question,\n self.targets: np.reshape(out, [1, -1, 1, 1]),\n })\n \n output = self.decode_targets(out)\n story_list = self.decode_story(encoded_story)\n question_list = self.decode_question(encoded_question)\n \n return story_list, question_list, output, ponder_time\n\n\ndef build_model(hparams_set, model_name, data_dir, problem_name, beam_size=1):\n \"\"\"Build the graph required to featch the ponder_times.\n\n Args:\n hparams_set: HParams set to build the model with.\n model_name: Name of model.\n data_dir: Path to directory contatining training data.\n problem_name: Name of problem.\n beam_size: (Optional) Number of beams to use when decoding a traslation.\n If set to 1 (default) then greedy decoding is used.\n\n Returns:\n Tuple of (\n inputs: Input placeholder to feed in ids.\n targets: Targets placeholder to feed to th when fetching\n ponder_time.\n samples: Tensor representing the ids of the translation.\n ponder_time: Tensors representing the ponder_time.\n )\n 
\"\"\"\n hparams = trainer_lib.create_hparams(\n hparams_set, data_dir=data_dir, problem_name=problem_name)\n babi_model = registry.model(model_name)(\n hparams, tf.estimator.ModeKeys.EVAL)\n \n story = tf.placeholder(tf.int32, shape=(\n 1, max_story_length, max_sentence_length, 1), \n name=babi_qa.FeatureNames.STORY)\n question = tf.placeholder(tf.int32, shape=(\n 1, 1, max_question_length, 1), \n name=babi_qa.FeatureNames.QUESTION)\n targets = tf.placeholder(tf.int32, shape=(1, 1, 1, 1), name='targets')\n \n babi_model({\n babi_qa.FeatureNames.STORY: story,\n babi_qa.FeatureNames.QUESTION: question,\n 'targets': targets,\n })\n\n # Must be called after building the training graph, so that the dict will\n # have been filled with the ponder_time tensors. BUT before creating the\n # interence graph otherwise the dict will be filled with tensors from\n # inside a tf.while_loop from decoding and are marked unfetchable.\n ponder_time = get_ponder_mats(babi_model)\n\n with tf.variable_scope(tf.get_variable_scope(), reuse=True):\n samples = babi_model.infer({\n babi_qa.FeatureNames.STORY: story,\n babi_qa.FeatureNames.QUESTION: question,\n }, beam_size=beam_size)['outputs']\n\n return story, question, targets, samples, ponder_time\n\n\ndef get_ponder_mats(babi_model):\n \"\"\"Get's the tensors representing the ponder_time from a build model.\n\n The ponder_time are stored in a dict on the Transformer object while building\n the graph.\n\n Args:\n babi_model: Transformer object to fetch the ponder_time from.\n\n Returns:\n Tuple of ponder_time matrices\n \"\"\"\n# print([n.name for n in tf.get_default_graph().as_graph_def().node])\n attention_tensor_name = \"babi_r_transformer/parallel_0_5/babi_r_transformer/body/encoder/r_transformer_act/while/self_attention/multihead_attention/dot_product_attention/attention_weights\"\n ponder_time_tensor_name = \"babi_r_transformer/parallel_0_5/babi_r_transformer/body/enc_ponder_times:0\"\n ponder_time = tf.get_default_graph().get_tensor_by_name(ponder_time_tensor_name)\n\n\n return ponder_time", "_____no_output_____" ], [ "ponder_visualizer = bAbiACTVisualizer(hparams_set, model_name, data_dir, problem_name, beam_size=1)", "_____no_output_____" ], [ "tf.Variable(0, dtype=tf.int64, trainable=False, name='global_step')\n\nsess = tf.train.MonitoredTrainingSession(\n checkpoint_dir=CHECKPOINT,\n save_summaries_secs=0,\n)", "_____no_output_____" ], [ "if babi_task_id == 'qa1':\n# input_story = \"John travelled to the hallway.Mary journeyed to the bathroom.\"\n# input_question = \"Where is John?\" #hallway\n \n input_story = \"John travelled to the hallway.Mary journeyed to the bathroom.Daniel went back to the bathroom.John moved to the bedroom.\"\n input_question = \"Where is Mary?\" #bathroom\n\nelif babi_task_id == 'qa2':\n input_story = \"Mary got the milk there.John moved to the bedroom.Sandra went back to the kitchen.Mary travelled to the hallway.\"\n input_question = \"Where is the milk?\" #hallway\n \n# input_story = \"Mary got the milk there.John moved to the bedroom.Sandra went back to the kitchen.Mary travelled to the hallway.John got the football there.John went to the hallway.\"\n# input_question = \"Where is the football?\" #hallway\n\nelif babi_task_id == 'qa3':\n input_story = \"Mary got the milk.John moved to the bedroom.Daniel journeyed to the office.John grabbed the apple there.John got the football.John journeyed to the garden.Mary left the milk.John left the football.Daniel moved to the garden.Daniel grabbed the football.Mary moved to the 
hallway.Mary went to the kitchen.John put down the apple there.John picked up the apple.Sandra moved to the hallway.Daniel left the football there.Daniel took the football.John travelled to the kitchen.Daniel dropped the football.John dropped the apple.John grabbed the apple.John went to the office.Sandra went back to the bedroom.Sandra took the milk.John journeyed to the bathroom.John travelled to the office.Sandra left the milk.Mary went to the bedroom.Mary moved to the office.John travelled to the hallway.Sandra moved to the garden.Mary moved to the kitchen.Daniel took the football.Mary journeyed to the bedroom.Mary grabbed the milk there.Mary discarded the milk.John went to the garden.John discarded the apple there.\"\n input_question = \"Where was the apple before the bathroom?\" #office\n \n# input_story = \"Mary got the milk.John moved to the bedroom.Daniel journeyed to the office.John grabbed the apple there.John got the football.John journeyed to the garden.Mary left the milk.John left the football.Daniel moved to the garden.Daniel grabbed the football.Mary moved to the hallway.Mary went to the kitchen.John put down the apple there.John picked up the apple.Sandra moved to the hallway.Daniel left the football there.Daniel took the football.John travelled to the kitchen.Daniel dropped the football.John dropped the apple.John grabbed the apple.John went to the office.Sandra went back to the bedroom.Sandra took the milk.John journeyed to the bathroom.John travelled to the office.Sandra left the milk.Mary went to the bedroom.Mary moved to the office.John travelled to the hallway.Sandra moved to the garden.Mary moved to the kitchen.Daniel took the football.Mary journeyed to the bedroom.Mary grabbed the milk there.Mary discarded the milk.John went to the garden.John discarded the apple there.Sandra travelled to the bedroom.Daniel moved to the bathroom.\"\n# input_question = \"Where was the apple before the hallway?\" #office\n \n", "_____no_output_____" ], [ "story_text, question_text, output, ponder_time = ponder_visualizer.get_vis_data_from_string(sess, input_story, input_question)\n# print(output)\n# print(story_text)\n# print(question_text)\n\ninp_text = []\nfor sent in story_text:\n inp_text.append(' '.join(sent))\ninp_text.append(' '.join(question_text))\nponder_time = np.squeeze(np.array(ponder_time)).tolist()\n# print(ponder_time)\ndef pad_remover(inp_text, ponder_time):\n pad_sent_index = [ i for i, sent in enumerate(inp_text) if sent.startswith('<pad>')]\n start = min(pad_sent_index)\n end = max(pad_sent_index)\n filtered_inp_text = inp_text[:start] + inp_text[end+1:]\n filtered_inp_text = [sent.replace('<pad> ', '') for sent in filtered_inp_text]\n filtered_ponder_time = ponder_time[:start] + ponder_time[end+1:] \n return filtered_inp_text, filtered_ponder_time\n\n\nfiltered_inp_text, filtered_ponder_time = pad_remover(inp_text, ponder_time)\nfor sent in filtered_inp_text:\n print(sent)\nprint(output)\nprint(filtered_ponder_time)", "Mary got the milk .\nJohn moved to the bedroom .\nDaniel journeyed to the office .\nJohn grabbed the apple there .\nJohn got the football .\nJohn journeyed to the garden .\nMary left the milk .\nJohn left the football .\nDaniel moved to the garden .\nDaniel grabbed the football .\nMary moved to the hallway .\nMary went to the kitchen .\nJohn put down the apple there .\nJohn picked up the apple .\nSandra moved to the hallway .\nDaniel left the football there .\nDaniel took the football .\nJohn travelled to the kitchen .\nDaniel dropped the football 
.\nJohn dropped the apple .\nJohn grabbed the apple .\nJohn went to the office .\nSandra went back to the bedroom .\nSandra took the milk .\nJohn journeyed to the bathroom .\nJohn travelled to the office .\nSandra left the milk .\nMary went to the bedroom .\nMary moved to the office .\nJohn travelled to the hallway .\nSandra moved to the garden .\nMary moved to the kitchen .\nDaniel took the football .\nMary journeyed to the bedroom .\nMary grabbed the milk there .\nMary discarded the milk .\nJohn went to the garden .\nJohn discarded the apple there .\nWhere was the apple before the bathroom ?\noffice\n[1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 4.0, 3.0, 1.0, 1.0, 5.0, 3.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 4.0, 3.0]\n" ], [ "df = pd.DataFrame(\n {'input': filtered_inp_text,\n 'ponder_time': filtered_ponder_time,\n })\n\nf_size = (10,5)\nif babi_task_id == 'qa2':\n f_size = (15,5)\nif babi_task_id == 'qa3':\n f_size = (25,5)\ndf.plot(kind='bar', x='input', y='ponder_time', rot=90, width=0.3, figsize=f_size, cmap='Spectral')", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb77794e7963c0b35fe01b556c286354dd3a8d73
91,306
ipynb
Jupyter Notebook
machine_learning/regresion-allergy-percentage.ipynb
RQuispeC/fun-with-allergies
90d5aa9f5fb80e6139974ae6eb18006ed661bc1c
[ "MIT" ]
null
null
null
machine_learning/regresion-allergy-percentage.ipynb
RQuispeC/fun-with-allergies
90d5aa9f5fb80e6139974ae6eb18006ed661bc1c
[ "MIT" ]
null
null
null
machine_learning/regresion-allergy-percentage.ipynb
RQuispeC/fun-with-allergies
90d5aa9f5fb80e6139974ae6eb18006ed661bc1c
[ "MIT" ]
null
null
null
312.691781
15,512
0.929446
[ [ [ "# Prediction of food allergy percetanges", "_____no_output_____" ], [ "Load machine learning library scikit-learn", "_____no_output_____" ] ], [ [ "from sklearn.svm import SVR\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.model_selection import GridSearchCV\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport os.path as osp\nimport gc", "_____no_output_____" ] ], [ [ "Define a function that will receive data as input and will return a support vector machine trained with the best hiperparameters for that data.", "_____no_output_____" ] ], [ [ "def get_model(x, y):\n\tregr = SVR()\n\tpipe = Pipeline(steps=[('reg', regr)])\n\tparam_grid = {\n\t\t'reg__kernel':('linear', 'rbf'),\n\t\t'reg__C': [0.01, 0.1, 1, 10],\n\t\t'reg__epsilon': [0.1, 0.2, 0.4, 0.5, 0.8, 1., 1.5, 2, 3],\n\t\t'reg__gamma': ['auto', 'scale'],\n\t}\n\tsearch = GridSearchCV(pipe, param_grid, iid=False, cv=5,\n\t\treturn_train_score=False, n_jobs = 4)\n\tsearch.fit(x, y)\n\treturn search.best_estimator_", "_____no_output_____" ] ], [ [ "Define a function that loads the data. Source of data https://www.cdc.gov/nchs/hus/contents2017.htm#035", "_____no_output_____" ] ], [ [ "def read(file_name):\n\tdata = pd.read_csv(file_name, sep = '\\t')\n\tx = np.array([[float(year)] for year in list(data)])\n\ty = np.array([[year] for year in np.array(data).reshape(-1)]).reshape(-1, )\n\treturn x, y", "_____no_output_____" ], [ "data_root = '../data/machine_learning'\nfile_names = ['black_african_american.tsv', 'female.tsv', 'hispanic_latino.tsv', 'male.tsv', 'under_18_years.tsv', 'white.tsv']\nnames = ['black african american', 'female ', 'hispanic latino', 'male', 'under 18 years', 'white']", "_____no_output_____" ] ], [ [ "Define the years we want to predict food allergy percentages", "_____no_output_____" ] ], [ [ "query = np.array([[2018], [2019], [2020]]).reshape(-1,1)", "_____no_output_____" ] ], [ [ "Predict food allergy percentages:", "_____no_output_____" ] ], [ [ "for fn, n in zip(file_names, names):\n x, y = read(osp.join(data_root, fn))\n\n model = get_model(x, y)\n y_model = model.predict(x)\n y_query = model.predict(query)\n\n fig =plt.figure()\n plt.title(n)\n plt.scatter(x, y, color='green', label = 'train')\n plt.scatter(query, y_query, color='black', label = 'test')\n plt.plot(x, y_model, color='blue', linewidth=2, label = 'model')\n plt.legend(loc = 'lower right')\n\n plt.show()\n for q, pq in zip(query, y_query):\n print(\"{:.2f} percentage of {} population will have food allergy in {}\".format(pq, n, q[0]))\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb777c1ff834da217a1ddc9608c840d8b6e96d79
21,550
ipynb
Jupyter Notebook
courses/udacity_intro_to_tensorflow_for_deep_learning/l06c03_exercise_flowers_with_transfer_learning_solution.ipynb
riyaj5246/skin-cancer-with-tflite
2a3fbd58deb0984c1224464638ec7f45f7e1fd9b
[ "Apache-2.0" ]
6,484
2019-02-13T21:32:29.000Z
2022-03-31T20:50:20.000Z
courses/udacity_intro_to_tensorflow_for_deep_learning/l06c03_exercise_flowers_with_transfer_learning_solution.ipynb
riyaj5246/skin-cancer-with-tflite
2a3fbd58deb0984c1224464638ec7f45f7e1fd9b
[ "Apache-2.0" ]
288
2019-02-13T22:56:03.000Z
2022-03-24T11:15:19.000Z
courses/udacity_intro_to_tensorflow_for_deep_learning/l06c03_exercise_flowers_with_transfer_learning_solution.ipynb
riyaj5246/skin-cancer-with-tflite
2a3fbd58deb0984c1224464638ec7f45f7e1fd9b
[ "Apache-2.0" ]
7,222
2019-02-13T21:39:34.000Z
2022-03-31T22:23:54.000Z
35.04065
791
0.558747
[ [ [ "##### Copyright 2019 The TensorFlow Authors.", "_____no_output_____" ] ], [ [ "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "_____no_output_____" ] ], [ [ "<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/examples/blob/master/courses/udacity_intro_to_tensorflow_for_deep_learning/l06c03_exercise_flowers_with_transfer_learning_solution.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/examples/blob/master/courses/udacity_intro_to_tensorflow_for_deep_learning/l06c03_exercise_flowers_with_transfer_learning_solution.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\n </td>\n</table>", "_____no_output_____" ], [ "# TensorFlow Hub", "_____no_output_____" ], [ "[TensorFlow Hub](http://tensorflow.org/hub) is an online repository of already trained TensorFlow models that you can use.\nThese models can either be used as is, or they can be used for Transfer Learning.\n\nTransfer learning is a process where you take an existing trained model, and extend it to do additional work. This involves leaving the bulk of the model unchanged, while adding and retraining the final layers, in order to get a different set of possible outputs.\n\nHere, you can see all the models available in [TensorFlow Module Hub](https://tfhub.dev/).\n\nBefore starting this Colab, you should reset the Colab environment by selecting `Runtime -> Reset all runtimes...` from menu above.", "_____no_output_____" ], [ "# Imports\n", "_____no_output_____" ], [ "Some normal imports we've seen before. The new one is importing tensorflow_hub which this Colab will make heavy use of.", "_____no_output_____" ] ], [ [ "import tensorflow as tf", "_____no_output_____" ], [ "import numpy as np\nimport matplotlib.pyplot as plt\n\nimport tensorflow_hub as hub\nimport tensorflow_datasets as tfds\n\nfrom tensorflow.keras import layers", "_____no_output_____" ], [ "import logging\nlogger = tf.get_logger()\nlogger.setLevel(logging.ERROR)", "_____no_output_____" ] ], [ [ "# TODO: Download the Flowers Dataset using TensorFlow Datasets", "_____no_output_____" ], [ "In the cell below you will download the Flowers dataset using TensorFlow Datasets. If you look at the [TensorFlow Datasets documentation](https://www.tensorflow.org/datasets/datasets#tf_flowers) you will see that the name of the Flowers dataset is `tf_flowers`. You can also see that this dataset is only split into a TRAINING set. You will therefore have to use `tfds.splits` to split this training set into to a `training_set` and a `validation_set`. Do a `[70, 30]` split such that 70 corresponds to the `training_set` and 30 to the `validation_set`. Then load the `tf_flowers` dataset using `tfds.load`. 
Make sure the `tfds.load` function uses the all the parameters you need, and also make sure it returns the dataset info, so we can retrieve information about the datasets.\n", "_____no_output_____" ] ], [ [ "(training_set, validation_set), dataset_info = tfds.load(\n 'tf_flowers',\n split=['train[:70%]', 'train[70%:]'],\n with_info=True,\n as_supervised=True,\n)", "_____no_output_____" ] ], [ [ "# TODO: Print Information about the Flowers Dataset\n\nNow that you have downloaded the dataset, use the dataset info to print the number of classes in the dataset, and also write some code that counts how many images we have in the training and validation sets. ", "_____no_output_____" ] ], [ [ "num_classes = dataset_info.features['label'].num_classes\n\nnum_training_examples = 0\nnum_validation_examples = 0\n\nfor example in training_set:\n num_training_examples += 1\n\nfor example in validation_set:\n num_validation_examples += 1\n\nprint('Total Number of Classes: {}'.format(num_classes))\nprint('Total Number of Training Images: {}'.format(num_training_examples))\nprint('Total Number of Validation Images: {} \\n'.format(num_validation_examples))", "_____no_output_____" ] ], [ [ "The images in the Flowers dataset are not all the same size.", "_____no_output_____" ] ], [ [ "for i, example in enumerate(training_set.take(5)):\n print('Image {} shape: {} label: {}'.format(i+1, example[0].shape, example[1]))", "_____no_output_____" ] ], [ [ "# TODO: Reformat Images and Create Batches\n\nIn the cell below create a function that reformats all images to the resolution expected by MobileNet v2 (224, 224) and normalizes them. The function should take in an `image` and a `label` as arguments and should return the new `image` and corresponding `label`. Then create training and validation batches of size `32`.", "_____no_output_____" ] ], [ [ "IMAGE_RES = 224\n\ndef format_image(image, label):\n image = tf.image.resize(image, (IMAGE_RES, IMAGE_RES))/255.0\n return image, label\n\nBATCH_SIZE = 32\n\ntrain_batches = training_set.shuffle(num_training_examples//4).map(format_image).batch(BATCH_SIZE).prefetch(1)\n\nvalidation_batches = validation_set.map(format_image).batch(BATCH_SIZE).prefetch(1)", "_____no_output_____" ] ], [ [ "# Do Simple Transfer Learning with TensorFlow Hub\n\nLet's now use TensorFlow Hub to do Transfer Learning. Remember, in transfer learning we reuse parts of an already trained model and change the final layer, or several layers, of the model, and then retrain those layers on our own dataset.\n\n### TODO: Create a Feature Extractor\nIn the cell below create a `feature_extractor` using MobileNet v2. Remember that the partial model from TensorFlow Hub (without the final classification layer) is called a feature vector. Go to the [TensorFlow Hub documentation](https://tfhub.dev/s?module-type=image-feature-vector&q=tf2) to see a list of available feature vectors. Click on the `tf2-preview/mobilenet_v2/feature_vector`. Read the documentation and get the corresponding `URL` to get the MobileNet v2 feature vector. 
Finally, create a `feature_extractor` by using `hub.KerasLayer` with the correct `input_shape` parameter.", "_____no_output_____" ] ], [ [ "URL = \"https://tfhub.dev/google/tf2-preview/mobilenet_v2/feature_vector/4\"\nfeature_extractor = hub.KerasLayer(URL,\n input_shape=(IMAGE_RES, IMAGE_RES, 3))", "_____no_output_____" ] ], [ [ "### TODO: Freeze the Pre-Trained Model\n\nIn the cell below freeze the variables in the feature extractor layer, so that the training only modifies the final classifier layer.", "_____no_output_____" ] ], [ [ "feature_extractor.trainable = False", "_____no_output_____" ] ], [ [ "### TODO: Attach a classification head\n\nIn the cell below create a `tf.keras.Sequential` model, and add the pre-trained model and the new classification layer. Remember that the classification layer must have the same number of classes as our Flowers dataset. Finally print a summary of the Sequential model.", "_____no_output_____" ] ], [ [ "model = tf.keras.Sequential([\n feature_extractor,\n layers.Dense(num_classes)\n])\n\nmodel.summary()", "_____no_output_____" ] ], [ [ "### TODO: Train the model\n\nIn the cell bellow train this model like any other, by first calling `compile` and then followed by `fit`. Make sure you use the proper parameters when applying both methods. Train the model for only 6 epochs.", "_____no_output_____" ] ], [ [ "model.compile(\n optimizer='adam',\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=['accuracy'])\n\nEPOCHS = 6\n\nhistory = model.fit(train_batches,\n epochs=EPOCHS,\n validation_data=validation_batches)", "_____no_output_____" ] ], [ [ "You can see we get ~88% validation accuracy with only 6 epochs of training, which is absolutely awesome. This is a huge improvement over the model we created in the previous lesson, where we were able to get ~76% accuracy with 80 epochs of training. The reason for this difference is that MobileNet v2 was carefully designed over a long time by experts, then trained on a massive dataset (ImageNet).", "_____no_output_____" ], [ "# TODO: Plot Training and Validation Graphs\n\nIn the cell below, plot the training and validation accuracy/loss graphs.", "_____no_output_____" ] ], [ [ "acc = history.history['accuracy']\nval_acc = history.history['val_accuracy']\n\nloss = history.history['loss']\nval_loss = history.history['val_loss']\n\nepochs_range = range(EPOCHS)\n\nplt.figure(figsize=(8, 8))\nplt.subplot(1, 2, 1)\nplt.plot(epochs_range, acc, label='Training Accuracy')\nplt.plot(epochs_range, val_acc, label='Validation Accuracy')\nplt.legend(loc='lower right')\nplt.title('Training and Validation Accuracy')\n\nplt.subplot(1, 2, 2)\nplt.plot(epochs_range, loss, label='Training Loss')\nplt.plot(epochs_range, val_loss, label='Validation Loss')\nplt.legend(loc='upper right')\nplt.title('Training and Validation Loss')\nplt.show()", "_____no_output_____" ] ], [ [ "What is a bit curious here is that validation performance is better than training performance, right from the start to the end of execution.\n\nOne reason for this is that validation performance is measured at the end of the epoch, but training performance is the average values across the epoch.\n\nThe bigger reason though is that we're reusing a large part of MobileNet which is already trained on Flower images. ", "_____no_output_____" ], [ "# TODO: Check Predictions\n\nIn the cell below get the label names from the dataset info and convert them into a NumPy array. 
Print the array to make sure you have the correct label names.", "_____no_output_____" ] ], [ [ "class_names = np.array(dataset_info.features['label'].names)\n\nprint(class_names)", "_____no_output_____" ] ], [ [ "### TODO: Create an Image Batch and Make Predictions\n\nIn the cell below, use the `next()` function to create an `image_batch` and its corresponding `label_batch`. Convert both the `image_batch` and `label_batch` to numpy arrays using the `.numpy()` method. Then use the `.predict()` method to run the image batch through your model and make predictions. Then use the `np.argmax()` function to get the indices of the best prediction for each image. Finally convert the indices of the best predictions to class names.", "_____no_output_____" ] ], [ [ "image_batch, label_batch = next(iter(train_batches))\n\n\nimage_batch = image_batch.numpy()\nlabel_batch = label_batch.numpy()\n\npredicted_batch = model.predict(image_batch)\npredicted_batch = tf.squeeze(predicted_batch).numpy()\n\npredicted_ids = np.argmax(predicted_batch, axis=-1)\npredicted_class_names = class_names[predicted_ids]\n\nprint(predicted_class_names)", "_____no_output_____" ] ], [ [ "### TODO: Print True Labels and Predicted Indices\n\nIn the cell below, print the true labels and the indices of predicted labels.", "_____no_output_____" ] ], [ [ "print(\"Labels: \", label_batch)\nprint(\"Predicted labels: \", predicted_ids)", "_____no_output_____" ] ], [ [ "# Plot Model Predictions", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(10,9))\nfor n in range(30):\n plt.subplot(6,5,n+1)\n plt.subplots_adjust(hspace = 0.3)\n plt.imshow(image_batch[n])\n color = \"blue\" if predicted_ids[n] == label_batch[n] else \"red\"\n plt.title(predicted_class_names[n].title(), color=color)\n plt.axis('off')\n_ = plt.suptitle(\"Model predictions (blue: correct, red: incorrect)\")", "_____no_output_____" ] ], [ [ "# TODO: Perform Transfer Learning with the Inception Model\n\nGo to the [TensorFlow Hub documentation](https://tfhub.dev/s?module-type=image-feature-vector&q=tf2) and click on `tf2-preview/inception_v3/feature_vector`. This feature vector corresponds to the Inception v3 model. In the cells below, use transfer learning to create a CNN that uses Inception v3 as the pretrained model to classify the images from the Flowers dataset. Note that Inception, takes as input, images that are 299 x 299 pixels. Compare the accuracy you get with Inception v3 to the accuracy you got with MobileNet v2.", "_____no_output_____" ] ], [ [ "IMAGE_RES = 299\n\n(training_set, validation_set), dataset_info = tfds.load(\n 'tf_flowers', \n with_info=True, \n as_supervised=True, \n split=['train[:70%]', 'train[70%:]'],\n)\ntrain_batches = training_set.shuffle(num_training_examples//4).map(format_image).batch(BATCH_SIZE).prefetch(1)\nvalidation_batches = validation_set.map(format_image).batch(BATCH_SIZE).prefetch(1)\n\nURL = \"https://tfhub.dev/google/tf2-preview/inception_v3/feature_vector/4\"\nfeature_extractor = hub.KerasLayer(URL,\n input_shape=(IMAGE_RES, IMAGE_RES, 3),\n trainable=False)\n\nmodel_inception = tf.keras.Sequential([\n feature_extractor,\n tf.keras.layers.Dense(num_classes)\n])\n\nmodel_inception.summary()", "_____no_output_____" ], [ "model_inception.compile(\n optimizer='adam', \n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=['accuracy'])\n\nEPOCHS = 6\n\nhistory = model_inception.fit(train_batches,\n epochs=EPOCHS,\n validation_data=validation_batches)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
cb777d09cdd2e7c57356516e5fca01678cc00ed7
17,104
ipynb
Jupyter Notebook
T&T Lab 8.ipynb
MrBlu1204/Python
caba75ac21a3b955f7c6f1a28677900f0358156f
[ "Apache-2.0" ]
null
null
null
T&T Lab 8.ipynb
MrBlu1204/Python
caba75ac21a3b955f7c6f1a28677900f0358156f
[ "Apache-2.0" ]
null
null
null
T&T Lab 8.ipynb
MrBlu1204/Python
caba75ac21a3b955f7c6f1a28677900f0358156f
[ "Apache-2.0" ]
null
null
null
34.345382
2,444
0.633887
[ [ [ "# T & T Lab 8 - 27th Jan\n## Manish Ranjan Behera - 1828249", "_____no_output_____" ], [ "### WAP TO PRINT THIS PATTERN AND TAKE THE NO OF LINES AS INPUT FROM USER", "_____no_output_____" ], [ "![Screenshot%202021-01-29%20224420.png](attachment:Screenshot%202021-01-29%20224420.png)", "_____no_output_____" ] ], [ [ "n=int(input(\"Enter Size:\"))\nfor i in range(n,0,-1):\n if i==n:\n print(\"*\"*((2*n)-1))\n else:\n print(\"*\"*i+' '*((n-i)*2-1)+\"*\"*i)", "Enter Size:5\n*********\n**** ****\n*** ***\n** **\n* *\n" ] ], [ [ "### WAP to find whether a number is perfect number or not using Function\n**Perfect number, a positive integer that is equal to the sum of its proper divisors. The smallest perfect number is 6, which is the sum of 1, 2, and 3. Other perfect numbers are 28, 496, and 8,128.**", "_____no_output_____" ] ], [ [ "def perfectNumber(n):\n s=0\n for i in range(1,n):\n if n%i==0:\n s=s+i\n \n if s==n:\n print(f'{n} is a perfect number')\n \n else:\n print(f'{n} is not a perfect number')\n \nn=int(input('Enter a Number:'))\nperfectNumber(n)", "Enter a Number:8\n8 is not a perfect number\n" ] ], [ [ "### WAP to find whether a number is Armstrong Number or Not using Function", "_____no_output_____" ] ], [ [ "def armstrong(n):\n d=n\n p=len(str(n))\n print(p)\n s=0\n while d>0:\n r=d%10\n d=int(d/10)\n s=s+(r**p)\n if s==n:\n print(f'{n} is a Armstrong Number')\n else:\n print(f'{n} is NOT a Armstrong Number')\n\nn=int(input('Enter a Number:'))\narmstrong(n)", "Enter a Number:153\n3\n153\n153 is a Armstrong Number\n" ] ], [ [ "### WAP to convert Fahrenheit to Celcius", "_____no_output_____" ] ], [ [ "def f2c(f):\n c=(f-32)*(5/9)\n return c\n \nf=int(input(\"Enter temperature in degree Fahrenheit:\"))\n\nprint(f'{f} degree Fahrenheit is equal to {f2c(f)} degree Celcius')\n ", "Enter temperature in degree Fahrenheit:32\n32 degree Fahrenheit is equal to 0.0 degree Celcius\n" ] ], [ [ "### WAP to find total surface area of a Cuboid", "_____no_output_____" ] ], [ [ "def totalSurfaceArea(l,w,h):\n tsa=2*(l*w+l*h+w*h)\n return tsa\n\nl=float(input('Enter Length of the Cuboid:'))\nw=float(input('Enter Width of the Cuboid:'))\nh=float(input('Enter Height of the Cuboid:'))\n\nprint(\"Total surface area of cuboid is {a:1.2f} square Units\".format(a=totalSurfaceArea(l,w,h)))", "Enter Length of the Cuboid:3.5\nEnter Width of the Cuboid:4\nEnter Height of the Cuboid:2\nTotal surface area of cuboid is 58.00 square Units\n" ] ], [ [ "### WAP to print the following Patterns by taking size as user Input:", "_____no_output_____" ], [ "a)![Screenshot%202021-01-29%20224614.png](attachment:Screenshot%202021-01-29%20224614.png)", "_____no_output_____" ] ], [ [ "def pattern(n):\n for i in range(1,n+1):\n print(\" \"*(n-i),end='')\n for j in range(i):\n print(2*i-1,end=\" \")\n print(\"\")\n\n \nn=int(input(\"Enter Number of lines:\"))\npattern(n)", "Enter Number of lines:5\n 1 \n 3 3 \n 5 5 5 \n 7 7 7 7 \n9 9 9 9 9 \n" ] ], [ [ "b)![Screenshot%202021-01-29%20225847.png](attachment:Screenshot%202021-01-29%20225847.png)", "_____no_output_____" ] ], [ [ "def pattern(n):\n for i in range(2*n,0,-1):\n if i==2*n or i==1:\n print(\"*\"*((2*n)-1))\n elif i<=n:\n print(\"*\"*(n-i+1)+' '*((n-(n-i)-1)*2-1)+\"*\"*(n-i+1))\n else:\n print(\"*\"*(i-n)+' '*((n-(i-n))*2-1)+\"*\"*(i-n))\n \nn=int(input(\"Enter Size:\"))\npattern(n)", "_____no_output_____" ] ], [ [ "c)![Screenshot%202021-01-29%20225945.png](attachment:Screenshot%202021-01-29%20225945.png)", "_____no_output_____" ] ], [ [ "def alphaPattern(n):\n 
alpha='ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n for i in range(n):\n print(\" \"*(n-i)+alpha[0:2*i+1])\n \nn=int(input(\"Enter Number of lines:\"))\nalphaPattern(n)\n", "Enter Number of lines:5\n A\n ABC\n ABCDE\n ABCDEFG\n ABCDEFGHI\n" ] ], [ [ "### WAP to convert Decimal to Binary", "_____no_output_____" ] ], [ [ "# //: divide with integral result (discard remainder)\ndef decimal2binary(num):\n binary=''\n if num >= 1:\n binary=decimal2binary(num // 2)\n binary=binary+str(num%2)\n return binary\n \nnum=int(input(\"Enter a Decimal Value:\"))\nprint(f'Binary form of {num} is {decimal2binary(num)}')", "Enter a Decimal Value:23\nBinary form of 23 is 010111\n" ] ], [ [ "### WAP to find the ASCII values of a given input string", "_____no_output_____" ] ], [ [ "def str2ascii(string):\n asciiList=[ord(c) for c in string]\n return asciiList\n \nstring=input(\"Enter a String Value:\")\nprint(f\"ASCII values of the string are {str2ascii(string)}\")", "Enter a String Value:MR. Blu\nASCII values of the string are [77, 82, 46, 32, 66, 108, 117]\n" ] ], [ [ "### WAP to find the LCM of the Numbers", "_____no_output_____" ] ], [ [ "# LCM = a*b/GCD(a,b)\n\ndef gcd(a,b):\n if a > b:\n smaller = b\n else:\n smaller = a\n for i in range(1, smaller+1):\n if((a % i == 0) and (b % i == 0)):\n hcf = i \n return hcf\n\n\ndef lcm(a, b):\n l=(a*b)//gcd(a,b)\n return l\n\nn=int(input(\"Enter number of number for which LCM has to be found:\"))\nnum=[int(input('')) for x in range(n)]\nresult=lcm(num[0],num[1])\n\nfor i in range(2,len(num)):\n result=lcm(result,num[i])\nprint(\"The L.C.M. of\",end=' ')\nfor x in num:\n print(x,end=\", \")\nprint(\"is\",result)", "Enter number of number for which LCM has to be found:5\n23\n43\n21\n32\n12\nThe L.C.M. of 23, 43, 21, 32, 12, is 664608\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb778084858414efa96e6d378e7f7da446ac1335
34,367
ipynb
Jupyter Notebook
quiz/m7/m7l3/feature_engineering.ipynb
masrur-ahmed/Udacity-AI-for-Trading-Nanodegree
bf2fe2d933f19af6e8550152d84673d5550d189a
[ "MIT" ]
2
2020-04-29T10:07:17.000Z
2020-09-11T22:11:42.000Z
quiz/m7/m7l3/feature_engineering.ipynb
masrur-ahmed/Udacity-AI-for-Trading-Nanodegree
bf2fe2d933f19af6e8550152d84673d5550d189a
[ "MIT" ]
null
null
null
quiz/m7/m7l3/feature_engineering.ipynb
masrur-ahmed/Udacity-AI-for-Trading-Nanodegree
bf2fe2d933f19af6e8550152d84673d5550d189a
[ "MIT" ]
null
null
null
30.794803
472
0.587831
[ [ [ "# Feature Engineering and Labeling\n\nWe'll use the price-volume data and generate features that we can feed into a model. We'll use this notebook for all the coding exercises of this lesson, so please open this notebook in a separate tab of your browser. \n\nPlease run the following code up to and including \"Make Factors.\" Then continue on with the lesson.", "_____no_output_____" ] ], [ [ "import sys\n!{sys.executable} -m pip install --quiet -r requirements.txt", "_____no_output_____" ], [ "import numpy as np\nimport pandas as pd\nimport time\n\nimport matplotlib.pyplot as plt\n%matplotlib inline", "_____no_output_____" ], [ "plt.style.use('ggplot')\nplt.rcParams['figure.figsize'] = (14, 8)", "_____no_output_____" ] ], [ [ "#### Registering data", "_____no_output_____" ] ], [ [ "import os\nimport project_helper\nfrom zipline.data import bundles\n\nos.environ['ZIPLINE_ROOT'] = os.path.join(os.getcwd(), '..', '..', 'data', 'project_4_eod')\n\ningest_func = bundles.csvdir.csvdir_equities(['daily'], project_helper.EOD_BUNDLE_NAME)\nbundles.register(project_helper.EOD_BUNDLE_NAME, ingest_func)\n\nprint('Data Registered')", "_____no_output_____" ], [ "from zipline.pipeline import Pipeline\nfrom zipline.pipeline.factors import AverageDollarVolume\nfrom zipline.utils.calendars import get_calendar\n\n\nuniverse = AverageDollarVolume(window_length=120).top(500) \ntrading_calendar = get_calendar('NYSE') \nbundle_data = bundles.load(project_helper.EOD_BUNDLE_NAME)\nengine = project_helper.build_pipeline_engine(bundle_data, trading_calendar)", "_____no_output_____" ], [ "universe_end_date = pd.Timestamp('2016-01-05', tz='UTC')\n\nuniverse_tickers = engine\\\n .run_pipeline(\n Pipeline(screen=universe),\n universe_end_date,\n universe_end_date)\\\n .index.get_level_values(1)\\\n .values.tolist()", "_____no_output_____" ], [ "from zipline.data.data_portal import DataPortal\n\ndata_portal = DataPortal(\n bundle_data.asset_finder,\n trading_calendar=trading_calendar,\n first_trading_day=bundle_data.equity_daily_bar_reader.first_trading_day,\n equity_minute_reader=None,\n equity_daily_reader=bundle_data.equity_daily_bar_reader,\n adjustment_reader=bundle_data.adjustment_reader)\n\ndef get_pricing(data_portal, trading_calendar, assets, start_date, end_date, field='close'):\n end_dt = pd.Timestamp(end_date.strftime('%Y-%m-%d'), tz='UTC', offset='C')\n start_dt = pd.Timestamp(start_date.strftime('%Y-%m-%d'), tz='UTC', offset='C')\n\n end_loc = trading_calendar.closes.index.get_loc(end_dt)\n start_loc = trading_calendar.closes.index.get_loc(start_dt)\n\n return data_portal.get_history_window(\n assets=assets,\n end_dt=end_dt,\n bar_count=end_loc - start_loc,\n frequency='1d',\n field=field,\n data_frequency='daily')", "_____no_output_____" ] ], [ [ "# Make Factors\n\n- We'll use the same factors we have been using in the lessons about alpha factor research. 
Factors can be features that we feed into the model.\n", "_____no_output_____" ] ], [ [ "from zipline.pipeline.factors import CustomFactor, DailyReturns, Returns, SimpleMovingAverage\nfrom zipline.pipeline.data import USEquityPricing\n\nfactor_start_date = universe_end_date - pd.DateOffset(years=3, days=2)\nsector = project_helper.Sector()\n\ndef momentum_1yr(window_length, universe, sector):\n return Returns(window_length=window_length, mask=universe) \\\n .demean(groupby=sector) \\\n .rank() \\\n .zscore()\n\ndef mean_reversion_5day_sector_neutral(window_length, universe, sector):\n return -Returns(window_length=window_length, mask=universe) \\\n .demean(groupby=sector) \\\n .rank() \\\n .zscore()\n\ndef mean_reversion_5day_sector_neutral_smoothed(window_length, universe, sector):\n unsmoothed_factor = mean_reversion_5day_sector_neutral(window_length, universe, sector)\n return SimpleMovingAverage(inputs=[unsmoothed_factor], window_length=window_length) \\\n .rank() \\\n .zscore()\n\nclass CTO(Returns):\n \"\"\"\n Computes the overnight return, per hypothesis from\n https://papers.ssrn.com/sol3/papers.cfm?abstract_id=2554010\n \"\"\"\n inputs = [USEquityPricing.open, USEquityPricing.close]\n \n def compute(self, today, assets, out, opens, closes):\n \"\"\"\n The opens and closes matrix is 2 rows x N assets, with the most recent at the bottom.\n As such, opens[-1] is the most recent open, and closes[0] is the earlier close\n \"\"\"\n out[:] = (opens[-1] - closes[0]) / closes[0]\n\n \nclass TrailingOvernightReturns(Returns):\n \"\"\"\n Sum of trailing 1m O/N returns\n \"\"\"\n window_safe = True\n \n def compute(self, today, asset_ids, out, cto):\n out[:] = np.nansum(cto, axis=0)\n\n \ndef overnight_sentiment(cto_window_length, trail_overnight_returns_window_length, universe):\n cto_out = CTO(mask=universe, window_length=cto_window_length)\n return TrailingOvernightReturns(inputs=[cto_out], window_length=trail_overnight_returns_window_length) \\\n .rank() \\\n .zscore()\n\ndef overnight_sentiment_smoothed(cto_window_length, trail_overnight_returns_window_length, universe):\n unsmoothed_factor = overnight_sentiment(cto_window_length, trail_overnight_returns_window_length, universe)\n return SimpleMovingAverage(inputs=[unsmoothed_factor], window_length=trail_overnight_returns_window_length) \\\n .rank() \\\n .zscore()\n\nuniverse = AverageDollarVolume(window_length=120).top(500)\nsector = project_helper.Sector()\n\npipeline = Pipeline(screen=universe)\npipeline.add(\n momentum_1yr(252, universe, sector),\n 'Momentum_1YR')\npipeline.add(\n mean_reversion_5day_sector_neutral_smoothed(20, universe, sector),\n 'Mean_Reversion_Sector_Neutral_Smoothed')\npipeline.add(\n overnight_sentiment_smoothed(2, 10, universe),\n 'Overnight_Sentiment_Smoothed')\n\nall_factors = engine.run_pipeline(pipeline, factor_start_date, universe_end_date)\n\nall_factors.head()\n", "_____no_output_____" ] ], [ [ "#### Stop here and continue with the lesson section titled \"Features\".", "_____no_output_____" ], [ "# Universal Quant Features\n\n* stock volatility: zipline has a custom factor called AnnualizedVolatility. The [source code is here](https://github.com/quantopian/zipline/blob/master/zipline/pipeline/factors/basic.py) and also pasted below:\n\n```\nclass AnnualizedVolatility(CustomFactor):\n \"\"\"\n Volatility. 
The degree of variation of a series over time as measured by\n the standard deviation of daily returns.\n https://en.wikipedia.org/wiki/Volatility_(finance)\n **Default Inputs:** :data:`zipline.pipeline.factors.Returns(window_length=2)` # noqa\n Parameters\n ----------\n annualization_factor : float, optional\n The number of time units per year. Defaults is 252, the number of NYSE\n trading days in a normal year.\n \"\"\"\n inputs = [Returns(window_length=2)]\n params = {'annualization_factor': 252.0}\n window_length = 252\n\n def compute(self, today, assets, out, returns, annualization_factor):\n out[:] = nanstd(returns, axis=0) * (annualization_factor ** .5)\n```", "_____no_output_____" ] ], [ [ "from zipline.pipeline.factors import AnnualizedVolatility\nAnnualizedVolatility()", "_____no_output_____" ] ], [ [ "#### Quiz\nWe can see that the returns `window_length` is 2, because we're dealing with daily returns, which are calculated as the percent change from one day to the following day (2 days). The `AnnualizedVolatility` `window_length` is 252 by default, because it's the one-year volatility. Try to adjust the call to the constructor of `AnnualizedVolatility` so that this represents one-month volatility (still annualized, but calculated over a time window of 20 trading days)", "_____no_output_____" ], [ "#### Answer", "_____no_output_____" ] ], [ [ "# TODO\n", "_____no_output_____" ] ], [ [ "#### Quiz: Create one-month and six-month annualized volatility.\nCreate `AnnualizedVolatility` objects for 20 day and 120 day (one month and six-month) time windows. Remember to set the `mask` parameter to the `universe` object created earlier (this filters the stocks to match the list in the `universe`). Convert these to ranks, and then convert the ranks to zscores.", "_____no_output_____" ] ], [ [ "# TODO\nvolatility_20d # ...\nvolatility_120d # ...", "_____no_output_____" ] ], [ [ "#### Add to the pipeline", "_____no_output_____" ] ], [ [ "pipeline.add(volatility_20d, 'volatility_20d')\npipeline.add(volatility_120d, 'volatility_120d')", "_____no_output_____" ] ], [ [ "#### Quiz: Average Dollar Volume feature\nWe've been using [AverageDollarVolume](http://www.zipline.io/appendix.html#zipline.pipeline.factors.AverageDollarVolume) to choose the stock universe based on stocks that have the highest dollar volume. We can also use it as a feature that is input into a predictive model. \nUse 20 day and 120 day `window_length` for average dollar volume. Then rank it and convert to a zscore.", "_____no_output_____" ] ], [ [ "\"\"\"already imported earlier, but shown here for reference\"\"\"\n#from zipline.pipeline.factors import AverageDollarVolume \n\n# TODO: 20-day and 120 day average dollar volume\nadv_20d = # ...\nadv_120d = # ...", "_____no_output_____" ] ], [ [ "#### Add average dollar volume features to pipeline", "_____no_output_____" ] ], [ [ "pipeline.add(adv_20d, 'adv_20d')\npipeline.add(adv_120d, 'adv_120d')", "_____no_output_____" ] ], [ [ "### Market Regime Features\nWe are going to try to capture market-wide regimes: Market-wide means we'll look at the aggregate movement of the universe of stocks.\n\nHigh and low dispersion: dispersion is looking at the dispersion (standard deviation) of the cross section of all stocks at each period of time (on each day). We'll inherit from [CustomFactor](http://www.zipline.io/appendix.html?highlight=customfactor#zipline.pipeline.CustomFactor). 
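For intuition, the dispersion on a single day is just the population standard deviation of that day's returns taken across the assets. A minimal NumPy sketch with made-up numbers, kept separate from the pipeline machinery used in the rest of this notebook, could look like this:\n\n```python\nimport numpy as np\n\n# Toy cross-section: one day of returns for five assets (one value is missing)\nsingle_day_returns = np.array([0.012, -0.004, 0.007, np.nan, -0.011])\nmean_return = np.nanmean(single_day_returns)\ndispersion = np.sqrt(np.nanmean((single_day_returns - mean_return) ** 2))\nprint(dispersion)\n```\n\n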
We'll feed in [DailyReturns](http://www.zipline.io/appendix.html?highlight=dailyreturns#zipline.pipeline.factors.DailyReturns) as the `inputs`. ", "_____no_output_____" ], [ "#### Quiz\nIf the `inputs` to our market dispersion factor are the daily returns, and we plan to calculate the market dispersion on each day, what should be the `window_length` of the market dispersion class?", "_____no_output_____" ], [ "#### Answer\n", "_____no_output_____" ], [ "#### Quiz: market dispersion feature\nCreate a class that inherits from `CustomFactor`. Override the `compute` function to calculate the population standard deviation of all the stocks over a specified window of time.\n\n**mean returns**\n\n$\\mu = \\sum_{t=0}^{T}\\sum_{i=1}^{N}r_{i,t}$\n\n**Market Dispersion**\n\n$\\sqrt{\\frac{1}{T} \\sum_{t=0}^{T} \\frac{1}{N}\\sum_{i=1}^{N}(r_{i,t} - \\mu)^2}$\n\nUse [numpy.nanmean](https://docs.scipy.org/doc/numpy-1.15.0/reference/generated/numpy.nanmean.html) to calculate the average market return $\\mu$ and to calculate the average of the squared differences.", "_____no_output_____" ] ], [ [ "class MarketDispersion(CustomFactor):\n inputs = [DailyReturns()]\n window_length = # ...\n window_safe = True\n\n def compute(self, today, assets, out, returns):\n \n # TODO: calculate average returns\n mean_returns = # ...\n \n #TODO: calculate standard deviation of returns\n out[:] = # ...", "_____no_output_____" ] ], [ [ "#### Quiz\n\nCreate the MarketDispersion object. Apply two separate smoothing operations using [SimpleMovingAverage](https://www.zipline.io/appendix.html?highlight=simplemovingaverage#zipline.pipeline.factors.SimpleMovingAverage). One with a one-month window, and another with a 6-month window. Add both to the pipeline.", "_____no_output_____" ] ], [ [ "# TODO: create MarketDispersion object\ndispersion = # ...\n\n# TODO: apply one-month simple moving average\ndispersion_20d = # ...\n\n# TODO: apply 6-month simple moving average\ndispersion_120d = # ...\n\n# Add to pipeline\npipeline.add(dispersion_20d, 'dispersion_20d')\npipeline.add(dispersion_120d, 'dispersion_120d')", "_____no_output_____" ] ], [ [ "#### Market volatility feature\n* High and low volatility \nWe'll also build a class for market volatility, which inherits from [CustomFactor](http://www.zipline.io/appendix.html?highlight=customfactor#zipline.pipeline.CustomFactor). This will measure the standard deviation of the returns of the \"market\". In this case, we're approximating the \"market\" as the equal weighted average return of all the stocks in the stock universe.\n\n##### Market return\n$r_{m,t} = \\frac{1}{N}\\sum_{i=1}^{N}r_{i,t}$ for each day $t$ in `window_length`. 
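For example, if the universe contained just three stocks that returned 1%, -2% and 4% on a given day, the equal-weighted market return for that day would be their simple average, $(0.01 - 0.02 + 0.04)/3 = 0.01$, i.e. 1%.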
\n\n##### Average market return\nAlso calculate the average market return over the `window_length` $T$ of days: \n$\\mu_{m} = \\frac{1}{T}\\sum_{t=1}^{T} r_{m,t}$\n\n#### Standard deviation of market return\nThen calculate the standard deviation of the market return \n$\\sigma_{m,t} = \\sqrt{252 \\times \\frac{1}{N} \\sum_{t=1}^{T}(r_{m,t} - \\mu_{m})^2 } $ \n\n##### Hints\n* Please use [numpy.nanmean](https://docs.scipy.org/doc/numpy-1.15.0/reference/generated/numpy.nanmean.html) so that it ignores null values.\n* When using `numpy.nanmean`: \naxis=0 will calculate one average for every column (think of it like creating a new row in a spreadsheet) \naxis=1 will calculate one average for every row (think of it like creating a new column in a spreadsheet) \n* The returns data in `compute` has one day in each row, and one stock in each column.\n* Notice that we defined a dictionary `params` that has a key `annualization_factor`. This `annualization_factor` can be used as a regular variable, and you'll be using it in the `compute` function. This is also done in the definition of AnnualizedVolatility (as seen earlier in the notebook).", "_____no_output_____" ] ], [ [ "class MarketVolatility(CustomFactor):\n inputs = [DailyReturns()]\n window_length = 1 # We'll want to set this in the constructor when creating the object.\n window_safe = True\n params = {'annualization_factor': 252.0}\n \n def compute(self, today, assets, out, returns, annualization_factor):\n \n # TODO\n \"\"\" \n For each row (each row represents one day of returns), \n calculate the average of the cross-section of stock returns\n So that market_returns has one value for each day in the window_length\n So choose the appropriate axis (please see hints above)\n \"\"\"\n mkt_returns = # ...\n \n # TODO\n # Calculate the mean of market returns\n mkt_returns_mu = # ...\n \n # TODO\n # Calculate the standard deviation of the market returns, then annualize them.\n out[:] = # ...", "_____no_output_____" ], [ "# TODO: create market volatility features using one month and six-month windows\nmarket_vol_20d = # ...\nmarket_vol_120d = # ...", "_____no_output_____" ], [ "# add market volatility features to pipeline\npipeline.add(market_vol_20d, 'market_vol_20d')\npipeline.add(market_vol_120d, 'market_vol_120d')", "_____no_output_____" ] ], [ [ "#### Stop here and continue with the lesson section \"Sector and Industry\"", "_____no_output_____" ], [ "# Sector and Industry", "_____no_output_____" ], [ "#### Add sector code\n\nNote that after we run the pipeline and get the data in a dataframe, we can work on enhancing the sector code feature with one-hot encoding.", "_____no_output_____" ] ], [ [ "pipeline.add(sector, 'sector_code')", "_____no_output_____" ] ], [ [ "#### Run pipeline to calculate features\n", "_____no_output_____" ] ], [ [ "all_factors = engine.run_pipeline(pipeline, factor_start_date, universe_end_date)\nall_factors.head()", "_____no_output_____" ] ], [ [ "#### One-hot encode sector\n\nLet's get all the unique sector codes. Then we'll use the `==` comparison operator to check when the sector code equals a particular value. This returns a series of True/False values. For some functions that we'll use in a later lesson, it's easier to work with numbers instead of booleans. We can convert the booleans to type int. 
So False becomes 0, and 1 becomes True.", "_____no_output_____" ] ], [ [ "sector_code_l = set(all_factors['sector_code'])", "_____no_output_____" ], [ "sector_0 = all_factors['sector_code'] == 0\nsector_0[0:5]", "_____no_output_____" ], [ "sector_0_numeric = sector_0.astype(int)\nsector_0_numeric[0:5]", "_____no_output_____" ] ], [ [ "#### Quiz: One-hot encode sector\nChoose column names that look like \"sector_code_0\", \"sector_code_1\" etc. Store the values as 1 when the row matches the sector code of the column, 0 otherwise.", "_____no_output_____" ] ], [ [ "# TODO: one-hot encode sector and store into dataframe\nfor s in sector_code_l:\n # ...", "_____no_output_____" ], [ "all_factors.head()", "_____no_output_____" ] ], [ [ "#### Stop here and continue with the lesson section \"Date Parts\".", "_____no_output_____" ], [ "# Date Parts\n* We will make features that might capture trader/investor behavior due to calendar anomalies.\n* We can get the dates from the index of the dataframe that is returned from running the pipeline.", "_____no_output_____" ], [ "#### Accessing index of dates\n* Note that we can access the date index. using `Dataframe.index.get_level_values(0)`, since the date is stored as index level 0, and the asset name is stored in index level 1. This is of type [DateTimeIndex](https://pandas.pydata.org/pandas-docs/version/0.23.4/generated/pandas.DatetimeIndex.html).", "_____no_output_____" ] ], [ [ "all_factors.index.get_level_values(0)", "_____no_output_____" ] ], [ [ "#### [DateTimeIndex attributes](https://pandas.pydata.org/pandas-docs/version/0.23.4/generated/pandas.DatetimeIndex.html)\n\n* The `month` attribute is a numpy array with a 1 for January, 2 for February ... 12 for December etc. \n* We can use a comparison operator such as `==` to return True or False.\n\n* It's usually easier to have all data of a similar type (numeric), so we recommend converting booleans to integers. \nThe numpy ndarray has a function `.astype()` that can cast the data to a specified type. \nFor instance, `astype(int)` converts False to 0 and True to 1.\n", "_____no_output_____" ] ], [ [ "# Example\nprint(all_factors.index.get_level_values(0).month)\nprint(all_factors.index.get_level_values(0).month == 1)\nprint( (all_factors.index.get_level_values(0).month == 1).astype(int) )", "_____no_output_____" ] ], [ [ "## Quiz\n* Create a numpy array that has 1 when the month is January, and 0 otherwise. 
Store it as a column in the all_factors dataframe.\n* Add another similar column to indicate when the month is December", "_____no_output_____" ] ], [ [ "# TODO: create a feature that indicate whether it's January\nall_factors['is_January'] = # ...\n\n# TODO: create a feature to indicate whether it's December\nall_factors['is_December'] = # ...", "_____no_output_____" ] ], [ [ "## Weekday, quarter\n* add columns to the all_factors dataframe that specify the weekday, quarter and year.\n* As you can see in the [documentation for DateTimeIndex](https://pandas.pydata.org/pandas-docs/version/0.23.4/generated/pandas.DatetimeIndex.html), `weekday`, `quarter`, and `year` are attributes that you can use here.", "_____no_output_____" ] ], [ [ "# we can see that 0 is for Monday, 4 is for Friday\nset(all_factors.index.get_level_values(0).weekday)", "_____no_output_____" ], [ "# Q1, Q2, Q3 and Q4 are represented by integers too\nset(all_factors.index.get_level_values(0).quarter)", "_____no_output_____" ] ], [ [ "#### Quiz\nAdd features for weekday, quarter and year.", "_____no_output_____" ] ], [ [ "# TODO\nall_factors['weekday'] = # ...\nall_factors['quarter'] = # ...\nall_factors['year'] = # ...", "_____no_output_____" ] ], [ [ "## Start and end-of features\n\n* The start and end of the week, month, and quarter may have structural differences in trading activity.\n* [Pandas.date_range](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.date_range.html) takes the start_date, end_date, and frequency.\n* The [frequency](http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases) for end of month is `BM`.", "_____no_output_____" ] ], [ [ "# Example\ntmp = pd.date_range(start=factor_start_date, end=universe_end_date, freq='BM')\ntmp", "_____no_output_____" ] ], [ [ "#### Example\n\nCreate a DatetimeIndex that stores the dates which are the last business day of each month. \nUse the `.isin` function, passing in these last days of the month, to create a series of booleans. \nConvert the booleans to integers. ", "_____no_output_____" ] ], [ [ "last_day_of_month = pd.date_range(start=factor_start_date, end=universe_end_date, freq='BM')\nlast_day_of_month", "_____no_output_____" ], [ "tmp_month_end = all_factors.index.get_level_values(0).isin(last_day_of_month)\ntmp_month_end", "_____no_output_____" ], [ "tmp_month_end_int = tmp_month_end.astype(int)\ntmp_month_end_int", "_____no_output_____" ], [ "all_factors['month_end'] = tmp_month_end_int", "_____no_output_____" ] ], [ [ "#### Quiz: Start of Month\nCreate a feature that indicates the first business day of each month.\n\n**Hint:** The frequency for first business day of the month uses the code `BMS`.", "_____no_output_____" ] ], [ [ "# TODO: month_start feature\nfirst_day_of_month = # pd.date_range()\nall_factors['month_start'] = # ...", "_____no_output_____" ] ], [ [ "#### Quiz: Quarter end and quarter start\n\nCreate features for the last business day of each quarter, and first business day of each quarter. 
\n**Hint**: use `freq=BQ` for business day end of quarter, and `freq=BQS` for business day start of quarter.", "_____no_output_____" ] ], [ [ "# TODO: qtr_end feature\nlast_day_qtr = # ...\nall_factors['qtr_end'] = # ...", "_____no_output_____" ], [ "# TODO: qtr_start feature\nfirst_day_qtr = # ...\nall_factors['qtr_start'] = # ...", "_____no_output_____" ] ], [ [ "## View all features", "_____no_output_____" ] ], [ [ "list(all_factors.columns)", "_____no_output_____" ] ], [ [ "Note that we can skip the sector_code feature, since we one-hot encoded it into separate features.", "_____no_output_____" ] ], [ [ "features = ['Mean_Reversion_Sector_Neutral_Smoothed',\n 'Momentum_1YR',\n 'Overnight_Sentiment_Smoothed',\n 'adv_120d',\n 'adv_20d',\n 'dispersion_120d',\n 'dispersion_20d',\n 'market_vol_120d',\n 'market_vol_20d',\n #'sector_code', # removed sector_code\n 'volatility_120d',\n 'volatility_20d',\n 'sector_code_0',\n 'sector_code_1',\n 'sector_code_2',\n 'sector_code_3',\n 'sector_code_4',\n 'sector_code_5',\n 'sector_code_6',\n 'sector_code_7',\n 'sector_code_8',\n 'sector_code_9',\n 'sector_code_10',\n 'sector_code_-1',\n 'is_January',\n 'is_December',\n 'weekday',\n 'quarter',\n 'year',\n 'month_start',\n 'qtr_end',\n 'qtr_start']", "_____no_output_____" ] ], [ [ "#### Stop here and continue to the lesson section \"Targets\"", "_____no_output_____" ], [ "# Targets (Labels)\n\n- We are going to try to predict the go forward 1-week return\n- Very important! Quantize the target. Why do we do this?\n - Makes it market neutral return\n - Normalizes changing volatility and dispersion over time\n - Make the target robust to changes in market regimes \n- The factor we create is the trailing 5-day return.\n\n", "_____no_output_____" ] ], [ [ "# we'll create a separate pipeline to handle the target\npipeline_target = Pipeline(screen=universe)", "_____no_output_____" ] ], [ [ "#### Example\n\nWe'll convert weekly returns into 2-quantiles.", "_____no_output_____" ] ], [ [ "return_5d_2q = Returns(window_length=5, mask=universe).quantiles(2)\nreturn_5d_2q", "_____no_output_____" ], [ "pipeline_target.add(return_5d_2q, 'return_5d_2q')", "_____no_output_____" ] ], [ [ "#### Quiz\nCreate another weekly return target that's converted to 5-quantiles.", "_____no_output_____" ] ], [ [ "# TODO: create a target using 5-quantiles\nreturn_5d_5q = # ...\n\n# TODO: add the feature to the pipeline\n# ...\n\n# Let's run the pipeline to get the dataframe\ntargets_df = engine.run_pipeline(pipeline_target, factor_start_date, universe_end_date)\ntargets_df.head()", "_____no_output_____" ], [ "targets_df.columns", "_____no_output_____" ] ], [ [ "## Solution\n\n[solution notebook](feature_engineering_solution.ipynb)", "_____no_output_____" ] ] ]
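The notebook record above leaves its quiz cells as `# TODO` placeholders (the volatility and dollar-volume factors, `MarketDispersion.compute`, `MarketVolatility.compute`, and the quantized target). The sketch below is one plausible way to fill those gaps, following the notebook's own hints; it is not the course's official solution, and it assumes the notebook's `universe` filter and `pipeline_target` object are in scope.

```python
import numpy as np
from zipline.pipeline import CustomFactor
from zipline.pipeline.factors import (
    AnnualizedVolatility, AverageDollarVolume, DailyReturns, Returns, SimpleMovingAverage,
)

# One-month and six-month annualized volatility, ranked and converted to z-scores.
volatility_20d = AnnualizedVolatility(window_length=20, mask=universe).rank().zscore()
volatility_120d = AnnualizedVolatility(window_length=120, mask=universe).rank().zscore()

# 20-day and 120-day average dollar volume, ranked and converted to z-scores.
adv_20d = AverageDollarVolume(window_length=20, mask=universe).rank().zscore()
adv_120d = AverageDollarVolume(window_length=120, mask=universe).rank().zscore()


class MarketDispersion(CustomFactor):
    """Cross-sectional standard deviation of daily returns, computed one day at a time."""
    inputs = [DailyReturns()]
    window_length = 1  # dispersion uses a single day (one row) of returns
    window_safe = True

    def compute(self, today, assets, out, returns):
        mean_returns = np.nanmean(returns)
        out[:] = np.sqrt(np.nanmean((returns - mean_returns) ** 2))


dispersion = MarketDispersion(mask=universe)
dispersion_20d = SimpleMovingAverage(inputs=[dispersion], window_length=20)
dispersion_120d = SimpleMovingAverage(inputs=[dispersion], window_length=120)


class MarketVolatility(CustomFactor):
    """Annualized standard deviation of the equal-weighted market return."""
    inputs = [DailyReturns()]
    window_length = 1  # overridden when the factor is constructed
    window_safe = True
    params = {'annualization_factor': 252.0}

    def compute(self, today, assets, out, returns, annualization_factor):
        mkt_returns = np.nanmean(returns, axis=1)  # one equal-weighted return per day
        mkt_returns_mu = np.nanmean(mkt_returns)
        out[:] = np.sqrt(annualization_factor *
                         np.nanmean((mkt_returns - mkt_returns_mu) ** 2))


market_vol_20d = MarketVolatility(window_length=20)
market_vol_120d = MarketVolatility(window_length=120)

# The target quiz: go-forward weekly returns bucketed into 5 quantiles.
return_5d_5q = Returns(window_length=5, mask=universe).quantiles(5)
pipeline_target.add(return_5d_5q, 'return_5d_5q')
```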
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ] ]
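The calendar-feature and one-hot-encoding quizzes in the same record rely only on plain pandas, independent of zipline. As a rough, self-contained illustration, a hypothetical two-asset frame stands in for the real pipeline output below, and the offset aliases mirror the ones the stored notebook uses ('BMS', 'BQ', 'BQS').

```python
import pandas as pd

# Hypothetical stand-in for the pipeline output: a (date, asset) MultiIndex, no real factors.
dates = pd.date_range('2013-01-02', '2013-12-31', freq='B')
index = pd.MultiIndex.from_product([dates, ['AAPL', 'MSFT']], names=['date', 'asset'])
all_factors = pd.DataFrame(index=index)
date_index = all_factors.index.get_level_values(0)

# Month indicators: booleans cast to 0/1 integers.
all_factors['is_January'] = (date_index.month == 1).astype(int)
all_factors['is_December'] = (date_index.month == 12).astype(int)

# Plain calendar parts.
all_factors['weekday'] = date_index.weekday
all_factors['quarter'] = date_index.quarter
all_factors['year'] = date_index.year

# First business day of the month, first/last business day of the quarter.
first_day_of_month = pd.date_range(dates[0], dates[-1], freq='BMS')
all_factors['month_start'] = date_index.isin(first_day_of_month).astype(int)

last_day_qtr = pd.date_range(dates[0], dates[-1], freq='BQ')
first_day_qtr = pd.date_range(dates[0], dates[-1], freq='BQS')
all_factors['qtr_end'] = date_index.isin(last_day_qtr).astype(int)
all_factors['qtr_start'] = date_index.isin(first_day_qtr).astype(int)

# One-hot sector encoding, given an integer sector_code column (dummy zeros here).
all_factors['sector_code'] = 0
for s in set(all_factors['sector_code']):
    all_factors['sector_code_{}'.format(s)] = (all_factors['sector_code'] == s).astype(int)
```

Recent pandas releases may warn that the quarterly aliases are being renamed (e.g. 'BQ' to 'BQE'); the spelling above simply matches the stored notebook.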
cb778259fc9fca3fd7d97885927371b11036b9ff
54,801
ipynb
Jupyter Notebook
tutorials/LinearAlgebra/LinearAlgebra.ipynb
ms-O/QuantumKatas
617cdbec138d163dfd66c8fd805a9c02f3689b88
[ "MIT" ]
1
2020-12-29T19:39:50.000Z
2020-12-29T19:39:50.000Z
tutorials/LinearAlgebra/LinearAlgebra.ipynb
ms-O/QuantumKatas
617cdbec138d163dfd66c8fd805a9c02f3689b88
[ "MIT" ]
null
null
null
tutorials/LinearAlgebra/LinearAlgebra.ipynb
ms-O/QuantumKatas
617cdbec138d163dfd66c8fd805a9c02f3689b88
[ "MIT" ]
null
null
null
37.741736
419
0.527655
[ [ [ "# Introduction to Linear Algebra\n\nThis is a tutorial designed to introduce you to the basics of linear algebra.\nLinear algebra is a branch of mathematics dedicated to studying the properties of matrices and vectors,\nwhich are used extensively in quantum computing to represent quantum states and operations on them.\nThis tutorial doesn't come close to covering the full breadth of the topic, but it should be enough to get you comfortable with the main concepts of linear algebra used in quantum computing.\n\nThis tutorial assumes familiarity with complex numbers; if you need a review of this topic, we recommend that you complete the [Complex Arithmetic](../ComplexArithmetic/ComplexArithmetic.ipynb) tutorial before tackling this one.\n\nThis tutorial covers the following topics:\n* Matrices and vectors\n* Basic matrix operations\n* Operations and properties of complex matrices\n* Inner and outer vector products\n* Tensor product\n* Eigenvalues and eigenvectors\n\nIf you need to look up some formulas quickly, you can find them in [this cheatsheet](https://github.com/microsoft/QuantumKatas/blob/main/quickref/qsharp-quick-reference.pdf).", "_____no_output_____" ], [ "This notebook has several tasks that require you to write Python code to test your understanding of the concepts. If you are not familiar with Python, [here](https://docs.python.org/3/tutorial/index.html) is a good introductory tutorial for it.\n\n> The exercises use Python's built-in representation of complex numbers. Most of the operations (addition, multiplication, etc.) work as you expect them to. Here are a few notes on Python-specific syntax:\n>\n> * If `z` is a complex number, `z.real` is the real component, and `z.imag` is the coefficient of the imaginary component.\n> * To represent an imaginary number, put `j` after a real number: $3.14i$ would be `3.14j`.\n> * To represent a complex number, simply add a real number and an imaginary number.\n> * The built-in function `abs` computes the modulus of a complex number.\n>\n> You can find more information in the [official documentation](https://docs.python.org/3/library/cmath.html).\n\nLet's start by importing some useful mathematical functions and constants, and setting up a few things necessary for testing the exercises. **Do not skip this step.**\n\nClick the cell with code below this block of text and press `Ctrl+Enter` (`⌘+Enter` on Mac).", "_____no_output_____" ] ], [ [ "# Run this cell using Ctrl+Enter (⌘+Enter on Mac).\nfrom testing import exercise, create_empty_matrix\nfrom typing import List\n\nimport math, cmath\n\nMatrix = List[List[complex]]", "Success!\n" ] ], [ [ "# Part I. Matrices and Basic Operations\n\n## Matrices and Vectors\n\nA **matrix** is set of numbers arranged in a rectangular grid. Here is a $2$ by $2$ matrix:\n\n$$A =\n\\begin{bmatrix} 1 & 2 \\\\ 3 & 4 \\end{bmatrix}$$\n\n$A_{i,j}$ refers to the element in row $i$ and column $j$ of matrix $A$ (all indices are 0-based). In the above example, $A_{0,1} = 2$.\n\nAn $n \\times m$ matrix will have $n$ rows and $m$ columns, like so:\n\n$$\\begin{bmatrix}\n x_{0,0} & x_{0,1} & \\dotsb & x_{0,m-1} \\\\\n x_{1,0} & x_{1,1} & \\dotsb & x_{1,m-1} \\\\\n \\vdots & \\vdots & \\ddots & \\vdots \\\\\n x_{n-1,0} & x_{n-1,1} & \\dotsb & x_{n-1,m-1}\n\\end{bmatrix}$$\n\nA $1 \\times 1$ matrix is equivalent to a scalar:\n\n$$\\begin{bmatrix} 3 \\end{bmatrix} = 3$$\n\nQuantum computing uses complex-valued matrices: the elements of a matrix can be complex numbers. 
This, for example, is a valid complex-valued matrix:\n\n$$\\begin{bmatrix}\n 1 & i \\\\\n -2i & 3 + 4i\n\\end{bmatrix}$$\n\nFinally, a **vector** is an $n \\times 1$ matrix. Here, for example, is a $3 \\times 1$ vector:\n\n$$V = \\begin{bmatrix} 1 \\\\ 2i \\\\ 3 + 4i \\end{bmatrix}$$\n\nSince vectors always have a width of $1$, vector elements are sometimes written using only one index. In the above example, $V_0 = 1$ and $V_1 = 2i$.", "_____no_output_____" ], [ "## Matrix Addition\n\nThe easiest matrix operation is **matrix addition**. Matrix addition works between two matrices of the same size, and adds each number from the first matrix to the number in the same position in the second matrix:\n\n$$\\begin{bmatrix}\n x_{0,0} & x_{0,1} & \\dotsb & x_{0,m-1} \\\\\n x_{1,0} & x_{1,1} & \\dotsb & x_{1,m-1} \\\\\n \\vdots & \\vdots & \\ddots & \\vdots \\\\\n x_{n-1,0} & x_{n-1,1} & \\dotsb & x_{n-1,m-1}\n\\end{bmatrix}\n+\n\\begin{bmatrix}\n y_{0,0} & y_{0,1} & \\dotsb & y_{0,m-1} \\\\\n y_{1,0} & y_{1,1} & \\dotsb & y_{1,m-1} \\\\\n \\vdots & \\vdots & \\ddots & \\vdots \\\\\n y_{n-1,0} & y_{n-1,1} & \\dotsb & y_{n-1,m-1}\n\\end{bmatrix}\n=\n\\begin{bmatrix}\n x_{0,0} + y_{0,0} & x_{0,1} + y_{0,1} & \\dotsb & x_{0,m-1} + y_{0,m-1} \\\\\n x_{1,0} + y_{1,0} & x_{1,1} + y_{1,1} & \\dotsb & x_{1,m-1} + y_{1,m-1} \\\\\n \\vdots & \\vdots & \\ddots & \\vdots \\\\\n x_{n-1,0} + y_{n-1,0} & x_{n-1,1} + y_{n-1,1} & \\dotsb & x_{n-1,m-1} + y_{n-1,m-1}\n\\end{bmatrix}$$\n\nSimilarly, we can compute $A - B$ by subtracting elements of $B$ from corresponding elements of $A$.\n\nMatrix addition has the following properties:\n\n* Commutativity: $A + B = B + A$\n* Associativity: $(A + B) + C = A + (B + C)$", "_____no_output_____" ], [ "### <span style=\"color:blue\">Exercise 1</span>: Matrix addition.\n\n**Inputs:**\n\n1. An $n \\times m$ matrix $A$, represented as a two-dimensional list.\n2. An $n \\times m$ matrix $B$, represented as a two-dimensional list.\n\n**Output:** Return the sum of the matrices $A + B$ - an $n \\times m$ matrix, represented as a two-dimensional list.\n\n> When representing matrices as lists, each sub-list represents a row.\n>\n> For example, list `[[1, 2], [3, 4]]` represents the following matrix:\n>\n> $$\\begin{bmatrix}\n 1 & 2 \\\\\n 3 & 4\n\\end{bmatrix}$$\n\nFill in the missing code and run the cell below to test your work.\n\n<br/>\n<details>\n <summary><b>Need a hint? Click here</b></summary>\n A video explanation can be found <a href=\"https://www.youtube.com/watch?v=WR9qCSXJlyY\">here</a>.\n</details>", "_____no_output_____" ] ], [ [ "@exercise\ndef matrix_add(a : Matrix, b : Matrix) -> Matrix:\n # You can get the size of a matrix like this:\n rows = len(a)\n columns = len(a[0])\n \n # You can use the following function to initialize a rows×columns matrix filled with 0s to store your answer\n c = create_empty_matrix(rows, columns)\n \n # You can use a for loop to execute its body several times;\n # in this loop variable i will take on each value from 0 to n-1, inclusive\n for i in range(rows):\n # Loops can be nested\n for j in range(columns):\n # You can access elements of a matrix like this:\n x = a[i][j]\n y = b[i][j]\n \n # You can modify the elements of a matrix like this:\n c[i][j] = x + y\n \n return c", "Success!\n" ] ], [ [ "*Can't come up with a solution? 
See the explained solution in the [Linear Algebra Workbook](./Workbook_LinearAlgebra.ipynb#Exercise-1:-Matrix-addition.).*", "_____no_output_____" ], [ "## Scalar Multiplication\n\nThe next matrix operation is **scalar multiplication** - multiplying the entire matrix by a scalar (real or complex number):\n\n$$a \\cdot\n\\begin{bmatrix}\n x_{0,0} & x_{0,1} & \\dotsb & x_{0,m-1} \\\\\n x_{1,0} & x_{1,1} & \\dotsb & x_{1,m-1} \\\\\n \\vdots & \\vdots & \\ddots & \\vdots \\\\\n x_{n-1,0} & x_{n-1,1} & \\dotsb & x_{n-1,m-1}\n\\end{bmatrix}\n=\n\\begin{bmatrix}\n a \\cdot x_{0,0} & a \\cdot x_{0,1} & \\dotsb & a \\cdot x_{0,m-1} \\\\\n a \\cdot x_{1,0} & a \\cdot x_{1,1} & \\dotsb & a \\cdot x_{1,m-1} \\\\\n \\vdots & \\vdots & \\ddots & \\vdots \\\\\n a \\cdot x_{n-1,0} & a \\cdot x_{n-1,1} & \\dotsb & a \\cdot x_{n-1,m-1}\n\\end{bmatrix}$$\n\nScalar multiplication has the following properties:\n\n* Associativity: $x \\cdot (yA) = (x \\cdot y)A$\n* Distributivity over matrix addition: $x(A + B) = xA + xB$\n* Distributivity over scalar addition: $(x + y)A = xA + yA$", "_____no_output_____" ], [ "### <span style=\"color:blue\">Exercise 2</span>: Scalar multiplication.\n\n**Inputs:**\n\n1. A scalar $x$.\n2. An $n \\times m$ matrix $A$.\n\n**Output:** Return the $n \\times m$ matrix $x \\cdot A$.\n\n<br/>\n<details>\n <summary><b>Need a hint? Click here</b></summary>\n A video explanation can be found <a href=\"https://www.youtube.com/watch?v=TbaltFbJ3wE\">here</a>.\n</details>", "_____no_output_____" ] ], [ [ "@exercise\ndef scalar_mult(x : complex, a : Matrix) -> Matrix:\n # Fill in the missing code and run the cell to check your work.\n rows = len(a)\n columns = len(a[0])\n \n c = create_empty_matrix(rows, columns)\n \n # You can use a for loop to execute its body several times;\n # in this loop variable i will take on each value from 0 to n-1, inclusive\n for i in range(rows):\n # Loops can be nested\n for j in range(columns):\n # You can access elements of a matrix like this:\n current_cell = a[i][j]\n \n # You can modify the elements of a matrix like this:\n c[i][j] = x * current_cell\n \n return c", "Success!\n" ] ], [ [ "*Can't come up with a solution? See the explained solution in the [Linear Algebra Workbook](./Workbook_LinearAlgebra.ipynb#Exercise-2:-Scalar-multiplication.).*", "_____no_output_____" ], [ "## Matrix Multiplication\n\n**Matrix multiplication** is a very important and somewhat unusual operation. The unusual thing about it is that neither its operands nor its output are the same size: an $n \\times m$ matrix multiplied by an $m \\times k$ matrix results in an $n \\times k$ matrix. 
\nThat is, for matrix multiplication to be applicable, the number of columns in the first matrix must equal the number of rows in the second matrix.\n\nHere is how matrix product is calculated: if we are calculating $AB = C$, then\n\n$$C_{i,j} = A_{i,0} \\cdot B_{0,j} + A_{i,1} \\cdot B_{1,j} + \\dotsb + A_{i,m-1} \\cdot B_{m-1,j} = \\sum_{t = 0}^{m-1} A_{i,t} \\cdot B_{t,j}$$\n\nHere is a small example:\n\n$$\\begin{bmatrix}\n \\color{blue} 1 & \\color{blue} 2 & \\color{blue} 3 \\\\\n \\color{red} 4 & \\color{red} 5 & \\color{red} 6\n\\end{bmatrix}\n\\begin{bmatrix}\n 1 \\\\\n 2 \\\\\n 3\n\\end{bmatrix}\n=\n\\begin{bmatrix}\n (\\color{blue} 1 \\cdot 1) + (\\color{blue} 2 \\cdot 2) + (\\color{blue} 3 \\cdot 3) \\\\\n (\\color{red} 4 \\cdot 1) + (\\color{red} 5 \\cdot 2) + (\\color{red} 6 \\cdot 3)\n\\end{bmatrix}\n=\n\\begin{bmatrix}\n 14 \\\\\n 32\n\\end{bmatrix}$$", "_____no_output_____" ], [ "Matrix multiplication has the following properties:\n\n* Associativity: $A(BC) = (AB)C$\n* Distributivity over matrix addition: $A(B + C) = AB + AC$ and $(A + B)C = AC + BC$\n* Associativity with scalar multiplication: $xAB = x(AB) = A(xB)$\n\n> Note that matrix multiplication is **not commutative:** $AB$ rarely equals $BA$.\n\nAnother very important property of matrix multiplication is that a matrix multiplied by a vector produces another vector.\n\nAn **identity matrix** $I_n$ is a special $n \\times n$ matrix which has $1$s on the main diagonal, and $0$s everywhere else:\n\n$$I_n =\n\\begin{bmatrix}\n 1 & 0 & \\dotsb & 0 \\\\\n 0 & 1 & \\dotsb & 0 \\\\\n \\vdots & \\vdots & \\ddots & \\vdots \\\\\n 0 & 0 & \\dotsb & 1\n\\end{bmatrix}$$\n\nWhat makes it special is that multiplying any matrix (of compatible size) by $I_n$ returns the original matrix. To put it another way, if $A$ is an $n \\times m$ matrix:\n\n$$AI_m = I_nA = A$$\n\nThis is why $I_n$ is called an identity matrix - it acts as a **multiplicative identity**. In other words, it is the matrix equivalent of the number $1$.", "_____no_output_____" ], [ "### <span style=\"color:blue\">Exercise 3</span>: Matrix multiplication.\n\n**Inputs:**\n\n1. An $n \\times m$ matrix $A$.\n2. An $m \\times k$ matrix $B$.\n\n**Output:** Return the $n \\times k$ matrix equal to the matrix product $AB$.\n\n<br/>\n<details>\n <summary><strong>Need a hint? Click here</strong></summary>\n To solve this exercise, you will need 3 <code>for</code> loops: one to go over $n$ rows of the output matrix, one to go over $k$ columns, and one to add up $m$ products that form each element of the output:\n <pre>\n <code>\n for i in range(n):\n for j in range(k):\n sum = 0\n for t in range(m):\n sum = sum + ...\n c[i][j] = sum\n </code>\n </pre>\n A video explanation can be found <a href=\"https://www.youtube.com/watch?v=OMA2Mwo0aZg\">here</a>.\n</details>", "_____no_output_____" ] ], [ [ "@exercise\ndef matrix_mult(a : Matrix, b : Matrix) -> Matrix:\n n = len(a)\n m = len(a[0])\n k = len(b[0])\n \n c = create_empty_matrix(n, k)\n \n def calc_sum_this_cell(i, j, m):\n sum_cell = 0\n for t in range(m):\n sum_cell += a[i][t] * b[t][j]\n return sum_cell\n \n for i in range(n):\n for j in range(k):\n c[i][j] = calc_sum_this_cell(i, j, m)\n\n return c", "Success!\n" ] ], [ [ "*Can't come up with a solution? 
See the explained solution in the [Linear Algebra Workbook](./Workbook_LinearAlgebra.ipynb#Exercise-3:-Matrix-multiplication.).*", "_____no_output_____" ], [ "## Inverse Matrices\n\nA square $n \\times n$ matrix $A$ is **invertible** if it has an inverse $n \\times n$ matrix $A^{-1}$ with the following property:\n\n$$AA^{-1} = A^{-1}A = I_n$$\n\nIn other words, $A^{-1}$ acts as the **multiplicative inverse** of $A$.\n\nAnother, equivalent definition highlights what makes this an interesting property. For any matrices $B$ and $C$ of compatible sizes:\n\n$$A^{-1}(AB) = A(A^{-1}B) = B \\\\\n(CA)A^{-1} = (CA^{-1})A = C$$\n\nA square matrix has a property called the **determinant**, with the determinant of matrix $A$ being written as $|A|$. A matrix is invertible if and only if its determinant isn't equal to $0$.\n\nFor a $2 \\times 2$ matrix $A$, the determinant is defined as $|A| = (A_{0,0} \\cdot A_{1,1}) - (A_{0,1} \\cdot A_{1,0})$.\n\nFor larger matrices, the determinant is defined through determinants of sub-matrices. You can learn more from [Wikipedia](https://en.wikipedia.org/wiki/Determinant) or from [Wolfram MathWorld](http://mathworld.wolfram.com/Determinant.html).", "_____no_output_____" ], [ "### <span style=\"color:blue\">Exercise 4</span>: Matrix Inversion.\n\n**Input:** An invertible $2 \\times 2$ matrix $A$.\n\n**Output:** Return the inverse of $A$, a $2 \\times 2$ matrix $A^{-1}$.\n\n<br/>\n<details>\n <summary><strong>Need a hint? Click here</strong></summary>\n Try to come up with a general method of doing it by hand first. If you get stuck, you may find <a href=\"https://en.wikipedia.org/wiki/Invertible_matrix#Inversion_of_2_%C3%97_2_matrices\">this Wikipedia article</a> useful. For this exercise, $|A|$ is guaranteed to be non-zero. <br>\n A video explanation can be found <a href=\"https://www.youtube.com/watch?v=01c12NaUQDw\">here</a>.\n</details>", "_____no_output_____" ] ], [ [ "@exercise\ndef matrix_inverse(m : Matrix) -> Matrix:\n #inverse must be same size as original (and should be square, which we could verify)\n \n m_inverse = create_empty_matrix(len(m), len(m[0]))\n \n a = m[0][0]\n b = m[0][1]\n c = m[1][0]\n d = m[1][1]\n \n determinant_m = a * d - b * c\n \n if determinant_m != 0:\n m_inverse[0][0] = d / determinant_m\n m_inverse[0][1] = -b / determinant_m\n m_inverse[1][0] = -c / determinant_m\n m_inverse[1][1] = a / determinant_m\n \n\n return m_inverse", "Success!\n" ] ], [ [ "*Can't come up with a solution? 
See the explained solution in the [Linear Algebra Workbook](./Workbook_LinearAlgebra.ipynb#Exercise-4:-Matrix-Inversion.).*", "_____no_output_____" ], [ "## Transpose\n\nThe **transpose** operation, denoted as $A^T$, is essentially a reflection of the matrix across the diagonal: $(A^T)_{i,j} = A_{j,i}$.\n\nGiven an $n \\times m$ matrix $A$, its transpose is the $m \\times n$ matrix $A^T$, such that if:\n\n$$A =\n\\begin{bmatrix}\n x_{0,0} & x_{0,1} & \\dotsb & x_{0,m-1} \\\\\n x_{1,0} & x_{1,1} & \\dotsb & x_{1,m-1} \\\\\n \\vdots & \\vdots & \\ddots & \\vdots \\\\\n x_{n-1,0} & x_{n-1,1} & \\dotsb & x_{n-1,m-1}\n\\end{bmatrix}$$\n\nthen:\n\n$$A^T =\n\\begin{bmatrix}\n x_{0,0} & x_{1,0} & \\dotsb & x_{n-1,0} \\\\\n x_{0,1} & x_{1,1} & \\dotsb & x_{n-1,1} \\\\\n \\vdots & \\vdots & \\ddots & \\vdots \\\\\n x_{0,m-1} & x_{1,m-1} & \\dotsb & x_{n-1,m-1}\n\\end{bmatrix}$$\n\nFor example:\n\n$$\\begin{bmatrix}\n 1 & 2 \\\\\n 3 & 4 \\\\\n 5 & 6\n\\end{bmatrix}^T\n=\n\\begin{bmatrix}\n 1 & 3 & 5 \\\\\n 2 & 4 & 6\n\\end{bmatrix}$$\n\nA **symmetric** matrix is a square matrix which equals its own transpose: $A = A^T$. To put it another way, it has reflection symmetry (hence the name) across the main diagonal. For example, the following matrix is symmetric:\n\n$$\\begin{bmatrix}\n 1 & 2 & 3 \\\\\n 2 & 4 & 5 \\\\\n 3 & 5 & 6\n\\end{bmatrix}$$\n\nThe transpose of a matrix product is equal to the product of transposed matrices, taken in reverse order:\n\n$$(AB)^T = B^TA^T$$", "_____no_output_____" ], [ "### <span style=\"color:blue\">Exercise 5</span>: Transpose.\n\n**Input:** An $n \\times m$ matrix $A$.\n\n**Output:** Return an $m \\times n$ matrix $A^T$, the transpose of $A$.\n\n<br/>\n<details>\n <summary><b>Need a hint? Click here</b></summary>\n A video explanation can be found <a href=\"https://www.youtube.com/watch?v=TZrKrNVhbjI\">here</a>.\n</details>", "_____no_output_____" ] ], [ [ "@exercise\ndef transpose(a : Matrix) -> Matrix:\n n = len(a)\n m = len(a[0])\n \n #transpose of n x m is m x n\n transpose_of_a = create_empty_matrix(m, n)\n \n #for each row, make it a column\n for i in range(n):\n for j in range(m):\n transpose_of_a[j][i] = a[i][j]\n \n return transpose_of_a", "Success!\n" ] ], [ [ "*Can't come up with a solution? See the explained solution in the [Linear Algebra Workbook](./Workbook_LinearAlgebra.ipynb#Exercise-5:-Transpose.).*", "_____no_output_____" ], [ "## Conjugate\n\nThe next important single-matrix operation is the **matrix conjugate**, denoted as $\\overline{A}$. 
This, as the name might suggest, involves taking the [complex conjugate](../ComplexArithmetic/ComplexArithmetic.ipynb#Complex-Conjugate) of every element of the matrix: if\n\n$$A =\n\\begin{bmatrix}\n x_{0,0} & x_{0,1} & \\dotsb & x_{0,m-1} \\\\\n x_{1,0} & x_{1,1} & \\dotsb & x_{1,m-1} \\\\\n \\vdots & \\vdots & \\ddots & \\vdots \\\\\n x_{n-1,0} & x_{n-1,1} & \\dotsb & x_{n-1,m-1}\n\\end{bmatrix}$$\n\nThen:\n\n$$\\overline{A} =\n\\begin{bmatrix}\n \\overline{x}_{0,0} & \\overline{x}_{0,1} & \\dotsb & \\overline{x}_{0,m-1} \\\\\n \\overline{x}_{1,0} & \\overline{x}_{1,1} & \\dotsb & \\overline{x}_{1,m-1} \\\\\n \\vdots & \\vdots & \\ddots & \\vdots \\\\\n \\overline{x}_{n-1,0} & \\overline{x}_{n-1,1} & \\dotsb & \\overline{x}_{n-1,m-1}\n\\end{bmatrix}$$\n\nThe conjugate of a matrix product equals to the product of conjugates of the matrices:\n\n$$\\overline{AB} = (\\overline{A})(\\overline{B})$$", "_____no_output_____" ], [ "### <span style=\"color:blue\">Exercise 6</span>: Conjugate.\n\n**Input:** An $n \\times m$ matrix $A$.\n\n**Output:** Return an $n \\times m$ matrix $\\overline{A}$, the conjugate of $A$.\n\n> As a reminder, you can get the real and imaginary components of complex number `z` using `z.real` and `z.imag`, respectively.\n<details>\n <summary><b>Need a hint? Click here</b></summary>\n To calculate the conjugate of a matrix take the conjugate of each element, check the <a href=\"../ComplexArithmetic/ComplexArithmetic.ipynb#Exercise-4:-Complex-conjugate.\">complex arithmetic tutorial</a> to see how to calculate the conjugate of a complex number.\n</details>", "_____no_output_____" ] ], [ [ "@exercise\ndef conjugate(a : Matrix) -> Matrix:\n # result is same size\n n = len(a)\n m = len(a[0])\n conjugate_of_a = create_empty_matrix(n, m)\n \n for i in range(n):\n for j in range(m):\n conjugate_of_a[i][j] = a[i][j].real + (-1)* a[i][j].imag * 1j #1j is i in python ugh\n \n return conjugate_of_a", "Success!\n" ] ], [ [ "*Can't come up with a solution? See the explained solution in the [Linear Algebra Workbook](./Workbook_LinearAlgebra.ipynb#Exercise-6:-Conjugate.).*", "_____no_output_____" ], [ "## Adjoint\n\nThe final important single-matrix operation is a combination of the above two. The **conjugate transpose**, also called the **adjoint** of matrix $A$, is defined as $A^\\dagger = \\overline{(A^T)} = (\\overline{A})^T$.\n\nA matrix is known as **Hermitian** or **self-adjoint** if it equals its own adjoint: $A = A^\\dagger$. 
For example, the following matrix is Hermitian:\n\n$$\\begin{bmatrix}\n 1 & i \\\\\n -i & 2\n\\end{bmatrix}$$\n\nThe adjoint of a matrix product can be calculated as follows:\n\n$$(AB)^\\dagger = B^\\dagger A^\\dagger$$", "_____no_output_____" ], [ "### <span style=\"color:blue\">Exercise 7</span>: Adjoint.\n\n**Input:** An $n \\times m$ matrix $A$.\n\n**Output:** Return an $m \\times n$ matrix $A^\\dagger$, the adjoint of $A$.\n\n> Don't forget, you can re-use functions you've written previously.", "_____no_output_____" ] ], [ [ "@exercise\ndef adjoint(a : Matrix) -> Matrix:\n \n #first do transpose, then do conjugate\n #size of result will be m x n because of the transpose\n \n n = len(a)\n m = len(a[0])\n adjoint_of_a = create_empty_matrix(m, n)\n \n #transpose - for each row, make it a column\n for i in range(n):\n for j in range(m):\n adjoint_of_a[j][i] = a[i][j] \n \n #conjugate let a + bi become a - bi\n for i in range(m):\n for j in range(n):\n adjoint_of_a[i][j] = adjoint_of_a[i][j].real + (-1)* adjoint_of_a[i][j].imag * 1j\n \n return adjoint_of_a", "Success!\n" ] ], [ [ "*Can't come up with a solution? See the explained solution in the [Linear Algebra Workbook](./Workbook_LinearAlgebra.ipynb#Exercise-7:-Adjoint.).*", "_____no_output_____" ], [ "## Unitary Matrices\n\n**Unitary matrices** are very important for quantum computing. A matrix is unitary when it is invertible, and its inverse is equal to its adjoint: $U^{-1} = U^\\dagger$. That is, an $n \\times n$ square matrix $U$ is unitary if and only if $UU^\\dagger = U^\\dagger U = I_n$.\n\nFor example, the following matrix is unitary:\n\n$$\\begin{bmatrix}\n \\frac{1}{\\sqrt{2}} & \\frac{1}{\\sqrt{2}} \\\\\n \\frac{i}{\\sqrt{2}} & \\frac{-i}{\\sqrt{2}} \\\\\n\\end{bmatrix}$$", "_____no_output_____" ], [ "### <span style=\"color:blue\">Exercise 8</span>: Unitary Verification.\n\n**Input:** An $n \\times n$ matrix $A$.\n\n**Output:** Check if the matrix is unitary and return `True` if it is, or `False` if it isn't.\n\n> Because of inaccuracy when dealing with floating point numbers on a computer (rounding errors), you won't always get the exact result you are expecting from a long series of calculations. To get around this, Python has a function `approx` which can be used to check if two numbers are \"close enough:\" `a == approx(b)`.\n\n<br/>\n<details>\n <summary><strong>Need a hint? Click here</strong></summary>\n Keep in mind, you have only implemented matrix inverses for $2 \\times 2$ matrices, and this exercise may give you larger inputs. There is a way to solve this without taking the inverse.\n</details>", "_____no_output_____" ] ], [ [ "from pytest import approx\n\n@exercise\ndef is_matrix_unitary(a : Matrix) -> bool:\n \n #if a is unitary, then a multiplied by its adjoint yields I\n #this will automatically handle the zero matrix corner case\n #this is for square nxn matrix\n n = len(a)\n product_matrix = matrix_mult(a, adjoint(a))\n \n #check whether product_matrix is I\n is_unitary = True\n for i in range(n):\n for j in range(n):\n #diagonal must be 1, all others must be zero\n #holy ugly code batman\n if (i == j and product_matrix[i][j] != approx(1)) or (i != j and product_matrix[i][j] != approx(0)):\n is_unitary = False\n break;\n\n return is_unitary", "Success!\n" ] ], [ [ "*Can't come up with a solution? See the explained solution in the [Linear Algebra Workbook](./Workbook_LinearAlgebra.ipynb#Exercise-8:-Unitary-Verification.).*", "_____no_output_____" ], [ "## Next Steps\n\nCongratulations! 
At this point, you should understand enough linear algebra to be able to get started with the tutorials on [the concept of qubit](../Qubit/Qubit.ipynb) and on [single-qubit quantum gates](../SingleQubitGates/SingleQubitGates.ipynb). The next section covers more advanced matrix operations that help explain the properties of qubits and quantum gates.", "_____no_output_____" ], [ "# Part II. Advanced Operations\n\n## Inner Product\n\nThe **inner product** is yet another important matrix operation that is only applied to vectors. Given two vectors $V$ and $W$ of the same size, their inner product $\\langle V , W \\rangle$ is defined as a product of matrices $V^\\dagger$ and $W$:\n\n$$\\langle V , W \\rangle = V^\\dagger W$$\n\nLet's break this down so it's a bit easier to understand. A $1 \\times n$ matrix (the adjoint of an $n \\times 1$ vector) multiplied by an $n \\times 1$ vector results in a $1 \\times 1$ matrix (which is equivalent to a scalar). The result of an inner product is that scalar. \n\nTo put it another way, to calculate the inner product of two vectors, take the corresponding elements $V_k$ and $W_k$, multiply the complex conjugate of $V_k$ by $W_k$, and add up those products:\n\n$$\\langle V , W \\rangle = \\sum_{k=0}^{n-1}\\overline{V_k}W_k$$\n\nHere is a simple example:\n\n$$\\langle\n\\begin{bmatrix}\n -6 \\\\\n 9i\n\\end{bmatrix}\n,\n\\begin{bmatrix}\n 3 \\\\\n -8\n\\end{bmatrix}\n\\rangle =\n\\begin{bmatrix}\n -6 \\\\\n 9i\n\\end{bmatrix}^\\dagger\n\\begin{bmatrix}\n 3 \\\\\n -8\n\\end{bmatrix}\n=\n\\begin{bmatrix} -6 & -9i \\end{bmatrix}\n\\begin{bmatrix}\n 3 \\\\\n -8\n\\end{bmatrix}\n= (-6) \\cdot (3) + (-9i) \\cdot (-8) = -18 + 72i$$", "_____no_output_____" ], [ "If you are familiar with the **dot product**, you will notice that it is equivalent to inner product for real-numbered vectors.\n\n> We use our definition for these tutorials because it matches the notation used in quantum computing. You might encounter other sources which define the inner product a little differently: $\\langle V , W \\rangle = W^\\dagger V = V^T\\overline{W}$, in contrast to the $V^\\dagger W$ that we use. These definitions are almost equivalent, with some differences in the scalar multiplication by a complex number.\n\nAn immediate application for the inner product is computing the **vector norm**. The norm of vector $V$ is defined as $||V|| = \\sqrt{\\langle V , V \\rangle}$. This condenses the vector down to a single non-negative real value. If the vector represents coordinates in space, the norm happens to be the length of the vector. 
A vector is called **normalized** if its norm is equal to $1$.\n\nThe inner product has the following properties:\n\n* Distributivity over addition: $\\langle V + W , X \\rangle = \\langle V , X \\rangle + \\langle W , X \\rangle$ and $\\langle V , W + X \\rangle = \\langle V , W \\rangle + \\langle V , X \\rangle$\n* Partial associativity with scalar multiplication: $x \\cdot \\langle V , W \\rangle = \\langle \\overline{x}V , W \\rangle = \\langle V , xW \\rangle$\n* Skew symmetry: $\\langle V , W \\rangle = \\overline{\\langle W , V \\rangle}$\n* Multiplying a vector by a unitary matrix **preserves the vector's inner product with itself** (and therefore the vector's norm): $\\langle UV , UV \\rangle = \\langle V , V \\rangle$\n\n> Note that just like matrix multiplication, the inner product is **not commutative**: $\\langle V , W \\rangle$ won't always equal $\\langle W , V \\rangle$.", "_____no_output_____" ], [ "### <span style=\"color:blue\">Exercise 9</span>: Inner product.\n\n**Inputs:**\n\n1. An $n \\times 1$ vector $V$.\n2. An $n \\times 1$ vector $W$.\n\n**Output:** Return a complex number - the inner product $\\langle V , W \\rangle$.\n\n<br/>\n<details>\n <summary><b>Need a hint? Click here</b></summary>\n A video explanation can be found <a href=\"https://www.youtube.com/watch?v=FCmH4MqbFGs\">here</a>.\n</details>", "_____no_output_____" ] ], [ [ "@exercise\ndef inner_prod(v : Matrix, w : Matrix) -> complex:\n n = len(v)\n \n conjugate_of_v = conjugate(v)\n \n inner_product = 0\n \n for k in range(n):\n inner_product += conjugate_of_v[k][0] * w[k][0]\n \n return inner_product", "Success!\n" ] ], [ [ "*Can't come up with a solution? See the explained solution in the [Linear Algebra Workbook](./Workbook_LinearAlgebra.ipynb#Exercise-9:-Inner-product.).*", "_____no_output_____" ], [ "### <span style=\"color:blue\">Exercise 10</span>: Normalized vectors.\n\n**Input:** A non-zero $n \\times 1$ vector $V$.\n\n**Output:** Return an $n \\times 1$ vector $\\frac{V}{||V||}$ - the normalized version of the vector $V$.\n\n<br/>\n<details>\n <summary><strong>Need a hint? Click here</strong></summary>\n You might need the square root function to solve this exercise. As a reminder, <a href=https://docs.python.org/3/library/math.html#math.sqrt>Python's square root function</a> is available in the <code>math</code> library.<br>\n A video explanation can be found <a href=\"https://www.youtube.com/watch?v=7fn03DIW3Ak\">here</a>. Note that when this method is used with complex vectors, you should take the modulus of the complex number for the division.\n</details>", "_____no_output_____" ] ], [ [ "@exercise\ndef normalize(v : Matrix) -> Matrix:\n # sqrt of complex number?? norm = math.sqrt(inner_prod(v, v))\n \n #try modulus of result of inner prod bc it's a complex number\n prod = inner_prod(v, v)\n modulus_of_prod = math.sqrt(prod.real**2 + prod.imag**2)\n norm = math.sqrt(modulus_of_prod)\n \n v_normalized = create_empty_matrix(len(v), 1)\n \n for k in range(len(v)):\n v_normalized[k][0] = v[k][0] / norm\n \n return v_normalized", "Success!\n" ] ], [ [ "*Can't come up with a solution? See the explained solution in the [Linear Algebra Workbook](./Workbook_LinearAlgebra.ipynb#Exercise-10:-Normalized-vectors.).*", "_____no_output_____" ], [ "## Outer Product\n\nThe **outer product** of two vectors $V$ and $W$ is defined as $VW^\\dagger$. That is, the outer product of an $n \\times 1$ vector and an $m \\times 1$ vector is an $n \\times m$ matrix. 
If we denote the outer product of $V$ and $W$ as $X$, then $X_{i,j} = V_i \\cdot \\overline{W_j}$. \n\nHere is a simple example:\nouter product of $\\begin{bmatrix} -3i \\\\ 9 \\end{bmatrix}$ and $\\begin{bmatrix} 9i \\\\ 2 \\\\ 7 \\end{bmatrix}$ is:\n\n$$\\begin{bmatrix} \\color{blue} {-3i} \\\\ \\color{blue} 9 \\end{bmatrix}\n\\begin{bmatrix} \\color{red} {9i} \\\\ \\color{red} 2 \\\\ \\color{red} 7 \\end{bmatrix}^\\dagger\n=\n\\begin{bmatrix} \\color{blue} {-3i} \\\\ \\color{blue} 9 \\end{bmatrix}\n\\begin{bmatrix} \\color{red} {-9i} & \\color{red} 2 & \\color{red} 7 \\end{bmatrix}\n=\n\\begin{bmatrix}\n \\color{blue} {-3i} \\cdot \\color{red} {(-9i)} & \\color{blue} {-3i} \\cdot \\color{red} 2 & \\color{blue} {-3i} \\cdot \\color{red} 7 \\\\\n \\color{blue} 9 \\cdot \\color{red} {(-9i)} & \\color{blue} 9 \\cdot \\color{red} 2 & \\color{blue} 9 \\cdot \\color{red} 7\n\\end{bmatrix}\n=\n\\begin{bmatrix}\n -27 & -6i & -21i \\\\\n -81i & 18 & 63\n\\end{bmatrix}$$", "_____no_output_____" ], [ "### <span style=\"color:blue\">Exercise 11</span>: Outer product.\n\n**Inputs:**\n\n1. An $n \\times 1$ vector $V$.\n2. An $m \\times 1$ vector $W$.\n\n**Output:** Return an $n \\times m$ matrix that represents the outer product of $V$ and $W$.", "_____no_output_____" ] ], [ [ "@exercise\ndef outer_prod(v : Matrix, w : Matrix) -> Matrix:\n #outer product equals v times adjoint of w\n \n return matrix_mult(v, adjoint(w))", "Success!\n" ] ], [ [ "*Can't come up with a solution? See the explained solution in the [Linear Algebra Workbook](./Workbook_LinearAlgebra.ipynb#Exercise-11:-Outer-product.).*", "_____no_output_____" ], [ "## Tensor Product\n\nThe **tensor product** is a different way of multiplying matrices. Rather than multiplying rows by columns, the tensor product multiplies the second matrix by every element of the first matrix.\n\nGiven $n \\times m$ matrix $A$ and $k \\times l$ matrix $B$, their tensor product $A \\otimes B$ is an $(n \\cdot k) \\times (m \\cdot l)$ matrix defined as follows:\n\n$$A \\otimes B =\n\\begin{bmatrix}\n A_{0,0} \\cdot B & A_{0,1} \\cdot B & \\dotsb & A_{0,m-1} \\cdot B \\\\\n A_{1,0} \\cdot B & A_{1,1} \\cdot B & \\dotsb & A_{1,m-1} \\cdot B \\\\\n \\vdots & \\vdots & \\ddots & \\vdots \\\\\n A_{n-1,0} \\cdot B & A_{n-1,1} \\cdot B & \\dotsb & A_{n-1,m-1} \\cdot B\n\\end{bmatrix}\n=\n\\begin{bmatrix}\n A_{0,0} \\cdot \\color{red} {\\begin{bmatrix}B_{0,0} & \\dotsb & B_{0,l-1} \\\\ \\vdots & \\ddots & \\vdots \\\\ B_{k-1,0} & \\dotsb & b_{k-1,l-1} \\end{bmatrix}} & \\dotsb &\n A_{0,m-1} \\cdot \\color{blue} {\\begin{bmatrix}B_{0,0} & \\dotsb & B_{0,l-1} \\\\ \\vdots & \\ddots & \\vdots \\\\ B_{k-1,0} & \\dotsb & B_{k-1,l-1} \\end{bmatrix}} \\\\\n \\vdots & \\ddots & \\vdots \\\\\n A_{n-1,0} \\cdot \\color{blue} {\\begin{bmatrix}B_{0,0} & \\dotsb & B_{0,l-1} \\\\ \\vdots & \\ddots & \\vdots \\\\ B_{k-1,0} & \\dotsb & B_{k-1,l-1} \\end{bmatrix}} & \\dotsb &\n A_{n-1,m-1} \\cdot \\color{red} {\\begin{bmatrix}B_{0,0} & \\dotsb & B_{0,l-1} \\\\ \\vdots & \\ddots & \\vdots \\\\ B_{k-1,0} & \\dotsb & B_{k-1,l-1} \\end{bmatrix}}\n\\end{bmatrix}\n= \\\\\n=\n\\begin{bmatrix}\n A_{0,0} \\cdot \\color{red} {B_{0,0}} & \\dotsb & A_{0,0} \\cdot \\color{red} {B_{0,l-1}} & \\dotsb & A_{0,m-1} \\cdot \\color{blue} {B_{0,0}} & \\dotsb & A_{0,m-1} \\cdot \\color{blue} {B_{0,l-1}} \\\\\n \\vdots & \\ddots & \\vdots & \\dotsb & \\vdots & \\ddots & \\vdots \\\\\n A_{0,0} \\cdot \\color{red} {B_{k-1,0}} & \\dotsb & A_{0,0} \\cdot \\color{red} {B_{k-1,l-1}} & \\dotsb & A_{0,m-1} \\cdot 
\\color{blue} {B_{k-1,0}} & \\dotsb & A_{0,m-1} \\cdot \\color{blue} {B_{k-1,l-1}} \\\\\n \\vdots & \\vdots & \\vdots & \\ddots & \\vdots & \\vdots & \\vdots \\\\\n A_{n-1,0} \\cdot \\color{blue} {B_{0,0}} & \\dotsb & A_{n-1,0} \\cdot \\color{blue} {B_{0,l-1}} & \\dotsb & A_{n-1,m-1} \\cdot \\color{red} {B_{0,0}} & \\dotsb & A_{n-1,m-1} \\cdot \\color{red} {B_{0,l-1}} \\\\\n \\vdots & \\ddots & \\vdots & \\dotsb & \\vdots & \\ddots & \\vdots \\\\\n A_{n-1,0} \\cdot \\color{blue} {B_{k-1,0}} & \\dotsb & A_{n-1,0} \\cdot \\color{blue} {B_{k-1,l-1}} & \\dotsb & A_{n-1,m-1} \\cdot \\color{red} {B_{k-1,0}} & \\dotsb & A_{n-1,m-1} \\cdot \\color{red} {B_{k-1,l-1}}\n\\end{bmatrix}$$\n\nHere is a simple example:\n\n$$\\begin{bmatrix} 1 & 2 \\\\ 3 & 4 \\end{bmatrix} \\otimes \\begin{bmatrix} 5 & 6 \\\\ 7 & 8 \\end{bmatrix} =\n\\begin{bmatrix}\n 1 \\cdot \\begin{bmatrix} 5 & 6 \\\\ 7 & 8 \\end{bmatrix} & 2 \\cdot \\begin{bmatrix} 5 & 6 \\\\ 7 & 8 \\end{bmatrix} \\\\\n 3 \\cdot \\begin{bmatrix} 5 & 6 \\\\ 7 & 8 \\end{bmatrix} & 4 \\cdot \\begin{bmatrix} 5 & 6 \\\\ 7 & 8 \\end{bmatrix}\n\\end{bmatrix}\n=\n\\begin{bmatrix}\n 1 \\cdot 5 & 1 \\cdot 6 & 2 \\cdot 5 & 2 \\cdot 6 \\\\\n 1 \\cdot 7 & 1 \\cdot 8 & 2 \\cdot 7 & 2 \\cdot 8 \\\\\n 3 \\cdot 5 & 3 \\cdot 6 & 4 \\cdot 5 & 4 \\cdot 6 \\\\\n 3 \\cdot 7 & 3 \\cdot 8 & 4 \\cdot 7 & 4 \\cdot 8\n\\end{bmatrix}\n=\n\\begin{bmatrix}\n 5 & 6 & 10 & 12 \\\\\n 7 & 8 & 14 & 16 \\\\\n 15 & 18 & 20 & 24 \\\\\n 21 & 24 & 28 & 32\n\\end{bmatrix}$$\n\nNotice that the tensor product of two vectors is another vector: if $V$ is an $n \\times 1$ vector, and $W$ is an $m \\times 1$ vector, $V \\otimes W$ is an $(n \\cdot m) \\times 1$ vector.", "_____no_output_____" ], [ "The tensor product has the following properties:\n\n* Distributivity over addition: $(A + B) \\otimes C = A \\otimes C + B \\otimes C$, $A \\otimes (B + C) = A \\otimes B + A \\otimes C$\n* Associativity with scalar multiplication: $x(A \\otimes B) = (xA) \\otimes B = A \\otimes (xB)$\n* Mixed-product property (relation with matrix multiplication): $(A \\otimes B) (C \\otimes D) = (AC) \\otimes (BD)$", "_____no_output_____" ], [ "### <span style=\"color:blue\">Exercise 12</span>*: Tensor Product.\n\n**Inputs:**\n\n1. An $n \\times m$ matrix $A$.\n2. A $k \\times l$ matrix $B$.\n\n**Output:** Return an $(n \\cdot k) \\times (m \\cdot l)$ matrix $A \\otimes B$, the tensor product of $A$ and $B$.", "_____no_output_____" ] ], [ [ "@exercise\ndef tensor_product(a : Matrix, b : Matrix) -> Matrix:\n n = len(a)\n m = len(a[0])\n k = len(b)\n l = len(b[0])\n \n result = create_empty_matrix(n*k, m*l)\n \n #for each element in a, which is n x m\n for arow in range(n):\n for acol in range(m):\n acurrent = a[arow][acol] \n #copy B elements into result, multiplying by acurrent as we go \n for brow in range(k):\n for bcol in range(l):\n bcurrent = b[brow][bcol] \n #trick is indices in result\n result[arow*k + brow][acol*l + bcol] = acurrent * bcurrent\n \n return result", "Success!\n" ] ], [ [ "*Can't come up with a solution? See the explained solution in the* <i><a href=\"./Workbook_LinearAlgebra.ipynb#Exercise-12*:-Tensor-Product.\">Linear Algebra Workbook</a></i>.", "_____no_output_____" ], [ "## Next Steps\n\nAt this point, you know enough to complete the tutorials on [the concept of qubit](../Qubit/Qubit.ipynb), [single-qubit gates](../SingleQubitGates/SingleQubitGates.ipynb), [multi-qubit systems](../MultiQubitSystems/MultiQubitSystems.ipynb), and [multi-qubit gates](../MultiQubitGates/MultiQubitGates.ipynb). 
\nThe last part of this tutorial is a brief introduction to eigenvalues and eigenvectors, which are used for more advanced topics in quantum computing. \nFeel free to move on to the next tutorials, and come back here once you encounter eigenvalues and eigenvectors elsewhere.", "_____no_output_____" ], [ "# Part III: Eigenvalues and Eigenvectors\n\nConsider the following example of multiplying a matrix by a vector:\n\n$$\\begin{bmatrix}\n 1 & -3 & 3 \\\\\n 3 & -5 & 3 \\\\\n 6 & -6 & 4\n\\end{bmatrix}\n\\begin{bmatrix}\n 1 \\\\\n 1 \\\\\n 2\n\\end{bmatrix}\n=\n\\begin{bmatrix}\n 4 \\\\\n 4 \\\\\n 8\n\\end{bmatrix}$$\n\nNotice that the resulting vector is just the initial vector multiplied by a scalar (in this case 4). This behavior is so noteworthy that it is described using a special set of terms.\n\nGiven a nonzero $n \\times n$ matrix $A$, a nonzero vector $V$, and a scalar $x$, if $AV = xV$, then $x$ is an **eigenvalue** of $A$, and $V$ is an **eigenvector** of $A$ corresponding to that eigenvalue.\n\nThe properties of eigenvalues and eigenvectors are used extensively in quantum computing. You can learn more about eigenvalues, eigenvectors, and their properties at [Wolfram MathWorld](http://mathworld.wolfram.com/Eigenvector.html) or on [Wikipedia](https://en.wikipedia.org/wiki/Eigenvalues_and_eigenvectors).", "_____no_output_____" ], [ "### <span style=\"color:blue\">Exercise 13</span>: Finding an eigenvalue.\n\n**Inputs:**\n\n1. An $n \\times n$ matrix $A$.\n2. An eigenvector $V$ of matrix $A$.\n\n**Output:** Return a real number - the eigenvalue of $A$ that is associated with the given eigenvector.\n\n<br/>\n<details>\n <summary><strong>Need a hint? Click here</strong></summary>\n Multiply the matrix by the vector, then divide the elements of the result by the elements of the original vector. Don't forget though, some elements of the vector may be $0$.\n</details>", "_____no_output_____" ] ], [ [ "@exercise\ndef find_eigenvalue(a : Matrix, v : Matrix) -> float:\n #eigenvalue = AV / V \n #AV will be (nxn) * (n * 1) = n * 1, so can divide each element\n n = len(a)\n prod_av = matrix_mult(a, v)\n result = create_empty_matrix(n, 1)\n eigenvalue = 0\n\n for i in range(n):\n if (v[i][0] != 0):\n result[i][0] = prod_av[i][0] / v[i][0]\n #find first non-zero result for eigenvalue \n if result[i][0] != 0:\n eigenvalue = result[i][0]\n break;\n \n \n return eigenvalue", "Success!\n" ] ], [ [ "*Can't come up with a solution? See the explained solution in the [Linear Algebra Workbook](./Workbook_LinearAlgebra.ipynb#Exercise-13:-Finding-an-eigenvalue.).*", "_____no_output_____" ], [ "### <span style=\"color:blue\">Exercise 14</span>**: Finding an eigenvector.\n\n**Inputs:**\n\n1. A $2 \\times 2$ matrix $A$.\n2. An eigenvalue $x$ of matrix $A$.\n\n\n**Output:** Return any non-zero eigenvector of $A$ that is associated with $x$.\n\n<br/>\n<details>\n <summary><strong>Need a hint? Click here</strong></summary>\n A matrix and an eigenvalue will have multiple eigenvectors (infinitely many, in fact), but you only need to find one.<br/>\n Try treating the elements of the vector as variables in a system of two equations. Watch out for division by $0$! \n</details>", "_____no_output_____" ] ], [ [ "@exercise\ndef find_eigenvector(a : Matrix, x : float) -> Matrix:\n \n result = create_empty_matrix(len(a), 1)\n \n return result", "_____no_output_____" ] ], [ [ "*Can't come up with a solution? 
See the explained solution in the [Linear Algebra Workbook](./Workbook_LinearAlgebra.ipynb#Exercise-14**:-Finding-an-eigenvector.).*", "_____no_output_____" ] ] ]
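The QuantumKatas tutorial stored above implements every operation by hand over nested Python lists. As a non-authoritative aside, its solutions (`matrix_add`, `matrix_mult`, `adjoint`, `tensor_product`, and the rest) can be cross-checked against numpy; the snippet below assumes those functions are defined exactly as in the record.

```python
import numpy as np

A = [[1, 2], [3, 4]]
B = [[5, 6], [7, 8]]
V = [[1 + 2j], [3 - 1j]]
W = [[2j], [4 + 0j]]

# The hand-rolled helpers from the tutorial should agree with numpy equivalents.
assert np.allclose(matrix_add(A, B), np.array(A) + np.array(B))
assert np.allclose(scalar_mult(2 - 1j, A), (2 - 1j) * np.array(A))
assert np.allclose(matrix_mult(A, B), np.array(A) @ np.array(B))
assert np.allclose(matrix_inverse(A), np.linalg.inv(A))
assert np.allclose(transpose(A), np.array(A).T)
assert np.allclose(adjoint(V), np.array(V).conj().T)
assert np.isclose(inner_prod(V, W), (np.array(V).conj().T @ np.array(W))[0, 0])
assert np.allclose(outer_prod(V, W), np.array(V) @ np.array(W).conj().T)
assert np.allclose(tensor_product(A, B), np.kron(np.array(A), np.array(B)))

# Exercise 8's check on the unitary example given in the tutorial text.
U = [[1 / np.sqrt(2), 1 / np.sqrt(2)],
     [1j / np.sqrt(2), -1j / np.sqrt(2)]]
assert is_matrix_unitary(U)
```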
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ] ]
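One gap in that record: Exercise 14 (finding an eigenvector of a 2x2 matrix for a given eigenvalue) is left as a stub that returns an all-zero matrix. A minimal completion sketch — my own illustration, not the workbook's official solution — solves (A - xI)V = 0 directly while guarding against zero entries:

```python
def find_eigenvector(a, x):
    """Return one non-zero eigenvector (a 2x1 nested list) of the 2x2 matrix `a`
    for the eigenvalue `x`, i.e. a non-trivial solution of (A - xI)V = 0."""
    b00 = a[0][0] - x
    b01 = a[0][1]
    b10 = a[1][0]
    b11 = a[1][1] - x

    if b01 != 0:
        # Row 1 gives b00*v0 + b01*v1 = 0; take v0 = b01, v1 = -b00.
        # Row 2 then holds automatically because det(A - xI) = 0.
        return [[b01], [-b00]]
    if b00 != 0:
        # b01 == 0, so row 1 forces v0 = 0; v1 is free (b11 must be 0 here).
        return [[0], [1]]
    if b11 != 0:
        # Row 1 is all zeros; solve row 2: b10*v0 + b11*v1 = 0.
        return [[b11], [-b10]]
    if b10 != 0:
        # Row 2 forces v0 = 0; v1 is free.
        return [[0], [1]]
    # A - xI is the zero matrix: every non-zero vector is an eigenvector.
    return [[1], [0]]


# Example: a = [[2, 0], [0, 3]] with eigenvalue x = 3 yields [[0], [1]],
# and indeed A applied to (0, 1) gives (0, 3) = 3 * (0, 1).
```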
cb77997c0d3dfa6bacf1d67d83848842006a4b72
42,661
ipynb
Jupyter Notebook
analysis/geoip/Filecoin IP Analysis.ipynb
coryschwartz/nebula-crawler
34ebe1109a5117949b4f285891a065adcc0bae08
[ "Apache-2.0" ]
66
2021-07-05T21:55:27.000Z
2022-03-20T20:44:38.000Z
analysis/geoip/Filecoin IP Analysis.ipynb
coryschwartz/nebula-crawler
34ebe1109a5117949b4f285891a065adcc0bae08
[ "Apache-2.0" ]
8
2021-07-18T09:00:12.000Z
2022-03-15T17:44:11.000Z
analysis/geoip/Filecoin IP Analysis.ipynb
wcgcyx/nebula-crawler
b71d52512d5aa8d9b050594e126a60c2aae0f7c9
[ "Apache-2.0" ]
6
2021-07-11T12:25:05.000Z
2022-01-04T21:14:50.000Z
250.947059
38,068
0.910926
[ [ [ "import geoip2.database\nimport csv\nfrom multiaddr import Multiaddr\nimport matplotlib.pyplot as plt\nimport seaborn as sns", "_____no_output_____" ], [ "sns.set_style(\"darkgrid\")\nsns.set()\n\nPEERS_CVS = \"2021-07-14T1924-peers-filecoin.csv\"", "_____no_output_____" ], [ "def node_address(maddr):\n try:\n return maddr.value_for_protocol(0x04)\n except:\n pass\n return maddr.value_for_protocol(0x29)\n\ndef parse_maddr_str(maddr_str):\n \"\"\"\n The following line parses a row like:\n {/ip6/::/tcp/37374,/ip4/151.252.13.181/tcp/37374}\n into\n ['/ip6/::/tcp/37374', '/ip4/151.252.13.181/tcp/37374']\n \"\"\"\n return maddr_str.replace(\"{\", \"\").replace(\"}\", \"\").split(\",\")", "_____no_output_____" ], [ "result = {}\nwith geoip2.database.Reader('./GeoLite2/GeoLite2-Country.mmdb') as geoipreader:\n with open(PEERS_CVS) as csvfile:\n csvreader = csv.DictReader(csvfile, delimiter=\",\")\n for row in csvreader:\n maddr_strs = parse_maddr_str(row[\"multi_addresses\"])\n found = False\n for maddr_str in maddr_strs:\n maddr = Multiaddr(maddr_str)\n address = node_address(maddr)\n try:\n iso_code = geoipreader.country(address).country.iso_code;\n if iso_code in result:\n result[iso_code] += 1\n else:\n result[iso_code] = 1\n found = True\n break\n except:\n pass\n if not found:\n print(\"Unknown: \", maddr_strs)\nprint(result)", "{'NO': 4, 'SI': 3, 'HK': 65, 'US': 152, 'CN': 2027, 'SE': 6, 'FR': 9, 'SG': 15, 'KR': 417, 'JP': 13, 'CA': 20, 'DE': 24, 'LV': 2, 'GB': 8, 'UA': 11, 'PL': 2, 'RU': 14, 'ES': 2, 'TW': 6, 'NL': 13, 'IR': 1, 'BE': 4, 'BY': 1, 'MY': 3, 'DK': 2, 'NZ': 2, 'TH': 2, 'CH': 1, 'BG': 7, 'AU': 3, 'CZ': 2, 'VN': 1}\n" ], [ "pie_data = {\"other\": 0}\nthreshold = 20\nfor key in result:\n if result[key] < threshold:\n pie_data[\"other\"] += result[key]\n continue\n \n pie_data[key] = result[key]\n\nvalues, labels = zip(*sorted(zip(pie_data.values(), pie_data.keys())))\n\nplt.figure(figsize=(8, 18))\nplt.pie(values, labels=labels, autopct='%1.1f%%', startangle=0, pctdistance=1.1, labeldistance=1.2)\nplt.title(\"Filecoin Network - Node Country Distribution\")\nplt.tightlayout()\nplt.plot()", "_____no_output_____" ] ] ]
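A note on the record just above: its final plotting cell calls `plt.tightlayout()`, which is not a matplotlib function (the correct name is `plt.tight_layout()`), so re-running that cell raises an AttributeError. A corrected sketch of that one cell — same data, with `plt.show()` substituted for the trailing bare `plt.plot()` — looks like this:

```python
import matplotlib.pyplot as plt

# `pie_data` is the country-count dict assembled in the preceding cell of the notebook.
values, labels = zip(*sorted(zip(pie_data.values(), pie_data.keys())))

plt.figure(figsize=(8, 18))
plt.pie(values, labels=labels, autopct='%1.1f%%', startangle=0,
        pctdistance=1.1, labeldistance=1.2)
plt.title("Filecoin Network - Node Country Distribution")
plt.tight_layout()  # the stored cell's plt.tightlayout() does not exist
plt.show()
```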
[ "code" ]
[ [ "code", "code", "code", "code", "code" ] ]
cb77aceec49b4c6eaaae6c121cb2963926c9677a
372,228
ipynb
Jupyter Notebook
code/notebooks/grad_per_module_batch.ipynb
t46/can-wikipedia-help-offline-rl
1ef027342b92356b7fb5fa9b0a71f8d473f385bd
[ "MIT" ]
null
null
null
code/notebooks/grad_per_module_batch.ipynb
t46/can-wikipedia-help-offline-rl
1ef027342b92356b7fb5fa9b0a71f8d473f385bd
[ "MIT" ]
null
null
null
code/notebooks/grad_per_module_batch.ipynb
t46/can-wikipedia-help-offline-rl
1ef027342b92356b7fb5fa9b0a71f8d473f385bd
[ "MIT" ]
null
null
null
348.202058
286,850
0.9107
[ [ [ "import gym\nimport numpy as np\nimport torch\nimport wandb\nimport pandas as pd\n\nimport argparse\nimport pickle\nimport random\nimport sys\n\nsys.path.append('/Users/shiro/research/projects/rl-nlp/can-wikipedia-help-offline-rl/code')\n\nfrom decision_transformer.evaluation.evaluate_episodes import (\n evaluate_episode,\n evaluate_episode_rtg,\n)\nfrom decision_transformer.models.decision_transformer import DecisionTransformer\nfrom decision_transformer.models.mlp_bc import MLPBCModel\nfrom decision_transformer.training.act_trainer import ActTrainer\nfrom decision_transformer.training.seq_trainer import SequenceTrainer\n\nfrom utils import get_optimizer\nimport os\n\nfrom tqdm.notebook import tqdm\nimport seaborn as sns\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "sns.set_style(\"ticks\")\nsns.set_context(\"paper\", 1.5, {\"lines.linewidth\": 2})", "_____no_output_____" ], [ "def discount_cumsum(x, gamma):\n discount_cumsum = np.zeros_like(x)\n discount_cumsum[-1] = x[-1]\n for t in reversed(range(x.shape[0] - 1)):\n discount_cumsum[t] = x[t] + gamma * discount_cumsum[t + 1]\n return discount_cumsum\n\ndef prepare_data(variant):\n env_name, dataset = variant[\"env\"], variant[\"dataset\"]\n model_type = variant[\"model_type\"]\n exp_prefix = 'gym-experiment'\n group_name = f\"{exp_prefix}-{env_name}-{dataset}\"\n exp_prefix = f\"{group_name}-{random.randint(int(1e5), int(1e6) - 1)}\"\n\n if env_name == \"hopper\":\n env = gym.make(\"Hopper-v3\")\n max_ep_len = 1000\n env_targets = [3600, 1800] # evaluation conditioning targets\n scale = 1000.0 # normalization for rewards/returns\n elif env_name == \"halfcheetah\":\n env = gym.make(\"HalfCheetah-v3\")\n max_ep_len = 1000\n env_targets = [12000, 6000]\n scale = 1000.0\n elif env_name == \"walker2d\":\n env = gym.make(\"Walker2d-v3\")\n max_ep_len = 1000\n env_targets = [5000, 2500]\n scale = 1000.0\n elif env_name == \"reacher2d\":\n from decision_transformer.envs.reacher_2d import Reacher2dEnv\n\n env = Reacher2dEnv()\n max_ep_len = 100\n env_targets = [76, 40]\n scale = 10.0\n else:\n raise NotImplementedError\n\n if model_type == \"bc\":\n env_targets = env_targets[\n :1\n ] # since BC ignores target, no need for different evaluations\n\n state_dim = env.observation_space.shape[0]\n act_dim = env.action_space.shape[0]\n\n # load dataset\n dataset_path = f\"../data/{env_name}-{dataset}-v2.pkl\"\n with open(dataset_path, \"rb\") as f:\n trajectories = pickle.load(f)\n\n # save all path information into separate lists\n mode = variant.get(\"mode\", \"normal\")\n states, traj_lens, returns = [], [], []\n for path in trajectories:\n if mode == \"delayed\": # delayed: all rewards moved to end of trajectory\n path[\"rewards\"][-1] = path[\"rewards\"].sum()\n path[\"rewards\"][:-1] = 0.0\n states.append(path[\"observations\"])\n traj_lens.append(len(path[\"observations\"]))\n returns.append(path[\"rewards\"].sum())\n traj_lens, returns = np.array(traj_lens), np.array(returns)\n\n # used for input normalization\n states = np.concatenate(states, axis=0)\n state_mean, state_std = np.mean(states, axis=0), np.std(states, axis=0) + 1e-6\n\n num_timesteps = sum(traj_lens)\n\n print(\"=\" * 50)\n print(f\"Starting new experiment: {env_name} {dataset}\")\n print(f\"{len(traj_lens)} trajectories, {num_timesteps} timesteps found\")\n print(f\"Average return: {np.mean(returns):.2f}, std: {np.std(returns):.2f}\")\n print(f\"Max return: {np.max(returns):.2f}, min: {np.min(returns):.2f}\")\n print(\"=\" * 50)\n \n pct_traj = 
variant.get(\"pct_traj\", 1.0)\n\n # only train on top pct_traj trajectories (for %BC experiment)\n num_timesteps = max(int(pct_traj * num_timesteps), 1)\n sorted_inds = np.argsort(returns) # lowest to highest\n num_trajectories = 1\n timesteps = traj_lens[sorted_inds[-1]]\n ind = len(trajectories) - 2\n while ind >= 0 and timesteps + traj_lens[sorted_inds[ind]] < num_timesteps:\n timesteps += traj_lens[sorted_inds[ind]]\n num_trajectories += 1\n ind -= 1\n sorted_inds = sorted_inds[-num_trajectories:]\n\n # used to reweight sampling so we sample according to timesteps instead of trajectories\n p_sample = traj_lens[sorted_inds] / sum(traj_lens[sorted_inds])\n \n return trajectories, sorted_inds, state_dim, act_dim, max_ep_len, state_mean, state_std, num_trajectories, p_sample, scale\n\ndef get_batch(\n batch_size, \n max_len,\n trajectories,\n sorted_inds,\n state_dim,\n act_dim,\n max_ep_len,\n state_mean,\n state_std,\n num_trajectories,\n p_sample,\n scale,\n device\n ):\n batch_inds = np.random.choice(\n np.arange(num_trajectories),\n size=batch_size,\n replace=True,\n p=p_sample, # reweights so we sample according to timesteps\n )\n\n s, a, r, d, rtg, timesteps, mask = [], [], [], [], [], [], []\n for i in range(batch_size):\n traj = trajectories[int(sorted_inds[batch_inds[i]])]\n si = random.randint(0, traj[\"rewards\"].shape[0] - 1)\n\n # get sequences from dataset\n s.append(traj[\"observations\"][si : si + max_len].reshape(1, -1, state_dim))\n a.append(traj[\"actions\"][si : si + max_len].reshape(1, -1, act_dim))\n r.append(traj[\"rewards\"][si : si + max_len].reshape(1, -1, 1))\n if \"terminals\" in traj:\n d.append(traj[\"terminals\"][si : si + max_len].reshape(1, -1))\n else:\n d.append(traj[\"dones\"][si : si + max_len].reshape(1, -1))\n timesteps.append(np.arange(si, si + s[-1].shape[1]).reshape(1, -1))\n timesteps[-1][timesteps[-1] >= max_ep_len] = (\n max_ep_len - 1\n ) # padding cutoff\n rtg.append(\n discount_cumsum(traj[\"rewards\"][si:], gamma=1.0)[\n : s[-1].shape[1] + 1\n ].reshape(1, -1, 1)\n )\n if rtg[-1].shape[1] <= s[-1].shape[1]:\n rtg[-1] = np.concatenate([rtg[-1], np.zeros((1, 1, 1))], axis=1)\n\n # padding and state + reward normalization\n tlen = s[-1].shape[1]\n s[-1] = np.concatenate(\n [np.zeros((1, max_len - tlen, state_dim)), s[-1]], axis=1\n )\n s[-1] = (s[-1] - state_mean) / state_std\n a[-1] = np.concatenate(\n [np.ones((1, max_len - tlen, act_dim)) * -10.0, a[-1]], axis=1\n )\n r[-1] = np.concatenate([np.zeros((1, max_len - tlen, 1)), r[-1]], axis=1)\n d[-1] = np.concatenate([np.ones((1, max_len - tlen)) * 2, d[-1]], axis=1)\n rtg[-1] = (\n np.concatenate([np.zeros((1, max_len - tlen, 1)), rtg[-1]], axis=1)\n / scale\n )\n timesteps[-1] = np.concatenate(\n [np.zeros((1, max_len - tlen)), timesteps[-1]], axis=1\n )\n mask.append(\n np.concatenate(\n [np.zeros((1, max_len - tlen)), np.ones((1, tlen))], axis=1\n )\n )\n\n s = torch.from_numpy(np.concatenate(s, axis=0)).to(\n dtype=torch.float32, device=device\n )\n a = torch.from_numpy(np.concatenate(a, axis=0)).to(\n dtype=torch.float32, device=device\n )\n r = torch.from_numpy(np.concatenate(r, axis=0)).to(\n dtype=torch.float32, device=device\n )\n d = torch.from_numpy(np.concatenate(d, axis=0)).to(\n dtype=torch.long, device=device\n )\n rtg = torch.from_numpy(np.concatenate(rtg, axis=0)).to(\n dtype=torch.float32, device=device\n )\n timesteps = torch.from_numpy(np.concatenate(timesteps, axis=0)).to(\n dtype=torch.long, device=device\n )\n mask = torch.from_numpy(np.concatenate(mask, 
axis=0)).to(device=device)\n\n return s, a, r, d, rtg, timesteps, mask", "_____no_output_____" ], [ "seed=666\nepoch=1\nenv_name='hopper'\nreward_state_action = 'state'\nmodel_name = 'igpt'\n\ntorch.manual_seed(seed)\n\ndataset_name = 'medium'\n\n# model_names = ['gpt2', 'igpt', 'dt'] # ['gpt2', 'igpt', 'dt']\ngrad_norms_list = []\n\n\nif model_name == 'gpt2':\n pretrained_lm1 = 'gpt2'\nelif model_name == 'clip':\n pretrained_lm1 = 'openai/clip-vit-base-patch32'\nelif model_name == 'igpt':\n pretrained_lm1 = 'openai/imagegpt-small'\nelif model_name == 'dt':\n pretrained_lm1 = False\n\nvariant = {\n 'embed_dim': 768,\n 'n_layer': 12,\n 'n_head': 1,\n 'activation_function': 'relu',\n 'dropout': 0.2, # 0.1\n 'load_checkpoint': False if epoch==0 else f'../checkpoints/{model_name}_medium_{env_name}_666/model_{epoch}.pt',\n 'seed': seed,\n 'outdir': f\"checkpoints/{model_name}_{dataset_name}_{env_name}_{seed}\",\n 'env': env_name,\n 'dataset': dataset_name,\n 'model_type': 'dt',\n 'K': 20, # 2\n 'pct_traj': 1.0,\n 'batch_size': 100, # 64\n 'num_eval_episodes': 100,\n 'max_iters': 40,\n 'num_steps_per_iter': 2500,\n 'pretrained_lm': pretrained_lm1,\n 'gpt_kmeans': None,\n 'kmeans_cache': None,\n 'frozen': False,\n 'extend_positions': False,\n 'share_input_output_proj': True\n}\n\nos.makedirs(variant[\"outdir\"], exist_ok=True)\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\ntrajectories, sorted_inds, state_dim, act_dim, max_ep_len, state_mean, state_std, num_trajectories, p_sample, scale = prepare_data(variant)\n\nK = variant[\"K\"]\nbatch_size = variant[\"batch_size\"]\n\nloss_fn = lambda s_hat, a_hat, r_hat, s, a, r: torch.mean((a_hat - a) ** 2)\n\nmodel = DecisionTransformer(\n args=variant,\n state_dim=state_dim,\n act_dim=act_dim,\n max_length=K,\n max_ep_len=max_ep_len,\n hidden_size=variant[\"embed_dim\"],\n n_layer=variant[\"n_layer\"],\n n_head=variant[\"n_head\"],\n n_inner=4 * variant[\"embed_dim\"],\n activation_function=variant[\"activation_function\"],\n n_positions=1024,\n resid_pdrop=variant[\"dropout\"],\n attn_pdrop=0.1,\n)\nif variant[\"load_checkpoint\"]:\n state_dict = torch.load(variant[\"load_checkpoint\"], map_location=torch.device('cpu'))\n model.load_state_dict(state_dict)\n print(f\"Loaded from {variant['load_checkpoint']}\")\n\n# model.eval()\n\n# grad = {}\n# def get_grad(name):\n# def hook(model, input, output):\n# grad[name] = output.detach()\n# return hook\n\n# for block_id in range(len(model.transformer.h)):\n# model.transformer.h[block_id].ln_1.register_backward_hook(get_grad(f'{block_id}.ln_1'))\n# model.transformer.h[block_id].attn.c_attn.register_backward_hook(get_grad(f'{block_id}.attn.c_attn'))\n# model.transformer.h[block_id].attn.c_proj.register_backward_hook(get_grad(f'{block_id}.attn.c_proj'))\n# model.transformer.h[block_id].attn.attn_dropout.register_backward_hook(get_grad(f'{block_id}.attn.attn_dropout'))\n# model.transformer.h[block_id].attn.resid_dropout.register_backward_hook(get_grad(f'{block_id}.attn.resid_dropout'))\n# model.transformer.h[block_id].ln_2.register_backward_hook(get_grad(f'{block_id}.ln_2'))\n# model.transformer.h[block_id].mlp.c_fc.register_backward_hook(get_grad(f'{block_id}.mlp.c_fc'))\n# model.transformer.h[block_id].mlp.c_proj.register_backward_hook(get_grad(f'{block_id}.mlp.c_proj'))\n# model.transformer.h[block_id].mlp.act.register_backward_hook(get_grad(f'{block_id}.mlp.act'))\n# model.transformer.h[block_id].mlp.dropout.register_backward_hook(get_grad(f'{block_id}.mlp.dropout'))\nstates, 
actions, rewards, dones, rtg, timesteps, attention_mask = get_batch(batch_size, \n K,\n trajectories,\n sorted_inds,\n state_dim,\n act_dim,\n max_ep_len,\n state_mean,\n state_std,\n num_trajectories,\n p_sample,\n scale,\n device\n )\naction_target = torch.clone(actions)\ngrads_list = []\n\nfor batch_id in tqdm(range(batch_size)):\n ##### 勾配計算 #####\n action_target_batch = action_target[batch_id, :, :].unsqueeze(0)\n\n state_preds, action_preds, reward_preds, all_embs = model.forward(\n states[batch_id, :, :].unsqueeze(0),\n actions[batch_id, :, :].unsqueeze(0),\n rewards[batch_id, :, :].unsqueeze(0),\n rtg[batch_id, :-1].unsqueeze(0),\n timesteps[batch_id, :].unsqueeze(0),\n attention_mask=attention_mask[batch_id, :].unsqueeze(0),\n )\n\n act_dim = action_preds.shape[2]\n action_preds = action_preds.reshape(-1, act_dim)[attention_mask[batch_id, :].unsqueeze(0).reshape(-1) > 0]\n action_target_batch = action_target_batch.reshape(-1, act_dim)[\n attention_mask[batch_id, :].unsqueeze(0).reshape(-1) > 0\n ]\n\n model.zero_grad()\n loss = loss_fn(\n None,\n action_preds,\n None,\n None,\n action_target_batch,\n None,\n )\n loss.backward()\n torch.nn.utils.clip_grad_norm_(model.parameters(), .25)\n\n grad_norm = {}\n for name, param in model.transformer.h.named_parameters():\n grad_norm[name] = torch.norm(param.grad.view(-1)).numpy()", "==================================================\nStarting new experiment: hopper medium\n2186 trajectories, 999906 timesteps found\nAverage return: 1422.06, std: 378.95\nMax return: 3222.36, min: 315.87\n==================================================\nLoading from pretrained\n" ], [ "plt.figure(figsize=(20, 4))\nplt.bar(x=range(len(grad_norm)), height=list(grad_norm.values()), color=(0.372, 0.537, 0.537))\n# plt.xticks([], [])\nplt.xlabel('Parameter of Each Layer', fontsize=20)\nplt.ylabel('Clipped Gradient Norm', fontsize=20)\nplt.title('Gradient Norm of Each Parameter', fontsize=20)\nplt.ylim(0, 0.25)\nplt.tight_layout()\nplt.savefig(f'figs/gradnorm_perparam_{epoch}_igpt_{env_name}_{dataset_name}_{seed}_{reward_state_action}.pdf')\nplt.show()", "_____no_output_____" ], [ "df = pd.DataFrame([grad_norm]).astype(float)\ndf", "_____no_output_____" ], [ "df = pd.DataFrame([grad_norm]).astype(float)\nx = np.linspace(0, 1, len(grad_norm))#[(i+1)/len(grad_norm) for i in range(len(grad_norm))]\ndf.plot.barh(stacked=True, figsize=(24, 40)) # color=plt.cm.Blues_r(x), \nplt.legend(loc=\"lower left\", ncol=12)\nplt.show()", "_____no_output_____" ], [ "grad_norm_others = {\n '0.ln_1.weight': 0,\n '0.ln_1.bias': 0,\n 'others': 0\n}\ntotal = np.sum(list(grad_norm.values()))\nfor key, value in grad_norm.items():\n if key == '0.ln_1.weight' or key == '0.ln_1.bias':\n grad_norm_others[key] = value / total\n else:\n grad_norm_others['others'] += value / total\n\ntotal = np.sum(grad_norm_others.values())\n\n\ndf_others = pd.DataFrame([grad_norm_others]).astype(float)\nx = np.linspace(0, 1, len(grad_norm_others))#[(i+1)/len(grad_norm) for i in range(len(grad_norm))]\ndf_others.plot.barh(stacked=True, figsize=(10, 5), color=[(0.372, 0.537, 0.537), (0.627, 0.352, 0.470), (0.733, 0.737, 0.870)], fontsize=12) # color=plt.cm.Blues_r(x), \nplt.yticks([], [])\nplt.xlim(0, 1)\nplt.title('Gradient Norm per Parameter', fontsize=12)\nplt.xlabel('Clipped Gradient Norm Ratio', fontsize=12)\nplt.legend(loc=\"lower left\", ncol=12, fontsize=12)\nplt.savefig(f'figs/gradnorm_perparam_ratio_{epoch}_igpt_{env_name}_{dataset_name}_{seed}_{reward_state_action}.pdf')\nplt.show()", 
"_____no_output_____" ], [ "total = np.sum(list(grad_norm.values()))\ntotal", "_____no_output_____" ], [ "grad_norm_others", "_____no_output_____" ], [ "grad_norm", "_____no_output_____" ], [ "df['0.ln_1.weight']", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb77beb4aec47ab53dc4aff09c2e379f740113bc
6,834
ipynb
Jupyter Notebook
3. Machine Learning/3.1 Foundations/Machine Learning Foundations - Lecture Notes.ipynb
vohuynhquangnguyen/Machine-Learning-Notes
3114cd74b23fcae9f881ae01892208ba65b24e40
[ "Apache-2.0" ]
5
2021-07-02T07:30:31.000Z
2022-03-04T15:19:18.000Z
3. Machine Learning/3.1 Foundations/.ipynb_checkpoints/Machine Learning Foundations - Lecture Notes-checkpoint.ipynb
vohuynhquangnguyen/Machine-Learning-Notes
3114cd74b23fcae9f881ae01892208ba65b24e40
[ "Apache-2.0" ]
26
2021-07-02T08:54:15.000Z
2021-08-09T14:41:13.000Z
3. Machine Learning/3.1 Foundations/Machine Learning Foundations - Lecture Notes.ipynb
vohuynhquangnguyen/Machine-Learning-Notes
3114cd74b23fcae9f881ae01892208ba65b24e40
[ "Apache-2.0" ]
2
2021-07-03T13:48:07.000Z
2022-02-14T02:03:44.000Z
54.238095
540
0.672666
[ [ [ "Author: Vo, Huynh Quang Nguyen", "_____no_output_____" ], [ "# Acknowledgments\nThe contents of this note are based on the lecture notes and the materials from the sources below. All rights reserved to respective owners.\n\n\n1. **Deep Learning** textbook by Dr Ian Goodfellow, Prof. Yoshua Bengio, and Prof. Aaron Courville. Available at: [Deep Learning textbook](https://www.deeplearningbook.org/)\n\n\n2. **Machine Learning with Python** course given by Prof. Alexander Jung from Aalto University, Finland.\n\n\n3. **Machine Learning** course by Prof. Andrew Ng. Available in Coursera: [Machine Learning](https://www.coursera.org/learn/machine-learning)\n\n\n4. **Hands-On Machine Learning with Scikit-Learn, Keras, and TensorFlow** by Aurélien Géron.", "_____no_output_____" ], [ "## Disclaimer\n1. This lecture note serves as a summary of fundamental concepts that are commonly used in machine learning. Thus, we strongly recommend this note to be used strictly as a reference:\n * For lectures, teachers on which topics they should include when organizing their own Machine Learning classes, and\n * For learners to get an overview of machine learning.\n\n2. This lecture note is the second of the two-episodes series about the fundamentals of data science and machine learning. Thus, we strongly recommend to read this note after having finished the previous one.", "_____no_output_____" ], [ "# Overview of Machine Learning", "_____no_output_____" ], [ "## Components of Machine Learning\n1. As mentioned in the previous note, machine learning (ML) programs are algorithms that is capable of learning from data. According to the definition from Tom Mitchell, which was also introduced in the previous note, a ML program is a program that \"learn from **experience $\\mathcal{E}$** with respect to some **task $\\mathcal{T}$** and some **performance measure $\\mathcal{P}$**, if its performance on $\\mathcal{T}$, as measured by $\\mathcal{P}$, improves with experience $\\mathcal{E}$\".\n\n\n2. Let's dive into details each component mentioned in Mitchell's definition.\n\n### Task \n1. ML tasks are usually described in terms of how the machine learning system should process an **example**, the latter of which is a collection of features that have been quantitatively measured from some object or event that we want the machine learning system to process. An example is typically represented as a vector $\\mathbf{x} \\in \\mathbb{R}^n$ where each entry $x_i$ of the vector is another feature (also known as variable).\n\n\n2. Here are the list of commonly tasks in ML. Noted that we have already encountered most of them in Data Science.\n * **Classification**: In this type of task, the computer program is asked to specify which of $k$ categories some input belongs to. To solve this task, the learning algorithm is usually asked to produce a function $f : \\mathbb{R}^n \\rightarrow {1, . . . , k}$. When $y = f(\\mathbf{x})$, the model assigns an input $\\mathbf{x}$ to a category identified by numeric code $y$. A harder version of this task is **classification with missing inputs**, where every measurement in its input is not guaranteed to always be provided.\n * **Regression**: In this type of task, the computer program is asked to predict a numerical value given some input. To solve this task, the learning algorithm is asked to output a function $f : \\mathbb{R}^n \\rightarrow \\mathbb{R}$. 
This type of task is similar to classification, except that the format of output is different.\n * **Machine translation**: In a machine translation task, the input already consists of a sequence of symbols in some language, and the computer program must convert this into a sequence of symbols in another language. \n * **Anomaly detection**: In this type of task, the computer program sifts through a set of events or objects, and flags some of them as being unusual or atypical.\n * **Synthesis and sampling**: In this type of task, the machine learning algorithm is asked to generate new examples that are similar to those in the training data. \n * **Imputation of missing values**: In this type of task, the machine learning algorithm is given a new example $\\mathbf{x} \\in \\mathbb{R}^n$, but with some entries $x_i$ of $\\mathbf{x}$ missing. The algorithm must provide a prediction of the values of the missing entries.\n * **Denoising**: In this type of task, the machine learning algorithm is given as input a corrupted example $\\tilde{\\mathbf{x}} \\in \\mathbb{R}^n$ obtained by an unknown corruption process from a clean example $\\mathbf{x} \\in \\mathbb{R}^n$. The learner must predict the clean example $\\mathbf{x}$ from its corrupted version $\\tilde{\\mathbf{x}}$, or more generally predict the conditional probability distribution $p(\\mathbf{x} \\mid \\tilde{\\mathbf{x}})$.\n * **Density estimation or probability mass function estimation**: In this type of task, the machine learning algorithm is asked to learn a function $p_{\\text{model}} : \\mathbb{R}^n \\rightarrow \\mathbb{R}$, where $p_{\\text{model}}(\\mathbf{x})$ can be interpreted as a probability density function (if $\\mathbf{x}$ is continuous) or a probability mass function (if $\\mathbf{x}$ is discrete) on the space that the examples were drawn from. To do such a task well (we will specify exactly what that means when we discuss performance measure $\\mathcal{P}$), the algorithm needs to learn the structure of the data it has seen.\n\n", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
cb77d2d5432ab60043a7276ea23fe6b0683353e1
17,437
ipynb
Jupyter Notebook
MyLearning/Lists,_Arrays,_Tensors,_Dataframes,_and_Datasets.ipynb
andrewcgaitskell/dash-template
b90ae99579f126ad4eb151f48d39a3ae59035b5d
[ "MIT" ]
null
null
null
MyLearning/Lists,_Arrays,_Tensors,_Dataframes,_and_Datasets.ipynb
andrewcgaitskell/dash-template
b90ae99579f126ad4eb151f48d39a3ae59035b5d
[ "MIT" ]
null
null
null
MyLearning/Lists,_Arrays,_Tensors,_Dataframes,_and_Datasets.ipynb
andrewcgaitskell/dash-template
b90ae99579f126ad4eb151f48d39a3ae59035b5d
[ "MIT" ]
null
null
null
33.727273
453
0.468659
[ [ [ "<a href=\"https://colab.research.google.com/github/andrewcgaitskell/dmtoolnotes/blob/main/Lists%2C_Arrays%2C_Tensors%2C_Dataframes%2C_and_Datasets.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "https://colab.research.google.com/github/tensorpig/learning_tensorflow/blob/master/Lists%2C_Arrays%2C_Tensors%2C_Dataframes%2C_and_Datasets.ipynb#scrollTo=0-i0PylHrjWs", "_____no_output_____" ] ], [ [ "import pandas as pd", "_____no_output_____" ], [ "data = [[1,2,3],[4.0,5.0,6.0],['100','101','102']]\ndata", "_____no_output_____" ], [ "data_df_raw = pd.DataFrame(data=data)\ndata_df = data_df_raw.T\ndata_df.columns=['legs','weight','version']\ndata_df", "_____no_output_____" ] ], [ [ "Let's pretend we have a simple regression like problem. We start out with 3 features describing a robotic spider we're building. For example: number of legs (feature 1), weight (feature 2), and version number (feature 3). Say that we so far built three prototype robots, so have 3 values for each feature. ", "_____no_output_____" ] ], [ [ "data_dict = {'legs':[1,2,3],\n 'weight':[4.0,5.0,6.0],\n 'version':['100','101','102']}\ndata_df_dict = pd.DataFrame(data=data_dict)\ndata_df_dict", "_____no_output_____" ], [ "feature1 = [1,2,3]\nfeature2 = [4.0,5.0,6.0]\nfeature3 = ['100','101','102']\nprint(type(feature1))", "<class 'list'>\n" ], [ "data_df['legs'].tolist()", "_____no_output_____" ], [ "data_df.iloc[0]", "_____no_output_____" ] ], [ [ "We'll look at the various different data structures you will probably run into when doing ML/AI in pyhton and tensorflow. Combining the features into matrices etc. Starting from basic python lists and progressing up to keras Datasets which you will typically feed into your neural network.\n\nFirst up: the basic python LIST", "_____no_output_____" ] ], [ [ "list2d = [feature1, feature2, feature3] \nprint(type(list2d))\nprint(list2d)\nprint('({},{})'.format(len(list2d),len(list2d[0]))) #nr of rows and cols\nprint(list2d[0]) #first row\nprint([row[0] for row in list2d]) #first col\nprint(list2d[0][0]) # value at 0,0\nprint([[row[i] for row in list2d] for i in range(len(list2d[0]))]) # transpose to make more like excel sheet", "_____no_output_____" ] ], [ [ "A python list is a collection of any data types. The items in a list can be lists again, and there are no requirements for the items in a list to be of the same type, or of the same length.\n\nThere is also the Tuple, which has () around the feautes instead of []. A Tuple works hte same, but once creatd, cannot be changed.\n\nNext up the Numpy ARRAY", "_____no_output_____" ] ], [ [ "import numpy as np\narray2d = np.array([feature1, feature2, feature3], dtype=object)\nprint(type(array2d))\nprint(array2d)\nprint(array2d.shape) #nr of rows and cols\nprint(array2d[0,:]) #first element/row = array, could also be just array2d[0]\nprint(array2d[:,0]) #first column, or actually first element from each 1d array in the 2d array\nprint(array2d[0,0]) # value at 0,0\nprint(array2d.transpose()) #more like excel sheet", "_____no_output_____" ] ], [ [ "A numpy array expects all items to be of the same type. If the dtype=object is not used above, all of the values will be converted to strings as this is the minimum type that can hold all values. 
A numpy array can handle features of different length, but then each element in the array will be of type 'list', so no direct indexing like you would expect from a matrix.\n\nNext up the Pandas DATAFRAME", "_____no_output_____" ] ], [ [ "import pandas as pd\ndataframe = pd.DataFrame()\ndataframe['feature1'] = feature1\ndataframe['feature2'] = feature2\ndataframe['feature3'] = feature3\nprint(type(dataframe))\nprint(dataframe)\nprint(dataframe.shape)\nprint(dataframe.iloc[0].tolist()) # first row, without .tolist() it also shows the column headers as row headers. You can also use loc[0], where 0 is now the value in the index column (same as row number here)\nprint(dataframe['feature1'].tolist()) #first column, without .tolist() it also shows the index. You can also use .iloc[:,0]\nprint(dataframe.iloc[0,0]) #value at 0,0", "_____no_output_____" ] ], [ [ "A Pandas dataframe is basically an excel sheet. It can handle features with different datatypes, but not different lengths of feature arrays.\n\nNext up TENSORs", "_____no_output_____" ] ], [ [ "import tensorflow as tf\nfeature3int = [int(x) for x in feature3 ] # map string values to numerical representation (in this case the string is a number so easy)\ntensorRank2 = tf.constant([feature1, feature2, feature3int], dtype=float)\nprint(type(tensorRank2))\nprint(tensorRank2)\nprint(tensorRank2.shape)\nprint(tensorRank2[0,:].numpy()) #first row, without .numpy() a tensor object is returned. Could also use just [0]\nprint(tensorRank2[:,0].numpy()) #first col\nprint(tensorRank2[0,0].numpy()) # value at 0,0\nprint(tf.transpose(tensorRank2)) # more like excel sheet", "_____no_output_____" ] ], [ [ "Tensors are n-dimensional generalizations of matrices. Vectors are tensors, and can be seen as 1-dimensional matrices. All are represented using n-dimensional arrays with a uniform type, and features with uniform length. I had to convert the feature3 list to int, although I could also have converted the feature1 and feature2 lists to strings.\n\nNext up DATASETs", "_____no_output_____" ] ], [ [ "feature1f = [float(x) for x in feature1 ] # map string values to numerical representation\nfeature3f = [float(x) for x in feature3 ] # map string values to numerical representation\ndataset = tf.data.Dataset.from_tensor_slices([feature1f, feature2, feature3f])\nprint(type(dataset))\nprint(dataset.element_spec)\nprint(dataset)\nprint(list(dataset.as_numpy_iterator()))\nprint(list(dataset.take(1).as_numpy_iterator())[0]) #first \"row\"\nprint(list(dataset.take(1).as_numpy_iterator())[0][0]) # value at 0,0\n", "_____no_output_____" ] ], [ [ "A Dataset is a sequence of elements, each element consisting of one or more components. In this case, each element of the Dataset is a TensorSliceDataset of shape (3,) which, when converted to a list, is shown to wrap around an array of 3 floats as expected.\n\nA Dataset is aimed at creating data pipelines, which get data from somewhere, process and transform it (typically in smaller batches), and then output it to a neural network (or somewhere else). A main goal of such a pipeline is to avoid getting (all) the data in memory and enable large data sets to be handled in smaller pieces. As such, getting values for specific elements in the dataset is not what Datasets are built for (and it shows). 
", "_____no_output_____" ] ], [ [ "datasett = tf.data.Dataset.from_tensor_slices((feature1, feature2, feature3))\nprint(type(datasett))\nprint(datasett.element_spec)\nprint(datasett)\nprint(list(datasett.as_numpy_iterator()))", "_____no_output_____" ] ], [ [ "If you create a Dataset from a tuple of arrays, instead of an array of arrays, you can see each element is now a tuple of 3 TensorSpec of different type and shape () which can be seen wrap around a tuple for transposed feature values. \n\nThis shows that from_tensor_slices() \"slices\" the tensors along the first dimension", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
cb77de9b6383ab61f381cfb9fb0776a2b6351e7e
2,621
ipynb
Jupyter Notebook
python/smqtk/web/classifier_service/example/classifier_test.ipynb
jbeezley/SMQTK
e6b00f94be95f39bbca52a7983ac3d6d1f86f847
[ "BSD-3-Clause" ]
1
2021-04-10T10:51:26.000Z
2021-04-10T10:51:26.000Z
python/smqtk/web/classifier_service/example/classifier_test.ipynb
spongezhang/SMQTK
e6b00f94be95f39bbca52a7983ac3d6d1f86f847
[ "BSD-3-Clause" ]
3
2021-09-08T02:17:49.000Z
2022-03-12T00:40:33.000Z
python/smqtk/web/classifier_service/example/classifier_test.ipynb
DigitalCompanion/SMQTK
fc9404b69150ef44f24423844bc80735c0c2b669
[ "BSD-3-Clause" ]
null
null
null
28.802198
108
0.621137
[ [ [ "from smqtk.web.classifier_service import SmqtkClassifierService\n\nimport json\nimport os\n\nfrom six.moves import cPickle as pickle\n\n# Make available to SMQTK the local dummy implementations.\nos.environ.update(\n CLASSIFIER_PATH='dummy_classifier',\n DESCRIPTOR_GENERATOR_PATH='dummy_descriptor_generator',\n)", "_____no_output_____" ], [ "# Craft the configuration for the service.\ndefault_config = SmqtkClassifierService.get_default_config()\ndef_conf_json = json.dumps(default_config, sort_keys=True, indent=2)\n# print(def_conf_json)\nconfig = json.loads(def_conf_json)\n\nconfig['classification_factory']['type'] = 'MemoryClassificationElement'\ndel config['classification_factory']['FileClassificationElement']\ndel config['classifier_collection']['__example_label__']\nconfig['classifier_collection']['dummy'] = dict(DummyClassifier={}, type='DummyClassifier')\nconfig['immutable_labels'] = ['dummy']\nconfig['descriptor_factory']['type'] = 'DescriptorMemoryElement'\nconfig['descriptor_generator'] = dict(DummyDescriptorGenerator={}, type='DummyDescriptorGenerator')\ndel config['descriptor_factory']['DescriptorFileElement']\nconfig['iqr_state_classifier_config']['type'] = 'LibSvmClassifier'\n\nconfig['enable_classifier_removal'] = True\n\nprint(json.dumps(config, sort_keys=True, indent=2))", "_____no_output_____" ], [ "# Run the server. This is a blocking call.\nSmqtkClassifierService(json_config=config).run()\n# The ``test_classifier_service_curl.sh`` script may be called now.", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code" ] ]
cb77e78672f7054d40bad746a786415e68d9e920
121,814
ipynb
Jupyter Notebook
tf4_movie-reviews-2.ipynb
halllo/TensorFlow_FirstSteps
23c24360bcd893e0f30cb69a0e27b2f4dd4a742e
[ "MIT" ]
null
null
null
tf4_movie-reviews-2.ipynb
halllo/TensorFlow_FirstSteps
23c24360bcd893e0f30cb69a0e27b2f4dd4a742e
[ "MIT" ]
null
null
null
tf4_movie-reviews-2.ipynb
halllo/TensorFlow_FirstSteps
23c24360bcd893e0f30cb69a0e27b2f4dd4a742e
[ "MIT" ]
null
null
null
290.725537
35,536
0.727388
[ [ [ "# Movie Review Text Classification with Text processing\nThis tutorial: https://www.tensorflow.org/tutorials/keras/text_classification", "_____no_output_____" ] ], [ [ "!pip install -q tf-nightly\nimport tensorflow as tf", "_____no_output_____" ], [ "from tensorflow import keras\n\n!pip install -q tfds-nightly\nimport tensorflow_datasets as tfds\ntfds.disable_progress_bar()\n\nimport numpy as np\n\nprint(tf.__version__)", "2.2.0-dev20200429\n" ], [ "(train_data, test_data), info = tfds.load(\n # Use the version pre-encoded with an ~8k vocabulary.\n 'imdb_reviews/subwords8k', \n # Return the train/test datasets as a tuple.\n split = (tfds.Split.TRAIN, tfds.Split.TEST),\n # Return (example, label) pairs from the dataset (instead of a dictionary).\n as_supervised=True,\n # Also return the `info` structure. \n with_info=True)", "WARNING:absl:TFDS datasets with text encoding are deprecated and will be removed in a future version. Instead, you should use the plain text version and tokenize the text using `tensorflow_text` (See: https://www.tensorflow.org/tutorials/tensorflow_text/intro#tfdata_example)\n\u001b[1mDownloading and preparing dataset imdb_reviews/subwords8k/1.0.0 (download: 80.23 MiB, generated: Unknown size, total: 80.23 MiB) to /home/jovyan/tensorflow_datasets/imdb_reviews/subwords8k/1.0.0...\u001b[0m\nShuffling and writing examples to /home/jovyan/tensorflow_datasets/imdb_reviews/subwords8k/1.0.0.incompleteMT5J4M/imdb_reviews-train.tfrecord\nShuffling and writing examples to /home/jovyan/tensorflow_datasets/imdb_reviews/subwords8k/1.0.0.incompleteMT5J4M/imdb_reviews-test.tfrecord\nShuffling and writing examples to /home/jovyan/tensorflow_datasets/imdb_reviews/subwords8k/1.0.0.incompleteMT5J4M/imdb_reviews-unsupervised.tfrecord\n\u001b[1mDataset imdb_reviews downloaded and prepared to /home/jovyan/tensorflow_datasets/imdb_reviews/subwords8k/1.0.0. Subsequent calls will reuse this data.\u001b[0m\n" ], [ "encoder = info.features['text'].encoder", "_____no_output_____" ], [ "print ('Vocabulary size: {}'.format(encoder.vocab_size))", "Vocabulary size: 8185\n" ], [ "sample_string = 'Hello TensorFlow.'\n\nencoded_string = encoder.encode(sample_string)\nprint ('Encoded string is {}'.format(encoded_string))\n\noriginal_string = encoder.decode(encoded_string)\nprint ('The original string: \"{}\"'.format(original_string))\n\nassert original_string == sample_string", "Encoded string is [4025, 222, 6307, 2327, 4043, 2120, 7975]\nThe original string: \"Hello TensorFlow.\"\n" ], [ "for ts in encoded_string:\n print ('{} ----> {}'.format(ts, encoder.decode([ts])))", "4025 ----> Hell\n222 ----> o \n6307 ----> Ten\n2327 ----> sor\n4043 ----> Fl\n2120 ----> ow\n7975 ----> .\n" ], [ "for train_example, train_label in train_data.take(5):\n print('Encoded text:', train_example[:10].numpy())\n print('Label:', train_label.numpy())\n print(encoder.decode(train_example)[:150])", "Encoded text: [ 62 18 41 604 927 65 3 644 7968 21]\nLabel: 0\nThis was an absolutely terrible movie. Don't be lured in by Christopher Walken or Michael Ironside. 
Both are great actors, but this must simply be the\nEncoded text: [ 12 31 93 867 7 1256 6585 7961 421 365]\nLabel: 0\nI have been known to fall asleep during films, but this is usually due to a combination of things including, really tired, being warm and comfortable \nEncoded text: [ 636 102 4714 8 1 4333 4 4135 47 1325]\nLabel: 0\nMann photographs the Alberta Rocky Mountains in a superb fashion, and Jimmy Stewart and Walter Brennan give enjoyable performances as they always seem\nEncoded text: [ 62 9 1 312 6 32 23 4 7809 47]\nLabel: 1\nThis is the kind of film for a snowy Sunday afternoon when the rest of the world can go ahead with its own business as you descend into a big arm-chai\nEncoded text: [ 249 929 31 2699 104 2 51 1 707 13]\nLabel: 1\nAs others have mentioned, all the women that go nude in this film are mostly absolutely gorgeous. The plot very ably shows the hypocrisy of the female\n" ], [ "BUFFER_SIZE = 1000\n\ntrain_batches = (\n train_data\n .shuffle(BUFFER_SIZE)\n .padded_batch(32, padded_shapes=([None],[])))\n\ntest_batches = (\n test_data\n .padded_batch(32, padded_shapes=([None],[])))", "_____no_output_____" ], [ "train_batches = (\n train_data\n .shuffle(BUFFER_SIZE)\n .padded_batch(32))\n\ntest_batches = (\n test_data\n .padded_batch(32))", "_____no_output_____" ], [ "for example_batch, label_batch in train_batches.take(2):\n print(\"Batch shape:\", example_batch.shape)\n print(\"label shape:\", label_batch.shape)", "Batch shape: (32, 1615)\nlabel shape: (32,)\nBatch shape: (32, 677)\nlabel shape: (32,)\n" ], [ "model = keras.Sequential([\n keras.layers.Embedding(encoder.vocab_size, 16),\n keras.layers.GlobalAveragePooling1D(),\n keras.layers.Dense(1)])\n\nmodel.summary()", "Model: \"sequential\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nembedding (Embedding) (None, None, 16) 130960 \n_________________________________________________________________\nglobal_average_pooling1d (Gl (None, 16) 0 \n_________________________________________________________________\ndense (Dense) (None, 1) 17 \n=================================================================\nTotal params: 130,977\nTrainable params: 130,977\nNon-trainable params: 0\n_________________________________________________________________\n" ], [ "model.compile(optimizer='adam',\n loss=tf.losses.BinaryCrossentropy(from_logits=True),\n metrics=['accuracy'])", "_____no_output_____" ], [ "history = model.fit(train_batches,\n epochs=10,\n validation_data=test_batches,\n validation_steps=30)", "Epoch 1/10\n782/782 [==============================] - 4s 5ms/step - loss: 0.6836 - accuracy: 0.5002 - val_loss: 0.6682 - val_accuracy: 0.5052\nEpoch 2/10\n782/782 [==============================] - 4s 5ms/step - loss: 0.6257 - accuracy: 0.5486 - val_loss: 0.5989 - val_accuracy: 0.5792\nEpoch 3/10\n782/782 [==============================] - 4s 5ms/step - loss: 0.5454 - accuracy: 0.6599 - val_loss: 0.5360 - val_accuracy: 0.7083\nEpoch 4/10\n782/782 [==============================] - 4s 5ms/step - loss: 0.4791 - accuracy: 0.7467 - val_loss: 0.4871 - val_accuracy: 0.7260\nEpoch 5/10\n782/782 [==============================] - 4s 5ms/step - loss: 0.4252 - accuracy: 0.8000 - val_loss: 0.4498 - val_accuracy: 0.8208\nEpoch 6/10\n782/782 [==============================] - 4s 5ms/step - loss: 0.3841 - accuracy: 0.8316 - val_loss: 0.4213 - val_accuracy: 0.8375\nEpoch 7/10\n782/782 
[==============================] - 4s 5ms/step - loss: 0.3511 - accuracy: 0.8516 - val_loss: 0.4002 - val_accuracy: 0.8302\nEpoch 8/10\n782/782 [==============================] - 4s 5ms/step - loss: 0.3274 - accuracy: 0.8669 - val_loss: 0.3845 - val_accuracy: 0.8521\nEpoch 9/10\n782/782 [==============================] - 4s 5ms/step - loss: 0.3070 - accuracy: 0.8767 - val_loss: 0.3725 - val_accuracy: 0.8490\nEpoch 10/10\n782/782 [==============================] - 4s 5ms/step - loss: 0.2903 - accuracy: 0.8844 - val_loss: 0.3712 - val_accuracy: 0.8271\n" ], [ "loss, accuracy = model.evaluate(test_batches)\n\nprint(\"Loss: \", loss)\nprint(\"Accuracy: \", accuracy)", "782/782 [==============================] - 2s 3ms/step - loss: 0.3380 - accuracy: 0.8350\nLoss: 0.33803045749664307\nAccuracy: 0.8349599838256836\n" ], [ "history_dict = history.history\nhistory_dict.keys()", "_____no_output_____" ], [ "import matplotlib.pyplot as plt\n\nacc = history_dict['accuracy']\nval_acc = history_dict['val_accuracy']\nloss = history_dict['loss']\nval_loss = history_dict['val_loss']\n\nepochs = range(1, len(acc) + 1)\n\n# \"bo\" is for \"blue dot\"\nplt.plot(epochs, loss, 'bo', label='Training loss')\n# b is for \"solid blue line\"\nplt.plot(epochs, val_loss, 'b', label='Validation loss')\nplt.title('Training and validation loss')\nplt.xlabel('Epochs')\nplt.ylabel('Loss')\nplt.legend()\n\nplt.show()", "_____no_output_____" ], [ "plt.clf() # clear figure\n\nplt.plot(epochs, acc, 'bo', label='Training acc')\nplt.plot(epochs, val_acc, 'b', label='Validation acc')\nplt.title('Training and validation accuracy')\nplt.xlabel('Epochs')\nplt.ylabel('Accuracy')\nplt.legend(loc='lower right')\n\nplt.show()", "_____no_output_____" ] ], [ [ "I want a probablistic binary output, but the prediction is actually in numeric value range (raw output, logits=inverse of sigmoid). 
So after training, we add the sigmoid function as the last layer to get range \\[0,1\\].", "_____no_output_____" ] ], [ [ "probability_model = tf.keras.Sequential([\n model,\n tf.keras.layers.Activation('sigmoid')\n])", "_____no_output_____" ], [ "reviews = list(test_data.take(30))\nfor (review, label) in reviews:\n reviewPredictable = tf.expand_dims(review, 0)\n [[p]] = probability_model.predict(reviewPredictable)\n l = label.numpy()\n print('actual', l, 'predicted', p, \"\\x1b[32m\\\"correct\\\"\\x1b[0m\" if (l==1 and p>=0.5) or (l==0 and p<0.5) else \"\\x1b[31m\\\"wrong\\\"\\x1b[0m\")\n", "actual 1 predicted 0.04586496 \u001b[31m\"wrong\"\u001b[0m\nactual 1 predicted 0.9965754 \u001b[32m\"correct\"\u001b[0m\nactual 0 predicted 0.05847739 \u001b[32m\"correct\"\u001b[0m\nactual 0 predicted 0.017328272 \u001b[32m\"correct\"\u001b[0m\nactual 1 predicted 1.0 \u001b[32m\"correct\"\u001b[0m\nactual 1 predicted 0.9999504 \u001b[32m\"correct\"\u001b[0m\nactual 1 predicted 1.0 \u001b[32m\"correct\"\u001b[0m\nactual 1 predicted 1.0 \u001b[32m\"correct\"\u001b[0m\nactual 0 predicted 0.87956274 \u001b[31m\"wrong\"\u001b[0m\nactual 1 predicted 0.16138041 \u001b[31m\"wrong\"\u001b[0m\nactual 0 predicted 4.545313e-05 \u001b[32m\"correct\"\u001b[0m\nactual 0 predicted 0.054269537 \u001b[32m\"correct\"\u001b[0m\nactual 1 predicted 0.93153566 \u001b[32m\"correct\"\u001b[0m\nactual 0 predicted 0.18797654 \u001b[32m\"correct\"\u001b[0m\nactual 1 predicted 0.98900455 \u001b[32m\"correct\"\u001b[0m\nactual 0 predicted 0.119149916 \u001b[32m\"correct\"\u001b[0m\nactual 1 predicted 0.9957332 \u001b[32m\"correct\"\u001b[0m\nactual 0 predicted 0.01647478 \u001b[32m\"correct\"\u001b[0m\nactual 1 predicted 0.99385804 \u001b[32m\"correct\"\u001b[0m\nactual 0 predicted 3.5888823e-09 \u001b[32m\"correct\"\u001b[0m\nactual 0 predicted 2.927113e-15 \u001b[32m\"correct\"\u001b[0m\nactual 1 predicted 0.022534447 \u001b[31m\"wrong\"\u001b[0m\nactual 0 predicted 0.0008164926 \u001b[32m\"correct\"\u001b[0m\nactual 0 predicted 2.9270244e-05 \u001b[32m\"correct\"\u001b[0m\nactual 1 predicted 1.0 \u001b[32m\"correct\"\u001b[0m\nactual 1 predicted 0.03468268 \u001b[31m\"wrong\"\u001b[0m\nactual 0 predicted 1.2156344e-05 \u001b[32m\"correct\"\u001b[0m\nactual 0 predicted 0.0011337061 \u001b[32m\"correct\"\u001b[0m\nactual 0 predicted 0.00027265924 \u001b[32m\"correct\"\u001b[0m\nactual 1 predicted 0.9994221 \u001b[32m\"correct\"\u001b[0m\n" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
cb77f83981c980764142ef9df9a1a5d4a5f7f0b5
68,344
ipynb
Jupyter Notebook
notebook/Maximum_likelihood.ipynb
tnakaicode/Python-for-Signal-Processing
b610ca377564e115a0dbd5a8cdcc2ad195c3b162
[ "CC-BY-3.0" ]
null
null
null
notebook/Maximum_likelihood.ipynb
tnakaicode/Python-for-Signal-Processing
b610ca377564e115a0dbd5a8cdcc2ad195c3b162
[ "CC-BY-3.0" ]
null
null
null
notebook/Maximum_likelihood.ipynb
tnakaicode/Python-for-Signal-Processing
b610ca377564e115a0dbd5a8cdcc2ad195c3b162
[ "CC-BY-3.0" ]
null
null
null
132.706796
24,394
0.833958
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
cb77fbf3bfa4e2d7882ed54c062375e0f087b6ad
23,762
ipynb
Jupyter Notebook
Movie_lens_reco_practice/Recommender_practice.ipynb
patiljeevanr/Recommendation
eee6e9c0b07315c98a304458ed0ee1cde9c9c295
[ "MIT" ]
null
null
null
Movie_lens_reco_practice/Recommender_practice.ipynb
patiljeevanr/Recommendation
eee6e9c0b07315c98a304458ed0ee1cde9c9c295
[ "MIT" ]
null
null
null
Movie_lens_reco_practice/Recommender_practice.ipynb
patiljeevanr/Recommendation
eee6e9c0b07315c98a304458ed0ee1cde9c9c295
[ "MIT" ]
null
null
null
29.665418
287
0.376105
[ [ [ "import os\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "data_cols = ['user id','movie id','rating','timestamp']\nitem_cols = ['movie id','movie title','release date','video release date','IMDb URL','unknown','Action','Adventure','Animation','Childrens','Comedy','Crime','Documentary','Drama','Fantasy','Film-Noir','Horror','Musical','Mystery','Romance ','Sci-Fi','Thriller','War' ,'Western']\nuser_cols = ['user id','age','gender','occupation','zip code']", "_____no_output_____" ], [ "#importing the data files onto dataframes\nusers = pd.read_csv('u.user', sep='|', names=user_cols, encoding='latin-1')\nitem = pd.read_csv('u.item', sep='|', names=item_cols, encoding='latin-1')\ndata = pd.read_csv('u.data', sep='\\t', names=data_cols, encoding='latin-1')", "_____no_output_____" ], [ "users.head()", "_____no_output_____" ], [ "item.head()", "_____no_output_____" ], [ "data.head()", "_____no_output_____" ], [ "print(users.info())", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 943 entries, 0 to 942\nData columns (total 5 columns):\nuser id 943 non-null int64\nage 943 non-null int64\ngender 943 non-null object\noccupation 943 non-null object\nzip code 943 non-null object\ndtypes: int64(2), object(3)\nmemory usage: 36.9+ KB\nNone\n" ], [ "print(item.info())", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 1682 entries, 0 to 1681\nData columns (total 24 columns):\nmovie id 1682 non-null int64\nmovie title 1682 non-null object\nrelease date 1681 non-null object\nvideo release date 0 non-null float64\nIMDb URL 1679 non-null object\nunknown 1682 non-null int64\nAction 1682 non-null int64\nAdventure 1682 non-null int64\nAnimation 1682 non-null int64\nChildrens 1682 non-null int64\nComedy 1682 non-null int64\nCrime 1682 non-null int64\nDocumentary 1682 non-null int64\nDrama 1682 non-null int64\nFantasy 1682 non-null int64\nFilm-Noir 1682 non-null int64\nHorror 1682 non-null int64\nMusical 1682 non-null int64\nMystery 1682 non-null int64\nRomance 1682 non-null int64\nSci-Fi 1682 non-null int64\nThriller 1682 non-null int64\nWar 1682 non-null int64\nWestern 1682 non-null int64\ndtypes: float64(1), int64(20), object(3)\nmemory usage: 315.5+ KB\nNone\n" ], [ "print(data.info())", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 100000 entries, 0 to 99999\nData columns (total 4 columns):\nuser id 100000 non-null int64\nmovie id 100000 non-null int64\nrating 100000 non-null int64\ntimestamp 100000 non-null int64\ndtypes: int64(4)\nmemory usage: 3.1 MB\nNone\n" ], [ "#creating a merge dataframe\ndf = pd.merge(pd.merge(item, data), users)", "_____no_output_____" ], [ "# Group by Movies by their title\nratings_total = df.groupby('movie title').size()\nprint(ratings_total.head())", "movie title\n'Til There Was You (1997) 9\n1-900 (1994) 5\n101 Dalmatians (1996) 109\n12 Angry Men (1957) 125\n187 (1997) 41\ndtype: int64\n" ], [ "# Take the mean ratings of each movie using the mean function\nratings_mean = (df.groupby('movie title'))['movie title','rating'].mean()\nprint(ratings_mean.head())", " rating\nmovie title \n'Til There Was You (1997) 2.333333\n1-900 (1994) 2.600000\n101 Dalmatians (1996) 2.908257\n12 Angry Men (1957) 4.344000\n187 (1997) 3.024390\n" ], [ "ratings_mean.shape", "_____no_output_____" ], [ "ratings_total.shape", "_____no_output_____" ], [ "# Modify the dataframes so that we can merge the two\n# Now we sort the values by the total rating\nratings_total = pd.DataFrame({'movie title':ratings_total.index,'total ratings': 
ratings_total.values})\nratings_mean['movie title'] = ratings_mean.index", "_____no_output_____" ], [ "# Now we sort the values by the total rating\nfinal = pd.merge(ratings_mean, ratings_total).sort_values(by = 'total ratings',ascending= False)\nprint(final.describe())", " rating total ratings\ncount 1664.000000 1664.000000\nmean 3.077018 60.096154\nstd 0.780418 80.956484\nmin 1.000000 1.000000\n25% 2.665094 7.000000\n50% 3.162132 27.000000\n75% 3.651808 80.250000\nmax 5.000000 583.000000\n" ], [ "final.head()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb780bcef77365bcfa7a9d847831d457cbed7c5d
92,421
ipynb
Jupyter Notebook
inst/prototypes/marginal_stats_compare.ipynb
stephenslab/mmbr
4635d3756530e833e85fcfd972f6d12989cfd294
[ "MIT" ]
7
2020-03-08T15:55:12.000Z
2020-12-02T12:03:57.000Z
inst/prototypes/marginal_stats_compare.ipynb
stephenslab/mmbr
4635d3756530e833e85fcfd972f6d12989cfd294
[ "MIT" ]
18
2020-01-17T20:39:39.000Z
2021-02-15T17:04:00.000Z
inst/prototypes/marginal_stats_compare.ipynb
gaow/mmbr
9f2891696f503d76ff2901ff3beb12be35785d20
[ "MIT" ]
4
2019-03-05T23:23:53.000Z
2020-01-07T06:50:28.000Z
204.019868
41,646
0.899622
[ [ [ "# Comparing marginal statistics with Linear regression\n\nFor prototyping `get_sumstats()` function in data object.", "_____no_output_____" ], [ "## Data simulation\n\nHere I allow for presence of missing data. I load all packages to access some private functions.", "_____no_output_____" ] ], [ [ "devtools::load_all(\"/home/gaow/Documents/GIT/software/susieR\")\ndevtools::load_all(\"/home/gaow/Documents/GIT/software/mvsusieR\")", "Loading susieR\nLoading mvsusieR\nLoading required package: mashr\nLoading required package: ashr\n" ], [ "set.seed(1)\ndat = mvsusie_sim1(100,100,5,4,center_scale=F,y_missing=0.5)", "_____no_output_____" ], [ "names(dat)", "_____no_output_____" ], [ "resid_Y <- compute_cov_diag(dat$y)\nresid_Y_miss <- compute_cov_diag(dat$y_missing)\nalpha = 0", "_____no_output_____" ] ], [ [ "## Marginal stats with mvsusieR", "_____no_output_____" ], [ "Without and with missing data:", "_____no_output_____" ] ], [ [ "d1 = DenseData$new(dat$X, dat$y,center=T,scale=T)\nres1 = d1$get_sumstats(diag(resid_Y), cov2cor(resid_Y), alpha)\nz1 = res1$bhat/res1$sbhat0", "_____no_output_____" ], [ "d2 = DenseData$new(dat$X, dat$y_missing,center=T,scale=T)\nres2 = d2$get_sumstats(diag(resid_Y_miss), cov2cor(resid_Y_miss), alpha)\nz2 = res2$bhat/res2$sbhat0", "_____no_output_____" ] ], [ [ "## Marginal stats with `lm.fit`", "_____no_output_____" ] ], [ [ "z3 = susieR:::calc_z(dat$X, dat$y,center=T,scale=T)", "_____no_output_____" ], [ "z4 = susieR:::calc_z(dat$X, dat$y_missing,center=T,scale=T)", "_____no_output_____" ], [ "plot(z1,z3)", "_____no_output_____" ], [ "plot(z2,z4)", "_____no_output_____" ] ], [ [ "The are not exactly identical because here residual variance I put in is variance of `y` so it's going to be more conservative. Looking at one `y` for example and check `bhat` agreement:", "_____no_output_____" ] ], [ [ "res = univariate_regression(dat$X,dat$y[,1],center=T,scale=T)\nhead(res$betahat)", "_____no_output_____" ], [ "head(res1$bhat[,1])", "_____no_output_____" ], [ "head(res$sebetahat)", "_____no_output_____" ], [ "head(res1$sbhat0[,1])", "_____no_output_____" ] ], [ [ "`bhat` are identical but not `sebhat`. This is understandable. In unit tests I'll compare `bhat`.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ] ]
cb7814c5f91a31cafee925fda48223067f233cc4
384,305
ipynb
Jupyter Notebook
analysis/paredao13.ipynb
kenzosakiyama/BBBert20
10b5d40d6943da0a9f0b9d001b439998224a650b
[ "MIT" ]
3
2020-08-03T15:00:05.000Z
2021-04-18T19:22:36.000Z
analysis/paredao13.ipynb
kenzosakiyama/BBBert20
10b5d40d6943da0a9f0b9d001b439998224a650b
[ "MIT" ]
null
null
null
analysis/paredao13.ipynb
kenzosakiyama/BBBert20
10b5d40d6943da0a9f0b9d001b439998224a650b
[ "MIT" ]
null
null
null
595.821705
63,082
0.731492
[ [ [ "import pandas as pd\nimport numpy as np\nfrom analysis_utils import *\n", "_____no_output_____" ], [ "PAREDAO = \"paredao13\"\nCAND1_PATH = \"data/paredao13/flay.csv\"\nCAND2_PATH = \"data/paredao13/thelma.csv\"\nCAND3_PATH = \"data/paredao13/babu.csv\"\nDATE = 3\nIGNORE_HASHTAGS = [\"#bbb20\", \"#redebbb\", \"#bbb2020\"]", "_____no_output_____" ], [ "candidate1_df = pd.read_csv(CAND1_PATH)\ncandidate2_df = pd.read_csv(CAND2_PATH)\ncandidate3_df = pd.read_csv(CAND3_PATH)", "_____no_output_____" ], [ "cand1 = candidate1_df[[\"tweet\", \"sentiment\", \"date\", \"likes_count\", \"retweets_count\", \"hashtags\"]]\ncand2 = candidate2_df[[\"tweet\", \"sentiment\", \"date\", \"likes_count\", \"retweets_count\", \"hashtags\"]]\ncand3 = candidate3_df[[\"tweet\", \"sentiment\", \"date\", \"likes_count\", \"retweets_count\", \"hashtags\"]]", "_____no_output_____" ] ], [ [ "# Flayslene (eliminada)", "_____no_output_____" ] ], [ [ "cand1[\"sentiment\"].hist()", "_____no_output_____" ] ], [ [ "# Thelma", "_____no_output_____" ] ], [ [ "cand2[\"sentiment\"].hist()", "_____no_output_____" ] ], [ [ "# Babu", "_____no_output_____" ] ], [ [ "cand3[\"sentiment\"].hist()", "_____no_output_____" ] ], [ [ "# Quantidades absolutas", "_____no_output_____" ] ], [ [ "candidates = {\"flayslene\": cand1, \"thelma\": cand2, \"babu\": cand3}", "_____no_output_____" ], [ "qtds_df = get_raw_quantities(candidates)", "_____no_output_____" ], [ "qtds_df", "_____no_output_____" ], [ "qtds_df.plot.bar(rot=45, color=['green', 'gray', 'red'])", "_____no_output_____" ] ], [ [ "# Porcentagens em relação aos total de tweets de cada candidato", "_____no_output_____" ] ], [ [ "pcts_df = get_pct_by_candidate(candidates)", "_____no_output_____" ], [ "pcts_df", "_____no_output_____" ], [ "pcts_df.plot.bar(rot=45, color=['green', 'gray', 'red'])", "_____no_output_____" ] ], [ [ "# Porcentagens em relação ao total de tweets por categoria", "_____no_output_____" ] ], [ [ "qtds_df_copy = qtds_df.copy()", "_____no_output_____" ], [ "qtds_df[\"positivos\"] /= qtds_df[\"positivos\"].sum() \nqtds_df[\"neutros\"] /= qtds_df[\"neutros\"].sum()\nqtds_df[\"negativos\"] /= qtds_df[\"negativos\"].sum()", "_____no_output_____" ], [ "qtds_df", "_____no_output_____" ], [ "qtds_df.plot.bar(rot=45, color=['green', 'gray', 'red'])", "_____no_output_____" ] ], [ [ "# Tweets por dia", "_____no_output_____" ] ], [ [ "names = list(candidates.keys())\ntweets_by_day_df = get_tweets_by_day(candidates[names[0]], names[0])\nfor name in names[1:]:\n current = get_tweets_by_day(candidates[name], name)\n tweets_by_day_df = tweets_by_day_df.append(current)", "_____no_output_____" ], [ "tweets_by_day_df.transpose().plot()", "_____no_output_____" ] ], [ [ "# Análise de hashtags", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\nplt.rcParams[\"figure.figsize\"] = (20,10)\n\nunique_df = get_unique_hashtags(list(candidates.values()))\nunique_df.drop(index=IGNORE_HASHTAGS, inplace=True)\nunique_df.sort_values(by=\"quantidade\", ascending=False).head(30).plot.bar(rot=45)", "_____no_output_____" ], [ "alias = {\"flayslene\": \"flay\", \"thelma\": \"thelma\", \"babu\": \"babu\"}\nfica_fora_df = get_fica_fora_quantities(unique_df, alias)\n", "_____no_output_____" ], [ "fica_fora_df", "_____no_output_____" ] ], [ [ "# Seleção de atributos", "_____no_output_____" ] ], [ [ "atributes_df = qtds_df_copy.join(pcts_df, rsuffix=\"_individual_pct\")\natributes_df = atributes_df.join(qtds_df, rsuffix=\"_global_pct\")\natributes_df = 
atributes_df.join(tweets_by_day_df)\natributes_df = atributes_df.join(fica_fora_df)", "_____no_output_____" ], [ "raw_participantes_info = get_participantes_info()[DATE]\nprint(\"Seguidores atualizados em:\", raw_participantes_info[\"date\"])", "Seguidores atualizados em: 10-04-2020\n" ], [ "participantes_info = raw_participantes_info[\"infos\"]\nparedoes_info = get_paredoes_info()", "_____no_output_____" ], [ "followers = [participantes_info[participante][\"seguidores\"] for participante in atributes_df.index]\n", "_____no_output_____" ], [ "likes = [get_likes_count(candidates[participante]) for participante in atributes_df.index]", "_____no_output_____" ], [ "retweets = [get_retweets_count(candidates[participante]) for participante in atributes_df.index]", "_____no_output_____" ], [ "paredao_info = paredoes_info[PAREDAO][\"candidatos\"]\nresults_info = {candidate[\"nome\"]: candidate[\"porcentagem\"]/100 for candidate in paredao_info}\nrejection = [results_info[participante] for participante in atributes_df.index]", "_____no_output_____" ], [ "atributes_df[\"likes\"] = likes\natributes_df[\"retweets\"] = retweets\natributes_df[\"seguidores\"] = followers\natributes_df[\"rejeicao\"] = rejection", "_____no_output_____" ], [ "atributes_df", "_____no_output_____" ], [ "atributes_df.to_csv(\"data/{}/paredao_atributes.csv\".format(PAREDAO))", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb781a796faa86764c01a5056d0ca76c4fd2c6cd
57,962
ipynb
Jupyter Notebook
project/Prediction_of_Rossmann_Store_Sales/predictionwtcustomers.ipynb
lemonsong/lemonsong.github.io
14a65b8c2506c95bab64f50143f3850be3edadc1
[ "MIT" ]
null
null
null
project/Prediction_of_Rossmann_Store_Sales/predictionwtcustomers.ipynb
lemonsong/lemonsong.github.io
14a65b8c2506c95bab64f50143f3850be3edadc1
[ "MIT" ]
1
2022-01-10T04:39:05.000Z
2022-01-10T04:39:05.000Z
project/Prediction_of_Rossmann_Store_Sales/predictionwtcustomers.ipynb
lemonsong/lemonsong.github.io
14a65b8c2506c95bab64f50143f3850be3edadc1
[ "MIT" ]
null
null
null
27.224988
175
0.446499
[ [ [ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Nov 27 23:01:16 2015\n\n@author: yilin\n\"\"\"\n# useful code: https://www.kaggle.com/cast42/rossmann-store-sales/xgboost-in-python-with-rmspe-v2/code\nimport pandas as pd\nimport numpy as np\nimport re\nfrom dateutil.parser import parse\nimport random\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set(context=\"paper\", font=\"monospace\")\nimport plotly\nimport plotly.plotly as py\npy.sign_in('lemonsong', '3lcplsq1a3')\nimport plotly.graph_objs as go\n\n#import datetime\n\nfrom sklearn.utils import shuffle\nfrom sklearn import preprocessing\nfrom numpy import float32\nfrom sklearn.preprocessing import Imputer", "_____no_output_____" ], [ "def getxy(x):\n y = x.Sales\n x.drop('Sales', axis=1, inplace=True)\n #x.drop('Store', axis=1, inplace=True)\n return x,y", "_____no_output_____" ], [ "data = pd.read_csv(\"train0forkagglewtcustomer.csv\")\ndata1 = pd.read_csv(\"train1forkagglewtcustomer.csv\")", "_____no_output_____" ], [ "data = pd.read_csv(\"train0forkagglewtcustomer.csv\")\ndata = data[(data['Year']==2013) & (data['Month']==7) | (data['Year']==2014) & (data['Month']==7) |\\\n (data['Year']==2013) & (data['Month']==8) | (data['Year']==2014) & (data['Month']==8) |\\\n (data['Year']==2013) & (data['Month']==9) | (data['Year']==2014) & (data['Month']==9) |\\\n (data['Year']==2015) & (data['Month']==6) | (data['Year']==2014) & (data['Month']==5) |\n (data['Year']==2015) & (data['Month']==7) ]\ndata1 = pd.read_csv(\"train1forkagglewtcustomer.csv\")\ndata1 = data1[(data1['Year']==2013) & (data1['Month']==7) | (data1['Year']==2014) & (data1['Month']==7) |\\\n (data1['Year']==2013) & (data1['Month']==8) | (data1['Year']==2014) & (data1['Month']==8) |\\\n (data1['Year']==2013) & (data1['Month']==9) | (data1['Year']==2014) & (data1['Month']==9) |\\\n (data1['Year']==2015) & (data1['Month']==6) | (data1['Year']==2014) & (data1['Month']==5) |\n (data1['Year']==2015) & (data1['Month']==7) ]\n", "_____no_output_____" ], [ "data=pd.DataFrame(data)\ndata.to_csv(\"bigml0.csv\", index=False)\ndata1=pd.DataFrame(data1)", "_____no_output_____" ], [ "data = pd.read_csv(\"train0forkagglewtcustomer.csv\")\ndata = data[(data['Year']==2015) & (data['Month']==6) | (data['Year']==2014) & (data['Month']==5) |\n (data['Year']==2015) & (data['Month']==7) ]\ndata1 = pd.read_csv(\"train1forkagglewtcustomer.csv\")\ndata1 = data1[(data1['Year']==2015) & (data1['Month']==6) | (data1['Year']==2014) & (data1['Month']==5) |\n (data1['Year']==2015) & (data1['Month']==7) ]", "_____no_output_____" ], [ "data = pd.read_csv(\"train0forkagglewtcustomer.csv\")\ndata = data[(data['Year']==2013) & (data['Month']==7) | (data['Year']==2014) & (data['Month']==7) |\\\n (data['Year']==2013) & (data['Month']==8) | (data['Year']==2014) & (data['Month']==8) |\\\n (data['Year']==2013) & (data['Month']==9) | (data['Year']==2014) & (data['Month']==9)]\ndata1 = pd.read_csv(\"train1forkagglewtcustomer.csv\")\ndata1 = data1[(data1['Year']==2013) & (data1['Month']==7) | (data1['Year']==2014) & (data1['Month']==7) |\\\n (data1['Year']==2013) & (data1['Month']==8) | (data1['Year']==2014) & (data1['Month']==8) |\\\n (data1['Year']==2013) & (data1['Month']==9) | (data1['Year']==2014) & (data1['Month']==9)]", "_____no_output_____" ], [ "x,y=getxy(data)\nx1,y1=getxy(data1)\n", "_____no_output_____" ] ], [ [ "## Split Data", "_____no_output_____" ] ], [ [ "def splitdata(x,y):# Split data into train and test\n train, test = shuffle(x,y, random_state=15)\n offset = int(train.shape[0] * 
0.7)\n x_train, y_train = train[:offset], test[:offset]\n x_test, y_test = train[offset:], test[offset:]\n return x_train, y_train,x_test, y_test", "_____no_output_____" ], [ "x_train, y_train,x_test, y_test = splitdata(x,y)", "_____no_output_____" ], [ "print x_train.columns", "Index([u'Store', u'DayOfWeek', u'Promo', u'SchoolHoliday', u'HaveCompetitor',\n u'CompetitionDistance', u'Year', u'Month', u'Day', u'Week',\n u'StoreType_a', u'StoreType_b', u'StoreType_c', u'StoreType_d',\n u'Assortment_a', u'Assortment_b', u'Assortment_c', u'StateHoliday_0',\n u'StateHoliday_a', u'CompetitionMonth', u'Customers'],\n dtype='object')\n" ], [ "x_train1, y_train1,x_test1, y_test1 = splitdata(x1,y1)", "_____no_output_____" ] ], [ [ "## Builde Model", "_____no_output_____" ], [ "##### DT", "_____no_output_____" ] ], [ [ "from sklearn import tree\nclf2 = tree.DecisionTreeRegressor(max_features='auto')\nclf2.fit(x_train, y_train)\ny_pred2 = clf2.predict(x_test)", "_____no_output_____" ], [ "from sklearn import tree\nclf12 = tree.DecisionTreeRegressor(max_features='auto')\nclf12.fit(x_train1, y_train1)\ny_pred12 = clf12.predict(x_test1)", "_____no_output_____" ] ], [ [ "##### KNN", "_____no_output_____" ] ], [ [ "from sklearn.neighbors import KNeighborsRegressor\nclf3 = KNeighborsRegressor(n_neighbors=5,weights='distance',algorithm='auto')\nclf3.fit(x_train, y_train)\ny_pred3=clf3.predict(x_test)", "_____no_output_____" ], [ "from sklearn.neighbors import KNeighborsRegressor\nclf13 = KNeighborsRegressor(n_neighbors=10,weights='distance',algorithm='auto')\nclf13.fit(x_train1, y_train1)\ny_pred13=clf13.predict(x_test1)", "_____no_output_____" ] ], [ [ "##### RF", "_____no_output_____" ] ], [ [ "from sklearn.ensemble import RandomForestRegressor\nclf4 = RandomForestRegressor(n_estimators=300)\nclf4.fit(x_train, y_train)\ny_pred4=clf4.predict(x_test)", "_____no_output_____" ], [ "from sklearn.ensemble import RandomForestRegressor\nclf14 = RandomForestRegressor(n_estimators=300)\nclf14.fit(x_train1, y_train1)\ny_pred14=clf14.predict(x_test1)", "_____no_output_____" ] ], [ [ "#### Feature Importance", "_____no_output_____" ] ], [ [ "def getfeature_importance(df,clf):\n feature_importance= pd.concat([pd.Series(list(df.columns),name='Feature'),\\\n pd.Series(clf.feature_importances_,name='Importance')],\\\n axis=1).sort(['Importance'], ascending=[1])\n return feature_importance", "_____no_output_____" ], [ "feature_importance=getfeature_importance(x_train,clf4)\nfeature_importance1=getfeature_importance(x_train1,clf14)\n", "_____no_output_____" ], [ "featureimportance = pd.merge(feature_importance,feature_importance1,on=\"Feature\", how='outer')\n\nprint featureimportance\nfeatureimportance.to_csv(\"featureimportance.csv\", index=False)\n", " Feature Importance_x Importance_y\n0 HaveCompetitor 0.000033 0.000005\n1 StateHoliday_a 0.000065 0.000018\n2 StateHoliday_0 0.000071 0.000018\n3 SchoolHoliday 0.000902 0.001177\n4 StoreType_c 0.001294 0.001040\n5 Month 0.001297 0.001600\n6 StoreType_a 0.001703 0.001779\n7 Assortment_a 0.002244 0.002869\n8 Assortment_c 0.002447 0.002820\n9 Year 0.003039 0.004429\n10 Week 0.003800 0.004933\n11 Day 0.005641 0.009144\n12 DayOfWeek 0.009455 0.014090\n13 StoreType_b 0.013134 0.013617\n14 Assortment_b 0.014945 0.013282\n15 CompetitionMonth 0.017033 0.019052\n16 StoreType_d 0.033185 0.054346\n17 Store 0.035426 0.031710\n18 Promo 0.035536 0.066964\n19 CompetitionDistance 0.048419 0.060506\n20 Customers 0.770330 0.670496\n21 Promo2Month NaN 0.002017\n22 Promo2Week NaN 0.024087\n" ], [ 
"%matplotlib inline\n\ntrace1 = go.Bar(\n y=featureimportance.Feature,\n x=featureimportance.Importance_x,\n name='Promo2==0',\n orientation = 'h',\n marker = dict(\n color = 'rgba(55, 128, 191, 0.6)',\n line = dict(\n color = 'rgba(55, 128, 191, 1.0)',\n width = 1,\n )\n )\n)\ntrace2 = go.Bar(\n y=featureimportance.Feature,\n x=featureimportance.Importance_y,\n name='Promo2==1',\n orientation = 'h',\n marker = dict(\n color = 'rgba(255, 153, 51, 0.6)',\n line = dict(\n color = 'rgba(255, 153, 51, 1.0)',\n width = 1,\n )\n )\n)\ndata = [trace1, trace2]\nlayout = go.Layout(\n barmode='group'\n)\nfig = go.Figure(data=data, layout=layout)\nplot_url = py.plot(fig, filename='marker-h-bar')", "_____no_output_____" ], [ "import plotly.tools as tls\n\ntls.embed(\"https://plot.ly/~lemonsong/43/promo20-vs-promo21/\")", "_____no_output_____" ] ], [ [ "###### Predict based on average of three algorithm", "_____no_output_____" ] ], [ [ "predcollect=pd.concat([pd.Series(y_pred2,name='dt'),pd.Series(y_pred3,name='knn'),pd.Series(y_pred4,name='rf')], axis=1)", "_____no_output_____" ], [ "pred1collect=pd.concat([pd.Series(y_pred12,name='dt'),pd.Series(y_pred13,name='knn'),pd.Series(y_pred14,name='rf')], axis=1)", "_____no_output_____" ], [ "predavg= predcollect.mean(axis=1)", "_____no_output_____" ], [ "pred1avg= pred1collect.mean(axis=1)", "_____no_output_____" ] ], [ [ "## Evaluation", "_____no_output_____" ] ], [ [ "def rmspe(y, yhat):\n return np.sqrt(np.mean((yhat/y-1) ** 2))\n\ndef rmspe_xg(yhat, y):\n y = np.expm1(y)\n yhat = np.expm1(yhat)\n print y\n return \"rmspe\", rmspe(y,yhat)", "_____no_output_____" ] ], [ [ "Function to calculate RMSPE for both Promo2==0 and Promo2==1 test", "_____no_output_____" ] ], [ [ "def compare(y_test,y_pred,y_test1,y_pred1):\n y_test=np.append(y_test,y_test1)\n y_pred=np.append(y_pred,y_pred1)\n return rmspe(y_test,y_pred)", "_____no_output_____" ] ], [ [ "##### DT", "_____no_output_____" ], [ "Promo2==0", "_____no_output_____" ] ], [ [ "print rmspe(y_test,y_pred2)", "0.103550818995\n" ] ], [ [ "Promo2==1", "_____no_output_____" ] ], [ [ "print rmspe(y_test1,y_pred12)", "0.130232097084\n" ] ], [ [ "Promo2==0 & Promo2==1", "_____no_output_____" ] ], [ [ "print compare(y_test,y_pred2,y_test1,y_pred12)", "0.117093827408\n" ] ], [ [ "##### KNN", "_____no_output_____" ] ], [ [ "print rmspe(y_test,y_pred3)", "0.0827110382687\n" ], [ "print rmspe(y_test1,y_pred13)", "0.0901338125419\n" ], [ "print compare(y_test,y_pred3,y_test1,y_pred13)", "0.0863466452436\n" ] ], [ [ "##### RF", "_____no_output_____" ] ], [ [ "print rmspe(y_test,y_pred4)", "0.0703702331174\n" ], [ "print rmspe(y_test1,y_pred14)", "0.0799880772887\n" ], [ "print compare(y_test,y_pred4,y_test1,y_pred14)", "0.0751315384619\n" ] ], [ [ "##### Average method\nPredict sales based on average of predictions from three algorithms", "_____no_output_____" ] ], [ [ "print rmspe(y_test,predavg)", "0.850845818724\n" ], [ "print rmspe(y_test1,pred1avg)", "0.823322014024\n" ], [ "print compare(y_test,predavg,y_test1,pred1avg)", "0.0699363310493\n" ] ], [ [ "#### Export Decision Tree", "_____no_output_____" ] ], [ [ "tree.export_graphviz(clf2,out_file='tree0.dot',max_depth=8)", "_____no_output_____" ], [ "tree.export_graphviz(clf12,out_file='tree1.dot',max_depth=8)", "_____no_output_____" ] ], [ [ "## Make Prediction", "_____no_output_____" ] ], [ [ "def makeprediction(testfile,feature,clf):\n #train_x = pd.read_csv(trainfile).astype(float32)\n pre_x = pd.read_csv(testfile).astype(float32)\n #print 
np.all(np.isfinite(train_x))\n print np.all(np.isfinite(pre_x))\n \n \n #train_x,train_y=getxy(train_x)\n \n \n pre_y = clf.predict(pre_x[feature])\n prediction = pd.concat([pre_x, pd.Series(pre_y,name='Sales')], axis=1)\n\n return prediction\nfeature0=[\"Store\",\"DayOfWeek\",\"Promo\",\"SchoolHoliday\",'HaveCompetitor',\n \"CompetitionDistance\",\n \"Year\",\"Month\",\"Day\",\"Week\",\n \"StoreType_a\",\"StoreType_b\",\"StoreType_c\",\"StoreType_d\",\n \"Assortment_a\",\"Assortment_b\",\"Assortment_c\",\n \"StateHoliday_0\",\"StateHoliday_a\",\n \"CompetitionMonth\",'Customers'\n ]\nfeature1=[\"Store\",\"DayOfWeek\",\"Promo\",\"SchoolHoliday\",'HaveCompetitor',\n \"CompetitionDistance\",\n \"Year\",\"Month\",\"Day\",\"Week\",\n \"StoreType_a\",\"StoreType_b\",\"StoreType_c\",\"StoreType_d\",\n \"Assortment_a\",\"Assortment_b\",\"Assortment_c\",\n \"StateHoliday_0\",\"StateHoliday_a\",\n \"CompetitionMonth\",\n \"Promo2Month\",\"Promo2Week\",'Customers'\n ]", "_____no_output_____" ], [ "prediction0=makeprediction('pre0wtcustomers.csv',feature0,clf4)\n", "True\n" ], [ "prediction1=makeprediction('pre1wtcustomers.csv',feature1,clf14)\n", "True\n" ] ], [ [ "#### average method\nWhen make submission based on average prediction of three algorithms, use this part", "_____no_output_____" ] ], [ [ "prediction02=makeprediction('pre0.csv',feature0,clf2)\nprediction03=makeprediction('pre0.csv',feature0,clf3)\nprediction04=makeprediction('pre0.csv',feature0,clf4)\n", "_____no_output_____" ], [ "prediction12=makeprediction('pre1.csv',feature1,clf12)\nprediction13=makeprediction('pre1.csv',feature1,clf13)\nprediction14=makeprediction('pre1.csv',feature1,clf14)\n", "_____no_output_____" ], [ "def mergeavg(predition2,predition3,predition4):\n predcollect=pd.concat([pd.Series(predition2,name='dt'),pd.Series(predition3,name='knn'),pd.Series(predition4,name='rf')], axis=1)\n predavg= predcollect.mean(axis=1)\n return predavg", "_____no_output_____" ], [ "prediction0=mergeavg(prediction02.Sales,prediction03.Sales,prediction04.Sales)", "_____no_output_____" ], [ "prediction1=mergeavg(prediction12.Sales,prediction13.Sales,prediction14.Sales)", "_____no_output_____" ], [ "def generatepreforsub(filename,pred):\n pre_x = pd.read_csv(filename).astype(float32)\n prediction = pd.concat([pre_x.Id, pd.Series(pred,name='Sales')], axis=1)\n return prediction", "_____no_output_____" ], [ "prediction0=generatepreforsub('pre0.csv',prediction0)", "_____no_output_____" ], [ "prediction1=generatepreforsub('pre1.csv',prediction1)", "_____no_output_____" ] ], [ [ "## Make Submission", "_____no_output_____" ] ], [ [ "prediction_sub0=pd.DataFrame(prediction0[[\"Id\",\"Sales\"]],columns=[\"Id\",\"Sales\"])\nprediction_sub1=pd.DataFrame(prediction1[[\"Id\",\"Sales\"]],columns=[\"Id\",\"Sales\"])\n\nprediction_sub=pd.concat([prediction_sub0,prediction_sub1])\nprint len(prediction_sub)\nsubmission = pd.read_csv(\"submission.csv\")\nsubmission = pd.merge(submission,prediction_sub,on=\"Id\", how='outer')\nsubmission.fillna(0, inplace=True)", "35104\n" ], [ "submission.to_csv(\"submission4.csv\", index=False)", "_____no_output_____" ] ], [ [ "## Generat Data for Advance Analysis", "_____no_output_____" ], [ "Only includ test and prediction of test with open==1 or open==null", "_____no_output_____" ] ], [ [ "prediction0.to_csv(\"prediction0.csv\", index=False)\nprediction1.to_csv(\"prediction1.csv\", index=False)", "_____no_output_____" ], [ 
"fet=[\"Store\",\"DayOfWeek\",\"Promo\",\"SchoolHoliday\",\"StateHoliday_0\",\"StateHoliday_a\",\n \"Year\",\"Month\",\"Day\",\n \"StoreType_a\",\"StoreType_b\",\"StoreType_c\",\"StoreType_d\",\n \"Assortment_a\",\"Assortment_b\",\"Assortment_c\",\n \"Customers\",\"Sales\"]\nprediction_ana0=pd.DataFrame(prediction0[fet])\nprediction_ana0[\"Promo2\"]=0\nprint prediction_ana0.head()\nprediction_ana1=pd.DataFrame(prediction1[fet])\nprediction_ana0[\"Promo2\"]=1\ndata_ana0=pd.DataFrame(data[fet])\nprediction_ana0[\"Promo2\"]=0\ndata_ana1=pd.DataFrame(data1[fet])\nprediction_ana0[\"Promo2\"]=1\nprediction_ana=pd.concat([prediction_ana0,prediction_ana1,data_ana0,data_ana1])", " Store DayOfWeek Promo SchoolHoliday StateHoliday_0 StateHoliday_a \\\n0 1 4 1 0 1 0 \n1 1 3 1 0 1 0 \n2 1 2 1 0 1 0 \n3 1 1 1 0 1 0 \n4 1 6 0 0 1 0 \n\n Year Month Day StoreType_a StoreType_b StoreType_c StoreType_d \\\n0 2015 9 17 0 0 1 0 \n1 2015 9 16 0 0 1 0 \n2 2015 9 15 0 0 1 0 \n3 2015 9 14 0 0 1 0 \n4 2015 9 12 0 0 1 0 \n\n Assortment_a Assortment_b Assortment_c Customers Sales Promo2 \n0 1 0 0 484.890015 3674.843333 0 \n1 1 0 0 516.349976 3914.896667 0 \n2 1 0 0 527.690002 3905.466667 0 \n3 1 0 0 579.429993 5384.690000 0 \n4 1 0 0 513.880005 4214.560000 0 \n" ] ], [ [ "#### Creat Date column", "_____no_output_____" ] ], [ [ "y = np.array(prediction_ana['Year']-1970, dtype='<M8[Y]')\nm = np.array(prediction_ana['Month']-1, dtype='<m8[M]')\nd = np.array(prediction_ana['Day']-1, dtype='<m8[D]')\nprediction_ana['Date'] = pd.Series(y+m+d)", "_____no_output_____" ], [ "print prediction_ana.dtypes", "Assortment_a float64\nAssortment_b float64\nAssortment_c float64\nCustomers float64\nDay float64\nDayOfWeek float64\nMonth float64\nPromo float64\nPromo2 float64\nSales float64\nSchoolHoliday float64\nStateHoliday_0 float64\nStateHoliday_a float64\nStore float64\nStoreType_a float64\nStoreType_b float64\nStoreType_c float64\nStoreType_d float64\nYear float64\nDate datetime64[ns]\ndtype: object\n" ], [ "print prediction_ana.head()", " Assortment_a Assortment_b Assortment_c Customers Day DayOfWeek \\\n0 1 0 0 484.890015 17 4 \n1 1 0 0 516.349976 16 3 \n2 1 0 0 527.690002 15 2 \n3 1 0 0 579.429993 14 1 \n4 1 0 0 513.880005 12 6 \n\n Month Promo Promo2 Sales SchoolHoliday StateHoliday_0 \\\n0 9 1 1 3674.843333 0 1 \n1 9 1 1 3914.896667 0 1 \n2 9 1 1 3905.466667 0 1 \n3 9 1 1 5384.690000 0 1 \n4 9 0 1 4214.560000 0 1 \n\n StateHoliday_a Store StoreType_a StoreType_b StoreType_c StoreType_d \\\n0 0 1 0 0 1 0 \n1 0 1 0 0 1 0 \n2 0 1 0 0 1 0 \n3 0 1 0 0 1 0 \n4 0 1 0 0 1 0 \n\n Year Date \n0 2015 2015-09-17 \n1 2015 2015-09-16 \n2 2015 2015-09-15 \n3 2015 2015-09-14 \n4 2015 2015-09-12 \n" ], [ "prediction_ana.drop([\"Day\",\"Month\",\"Year\"], axis=1, inplace=True)", "_____no_output_____" ] ], [ [ "### Sales and Customers Prediction by Date", "_____no_output_____" ] ], [ [ "gr_date=prediction_ana.groupby(['Date'])", "_____no_output_____" ], [ "gr_date_sales=gr_date.agg({'Customers' : 'mean', 'Sales' : 'mean'})", "_____no_output_____" ], [ "print gr_date_sales.head()", " Customers Sales\nDate \n2013-01-01 832.227273 7404.090909\n2013-01-02 767.874747 6994.882828\n2013-01-03 773.235830 7062.363360\n2013-01-04 761.178138 6975.305668\n2013-01-05 766.191684 7009.895538\n" ], [ "trace1 = go.Scatter(\n x=gr_date_sales.index,\n y=gr_date_sales.Customers,\n name='Customers',\n line=dict(\n color='#ae32e4',\n width = 1 ,\n \n )\n)\ntrace2 = go.Scatter(\n x=gr_date_sales.index,\n y=gr_date_sales.Sales,\n name='Sales',\n mode = 
'lines+markers',\n yaxis='y2',\n line=dict(\n color='#3268e4',\n width = 1 \n ),\n opacity=0.8\n)\ndata = [trace1, trace2]\nlayout = go.Layout(\n title='Time Series of Prediction',\n yaxis=dict(\n title='Customers'\n ),\n yaxis2=dict(\n title='Sales',\n titlefont=dict(\n color='rgb(174,50,228)'\n ),\n tickfont=dict(\n color='rgb(174,50,228)'\n ),\n overlaying='y',\n side='right'\n )\n)\nfig = go.Figure(data=data, layout=layout)\nplot_url = py.plot(fig, filename='multiple-axes-double')\ntls.embed(\"https://plot.ly/~lemonsong/54/time-series-of-prediction/\")", "_____no_output_____" ], [ "gr_assortment=prediction_ana\n", "_____no_output_____" ], [ "#gr_assortment.query('Assortment_a==1')['Assortment']='basic'\ngr_assortment.ix[gr_assortment.Assortment_a==1, 'Assortment'] = 'basic'\ngr_assortment.ix[gr_assortment.Assortment_b==1, 'Assortment'] = 'extra'\ngr_assortment.ix[gr_assortment.Assortment_c==1, 'Assortment'] = 'extended'\n\ngr_assortment.drop(['Assortment_a','Assortment_b','Assortment_c'], axis=1, inplace=True)", "_____no_output_____" ], [ "print gr_assortment.columns", "Index([u'Customers', u'DayOfWeek', u'Promo', u'Promo2', u'Sales',\n u'SchoolHoliday', u'StateHoliday_0', u'StateHoliday_a', u'Store',\n u'StoreType_a', u'StoreType_b', u'StoreType_c', u'StoreType_d', u'Date',\n u'Assortment'],\n dtype='object')\n" ], [ "gr_assortment1=gr_assortment.groupby(['Assortment', 'DayOfWeek'])\ngr_assortment1=gr_assortment1.agg({ 'Customers' : 'sum','Store':'count'}).reset_index()\ngr_assortment1['Coustomers_by_store']=gr_assortment1['Customers']/gr_assortment1['Store']\ngr_assortment1", "_____no_output_____" ], [ "gr_assortment2=gr_assortment1.pivot('Assortment', 'DayOfWeek', 'Coustomers_by_store')\nprint gr_assortment2", "DayOfWeek 1 2 3 4 5 \\\nAssortment \nbasic 845.588925 763.638532 731.224711 741.552602 762.959825 \nextended 837.817407 751.170597 725.254788 742.080359 770.858928 \nextra 2128.963607 2039.735130 2044.904263 2104.246270 2240.707866 \n\nDayOfWeek 6 7 \nAssortment \nbasic 618.085336 1726.211010 \nextended 684.955642 656.486821 \nextra 1808.844528 2159.519221 \n" ], [ "data = [\n go.Heatmap(\n z=gr_assortment2.values,\n x=gr_assortment2.columns,\n y=gr_assortment2.index,\n colorscale=[[0, '\"rgb(228, 174, 50)\"'],[1, 'rgb(174, 50, 228)']]\n )\n]\nlayout = go.Layout(\n title='Average Customers',\n yaxis=dict(\n title='Assortment',\n ),\n xaxis=dict(\n type=\"category\",\n title='WeekOfDay',\n )\n)\nfig = go.Figure(data=data, layout=layout)\nplot_url = py.plot(fig, filename='labelled-heatmap')\ntls.embed(\"https://plot.ly/~lemonsong/80/average-sales/\")", "_____no_output_____" ], [ "gr_store=prediction_ana", "_____no_output_____" ], [ "gr_store=gr_store.groupby(['Store'])\ngr_store_sales=gr_store.agg({'Customers' : 'sum', 'Sales' : 'sum','Promo':'sum','Promo2':'sum'}).reset_index()", "_____no_output_____" ], [ "gr_store1=pd.merge(gr_store_sales,prediction_ana[['Store','Assortment']],on=\"Store\", how='left').drop_duplicates()", "_____no_output_____" ], [ "gr_store1.head()", "_____no_output_____" ], [ "gr_store1_assort=gr_store1.groupby(['Assortment'])\ngr_store_sales_agg=gr_store1_assort.agg({'Customers' : 'sum', 'Sales' : 'sum','Store':'count','Promo':'sum','Promo2':'sum'}).reset_index()", "_____no_output_____" ], [ "gr_store_sales_agg", "_____no_output_____" ], [ "fig = {\n \"data\": [\n {\n \"values\": gr_store_sales_agg.Store,\n \"labels\": gr_store_sales_agg.Assortment,\n \"domain\": {\"x\": [0, .33]},\n \"name\": \"Store\",\n \"hoverinfo\":\"label+percent+name\",\n \"hole\": 
.4,\n \"type\": \"pie\"\n }, \n {\n \"values\": gr_store_sales_agg.Customers,\n \"labels\":gr_store_sales_agg.Assortment,\n \"text\":\"Customers\",\n \"textposition\":\"inside\",\n \"domain\": {\"x\": [.33, .66]},\n \"name\": \"Customers\",\n \"hoverinfo\":\"label+percent+name\",\n \"hole\": .4,\n \"type\": \"pie\"\n }, \n {\n \"values\": gr_store_sales_agg.Sales,\n \"labels\":gr_store_sales_agg.Assortment,\n \"text\":\"Sales\",\n \"textposition\":\"inside\",\n \"domain\": {\"x\": [.66, 1]},\n \"name\": \"Sales\",\n \"hoverinfo\":\"label+percent+name\",\n \"hole\": .4,\n \"type\": \"pie\"\n },\n ],\n \"layout\": {\n \"title\":\"Percentage by Assortment Type\",\n \"annotations\": [\n {\n \"font\": {\n \"size\": 20\n },\n \"showarrow\": False,\n \"text\": \"Store\",\n \"x\": 0.10,\n \"y\": 0.5\n },\n {\n \"font\": {\n \"size\": 20\n },\n \"showarrow\": False,\n \"text\": \"Customers\",\n \"x\": 0.5,\n \"y\": 0.5\n },\n {\n \"font\": {\n \"size\": 20\n },\n \"showarrow\": False,\n \"text\": \"Sales\",\n \"x\": 0.9,\n \"y\": 0.5\n }\n ]\n }\n}\n\nurl = py.plot(fig, filename='Global Emissions 1990-2011')", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb782496e13c3831f2cad4480445531a8063e184
4,586
ipynb
Jupyter Notebook
ipynb/Malta.ipynb
skirienko/oscovida.github.io
eda5412d02365a8a000239be5480512c53bee8c2
[ "CC-BY-4.0" ]
null
null
null
ipynb/Malta.ipynb
skirienko/oscovida.github.io
eda5412d02365a8a000239be5480512c53bee8c2
[ "CC-BY-4.0" ]
null
null
null
ipynb/Malta.ipynb
skirienko/oscovida.github.io
eda5412d02365a8a000239be5480512c53bee8c2
[ "CC-BY-4.0" ]
null
null
null
28.134969
159
0.505451
[ [ [ "# Malta\n\n* Homepage of project: https://oscovida.github.io\n* Plots are explained at http://oscovida.github.io/plots.html\n* [Execute this Jupyter Notebook using myBinder](https://mybinder.org/v2/gh/oscovida/binder/master?filepath=ipynb/Malta.ipynb)", "_____no_output_____" ] ], [ [ "import datetime\nimport time\n\nstart = datetime.datetime.now()\nprint(f\"Notebook executed on: {start.strftime('%d/%m/%Y %H:%M:%S%Z')} {time.tzname[time.daylight]}\")", "_____no_output_____" ], [ "%config InlineBackend.figure_formats = ['svg']\nfrom oscovida import *", "_____no_output_____" ], [ "overview(\"Malta\", weeks=5);", "_____no_output_____" ], [ "overview(\"Malta\");", "_____no_output_____" ], [ "compare_plot(\"Malta\", normalise=True);\n", "_____no_output_____" ], [ "# load the data\ncases, deaths = get_country_data(\"Malta\")\n\n# compose into one table\ntable = compose_dataframe_summary(cases, deaths)\n\n# show tables with up to 500 rows\npd.set_option(\"max_rows\", 500)\n\n# display the table\ntable", "_____no_output_____" ] ], [ [ "# Explore the data in your web browser\n\n- If you want to execute this notebook, [click here to use myBinder](https://mybinder.org/v2/gh/oscovida/binder/master?filepath=ipynb/Malta.ipynb)\n- and wait (~1 to 2 minutes)\n- Then press SHIFT+RETURN to advance code cell to code cell\n- See http://jupyter.org for more details on how to use Jupyter Notebook", "_____no_output_____" ], [ "# Acknowledgements:\n\n- Johns Hopkins University provides data for countries\n- Robert Koch Institute provides data for within Germany\n- Atlo Team for gathering and providing data from Hungary (https://atlo.team/koronamonitor/)\n- Open source and scientific computing community for the data tools\n- Github for hosting repository and html files\n- Project Jupyter for the Notebook and binder service\n- The H2020 project Photon and Neutron Open Science Cloud ([PaNOSC](https://www.panosc.eu/))\n\n--------------------", "_____no_output_____" ] ], [ [ "print(f\"Download of data from Johns Hopkins university: cases at {fetch_cases_last_execution()} and \"\n f\"deaths at {fetch_deaths_last_execution()}.\")", "_____no_output_____" ], [ "# to force a fresh download of data, run \"clear_cache()\"", "_____no_output_____" ], [ "print(f\"Notebook execution took: {datetime.datetime.now()-start}\")\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ] ]
cb7838d99686c15e20a0b0a55601bef737f6631a
146,054
ipynb
Jupyter Notebook
Notebooks/04 Model _ WangchanBERTa _ LSTM on LimeSoda.ipynb
nitsirs/debunker
db039898f28f72c21490e334fe8fed19f5c20b2c
[ "MIT" ]
2
2022-03-14T08:52:49.000Z
2022-03-25T09:47:30.000Z
Notebooks/04 Model _ WangchanBERTa _ LSTM on LimeSoda.ipynb
nitsirs/debunker
db039898f28f72c21490e334fe8fed19f5c20b2c
[ "MIT" ]
null
null
null
Notebooks/04 Model _ WangchanBERTa _ LSTM on LimeSoda.ipynb
nitsirs/debunker
db039898f28f72c21490e334fe8fed19f5c20b2c
[ "MIT" ]
null
null
null
146,054
146,054
0.823141
[ [ [ "## Installation", "_____no_output_____" ] ], [ [ "!pip install -q --upgrade transformers datasets tokenizers \n!pip install -q emoji pythainlp sklearn-pycrfsuite seqeval\n!rm -r thai2transformers thai2transformers_parent \n!git clone -b dev https://github.com/vistec-AI/thai2transformers/\n!mv thai2transformers thai2transformers_parent\n!mv thai2transformers_parent/thai2transformers .\n!pip install accelerate==0.5.1\n!apt install git-lfs\n!pip install sentencepiece\n\n! git clone https://github.com/Bjarten/early-stopping-pytorch.git\nimport sys \nsys.path.insert(0, '/content/early-stopping-pytorch')", "\u001b[K |████████████████████████████████| 4.2 MB 7.3 MB/s \n\u001b[K |████████████████████████████████| 346 kB 60.3 MB/s \n\u001b[K |████████████████████████████████| 6.6 MB 45.3 MB/s \n\u001b[K |████████████████████████████████| 86 kB 5.7 MB/s \n\u001b[K |████████████████████████████████| 596 kB 62.3 MB/s \n\u001b[K |████████████████████████████████| 212 kB 59.9 MB/s \n\u001b[K |████████████████████████████████| 86 kB 6.7 MB/s \n\u001b[K |████████████████████████████████| 1.1 MB 49.9 MB/s \n\u001b[K |████████████████████████████████| 140 kB 65.2 MB/s \n\u001b[K |████████████████████████████████| 127 kB 63.0 MB/s \n\u001b[K |████████████████████████████████| 271 kB 70.0 MB/s \n\u001b[K |████████████████████████████████| 144 kB 75.6 MB/s \n\u001b[K |████████████████████████████████| 94 kB 3.6 MB/s \n\u001b[31mERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.\ndatascience 0.10.6 requires folium==0.2.1, but you have folium 0.8.3 which is incompatible.\u001b[0m\n\u001b[K |████████████████████████████████| 175 kB 8.7 MB/s \n\u001b[K |████████████████████████████████| 11.5 MB 22.8 MB/s \n\u001b[K |████████████████████████████████| 43 kB 2.2 MB/s \n\u001b[K |████████████████████████████████| 485 kB 63.4 MB/s \n\u001b[?25h Building wheel for emoji (setup.py) ... \u001b[?25l\u001b[?25hdone\n Building wheel for sklearn-pycrfsuite (setup.py) ... \u001b[?25l\u001b[?25hdone\n Building wheel for seqeval (setup.py) ... \u001b[?25l\u001b[?25hdone\n Building wheel for python-crfsuite-extension (setup.py) ... 
\u001b[?25l\u001b[?25hdone\nrm: cannot remove 'thai2transformers': No such file or directory\nrm: cannot remove 'thai2transformers_parent': No such file or directory\nCloning into 'thai2transformers'...\nremote: Enumerating objects: 5838, done.\u001b[K\nremote: Counting objects: 100% (1882/1882), done.\u001b[K\nremote: Compressing objects: 100% (555/555), done.\u001b[K\nremote: Total 5838 (delta 1279), reused 1807 (delta 1236), pack-reused 3956\u001b[K\nReceiving objects: 100% (5838/5838), 17.08 MiB | 17.59 MiB/s, done.\nResolving deltas: 100% (4088/4088), done.\nLooking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\nCollecting accelerate==0.5.1\n Downloading accelerate-0.5.1-py3-none-any.whl (58 kB)\n\u001b[K |████████████████████████████████| 58 kB 4.2 MB/s \n\u001b[?25hRequirement already satisfied: pyyaml in /usr/local/lib/python3.7/dist-packages (from accelerate==0.5.1) (6.0)\nRequirement already satisfied: numpy>=1.17 in /usr/local/lib/python3.7/dist-packages (from accelerate==0.5.1) (1.21.6)\nRequirement already satisfied: torch>=1.4.0 in /usr/local/lib/python3.7/dist-packages (from accelerate==0.5.1) (1.11.0+cu113)\nRequirement already satisfied: typing-extensions in /usr/local/lib/python3.7/dist-packages (from torch>=1.4.0->accelerate==0.5.1) (4.2.0)\nInstalling collected packages: accelerate\nSuccessfully installed accelerate-0.5.1\nReading package lists... Done\nBuilding dependency tree \nReading state information... Done\ngit-lfs is already the newest version (2.3.4-1).\nThe following package was automatically installed and is no longer required:\n libnvidia-common-460\nUse 'apt autoremove' to remove it.\n0 upgraded, 0 newly installed, 0 to remove and 42 not upgraded.\nLooking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\nCollecting sentencepiece\n Downloading sentencepiece-0.1.96-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (1.2 MB)\n\u001b[K |████████████████████████████████| 1.2 MB 7.0 MB/s \n\u001b[?25hInstalling collected packages: sentencepiece\nSuccessfully installed sentencepiece-0.1.96\nCloning into 'early-stopping-pytorch'...\nremote: Enumerating objects: 92, done.\u001b[K\nremote: Total 92 (delta 0), reused 0 (delta 0), pack-reused 92\u001b[K\nUnpacking objects: 100% (92/92), done.\n" ], [ "import os\nos.environ['CUDA_LAUNCH_BLOCKING'] = \"1\"", "_____no_output_____" ] ], [ [ "## Importing the libraries\n\n", "_____no_output_____" ] ], [ [ "from datasets import load_dataset,Dataset,DatasetDict,load_from_disk\nfrom transformers import DataCollatorWithPadding,AutoModelForSequenceClassification, Trainer, TrainingArguments,AutoTokenizer,AutoModel,AutoConfig\nfrom transformers.modeling_outputs import SequenceClassifierOutput\nfrom thai2transformers.preprocess import process_transformers\nimport torch\nimport torch.nn as nn\nimport pandas as pd\nimport numpy as np\nfrom sklearn.metrics import classification_report\nfrom pytorchtools import EarlyStopping", "_____no_output_____" ], [ "from google.colab import drive\ndrive.mount('/content/drive')", "Mounted at /content/drive\n" ] ], [ [ "## Loading the dataset", "_____no_output_____" ] ], [ [ "data = load_from_disk('/content/drive/MyDrive/Fake news/News-Dataset/dataset')", "_____no_output_____" ], [ "def clean_function(examples):\n examples['text'] = process_transformers(examples['text'])\n return examples\ndata = data.map(clean_function)", "Loading cached processed dataset at /content/drive/MyDrive/Fake 
news/News-Dataset/dataset/train/cache-9aaf53670437ac28.arrow\nLoading cached processed dataset at /content/drive/MyDrive/Fake news/News-Dataset/dataset/test/cache-f75cfba6eebda1fc.arrow\nLoading cached processed dataset at /content/drive/MyDrive/Fake news/News-Dataset/dataset/valid/cache-b331ba61eb8a3b56.arrow\n" ] ], [ [ "## Fine-tuning", "_____no_output_____" ] ], [ [ "checkpoint = \"airesearch/wangchanberta-base-att-spm-uncased\"\ntokenizer = AutoTokenizer.from_pretrained(checkpoint)\ntokenizer.model_max_len=416", "_____no_output_____" ], [ "def tokenize(batch):\n return tokenizer(batch[\"text\"], truncation=True,max_length=416)\n\ntokenized_dataset = data.map(tokenize, batched=True)\ntokenized_dataset", "Loading cached processed dataset at /content/drive/MyDrive/Fake news/News-Dataset/dataset/train/cache-1231022e8a5cbde4.arrow\nLoading cached processed dataset at /content/drive/MyDrive/Fake news/News-Dataset/dataset/test/cache-19daeb334c4d5f5e.arrow\nLoading cached processed dataset at /content/drive/MyDrive/Fake news/News-Dataset/dataset/valid/cache-c4aced047a8102a4.arrow\n" ], [ "tokenized_dataset.set_format(\"torch\",columns=[\"input_ids\", \"attention_mask\", \"labels\"])\ndata_collator = DataCollatorWithPadding(tokenizer=tokenizer)", "_____no_output_____" ], [ "class extract_tensor(nn.Module):\n def forward(self,x):\n # Output shape (batch, features, hidden)\n tensor, _ = x\n # Reshape shape (batch, hidden)\n return tensor[:, :]", "_____no_output_____" ], [ "class CustomModel(nn.Module):\n def __init__(self,checkpoint,num_labels): \n super(CustomModel,self).__init__() \n self.num_labels = num_labels \n\n #Load Model with given checkpoint and extract its body\n self.model = model = AutoModel.from_pretrained(checkpoint,config=AutoConfig.from_pretrained(checkpoint, output_attentions=True,output_hidden_states=True))\n self.dropout = nn.Dropout(0.1) \n self.classifier = nn.Sequential(\n nn.LSTM(768, 256, 1, batch_first=True),\n extract_tensor(),\n nn.Linear(256, 2)\n )\n\n def forward(self, input_ids=None, attention_mask=None,labels=None):\n #Extract outputs from the body\n outputs = self.model(input_ids=input_ids, attention_mask=attention_mask)\n\n #Add custom layers\n sequence_output = self.dropout(outputs[0]) #outputs[0]=last hidden state\n\n logits = self.classifier(sequence_output[:,0,:].view(-1,768)) # calculate losses\n \n loss = None\n if labels is not None:\n loss_fct = nn.CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n \n return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states,attentions=outputs.attentions)", "_____no_output_____" ], [ "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nmodel=CustomModel(checkpoint=checkpoint,num_labels=2).to(device)", "_____no_output_____" ], [ "from torch.utils.data import DataLoader\n\ntrain_dataloader = DataLoader(\n tokenized_dataset[\"train\"], shuffle=True, batch_size=8, collate_fn=data_collator\n)\neval_dataloader = DataLoader(\n tokenized_dataset[\"valid\"], batch_size=8, collate_fn=data_collator\n)", "_____no_output_____" ], [ "from transformers import AdamW,get_scheduler\n\noptimizer = AdamW(model.parameters(), lr=5e-5)\n\nnum_epochs = 50\nnum_training_steps = num_epochs * len(train_dataloader)\nlr_scheduler = get_scheduler(\n \"linear\",\n optimizer=optimizer,\n num_warmup_steps=0,\n num_training_steps=num_training_steps,\n)\nprint(num_training_steps)", "15200\n" ], [ "from datasets import load_metric\nmetric = 
load_metric(\"f1\")\n", "_____no_output_____" ], [ "from tqdm.auto import tqdm\n\nprogress_bar_train = tqdm(range(num_training_steps))\nprogress_bar_eval = tqdm(range(num_epochs * len(eval_dataloader)))\n# to track the training loss as the model trains\ntrain_losses = []\n# to track the validation loss as the model trains\nvalid_losses = []\n# to track the average training loss per epoch as the model trains\navg_train_losses = []\n# to track the average validation loss per epoch as the model trains\navg_valid_losses = [] \n\nearly_stopping = EarlyStopping(patience=7, verbose=True)\n\nfor epoch in range(num_epochs):\n model.train()\n size = len(train_dataloader.dataset)\n for batch, X in enumerate(train_dataloader):\n X = {k: v.to(device) for k, v in X.items()}\n outputs = model(**X)\n loss = outputs.loss\n loss.backward()\n\n optimizer.step()\n lr_scheduler.step()\n optimizer.zero_grad()\n progress_bar_train.update(1)\n train_losses.append(loss.item())\n\n model.eval()\n for batch, X in enumerate(eval_dataloader):\n X = {k: v.to(device) for k, v in X.items()}\n with torch.no_grad():\n outputs = model(**X)\n loss = outputs.loss\n valid_losses.append(loss.item())\n\n logits = outputs.logits\n predictions = torch.argmax(logits, dim=-1)\n metric.add_batch(predictions=predictions, references=X[\"labels\"])\n progress_bar_eval.update(1)\n\n # print training/validation statistics \n # calculate average loss over an epoch\n train_loss = np.average(train_losses)\n valid_loss = np.average(valid_losses)\n avg_train_losses.append(train_loss)\n avg_valid_losses.append(valid_loss)\n \n epoch_len = len(str(num_epochs))\n \n loss_msg = (f'[{epoch+1:>{epoch_len}}/{num_epochs:>{epoch_len}}] ' +\n f'train_loss: {train_loss:.5f} ' +\n f'valid_loss: {valid_loss:.5f}')\n \n print(loss_msg)\n\n # clear lists to track next epoch\n train_losses = []\n valid_losses = []\n # early_stopping needs the validation loss to check if it has decresed, \n # and if it has, it will make a checkpoint of the current model\n early_stopping(valid_loss, model)\n \n if early_stopping.early_stop:\n print(\"Early stopping\")\n break\n print(metric.compute())\n print('\\n')\n\n ", "_____no_output_____" ], [ "model.load_state_dict(torch.load('checkpoint.pt'))", "_____no_output_____" ], [ "# visualize the loss as the network trained\nimport matplotlib.pyplot as plt\nfig = plt.figure(figsize=(10,8))\nplt.plot(range(1,len(avg_train_losses)+1),avg_train_losses, label='Training Loss')\nplt.plot(range(1,len(avg_valid_losses)+1),avg_valid_losses,label='Validation Loss')\n\n# find position of lowest validation loss\nminposs = avg_valid_losses.index(min(avg_valid_losses))+1 \nplt.axvline(minposs, linestyle='--', color='r',label='Early Stopping Checkpoint')\n\nplt.xlabel('epochs')\nplt.ylabel('loss')\nplt.ylim(0, 0.5) # consistent scale\nplt.xlim(0, len(avg_train_losses)+1) # consistent scale\nplt.grid(True)\nplt.legend()\nplt.tight_layout()\nplt.show()\nfig.savefig('loss_plot.png', bbox_inches='tight')", "_____no_output_____" ] ], [ [ "## Test Result", "_____no_output_____" ] ], [ [ "preds = torch.empty(0).cuda()\n\nmodel.eval()\n\ntest_dataloader = DataLoader(\n tokenized_dataset[\"test\"], batch_size=8, collate_fn=data_collator\n)\n\nfor batch in test_dataloader:\n batch = {k: v.to(device) for k, v in batch.items()}\n with torch.no_grad():\n outputs = model(**batch)\n\n logits = outputs.logits\n predictions = torch.argmax(logits, dim=-1)\n metric.add_batch(predictions=predictions, references=batch[\"labels\"])\n preds = torch.cat((preds, 
predictions), 0)\n\nmetric.compute()", "_____no_output_____" ], [ "text = tokenized_dataset[\"test\"][\"text\"]", "_____no_output_____" ], [ "y_true = tokenized_dataset[\"test\"][\"labels\"]\ny_pred = preds.cpu()\nprint(classification_report(y_true, y_pred, target_names=['true','fake']))", " precision recall f1-score support\n\n true 0.88 0.90 0.89 324\n fake 0.91 0.89 0.90 352\n\n accuracy 0.89 676\n macro avg 0.89 0.90 0.89 676\nweighted avg 0.90 0.89 0.90 676\n\n" ] ], [ [ "## Wrong Prediction", "_____no_output_____" ] ], [ [ "test_result = pd.DataFrame(zip(text, [int(x) for x in y_pred.tolist()], y_true.tolist()), columns=['text','pred','true'])\nwrong_prediction = test_result[test_result['pred'] != test_result['true']]\nwrong_prediction.head()", "_____no_output_____" ] ], [ [ "## Confusion Matrix", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\nfrom sklearn.metrics import confusion_matrix\nimport seaborn as sn\narray = confusion_matrix(y_true, y_pred)\ndf_cm = pd.DataFrame(array, range(2), range(2))\nsn.heatmap(df_cm, annot=True, annot_kws={\"size\": 16}, fmt='g', cmap=\"flare\") \nplt.show()", "_____no_output_____" ], [ "torch.save(model, '/content/drive/MyDrive/Fake news/Model/sodabert-lstm')", "_____no_output_____" ], [ "", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
cb783bdfc8445d833755a436855cb65ebe62ff62
48,630
ipynb
Jupyter Notebook
notebooks/MuHeQA_Datasets.ipynb
librairy/extractiveQA-over-KG
31107fe16e642ceac06c0fbb0a73ad04ba196655
[ "Apache-2.0" ]
1
2022-02-07T16:56:36.000Z
2022-02-07T16:56:36.000Z
notebooks/MuHeQA_Datasets.ipynb
librairy/extractiveQA-over-KG
31107fe16e642ceac06c0fbb0a73ad04ba196655
[ "Apache-2.0" ]
5
2021-11-30T11:25:47.000Z
2021-11-30T12:15:59.000Z
notebooks/MuHeQA_Datasets.ipynb
librairy/MuHeQA
31107fe16e642ceac06c0fbb0a73ad04ba196655
[ "Apache-2.0" ]
null
null
null
36.156134
502
0.463603
[ [ [ "# Pre-Processing Methods", "_____no_output_____" ] ], [ [ "%%capture\n!pip3 install sparqlwrapper", "_____no_output_____" ], [ "# Common methods to retrieve data from Wikidata\n\nimport time\nfrom SPARQLWrapper import SPARQLWrapper, JSON \nimport pandas as pd\nimport urllib.request as url\nimport json\nfrom SPARQLWrapper import SPARQLWrapper\n\nwiki_sparql = SPARQLWrapper(\"https://query.wikidata.org/sparql\")\nwiki_sparql.setReturnFormat(JSON)\nwiki_sparql.setTimeout(timeout=25)\n\nwiki_cache = {}", "_____no_output_____" ], [ "def get_wikidata_label(entity):\n if (entity in cache):\n #print(\"use of cache!\")\n return cache[entity]\n query = \"\"\"\n PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> \n PREFIX wd: <http://www.wikidata.org/entity/> \n SELECT *\n WHERE {\n wd:ENTITY rdfs:label ?label .\n FILTER (langMatches( lang(?label), \"EN\" ) )\n } \n LIMIT 1\n \"\"\"\n query_text = query.replace('ENTITY',entity)\n wiki_sparql.setQuery(query_text)\n result = \"\"\n while (result == \"\"):\n try:\n ret = wiki_sparql.queryAndConvert()\n if (len(ret[\"results\"][\"bindings\"]) == 0):\n result = \"-\"\n for r in ret[\"results\"][\"bindings\"]:\n result = r['label']['value']\n except Exception as e:\n print(\"Error on wikidata query:\",e)\n if \"timed out\" in str(e): \n result = \"-\"\n break\n cache[entity] = result\n return result\n\ndef get_wikidata(query):\n if (\"ASK\" not in query) and (\"LIMIT\" not in query):\n query += \" LIMIT 10\"\n #print(query)\n key = query.replace(\" \",\"_\")\n if (key in cache):\n #print(\"use of cache!\")\n return cache[key]\n wiki_sparql.setQuery(query)\n result = []\n retries = 0\n while (len(result) == 0) and (retries < 5):\n try:\n ret = wiki_sparql.queryAndConvert()\n #print(ret)\n if (\"ASK\" in query):\n result.append(str(ret['boolean']))\n elif (len(ret[\"results\"][\"bindings\"]) == 0):\n result.append(\"-\")\n else:\n for r in ret[\"results\"][\"bindings\"]:\n for k in r.keys():\n tokens = r[k]['value'].split(\"/\")\n result.append(tokens[len(tokens)-1])\n except Exception as e:\n retries += 1\n print(\"Error on wikidata query:\",e)\n if \"timed out\" in str(e): \n result.append(\"-\")\n break\n cache[key] = result\n return result\n\ndef preprocess_questions(questions):\n rows = []\n counter = 0\n for question in data['questions']:\n if (counter % 1000 == 0):\n print(\"Queries processed:\",counter, \"Cache Size:\",len(cache))\n #print(\"#\",question['question_id'])\n answer = question['query_answer'][0]\n subject_labels = []\n subjects = []\n predicates = [e.split(\":\")[1] for e in answer['sparql_template'].split(\" \") if \":\" in e]\n predicate_labels = []\n for p in predicates:\n predicate_labels.append(get_wikidata_label(p.replace(\"*\",\"\").split(\"/\")[0]))\n objects = get_wikidata(answer['sparql_query'])\n object_labels = []\n for o in objects:\n if (len(o)>0) and (o[0]==\"Q\"):\n object_labels.append(get_wikidata_label(o))\n else:\n object_labels.append(o)\n for entity in answer['entities']: \n subject_labels.append(entity['label'])\n subjects.append(entity['entity'].split(\":\")[1])\n row = {\n 'subjects':subjects,\n 'predicates' : predicates,\n 'objects': objects,\n 'question': question['natural_language_question'],\n 'subject_labels':subject_labels,\n 'predicate_labels':predicate_labels,\n 'object_labels':object_labels\n }\n #print(row)\n rows.append(row)\n counter += 1\n df = pd.DataFrame(rows)\n return df", "_____no_output_____" ], [ "# Common methods to retrieve data from Wikidata\n\nimport time\nfrom SPARQLWrapper 
import SPARQLWrapper, JSON \nimport pandas as pd\nimport urllib.request as url\nimport json\nfrom SPARQLWrapper import SPARQLWrapper\n\ndbpedia_sparql = SPARQLWrapper(\"https://dbpedia.org/sparql/\")\ndbpedia_sparql.setReturnFormat(JSON)\ndbpedia_sparql.setTimeout(timeout=60)\n\ndbpedia_cache = {}", "_____no_output_____" ], [ "import hashlib\n\ndef hash_text(text):\n hash_object = hashlib.md5(text.encode())\n md5_hash = hash_object.hexdigest()\n return str(md5_hash)\n\ndef get_dbpedia_label(entity,use_cache=True,verbose=False):\n key = entity+\"_label\"\n if (use_cache) and (key in dbpedia_cache):\n #print(\"use of cache!\")\n return dbpedia_cache[key].copy()\n query = \"\"\"\n PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> \n PREFIX dbr: <http://dbpedia.org/resource/> \n select distinct ?label {\n \n <ENTITY> rdfs:label ?label .\n filter langMatches(lang(?label), 'en')\n \n }\n LIMIT 250\n \"\"\"\n query_text = query.replace('ENTITY',entity) \n dbpedia_sparql.setQuery(query_text)\n result = []\n while (len(result) == 0):\n try:\n if (verbose):\n print(\"SPARQL Query:\",query_text)\n ret = dbpedia_sparql.queryAndConvert()\n if (verbose):\n print(\"SPARQL Response:\",ret)\n for r in ret[\"results\"][\"bindings\"]:\n id = entity\n value = id\n if ('label' in r) and ('value' in r['label']):\n value = r['label']['value'] \n if (' id ' not in value.lower()) and (' link ' not in value.lower()) and ('has abstract' not in value.lower()) and ('wiki' not in value.lower()) and ('instance of' not in value.lower()):\n result.append({'id':id, 'value':value})\n except Exception as e:\n print(\"Error on SPARQL query:\",e)\n break \n dbpedia_cache[key] = result\n #print(len(result),\"properties found\")\n return result\n\ndef get_dbpedia_property_value(filter,use_cache=True,verbose=False):\n key = hash_text(filter)\n if (use_cache) and (key in dbpedia_cache):\n #print(\"use of cache!\")\n return dbpedia_cache[key].copy()\n query = \"\"\"\n PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> \n PREFIX dbr: <http://dbpedia.org/resource/> \n select distinct ?object ?label {\n { FILTER }\n\n optional { \n ?object rdfs:label ?label .\n filter langMatches(lang(?label), 'en')\n }\n }\n LIMIT 250\n \"\"\"\n query_text = query.replace('FILTER',filter) \n dbpedia_sparql.setQuery(query_text)\n result = []\n while (len(result) == 0):\n try:\n if (verbose):\n print(\"SPARQL Query:\",query_text)\n ret = dbpedia_sparql.queryAndConvert()\n if (verbose):\n print(\"SPARQL Response:\",ret)\n for r in ret[\"results\"][\"bindings\"]:\n id = r['object']['value']\n value = id\n if ('label' in r) and ('value' in r['label']):\n value = r['label']['value'] \n if (' id ' not in value.lower()) and (' link ' not in value.lower()) and ('has abstract' not in value.lower()) and ('wiki' not in value.lower()) and ('instance of' not in value.lower()):\n result.append({'id':id, 'value':value})\n except Exception as e:\n print(\"Error on SPARQL query:\",e)\n break \n dbpedia_cache[key] = result\n #print(len(result),\"properties found\")\n return result\n\n\ndef get_forward_dbpedia_property_value(entity,property,use_cache=True,verbose=False):\n query_filter =\"<ENTITY> <PROPERTY> ?object\" \n return get_dbpedia_property_value(query_filter.replace(\"ENTITY\",entity).replace(\"PROPERTY\",property),use_cache,verbose) \n\ndef get_backward_dbpedia_property_value(entity,property,use_cache=True,verbose=False):\n query_filter =\"?object <PROPERTY> <ENTITY>\"\n return 
get_dbpedia_property_value(query_filter.replace(\"ENTITY\",entity).replace(\"PROPERTY\",property),use_cache,verbose) \n", "_____no_output_____" ] ], [ [ "# Datasets", "_____no_output_____" ], [ "## SimpleQuestions Dataset\n\n", "_____no_output_____" ], [ "### Wikidata SimpleQuestions", "_____no_output_____" ] ], [ [ "import pandas as pd\ndf = pd.read_csv('https://raw.githubusercontent.com/askplatypus/wikidata-simplequestions/master/annotated_wd_data_test_answerable.txt', sep=\"\\t\", index_col=False, header=None, names=['subject','predicate','object','question'])\ndf.head()", "_____no_output_____" ] ], [ [ "Retrieve labels from wikidata for subject, predicate and object:", "_____no_output_____" ] ], [ [ "object_labels = []\nsubject_labels = []\npredicate_labels = [] \nfor index, row in df.iterrows():\n print(index,\":\",row)\n subject_labels.append(get_wikidata_label(row['subject'])) \n predicate_labels.append(get_wikidata_label(row['predicate'].replace(\"R\",\"P\")))\n object_labels.append(get_wikidata_label(row['object']))\n if (index % 100 == 0 ):\n print(\"Labels Identified:\",index,\"Cache Size:\",len(cache))\n index += 1\nprint(len(object_labels),\"labels retrieved!\")\ndf['subject_label']=subject_labels\ndf['predicate_label']=predicate_labels\ndf['object_label']=object_labels\ndf.to_csv('wsq-labels.csv')\ndf.head()", "_____no_output_____" ] ], [ [ "### SimpleDBpediaQuestions", "_____no_output_____" ] ], [ [ "# read dbpedia compatible SimpleQuestions\nimport urllib.request as url\nimport json\nimport unidecode\nimport pandas as pd\n\n\ndef normalize(label):\n return unidecode.unidecode(label.strip()).lower()\n\n\nstream = url.urlopen(\"https://raw.githubusercontent.com/castorini/SimpleDBpediaQA/master/V1/test.json\")\ncontent = stream.read()\ndata = json.loads(content)\nref_questions = [e.lower().strip() for e in pd.read_csv('data/wsq-labels.csv', index_col=0)['question'].tolist()]\ncounter = 0\ntotal = 0\nrows = []\ndbpedia_questions = []\nfor question in data['Questions']:\n total += 1\n if (total % 100 == 0):\n print(total)\n question_query = question['Query']\n if (question_query.lower().strip() in ref_questions):\n counter += 1\n subject_val = question['Subject']\n subject_label = ''\n ss = get_dbpedia_label(subject_val)\n if (len(ss) > 0):\n subject_label = ss[0]['value']\n predicate = question['PredicateList'][0]\n property_val = predicate['Predicate']\n property_label = ''\n pp = get_dbpedia_label(property_val)\n if (len(pp) > 0):\n property_label = pp[0]['value']\n if (predicate['Direction'] == 'forward'):\n object_val = get_forward_dbpedia_property_value(subject_val,property_val)\n else:\n object_val = get_backward_dbpedia_property_value(subject_val,property_val)\n object_id = ''\n object_label = ''\n if len(object_val) > 0:\n object_id = object_val[0]['id']\n object_label = object_val[0]['value']\n row = {'subject':subject_val, 'predicate':property_val, 'object': object_id, 'question':question_query, 'subject_label':subject_label, 'property_label':property_label, 'object_label': object_label}\n rows.append(row)\nprint(\"Total:\",len(rows))\ndf = pd.DataFrame(rows)\ndf.to_csv('dsq-labels.csv')\ndf.head(10)", 
"100\n200\n300\n400\n500\n600\n700\n800\n900\n1000\n1100\n1200\n1300\n1400\n1500\n1600\n1700\n1800\n1900\n2000\n2100\n2200\n2300\n2400\n2500\n2600\n2700\n2800\n2900\n3000\n3100\n3200\n3300\n3400\n3500\n3600\n3700\n3800\n3900\n4000\n4100\n4200\n4300\n4400\n4500\n4600\n4700\n4800\n4900\n5000\n5100\n5200\n5300\n5400\n5500\n5600\n5700\n5800\n5900\n6000\n6100\n6200\n6300\n6400\n6500\n6600\n6700\n6800\n6900\n7000\n7100\n7200\n7300\n7400\n7500\n7600\n7700\n7800\n7900\n8000\n8100\n8200\n8300\n8400\n8500\nTotal: 3688\n" ] ], [ [ "## Wikidata QA Dataset\n\nFrom paper: https://arxiv.org/pdf/2107.02865v1.pdf ", "_____no_output_____" ] ], [ [ "import urllib.request as url\nimport json\n\nstream = url.urlopen(\"https://raw.githubusercontent.com/thesemanticwebhero/ElNeuKGQA/main/data/dataset_wikisparql.json\")\ncontent = stream.read()\ndata = json.loads(content)\ndf = preprocess_questions(data)\ndf.to_csv('wqa-labels.csv')\ndf.head()", "_____no_output_____" ], [ "df.describe(include='all')", "_____no_output_____" ] ], [ [ "## LC-QuAD 2.0 Dataset\n\nFrom paper: https://arxiv.org/pdf/2107.02865v1.pdf ", "_____no_output_____" ] ], [ [ "import urllib.request as url\nimport json\n\nstream = url.urlopen(\"https://raw.githubusercontent.com/thesemanticwebhero/ElNeuKGQA/main/data/dataset_lcquad2.json\")\ncontent = stream.read()\ndata = json.loads(content)\ndf = preprocess_questions(data)\ndf.to_csv('lcquad2-labels.csv')\ndf.head()", "Queries processed: 0 Cache Size: 7439\nQueries processed: 1000 Cache Size: 7439\nQueries processed: 2000 Cache Size: 7439\nQueries processed: 3000 Cache Size: 7439\n" ] ], [ [ "## COVID-QA Dataset \n\nFrom paper: https://aclanthology.org/2020.nlpcovid19-acl.18.pdf", "_____no_output_____" ] ], [ [ "import urllib.request as url\nimport json\nimport pandas as pd\n\nstream = url.urlopen(\"https://raw.githubusercontent.com/sharonlevy/Open_Domain_COVIDQA/main/data/qa_test.json\")\ncontent = stream.read()\ndata = json.loads(content)\nrows = []\ncounter = 0\nfor item in data['data']:\n row = {\n 'article': item['title'],\n 'text' : item['context'],\n 'question': item['question'],\n 'answer': item['answers'][0]['text'] \n }\n rows.append(row)\n counter += 1\n if (counter % 100 == 0 ):\n print(\"Questions processed:\",counter)\ndf = pd.DataFrame(rows)\ndf.to_csv('covidqa-labels.csv')\ndf.head()", "Questions processed: 100\nQuestions processed: 200\nQuestions processed: 300\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb784212e878861ec170cd3f01f3472ca7bc9ccd
29,367
ipynb
Jupyter Notebook
2_Training.ipynb
volfi/CVND---Image-Captioning-Project
67f059d43b3c165bb068a4e534841f55b0d3a4e6
[ "MIT" ]
null
null
null
2_Training.ipynb
volfi/CVND---Image-Captioning-Project
67f059d43b3c165bb068a4e534841f55b0d3a4e6
[ "MIT" ]
null
null
null
2_Training.ipynb
volfi/CVND---Image-Captioning-Project
67f059d43b3c165bb068a4e534841f55b0d3a4e6
[ "MIT" ]
null
null
null
60.801242
734
0.62703
[ [ [ "# Computer Vision Nanodegree\n\n## Project: Image Captioning\n\n---\n\nIn this notebook, you will train your CNN-RNN model. \n\nYou are welcome and encouraged to try out many different architectures and hyperparameters when searching for a good model.\n\nThis does have the potential to make the project quite messy! Before submitting your project, make sure that you clean up:\n- the code you write in this notebook. The notebook should describe how to train a single CNN-RNN architecture, corresponding to your final choice of hyperparameters. You should structure the notebook so that the reviewer can replicate your results by running the code in this notebook. \n- the output of the code cell in **Step 2**. The output should show the output obtained when training the model from scratch.\n\nThis notebook **will be graded**. \n\nFeel free to use the links below to navigate the notebook:\n- [Step 1](#step1): Training Setup\n- [Step 2](#step2): Train your Model\n- [Step 3](#step3): (Optional) Validate your Model", "_____no_output_____" ], [ "<a id='step1'></a>\n## Step 1: Training Setup\n\nIn this step of the notebook, you will customize the training of your CNN-RNN model by specifying hyperparameters and setting other options that are important to the training procedure. The values you set now will be used when training your model in **Step 2** below.\n\nYou should only amend blocks of code that are preceded by a `TODO` statement. **Any code blocks that are not preceded by a `TODO` statement should not be modified**.\n\n### Task #1\n\nBegin by setting the following variables:\n- `batch_size` - the batch size of each training batch. It is the number of image-caption pairs used to amend the model weights in each training step. \n- `vocab_threshold` - the minimum word count threshold. Note that a larger threshold will result in a smaller vocabulary, whereas a smaller threshold will include rarer words and result in a larger vocabulary. \n- `vocab_from_file` - a Boolean that decides whether to load the vocabulary from file. \n- `embed_size` - the dimensionality of the image and word embeddings. \n- `hidden_size` - the number of features in the hidden state of the RNN decoder. \n- `num_epochs` - the number of epochs to train the model. We recommend that you set `num_epochs=3`, but feel free to increase or decrease this number as you wish. [This paper](https://arxiv.org/pdf/1502.03044.pdf) trained a captioning model on a single state-of-the-art GPU for 3 days, but you'll soon see that you can get reasonable results in a matter of a few hours! (_But of course, if you want your model to compete with current research, you will have to train for much longer._)\n- `save_every` - determines how often to save the model weights. We recommend that you set `save_every=1`, to save the model weights after each epoch. This way, after the `i`th epoch, the encoder and decoder weights will be saved in the `models/` folder as `encoder-i.pkl` and `decoder-i.pkl`, respectively.\n- `print_every` - determines how often to print the batch loss to the Jupyter notebook while training. Note that you **will not** observe a monotonic decrease in the loss function while training - this is perfectly fine and completely expected! 
You are encouraged to keep this at its default value of `100` to avoid clogging the notebook, but feel free to change it.\n- `log_file` - the name of the text file containing - for every step - how the loss and perplexity evolved during training.\n\nIf you're not sure where to begin to set some of the values above, you can peruse [this paper](https://arxiv.org/pdf/1502.03044.pdf) and [this paper](https://arxiv.org/pdf/1411.4555.pdf) for useful guidance! **To avoid spending too long on this notebook**, you are encouraged to consult these suggested research papers to obtain a strong initial guess for which hyperparameters are likely to work best. Then, train a single model, and proceed to the next notebook (**3_Inference.ipynb**). If you are unhappy with your performance, you can return to this notebook to tweak the hyperparameters (and/or the architecture in **model.py**) and re-train your model.\n\n### Question 1\n\n**Question:** Describe your CNN-RNN architecture in detail. With this architecture in mind, how did you select the values of the variables in Task 1? If you consulted a research paper detailing a successful implementation of an image captioning model, please provide the reference.\n\n**Answer:** The Encoder which consists of a CNN (transfer learning used here as the resnet50 weights were loaded) was already given. I kept the decoder RNN simple for the start. It consists of one layer with 512 features. I used a standard value as the batch-size (128) and set the vocab-threshold to 4 in order to delete very uncommon words but still have a big enough vocabulary (about 10000 words) in order to be able to describe very specific pictures. I set the embedding size to 512 which worked fine in my case but probably could be set lower and still yield good results.\n\n\n### (Optional) Task #2\n\nNote that we have provided a recommended image transform `transform_train` for pre-processing the training images, but you are welcome (and encouraged!) to modify it as you wish. When modifying this transform, keep in mind that:\n- the images in the dataset have varying heights and widths, and \n- if using a pre-trained model, you must perform the corresponding appropriate normalization.\n\n### Question 2\n\n**Question:** How did you select the transform in `transform_train`? If you left the transform at its provided value, why do you think that it is a good choice for your CNN architecture?\n\n**Answer:** I left the transform unchanged because I think it already produces very good data augmentation. Random parts of the pictures are cropped and flipped horizontically with a 50-50 chance. It is always good to introduce this kind of randomness in your data to prevent overfitting.\n\n### Task #3\n\nNext, you will specify a Python list containing the learnable parameters of the model. For instance, if you decide to make all weights in the decoder trainable, but only want to train the weights in the embedding layer of the encoder, then you should set `params` to something like:\n```\nparams = list(decoder.parameters()) + list(encoder.embed.parameters()) \n```\n\n### Question 3\n\n**Question:** How did you select the trainable parameters of your architecture? Why do you think this is a good choice?\n\n**Answer:** I decided - as described above - to make all weights in the decoder trainable and only train the weights in the embedding layer of the encoder. Both hidden-size and embedding-size were set to 512, which I did not change during the different tests. 
I experimented more with vocab-threshold and learning rate.\n\n### Task #4\n\nFinally, you will select an [optimizer](http://pytorch.org/docs/master/optim.html#torch.optim.Optimizer).\n\n### Question 4\n\n**Question:** How did you select the optimizer used to train your model?\n\n**Answer:** I selected the adam optimizer which is usually a good one to start with because it adjusts learning rate and momentum for each parameter individually.", "_____no_output_____" ] ], [ [ "import torch\nimport torch.nn as nn\nfrom torchvision import transforms\nimport sys\nsys.path.append('/opt/cocoapi/PythonAPI')\nfrom pycocotools.coco import COCO\nfrom data_loader import get_loader\nfrom model import EncoderCNN, DecoderRNN\nimport math\nimport nltk\nnltk.download('punkt')\n\n## TODO #1: Select appropriate values for the Python variables below.\nbatch_size = 128 # batch size\nvocab_threshold = 4 # minimum word count threshold\nvocab_from_file = True # if True, load existing vocab file\nembed_size = 512 # dimensionality of image and word embeddings\nhidden_size = 512 # number of features in hidden state of the RNN decoder\nnum_epochs = 10 # number of training epochs\nsave_every = 1 # determines frequency of saving model weights\nprint_every = 100 # determines window for printing average loss\nlog_file = 'training_log.txt' # name of file with saved training loss and perplexity\n\n# (Optional) TODO #2: Amend the image transform below.\ntransform_train = transforms.Compose([ \n transforms.Resize(256), # smaller edge of image resized to 256\n transforms.RandomCrop(224), # get 224x224 crop from random location\n transforms.RandomHorizontalFlip(), # horizontally flip image with probability=0.5\n transforms.ToTensor(), # convert the PIL Image to a tensor\n transforms.Normalize((0.485, 0.456, 0.406), # normalize image for pre-trained model\n (0.229, 0.224, 0.225))])\n\n# Build data loader.\ndata_loader = get_loader(transform=transform_train,\n mode='train',\n batch_size=batch_size,\n vocab_threshold=vocab_threshold,\n vocab_from_file=vocab_from_file)\n\n# The size of the vocabulary.\nvocab_size = len(data_loader.dataset.vocab)\nprint(vocab_size)\n# Initialize the encoder and decoder. \nencoder = EncoderCNN(embed_size)\ndecoder = DecoderRNN(embed_size, hidden_size, vocab_size)\n\n# Move models to GPU if CUDA is available. \ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nencoder.to(device)\ndecoder.to(device)\n\n# Define the loss function. \ncriterion = nn.CrossEntropyLoss().cuda() if torch.cuda.is_available() else nn.CrossEntropyLoss()\n\n# TODO #3: Specify the learnable parameters of the model.\nparams = list(decoder.parameters()) + list(encoder.embed.parameters()) \n\n# TODO #4: Define the optimizer.\n#optimizer = torch.optim.Adam(params, lr=0.001, betas=(0.9, 0.999), eps=1e-08)\noptimizer = torch.optim.Adam(params, lr=0.001, weight_decay=0)\n\n# Set the total number of training steps per epoch.\ntotal_step = math.ceil(len(data_loader.dataset.caption_lengths) / data_loader.batch_sampler.batch_size)", "[nltk_data] Downloading package punkt to /root/nltk_data...\n[nltk_data] Unzipping tokenizers/punkt.zip.\nVocabulary successfully loaded from vocab.pkl file!\nloading annotations into memory...\nDone (t=1.25s)\ncreating index...\n" ] ], [ [ "<a id='step2'></a>\n## Step 2: Train your Model\n\nOnce you have executed the code cell in **Step 1**, the training procedure below should run without issue. 
\n\nIt is completely fine to leave the code cell below as-is without modifications to train your model. However, if you would like to modify the code used to train the model below, you must ensure that your changes are easily parsed by your reviewer. In other words, make sure to provide appropriate comments to describe how your code works! \n\nYou may find it useful to load saved weights to resume training. In that case, note the names of the files containing the encoder and decoder weights that you'd like to load (`encoder_file` and `decoder_file`). Then you can load the weights by using the lines below:\n\n```python\n# Load pre-trained weights before resuming training.\nencoder.load_state_dict(torch.load(os.path.join('./models', encoder_file)))\ndecoder.load_state_dict(torch.load(os.path.join('./models', decoder_file)))\n```\n\nWhile trying out parameters, make sure to take extensive notes and record the settings that you used in your various training runs. In particular, you don't want to encounter a situation where you've trained a model for several hours but can't remember what settings you used :).\n\n### A Note on Tuning Hyperparameters\n\nTo figure out how well your model is doing, you can look at how the training loss and perplexity evolve during training - and for the purposes of this project, you are encouraged to amend the hyperparameters based on this information. \n\nHowever, this will not tell you if your model is overfitting to the training data, and, unfortunately, overfitting is a problem that is commonly encountered when training image captioning models. \n\nFor this project, you need not worry about overfitting. **This project does not have strict requirements regarding the performance of your model**, and you just need to demonstrate that your model has learned **_something_** when you generate captions on the test data. For now, we strongly encourage you to train your model for the suggested 3 epochs without worrying about performance; then, you should immediately transition to the next notebook in the sequence (**3_Inference.ipynb**) to see how your model performs on the test data. If your model needs to be changed, you can come back to this notebook, amend hyperparameters (if necessary), and re-train the model.\n\nThat said, if you would like to go above and beyond in this project, you can read about some approaches to minimizing overfitting in section 4.3.1 of [this paper](http://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=7505636). 
In the next (optional) step of this notebook, we provide some guidance for assessing the performance on the validation dataset.", "_____no_output_____" ] ], [ [ "import torch.utils.data as data\nimport numpy as np\nimport os\nimport requests\nimport time\n\n# Open the training log file.\nf = open(log_file, 'w')\n\nold_time = time.time()\nresponse = requests.request(\"GET\", \n \"http://metadata.google.internal/computeMetadata/v1/instance/attributes/keep_alive_token\", \n headers={\"Metadata-Flavor\":\"Google\"})\n\nfor epoch in range(1, num_epochs+1):\n \n for i_step in range(1, total_step+1):\n \n if time.time() - old_time > 60:\n old_time = time.time()\n requests.request(\"POST\", \n \"https://nebula.udacity.com/api/v1/remote/keep-alive\", \n headers={'Authorization': \"STAR \" + response.text})\n \n # Randomly sample a caption length, and sample indices with that length.\n indices = data_loader.dataset.get_train_indices()\n # Create and assign a batch sampler to retrieve a batch with the sampled indices.\n new_sampler = data.sampler.SubsetRandomSampler(indices=indices)\n data_loader.batch_sampler.sampler = new_sampler\n \n # Obtain the batch.\n images, captions = next(iter(data_loader))\n\n # Move batch of images and captions to GPU if CUDA is available.\n images = images.to(device)\n captions = captions.to(device)\n \n # Zero the gradients.\n decoder.zero_grad()\n encoder.zero_grad()\n \n # Pass the inputs through the CNN-RNN model.\n features = encoder(images)\n outputs = decoder(features, captions)\n \n # Calculate the batch loss.\n loss = criterion(outputs.view(-1, vocab_size), captions.view(-1))\n \n # Backward pass.\n loss.backward()\n \n # Update the parameters in the optimizer.\n optimizer.step()\n \n # Get training statistics.\n stats = 'Epoch [%d/%d], Step [%d/%d], Loss: %.4f, Perplexity: %5.4f' % (epoch, num_epochs, i_step, total_step, loss.item(), np.exp(loss.item()))\n \n # Print training statistics (on same line).\n print('\\r' + stats, end=\"\")\n sys.stdout.flush()\n \n # Print training statistics to file.\n f.write(stats + '\\n')\n f.flush()\n \n # Print training statistics (on different line).\n if i_step % print_every == 0:\n print('\\r' + stats)\n \n # Save the weights.\n if epoch % save_every == 0:\n torch.save(decoder.state_dict(), os.path.join('./models', 'decoder-%d-bs128-voc4_lr001_bftrue.pkl' % epoch))\n torch.save(encoder.state_dict(), os.path.join('./models', 'encoder-%d-bs128-voc4_lr001_bftrue.pkl' % epoch))\n\n# Close the training log file.\nf.close()", "Epoch [1/10], Step [100/3236], Loss: 3.5249, Perplexity: 33.9510\nEpoch [1/10], Step [200/3236], Loss: 3.2077, Perplexity: 24.72255\nEpoch [1/10], Step [300/3236], Loss: 3.0764, Perplexity: 21.6796\nEpoch [1/10], Step [400/3236], Loss: 3.0551, Perplexity: 21.2224\nEpoch [1/10], Step [500/3236], Loss: 2.9446, Perplexity: 19.0025\nEpoch [1/10], Step [600/3236], Loss: 3.5423, Perplexity: 34.5453\nEpoch [1/10], Step [700/3236], Loss: 3.1733, Perplexity: 23.8861\nEpoch [1/10], Step [800/3236], Loss: 2.7361, Perplexity: 15.42696\nEpoch [1/10], Step [900/3236], Loss: 2.4469, Perplexity: 11.5523\nEpoch [1/10], Step [1000/3236], Loss: 2.6209, Perplexity: 13.7478\nEpoch [1/10], Step [1100/3236], Loss: 2.4891, Perplexity: 12.0506\nEpoch [1/10], Step [1200/3236], Loss: 2.6268, Perplexity: 13.8300\nEpoch [1/10], Step [1300/3236], Loss: 2.4437, Perplexity: 11.5152\nEpoch [1/10], Step [1400/3236], Loss: 2.3978, Perplexity: 10.9989\nEpoch [1/10], Step [1500/3236], Loss: 2.5592, Perplexity: 12.9253\nEpoch [1/10], Step 
[1600/3236], Loss: 2.5500, Perplexity: 12.8072\nEpoch [1/10], Step [1700/3236], Loss: 2.3900, Perplexity: 10.9134\nEpoch [1/10], Step [1800/3236], Loss: 2.3653, Perplexity: 10.6472\nEpoch [1/10], Step [1900/3236], Loss: 2.3138, Perplexity: 10.1132\nEpoch [1/10], Step [2000/3236], Loss: 2.5513, Perplexity: 12.8237\nEpoch [1/10], Step [2100/3236], Loss: 2.2804, Perplexity: 9.78094\nEpoch [1/10], Step [2200/3236], Loss: 2.2440, Perplexity: 9.43128\nEpoch [1/10], Step [2300/3236], Loss: 2.3122, Perplexity: 10.0962\nEpoch [1/10], Step [2400/3236], Loss: 2.4054, Perplexity: 11.0830\nEpoch [1/10], Step [2500/3236], Loss: 2.1619, Perplexity: 8.68749\nEpoch [1/10], Step [2600/3236], Loss: 2.1975, Perplexity: 9.00210\nEpoch [1/10], Step [2700/3236], Loss: 2.3298, Perplexity: 10.2759\nEpoch [1/10], Step [2800/3236], Loss: 2.2121, Perplexity: 9.13441\nEpoch [1/10], Step [2900/3236], Loss: 2.1592, Perplexity: 8.66446\nEpoch [1/10], Step [3000/3236], Loss: 2.2311, Perplexity: 9.31012\nEpoch [1/10], Step [3100/3236], Loss: 2.1274, Perplexity: 8.39332\nEpoch [1/10], Step [3200/3236], Loss: 2.1806, Perplexity: 8.85151\nEpoch [2/10], Step [100/3236], Loss: 2.3409, Perplexity: 10.39070\nEpoch [2/10], Step [200/3236], Loss: 2.2948, Perplexity: 9.92227\nEpoch [2/10], Step [300/3236], Loss: 2.7835, Perplexity: 16.1760\nEpoch [2/10], Step [400/3236], Loss: 2.3715, Perplexity: 10.7129\nEpoch [2/10], Step [500/3236], Loss: 2.3021, Perplexity: 9.99476\nEpoch [2/10], Step [600/3236], Loss: 2.1122, Perplexity: 8.26676\nEpoch [2/10], Step [700/3236], Loss: 2.1197, Perplexity: 8.32835\nEpoch [2/10], Step [800/3236], Loss: 2.0020, Perplexity: 7.40374\nEpoch [2/10], Step [900/3236], Loss: 2.0606, Perplexity: 7.85075\nEpoch [2/10], Step [1000/3236], Loss: 2.1091, Perplexity: 8.2410\nEpoch [2/10], Step [1100/3236], Loss: 2.3499, Perplexity: 10.4846\nEpoch [2/10], Step [1200/3236], Loss: 2.1320, Perplexity: 8.43157\nEpoch [2/10], Step [1300/3236], Loss: 2.0885, Perplexity: 8.07287\nEpoch [2/10], Step [1400/3236], Loss: 3.2426, Perplexity: 25.5999\nEpoch [2/10], Step [1500/3236], Loss: 2.0226, Perplexity: 7.55827\nEpoch [2/10], Step [1600/3236], Loss: 2.0741, Perplexity: 7.95726\nEpoch [2/10], Step [1700/3236], Loss: 2.0529, Perplexity: 7.79064\nEpoch [2/10], Step [1800/3236], Loss: 2.1569, Perplexity: 8.64391\nEpoch [2/10], Step [1900/3236], Loss: 2.1514, Perplexity: 8.59655\nEpoch [2/10], Step [2000/3236], Loss: 1.9809, Perplexity: 7.24951\nEpoch [2/10], Step [2100/3236], Loss: 2.8798, Perplexity: 17.8111\nEpoch [2/10], Step [2200/3236], Loss: 1.9769, Perplexity: 7.22021\nEpoch [2/10], Step [2300/3236], Loss: 1.9406, Perplexity: 6.96276\nEpoch [2/10], Step [2400/3236], Loss: 2.0572, Perplexity: 7.82436\nEpoch [2/10], Step [2500/3236], Loss: 2.0030, Perplexity: 7.41145\nEpoch [2/10], Step [2600/3236], Loss: 2.1025, Perplexity: 8.18675\nEpoch [2/10], Step [2700/3236], Loss: 1.9915, Perplexity: 7.32642\nEpoch [2/10], Step [2800/3236], Loss: 2.0553, Perplexity: 7.80948\nEpoch [2/10], Step [2900/3236], Loss: 1.9969, Perplexity: 7.36627\nEpoch [2/10], Step [3000/3236], Loss: 2.0922, Perplexity: 8.10312\nEpoch [2/10], Step [3100/3236], Loss: 2.4134, Perplexity: 11.1719\nEpoch [2/10], Step [3200/3236], Loss: 1.9474, Perplexity: 7.01012\nEpoch [3/10], Step [100/3236], Loss: 1.9895, Perplexity: 7.312221\nEpoch [3/10], Step [200/3236], Loss: 2.0218, Perplexity: 7.55207\nEpoch [3/10], Step [300/3236], Loss: 2.2966, Perplexity: 9.94084\nEpoch [3/10], Step [400/3236], Loss: 1.9364, Perplexity: 6.93401\nEpoch [3/10], Step [500/3236], 
Loss: 1.9944, Perplexity: 7.34817\nEpoch [3/10], Step [600/3236], Loss: 1.8719, Perplexity: 6.50051\nEpoch [3/10], Step [700/3236], Loss: 1.9498, Perplexity: 7.02769\nEpoch [3/10], Step [800/3236], Loss: 2.1352, Perplexity: 8.45898\nEpoch [3/10], Step [900/3236], Loss: 1.8685, Perplexity: 6.47858\nEpoch [3/10], Step [1000/3236], Loss: 1.8063, Perplexity: 6.0880\nEpoch [3/10], Step [1100/3236], Loss: 2.1233, Perplexity: 8.35849\nEpoch [3/10], Step [1200/3236], Loss: 2.1820, Perplexity: 8.86422\nEpoch [3/10], Step [1300/3236], Loss: 2.0544, Perplexity: 7.80245\nEpoch [3/10], Step [1400/3236], Loss: 2.6476, Perplexity: 14.1201\nEpoch [3/10], Step [1500/3236], Loss: 1.9202, Perplexity: 6.82248\nEpoch [3/10], Step [1600/3236], Loss: 1.9066, Perplexity: 6.73010\nEpoch [3/10], Step [1700/3236], Loss: 1.9248, Perplexity: 6.85394\nEpoch [3/10], Step [1800/3236], Loss: 3.6730, Perplexity: 39.3686\nEpoch [3/10], Step [1900/3236], Loss: 2.0762, Perplexity: 7.97375\nEpoch [3/10], Step [2000/3236], Loss: 1.9610, Perplexity: 7.10617\nEpoch [3/10], Step [2100/3236], Loss: 1.9734, Perplexity: 7.19540\nEpoch [3/10], Step [2200/3236], Loss: 2.0315, Perplexity: 7.62530\nEpoch [3/10], Step [2300/3236], Loss: 2.0791, Perplexity: 7.99746\nEpoch [3/10], Step [2400/3236], Loss: 2.5655, Perplexity: 13.0073\nEpoch [3/10], Step [2500/3236], Loss: 1.7970, Perplexity: 6.03158\nEpoch [3/10], Step [2600/3236], Loss: 1.8940, Perplexity: 6.64592\nEpoch [3/10], Step [2700/3236], Loss: 1.8039, Perplexity: 6.07329\nEpoch [3/10], Step [2800/3236], Loss: 2.0745, Perplexity: 7.96065\nEpoch [3/10], Step [2900/3236], Loss: 1.8706, Perplexity: 6.49237\nEpoch [3/10], Step [3000/3236], Loss: 1.9789, Perplexity: 7.23487\nEpoch [3/10], Step [3100/3236], Loss: 2.5673, Perplexity: 13.0306\nEpoch [3/10], Step [3200/3236], Loss: 2.0277, Perplexity: 7.59685\nEpoch [4/10], Step [100/3236], Loss: 1.9070, Perplexity: 6.73283\nEpoch [4/10], Step [200/3236], Loss: 2.2594, Perplexity: 9.57697\nEpoch [4/10], Step [300/3236], Loss: 2.1634, Perplexity: 8.70108\nEpoch [4/10], Step [372/3236], Loss: 1.9685, Perplexity: 7.15973" ] ], [ [ "<a id='step3'></a>\n## Step 3: (Optional) Validate your Model\n\nTo assess potential overfitting, one approach is to assess performance on a validation set. If you decide to do this **optional** task, you are required to first complete all of the steps in the next notebook in the sequence (**3_Inference.ipynb**); as part of that notebook, you will write and test code (specifically, the `sample` method in the `DecoderRNN` class) that uses your RNN decoder to generate captions. That code will prove incredibly useful here. \n\nIf you decide to validate your model, please do not edit the data loader in **data_loader.py**. Instead, create a new file named **data_loader_val.py** containing the code for obtaining the data loader for the validation data. You can access:\n- the validation images at filepath `'/opt/cocoapi/images/train2014/'`, and\n- the validation image caption annotation file at filepath `'/opt/cocoapi/annotations/captions_val2014.json'`.\n\nThe suggested approach to validating your model involves creating a json file such as [this one](https://github.com/cocodataset/cocoapi/blob/master/results/captions_val2014_fakecap_results.json) containing your model's predicted captions for the validation images. Then, you can write your own script or use one that you [find online](https://github.com/tylin/coco-caption) to calculate the BLEU score of your model. 
You can read more about the BLEU score, along with other evaluation metrics (such as METEOR and CIDEr) in section 4.1 of [this paper](https://arxiv.org/pdf/1411.4555.pdf). For more information about how to use the annotation file, check out the [website](http://cocodataset.org/#download) for the COCO dataset.", "_____no_output_____" ] ], [ [ "# (Optional) TODO: Validate your model.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb7842d14ce86b6fc130c948dfa1033b904ed23b
4,829
ipynb
Jupyter Notebook
Appendix/ipynb/AppendixE.ipynb
0todd0000/fdr1d
e551b7695b746543afce7a4b644311af353cdd68
[ "MIT" ]
null
null
null
Appendix/ipynb/AppendixE.ipynb
0todd0000/fdr1d
e551b7695b746543afce7a4b644311af353cdd68
[ "MIT" ]
null
null
null
Appendix/ipynb/AppendixE.ipynb
0todd0000/fdr1d
e551b7695b746543afce7a4b644311af353cdd68
[ "MIT" ]
null
null
null
49.783505
318
0.615655
[ [ [ "# Appendix E: Validation of FDR’s control of false positive node proportion\n\nThis appendix contains RFT and FDR results (Fig.E1) from six experimental datasets and a total of eight different analyses (Table E1) that were conducted but were not included in the main manuscript. The datasets represent a variety of biomechanical modalities, experimental designs and tasks.", "_____no_output_____" ], [ "___\n\n**Table E1**. Experimental datasets and analyses. J and Q are the sample size and number of time nodes, respectively. GRF = ground reaction force. EMG = electromyography. \n\n| Dataset | Source | J | Q | Model | Task | Variables |\n| :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n| A | Caravaggi et al., 2010 | 10 | 101 | Paired t-test | Walking | Plantar arch deformation |\n| B\t| Dorn, Schache & Pandy, 2012 |\t7 |\t100\t| Linear regression\t| Running/ sprinting |\tGRF |\n| C\t| Pataky et al., 2008 | \t59\t| 101 |\tLinear regression | Walking | GRF\n| D\t| Neptune, Wright & Van Den Bogert, 1999 | 15 |\t101\t| Two sample t-test\t| Cutting movement | Kinematics, EMG |\n| E | Pataky et al., 2014 | 10 | 101 | Paired t-test | Walking | Center of pressure |\n| F | Caravaggi et al., 2010 | 19 | 101 | Two sample t-test | Walking | Plantar arch deformation |\n| G | Pataky et al., 2008 |\t20 | 101 | One sample t-test | Walking | GRF |\n| H\t| Besier et al., 2009 | 40 | 100 | Two sample t-test | Walking, running | GRF, muscle forces", "_____no_output_____" ], [ "___\n\n| | |\n|------|------|\n| <img src=\"./figs/A.png\" alt=\"FigA\" width=\"300\"/> | <img src=\"./figs/B.png\" alt=\"FigB\" width=\"300\"/> |\n| <img src=\"./figs/C.png\" alt=\"FigC\" width=\"300\"/> | <img src=\"./figs/D.png\" alt=\"FigD\" width=\"300\"/> |\n| <img src=\"./figs/E.png\" alt=\"FigE\" width=\"300\"/> | <img src=\"./figs/F.png\" alt=\"FigF\" width=\"300\"/> |\n| <img src=\"./figs/G.png\" alt=\"FigG\" width=\"300\"/> | <img src=\"./figs/H.png\" alt=\"FigH\" width=\"300\"/> |\n\n\n\n\n\n\n\n**Figure E1**. Results from six datasets depicting two thresholds: false discovery rate (FDR) and random field theory (RFT). The null hypothesis is rejected if the t value traverses a threshold.", "_____no_output_____" ], [ "## References\n\n1. Besier TF, Fredericson M, Gold GE, Beaupré GS, Delp SL. 2009. Knee muscle forces during walking and running in patellofemoral pain patients and pain-free controls. Journal of Biomechanics 42:898–905. DOI: 10.1016/j.jbiomech.2009.01.032.\n\n1. Caravaggi P, Pataky T, Günther M, Savage R, Crompton R. 2010. Dynamics of longitudinal arch support in relation to walking speed: Contribution of the plantar aponeurosis. Journal of Anatomy 217:254–261. DOI: 10.1111/j.1469-7580.2010.01261.x.\n\n1. Dorn TW, Schache AG, Pandy MG. 2012. Muscular strategy shift in human running: dependence of running speed on hip and ankle muscle performance. Journal of Experimental Biology 215:1944–1956. DOI: 10.1242/jeb.064527.\n\n1. Neptune RR, Wright IC, Van Den Bogert AJ. 1999. Muscle coordination and function during cutting movements. Medicine and Science in Sports and Exercise 31:294–302. DOI: 10.1097/00005768-199902000-00014.\n\n1. Pataky TC, Caravaggi P, Savage R, Parker D, Goulermas JY, Sellers WI, Crompton RH. 2008. New insights into the plantar pressure correlates of walking speed using pedobarographic statistical parametric mapping (pSPM). Journal of Biomechanics 41:1987–1994. DOI: 10.1016/j.jbiomech.2008.03.034.\n\n1. Pataky TC, Robinson MA, Vanrenterghem J, Savage R, Bates KT, Crompton RH. 2014. 
Vector field statistics for objective center-of-pressure trajectory analysis during gait, with evidence of scalar sensitivity to small coordinate system rotations. Gait and Posture 40:255–258. DOI: 10.1016/j.gaitpost.2014.01.023.", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown" ] ]
cb7844c00cd9a1fd6834c4f14b8cb86316219ea6
787,798
ipynb
Jupyter Notebook
TASK 2 - Image to Pencil Sketch/Task 2 - Image to Pencil Sketch.ipynb
Rahul1086-rale/LGMVIP-Task2
d65083849443ad0ca05575d6573ec0564d0fe04f
[ "MIT" ]
null
null
null
TASK 2 - Image to Pencil Sketch/Task 2 - Image to Pencil Sketch.ipynb
Rahul1086-rale/LGMVIP-Task2
d65083849443ad0ca05575d6573ec0564d0fe04f
[ "MIT" ]
null
null
null
TASK 2 - Image to Pencil Sketch/Task 2 - Image to Pencil Sketch.ipynb
Rahul1086-rale/LGMVIP-Task2
d65083849443ad0ca05575d6573ec0564d0fe04f
[ "MIT" ]
null
null
null
3,366.65812
357,608
0.961896
[ [ [ "# Author:Rahul Subhash Rale\n## Let's Grow More - VIP Internship\n### Task 2 - Image to Pencil Sketch with Python\n##### Description --\n\n**We need to read the image in RBG format and then convert it to a grayscale image.This will turn an image into a classic black and white photo.\\\nThen the next thing to do is invert the grayscale image also called negative image, this will be our inverted grayscale image.\\\nInversion can be used to enhance details. \\\nThen we can finally create the pencil sketch by mixing the grayscale image with the inverted blurry image.\\\nThis can be done by dividing the grayscale image by the inverted blurry image.\\\nSince images are just arrays, we can easily do this programmatically using the divide function from the cv2 library in Python.**", "_____no_output_____" ], [ "### Importing Libraries", "_____no_output_____" ] ], [ [ "import cv2\nfrom PIL import Image\nfrom IPython.display import display", "_____no_output_____" ] ], [ [ "### Read the Image", "_____no_output_____" ] ], [ [ "image = cv2.imread('bird.jpg')\nimage = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\ndisplay(Image.fromarray(image))", "_____no_output_____" ] ], [ [ "### Resize the Image", "_____no_output_____" ] ], [ [ "resized = cv2.resize(image,(350,400))\ndisplay(Image.fromarray(resized))", "_____no_output_____" ] ], [ [ "### Convert to Grayscale", "_____no_output_____" ] ], [ [ "grey_image = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)\ndisplay(Image.fromarray(grey_image))", "_____no_output_____" ] ], [ [ "### Convert to Negative Image", "_____no_output_____" ] ], [ [ "image_inv = cv2.bitwise_not(grey_image)\ndisplay(Image.fromarray(image_inv))", "_____no_output_____" ] ], [ [ "### Finally a Pencil Sketch", "_____no_output_____" ] ], [ [ "inverted_image = 255 - grey_image\nblurred = cv2.GaussianBlur(inverted_image, (21, 21), 0)\ninverted_blurred = 255 - blurred\npencil_sketch = cv2.divide(grey_image, inverted_blurred, scale=256.0)\ndisplay(Image.fromarray(pencil_sketch))", "_____no_output_____" ], [ "cv2.imshow(\"original image\", resized)\ncv2.imshow(\"pencil sketch\", pencil_sketch)\ncv2.waitKey(0)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
cb78451589345e763f244d392d864ad01353b1f1
4,750
ipynb
Jupyter Notebook
exercicios/Lista_01.ipynb
filiperobotic/ic
157d6e86d9997361c1d8579196ed6bae898d278f
[ "MIT" ]
2
2019-09-26T12:01:47.000Z
2021-04-16T03:00:00.000Z
exercicios/Lista_01.ipynb
filiperobotic/ic
157d6e86d9997361c1d8579196ed6bae898d278f
[ "MIT" ]
null
null
null
exercicios/Lista_01.ipynb
filiperobotic/ic
157d6e86d9997361c1d8579196ed6bae898d278f
[ "MIT" ]
3
2019-10-10T16:47:18.000Z
2020-08-30T22:38:19.000Z
21.788991
165
0.536421
[ [ [ "# Lista de Exercícios\n\n## Questao 1:\nFaça um Programa que peça um número e então mostre a mensagem:\n> O número informado foi [número].\n", "_____no_output_____" ], [ "## Questao 2\nFaça um Programa que peça dois números e imprima a soma.", "_____no_output_____" ], [ "## Questao 3\nFaça um Programa que peça as 4 notas bimestrais e mostre a média.", "_____no_output_____" ], [ "## Questao 4\nFaça um Programa que converta metros para centímetros.", "_____no_output_____" ], [ "## Questao 5\nFaça um Programa que peça o raio de um círculo, calcule e mostre sua área.", "_____no_output_____" ], [ "## Questao 6\nFaça um Programa que calcule a área de um quadrado, em seguida mostre o dobro desta área para o usuário.", "_____no_output_____" ], [ "## Questao 7\nFaça um Programa que pergunte quanto você ganha por hora e o número de horas trabalhadas no mês. Calcule e mostre o total do seu salário no referido mês.", "_____no_output_____" ], [ "## Questao 8\nFaça um Programa que peça a temperatura em graus Farenheit, transforme e mostre a temperatura em graus Celsius. o C=(5*(F-32)/9).", "_____no_output_____" ], [ "## Questao 9\nFaça um Programa que peça a temperatura em graus Celsius, transforme e mostre em graus Farenheit.", "_____no_output_____" ], [ "## Questao 10\nFaça um Programa que peça 2 números inteiros e um número real. Calcule e mostre: \n- o produto do dobro do primeiro com metade do segundo.\n- a soma do triplo do primeiro com o terceiro.\n- o terceiro elevado ao cubo.", "_____no_output_____" ], [ "## Questao 11\nTendo como dados de entrada a altura de uma pessoa, construa um algoritmo que calcule seu peso ideal, usando a seguinte fórmula: \n \n (72.7*altura) - 58\n", "_____no_output_____" ], [ "## Questao 12\nTendo como dados de entrada a altura e o sexo de uma pessoa, construa um algoritmo que calcule seu peso ideal, utilizando as seguintes fórmulas: \n- Para homens: (72.7*h) - 58 \t\n- Para mulheres: (62.1*h) - 44.7 (h = altura)\n\nPeça o peso da pessoa e informe se ela está dentro, acima ou abaixo do peso.", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
cb784ab325e650c6c0843a003619838d534117f2
434,063
ipynb
Jupyter Notebook
notebooks/Lucy Richardson Deconvolution.ipynb
david-hoffman/pyDecon
7ea9a959a609997f84c94ea09a1f90107f827093
[ "Apache-2.0" ]
10
2018-01-30T09:51:18.000Z
2021-06-02T04:33:04.000Z
notebooks/Lucy Richardson Deconvolution.ipynb
david-hoffman/pyDecon
7ea9a959a609997f84c94ea09a1f90107f827093
[ "Apache-2.0" ]
6
2020-01-31T15:22:58.000Z
2021-02-23T06:18:26.000Z
notebooks/Lucy Richardson Deconvolution.ipynb
VolkerH/pyDecon
7304674947a1efc5b00b41927ffb600ec03a2144
[ "MIT" ]
3
2019-10-15T00:01:47.000Z
2021-03-02T03:09:55.000Z
764.195423
117,334
0.937804
[ [ [ "import matplotlib.pyplot as plt\n%matplotlib inline\n\nimport numpy as np\nimport numexpr as ne\nfrom scipy.ndimage import correlate1d\nfrom dphutils import scale\nimport scipy.signal\nfrom timeit import Timer", "_____no_output_____" ], [ "import pyfftw\n\n# test monkey patching (it doesn't work for rfftn)\na = pyfftw.empty_aligned((512, 512), dtype='complex128')\nb = pyfftw.empty_aligned((512, 512), dtype='complex128')\n\na[:] = np.random.randn(512, 512) + 1j*np.random.randn(512, 512)\nb[:] = np.random.randn(512, 512) + 1j*np.random.randn(512, 512)\n\nt = Timer(lambda: scipy.signal.fftconvolve(a, b, 'same'))\n\nprint('Time with scipy.fftpack: %1.3f seconds' % t.timeit(number=10))\n\n# Monkey patch in fftn and ifftn from pyfftw.interfaces.scipy_fftpack\nscipy.signal.signaltools.fftn = pyfftw.interfaces.scipy_fftpack.fftn\nscipy.signal.signaltools.ifftn = pyfftw.interfaces.scipy_fftpack.ifftn\nscipy.signal.signaltools.fftpack = pyfftw.interfaces.scipy_fftpack\n# can't monkey patch the rfft because it's used through np in the package.\nscipy.signal.fftconvolve(a, b, 'same') # We cheat a bit by doing the planning first\n\n# Turn on the cache for optimum performance\npyfftw.interfaces.cache.enable()\n\nprint('Time with monkey patched scipy_fftpack: %1.3f seconds' %\n t.timeit(number=10))", "Time with scipy.fftpack: 2.300 seconds\nTime with monkey patched scipy_fftpack: 1.120 seconds\n" ], [ "# Testing the best method to enforce positivity constraint.\na = np.random.randn(1e3,1e3)\nprint(a.max(), a.min())\n%timeit a[a<0] = 0\nprint(a.max(), a.min())\n\na = np.random.randn(1e3,1e3)\nb=np.zeros_like(a)\nprint(a.max(), a.min())\n%timeit c = np.minimum(a,b)\nprint(a.max(), a.min())", "4.78898193194 -4.60097336282\nThe slowest run took 19.30 times longer than the fastest. This could mean that an intermediate result is being cached." 
], [ "# testing speedups for numexpr\na = np.random.randn(2**9,2**9)\nb = np.random.randn(2**9,2**9)\n\n%timeit a-b\n%timeit ne.evaluate(\"a-b\")\n\n%timeit a/b\n%timeit ne.evaluate(\"a/b\")", "1000 loops, best of 3: 723 µs per loop\n1000 loops, best of 3: 920 µs per loop\n1000 loops, best of 3: 1.21 ms per loop\n1000 loops, best of 3: 851 µs per loop\n" ], [ "# Standard Richardson-Lucy form skimage\nfrom skimage import color, data, restoration\ncamera = color.rgb2gray(data.camera())\nfrom scipy.signal import convolve2d\npsf = np.ones((5, 5)) / 25\ncamera = convolve2d(camera, psf, 'same')\ncamera += 0.1 * camera.std() * np.random.poisson(size=camera.shape)\ndeconvolved = restoration.richardson_lucy(camera, psf, 30, False)\nplt.matshow(camera, cmap='Greys_r')\nplt.matshow(deconvolved, cmap='Greys_r', vmin=camera.min(), vmax=camera.max())", "_____no_output_____" ], [ "# test monkey patching properly.\nfrom pyfftw.interfaces.numpy_fft import (ifftshift, fftshift, fftn, ifftn,\n rfftn, irfftn)\n\nfrom scipy.signal.signaltools import _rfft_lock, _rfft_mt_safe, _next_regular,_check_valid_mode_shapes,_centered\n\ndef fftconvolve2(in1, in2, mode=\"full\"):\n\n if in1.ndim == in2.ndim == 0: # scalar inputs\n return in1 * in2\n elif not in1.ndim == in2.ndim:\n raise ValueError(\"in1 and in2 should have the same dimensionality\")\n elif in1.size == 0 or in2.size == 0: # empty arrays\n return array([])\n\n s1 = np.array(in1.shape)\n s2 = np.array(in2.shape)\n complex_result = (np.issubdtype(in1.dtype, complex) or\n np.issubdtype(in2.dtype, complex))\n shape = s1 + s2 - 1\n\n if mode == \"valid\":\n _check_valid_mode_shapes(s1, s2)\n\n # Speed up FFT by padding to optimal size for FFTPACK\n fshape = [_next_regular(int(d)) for d in shape]\n fslice = tuple([slice(0, int(sz)) for sz in shape])\n # Pre-1.9 NumPy FFT routines are not threadsafe. For older NumPys, make\n # sure we only call rfftn/irfftn from one thread at a time.\n if not complex_result and (_rfft_mt_safe or _rfft_lock.acquire(False)):\n try:\n ret = (irfftn(rfftn(in1, fshape) *\n rfftn(in2, fshape), fshape)[fslice].\n copy())\n finally:\n if not _rfft_mt_safe:\n _rfft_lock.release()\n else:\n # If we're here, it's either because we need a complex result, or we\n # failed to acquire _rfft_lock (meaning rfftn isn't threadsafe and\n # is already in use by another thread). In either case, use the\n # (threadsafe but slower) SciPy complex-FFT routines instead.\n ret = ifftn(fftn(in1, fshape) *\n fftn(in2, fshape))[fslice].copy()\n if not complex_result:\n ret = ret.real\n\n if mode == \"full\":\n return ret\n elif mode == \"same\":\n return _centered(ret, s1)\n elif mode == \"valid\":\n return _centered(ret, s1 - s2 + 1)\n else:\n raise ValueError(\"Acceptable mode flags are 'valid',\"\n \" 'same', or 'full'.\")", "_____no_output_____" ], [ "%timeit scipy.signal.fftconvolve(camera, psf, 'same')\n%timeit fftconvolve2(camera, psf, 'same')", "10 loops, best of 3: 34.8 ms per loop\n100 loops, best of 3: 15.2 ms per loop\n" ], [ "def tv(im):\n \"\"\"\n Calculate the total variation image\n \n (1) Laasmaa, M.; Vendelin, M.; Peterson, P. Application of Regularized Richardson–Lucy Algorithm for\n Deconvolution of Confocal Microscopy Images. 
Journal of Microscopy 2011, 243 (2), 124–140.\n \n dx.doi.org/10.1111/j.1365-2818.2011.03486.x\n \"\"\"\n \n def m(a, b):\n '''\n As described in (1)\n '''\n return (sign(a)+sign(b))/2*minimum(abs(a), abs(b))\n\n ndim = im.ndim\n g = np.zeros_like(p)\n i = 0\n \n # g stores the gradients of out along each axis\n # e.g. g[0] is the first order finite difference along axis 0\n for ax in range(ndim):\n a = 2*ax\n # backward difference\n g[a] = correlate1d(im, [-1, 1], ax)\n # forward difference\n g[a+1] = correlate1d(im, [-1, 1], ax, origin=-1)\n\n eps = finfo(float).eps\n oym, oyp, oxm, oxp = g \n \n return oxm*oxp/sqrt(oxp**2 +m(oyp,oym)**2+eps)+oym*oyp/sqrt(oyp**2 +m(oxp,oxm)**2+eps)\n\n\ndef rl_update(convolve_method, kwargs):\n '''\n A function that represents the core rl operation:\n $u^{(t+1)} = u^{(t)}\\cdot\\left(\\frac{d}{u^{(t)}\\otimes p}\\otimes \\hat{p}\\right)$\n \n Parameters\n ----------\n image : ndarray\n original image to be deconvolved\n u_tm1 : ndarray\n previous\n u_t\n u_tp1\n psf\n convolve_method\n '''\n \n image = kwargs['image']\n psf = kwargs['psf']\n # use the prediction step to iterate on\n y_t = kwargs['y_t']\n u_t = kwargs['u_t']\n u_tm1 = kwargs['u_tm1']\n g_tm1 = kwargs['g_tm1']\n psf_mirror = psf[::-1, ::-1]\n blur = convolve_method(y_t, psf, 'same')\n relative_blur = ne.evaluate(\"image / blur\")\n blur_blur = convolve_method(relative_blur, psf_mirror, 'same')\n u_tp1 = ne.evaluate(\"y_t*blur_blur\")\n u_tp1[u_tp1 < 0] = 0\n # update\n \n \n kwargs.update(dict(\n u_tm2 = u_tm1,\n u_tm1 = u_t,\n u_t = u_tp1,\n blur = blur_blur,\n g_tm2 = g_tm1,\n g_tm1 = ne.evaluate(\"u_tp1 - y_t\")\n ))\n\n\ndef richardson_lucy(image, psf, iterations=50, clip=False):\n \"\"\"Richardson-Lucy deconvolution.\n Parameters\n ----------\n image : ndarray\n Input degraded image (can be N dimensional).\n psf : ndarray\n The point spread function.\n iterations : int\n Number of iterations. This parameter plays the role of\n regularisation.\n clip : boolean, optional\n True by default. If true, pixel value of the result above 1 or\n under -1 are thresholded for skimage pipeline compatibility.\n Returns\n -------\n im_deconv : ndarray\n The deconvolved image.\n Examples\n --------\n >>> from skimage import color, data, restoration\n >>> camera = color.rgb2gray(data.camera())\n >>> from scipy.signal import convolve2d\n >>> psf = np.ones((5, 5)) / 25\n >>> camera = convolve2d(camera, psf, 'same')\n >>> camera += 0.1 * camera.std() * np.random.standard_normal(camera.shape)\n >>> deconvolved = restoration.richardson_lucy(camera, psf, 5, False)\n References\n ----------\n .. [1] http://en.wikipedia.org/wiki/Richardson%E2%80%93Lucy_deconvolution\n \"\"\"\n # Stolen from the dev branch of skimage because stable branch is slow\n # compute the times for direct convolution and the fft method. 
The fft is of\n # complexity O(N log(N)) for each dimension and the direct method does\n # straight arithmetic (and is O(n*k) to add n elements k times)\n direct_time = np.prod(image.shape + psf.shape)\n fft_time = np.sum([n*np.log(n) for n in image.shape + psf.shape])\n\n # see whether the fourier transform convolution method or the direct\n # convolution method is faster (discussed in scikit-image PR #1792)\n time_ratio = 40.032 * fft_time / direct_time\n\n if time_ratio <= 1 or len(image.shape) > 2:\n convolve_method = fftconvolve2\n else:\n convolve_method = convolve\n\n image = image.astype(np.float)\n psf = psf.astype(np.float)\n im_deconv = 0.5 * np.ones(image.shape)\n \n psf_mirror = psf[::-1, ::-1]\n \n rl_dict = dict(\n image=image,\n u_tm2=None,\n u_tm1=None,\n g_tm2=None,\n g_tm1=None,\n u_t=None,\n y_t=image,\n psf=psf\n )\n \n for i in range(iterations):\n # d/(u_t \\otimes p)\n \n rl_update(convolve_method, rl_dict)\n alpha = 0\n \n if rl_dict['g_tm1'] is not None and rl_dict['g_tm2'] is not None and i > 1:\n alpha = (rl_dict['g_tm1'] * rl_dict['g_tm2']).sum()/(rl_dict['g_tm2']**2).sum()\n alpha = max(min(alpha,1),0)\n \n if alpha != 0: \n if rl_dict['u_tm1'] is not None:\n h1_t = rl_dict['u_t'] - rl_dict['u_tm1']\n h1_t\n if rl_dict['u_tm2'] is not None:\n h2_t = rl_dict['u_t'] - 2 * rl_dict['u_tm1'] + rl_dict['u_tm2']\n else:\n h2_t = 0\n else:\n h1_t = 0\n else:\n h2_t = 0\n h1_t = 0\n \n rl_dict['y_t'] = rl_dict['u_t']+alpha*h1_t+alpha**2/2*h2_t\n rl_dict['y_t'][rl_dict['y_t'] < 0] = 0\n \n im_deconv = rl_dict['u_t']\n if clip:\n im_deconv[im_deconv > 1] = 1\n im_deconv[im_deconv < -1] = -1\n\n return rl_dict", "_____no_output_____" ], [ "deconvolved2 = richardson_lucy(camera, psf, 10)\n\nplt.matshow(camera, cmap='Greys_r')\nplt.matshow(np.real(deconvolved2['u_t']), cmap='Greys_r', vmin=camera.min(), vmax=camera.max())", "_____no_output_____" ], [ "%timeit deconvolved2 = richardson_lucy(camera, psf, 10)", "1 loop, best of 3: 506 ms per loop\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb7854366dae2eb44b8b5c8bf8dc30b1cac2eb19
1,615
ipynb
Jupyter Notebook
mk002-is_triangle.ipynb
karakose77/codewars-katas
ceb4f4128ace1ede95e57e499809dc2be2ca15c6
[ "MIT" ]
null
null
null
mk002-is_triangle.ipynb
karakose77/codewars-katas
ceb4f4128ace1ede95e57e499809dc2be2ca15c6
[ "MIT" ]
null
null
null
mk002-is_triangle.ipynb
karakose77/codewars-katas
ceb4f4128ace1ede95e57e499809dc2be2ca15c6
[ "MIT" ]
null
null
null
20.1875
243
0.51517
[ [ [ "# IS THIS A TRIANGLE?\n\nImplement a method that accepts 3 integer values a, b, c. The method should return true if a triangle can be built with the sides of given length and false in any other case. (All triangles must have sides greater than 0 to be accepted).", "_____no_output_____" ], [ "## Function Definitions", "_____no_output_____" ] ], [ [ "def is_triangle(a, b, c):\n return a > 0 and b > 0 and c > 0 and a + b > c and a + c > b and c + b > a", "_____no_output_____" ] ], [ [ "## Test", "_____no_output_____" ] ], [ [ "print(is_triangle(2,4,5) == True)\nprint(is_triangle(0,3,4) == False)\nprint(is_triangle(3,1,23) == False)", "True\nTrue\nTrue\n" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb78571f83183012557bc49de3894f59463c51de
249,044
ipynb
Jupyter Notebook
datacamp_ml/exploratory_data_analysis/lessons/eda_part1-2.ipynb
issagaliyeva/machine_learning
63f4d39a95147cdac4ef760cb47dffc318793a99
[ "MIT" ]
null
null
null
datacamp_ml/exploratory_data_analysis/lessons/eda_part1-2.ipynb
issagaliyeva/machine_learning
63f4d39a95147cdac4ef760cb47dffc318793a99
[ "MIT" ]
null
null
null
datacamp_ml/exploratory_data_analysis/lessons/eda_part1-2.ipynb
issagaliyeva/machine_learning
63f4d39a95147cdac4ef760cb47dffc318793a99
[ "MIT" ]
null
null
null
246.090909
47,208
0.915633
[ [ [ "# Exploratory Data Analysis\n\nStatistical functions can be found here: https://nbviewer.org/github/AllenDowney/empiricaldist/blob/master/empiricaldist/dist_demo.ipynb", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\n%matplotlib inline\nsns.set()", "_____no_output_____" ] ], [ [ "## Question: What's the average weight of a newborn?", "_____no_output_____" ] ], [ [ "nsfg = pd.read_hdf('data/nsfg.hdf5')\nnsfg.head(3)", "_____no_output_____" ], [ "pounds = nsfg['birthwgt_lb1']\npounds.describe()", "_____no_output_____" ], [ "# replace 98-99 values since it's not weight\npounds = pounds.replace([98, 99], np.nan)\npounds.describe()", "_____no_output_____" ] ], [ [ "### Clean a variable\nIn the NSFG dataset, the variable 'nbrnaliv' records the number of babies born alive at the end of a pregnancy.\n\nIf you use .value_counts() to view the responses, you'll see that the value 8 appears once, and if you consult the codebook, you'll see that this value indicates that the respondent refused to answer the question.\n\nYour job in this exercise is to replace this value with np.nan. Recall from the video how Allen replaced the values 98 and 99 in the ounces column using the .replace() method:", "_____no_output_____" ] ], [ [ "# Replace the value 8 with NaN\nnsfg['nbrnaliv'].replace(8, np.nan, inplace=True)\n\n# Print the values and their frequencies\nprint(nsfg['nbrnaliv'].value_counts())", "1.0 6379\n2.0 100\n3.0 5\nName: nbrnaliv, dtype: int64\n" ] ], [ [ "### Calculate pregnancy length\nFor each pregnancy in the NSFG dataset, the variable 'agecon' encodes the respondent's age at conception, and 'agepreg' the respondent's age at the end of the pregnancy.\n\nBoth variables are recorded as integers with two implicit decimal places, so the value 2575 means that the respondent's age was 25.75", "_____no_output_____" ] ], [ [ "# Select the columns and divide by 100\nagecon = nsfg['agecon'] / 100\nagepreg = nsfg['agepreg'] / 100\n\n# Compute the difference\npreg_length = agepreg - agecon\n\n# Compute summary statistics\nprint(preg_length.describe())", "count 9109.000000\nmean 0.552069\nstd 0.271479\nmin 0.000000\n25% 0.250000\n50% 0.670000\n75% 0.750000\nmax 0.920000\ndtype: float64\n" ], [ "plt.hist(preg_length.dropna(), bins=30)\nplt.show()", "_____no_output_____" ] ], [ [ "### Compare the weights in pre-term and normal births", "_____no_output_____" ] ], [ [ "preterm = nsfg['prglngth'] < 37\nprint('Number of pre-term births:', preterm.sum())", "Number of pre-term births: 3742\n" ], [ "print('Pre-term mean baby weight:', nsfg[preterm]['birthwgt_lb1'].mean())\nprint('Normal mean baby weight:', nsfg[~preterm]['birthwgt_lb1'].mean())", "Pre-term mean baby weight: 6.8250825082508255\nNormal mean baby weight: 8.255738880918221\n" ] ], [ [ "### Investigate age column", "_____no_output_____" ] ], [ [ "def resample_rows_weighted(df, column='wgt2013_2015'):\n \"\"\"Resamples a DataFrame using probabilities proportional to given column.\n Args:\n df: DataFrame\n column: string column name to use as weights\n returns: \n DataFrame\n \"\"\"\n weights = df[column].copy()\n weights /= sum(weights)\n indices = np.random.choice(df.index, len(df), replace=True, p=weights)\n sample = df.loc[indices]\n return sample", "_____no_output_____" ], [ "# Plot the histogram\nplt.hist(agecon, bins=20, histtype='step')\n\n# Label the axes\nplt.xlabel('Age at conception')\nplt.ylabel('Number of pregnancies')\n\n# Show the figure\nplt.show()", 
"_____no_output_____" ], [ "# Resample the data\nnsfg = resample_rows_weighted(nsfg, 'wgt2013_2015')\n\n# Clean the weight variables\npounds = nsfg['birthwgt_lb1'].replace([98, 99], np.nan)\nounces = nsfg['birthwgt_oz1'].replace([98, 99], np.nan)\n\n# Compute total birth weight\nbirth_weight = pounds + ounces/16", "_____no_output_____" ], [ "# Create a Boolean Series for full-term babies\nfull_term = nsfg['prglngth'] >= 37\n\n# Select the weights of full-term babies\nfull_term_weight = birth_weight[full_term]\n\n# Compute the mean weight of full-term babies\nprint(full_term_weight.mean())", "7.414526306434412\n" ], [ "# Filter full-term babies\nfull_term = nsfg['prglngth'] >= 37\n\n# Filter single births\nsingle = nsfg['nbrnaliv'] == 1\n\n# Compute birth weight for single full-term babies\nsingle_full_term_weight = birth_weight[full_term & single]\nprint('Single full-term mean:', single_full_term_weight.mean())\n\n# Compute birth weight for multiple full-term babies\nmult_full_term_weight = birth_weight[full_term & ~single]\nprint('Multiple full-term mean:', mult_full_term_weight.mean())", "Single full-term mean: 7.427415383237823\nMultiple full-term mean: 5.700892857142857\n" ] ], [ [ "## Distributions", "_____no_output_____" ] ], [ [ "gss = pd.read_hdf('data/gss.hdf5', 'gss')\ngss.head(3)", "_____no_output_____" ], [ "def pdf(df, col, normalize=False):\n df2 = df.copy()\n \n if not normalize:\n return df2[col].value_counts().sort_index()\n \n N = 10000\n outcomes = np.zeros(N)\n for i in range(N):\n outcome = np.random.choice(df[col])\n outcomes[i] = outcome\n\n val, cnt = np.unique(outcomes, return_counts=True)\n prop = cnt / len(outcomes)\n return pd.DataFrame({'index': val, 'probability': prop}).dropna()\n ", "_____no_output_____" ], [ "educ_pdf = pdf(gss, 'educ', normalize=True)\neduc_pdf.probability.plot(kind='bar')\nplt.show()", "_____no_output_____" ], [ "!pip install empiricaldist", "Collecting empiricaldist\n Downloading empiricaldist-0.6.2.tar.gz (9.5 kB)\n Preparing metadata (setup.py) ... \u001b[?25ldone\n\u001b[?25hBuilding wheels for collected packages: empiricaldist\n Building wheel for empiricaldist (setup.py) ... 
\u001b[?25ldone\n\u001b[?25h Created wheel for empiricaldist: filename=empiricaldist-0.6.2-py3-none-any.whl size=10733 sha256=1acc32a7d8abe4a2e5ed5477792bd45b7db9b7f26d3356185ca5cf35dda72a82\n Stored in directory: /home/repl/.cache/pip/wheels/2a/ed/75/39cda0596e8f5606df54fb63969c565b49d23869ee64a62435\nSuccessfully built empiricaldist\nInstalling collected packages: empiricaldist\nSuccessfully installed empiricaldist-0.6.2\n" ], [ "import empiricaldist\n\n# Select realinc\nincome = gss['realinc']\n\n# Make the CDF\ncdf_income = empiricaldist.Cdf.from_seq(income)\n\n# Plot it\ncdf_income.plot()\n\n# Label the axes\nplt.xlabel('Income (1986 USD)')\nplt.ylabel('CDF')\nplt.show()", "_____no_output_____" ], [ "income = gss['realinc']\npre95 = gss['year'] < 1995\n\nempiricaldist.Pmf.from_seq(income[pre95]).plot(label='Before 1995')\nempiricaldist.Pmf.from_seq(income[~pre95]).plot(label='After 1995')\nplt.xlabel('Income (1986 USD)')\nplt.ylabel('PMF')\nplt.show()", "_____no_output_____" ], [ "income = gss['realinc']\npre95 = gss['year'] < 1995\n\nempiricaldist.Cdf.from_seq(income[pre95]).plot(label='Before 1995')\nempiricaldist.Cdf.from_seq(income[~pre95]).plot(label='After 1995')\nplt.xlabel('Income (1986 USD)')\nplt.ylabel('PMF')\nplt.show()", "_____no_output_____" ], [ "# Select educ\neduc = gss['educ']\n\n# Bachelor's degree\nbach = (educ >= 16)\n\n# Associate degree\nassc = ((educ >= 14) & (educ < 16))\n\n# High school (12 or fewer years of education)\nhigh = (educ <= 12)\nprint(high.mean())", "0.5308807991547402\n" ], [ "income = gss['realinc']\n\n# Plot the CDFs\nempiricaldist.Cdf.from_seq(income[high]).plot(label='High school')\nempiricaldist.Cdf.from_seq(income[assc]).plot(label='Associate')\nempiricaldist.Cdf.from_seq(income[bach]).plot(label='Bachelor')\n\n# Label the axes\nplt.xlabel('Income (1986 USD)')\nplt.ylabel('CDF')\nplt.legend()\nplt.show()", "_____no_output_____" ], [ "# normal CDF \nfrom scipy.stats import norm\n\nxs = np.linspace(-3, 3)\nys = norm(0, 1).cdf(xs)\n\nplt.plot(xs, ys)\nplt.show()", "_____no_output_____" ], [ "xs = np.linspace(-3, 3)\nys = norm(0, 1).pdf(xs)\n\nplt.plot(xs, ys)\nplt.show()", "_____no_output_____" ], [ "# Extract realinc and compute its log\nincome = gss['realinc']\nlog_income = np.log10(income)\n\n# Compute mean and standard deviation\nmean = np.mean(log_income)\nstd = np.std(log_income)\nprint(mean, std)\n\n# Make a norm object\nfrom scipy.stats import norm\ndist = norm(mean, std)", "4.371148677934171 0.42900437330100427\n" ], [ "# Evaluate the model CDF\nxs = np.linspace(2, 5.5)\nys = dist.cdf(xs)\n\n# Plot the model CDF\nplt.clf()\nplt.plot(xs, ys, color='gray')\n\n# Create and plot the Cdf of log_income\nempiricaldist.Cdf.from_seq(log_income).plot()\n \n# Label the axes\nplt.xlabel('log10 of realinc')\nplt.ylabel('CDF')\nplt.show()", "_____no_output_____" ], [ "# Evaluate the normal PDF\nxs = np.linspace(2, 5.5)\nys = dist.pdf(xs)\n\n# Plot the model PDF\nplt.clf()\nplt.plot(xs, ys, color='gray')\n\n# Plot the data KDE\nsns.kdeplot(log_income)\n\n# Label the axes\nplt.xlabel('log10 of realinc')\nplt.ylabel('PDF')\nplt.show()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb785baa5a2cebf0d9345ec521d5a4646cae622d
79,951
ipynb
Jupyter Notebook
19-Spatial-Analysis-and-Cartography/projection-mapping-california.ipynb
zh-yao/urban-data-science
59afcff905649c5f8d1f8256ec37f28496e0c740
[ "MIT" ]
7
2019-12-11T20:42:15.000Z
2019-12-19T07:23:25.000Z
19-Spatial-Analysis-and-Cartography/projection-mapping-california.ipynb
zh-yao/urban-data-science
59afcff905649c5f8d1f8256ec37f28496e0c740
[ "MIT" ]
null
null
null
19-Spatial-Analysis-and-Cartography/projection-mapping-california.ipynb
zh-yao/urban-data-science
59afcff905649c5f8d1f8256ec37f28496e0c740
[ "MIT" ]
3
2018-12-15T10:51:43.000Z
2018-12-21T09:13:54.000Z
365.073059
37,160
0.923516
[ [ [ "# Using matplotlib basemap to project California data", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport pandas as pd, numpy as np, matplotlib.pyplot as plt\nfrom geopandas import GeoDataFrame\nfrom mpl_toolkits.basemap import Basemap\nfrom shapely.geometry import Point", "_____no_output_____" ], [ "# define basemap colors\nland_color = '#F6F6F6'\nwater_color = '#D2F5FF'\ncoastline_color = '#333333'\nborder_color = '#999999'", "_____no_output_____" ], [ "# load the point data and select only points in california\ndf = pd.read_csv('data/usa-latlong.csv')\nusa_points = GeoDataFrame(df)\nusa_points['geometry'] = usa_points.apply(lambda row: Point(row['longitude'], row['latitude']), axis=1)\nstates = GeoDataFrame.from_file('data/states_21basic/states.shp')\ncalifornia = states[states['STATE_NAME']=='California']['geometry']\ncalifornia_polygon = california.iloc[0]\ncalifornia_points = usa_points[usa_points.within(california_polygon)]", "_____no_output_____" ], [ "# first define a transverse mercator projection\nmap_width_m = 1000 * 1000\nmap_height_m = 1200 * 1000\ntarget_crs = {'datum':'WGS84',\n 'ellps':'WGS84',\n 'proj':'tmerc',\n 'lon_0':-119,\n 'lat_0':37.5}", "_____no_output_____" ], [ "# plot the map\nfig_width = 6\nplt.figure(figsize=[fig_width, fig_width * map_height_m / float(map_width_m)])\n\nm = Basemap(ellps=target_crs['ellps'],\n projection=target_crs['proj'],\n lon_0=target_crs['lon_0'], \n lat_0=target_crs['lat_0'],\n width=map_width_m, \n height=map_height_m,\n resolution='l',\n area_thresh=10000)\n\nm.drawcoastlines(color=coastline_color)\nm.drawcountries(color=border_color)\nm.fillcontinents(color=land_color, lake_color=water_color)\nm.drawstates(color=border_color)\nm.drawmapboundary(fill_color=water_color)\n\nx, y = m(np.array(california_points['longitude']), np.array(california_points['latitude']))\nm.scatter(x, y, s=80, color='r', edgecolor='#333333', alpha=0.4, zorder=10)\n\nplt.show()", "_____no_output_____" ], [ "# next define an albers projection for california\ntarget_crs = {'datum':'NAD83',\n 'ellps':'GRS80',\n 'proj':'aea', \n 'lat_1':35, \n 'lat_2':39, \n 'lon_0':-119, \n 'lat_0':37.5, \n 'x_0':map_width_m/2, \n 'y_0':map_height_m/2,\n 'units':'m'}", "_____no_output_____" ], [ "# plot the map\nfig_width = 6\nplt.figure(figsize=[fig_width, fig_width * map_height_m / float(map_width_m)])\n\nm = Basemap(ellps=target_crs['ellps'],\n projection=target_crs['proj'],\n lat_1=target_crs['lat_1'], \n lat_2=target_crs['lat_2'], \n lon_0=target_crs['lon_0'], \n lat_0=target_crs['lat_0'],\n width=map_width_m, \n height=map_height_m,\n resolution='l',\n area_thresh=10000)\n\nm.drawcoastlines(color=coastline_color)\nm.drawcountries(color=border_color)\nm.fillcontinents(color=land_color, lake_color=water_color)\nm.drawstates(color=border_color)\nm.drawmapboundary(fill_color=water_color)\n\nx, y = m(np.array(california_points['longitude']), np.array(california_points['latitude']))\nm.scatter(x, y, s=80, color='r', edgecolor='#333333', alpha=0.4, zorder=10)\n\nplt.show()", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ] ]
cb785e1c4093021180dc21cc4e21c19bbb2d973b
10,885
ipynb
Jupyter Notebook
6.paper_figures/summarize_compound_reproducibility_scores.ipynb
broadinstitute/lincs-profiling-comparison
075c3bc60eeb3934fc42c30bae6aeed8cda1cd6d
[ "BSD-3-Clause" ]
1
2021-07-20T07:47:02.000Z
2021-07-20T07:47:02.000Z
6.paper_figures/summarize_compound_reproducibility_scores.ipynb
broadinstitute/lincs-profiling-comparison
075c3bc60eeb3934fc42c30bae6aeed8cda1cd6d
[ "BSD-3-Clause" ]
19
2020-10-24T20:55:27.000Z
2021-08-13T16:26:30.000Z
6.paper_figures/summarize_compound_reproducibility_scores.ipynb
broadinstitute/lincs-profiling-comparison
075c3bc60eeb3934fc42c30bae6aeed8cda1cd6d
[ "BSD-3-Clause" ]
3
2020-10-24T18:14:07.000Z
2021-06-24T17:36:25.000Z
45.92827
408
0.566284
[ [ [ "## Summarize all common compounds and their percent strong scores", "_____no_output_____" ] ], [ [ "suppressPackageStartupMessages(library(dplyr))\nsuppressPackageStartupMessages(library(ggplot2))\nsuppressPackageStartupMessages(library(patchwork))\n\nsource(\"viz_themes.R\")\nsource(\"plotting_functions.R\")\nsource(\"data_functions.R\")", "_____no_output_____" ], [ "results_dir <- file.path(\"../1.Data-exploration/Profiles_level4/results/\")", "_____no_output_____" ], [ "# First, obtain the threshold to consider strong phenotype\ncell_painting_pr_df <- load_percent_strong(assay = \"cellpainting\", results_dir = results_dir)\nl1000_pr_df <- load_percent_strong(assay = \"l1000\", results_dir = results_dir)\n\npr_df <- dplyr::bind_rows(cell_painting_pr_df, l1000_pr_df)\npr_df$dose <- factor(pr_df$dose, levels = dose_order)\n\nthreshold_df <- pr_df %>%\n dplyr::filter(type == 'non_replicate') %>%\n dplyr::group_by(assay, dose) %>%\n dplyr::summarise(threshold = quantile(replicate_correlation, 0.95))\n\nthreshold_plot_ready_df <- threshold_df %>% reshape2::dcast(dose ~ assay, value.var = \"threshold\")", "_____no_output_____" ], [ "# Next, get the median pairwise correlations and determine if they pass the threshold\ncell_painting_comp_df <- load_median_correlation_scores(assay = \"cellpainting\", results_dir = results_dir)\nl1000_comp_df <- load_median_correlation_scores(assay = \"l1000\", results_dir = results_dir)\n\n# Note that the variable significant_compounds contains ALL compounds and a variable indicating if they pass the threshold\nsignificant_compounds_df <- cell_painting_comp_df %>%\n dplyr::left_join(l1000_comp_df, by = c(\"dose\", \"compound\"), suffix = c(\"_cellpainting\", \"_l1000\")) %>%\n tidyr::drop_na() %>%\n dplyr::left_join(threshold_df %>% dplyr::filter(assay == \"Cell Painting\"), by = \"dose\") %>%\n dplyr::left_join(threshold_df %>% dplyr::filter(assay == \"L1000\"), by = \"dose\", suffix = c(\"_cellpainting\", \"_l1000\")) %>%\n dplyr::mutate(\n pass_cellpainting_thresh = median_replicate_score_cellpainting > threshold_cellpainting,\n pass_l1000_thresh = median_replicate_score_l1000 > threshold_l1000\n ) %>%\n dplyr::mutate(pass_both = pass_cellpainting_thresh + pass_l1000_thresh) %>%\n dplyr::mutate(pass_both = ifelse(pass_both == 2, TRUE, FALSE)) %>%\n dplyr::select(\n compound,\n dose,\n median_replicate_score_cellpainting,\n median_replicate_score_l1000,\n pass_cellpainting_thresh,\n pass_l1000_thresh,\n pass_both\n )\n\n# Count in how many doses the particular compound was reproducible\ncp_reprod_count_df <- significant_compounds_df %>%\n dplyr::filter(pass_cellpainting_thresh) %>%\n dplyr::group_by(compound) %>%\n dplyr::count() %>%\n dplyr::rename(cell_painting_num_reproducible = n)\n\nl1000_reprod_count_df <- significant_compounds_df %>%\n dplyr::filter(pass_l1000_thresh) %>%\n dplyr::group_by(compound) %>%\n dplyr::count() %>%\n dplyr::rename(l1000_num_reproducible = n)\n\nsignificant_compounds_df <- significant_compounds_df %>%\n dplyr::left_join(cp_reprod_count_df, by = \"compound\") %>%\n dplyr::left_join(l1000_reprod_count_df, by = \"compound\") %>%\n tidyr::replace_na(list(l1000_num_reproducible = 0, cell_painting_num_reproducible = 0)) %>%\n dplyr::mutate(total_reproducible = cell_painting_num_reproducible + l1000_num_reproducible)\n\nsignificant_compounds_df$dose <- factor(significant_compounds_df$dose, levels = dose_order)\nsignificant_compounds_df$compound <- 
tolower(significant_compounds_df$compound)\n\nprint(length(unique(significant_compounds_df$compound)))", "Warning message:\n“Column `dose` joining character vector and factor, coercing into character vector”\nWarning message:\n“Column `dose` joining character vector and factor, coercing into character vector”\n" ], [ "# Output file for further use\noutput_file <- file.path(\"data\", \"significant_compounds_by_threshold_both_assays.tsv.gz\")\nsignificant_compounds_df %>% readr::write_tsv(output_file)\n\nprint(dim(significant_compounds_df))\nhead(significant_compounds_df, 3)", "[1] 7962 10\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
cb78669cd69d5b23d82d20483ad816df1e773fcb
43,585
ipynb
Jupyter Notebook
examples/notebooks/parameter-values.ipynb
dalbamont/PyBaMM
23b29273806f514e3725c67c30d25cc50b57a4f4
[ "BSD-3-Clause" ]
1
2021-03-06T15:10:34.000Z
2021-03-06T15:10:34.000Z
examples/notebooks/parameter-values.ipynb
dalbamont/PyBaMM
23b29273806f514e3725c67c30d25cc50b57a4f4
[ "BSD-3-Clause" ]
null
null
null
examples/notebooks/parameter-values.ipynb
dalbamont/PyBaMM
23b29273806f514e3725c67c30d25cc50b57a4f4
[ "BSD-3-Clause" ]
null
null
null
86.822709
29,768
0.830148
[ [ [ "# Parameter Values\n\nIn this notebook, we explain how parameter values are set for a model. Information on how to add parameter values is provided in our [online documentation](https://pybamm.readthedocs.io/en/latest/tutorials/add-parameter-values.html)", "_____no_output_____" ], [ "## Setting up parameter values", "_____no_output_____" ] ], [ [ "%pip install pybamm -q # install PyBaMM if it is not installed\nimport pybamm\nimport tests\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\nfrom pprint import pprint\nos.chdir(pybamm.__path__[0]+'/..')", "_____no_output_____" ] ], [ [ "In `pybamm`, the object that sets parameter values for a model is the `ParameterValues` class, which extends `dict`. This takes the values of the parameters as input, which can be either a dictionary,", "_____no_output_____" ] ], [ [ "param_dict = {\"a\": 1, \"b\": 2, \"c\": 3}\nparameter_values = pybamm.ParameterValues(param_dict)\nprint(\"parameter values are {}\".format(parameter_values))", "parameter values are {'a': 1,\n 'b': 2,\n 'c': 3}\n" ] ], [ [ "or a csv file,", "_____no_output_____" ] ], [ [ "f = open(\"param_file.csv\", \"w+\")\nf.write(\n\"\"\"\nName [units],Value\na, 4\nb, 5\nc, 6\n\"\"\"\n)\nf.close()\n\nparameter_values = pybamm.ParameterValues(\"param_file.csv\")\nprint(\"parameter values are {}\".format(parameter_values))", "parameter values are {'a': 4,\n 'b': 5,\n 'c': 6}\n" ] ], [ [ "or using one of the pre-set chemistries", "_____no_output_____" ] ], [ [ "print(\"Marquis2019 chemistry set is {}\".format(pybamm.parameter_sets.Marquis2019))\nchem_parameter_values = pybamm.ParameterValues(chemistry=pybamm.parameter_sets.Marquis2019)\nprint(\"Negative current collector thickness is {} m\".format(\n chem_parameter_values[\"Negative current collector thickness [m]\"])\n)", "Marquis2019 chemistry set is {'chemistry': 'lithium-ion', 'cell': 'kokam_Marquis2019', 'anode': 'graphite_mcmb2528_Marquis2019', 'separator': 'separator_Marquis2019', 'cathode': 'lico2_Marquis2019', 'electrolyte': 'lipf6_Marquis2019', 'experiment': '1C_discharge_from_full_Marquis2019', 'sei': 'example', 'citation': 'marquis2019asymptotic'}\nNegative current collector thickness is 2.5e-05 m\n" ] ], [ [ "We can input functions into the parameter values, either directly (note we bypass the check that the parameter already exists)", "_____no_output_____" ] ], [ [ "def cubed(x):\n return x ** 3\nparameter_values.update({\"cube function\": cubed}, check_already_exists=False)\nprint(\"parameter values are {}\".format(parameter_values))", "parameter values are {'a': 4,\n 'b': 5,\n 'c': 6,\n 'cube function': <function cubed at 0x7f66d0e2d1e0>}\n" ] ], [ [ "or by using `pybamm.load_function` to load from a path to the function or just a name (in which case the whole directory is searched)", "_____no_output_____" ] ], [ [ "f = open(\"squared.py\",\"w+\")\nf.write(\n\"\"\"\ndef squared(x):\n return x ** 2\n\"\"\"\n)\nf.close()\nparameter_values.update({\"square function\": pybamm.load_function(\"squared.py\")}, check_already_exists=False)\nprint(\"parameter values are {}\".format(parameter_values))", "parameter values are {'a': 4,\n 'b': 5,\n 'c': 6,\n 'cube function': <function cubed at 0x7f66d0e2d1e0>,\n 'square function': <function squared at 0x7f673004f048>}\n" ] ], [ [ "## Setting parameters for an expression", "_____no_output_____" ], [ "We represent parameters in models using the classes `Parameter` and `FunctionParameter`. 
These cannot be evaluated directly,", "_____no_output_____" ] ], [ [ "a = pybamm.Parameter(\"a\")\nb = pybamm.Parameter(\"b\")\nc = pybamm.Parameter(\"c\")\nfunc = pybamm.FunctionParameter(\"square function\", {\"a\": a})\n\nexpr = a + b * c\ntry:\n expr.evaluate()\nexcept NotImplementedError as e:\n print(e)", "method self.evaluate() not implemented\n for symbol a of type <class 'pybamm.expression_tree.parameter.Parameter'>\n" ] ], [ [ "However, the `ParameterValues` class can walk through an expression, changing an `Parameter` objects it sees to the appropriate `Scalar` and any `FunctionParameter` object to the appropriate `Function`, and the resulting expression can be evaluated", "_____no_output_____" ] ], [ [ "expr_eval = parameter_values.process_symbol(expr)\nprint(\"{} = {}\".format(expr_eval, expr_eval.evaluate()))", "a + b * c = 34.0\n" ], [ "func_eval = parameter_values.process_symbol(func)\nprint(\"{} = {}\".format(func_eval, func_eval.evaluate()))", "16.0 = 16.0\n" ] ], [ [ "If a parameter needs to be changed often (for example, for convergence studies or parameter estimation), the `InputParameter` class should be used. This is not fixed by parameter values, and its value can be set on evaluation (or on solve):", "_____no_output_____" ] ], [ [ "d = pybamm.InputParameter(\"d\")\nexpr = 2 + d\nexpr_eval = parameter_values.process_symbol(expr)\nprint(\"with d = {}, {} = {}\".format(3, expr_eval, expr_eval.evaluate(inputs={\"d\": 3})))\nprint(\"with d = {}, {} = {}\".format(5, expr_eval, expr_eval.evaluate(inputs={\"d\": 5})))", "with d = 3, 2.0 + d = 5.0\nwith d = 5, 2.0 + d = 7.0\n" ] ], [ [ "## Solving a model", "_____no_output_____" ], [ "The code below shows the entire workflow of:\n\n1. Proposing a toy model\n2. Discretising and solving it first with one set of parameters,\n3. then updating the parameters and solving again\n\nThe toy model used is:\n$$\\frac{\\mathrm{d} u}{\\mathrm{d} t} = -a u$$\n\nwith initial conditions $u(0) = b$. 
The model is first solved with $a = 3, b = 2$, then with $a = 4, b = -1$", "_____no_output_____" ] ], [ [ "# Create model\nmodel = pybamm.BaseModel()\nu = pybamm.Variable(\"u\")\na = pybamm.Parameter(\"a\")\nb = pybamm.Parameter(\"b\")\nmodel.rhs = {u: -a * u}\nmodel.initial_conditions = {u: b}\nmodel.variables = {\"u\": u, \"a\": a, \"b\": b}\n\n# Set parameters, with a as an input ########################\nparameter_values = pybamm.ParameterValues({\"a\": \"[input]\", \"b\": 2})\nparameter_values.process_model(model)\n#############################################################\n\n# Discretise using default discretisation\ndisc = pybamm.Discretisation()\ndisc.process_model(model)\n\n# Solve\nt_eval = np.linspace(0, 2, 30)\node_solver = pybamm.ScipySolver()\nsolution = ode_solver.solve(model, t_eval, inputs={\"a\": 3})\n\n# Post-process, so that u1 can be called at any time t (using interpolation)\nt_sol1 = solution.t\nu1 = solution[\"u\"]\n\n# Solve again with different inputs ###############################\nsolution = ode_solver.solve(model, t_eval, inputs={\"a\": -1})\nt_sol2 = solution.t\nu2 = solution[\"u\"]\n###################################################################\n\n# Plot\nt_fine = np.linspace(0,t_eval[-1],1000)\n\nfig, (ax1, ax2) = plt.subplots(1, 2, figsize=(13,4))\nax1.plot(t_fine, 2 * np.exp(-3 * t_fine), t_sol1, u1(t_sol1), \"o\")\nax1.set_xlabel(\"t\")\nax1.legend([\"2 * exp(-3 * t)\", \"u1\"], loc=\"best\")\nax1.set_title(\"a = 3, b = 2\")\n\nax2.plot(t_fine, 2 * np.exp(t_fine), t_sol2, u2(t_sol2), \"o\")\nax2.set_xlabel(\"t\")\nax2.legend([\"2 * exp(t)\", \"u2\"], loc=\"best\")\nax2.set_title(\"a = -1, b = 2\")\n\n\nplt.tight_layout()\nplt.show()", "_____no_output_____" ], [ "model.rhs", "_____no_output_____" ] ], [ [ "## Printing parameter values\n\nIn most models, it is useful to define dimensionless parameters, which are combinations of other parameters. However, since parameters objects must be processed by the `ParameterValues` class before they can be evaluated, it can be difficult to quickly check the value of a dimensionless parameter. \n\nYou can print all of the dimensionless parameters in a model by using the `print_parameters` function. Note that the `print_parameters` function also gives the dependence of the parameters on C-rate (as some dimensionless parameters vary with C-rate), but we can ignore that here", "_____no_output_____" ] ], [ [ "a = pybamm.Parameter(\"a\")\nb = pybamm.Parameter(\"b\")\nparameter_values = pybamm.ParameterValues({\"a\": 4, \"b\": 3})\nparameters = {\"a\": a, \"b\": b, \"a + b\": a + b, \"a * b\": a * b}\nparam_eval = parameter_values.print_parameters(parameters)\nfor name, (value,C_dependence) in param_eval.items():\n print(\"{}: {}\".format(name, value))", "a: 4.0\nb: 3.0\na + b: 7.0\na * b: 12.0\n" ] ], [ [ "If you provide an output file to `print_parameters`, the parameters will be printed to that output file.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
cb786ad4404fea4672ef78a6259b53202159e2ce
22,683
ipynb
Jupyter Notebook
markdown_generator/talks.ipynb
emhastings/emhastings.github.io
5f90bf5bfb148f4072c96ca3cc654687391f3ac4
[ "MIT" ]
null
null
null
markdown_generator/talks.ipynb
emhastings/emhastings.github.io
5f90bf5bfb148f4072c96ca3cc654687391f3ac4
[ "MIT" ]
null
null
null
markdown_generator/talks.ipynb
emhastings/emhastings.github.io
5f90bf5bfb148f4072c96ca3cc654687391f3ac4
[ "MIT" ]
null
null
null
44.476471
427
0.456201
[ [ [ "# Talks markdown generator for academicpages\n\nAdapted from generator in academicpages\n\nTakes a TSV of talks with metadata and converts them for use with [academicpages.github.io](academicpages.github.io). This is an interactive Jupyter notebook ([see more info here](http://jupyter-notebook-beginner-guide.readthedocs.io/en/latest/what_is_jupyter.html)). The core python code is also in `talks.py`. Run either from the `markdown_generator` folder after replacing `talks.tsv` with one containing your data.\n\nTODO: Make this work with BibTex and other databases, rather than Stuart's non-standard TSV format and citation style.", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport os", "_____no_output_____" ] ], [ [ "## Data format\n\nThe TSV needs to have the following columns: title, type, url_slug, venue, date, location, talk_url, description, with a header at the top. Many of these fields can be blank, but the columns must be in the TSV.\n\n- Fields that cannot be blank: `title`, `url_slug`, `date`. All else can be blank. `type` defaults to \"Talk\" \n- `date` must be formatted as YYYY-MM-DD.\n- `url_slug` will be the descriptive part of the .md file and the permalink URL for the page about the paper. \n - The .md file will be `YYYY-MM-DD-[url_slug].md` and the permalink will be `https://[yourdomain]/talks/YYYY-MM-DD-[url_slug]`\n - The combination of `url_slug` and `date` must be unique, as it will be the basis for your filenames\n\nThis is how the raw file looks (it doesn't look pretty, use a spreadsheet or other program to edit and create).\n\nNote: edit in Excel and save as tsv. Then open in Notepad and save as with utf-8 encoding.\n\nEDIT 8/19/21: Excel doesn't let me save as tsv anymore? works with txt(tab delimited), didn't seem to need notepad but changed filename below", "_____no_output_____" ], [ "## Import TSV\n\nPandas makes this easy with the read_csv function. We are using a TSV, so we specify the separator as a tab, or `\\t`.\n\nI found it important to put this data in a tab-separated values format, because there are a lot of commas in this kind of data and comma-separated values can get messed up. However, you can modify the import statement, as pandas also has read_excel(), read_json(), and others.", "_____no_output_____" ] ], [ [ "talks = pd.read_csv(\"talks.txt\", sep=\"\\t\", header=0)\ntalks", "_____no_output_____" ] ], [ [ "## Escape special characters\n\nYAML is very picky about how it takes a valid string, so we are replacing single and double quotes (and ampersands) with their HTML encoded equivilents. This makes them look not so readable in raw format, but they are parsed and rendered nicely.", "_____no_output_____" ] ], [ [ "html_escape_table = {\n \"&\": \"&amp;\",\n '\"': \"&quot;\",\n \"'\": \"&apos;\"\n }\n\ndef html_escape(text):\n if type(text) is str:\n return \"\".join(html_escape_table.get(c,c) for c in text)\n else:\n return \"False\"", "_____no_output_____" ] ], [ [ "## Creating the markdown files\n\nThis is where the heavy lifting is done. This loops through all the rows in the TSV dataframe, then starts to concatentate a big string (```md```) that contains the markdown for each type. 
It does the YAML metadata first, then does the description for the individual page.", "_____no_output_____" ] ], [ [ "loc_dict = {}\n\nfor row, item in talks.iterrows():\n \n md_filename = str(item.date) + \"-\" + item.url_slug + \".md\"\n html_filename = str(item.date) + \"-\" + item.url_slug \n year = item.date[:4]\n \n md = \"---\\ntitle: \\\"\" + item.title + '\"\\n'\n md += \"collection: talks\" + \"\\n\"\n \n if len(str(item.type)) > 3:\n md += 'type: \"' + item.type + '\"\\n'\n else:\n md += 'type: \"Talk\"\\n'\n \n md += \"permalink: /talks/\" + html_filename + \"\\n\"\n \n if len(str(item.venue)) > 3:\n md += 'venue: \"' + item.venue + '\"\\n'\n \n if len(str(item.location)) > 3:\n md += \"date: \" + str(item.date) + \"\\n\"\n \n if len(str(item.location)) > 3:\n md += 'location: \"' + str(item.location) + '\"\\n'\n \n md += 'excerpt: \"' \n \n if len(str(item.description)) > 3:\n md += item.description + \" \\n\"\n \n if len(str(item.talk_url)) > 3:\n md += \"[Download](\" + item.talk_url + \")\" \n \n #close excerpt\n md += '\"\\n' \n \n if len(str(item.tags)) > 3:\n md += \"tags: [\" + html_escape(item.tags) + \"]\"\n \n md += \"\\n---\\n\"\n \n #start of main text\n \n md += \"\\n\" + item.type + \" \\n\" + item.venue + \" \\n\" \n \n if len(str(item.location)) > 3:\n md += html_escape(item.location) + \"\\n\"\n else:\n md += \"Virtual\\n\"\n \n if len(str(item.description)) > 3:\n md += \"\\n\" + html_escape(item.description) + \"\\n\"\n \n if len(str(item.image)) > 3:\n md += \"\\n\" + html_escape(item.image) + \"\\n\"\n if len(str(item.attr)) > 3:\n md += \"\\n_Photo by \" + html_escape(item.attr) + \"._\\n\"\n else:\n md += \"\\n_Photo by Emily Hastings._\\n\"\n \n if len(str(item.talk_url)) > 3:\n if len(str(item.type)) > 3 :\n if item.type == 'Poster':\n md += \"\\n[Download poster here](\" + item.talk_url + \")\\n\" \n else: \n md += \"\\n[Download slides here](\" + item.talk_url + \")\\n\" \n else:\n md += \"\\n[Download here](\" + item.talk_url + \")\\n\" \n \n md_filename = os.path.basename(md_filename)\n #print(md)\n \n with open(\"../_talks/\" + md_filename, 'w') as f:\n f.write(md)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb7880a8aaacc6f7ca71a149ea6debcb2d3eda1f
141,928
ipynb
Jupyter Notebook
_notebooks/2021-07-22-OverAndUnderFitting.ipynb
CecileGallioz/blog
bd7dfb1a0fcd5d79fb0d94a7132dce4bcb1fab78
[ "Apache-2.0" ]
null
null
null
_notebooks/2021-07-22-OverAndUnderFitting.ipynb
CecileGallioz/blog
bd7dfb1a0fcd5d79fb0d94a7132dce4bcb1fab78
[ "Apache-2.0" ]
12
2021-04-22T16:37:30.000Z
2022-03-07T08:14:26.000Z
_notebooks/2021-07-22-OverAndUnderFitting.ipynb
CecileGallioz/blog
bd7dfb1a0fcd5d79fb0d94a7132dce4bcb1fab78
[ "Apache-2.0" ]
null
null
null
260.418349
51,808
0.921425
[ [ [ "# Under and over fitting\n> Validation and learning curves\n- toc: true\n- badges: false\n- comments: true\n- author: Cécile Gallioz\n- categories: [sklearn]", "_____no_output_____" ], [ "# Underfitting vs. Overfitting - Actual vs estimated function\n\n[scikit-learn documentation](https://scikit-learn.org/stable/auto_examples/model_selection/plot_underfitting_overfitting.html#sphx-glr-auto-examples-model-selection-plot-underfitting-overfitting-py)\n\nThis example demonstrates the problems of underfitting and overfitting and how we can use linear regression with polynomial features to approximate nonlinear functions. \n\nThe plot shows the function that we want to approximate, which is a part of the cosine function. In addition, the samples from the real function and the approximations of different models are displayed. The models have polynomial features of different degrees. \n\nWe can see that a linear function (polynomial with degree 1) is not sufficient to fit the training samples. This is called underfitting. \n\nA polynomial of degree 4 approximates the true function almost perfectly. \n\nHowever, for higher degrees the model will overfit the training data, i.e. it learns the noise of the training data. \n\nWe evaluate quantitatively overfitting / underfitting by using cross-validation. We calculate the mean squared error (MSE) on the validation set, the higher, the less likely the model generalizes correctly from the training data.", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.model_selection import cross_val_score", "_____no_output_____" ], [ "def true_fun(X):\n return np.cos(1.5 * np.pi * X)\n\nnp.random.seed(0)\n\nn_samples = 50\ndegrees = [1, 4, 15]\n\nX = np.sort(np.random.rand(n_samples))\ny = true_fun(X) + np.random.randn(n_samples) * 0.1", "_____no_output_____" ], [ "plt.figure(figsize=(15, 5))\n\nfor i in range(len(degrees)):\n ax = plt.subplot(1, len(degrees), i + 1)\n plt.setp(ax, xticks=(), yticks=())\n\n polynomial_features = PolynomialFeatures(degree=degrees[i],\n include_bias=False)\n linear_regression = LinearRegression()\n pipeline = Pipeline([(\"polynomial_features\", polynomial_features),\n (\"linear_regression\", linear_regression)])\n pipeline.fit(X[:, np.newaxis], y)\n\n # Evaluate the models using crossvalidation\n scores = cross_val_score(pipeline, X[:, np.newaxis], y,\n scoring=\"neg_mean_squared_error\", cv=10)\n\n X_test = np.linspace(0, 1, 100)\n plt.plot(X_test, pipeline.predict(X_test[:, np.newaxis]), label=\"Model\")\n plt.plot(X_test, true_fun(X_test), label=\"True function\")\n plt.scatter(X, y, edgecolor='b', s=20, label=\"Samples\")\n plt.xlabel(\"x\")\n plt.ylabel(\"y\")\n plt.xlim((0, 1))\n plt.ylim((-2, 2))\n plt.legend(loc=\"best\")\n plt.title(\"Degree {}\\nMSE {:.2e}(+/- {:.2e})\".format(\n degrees[i], -scores.mean(), scores.std()))\n \nplt.show()", "_____no_output_____" ] ], [ [ "# Underfitting vs. 
Overfitting - Train vs test error", "_____no_output_____" ], [ "## Preparation", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport time\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.model_selection import cross_validate\nfrom sklearn.model_selection import ShuffleSplit\nfrom sklearn.model_selection import validation_curve\nfrom sklearn.model_selection import learning_curve", "_____no_output_____" ], [ "from sklearn.datasets import fetch_california_housing\nmyDataFrame = fetch_california_housing(as_frame=True)", "_____no_output_____" ], [ "data, target = myDataFrame.data, myDataFrame.target\ntarget *= 100 # rescale the target in k$", "_____no_output_____" ], [ "print(f\"The dataset data contains {data.shape[0]} samples and {data.shape[1]} features\")", "The dataset data contains 20640 samples and 8 features\n" ], [ "data.dtypes", "_____no_output_____" ] ], [ [ "## Validation curve", "_____no_output_____" ] ], [ [ "regressor = DecisionTreeRegressor()", "_____no_output_____" ], [ "cv = ShuffleSplit(n_splits=30, test_size=0.2)", "_____no_output_____" ], [ "cv_results = cross_validate(regressor, data, target,\n cv=cv, scoring=\"neg_mean_absolute_error\",\n return_train_score=True, n_jobs=2)", "_____no_output_____" ], [ "scores = cv_results[\"test_score\"]\nfit_time = cv_results[\"fit_time\"]\nprint(\"The accuracy is \"\n f\"{scores.mean():.3f} +/- {scores.std():.3f}, for {fit_time.mean():.3f} seconds\")", "The accuracy is -46.088 +/- 0.886, for 0.135 seconds\n" ], [ "cv_results = pd.DataFrame(cv_results)", "_____no_output_____" ], [ "scores = pd.DataFrame()", "_____no_output_____" ], [ "scores[[\"train error\", \"test error\"]] = -cv_results[\n [\"train_score\", \"test_score\"]]", "_____no_output_____" ], [ "scores.plot.hist(bins=50, edgecolor=\"black\", density=True)\nplt.xlabel(\"Mean absolute error (k$)\")\n_ = plt.title(\"Train and test errors distribution via cross-validation\")", "_____no_output_____" ] ], [ [ "Here, we observe a **small training error** (actually zero), meaning that\nthe model is **not under-fitting**: it is flexible enough to capture any\nvariations present in the training set.\n\nHowever the **significantly larger testing error** tells us that the\nmodel is **over-fitting**: the model has memorized many variations of the\ntraining set that could be considered \"noisy\" because they do not generalize\nto help us make good prediction on the test set.", "_____no_output_____" ] ], [ [ "%%time\nmax_depth = [1, 5, 10, 15, 20, 25]\ntrain_scores, test_scores = validation_curve(\n regressor, data, target, param_name=\"max_depth\", param_range=max_depth,\n cv=cv, scoring=\"neg_mean_absolute_error\", n_jobs=2)\ntrain_errors, test_errors = -train_scores, -test_scores", "Wall time: 8.67 s\n" ], [ "plt.plot(max_depth, train_errors.mean(axis=1), label=\"Training error\")\nplt.plot(max_depth, test_errors.mean(axis=1), label=\"Testing error\")\nplt.legend()\n\nplt.xlabel(\"Maximum depth of decision tree\")\nplt.ylabel(\"Mean absolute error (k$)\")\n_ = plt.title(\"Validation curve for decision tree\")", "_____no_output_____" ], [ "plt.errorbar(max_depth, train_errors.mean(axis=1),\n yerr=train_errors.std(axis=1), label='Training error')\nplt.errorbar(max_depth, test_errors.mean(axis=1),\n yerr=test_errors.std(axis=1), label='Testing error')\nplt.legend()\n\nplt.xlabel(\"Maximum depth of decision tree\")\nplt.ylabel(\"Mean absolute error (k$)\")\n_ = plt.title(\"Validation curve for decision tree\")", 
"_____no_output_____" ] ], [ [ "## Learning curve\nLet's compute the learning curve for a decision tree and vary the\nproportion of the training set from 10% to 100%.", "_____no_output_____" ] ], [ [ "train_sizes = np.linspace(0.1, 1.0, num=5, endpoint=True)\ntrain_sizes", "_____no_output_____" ], [ "cv = ShuffleSplit(n_splits=30, test_size=0.2)", "_____no_output_____" ], [ "results = learning_curve(\n regressor, data, target, train_sizes=train_sizes, cv=cv,\n scoring=\"neg_mean_absolute_error\", n_jobs=2)\ntrain_size, train_scores, test_scores = results[:3]\n# Convert the scores into errors\ntrain_errors, test_errors = -train_scores, -test_scores", "_____no_output_____" ], [ "plt.errorbar(train_size, train_errors.mean(axis=1),\n yerr=train_errors.std(axis=1), label=\"Training error\")\nplt.errorbar(train_size, test_errors.mean(axis=1),\n yerr=test_errors.std(axis=1), label=\"Testing error\")\nplt.legend()\n\nplt.xscale(\"log\")\nplt.xlabel(\"Number of samples in the training set\")\nplt.ylabel(\"Mean absolute error (k$)\")\n_ = plt.title(\"Learning curve for decision tree\")", "_____no_output_____" ] ], [ [ "Looking at the training error alone, we see that we get an error of 0 k$. It\nmeans that the trained model (i.e. decision tree) is clearly overfitting the\ntraining data.\n\nLooking at the testing error alone, we observe that the more samples are\nadded into the training set, the lower the testing error becomes. Also, we\nare searching for the plateau of the testing error for which there is no\nbenefit to adding samples anymore or assessing the potential gain of adding\nmore samples into the training set.\n\nIf we achieve a plateau and adding new samples in the training set does not\nreduce the testing error, we might have reach the Bayes error rate using the\navailable model. Using a more complex model might be the only possibility to\nreduce the testing error further.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ] ]
cb7883bff47b1edaec678a1d7335230b09d47568
43,358
ipynb
Jupyter Notebook
project1.ipynb
TaoHeee/Python-Programming-miniproject
6980a12eeb31669e6a459f943d784ddda809eb86
[ "MIT" ]
null
null
null
project1.ipynb
TaoHeee/Python-Programming-miniproject
6980a12eeb31669e6a459f943d784ddda809eb86
[ "MIT" ]
null
null
null
project1.ipynb
TaoHeee/Python-Programming-miniproject
6980a12eeb31669e6a459f943d784ddda809eb86
[ "MIT" ]
null
null
null
24.195313
68
0.321233
[ [ [ "friends = open(\"friends.txt\",\"r\")", "_____no_output_____" ], [ "friend = list(friends)\nfriend", "_____no_output_____" ], [ "import re", "_____no_output_____" ], [ "for i in range(0,len(friend)):\n if i % 2 == 0: friend[i]=friend[i].replace(\"\\n\",\"\")\n else: friend[i]=re.sub(\"\\D\",\"\",friend[i])", "_____no_output_____" ], [ "friend", "_____no_output_____" ], [ "maps = open(\"map_areacodes_states.txt\",\"r\")", "_____no_output_____" ], [ "fmap = list(maps)\nfmap", "_____no_output_____" ], [ "for i in range(0,len(fmap)):\n if i % 2 == 1: fmap[i]=fmap[i].replace(\"\\n\",\"\")\n else: fmap[i]=re.sub(\"\\D\",\"\",fmap[i])", "_____no_output_____" ], [ "fmap", "_____no_output_____" ], [ "def findlocation(s):\n s=s[0:3]\n for i in range(0,len(fmap)-1):\n if i % 2 == 0: \n if fmap[i]==s: print(fmap[i+1])\n return\n", "_____no_output_____" ], [ "for i in range(0,len(friend)):\n if i % 2 == 0: print(friend[i],\":\")\n else: findlocation(friend[i])", "Ana :\nUtah\nBen :\nNew Jersey\nCory :\nWashington\nDanny :\nNew Jersey\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb788438b8a7cc686f82e96b12a36f5a3aa6a64e
106,931
ipynb
Jupyter Notebook
Tensorflow_2X_Notebooks/Demo48_RNN_FashionMNIST.ipynb
mahnooranjum/Tensorflow_DeepLearning
65ab178d4c17efad01de827062d5c85bdfb9b1ca
[ "MIT" ]
null
null
null
Tensorflow_2X_Notebooks/Demo48_RNN_FashionMNIST.ipynb
mahnooranjum/Tensorflow_DeepLearning
65ab178d4c17efad01de827062d5c85bdfb9b1ca
[ "MIT" ]
null
null
null
Tensorflow_2X_Notebooks/Demo48_RNN_FashionMNIST.ipynb
mahnooranjum/Tensorflow_DeepLearning
65ab178d4c17efad01de827062d5c85bdfb9b1ca
[ "MIT" ]
null
null
null
224.174004
36,922
0.888816
[ [ [ "# **Spit some [tensor] flow**\n\nWe need to learn the intricacies of tensorflow to master deep learning\n\n`Let's get this over with`\n\n", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nimport cv2\nprint(tf.__version__)", "2.2.0\n" ] ], [ [ "## A time series is just a TxD matrix right? \n\nso instead of the rows, each column is a time series data. Don't worry, lemme explain: \n\n1 2 3 4 5 6 7 8 9 10 \n\nWith T = 2 becomes \n\n| X1 | X2 | \n|----|----|\n| 1 | 2 |\n| 2 | 3 |\n| 3 | 4 |\n| 4 | 5 |\n| 5 | 6 |\n| 6 | 7 |\n| 7 | 8 |\n| 8 | 9 |", "_____no_output_____" ] ], [ [ "from tensorflow.keras.layers import Input, LSTM, GRU, Dropout, Dense, Flatten\nfrom tensorflow.keras.models import Model\n", "_____no_output_____" ], [ "from tensorflow.keras.datasets import fashion_mnist\n(X_train, y_train), (X_test, y_test) = fashion_mnist.load_data()", "Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/train-labels-idx1-ubyte.gz\n32768/29515 [=================================] - 0s 0us/step\nDownloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/train-images-idx3-ubyte.gz\n26427392/26421880 [==============================] - 0s 0us/step\nDownloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/t10k-labels-idx1-ubyte.gz\n8192/5148 [===============================================] - 0s 0us/step\nDownloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/t10k-images-idx3-ubyte.gz\n4423680/4422102 [==============================] - 0s 0us/step\n" ], [ "X_train, X_test = X_train / 255.0 , X_test / 255.0 \nprint(X_train.shape)\nprint(X_test.shape)\nprint(y_train.shape)\nprint(y_test.shape)", "(60000, 28, 28)\n(10000, 28, 28)\n(60000,)\n(10000,)\n" ], [ "classes = len(set(y_train))\nprint(classes)", "10\n" ], [ "input_shape = X_train[0].shape\nprint(input_shape)\n# Here T = 28, D = 28", "(28, 28)\n" ], [ "i_layer = Input(shape = input_shape)\nh_layer = LSTM(256)(i_layer)\no_layer = Dense(classes, activation='softmax')(h_layer)\nmodel = Model(i_layer, o_layer)\n", "_____no_output_____" ], [ "model.compile(optimizer='adam', \n loss = 'sparse_categorical_crossentropy',\n metrics = ['accuracy'])\n\nreport = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=20)", "Epoch 1/20\n1875/1875 [==============================] - 9s 5ms/step - loss: 0.5248 - accuracy: 0.7995 - val_loss: 0.5320 - val_accuracy: 0.7993\nEpoch 2/20\n1875/1875 [==============================] - 9s 5ms/step - loss: 0.4953 - accuracy: 0.8120 - val_loss: 0.5154 - val_accuracy: 0.8068\nEpoch 3/20\n1875/1875 [==============================] - 9s 5ms/step - loss: 0.4785 - accuracy: 0.8189 - val_loss: 0.4925 - val_accuracy: 0.8165\nEpoch 4/20\n1875/1875 [==============================] - 9s 5ms/step - loss: 0.4609 - accuracy: 0.8264 - val_loss: 0.4989 - val_accuracy: 0.8113\nEpoch 5/20\n1875/1875 [==============================] - 9s 5ms/step - loss: 0.4417 - accuracy: 0.8338 - val_loss: 0.4777 - val_accuracy: 0.8190\nEpoch 6/20\n1875/1875 [==============================] - 9s 5ms/step - loss: 0.4328 - accuracy: 0.8353 - val_loss: 0.4419 - val_accuracy: 0.8344\nEpoch 7/20\n1875/1875 [==============================] - 9s 5ms/step - loss: 0.4197 - accuracy: 0.8419 - val_loss: 0.4369 - val_accuracy: 0.8376\nEpoch 8/20\n1875/1875 [==============================] - 9s 5ms/step - loss: 0.4095 - accuracy: 0.8457 - val_loss: 0.4782 - val_accuracy: 
0.8182\nEpoch 9/20\n1875/1875 [==============================] - 9s 5ms/step - loss: 0.3985 - accuracy: 0.8475 - val_loss: 0.4339 - val_accuracy: 0.8345\nEpoch 10/20\n1875/1875 [==============================] - 9s 5ms/step - loss: 0.3876 - accuracy: 0.8535 - val_loss: 0.4531 - val_accuracy: 0.8302\nEpoch 11/20\n1875/1875 [==============================] - 9s 5ms/step - loss: 0.3817 - accuracy: 0.8547 - val_loss: 0.4112 - val_accuracy: 0.8494\nEpoch 12/20\n1875/1875 [==============================] - 9s 5ms/step - loss: 0.3752 - accuracy: 0.8562 - val_loss: 0.4057 - val_accuracy: 0.8492\nEpoch 13/20\n1875/1875 [==============================] - 9s 5ms/step - loss: 0.3667 - accuracy: 0.8616 - val_loss: 0.4240 - val_accuracy: 0.8386\nEpoch 14/20\n1875/1875 [==============================] - 9s 5ms/step - loss: 0.3578 - accuracy: 0.8644 - val_loss: 0.4187 - val_accuracy: 0.8448\nEpoch 15/20\n1875/1875 [==============================] - 9s 5ms/step - loss: 0.3544 - accuracy: 0.8649 - val_loss: 0.4009 - val_accuracy: 0.8472\nEpoch 16/20\n1875/1875 [==============================] - 9s 5ms/step - loss: 0.3470 - accuracy: 0.8680 - val_loss: 0.3869 - val_accuracy: 0.8590\nEpoch 17/20\n1875/1875 [==============================] - 9s 5ms/step - loss: 0.3410 - accuracy: 0.8692 - val_loss: 0.3863 - val_accuracy: 0.8563\nEpoch 18/20\n1875/1875 [==============================] - 9s 5ms/step - loss: 0.3342 - accuracy: 0.8720 - val_loss: 0.3827 - val_accuracy: 0.8581\nEpoch 19/20\n1875/1875 [==============================] - 9s 5ms/step - loss: 0.3280 - accuracy: 0.8748 - val_loss: 0.3790 - val_accuracy: 0.8565\nEpoch 20/20\n1875/1875 [==============================] - 9s 5ms/step - loss: 0.3255 - accuracy: 0.8773 - val_loss: 0.3751 - val_accuracy: 0.8610\n" ], [ "y_pred = model.predict(X_test).argmax(axis=1) \n# only for sparse categorical crossentropy", "_____no_output_____" ], [ "# Taken from https://www.kaggle.com/zalando-research/fashionmnist?select=fashion-mnist_test.csv\nlabels = \"T-shirt/top,Trouser,Pullover,Dress,Coat,Sandal,Shirt,Sneaker,Bag,AnkleBoot\".split(\",\")", "_____no_output_____" ], [ "def evaluation_tf(report, y_test, y_pred, classes):\n plt.plot(report.history['loss'], label = 'training_loss')\n plt.plot(report.history['val_loss'], label = 'validation_loss')\n plt.legend()\n plt.show()\n\n plt.plot(report.history['accuracy'], label = 'training_accuracy')\n plt.plot(report.history['val_accuracy'], label = 'validation_accuracy')\n plt.legend()\n plt.show()\n\n from sklearn.metrics import confusion_matrix\n import itertools\n cm = confusion_matrix(y_test, y_pred)\n\n plt.figure(figsize=(10,10))\n plt.imshow(cm, cmap=plt.cm.Blues)\n for i,j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i,j], 'd'),\n horizontalalignment = 'center',\n color='black')\n plt.xlabel(\"Predicted labels\")\n plt.ylabel(\"True labels\")\n plt.xticks(range(0,classes))\n plt.yticks(range(0,classes))\n plt.title('Confusion matrix')\n plt.colorbar()\n plt.show()", "_____no_output_____" ], [ "evaluation_tf(report, y_test, y_pred, classes)", "_____no_output_____" ], [ "misshits = np.where(y_pred!=y_test)[0]\nindex = np.random.choice(misshits)\nplt.imshow(X_test[index], cmap='gray')\nplt.title(\"Predicted = \" + str(labels[y_pred[index]]) + \", Real = \" + str(labels[y_test[index]]))", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb78923b1b3cfd852c8924a71f77c09bd042825b
139,540
ipynb
Jupyter Notebook
Classifiying Ionosphere structure using K nearest neigbours algorithm.ipynb
Raavada/Applied-Machine-Learning
4636497cfb3ae412c2207b5459593fc542e43413
[ "MIT" ]
18
2017-09-28T09:31:04.000Z
2022-03-07T01:45:10.000Z
Classifiying Ionosphere structure using K nearest neigbours algorithm.ipynb
Raavada/Applied-Machine-Learning
4636497cfb3ae412c2207b5459593fc542e43413
[ "MIT" ]
null
null
null
Classifiying Ionosphere structure using K nearest neigbours algorithm.ipynb
Raavada/Applied-Machine-Learning
4636497cfb3ae412c2207b5459593fc542e43413
[ "MIT" ]
20
2017-03-12T16:00:20.000Z
2021-04-20T18:15:14.000Z
191.150685
111,632
0.890454
[ [ [ "# Classifying Ionosphere structure using K nearest neigbours algorithm\n<hr>\n\n### Nearest neighbors\nAmongst the standard machine algorithms, Nearest neighbors is perhaps one of the most intuitive algorithms. To predict the class of a new sample, we look through the training dataset for the samples that are most similar to our new sample.\nWe take the most similar sample and predict the class that the majority of those samples have. As an example, we wish to predict the class of the '?', based on which class it is more similar to (represented here by having similar objects closer together). We find the five nearest neighbors, which are three triangles, one circle and one plus. There are more\ntriangles than circles and plus, and hence the predicted class for the '?' is, therefore, a triangle.\n\n<img src = \"images/knn.png\">\n\n[[image source]](https://github.com/rasbt/python-machine-learning-book/tree/master/images/image_gallery)", "_____no_output_____" ], [ "Nearest neighbors can be used for nearly any dataset-however, since we will have to compute the distance between all pairs of samples, it can be very computationally expensive to do so.\nFor example if there are 10 samples in the dataset, there are 45 unique distances\nto compute. However, if there are 1000 samples, there are nearly 500,000!\n\n#### Distance metrics\nIf we have two samples, we need to know how close they are to each other. Further more, we need to answer\nquestions such as are these two samples more similar than the other two?\nThe most common distance metric that you might have heard of is Euclidean\ndistance, which is the real-world distance. Formally, Euclidean distance is the square root of the sum of the squared\ndistances for each feature. It is intuitive, albeit provides poor accuracy if some features have larger values than others. It also gives poor results when lots of features have a value of 0, i.e our data is 'sparse'. There are other distance metrics in use; two commonly employed ones are the Manhattan and Cosine distance. The Manhattan distance is the sum of the absolute differences in each feature (with no use of square distances). While the Manhattan distance does suffer if\nsome features have larger values than others, the effect is not as dramatic as in the\ncase of Euclidean. Regardless for the implementation of KNN algorithm here, we would consider the Euclidean distance.", "_____no_output_____" ], [ "## Dataset\n\nTo understand KNNs, We will use the Ionosphere dataset, which is the recording of many\nhigh-frequency antennas. The aim of the antennas is to determine whether there is a\nstructure in the ionosphere and a region in the upper atmosphere. Those that have a\nstructure are classified as good, while those that do not are classified as bad. Our aim is to determine whether an image\nis good or bad.\nYou can download the dataset from : http://archive.ics.uci.edu/ml/datasets/Ionosphere.\nSave the ionosphere.data file from the Data Folder to a folder named \"data\" on your computer. \n\nFor each row in the dataset, there are 35 values. The first 34 are measurements taken\nfrom the 17 antennas (two values for each antenna). 
The last is either 'g' or 'b'; that\nstands for good and bad, respectively.", "_____no_output_____" ] ], [ [ "import csv\nimport numpy as np\n\n# Size taken from the dataset and is known\nX = np.zeros((351, 34), dtype='float')\ny = np.zeros((351,), dtype='bool')\n\nwith open(\"data/Ionosphere/ionosphere.data\", 'r') as input_file:\n reader = csv.reader(input_file)\n for i, row in enumerate(reader):\n # Get the data, converting each item to a float\n data = [float(datum) for datum in row[:-1]]\n # Set the appropriate row in our dataset\n X[i] = data\n # 1 if the class is 'g', 0 otherwise\n y[i] = row[-1] == 'g'", "_____no_output_____" ] ], [ [ "First, we load up the NumPy and csv modules. Then we create the X and y NumPy arrays to store the dataset in. The sizes of these\narrays are known from the dataset. We take the first 34 values from this sample, turn each into a float, and save that to\nour dataset. Finally, we take the last value of the row and set the class. We set it to 1 (or True) if it\nis a good sample, and 0 if it is not. We now have a dataset of samples and features in X, and the corresponding classes in y", "_____no_output_____" ], [ "Estimators in scikit-learn have two main functions: fit() and predict().\nWe train the algorithm using the fit method and our training set. We evaluate it\nusing the predict method on our testing set.\nFirst, we need to create these training and testing sets. As before, import and run the\ntrain_test_split function:", "_____no_output_____" ] ], [ [ "from sklearn.cross_validation import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, random_state=14)\nprint(\"There are {} samples in the training dataset\".format(X_train.shape[0]))\nprint(\"There are {} samples in the testing dataset\".format(X_test.shape[0]))\nprint(\"Each sample has {} features\".format(X_train.shape[1]))", "There are 263 samples in the training dataset\nThere are 88 samples in the testing dataset\nEach sample has 34 features\n" ] ], [ [ "Then, we import the nearest neighbor class and create an instance for it using the default parameters. By default, the algorithm will choose the five nearest neighbors to predict\nthe class of a testing sample:", "_____no_output_____" ] ], [ [ "from sklearn.neighbors import KNeighborsClassifier\n\nestimator = KNeighborsClassifier()", "_____no_output_____" ] ], [ [ "After creating our estimator, we must then fit it on our training dataset. For the\nnearest neighbor class, this records our dataset, allowing us to find the nearest\nneighbor for a new data point, by comparing that point to the training dataset:\nestimator.fit(X_train, y_train)\nWe then train the algorithm with our test set and evaluate with our testing set:", "_____no_output_____" ] ], [ [ "estimator.fit(X_train, y_train)", "_____no_output_____" ], [ "y_predicted = estimator.predict(X_test)\naccuracy = np.mean(y_test == y_predicted) * 100\nprint(\"The accuracy is {0:.1f}%\".format(accuracy))", "The accuracy is 86.4%\n" ] ], [ [ "This scores 86.4 percent accuracy, which is impressive for a default algorithm and\njust a few lines of code! Most scikit-learn default parameters are chosen explicitly\nto work well with a range of datasets. 
However, you should always aim to choose\nparameters based on knowledge of the application experiment.", "_____no_output_____" ] ], [ [ "from sklearn.cross_validation import cross_val_score", "_____no_output_____" ], [ "scores = cross_val_score(estimator, X, y, scoring='accuracy')\naverage_accuracy = np.mean(scores) * 100\nprint(\"The average accuracy is {0:.1f}%\".format(average_accuracy))", "The average accuracy is 82.3%\n" ] ], [ [ "Using cross validation, the model this gives a slightly more modest result of 82.3 percent, but it is still quite good\nconsidering we have not yet tried setting better parameters.", "_____no_output_____" ], [ "### Tuning parameters\nAlmost all data mining algorithms have parameters that the user can set. This is\noften a cause of generalizing an algorithm to allow it to be applicable in a wide\nvariety of circumstances. Setting these parameters can be quite difficult, as choosing\ngood parameter values is often highly reliant on features of the dataset.\nThe nearest neighbor algorithm has several parameters, but the most important\none is that of the number of nearest neighbors to use when predicting the class of\nan unseen attribution. In scikit-learn, this parameter is called n_neighbors.\nIn the following figure, we show that when this number is too low, a randomly\nlabeled sample can cause an error. In contrast, when it is too high, the actual nearest\nneighbors have a lower effect on the result. \n\nIf we want to test a number of values for the n_neighbors parameter, for example,\neach of the values from 1 to 20, we can rerun the experiment many times by setting\nn_neighbors and observing the result:", "_____no_output_____" ] ], [ [ "avg_scores = []\nall_scores = []\nparameter_values = list(range(1, 21)) # Including 20\nfor n_neighbors in parameter_values:\n estimator = KNeighborsClassifier(n_neighbors=n_neighbors)\n scores = cross_val_score(estimator, X, y, scoring='accuracy')\n avg_scores.append(np.mean(scores))\n all_scores.append(scores)", "_____no_output_____" ] ], [ [ "We compute and store the average in our list of scores. We also store the full set of\nscores for later analysis. We can then plot the relationship between the value of n_neighbors and the\naccuracy.", "_____no_output_____" ] ], [ [ "%matplotlib inline", "_____no_output_____" ] ], [ [ "We then import pyplot from the matplotlib library and plot the parameter values\nalongside average scores:", "_____no_output_____" ] ], [ [ "from matplotlib import pyplot as plt\nplt.figure(figsize=(32,20))\nplt.plot(parameter_values, avg_scores, '-o', linewidth=5, markersize=24)\n#plt.axis([0, max(parameter_values), 0, 1.0])", "_____no_output_____" ] ], [ [ "While there is a lot of variance, the plot shows a decreasing trend as the number of\nneighbors increases.\n\n### Preprocessing using pipelines\nWhen taking measurements of real-world objects, we can often get features in\nvery different ranges. 
Like we saw in the case of classifying Animal data using Naive Bayes, if we are measuring the qualities of an animal,\nwe considered several features, as follows:\n\n* Number of legs: This is between the range of 0-8 for most animals, while\nsome have many more!\n* Weight: This is between the range of only a few micrograms, all the way\nto a blue whale with a weight of 190,000 kilograms!\n* Number of hearts: This can be between zero to five, in the case of\nthe earthworm.\n\nFor a mathematical-based algorithm to compare each of these features, the differences in the scale, range, and units can be difficult to interpret. If we used the above features in many algorithms, the weight would probably be the most\ninfluential feature due to only the larger numbers and not anything to do with the actual effectiveness of the feature.\nOne of the methods to overcome this is to use a process called preprocessing to normalize the features so that they all have the same range, or are put into categories like small, medium and large. Suddenly, the large difference in the\ntypes of features has less of an impact on the algorithm, and can lead to large\nincreases in the accuracy.\nPreprocessing can also be used to choose only the more effective features, create new\nfeatures, and so on. Preprocessing in scikit-learn is done through Transformer\nobjects, which take a dataset in one form and return an altered dataset after some\ntransformation of the data. These don't have to be numerical, as Transformers are also\nused to extract features-however, in this section, we will stick with preprocessing.\nAn example\nWe can show an example of the problem by breaking the Ionosphere dataset.\nWhile this is only an example, many real-world datasets have problems of this\nform. First, we create a copy of the array so that we do not alter the original dataset:", "_____no_output_____" ] ], [ [ "X_broken = np.array(X)", "_____no_output_____" ] ], [ [ "Next, we break the dataset by dividing every second feature by 10:\n", "_____no_output_____" ] ], [ [ "X_broken[:,::2] /= 10", "_____no_output_____" ] ], [ [ "In theory, this should not have a great effect on the result. After all, the values\nfor these features are still relatively the same. The major issue is that the scale has\nchanged and the odd features are now larger than the even features. We can see the\neffect of this by computing the accuracy:", "_____no_output_____" ] ], [ [ "estimator = KNeighborsClassifier()\noriginal_scores = cross_val_score(estimator, X, y,scoring='accuracy')\nprint(\"The original average accuracy for is {0:.1f}%\".format(np.mean(original_scores) * 100))\nbroken_scores = cross_val_score(estimator, X_broken, y,scoring='accuracy')\nprint(\"The 'broken' average accuracy for is {0:.1f}%\".format(np.mean(broken_scores) * 100))", "The original average accuracy for is 82.3%\nThe 'broken' average accuracy for is 70.4%\n" ] ], [ [ "This gives a score of 82.3 percent for the original dataset, which drops down to\n71.5 percent on the broken dataset. We can fix this by scaling all the features to\nthe range 0 to 1.", "_____no_output_____" ], [ "### Standard preprocessing\nThe preprocessing we will perform for this experiment is called feature-based\nnormalization through the MinMaxScaler class.", "_____no_output_____" ] ], [ [ "from sklearn.preprocessing import MinMaxScaler", "_____no_output_____" ] ], [ [ "This class takes each feature and scales it to the range 0 to 1. 
The minimum value is\nreplaced with 0, the maximum with 1, and the other values somewhere in between.\nTo apply our preprocessor, we run the transform function on it. While MinMaxScaler\ndoesn't, some transformers need to be trained first in the same way that the classifiers\ndo. We can combine these steps by running the fit_transform function instead:", "_____no_output_____" ] ], [ [ "X_transformed = MinMaxScaler().fit_transform(X)", "_____no_output_____" ] ], [ [ "Here, X_transformed will have the same shape as X. However, each column will\nhave a maximum of 1 and a minimum of 0.\nThere are various other forms of normalizing in this way, which is effective for other\napplications and feature types:\n* Ensure the sum of the values for each sample equals to 1, using sklearn.\npreprocessing.Normalizer\n* Force each feature to have a zero mean and a variance of 1, using sklearn.\npreprocessing.StandardScaler, which is a commonly used starting point\nfor normalization\n* Turn numerical features into binary features, where any value above\na threshold is 1 and any below is 0, using sklearn.preprocessing.\nBinarizer .", "_____no_output_____" ], [ "We can now create a workflow by combining the code from the previous sections,\nusing the broken dataset previously calculated:\n", "_____no_output_____" ] ], [ [ "X_transformed = MinMaxScaler().fit_transform(X_broken)\nestimator = KNeighborsClassifier()\ntransformed_scores = cross_val_score(estimator, X_transformed, y,scoring='accuracy')\nprint(\"The average accuracy for is {0:.1f}%\".format(np.mean(transformed_scores) * 100))", "The average accuracy for is 82.3%\n" ] ], [ [ "This gives us back our score of 82.3 percent accuracy. The MinMaxScaler resulted in\nfeatures of the same scale, meaning that no features overpowered others by simply\nbeing bigger values. While the Nearest Neighbor algorithm can be confused with\nlarger features, some algorithms handle scale differences better. In contrast, some\nare much worse!\n\n### Pipelines\nAs experiments grow, so does the complexity of the operations. We may split up\nour dataset, binarize features, perform feature-based scaling, perform sample-based\nscaling, and many more operations.\nKeeping track of all of these operations can get quite confusing and can result in\nbeing unable to replicate the result. Problems include forgetting a step, incorrectly\napplying a transformation, or adding a transformation that wasn't needed.\nAnother issue is the order of the code. In the previous section, we created our\nX_transformed dataset and then created a new estimator for the cross validation.\nIf we had multiple steps, we would need to track all of these changes to the dataset\nin the code.\nPipelines are a construct that addresses these problems (and others, which we will\nsee in the next chapter). Pipelines store the steps in your data mining workflow. They\ncan take your raw data in, perform all the necessary transformations, and then create\na prediction. This allows us to use pipelines in functions such as cross_val_score,\nwhere they expect an estimator. First, import the Pipeline object:", "_____no_output_____" ] ], [ [ "from sklearn.pipeline import Pipeline", "_____no_output_____" ] ], [ [ "Pipelines take a list of steps as input, representing the chain of the data mining\napplication. The last step needs to be an Estimator, while all previous steps are\nTransformers. The input dataset is altered by each Transformer, with the output of\none step being the input of the next step. 
Finally, the samples are classified by the last\nstep's estimator. In our pipeline, we have two steps:\n1. Use MinMaxScaler to scale the feature values from 0 to 1\n2. Use KNeighborsClassifier as the classification algorithms\nEach step is then represented by a tuple ('name', step). We can then create\nour pipeline:", "_____no_output_____" ] ], [ [ "scaling_pipeline = Pipeline([('scale', MinMaxScaler()),\n ('predict', KNeighborsClassifier())])", "_____no_output_____" ] ], [ [ "The key here is the list of tuples. The first tuple is our scaling step and the second\ntuple is the predicting step. We give each step a name: the first we call scale and the\nsecond we call predict, but you can choose your own names. The second part of the\ntuple is the actual Transformer or estimator object.\nRunning this pipeline is now very easy, using the cross validation code from before:", "_____no_output_____" ] ], [ [ "scores = cross_val_score(scaling_pipeline, X_broken, y, scoring='accuracy')\nprint(\"The pipeline scored an average accuracy for is {0:.1f}%\".format(np.mean(transformed_scores) * 100))", "The pipeline scored an average accuracy for is 82.3%\n" ] ], [ [ "This gives us the same score as before (82.3 percent), which is expected, as we are\neffectively running the same steps.\nSetting\nup pipelines is a great way to ensure that the code complexity does not\ngrow unmanageably.", "_____no_output_____" ], [ "<hr>\n### Notes:\nThe right choice of k is crucial to find a good balance between over- and underfitting. We also have to make sure that we choose a distance metric that is appropriate for the features in the dataset. Often, while using the Euclidean distance measure, it is important to standardize the data so that each feature contributes equally to the distance. \n\n#### The curse of dimensionality\nIt is important to mention that KNN is very susceptible to overfitting due to the curse of dimensionality. The curse of dimensionality describes the phenomenon where the feature\nspace becomes increasingly sparse for an increasing number\nof dimensions of a fixed-size training dataset. Intuitively, we\ncan think of even the closest neighbors being too far away in a\nhigh-dimensional space to give a good estimate.\nIn models where regularization is not applicable such as decision trees and KNN, we can use feature selection and dimensionality reduction techniques to help us avoid the curse of dimensionality.\n\n#### Parametric versus nonparametric models\nMachine learning algorithms can be grouped into parametric and nonparametric models. Using parametric models, we estimate parameters from the training dataset to learn a function that can classify new data points without requiring the original training dataset anymore. Typical examples of parametric models are the perceptron, logistic regression, and the linear SVM. In contrast, nonparametric models can't be characterized by a fixed set of parameters, and the number of parameters grows with the training data. Two examples of nonparametric models that we have seen so far are the decision tree classifier/random forest and the kernel SVM.\nKNN belongs to a subcategory of nonparametric models that is described as instance-based learning. 
Models based on instance-based learning are characterized by memorizing the training dataset, and lazy learning is a special case of instance-based learning that is associated with no (zero) cost during the learning process\n_____\n\n### Summary\nIn this chapter, we used several of scikit-learn's methods for building a\nstandard workflow to run and evaluate data mining models. We introduced the\nNearest Neighbors algorithm, which is already implemented in scikit-learn as an\nestimator. Using this class is quite easy; first, we call the fit function on our training\ndata, and second, we use the predict function to predict the class of testing samples.\nWe then looked at preprocessing by fixing poor feature scaling. This was done using\na Transformer object and the MinMaxScaler class. These functions also have a\nfit method and then a transform, which takes a dataset as an input and returns a\ntransformed dataset as an output.", "_____no_output_____" ], [ "___", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ] ]
cb78ac1d2cfa248d8ee24f952f98712720eeccaf
14,680
ipynb
Jupyter Notebook
notebook4.ipynb
RikVoorhaar/seminar-applied-math-2022
f8c727d6dcde76e79faf1ef79d11067766d28a6b
[ "MIT" ]
null
null
null
notebook4.ipynb
RikVoorhaar/seminar-applied-math-2022
f8c727d6dcde76e79faf1ef79d11067766d28a6b
[ "MIT" ]
null
null
null
notebook4.ipynb
RikVoorhaar/seminar-applied-math-2022
f8c727d6dcde76e79faf1ef79d11067766d28a6b
[ "MIT" ]
null
null
null
36.068796
422
0.599319
[ [ [ "# Notebook 4: Quantum operations and distance\n\nIn this notebook we will be taking a closer look at quantum operations, i.e. parts of a quantum circuit that are _not necessarily_ unitary. \n", "_____no_output_____" ] ], [ [ "import numpy as np\n\n# Import cirq, install it if it's not installed.\ntry:\n import cirq\nexcept ImportError:\n print(\"installing cirq...\")\n !pip install --quiet cirq\n print(\"installed cirq.\")\n import cirq", "_____no_output_____" ] ], [ [ "\n\n## Working with density matrices\n\nTo work with quantum operations we need work with density matrices instead of pure states as we have been before. Let's first see how we can do simulations with a density matrix in a unitary quantum circuit using `DensityMatrixSimulator`. ", "_____no_output_____" ] ], [ [ "circuit = cirq.Circuit()\nnum_qubits = 2\nqubits = cirq.LineQubit.range(num_qubits)\n\ncircuit.append([cirq.H(qubits[0])])\ncircuit.append([cirq.CNOT(qubits[0], qubits[1])])\nprint(circuit)\n\nsimulator = cirq.DensityMatrixSimulator()\nresult = simulator.simulate(circuit)\nrho = result.final_density_matrix\nrho\n", "_____no_output_____" ] ], [ [ "The resulting density matrix is a hermitian positive semi-definite (PSD) matrix with trace equal to 1. Because the input is a pure state $|000\\rangle$, and all the operations are unitary, the output should also be a pure state. Recall that a state $\\rho$ is pure if and only if $\\mathrm{tr}(\\rho^2)=1$. Let's verify these properties.", "_____no_output_____" ] ], [ [ "print(\"Trace =\", np.real_if_close(np.trace(rho)))\nprint(\"Hermitian?: \", np.allclose(rho, rho.conjugate().T))\nprint(\"PSD?: \", np.all(np.linalg.eigvalsh(rho >= 0)))\n# np.linalg.eigvals computes eigenvalues of a matrix\n# np.linalg.eigvalsh computes eigenvalues of a hermitian matrix. The assumption that\n# the matrix is hermitian allows for a faster more numerically stable computation.\n\nprint(\"Pure state?\", np.trace(rho @ rho) > 1 - 1e-4)\n", "_____no_output_____" ] ], [ [ "## Noisy channels\n\nUsually we need the quantum operator formalism because we want to model _noise_ in a quantum circuit. There are many types of noise that can occur in real-life quantum circuits. Perhaps the simplest type is the _bit-flip channel_, which flips the state (applies the X gate) of a single qubit with a certain probability.\n\nIf $q$ is the probability of flipping the state, then the bit-flip channel acts as:\n$$\n \\rho \\mapsto (1-q)\\rho+qX\\rho X^\\dagger\n$$\n\nin the _operator sum formalism_, the _operation elements_ are therefore $\\sqrt{1-q}I$ and $\\sqrt{q}X$.\n\nLet's modify the circuit above to use the bit-flip channel on the first qubit, before the CNOT gate.", "_____no_output_____" ] ], [ [ "circuit = cirq.Circuit()\nnum_qubits = 2\nqubits = cirq.LineQubit.range(num_qubits)\n\ncircuit.append([cirq.H(qubits[0]), cirq.bit_flip(0.1)(qubits[1])])\ncircuit.append([cirq.CNOT(qubits[0], qubits[1])])\nprint(circuit)\n\nsimulator = cirq.DensityMatrixSimulator()\nresult = simulator.simulate(circuit)\nrho = result.final_density_matrix\nrho\n", "_____no_output_____" ] ], [ [ "Unlike before, this circuit is _not_ unitary. And hence the output is not a pure state. Here's what happens if we compute $\\mathrm{tr}(\\rho^2)$:", "_____no_output_____" ] ], [ [ "np.trace(rho @ rho)\n", "_____no_output_____" ] ], [ [ "## Exercise 1a\n> While $\\rho$ is not a pure state, it is the mixture of two pure states: $\\rho = 0.1|\\psi\\rangle\\langle \\psi| + 0.9|\\varphi\\rangle\\langle \\varphi|$. 
Use the eigenvalue decomposition `np.linalg.eigh` to find $|\\psi\\rangle $ and $|\\varphi \\rangle$. (Hint: look carefully at the eigenvalues `eigvals` to select the right eigenvectors). Use `cirq.qis.dirac_notation` to neatly format the resulting vector.", "_____no_output_____" ] ], [ [ "eigvals, eigvects = np.linalg.eigh(rho)\n\n# YOUR CODE HERE\n", "_____no_output_____" ] ], [ [ "## Exercise 1b\n> Denote the entire circuit above by $\\mathcal E$, then we defined $\\rho =\\mathcal E(|0\\rangle\\langle 0|)$, and observed that $\\mathrm{tr}(\\rho^2)<1$. What happens if we iterate the circuit a few times? Use a for loop to show experimentally that $\\mathrm{tr}(\\mathcal E^n(\\rho))$ converges to 0.5.\n\n- To apply the circuit multiple times, we can use the `initial_state=rho` keyword for the function `simulator.simulate`. This sets the initial state of the simulator to the density matrix `rho`. \n\n- If you use too many iterations, you might get this error:\n```py\n ValueError: The density matrix is not hermitian.\n```\nThis is because of accumulating numerical errors. To avoid this, simply use fewer iterations. The convergence should be pretty good after 10 iterations.", "_____no_output_____" ] ], [ [ "rho = simulator.simulate(circuit).final_density_matrix\n\n# Your code here\n", "_____no_output_____" ] ], [ [ "## Exercise 1c\n> We can get sates $\\rho$ such that $\\mathrm{tr}(\\rho^2)$ is even smaller than 0.5. Modify the circuit by adding _a single_ `bit_flip(0.1)` gate to the circuit at the right place and repeat the experiment of Exercise 1a to converge to a state with $\\mathrm{tr}(\\rho^2)\\to 0.25$", "_____no_output_____" ] ], [ [ "circuit = cirq.Circuit()\nnum_qubits = 2\nqubits = cirq.LineQubit.range(num_qubits)\n\n# YOUR CODE HERE\n", "_____no_output_____" ] ], [ [ "## Exercise 1d\n> The lowest value of $\\mathrm{tr}(\\rho^2)$ we can possibly achieve is when $\\rho = I/d$, where d is the dimension of the system. Show that this state is a fixed point of the circuit $\\mathcal E$; i.e. $\\mathcal E(\\rho) = \\rho$.", "_____no_output_____" ] ], [ [ "rho_worst = np.eye(4, dtype=np.complex64) / 4\n\n# YOUR CODE HERE\n", "_____no_output_____" ] ], [ [ "## Trace distance and fidelity\n\nWe will investigate how different types of noise can affect the fidelity and trace distance between states. Your first job is to implement trace distance and fidelity.\n\n## Exercise 2a\n> Recall that the trace distance is defined by $D(\\rho,\\sigma) = \\mathrm{tr}|\\rho-\\sigma|$. Implement the trace distance in a function `trace_distance`. Here you can use the fact that for any hermitian matrix $A$ we have $\\mathrm{tr}|A|=\\sum_i \\sigma_i(A)$ where $\\sigma_i$ is the $i\\!$ th _singular value_ of $A$. You can compute singular values using `scipy.linalg.svdvals`", "_____no_output_____" ] ], [ [ "import scipy.linalg\n\n\ndef trace_distance(rho, sigma):\n # YOUR CODE HERE\n ...\n\n\n# rho = |00><00|\nrho = np.zeros((4, 4), dtype=np.complex64)\nrho[0, 0] = 1\n\n# sigma = E(rho)\nsigma = simulator.simulate(circuit, initial_state=rho).final_density_matrix\n\nprint(trace_distance(rho, rho)) # should be 0\nprint(trace_distance(rho, sigma)) # should be around 1.33\n", "_____no_output_____" ] ], [ [ "## Exercise 2b\n> The fidelity is defined by $F(\\rho,\\sigma) = \\mathrm{tr}\\sqrt{\\rho^{1/2}\\sigma \\rho^{1/2}}$. Implement the fidelity in a function `fidelity`. You can compute the square root of a matrix using `scipy.linalg.sqrtm`. 
Make sure that the result is a real number, possibly by using `np.abs( ... )` on the result.", "_____no_output_____" ] ], [ [ "def fidelity(rho, sigma):\n # YOUR CODE HERE\n ...\n\n\nprint(fidelity(rho, rho)) # should be 1\nprint(fidelity(rho, sigma)) # should be around 0.67\n", "_____no_output_____" ] ], [ [ "It is perhaps worth noting that while the definition $F(\\rho,\\sigma) = \\mathrm{tr}\\sqrt{\\rho^{1/2}\\sigma \\rho^{1/2}}$ is used in Nielsen-Chuang, the definition $F(\\rho,\\sigma) = (\\mathrm{tr}\\sqrt{\\rho^{1/2}\\sigma \\rho^{1/2}})^2$ is more common in contemporary literature. Since we follow the book, we will keep using the former definition.", "_____no_output_____" ], [ "### Fidelity of a quantum operation\n\nWe often need to know how much a quantum operation (in particular noise) can distort a state. We can do this by computing the fidelity between a state and the result of applying the operation to the state. That is we consider $F(\\rho,\\mathcal E(\\rho))$.\n\nThis fidelity is going to be bigger for some states, and smaller for others. Therefore we are for example interested in the _minimum_ fidelity obtained among all states $\\rho$. This is however not easy to compute in general. Instead we consider the _average_ fidelity\n$$\n\\overline F(\\mathcal E) := \\int_{S^{n-1}}\\! F(|\\psi\\rangle\\langle \\psi|,\\mathcal E(|\\psi\\rangle\\langle \\psi|))\\,\\mathrm d\\psi\n$$\n\nHere we took the average over all the pure states, but instead we could also take the average over all the mixed states. For now we can estimate this integral using Monte-Carlo integration. That is, we randomly sample over states $\\rho$, compute $F(|\\psi\\rangle\\langle \\psi|,\\mathcal E(|\\psi\\rangle\\langle \\psi|))$, and then average the result.\n\n## Exercise 2c\n> Implement the function `average_fidelity(circuit, N)` that estimates the average fidelity of a quantum circuit `circuit` using `N` samples. You can use the function `random_pure_state(num_qubits)` to generate random pure states.", "_____no_output_____" ] ], [ [ "circuit = cirq.Circuit()\nqubit1 = cirq.LineQubit(0)\ncircuit.append(cirq.bit_flip(0.1)(qubit1))\n\n\ndef random_pure_state(num_qubits):\n n = 2**num_qubits\n\n # Vector of random normal complex numbers\n psi = np.random.normal(size=n) + 1j * np.random.normal(size=n)\n\n # Normalize\n psi = psi / np.linalg.norm(psi)\n psi = psi.astype(np.complex64)\n\n # Compute rank-1 matrix |psi><psi|\n state = np.outer(psi, psi.conj())\n\n return state\n\n\ndef average_fidelity(circuit, N):\n num_qubits = len(circuit.all_qubits())\n\n # YOUR CODE HERE\n\naverage_fidelity(circuit, 200) # Should be around 0.967\n", "_____no_output_____" ] ], [ [ "Now let's try to understand how the average fidelity changes if we use noise of different strength. Instead of the bit-flip channel we will be considering the _depolarizing channel_, which is defined as:\n$$\n\\mathcal E(\\rho) = \\frac{pI}{2} + (1-p)\\rho\n$$\n\ni.e. with probability $p$ we change the state into $I/2$ -- a completely random state, and with probability $1-p$ we leave the state unchanged. This channel can be implemented using the `depolarize(p)` gate.\n\n## Exercise 2d\n> Using a `for` loop, define different circuits consisting of the depolarizing channel of strength $p$ on a single qubit. Then compute the average fidelity of this circuit, and add the result to the list `fidelities_list`. The result is then plotted for you. 
If done correctly, the two plotted lines should perfectly overlap.", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\n\nfidelities_list = []\np_values = np.linspace(0, 1, 20)\n\nfor p in p_values:\n # YOUR CODE HERE\n ...\n\nplt.plot(p_values, fidelities_list, \"-o\", label=\"Estimated\")\nplt.plot(p_values, np.sqrt(1 - 2 * p_values / 3), label=\"Theoretical\")\nplt.legend()\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb78b07d3739221531be0cfc0e2f77ac15a00af0
152,639
ipynb
Jupyter Notebook
tests/ml-books/06-algorithm-chains-and-pipelines.ipynb
gopala-kr/ds-notebooks
bc35430ecdd851f2ceab8f2437eec4d77cb59423
[ "MIT" ]
1
2019-05-10T09:16:23.000Z
2019-05-10T09:16:23.000Z
tests/ml-books/06-algorithm-chains-and-pipelines.ipynb
gopala-kr/ds-notebooks
bc35430ecdd851f2ceab8f2437eec4d77cb59423
[ "MIT" ]
null
null
null
tests/ml-books/06-algorithm-chains-and-pipelines.ipynb
gopala-kr/ds-notebooks
bc35430ecdd851f2ceab8f2437eec4d77cb59423
[ "MIT" ]
1
2019-05-10T09:17:28.000Z
2019-05-10T09:17:28.000Z
169.035437
33,078
0.893133
[ [ [ "#load watermark\n%load_ext watermark\n%watermark -a 'Gopala KR' -u -d -v -p watermark,numpy,pandas,matplotlib,nltk,sklearn,tensorflow,theano,mxnet,chainer,seaborn,keras,tflearn,bokeh,gensim", "/srv/conda/lib/python3.6/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n from ._conv import register_converters as _register_converters\nWARNING (theano.configdefaults): install mkl with `conda install mkl-service`: No module named 'mkl'\nUsing TensorFlow backend.\n/srv/conda/lib/python3.6/site-packages/tensorflow/python/util/tf_inspect.py:45: DeprecationWarning: inspect.getargspec() is deprecated, use inspect.signature() or inspect.getfullargspec()\n if d.decorator_argspec is not None), _inspect.getargspec(target))\n" ], [ "from preamble import *\n%matplotlib inline", "_____no_output_____" ] ], [ [ "## Algorithm Chains and Pipelines", "_____no_output_____" ] ], [ [ "from sklearn.svm import SVC\nfrom sklearn.datasets import load_breast_cancer\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import MinMaxScaler\n\n# load and split the data\ncancer = load_breast_cancer()\nX_train, X_test, y_train, y_test = train_test_split(\n cancer.data, cancer.target, random_state=0)\n\n# compute minimum and maximum on the training data\nscaler = MinMaxScaler().fit(X_train)", "_____no_output_____" ], [ "# rescale the training data\nX_train_scaled = scaler.transform(X_train)\n\nsvm = SVC()\n# learn an SVM on the scaled training data\nsvm.fit(X_train_scaled, y_train)\n# scale the test data and score the scaled data\nX_test_scaled = scaler.transform(X_test)\nprint(\"Test score: {:.2f}\".format(svm.score(X_test_scaled, y_test)))", "Test score: 0.95\n" ] ], [ [ "### Parameter Selection with Preprocessing ", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import GridSearchCV\n# for illustration purposes only, don't use this code!\nparam_grid = {'C': [0.001, 0.01, 0.1, 1, 10, 100],\n 'gamma': [0.001, 0.01, 0.1, 1, 10, 100]}\ngrid = GridSearchCV(SVC(), param_grid=param_grid, cv=5)\ngrid.fit(X_train_scaled, y_train)\nprint(\"Best cross-validation accuracy: {:.2f}\".format(grid.best_score_))\nprint(\"Best parameters: \", grid.best_params_)\nprint(\"Test set accuracy: {:.2f}\".format(grid.score(X_test_scaled, y_test)))", "Best cross-validation accuracy: 0.98\nBest parameters: {'C': 1, 'gamma': 1}\nTest set accuracy: 0.97\n" ], [ "mglearn.plots.plot_improper_processing()", "_____no_output_____" ] ], [ [ "### Building Pipelines", "_____no_output_____" ] ], [ [ "from sklearn.pipeline import Pipeline\npipe = Pipeline([(\"scaler\", MinMaxScaler()), (\"svm\", SVC())])", "_____no_output_____" ], [ "pipe.fit(X_train, y_train)", "_____no_output_____" ], [ "print(\"Test score: {:.2f}\".format(pipe.score(X_test, y_test)))", "Test score: 0.95\n" ] ], [ [ "### Using Pipelines in Grid-searches", "_____no_output_____" ] ], [ [ "param_grid = {'svm__C': [0.001, 0.01, 0.1, 1, 10, 100],\n 'svm__gamma': [0.001, 0.01, 0.1, 1, 10, 100]}", "_____no_output_____" ], [ "grid = GridSearchCV(pipe, param_grid=param_grid, cv=5)\ngrid.fit(X_train, y_train)\nprint(\"Best cross-validation accuracy: {:.2f}\".format(grid.best_score_))\nprint(\"Test set score: {:.2f}\".format(grid.score(X_test, y_test)))\nprint(\"Best parameters: {}\".format(grid.best_params_))", "Best cross-validation accuracy: 0.98\nTest set score: 0.97\nBest parameters: 
{'svm__C': 1, 'svm__gamma': 1}\n" ], [ "mglearn.plots.plot_proper_processing()", "_____no_output_____" ], [ "rnd = np.random.RandomState(seed=0)\nX = rnd.normal(size=(100, 10000))\ny = rnd.normal(size=(100,))", "_____no_output_____" ], [ "from sklearn.feature_selection import SelectPercentile, f_regression\n\nselect = SelectPercentile(score_func=f_regression, percentile=5).fit(X, y)\nX_selected = select.transform(X)\nprint(\"X_selected.shape: {}\".format(X_selected.shape))", "X_selected.shape: (100, 500)\n" ], [ "from sklearn.model_selection import cross_val_score\nfrom sklearn.linear_model import Ridge\nprint(\"Cross-validation accuracy (cv only on ridge): {:.2f}\".format(\n np.mean(cross_val_score(Ridge(), X_selected, y, cv=5))))", "Cross-validation accuracy (cv only on ridge): 0.91\n" ], [ "pipe = Pipeline([(\"select\", SelectPercentile(score_func=f_regression,\n percentile=5)),\n (\"ridge\", Ridge())])\nprint(\"Cross-validation accuracy (pipeline): {:.2f}\".format(\n np.mean(cross_val_score(pipe, X, y, cv=5))))", "Cross-validation accuracy (pipeline): -0.25\n" ] ], [ [ "### The General Pipeline Interface", "_____no_output_____" ] ], [ [ "def fit(self, X, y):\n X_transformed = X\n for name, estimator in self.steps[:-1]:\n # iterate over all but the final step\n # fit and transform the data\n X_transformed = estimator.fit_transform(X_transformed, y)\n # fit the last step\n self.steps[-1][1].fit(X_transformed, y)\n return self", "_____no_output_____" ], [ "def predict(self, X):\n X_transformed = X\n for step in self.steps[:-1]:\n # iterate over all but the final step\n # transform the data\n X_transformed = step[1].transform(X_transformed)\n # predict using the last step\n return self.steps[-1][1].predict(X_transformed)", "_____no_output_____" ] ], [ [ "![pipeline_illustration](images/pipeline.png)", "_____no_output_____" ], [ "### Convenient Pipeline creation with ``make_pipeline``", "_____no_output_____" ] ], [ [ "from sklearn.pipeline import make_pipeline\n# standard syntax\npipe_long = Pipeline([(\"scaler\", MinMaxScaler()), (\"svm\", SVC(C=100))])\n# abbreviated syntax\npipe_short = make_pipeline(MinMaxScaler(), SVC(C=100))", "_____no_output_____" ], [ "print(\"Pipeline steps:\\n{}\".format(pipe_short.steps))", "Pipeline steps:\n[('minmaxscaler', MinMaxScaler(copy=True, feature_range=(0, 1))), ('svc', SVC(C=100, cache_size=200, class_weight=None, coef0=0.0,\n decision_function_shape='ovr', degree=3, gamma='auto', kernel='rbf',\n max_iter=-1, probability=False, random_state=None, shrinking=True,\n tol=0.001, verbose=False))]\n" ], [ "from sklearn.preprocessing import StandardScaler\nfrom sklearn.decomposition import PCA\n\npipe = make_pipeline(StandardScaler(), PCA(n_components=2), StandardScaler())\nprint(\"Pipeline steps:\\n{}\".format(pipe.steps))", "Pipeline steps:\n[('standardscaler-1', StandardScaler(copy=True, with_mean=True, with_std=True)), ('pca', PCA(copy=True, iterated_power='auto', n_components=2, random_state=None,\n svd_solver='auto', tol=0.0, whiten=False)), ('standardscaler-2', StandardScaler(copy=True, with_mean=True, with_std=True))]\n" ] ], [ [ "#### Accessing step attributes", "_____no_output_____" ] ], [ [ "# fit the pipeline defined before to the cancer dataset\npipe.fit(cancer.data)\n# extract the first two principal components from the \"pca\" step\ncomponents = pipe.named_steps[\"pca\"].components_\nprint(\"components.shape: {}\".format(components.shape))", "components.shape: (2, 30)\n" ] ], [ [ "#### Accessing Attributes in a Pipeline inside GridSearchCV", 
"_____no_output_____" ] ], [ [ "from sklearn.linear_model import LogisticRegression\n\npipe = make_pipeline(StandardScaler(), LogisticRegression())", "_____no_output_____" ], [ "param_grid = {'logisticregression__C': [0.01, 0.1, 1, 10, 100]}", "_____no_output_____" ], [ "X_train, X_test, y_train, y_test = train_test_split(\n cancer.data, cancer.target, random_state=4)\ngrid = GridSearchCV(pipe, param_grid, cv=5)\ngrid.fit(X_train, y_train)", "_____no_output_____" ], [ "print(\"Best estimator:\\n{}\".format(grid.best_estimator_))", "Best estimator:\nPipeline(memory=None,\n steps=[('standardscaler', StandardScaler(copy=True, with_mean=True, with_std=True)), ('logisticregression', LogisticRegression(C=0.1, class_weight=None, dual=False, fit_intercept=True,\n intercept_scaling=1, max_iter=100, multi_class='ovr', n_jobs=1,\n penalty='l2', random_state=None, solver='liblinear', tol=0.0001,\n verbose=0, warm_start=False))])\n" ], [ "print(\"Logistic regression step:\\n{}\".format(\n grid.best_estimator_.named_steps[\"logisticregression\"]))", "Logistic regression step:\nLogisticRegression(C=0.1, class_weight=None, dual=False, fit_intercept=True,\n intercept_scaling=1, max_iter=100, multi_class='ovr', n_jobs=1,\n penalty='l2', random_state=None, solver='liblinear', tol=0.0001,\n verbose=0, warm_start=False)\n" ], [ "print(\"Logistic regression coefficients:\\n{}\".format(\n grid.best_estimator_.named_steps[\"logisticregression\"].coef_))", "Logistic regression coefficients:\n[[-0.389 -0.375 -0.376 -0.396 -0.115 0.017 -0.355 -0.39 -0.058 0.209\n -0.495 -0.004 -0.371 -0.383 -0.045 0.198 0.004 -0.049 0.21 0.224\n -0.547 -0.525 -0.499 -0.515 -0.393 -0.123 -0.388 -0.417 -0.325 -0.139]]\n" ] ], [ [ "### Grid-searching preprocessing steps and model parameters", "_____no_output_____" ] ], [ [ "from sklearn.datasets import load_boston\nboston = load_boston()\nX_train, X_test, y_train, y_test = train_test_split(boston.data, boston.target,\n random_state=0)\n\nfrom sklearn.preprocessing import PolynomialFeatures\npipe = make_pipeline(\n StandardScaler(),\n PolynomialFeatures(),\n Ridge())", "_____no_output_____" ], [ "param_grid = {'polynomialfeatures__degree': [1, 2, 3],\n 'ridge__alpha': [0.001, 0.01, 0.1, 1, 10, 100]}", "_____no_output_____" ], [ "grid = GridSearchCV(pipe, param_grid=param_grid, cv=5, n_jobs=-1)\ngrid.fit(X_train, y_train)", "_____no_output_____" ], [ "mglearn.tools.heatmap(grid.cv_results_['mean_test_score'].reshape(3, -1),\n xlabel=\"ridge__alpha\", ylabel=\"polynomialfeatures__degree\",\n xticklabels=param_grid['ridge__alpha'],\n yticklabels=param_grid['polynomialfeatures__degree'], vmin=0)", "_____no_output_____" ], [ "print(\"Best parameters: {}\".format(grid.best_params_))", "Best parameters: {'polynomialfeatures__degree': 2, 'ridge__alpha': 10}\n" ], [ "print(\"Test-set score: {:.2f}\".format(grid.score(X_test, y_test)))", "Test-set score: 0.77\n" ], [ "param_grid = {'ridge__alpha': [0.001, 0.01, 0.1, 1, 10, 100]}\npipe = make_pipeline(StandardScaler(), Ridge())\ngrid = GridSearchCV(pipe, param_grid, cv=5)\ngrid.fit(X_train, y_train)\nprint(\"Score without poly features: {:.2f}\".format(grid.score(X_test, y_test)))", "Score without poly features: 0.63\n" ], [ "pipe = Pipeline([('preprocessing', StandardScaler()), ('classifier', SVC())])", "_____no_output_____" ], [ "from sklearn.ensemble import RandomForestClassifier\n\nparam_grid = [\n {'classifier': [SVC()], 'preprocessing': [StandardScaler(), None],\n 'classifier__gamma': [0.001, 0.01, 0.1, 1, 10, 100],\n 'classifier__C': 
[0.001, 0.01, 0.1, 1, 10, 100]},\n {'classifier': [RandomForestClassifier(n_estimators=100)],\n 'preprocessing': [None], 'classifier__max_features': [1, 2, 3]}]", "_____no_output_____" ], [ "X_train, X_test, y_train, y_test = train_test_split(\n cancer.data, cancer.target, random_state=0)\n\ngrid = GridSearchCV(pipe, param_grid, cv=5)\ngrid.fit(X_train, y_train)\n\nprint(\"Best params:\\n{}\\n\".format(grid.best_params_))\nprint(\"Best cross-validation score: {:.2f}\".format(grid.best_score_))\nprint(\"Test-set score: {:.2f}\".format(grid.score(X_test, y_test)))", "Best params:\n{'classifier': SVC(C=10, cache_size=200, class_weight=None, coef0=0.0,\n decision_function_shape='ovr', degree=3, gamma=0.01, kernel='rbf',\n max_iter=-1, probability=False, random_state=None, shrinking=True,\n tol=0.001, verbose=False), 'classifier__C': 10, 'classifier__gamma': 0.01, 'preprocessing': StandardScaler(copy=True, with_mean=True, with_std=True)}\n\nBest cross-validation score: 0.99\nTest-set score: 0.98\n" ] ], [ [ "### Summary and Outlook", "_____no_output_____" ] ], [ [ "test complete ; Gopal", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
cb78b57f72619324cd7f3409f70b4ec78763f72c
58,051
ipynb
Jupyter Notebook
examples/marginalization.ipynb
jfcrenshaw/pzflow
ed1a7082ee3038794632d59864961da381d2523c
[ "MIT" ]
26
2021-02-01T22:10:04.000Z
2022-03-18T14:54:36.000Z
examples/marginalization.ipynb
jfcrenshaw/pzflow
ed1a7082ee3038794632d59864961da381d2523c
[ "MIT" ]
48
2021-01-12T07:48:10.000Z
2022-03-18T04:45:30.000Z
examples/marginalization.ipynb
jfcrenshaw/pzflow
ed1a7082ee3038794632d59864961da381d2523c
[ "MIT" ]
8
2021-02-03T06:21:45.000Z
2022-01-29T17:15:47.000Z
142.281863
26,792
0.865153
[ [ [ "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/jfcrenshaw/pzflow/blob/main/examples/marginalization.ipynb)\n\nIf running in Colab, to switch to GPU, go to the menu and select Runtime -> Change runtime type -> Hardware accelerator -> GPU.\n\nIn addition, uncomment and run the following code:", "_____no_output_____" ] ], [ [ "# !pip install pzflow", "_____no_output_____" ] ], [ [ "-------------------\n## Marginalization during posterior calculation\n\nThis example notebook demonstrates how to marginalize over missing variables during posterior calculation.\nWe will use the Flow trained in the [redshift example](https://github.com/jfcrenshaw/pzflow/blob/main/examples/redshift_example.ipynb).", "_____no_output_____" ] ], [ [ "import jax.numpy as np\nimport matplotlib.pyplot as plt\nfrom pzflow.examples import get_example_flow", "_____no_output_____" ] ], [ [ "First let's load the pre-trained flow, and use it to generate some samples:", "_____no_output_____" ] ], [ [ "flow = get_example_flow()\nsamples = flow.sample(2, seed=123)", "_____no_output_____" ], [ "samples", "_____no_output_____" ] ], [ [ "Remember that we can calculate posteriors for the data in samples. For example, let's plot redshift posteriors:", "_____no_output_____" ] ], [ [ "grid = np.linspace(0.25, 1.45, 100)\npdfs = flow.posterior(samples, column=\"redshift\", grid=grid)", "_____no_output_____" ], [ "fig, axes = plt.subplots(1, 2, figsize=(5.5, 2), dpi=120, constrained_layout=True)\n\nfor i, ax in enumerate(axes.flatten()):\n \n ax.plot(grid, pdfs[i], label=\"Redshift posterior\")\n \n ztrue = samples[\"redshift\"][i]\n ax.axvline(ztrue, c=\"C3\", label=\"True redshift\")\n ax.set(\n xlabel=\"redshift\", \n xlim=(ztrue - 0.25, ztrue + 0.25), \n yticks=[]\n )\n \naxes[0].legend(\n bbox_to_anchor=(0.55, 1.05, 1, 0.2), \n loc=\"lower left\",\n mode=\"expand\", \n borderaxespad=0, \n ncol=2,\n fontsize=8,\n)\n\nplt.show()", "_____no_output_____" ] ], [ [ "But what if we have missing values? E.g. let's imagine that galaxy 1 wasn't observed in the u band, while galaxy 2 wasn't observed in the u or y bands. We will mark these non-observations with the value 99:", "_____no_output_____" ] ], [ [ "# make a new copy of the samples\nsamples2 = samples.copy()\n# make the non-observations\nsamples2.iloc[0, 1] = 99\nsamples2.iloc[1, 1] = 99\nsamples2.iloc[1, -1] = 99\n# print the new samples\nsamples2", "_____no_output_____" ] ], [ [ "Now if we want to calculate posteriors, we can't simply call `flow.posterior()` as before because the flow will think that 99 is the actual value for those bands, rather than just a flag for a missing value. What we can do, however, is pass `marg_rules`, which is a dictionary of rules that tells the Flow how to marginalize over missing variables.\n\n`marg_rules` must include:\n- \"flag\": 99, which tells the posterior method that 99 is the flag for missing values\n- \"u\": callable, which returns an array of values for the u band over which to marginalize\n- \"y\": callable, which returns an array of values for the y band over which to marginalize\n\n\"u\" and \"y\" both map to callable, because you can use a function of the other values to decide what values of u and y to marginalize over. For example, maybe you expect the value of u to be close to the value of g. 
In which case you might use:\n```\n\"u\": lambda row: np.linspace(row[\"g\"] - 1, row[\"g\"] + 1, 100)\n```\nThe only constraint is that regardless of the values of the other variables, the callable must *always* return an array of the same length.\n\nFor this example, we won't make the marginalization rules a function of the other variables, but will instead return a fixed array.", "_____no_output_____" ] ], [ [ "marg_rules = {\n \"flag\": 99, # tells the posterior method that 99 means missing value\n \"u\": lambda row: np.linspace(26, 28, 40), # the array of u values to marginalize over\n \"y\": lambda row: np.linspace(24, 26, 40), # the array of y values to marginalize over\n}\n\npdfs2 = flow.posterior(samples2, column=\"redshift\", grid=grid, marg_rules=marg_rules)", "_____no_output_____" ], [ "fig, axes = plt.subplots(1, 2, figsize=(5.5, 2), dpi=120, constrained_layout=True)\n\nfor i, ax in enumerate(axes.flatten()):\n \n ax.plot(grid, pdfs[i], label=\"Posterior w/ all bands\")\n ax.plot(grid, pdfs2[i], label=\"Posterior w/ missing bands marginalized\")\n \n ztrue = samples[\"redshift\"][i]\n ax.axvline(ztrue, c=\"C3\", label=\"True redshift\")\n ax.set(\n xlabel=\"redshift\", \n xlim=(ztrue - 0.25, ztrue + 0.25), \n yticks=[]\n )\n \naxes[0].legend(\n bbox_to_anchor=(0, 1.05, 2, 0.2), \n loc=\"lower left\",\n mode=\"expand\", \n borderaxespad=0, \n ncol=3,\n fontsize=7.5,\n)\n\nplt.show()", "_____no_output_____" ] ], [ [ "You can see that marginalizing over the bands (aka throwing out information), degrades the posteriors.\n\nWarning that marginalizing over fine grids quickly gets very computationally expensive, especially when you have rows in your data frame that are missing multiple values.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ] ]
cb78b6cbf346fb6e84af7f0e887bd67729cf1d6a
62,858
ipynb
Jupyter Notebook
Watershed/Watershed.ipynb
sacdallago/dataminer
ae15b602e2da4cf6bc656a819f62b36ea6aaf8d0
[ "Apache-2.0" ]
1
2016-11-14T09:05:45.000Z
2016-11-14T09:05:45.000Z
Watershed/Watershed.ipynb
sacdallago/dataminer
ae15b602e2da4cf6bc656a819f62b36ea6aaf8d0
[ "Apache-2.0" ]
9
2016-11-14T05:24:29.000Z
2016-11-18T03:11:48.000Z
Watershed/Watershed.ipynb
sacdallago/dataminer
ae15b602e2da4cf6bc656a819f62b36ea6aaf8d0
[ "Apache-2.0" ]
null
null
null
296.5
56,806
0.921585
[ [ [ "import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import ndimage as ndi\n\nimport os\nfrom PIL import Image\nimport PIL.ImageOps \n\nfrom skimage.morphology import watershed\nfrom skimage.feature import peak_local_max\nfrom skimage.filters import threshold_otsu\nfrom skimage.morphology import binary_closing\nfrom skimage.color import rgb2gray", "_____no_output_____" ] ], [ [ "Watershed with binarization first", "_____no_output_____" ] ], [ [ "arraydirectory= './edge_array/'\nphotodirectory='./photos/'\n\nimage=np.array(Image.open(photodirectory + '1449.jpg'))\nimage = rgb2gray(image)\nthresh = threshold_otsu(image)\nimg_bin = image > thresh\nimage_closed=binary_closing(img_bin)\n# Now we want to separate the two objects in image\n# Generate the markers as local maxima of the distance to the background\ndistance = ndi.distance_transform_edt(image_closed)\nlocal_maxi = peak_local_max(distance, indices=False)\nmarkers = ndi.label(local_maxi)[0]\nlabels = watershed(-distance, markers, mask=image_closed)\n\nfig, axes = plt.subplots(ncols=3, figsize=(9, 3), sharex=True, sharey=True,\n subplot_kw={'adjustable': 'box-forced'})\nax = axes.ravel()\n\nax[0].imshow(image_closed, cmap=plt.cm.gray, interpolation='nearest')\nax[0].set_title('Overlapping objects')\nax[1].imshow(-distance, cmap=plt.cm.gray, interpolation='nearest')\nax[1].set_title('Distances')\nax[2].imshow(labels, cmap=plt.cm.spectral, interpolation='nearest')\nax[2].set_title('Separated objects')\n\nfor a in ax:\n a.set_axis_off()\n\nfig.tight_layout()\nplt.show()", "_____no_output_____" ] ], [ [ "Watershed on image itself", "_____no_output_____" ] ], [ [ "arraydirectory= './edge_array/'\nphotodirectory='./photos/'\n\nimage=np.array(Image.open(photodirectory + '1449.jpg'))\n# Now we want to separate the two objects in image\n# Generate the markers as local maxima of the distance to the background\ndistance = ndi.distance_transform_edt(image)\nlocal_maxi = peak_local_max(distance, indices=False)\nmarkers = ndi.label(local_maxi)[0]\nlabels = watershed(-distance, markers, mask=image)\n\nfig, axes = plt.subplots(ncols=3, figsize=(9, 3), sharex=True, sharey=True,\n subplot_kw={'adjustable': 'box-forced'})\nax = axes.ravel()\n\nax[0].imshow(image, cmap=plt.cm.gray, interpolation='nearest')\nax[0].set_title('Overlapping objects')\nax[1].imshow(-distance, cmap=plt.cm.gray, interpolation='nearest')\nax[1].set_title('Distances')\nax[2].imshow(labels, cmap=plt.cm.spectral, interpolation='nearest')\nax[2].set_title('Separated objects')\n\nfor a in ax:\n a.set_axis_off()\n\nfig.tight_layout()\nplt.show()", "_____no_output_____" ] ], [ [ "So we use Watershed on the binary picture.", "_____no_output_____" ] ], [ [ "arraydirectory= '../FeatureSampleFoodClassification/watershed_array/'\nphotodirectory='../SampleFoodClassifier_Norm/'\n\nif not os.path.exists(arraydirectory):\n os.makedirs(arraydirectory)\n \nfor fn in os.listdir(photodirectory):\n if os.path.isfile(photodirectory + fn) and '.jpg' in fn:\n \n \n img=np.array(Image.open(photodirectory + fn))\n img = rgb2gray(img)\n thresh = threshold_otsu(img)\n img_bin = img > thresh\n img_closed=binary_closing(img_bin)\n # Now we want to separate the two objects in image\n # Generate the markers as local maxima of the distance to the background\n distance = ndi.distance_transform_edt(img_closed)\n local_maxi = peak_local_max(distance, indices=False)\n markers = ndi.label(local_maxi)[0]\n ws = watershed(-distance, markers, mask=img_closed)\n ws_flat=[item for sublist in ws for item in 
sublist]\n np.save(arraydirectory + fn,ws_flat)\n \nprint('done')", "done\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb78be6bb9523d809e74f7024101ed16cb640671
279,132
ipynb
Jupyter Notebook
src/jupyter/rerank-with-mono-t5.ipynb
webis-de/ecir22-anchor-text
254ab53fe4a5fca809af0f846f82b3fb6abd2a82
[ "MIT" ]
3
2021-11-16T19:52:54.000Z
2022-01-20T22:55:01.000Z
src/jupyter/rerank-with-mono-t5.ipynb
webis-de/ecir22-anchor-text
254ab53fe4a5fca809af0f846f82b3fb6abd2a82
[ "MIT" ]
null
null
null
src/jupyter/rerank-with-mono-t5.ipynb
webis-de/ecir22-anchor-text
254ab53fe4a5fca809af0f846f82b3fb6abd2a82
[ "MIT" ]
null
null
null
30.297623
268
0.530129
[ [ [ "# Rerank with MonoT5", "_____no_output_____" ] ], [ [ "!nvidia-smi", "Sun Oct 3 20:49:20 2021 \r\n+-----------------------------------------------------------------------------+\r\n| NVIDIA-SMI 460.84 Driver Version: 460.84 CUDA Version: 11.2 |\r\n|-------------------------------+----------------------+----------------------+\r\n| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |\r\n| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |\r\n| | | MIG M. |\r\n|===============================+======================+======================|\r\n| 0 A100-SXM4-40GB Off | 00000000:41:00.0 Off | 0 |\r\n| N/A 19C P0 49W / 400W | 0MiB / 40536MiB | 0% Default |\r\n| | | Disabled |\r\n+-------------------------------+----------------------+----------------------+\r\n \r\n+-----------------------------------------------------------------------------+\r\n| Processes: |\r\n| GPU GI CI PID Type Process name GPU Memory |\r\n| ID ID Usage |\r\n|=============================================================================|\r\n| No running processes found |\r\n+-----------------------------------------------------------------------------+\r\n" ], [ "from pygaggle.rerank.base import Query, Text\nfrom pygaggle.rerank.transformer import MonoT5\nfrom trectools import TrecRun\nimport ir_datasets\nmonoT5Reranker = MonoT5()\n\n\nDIR='/mnt/ceph/storage/data-in-progress/data-teaching/theses/wstud-thesis-probst/retrievalExperiments/runs-ecir22/'\nDIR_v2='/mnt/ceph/storage/data-in-progress/data-teaching/theses/wstud-thesis-probst/retrievalExperiments/runs-marco-v2-ecir22/'", "2021-10-03 21:01:36 [INFO] loader: Loading faiss with AVX2 support.\n2021-10-03 21:01:36 [INFO] loader: Could not load library with AVX2 support due to:\nModuleNotFoundError(\"No module named 'faiss.swigfaiss_avx2'\",)\n2021-10-03 21:01:36 [INFO] loader: Loading faiss.\n2021-10-03 21:01:36 [INFO] loader: Successfully loaded faiss.\n" ], [ "def load_topics(version, file):\n import pandas as pd\n return pd.read_csv('../../Data/navigational-topics-and-qrels-ms-marco-v' + str(version) + '/' + file, sep='\\t', names=['num', 'query'])\n\ndf_popular_queries = load_topics(1, 'topics.msmarco-entrypage-popular.tsv')\ndf_random_queries = load_topics(1, 'topics.msmarco-entrypage-random.tsv')\ndf_popular_run = TrecRun(DIR + 'entrypage-popular/run.ms-marco-content.bm25-default.txt')\ndf_random_run = TrecRun(DIR + 'entrypage-random/run.ms-marco-content.bm25-default.txt')\n\ndf_popular_queries_v2 = load_topics(2, 'topics.msmarco-v2-entrypage-popular.tsv')\ndf_random_queries_v2 = load_topics(2, 'topics.msmarco-v2-entrypage-random.tsv')\ndf_popular_run_v2 = TrecRun(DIR_v2 + 'entrypage-popular/run.msmarco-doc-v2.bm25-default.txt')\ndf_random_run_v2 = TrecRun(DIR_v2 + 'entrypage-random/run.msmarco-doc-v2.bm25-default.txt')", "_____no_output_____" ], [ "df_popular_queries", "_____no_output_____" ], [ "df_popular_run", "_____no_output_____" ], [ "df_random_queries", "_____no_output_____" ], [ "df_random_run", "_____no_output_____" ], [ "df_random_run.run_data", "_____no_output_____" ] ], [ [ "# The actual reranking", "_____no_output_____" ] ], [ [ "def get_query_or_fail(df_queries, topic_number):\n ret = df_queries[df_queries['num'] == int(topic_number)]\n if len(ret) != 1:\n raise ValueError('Could not handle ' + str(topic_number))\n \n return ret.iloc[0]['query']\n\nmarco_v1_doc_store = ir_datasets.load('msmarco-document').docs_store()\nmarco_v2_doc_store = ir_datasets.load('msmarco-document-v2').docs_store()\n\ndef 
get_doc_text(doc_id):\n if doc_id.startswith('msmarco_doc_'):\n ret = marco_v2_doc_store.get(doc_id)\n else:\n ret = marco_v1_doc_store.get(doc_id)\n \n return ret.title + ' ' + ret.body\n\ndef docs_for_topic(df_run, topic_number):\n return df_run.run_data[df_run.run_data['query'] == int(topic_number)].docid\n\ndef rerank_with_model(topic, df_queries, df_run, model):\n query = get_query_or_fail(df_queries, topic)\n print('rerank query ' + query)\n documents = [Text(get_doc_text(i), {'docid': i}, 0) for i in docs_for_topic(df_run, topic)[:100]]\n ret = sorted(model.rerank(Query(query), documents), key=lambda i: i.score, reverse=True)\n\n return [{'score': i.score, 'id': i.metadata['docid'], 'body': i.text} for i in ret]\n\ndef rerank(file_name, df_run, df_queries, model, tag):\n from tqdm import tqdm\n \n with open(file_name, 'w') as out_file:\n for topic in tqdm(df_queries.num):\n for i in zip(range(100), rerank_with_model(topic, df_queries, df_run, model)):\n out_file.write(str(topic) + ' Q0 ' + i[1]['id'] + ' ' + str(i[0] + 1) + ' ' + str(i[1]['score']) + ' ' + tag + '\\n')", "_____no_output_____" ] ], [ [ "# Marco V1", "_____no_output_____" ] ], [ [ "rerank(DIR + 'entrypage-random/run.ms-marco-content.bm25-mono-t5.txt', df_random_run, df_random_queries.copy(), monoT5Reranker, 'mono-t5-at-bm25')", "\r 0%| | 0/99 [00:00<?, ?it/s]" ], [ "rerank(DIR + 'entrypage-popular/run.ms-marco-content.bm25-mono-t5.txt', df_popular_run, df_popular_queries.copy(), monoT5Reranker, 'mono-t5-at-bm25')", "\r 0%| | 0/99 [00:00<?, ?it/s]" ] ], [ [ "# Marco V2", "_____no_output_____" ] ], [ [ "rerank(DIR_v2 + 'entrypage-random/run.ms-marco-content.bm25-mono-t5.txt', df_random_run_v2, df_random_queries_v2.copy(), monoT5Reranker, 'mono-t5-at-bm25')", "\r 0%| | 0/99 [00:00<?, ?it/s]" ], [ "rerank(DIR_v2 + 'entrypage-popular/run.ms-marco-content.bm25-mono-t5.txt', df_popular_run_v2, df_popular_queries_v2.copy(), monoT5Reranker, 'mono-t5-at-bm25')", "\r 0%| | 0/99 [00:00<?, ?it/s]" ] ], [ [ "# Rerank with MonoBERT", "_____no_output_____" ] ], [ [ "from pygaggle.rerank.transformer import MonoBERT\n\nmonoBert = MonoBERT()", "_____no_output_____" ], [ "rerank(DIR + 'entrypage-random/run.ms-marco-content.bm25-mono-bert.txt', df_random_run, df_random_queries.copy(), monoBert, 'mono-bert-at-bm25')", "\r 0%| | 0/99 [00:00<?, ?it/s]" ], [ "rerank(DIR + 'entrypage-popular/run.ms-marco-content.bm25-mono-bert.txt', df_popular_run, df_popular_queries.copy(), monoBert, 'mono-bert-at-bm25')", "\r 0%| | 0/99 [00:00<?, ?it/s]" ], [ "rerank(DIR_v2 + 'entrypage-random/run.ms-marco-content.bm25-mono-bert.txt', df_random_run_v2, df_random_queries_v2.copy(), monoBert, 'mono-bert-at-bm25')", "_____no_output_____" ], [ "rerank(DIR_v2 + 'entrypage-popular/run.ms-marco-content.bm25-mono-bert.txt', df_popular_run_v2, df_popular_queries_v2.copy(), monoBert, 'mono-bert-at-bm25')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
cb78d7fb4caca06b32fde5d3b0b5860dc3286595
5,630
ipynb
Jupyter Notebook
Python/Notebook_Scripts/Session 0 - Setting Up.ipynb
odowns/Crash-Course-4-Practitioners
35dc18e9fd49d81e7f2f3cdf53c118553c7ba800
[ "MIT" ]
null
null
null
Python/Notebook_Scripts/Session 0 - Setting Up.ipynb
odowns/Crash-Course-4-Practitioners
35dc18e9fd49d81e7f2f3cdf53c118553c7ba800
[ "MIT" ]
null
null
null
Python/Notebook_Scripts/Session 0 - Setting Up.ipynb
odowns/Crash-Course-4-Practitioners
35dc18e9fd49d81e7f2f3cdf53c118553c7ba800
[ "MIT" ]
null
null
null
38.827586
444
0.642629
[ [ [ "# Python for Policy Analysts\n\n## Session 0: Setting Up Python\n\nCreated by: O Downs ([email protected])\n\nInstructor Edition", "_____no_output_____" ], [ "### Goals: \n\n* Getting you started with Python!\n* Download Anaconda, which will facilitate your Python use\n* Understand Terminal commands\n* Learn how to `pip install`\n* Learn how to start up a Jupyter notebook", "_____no_output_____" ], [ "## Step 0: Understanding Some Core Concepts\n\nIf you're new to coding, congratulations! You've taken the first step towards being an amazing programmer. \n\nIf you're new to Python, awesome!\n\nHere are some things you need to understand before coding:\n\n* Zero-indexing: in computer science, you generally start counting at zero. So for example, the first item in a list is item ZERO, not item one. This can get confusinig and lead to off-by-one errors, but never fear! OFten you can solve those problems by tweaking your code.\n* Computers aren't smart: although they can do amazing calculations and run programs, computers aren't intelligent the way humans are. When you're coding, be aware that your computer doesn't know what you want it to do until you tell it exactly–it won't assume anything. So be patient, and remember that errors are inevitable! Even the best programmers make lots of errors and spend a lot of time debugging.\n* Getting help: everyone needs help sometimes. Thankfully, the internet is great for getting help! If you're stuck on a problem, try Googling it. No need to reinvent the wheel–somebody's probably had exactly the same problem as you. But BE MINDFUL of websites like StackOverflow and others: not everyone is always right, so be smart and critical about code you see. And of course, remember that somebody wrote that code! Don't steal code!", "_____no_output_____" ], [ "## Step 1: Anaconda\n\nThe first step in getting you set up with Python is getting Anaconda. Anaconda can be downloaded [here](https://www.anaconda.com/distribution/) and is the platform you'll use to write code. \n\n(heres where i dont really understand things...i dont know what anaconda actually does)\n\nSo go ahead and download it.", "_____no_output_____" ], [ "## Step 2: Understanding Terminal (Mac)\n\n(also youll have to put in stuff for pc because i dont know anything about that)\n\nIt's not neccesary to understand what's happening inside the computer if you're not interested in doing hardcore computer science, but it is important to know some key commands in the Command Line. \n\nThe Command Line is your interface with the core of the computer. It's comparable to the windows you can open to view and move files on a Mac, but you only use your keyboard. It also has additional functionalities. \n\nOn Mac, open Terminal. Don't worry if it looks scary–this is your friend!", "_____no_output_____" ], [ "I dont really want to explain all this so im just linking the website i used to learn it. [here](https://www.codecademy.com/learn/learn-the-command-line)", "_____no_output_____" ], [ "## Step 3: `pip install`\n\nThis is a command you'll use a lot when you're starting out. `pip install` installs libraries for your computer. For example, in the Command Line, type `pip install seaborn` and you'll be able to use the seaborn library!", "_____no_output_____" ], [ "## Step 4: Start Up a Notebook\n\nThere are two easy ways to set up a notebook. \n\nWay 1: Using Anaconda\n1. Open Anaconda\n2. Click on the Jupyter Notebook \"Launch\" button. 
This will open a Terminal window which will open a Jupyter window in your browser. \n3. Navigate via this GUI to your preferred directory (folder)\n4. In the top right hand corner, click \"New\" and under \"Python\" click \"Notebook\"\n5. Write your code!\n6. You can move this notebook around using the Command Line or with your regular interface\n\nWay 2: Using the Command Line\n1. Open Terminal\n2. Type `jupyter notebook` and hit Enter\n3. Navigate via this GUI to your preferred directory (folder)\n4. In the top right hand corner, click \"New\" and under \"Python\" click \"Notebook\"\n5. Write your code!\n6. You can move this notebook around using the Command Line or with your regular interface", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
cb78de7e55e717662b8dff16245fff9b8c8f4c63
37,124
ipynb
Jupyter Notebook
hurricane_ike_levels.ipynb
jkmacc-LANL/sagemaker-fargate-test
aaf283b1ee338ee732600f0172e860341fa61651
[ "Apache-2.0" ]
null
null
null
hurricane_ike_levels.ipynb
jkmacc-LANL/sagemaker-fargate-test
aaf283b1ee338ee732600f0172e860341fa61651
[ "Apache-2.0" ]
null
null
null
hurricane_ike_levels.ipynb
jkmacc-LANL/sagemaker-fargate-test
aaf283b1ee338ee732600f0172e860341fa61651
[ "Apache-2.0" ]
null
null
null
31.196639
365
0.600259
[ [ [ "# Hurricane Ike Maximum Water Levels\nCompute the maximum water level during Hurricane Ike on a 9 million node triangular mesh storm surge model. Plot the results with Datashader. ", "_____no_output_____" ] ], [ [ "import xarray as xr\nimport numpy as np\nimport pandas as pd\nimport hvplot.xarray\nimport fsspec\nfrom dask.distributed import Client, progress", "_____no_output_____" ], [ "#from dask_kubernetes import KubeCluster\n#cluster = KubeCluster()", "_____no_output_____" ], [ "%%time\nfrom dask_cloudprovider import FargateCluster\ncluster = FargateCluster(n_workers=1, image='rsignell/pangeo-worker:2020-01-23c')\ncluster.dashboard_link", "_____no_output_____" ] ], [ [ "### Start a dask cluster to crunch the data", "_____no_output_____" ] ], [ [ "cluster.scale(2);", "_____no_output_____" ], [ "cluster", "_____no_output_____" ] ], [ [ "For demos, I often click in this cell and do \"Cell=>Run All Above\", then wait until the workers appear. This can take several minutes (up to 6!) for instances to spin up and Docker containers to be downloaded. Then I shutdown the notebook and run again from the beginning, and the workers will fire up quickly because the instances have not spun down yet. ", "_____no_output_____" ] ], [ [ "%%time\nclient = Client(cluster)\nclient", "_____no_output_____" ] ], [ [ "### Read the data using the cloud-friendly zarr data format", "_____no_output_____" ] ], [ [ "ds = xr.open_zarr(fsspec.get_mapper('s3://pangeo-data-uswest2/esip/adcirc/ike', anon=False, requester_pays=True))", "_____no_output_____" ], [ "ds['zeta']", "_____no_output_____" ] ], [ [ "How many GB of sea surface height data do we have?", "_____no_output_____" ] ], [ [ "ds['zeta'].nbytes/1.e9", "_____no_output_____" ] ], [ [ "Take the maximum over the time dimension and persist the data on the workers in case we would like to use it later. This is the computationally intensive step.", "_____no_output_____" ] ], [ [ "%%time\nmax_var = ds['zeta'].max(dim='time').compute()\nprogress(max_var)", "_____no_output_____" ] ], [ [ "### Visualize data on mesh using HoloViz.org tools", "_____no_output_____" ] ], [ [ "import numpy as np\nimport datashader as dshade\nimport holoviews as hv\nimport geoviews as gv\nimport cartopy.crs as ccrs\nimport hvplot.xarray\nimport holoviews.operation.datashader as dshade\n\ndshade.datashade.precompute = True\nhv.extension('bokeh')", "_____no_output_____" ], [ "v = np.vstack((ds['x'], ds['y'], max_var)).T\nverts = pd.DataFrame(v, columns=['x','y','vmax'])", "_____no_output_____" ], [ "points = gv.operation.project_points(gv.Points(verts, vdims=['vmax']))", "_____no_output_____" ], [ "tris = pd.DataFrame(ds['element'].values.astype('int')-1, columns=['v0','v1','v2'])", "_____no_output_____" ], [ "tiles = gv.tile_sources.OSM", "_____no_output_____" ], [ "value = 'max water level'\nlabel = '{} (m)'.format(value)\ntrimesh = gv.TriMesh((tris, points), label=label)\nmesh = dshade.rasterize(trimesh).opts(\n cmap='rainbow', colorbar=True, width=600, height=400)", "_____no_output_____" ], [ "tiles * mesh", "_____no_output_____" ] ], [ [ "### Extract a time series at a specified lon, lat location", "_____no_output_____" ], [ "Because Xarray does not yet understand that `x` and `y` are coordinate variables on this triangular mesh, we create our own simple function to find the closest point. 
If we had a lot of these, we could use a more fancy tree algorithm.", "_____no_output_____" ] ], [ [ "# find the indices of the points in (x,y) closest to the points in (xi,yi)\ndef nearxy(x,y,xi,yi):\n ind = np.ones(len(xi),dtype=int)\n for i in range(len(xi)):\n dist = np.sqrt((x-xi[i])**2+(y-yi[i])**2)\n ind[i] = dist.argmin()\n return ind", "_____no_output_____" ], [ "#just offshore of Galveston\nlat = 29.2329856\nlon = -95.1535041", "_____no_output_____" ], [ "ind = nearxy(ds['x'].values,ds['y'].values,[lon], [lat])[0]", "_____no_output_____" ], [ "ds['zeta'][:,ind].hvplot(grid=True)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ] ]
cb791775355b66658dca8955733013d3ca982ed8
6,184
ipynb
Jupyter Notebook
Demo1.ipynb
Markhristian/CPEN-21A-ECE-2-1
a718048ac2ff022f5b3e852088f06d6f3e55fe63
[ "Apache-2.0" ]
null
null
null
Demo1.ipynb
Markhristian/CPEN-21A-ECE-2-1
a718048ac2ff022f5b3e852088f06d6f3e55fe63
[ "Apache-2.0" ]
null
null
null
Demo1.ipynb
Markhristian/CPEN-21A-ECE-2-1
a718048ac2ff022f5b3e852088f06d6f3e55fe63
[ "Apache-2.0" ]
null
null
null
21.105802
231
0.382277
[ [ [ "<a href=\"https://colab.research.google.com/github/Markhristian/CPEN-21A-ECE-2-1/blob/main/Demo1.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "##Python Variable", "_____no_output_____" ] ], [ [ "x=10\ny=20\nz=30\nprint(x)\nprint(y)\nprint(z)\np,q,r=25,50,75\nprint(p)\nprint(q)\nprint(r)\n", "10\n20\n30\n25\n50\n75\n" ], [ "a=\"Mark\"\nA=\"Macapinlac\"\nprint(a)\nprint(A)\n", "Mark\nMacapinlac\n" ] ], [ [ "##Casting", "_____no_output_____" ] ], [ [ "f=float(100)\nprint(f)\ni=int(100)\nprint(i)\n", "100.0\n100\n" ] ], [ [ "##Multiple Variables with One Value\n", "_____no_output_____" ] ], [ [ "m = n = o = 1000\nprint(m)\nprint(n)\nprint(o)\n", "1000\n1000\n1000\n" ] ], [ [ "##Operation in Python\n", "_____no_output_____" ] ], [ [ "A = 5\nB = 5\nprint(A+B)\nprint(A-B)\nprint(A*B)", "10\n0\n25\n" ], [ "A is B", "_____no_output_____" ], [ "A is not B", "_____no_output_____" ], [ "A>B or A<B", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
cb791ea9b980ab38e50641d00a54386faecf7add
18,144
ipynb
Jupyter Notebook
usr/duc/github and git/Git and GitHub.ipynb
Bugnon/oc-2018
7961de5ba9923512bd50c579c37f1dadf070b692
[ "MIT" ]
3
2018-09-20T12:16:48.000Z
2019-06-21T08:32:17.000Z
usr/duc/github and git/Git and GitHub.ipynb
Bugnon/oc-2018
7961de5ba9923512bd50c579c37f1dadf070b692
[ "MIT" ]
null
null
null
usr/duc/github and git/Git and GitHub.ipynb
Bugnon/oc-2018
7961de5ba9923512bd50c579c37f1dadf070b692
[ "MIT" ]
2
2018-09-20T11:55:05.000Z
2019-09-01T19:40:13.000Z
29.550489
434
0.613812
[ [ [ "# GitHub : Le réseau social des développeurs grâce à Git", "_____no_output_____" ], [ "_Auteur_: Hugo Ducommun\n\n_Date_: 30 Mai 2019", "_____no_output_____" ], [ "_GitHub_ est un plateforme de projets de jeunes développeurs motivés qui souhaient publier leur travail de manière libre (OpenSource). _GitHub_ est connu pour être pratique lorsqu'on travaille en équipe. Il permet à chaque collaborateurs de travailler sur un seul et même projet sans influencer l'avancement des autres. Ce site web peut également être utilisé de manière professionnelle grâce à des comptes payants.", "_____no_output_____" ], [ "## On parle souvent de `git`, de quoi s'agit-il ?\n\n**git** est un logiciel de versioning (gestion de versions) que le site _GitHub_ utilise.\n\nIl permet par conséquent de faciliter l'accès à l'historique des anciennes versions d'un projet et de synchroniser facilement les fichiers entre eux grâce à un système de **branches** que je développerai dans le point suivant.\n\nEn réalité c'est git qui est à la base du site web _GitHub_. Mais _GitHub_ a rajouté une interface graphique à git qui s'utilise principalement dans un terminal, c'est pour cela que _GitHub_ est plus connu que le logiciel de versioning lui-même. Pour cette raison, j'étudirai ici principalement le logiciel git et rajouterai quelques informations supplémentaires sur _GitHub_.", "_____no_output_____" ], [ "# Fonctionnement de `git`", "_____no_output_____" ], [ "### Introduction imagée de la notion de *branch*.", "_____no_output_____" ], [ "Git procède en branches. Voici un petit schéma que j'ai trouvé très expressif sur la manière dont ça fonctionne.", "_____no_output_____" ], [ "![git_branches](./img/git_branch.png)", "_____no_output_____" ], [ "Nous avons donc deux types de *branches* différentes, oui oui deux et pas trois comme sur le schéma. Il y a la *branch* du milieu appelé **master branch**, c'est là où se situera la version officielle et fonctionnelle de votre projet. Puis un deuxième type appelé **feature branch** qui est caractérisé par les branches nommées sur le schéma, hat et glasses (en réalité ce sont toutes les branches exceptés la master branch).\n\nLe fonctionnement est simple, le projet est ici de rajouter un chapeau et des lunettes à l'image du pouple de base. Un collaborateur s'occupera donc du chapeau (C1) et un autre des lunettes (C2). Ils vont procéder de cette manière :\n\n1. C1 et C2 vont donc copier le projet actuel (master branch) dans une feature branch personnelle.\n\n2. Faire leurs modifications et les rendre fonctionnelles (ajoutez un chapeau ou des lunettes).\n\n3. Uploader leurs modifications dans la master branch pour avoir un projet complet.", "_____no_output_____" ], [ "### Termes techniques", "_____no_output_____" ], [ "Bien sûr, ceci est un peu plus compliqué dans un vrai projet, il y a beaucoup plus de choses à faire que rajouter deux accessoires, et nous devons procéder par ligne de commandes. 
Mais c'est une bonne approche de cette notion.\n\nJe vais donc ici détaillé quelques termes qu'utilise git dans son fonctionnement.", "_____no_output_____" ], [ "---", "_____no_output_____" ], [ "#### ![icon_repository](./img/icons-repository.png)\n#### Repository", "_____no_output_____" ], [ "Un repository (en français dépôt) est l'ensemble de votre projet : les documents que vous éditez et dont\nvous suivez les modifications s'y trouvent.\nLe repository peut être locale ou se trouver sur votre serveur dédié.", "_____no_output_____" ], [ "---", "_____no_output_____" ], [ "#### Branch", "_____no_output_____" ], [ "C'est une des branches copiées de la master branch par défaut. Après avoir créé la branche copiée de master, elle ne sera plus affectée par les changement opérés sur les autres branches du projet.\n\nSur le schéma ci-dessous, 'Copie de A' est une branche de 'Branche A'. D'ailleurs pour avoir copié la branche A, l'utilisateur a dû **merge**.", "_____no_output_____" ], [ "La commande pour créer une nouvelle branche est : `git branch nomNouvelleBranche`\n\nLa commande : `git branch --list` vous permet de voir la liste de toutes les branches du repository actuel.", "_____no_output_____" ], [ "![branch](./img/branch.png)", "_____no_output_____" ], [ "---", "_____no_output_____" ], [ "#### ![icon_pull-request](./img/icons-pull-request.png)\n#### Pull request", "_____no_output_____" ], [ "Ce terme peut être traduit par demande de fusion (**merge**).\nC'est lorsque le collaborateur veut fusion sa branch avec une autre (généralement la master branch) pour appliquer les changements tels que les corrections de bugs ou ajout de fonctionnalité à la branche cible.\nLe responsable de la branche ciblée est libres de refuser ou accepter ce **pull request**.", "_____no_output_____" ], [ "![pull-request](./img/pull_request.png)", "_____no_output_____" ], [ "---", "_____no_output_____" ], [ "#### ![icon_fork](./img/icons-code-fork.png)\n#### Fork ", "_____no_output_____" ], [ "Fork (littéralement fourchette en français), correspond à copier une branche déjà existante. On fork souvent la master branch au début pour pouvoir se créer notre propre branch et modifier le projet sans impacter sur la master branch.", "_____no_output_____" ], [ "---", "_____no_output_____" ], [ "#### ![icon_merge](./img/icons-merge-git.png)\n#### Merge ", "_____no_output_____" ], [ "Merge est un peu le contraire de fork, après avoir modifié tout ce que l'on voulait, on peut fusionner notre branche avec une autre. Cette fonctionnalité est souvent protégée par un pull request sinon tout le monde pourrait modifier n'importe quelle branche.\n\nLes modifications de la branche B seront donc appliquées à la branche A si le merge fonctionne.", "_____no_output_____" ], [ "La commande pour fusionner la branche B est : `git merge brancheB`\n\nAttention il est important d'exécuter cette commande depuis la branche A !", "_____no_output_____" ], [ "![merge](./img/merge.png)", "_____no_output_____" ], [ "---", "_____no_output_____" ], [ "#### ![icon_commit](./img/icons-commit-git.png)\n#### Commit ", "_____no_output_____" ], [ "Commit est l'action la plus courante que vous allez exécuter avec git. Elle correspond, comme l'indique son icône à une modification de la branche en question. 
Lorsque vous avez localement modifié une branche, vous devez **commit** pour enregister les changements, généralement avec un message d'information pour pouvoir par la suite mieux retrouver des anciennes modifications.", "_____no_output_____" ], [ "La commande pour commit en rajoutant un message d'exemple est : `git commit -m 'Add the cow-boy hat'`", "_____no_output_____" ], [ "![commit](./img/commit.png)", "_____no_output_____" ], [ "---", "_____no_output_____" ], [ "#### ![icon_push](./img/icons-arrow-up.png)\n#### Push", "_____no_output_____" ], [ "Envoye tous vos commits dans le serveur dédié sur lequel est hebergé le repository (dépôt distant). Vous 'envoyez' en quelque sorte vos fichiers à vos collaborateurs.", "_____no_output_____" ], [ "La commande pour push est : `git push`", "_____no_output_____" ], [ "---", "_____no_output_____" ], [ "#### ![icon_pull](./img/icons-down-arrow.png)\n#### Pull ", "_____no_output_____" ], [ "Effet contraire de push, vous recevez les fichiers envoyés par vos collègues. Avant chaque grosse séance de travail, assurez vous de pull pour voir l'avancement de votre équipe. Il charge les dossiers et fichiers du repository sur votre machine en local.", "_____no_output_____" ], [ "La commande pour pull est : `git pull`", "_____no_output_____" ], [ "---", "_____no_output_____" ], [ "# Autres commandes dans git", "_____no_output_____" ], [ "Nous avons déjà vu quelques commandes dans les termes techniques, voici le reste des commandes basiques :", "_____no_output_____" ], [ "* `git init` : initialise votre dossier en tant qu'un dossier git\n* `git clone URL` : clone un repository déjà existant dans le dossier où vous exécutez la commande (exemple url : https://github.com/Bugnon/oc-2018.git)\n* `git status` : affiche le statuts des fichiers de votre repository. Permet de voir où nous en sommes.\n* `git add nomFichier` : ajouter des fichiers dans l'index (pré-sauvegarde). La commande `git add *` ajoute tous les fichiers modifiés.\n* `git checkout nomBranche` : s'utilise en tant que switch d'une branche à une autre (utilisation basique)", "_____no_output_____" ], [ "Lors de la première utilisation de git, vous devrez enregistrer votre pseudo et votre email.\nAprès avoir utilisez la commande `git init` qui initialise votre dossier en tant qu'un dossier git, utilisez les deux commandes ci-dessous :\n* `git config --global user.name 'hugoducom'`\n* `git config --global user.email '[email protected]'`", "_____no_output_____" ], [ "L'avantage de Git reste que c'est une source particulièrement bien documentée en ligne car ceux qui le maîtrise sont en général assez actif sur les forums. On arrive toujours à trouver de l'aide sur les différentes plateformes. Aussi grâce aux commandes : `git help nomCmd` par exemple `git help checkout`.", "_____no_output_____" ], [ "# Schéma récapitulatif de git et ses commandes", "_____no_output_____" ], [ "![git schematic](./img/git_schematic.png)", "_____no_output_____" ], [ "_La commande `git fetch` ne sera pas traitée ici._", "_____no_output_____" ], [ "### Publication d'un fichier", "_____no_output_____" ], [ "En somme lorsqu'on veut publier un fichier, par exemple _index.html_ dans notre repository, il faut taper dans l'ordre :\n1. `git pull`\n2. `git add index.html`\n3. `git commit -m 'Ajoute de mon fichier html'`\n4. 
`git push`", "_____no_output_____" ], [ "Le `git pull` du début permet de ne pas avoir de conflit de fichier lorsqu'on push en mettant à jour notre copie de travail versionnée.", "_____no_output_____" ], [ "# Vous n'avez toujours pas compris ? Voici un exemple pratique", "_____no_output_____" ], [ "Je suis un jeune codeur web qui souhaite partager mes premiers pas sur une plateforme de développement comme GitHub. Je crée donc un dossier sur mon bureau appelé \"web\". C'est ce dossier que je souhaite partager sur GitHub. Je télécharge donc [Git](https://git-scm.com/downloads).\n\nIl s'agit d'un petit projet, je travaillerai donc seulement dans la _master branch_ et ne créerai pas d'autre branche.", "_____no_output_____" ], [ "Après l'installation, je fais clique-droit sur mon dossier et appuie sur **Git Bash Here**. Une console s'ouvrira et c'est depuis là que vous taperez vos commandes.", "_____no_output_____" ], [ "Comme je me suis informé sur ce notebook, je tape d'abord `git init`. Et enregistre mes informations à l'aide de `git config`.", "_____no_output_____" ], [ "![init](./img/git-init.PNG)", "_____no_output_____" ], [ "Je commence ensuite à développer tranquillement dans ce dossier. Je crée donc mon fichier _index.html_. Je commence à coder et arrive le moment où je souhaite mettre en ligne ce que j'ai fait. Je fais donc un `git add index.html` ou `git add *` si je veux add tous les fichiers de mon dossier web.\n\nJe remarque en utilisant la commande `git status` que mes fichiers sont prêts à être commit au dépôt local.\n\nPuis `git commit -m 'Ajout de la première version de mon site'`.", "_____no_output_____" ], [ "![git add](./img/git-add.PNG)", "_____no_output_____" ], [ "Rendez-vous maintenant sur [GitHub](https://github.com/new) pour créer notre repository. Je me connecte et remplis les informations nécessaire.", "_____no_output_____" ], [ "![github](./img/github.PNG)", "_____no_output_____" ], [ "Par la suite il faut exécuter les deux commandes que GitHub nous demande :\n* `git remote add origin https://github.com/hugoducom/web.git`\n* `git push -u origin master`\n\nIl se peut que lors de la deuxième commande, on vous demande votre login et votre mot de passe GitHub. Il faut donc avoir un compte GitHub.", "_____no_output_____" ], [ "![git remote](./img/git-remote.PNG)", "_____no_output_____" ], [ "Le plus dur est fait ! Votre repository est en ligne sur GitHub, bravo ! En rechargeant la page https://github.com/hugoducom/web vous allez tomber sur votre fichier _index.html_.", "_____no_output_____" ], [ "![win](./img/win.PNG)", "_____no_output_____" ], [ "Pour la suite de votre aventure de développement web, il vous faudra simplement suivre le point 'Publication d'un fichier' un peu plus haut de ce dossier.", "_____no_output_____" ], [ "Lorsque vous aurez compris le principe de git et serez capable de tout faire en lignes de commande, vous pourrez télécharger des applications qui feront le travail à votre place comme [GitHub Desktop](https://desktop.github.com/), qui facilitera grandement vos partages de fichier dans votre carrière de développement.", "_____no_output_____" ], [ "---", "_____no_output_____" ], [ "#### Sources:\n\n* https://gerardnico.com/code/version/git/branch\n* https://fr.wikipedia.org/wiki/GitHub\n* https://fr.wikipedia.org/wiki/Git\n* https://www.sebastien-gandossi.fr/blog/difference-entre-git-reset-et-git-rm-cached\n* https://www.youtube.com/watch?v=4o9qzbssfII", "_____no_output_____" ] ] ]
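The publication workflow described in the Git record above (pull, then add, commit and push) can also be scripted. The sketch below is illustrative only and not part of the stored notebook; it is plain Python calling git through `subprocess`, and it assumes git is on the PATH and that it runs inside an initialised repository with a configured remote.

```python
# Sketch: scripting the pull -> add -> commit -> push sequence described above.
import subprocess

def publish(path, message):
    """Run the basic git publication workflow for one file."""
    for cmd in (["git", "pull"],
                ["git", "add", path],
                ["git", "commit", "-m", message],
                ["git", "push"]):
        subprocess.run(cmd, check=True)  # stop and raise if any step fails

# Hypothetical usage, echoing the example in the record above:
# publish("index.html", "Ajout de la premiere version de mon site")
```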
[ "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
cb7927e68842503056e94f4df99e979cd978c0a1
18,153
ipynb
Jupyter Notebook
Homeworks/HW_2_trees/HW2_trees_open.ipynb
mrpriboi/ml-mipt
efdc43439010247e456b46de2a94608283a8dde3
[ "MIT" ]
null
null
null
Homeworks/HW_2_trees/HW2_trees_open.ipynb
mrpriboi/ml-mipt
efdc43439010247e456b46de2a94608283a8dde3
[ "MIT" ]
null
null
null
Homeworks/HW_2_trees/HW2_trees_open.ipynb
mrpriboi/ml-mipt
efdc43439010247e456b46de2a94608283a8dde3
[ "MIT" ]
null
null
null
27.714504
520
0.56156
[ [ [ "import numpy as np\nfrom nose.tools import assert_almost_equal, assert_almost_equals, assert_equal", "_____no_output_____" ] ], [ [ "Ответами на задачи являются функции. Они будут проверены автоматическими тестами на стороне сервера. \n\nНекоторые тесты выполняются локально для самопроверки.", "_____no_output_____" ], [ "### Вопросы для самоконтроля \nЭта часть задания не оценивается, ответы можно не записывать\n1. Что такое решающее дерево? Как по построенному дереву найти прогноз для объекта?\n2. Почему для любой выборки можно построить дерево, имеющее нулевую ошибку на ней? Приведите примеры.\n3. Почему не рекомендуется строить небинарные деревья (имеющие более двух потомков у каждой вершины)?\n4. Как устроен жадный алгоритм построения дерева?\n5. Какие критерии информативности для решения задачи классификации вы знаете?\n6. Какой смысл у критерия Джини и энтропийного критерия?\n7. Какие критерии информативности для решения задачи регрессии вы знаете?\n8. Что такое pruning (стрижка) дерева? Чем отличаются post-pruning и pre-pruning?\n9. Какие методы обработки пропущенных значений вы знаете?\n10. Как учитывать категориальные признаки в решающем дереве?", "_____no_output_____" ], [ "### Критерии информативности (45%)\n\nКритерий информативности для набора объектов $R$ вычисляется на основе того, насколько хорошо их целевые переменные предсказываются константой (при оптимальном выборе этой константы):\n$$\nH(R) = \\min_{c \\in Y} \\dfrac{1}{|R|} \\sum_{(x^i,y^i) \\in R} L(y^i, c),\n$$\nгде $L(y^i, c)$- некоторая функция потерь. Соответственно, чтобы получить вид критерия при конкретной функции потерь, можно аналитически найти оптимальное значение константы и подставить его в формулу для $H(R)$. \n\n\nВыведите критерии информативности для следующих функций потерь:\n\nДля задачи регрессии,\n1. $L(y,c) = (y-c)^2$, где $y$ - скаляр, c - константа.\n\nДля задачи классификации на $K$ классов, с дополнительным ограничением\n$$c = [c_1,\\ldots,c_k], 0 \\leq c_i \\leq 1 \\forall i, \\sum_{k=1}^K c_k = 1,$$\n2. $L(y,c) = \\sum_{k=1}^K (c_k-[y_k=1])^2$, где $y$ - это one-hot вектор, $y_k$ - его элемент k-тый элемент, $c$ - вектор вероятностей.\n3. $L(y,c) = -\\sum_{k=1}^K [y_k=1]\\log c_k$, где $y$ - это one-hot вектор, $y_k$ - его элемент k-тый элемент, $c$ - вектор вероятностей.", "_____no_output_____" ] ], [ [ "def H_1(ys):\n \"\"\"\n ys is a 1-dimentional numpy array containing y values for every object from R.\n \"\"\"\n h = np.var(ys)\n return h", "_____no_output_____" ], [ "def H_2(ys):\n \"\"\"\n ys is a numpy array with shape (num_items, num_classes).\n Where each row is a one-vector of class probabilities (e.g. [0, 0, 1] for object of class 2 from 0, 1, 2).\n \"\"\"\n p = np.sum(ys,axis=0)/ys.shape[0]\n c = 1 - np.sum(p**2)\n return c", "_____no_output_____" ], [ "epsilon = 1e-5\ndef H_3(ys):\n \"\"\"\n ys is a numpy array with shape (num_items, num_classes).\n Where each row is a one-vector of class probabilities (e.g. [0, 0, 1] for object of class 2 from 0, 1, 2).\n log2 should be used as logarithm. 
\n Do not forget to add epsilon to the probabitlities vector in the logarithm.\n \"\"\"\n p = np.sum(ys,axis=0)/ys.shape[0]\n b = np.log2(p+epsilon)\n c = -np.sum(p * b)\n return c", "_____no_output_____" ], [ "a_r = np.arange(10)\nb_r = np.ones(10)\nc_r = np.arange(25)/10.", "_____no_output_____" ], [ "assert_equal(H_1(a_r), 8.25)\nassert_equal(H_1(b_r), 0.0)\nassert_equal(H_1(c_r), 0.52)", "_____no_output_____" ], [ "a = np.vstack((np.ones(10), np.zeros(10))).T\nb = np.hstack([np.vstack((np.ones(5), np.zeros(5))), np.vstack((np.zeros(5), np.ones(5)))]).T\nc = np.hstack([np.vstack((np.ones(9), np.zeros(9))), np.vstack((np.zeros(1), np.ones(1)))]).T\nprint('a:\\n{}\\nb:\\n{}\\nc:\\n{}'.format(a, b, c))", "a:\n[[1. 0.]\n [1. 0.]\n [1. 0.]\n [1. 0.]\n [1. 0.]\n [1. 0.]\n [1. 0.]\n [1. 0.]\n [1. 0.]\n [1. 0.]]\nb:\n[[1. 0.]\n [1. 0.]\n [1. 0.]\n [1. 0.]\n [1. 0.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]\n [0. 1.]]\nc:\n[[1. 0.]\n [1. 0.]\n [1. 0.]\n [1. 0.]\n [1. 0.]\n [1. 0.]\n [1. 0.]\n [1. 0.]\n [1. 0.]\n [0. 1.]]\n" ], [ "assert_almost_equal(H_2(a), 0.0, places=4)\nassert_almost_equal(H_2(b), 0.5, places=4)\nassert_almost_equal(H_2(c), 0.18, places=4)", "_____no_output_____" ], [ "assert_almost_equal(H_3(a), 0.0, places=4)\nassert_almost_equal(H_3(b), 1.0, places=4)\nassert_almost_equal(H_3(c), 0.469, places=3)", "_____no_output_____" ] ], [ [ "### Сложность дерева (15%)\n\nЗапишите оценку сложности построения одного решающего дерева в зависимости от размера обучающей выборки $l$, числа признаков $d$, максимальной глубины дерева $D$. В качестве предикатов используются пороговые функции $[x_j>t]$. При выборе предиката в каждой вершине перебираются все признаки, а в качестве порогов рассматриваются величины $t$, равные значениям этого признака на объектах, попавших в текущую вершину. Считайте сложность вычисления критерия информативности на подвыборке константной (т.е. $O(1)$).\n\nОценку сложности представьте в формате $O($`get_tree_complexity(D, l, d)`$)$, где `get_tree_complexity` - некоторая функция от $D$, $l$ и $d$. Функцию реализуйте ниже. \n\nПример использования (числа и зависимости случайны):\n```\ndef get_tree_complexity(D, l, d):\n return D+l+d\n \na = get_tree_complexity(1, 2, 3)\n```\nТогда число a == 6.", "_____no_output_____" ] ], [ [ "def get_tree_complexity(D, l, d):\n \"\"\"\n Compute tree complexity in form O(\"some_expression\") and return the \"some_expression\".\n \"\"\"\n return D*l*d", "_____no_output_____" ], [ "#This cell is executed on the server side.\n", "_____no_output_____" ] ], [ [ "### Bootstrap (40%)\n\nВ данной задаче необходимо вычислить вероятность попадания объекта в boostrap-выборку, а затем оценить ее численно.\n\n\nПусть выборка $\\hat{X}^{n}$ размера $n$ сгененирована методом bootstrap на основе выборки $X^{n}={\\boldsymbol{x}_{1},\\dots\\boldsymbol{x}_{n}}$. Найдите вероятность попадания объекта $x_{i}$ в выборку $\\hat{X}^{n}$ и вычислите ее для случая $n\\rightarrow\\infty$. Реализуйте функцию `probability_to_get_into_X_b`, которая возвращает эту вероятность как число от `0` до `1`. В качесте экспоненты можете использовать `math.exp(1)`.", "_____no_output_____" ] ], [ [ "def probability_to_get_into_X_b():\n p = 1 - 1/np.exp(1)\n return p", "_____no_output_____" ], [ "assert_almost_equal(probability_to_get_into_X_b(), 0.6, places=1)", "_____no_output_____" ] ], [ [ "Реализуйте свою функцию, генерирующую bootstrap-выборку из исходной. Пусть исходная выборка представлена в виде `numpy`-массива (например, `np.arange(100)`). 
Тогда bootstrap-выборка тоже должна быть `numpy`-массивом тех же размеров, что и исходная.", "_____no_output_____" ] ], [ [ "def my_bootstrap(X):\n \"\"\"\n Implement the function that returns the \n bootstraped dataset of the same size the\n original dataset was.\n \"\"\"\n bs = np.random.randint(0,X.shape[0],X.shape[0])\n return X[bs]", "_____no_output_____" ] ], [ [ "Численно оцените вероятность попадания объекта исходной выборки в bootstrap-выборку для размера выборки `N`. Функция `get_sample_proba` должна возвращать число от `0` до `1`. \n\nНе забывайте, что мы живем в случайном мире ;)", "_____no_output_____" ] ], [ [ "def get_sample_proba(N):\n sample_proba = my_bootstrap(np.arange(N))\n prob = np.sum(np.isin(np.arange(N),sample_proba))/N\n return prob", "_____no_output_____" ], [ "#This cell is executed on the server side.\n", "_____no_output_____" ] ], [ [ "Поздравляем, задание завершено. Не забудьте остановить свой виртуальный инстанс перед уходом (Control Panel -> Stop My Server).", "_____no_output_____" ] ] ]
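As a sanity check on the bootstrap question in the assignment record above: a fixed item is missed by a single draw with probability 1 - 1/n, so it appears somewhere in a resample of size n with probability 1 - (1 - 1/n)^n, which tends to 1 - 1/e (about 0.632) as n grows. The sketch below is illustrative only (plain Python 3 with NumPy) and is not part of the stored assignment:

```python
# Sketch: convergence of 1 - (1 - 1/n)**n to 1 - 1/e, plus a small Monte Carlo check.
import numpy as np

for n in (10, 100, 1000, 1000000):
    print(n, 1 - (1 - 1/n)**n)            # exact inclusion probability for finite n

print("limit 1 - 1/e =", 1 - 1/np.e)      # ~0.632

n, reps = 100, 10000
hits = sum(0 in np.random.randint(0, n, n) for _ in range(reps))
print("simulated    =", hits / reps)      # should be close to 1 - (1 - 1/100)**100
```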
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ] ]
cb79371eabdd5f367773fcaa103a9843c1054d98
120,173
ipynb
Jupyter Notebook
_as/2019/jp/11.ipynb
chrislangst/scalable-data-science
c7beee15c7dd14d27353c4864d927c1b76cd2fa9
[ "Unlicense" ]
null
null
null
_as/2019/jp/11.ipynb
chrislangst/scalable-data-science
c7beee15c7dd14d27353c4864d927c1b76cd2fa9
[ "Unlicense" ]
null
null
null
_as/2019/jp/11.ipynb
chrislangst/scalable-data-science
c7beee15c7dd14d27353c4864d927c1b76cd2fa9
[ "Unlicense" ]
null
null
null
70.153532
16,300
0.76196
[ [ [ "# [Applied Statistics](https://lamastex.github.io/scalable-data-science/as/2019/)\n## 1MS926, Spring 2019, Uppsala University \n&copy;2019 Raazesh Sainudiin. [Attribution 4.0 International (CC BY 4.0)](https://creativecommons.org/licenses/by/4.0/)", "_____no_output_____" ], [ "# 11. Non-parametric Estimation and Testing\n", "_____no_output_____" ], [ "### Topics\n\n- Non-parametric Estimation\n- Glivenko-Cantelli Theorem\n- Dvoretsky-Kiefer-Wolfowitz Inequality\n- Hypothesis Testing\n- Permutation Testing\n- Permutation Testing with Shells Data\n- Plug-in Estimation and Bootstraps\n \n\n## Inference and Estimation: The Big Picture\n\nThe Big Picture is about inference and estimation, and especially inference and estimation problems where computational techniques are helpful. \n\n<table border=\"1\" cellspacing=\"2\" cellpadding=\"2\" align=\"center\">\n<tbody>\n<tr>\n<td style=\"background-color: #ccccff;\" align=\"center\">&nbsp;</td>\n<td style=\"background-color: #ccccff;\" align=\"center\"><strong>Point estimation</strong></td>\n<td style=\"background-color: #ccccff;\" align=\"center\"><strong>Set estimation</strong></td>\n</tr>\n<tr>\n<td style=\"background-color: #ccccff;\">\n<p><strong>Parametric</strong></p>\n<p>&nbsp;</p>\n</td>\n<td style=\"background-color: #ccccff;\" align=\"center\">\n<p>MLE of finitely many parameters<br /><span style=\"color: #3366ff;\"><em>done</em></span></p>\n</td>\n<td style=\"background-color: #ccccff;\" align=\"center\">\n<p>Confidence intervals,<br /> via the central limit theorem</p>\n</td>\n</tr>\n<tr>\n<td style=\"background-color: #ccccff;\">\n<p><strong>Non-parametric</strong><br /> (infinite-dimensional parameter space)</p>\n</td>\n<td style=\"background-color: #ccccff;\" align=\"center\"><strong><em><span style=\"color: #3366ff;\">about to see ... </span></em></strong></td>\n<td style=\"background-color: #ccccff;\" align=\"center\"><strong><em><span style=\"color: #3366ff;\">about to see ... </span></em></strong></td>\n</tr>\n</tbody>\n</table>\n\nSo far we have seen parametric models, for example\n\n- $X_1, X_2, \\ldots, X_n \\overset{IID}{\\sim} Bernoulli (\\theta)$, $\\theta \\in [0,1]$\n- $X_1, X_2, \\ldots, X_n \\overset{IID}{\\sim} Exponential (\\lambda)$, $\\lambda \\in (0,\\infty)$\n- $X_1, X_2, \\ldots, X_n \\overset{IID}{\\sim} Normal(\\mu^*, \\sigma)$, $\\mu \\in \\mathbb{R}$, $\\sigma \\in (0,\\infty)$\n\nIn all these cases **the parameter space** (the space within which the parameter(s) can take values) is **finite dimensional**:\n\n- for the $Bernoulli$, $\\theta \\in [0,1] \\subseteq \\mathbb{R}^1$\n- for the $Exponential$, $\\lambda \\in (0, \\infty) \\subseteq \\mathbb{R}^1$\n- for the $Normal$, $\\mu \\in \\mathbb{R}^1$, $\\sigma \\in (0,\\infty) \\subseteq \\mathbb{R}^1$, so $(\\mu, \\sigma) \\subseteq \\mathbb{R}^2$\n\nFor parametric experiments, we can use the maximum likelihood principle and estimate the parameters using the **Maximum Likelihood Estimator (MLE)**, for instance. \n\n# Non-parametric estimation\n\nSuppose we don't know what the distribution function (DF) is? We are not trying to estimate some fixed but unknown parameter $\\theta^*$ for some RV we are assuming to be $Bernoulli(\\theta^*)$, we are trying to estimate the DF itself. In real life, data does not come neatly labeled \"I am a realisation of a $Bernoulli$ RV\", or \"I am a realisation of an $Exponential$ RV\": an important part of inference and estimation is to make inferences about the DF itself from our observations. 
\n\n#### Observations from some unknown process\n<img src=\"images/unknownProcessTimesAnim.gif\" width=400>\n\nConsider the following non-parametric product experiment:\n\n$$X_1, X_2, \\ldots, X_n\\ \\overset{IID}{\\sim} F^* \\in \\{\\text{all DFs}\\}$$\n\nWe want to produce a point estimate for $F^*$, which is a allowed to be any DF (\"lives in the set of all DFs\"), i.e., $F^* \\in \\{\\text{all DFs}\\}$\n\nCrucially, $\\{\\text{all DFs}\\}$, i.e., the set of all distribution functions over $\\mathbb{R}$ is infinite dimensional.\n\n<img src=\"images/TwoDFs.png\" width=400>\n\nWe have already seen an estimate, made using the data, of a distribution function: the empirical or data-based distribution function (or empirical cumulative distribution function). This can be formalized as the following process of adding indicator functions of the half-lines beginning at the data points $[X_1,+\\infty),[X_2,+\\infty),\\ldots,[X_n,+\\infty)$:\n\n$$\\widehat{F}_n (x) = \\frac{1}{n} \\sum_{i=1}^n \\mathbf{1}_{[X_i,+\\infty)}(x)$$\n\n\nwhere,\n\n$$\\mathbf{1}_{[X_i,+\\infty)}(x) := \\begin{cases} & 1 \\quad \\text{ if } X_i \\leq x \\\\ & 0 \\quad \\text{ if }X_i > x \\end{cases}$$\n", "_____no_output_____" ], [ "First let us evaluate a set of functions that will help us conceptualize faster:", "_____no_output_____" ] ], [ [ "def makeEMFHidden(myDataList):\n '''Make an empirical mass function from a data list.\n \n Param myDataList, list of data to make emf from.\n Return list of tuples comprising (data value, relative frequency) ordered by data value.'''\n \n sortedUniqueValues = sorted(list(set(myDataList)))\n freqs = [myDataList.count(i) for i in sortedUniqueValues]\n relFreqs = [ZZ(fr)/len(myDataList) for fr in freqs] # use a list comprehension\n \n return zip(sortedUniqueValues, relFreqs)\n \n\nfrom pylab import array\n\ndef makeEDFHidden(myDataList, offset=0):\n '''Make an empirical distribution function from a data list.\n \n Param myDataList, list of data to make ecdf from.\n Param offset is an offset to adjust the edf by, used for doing confidence bands.\n Return list of tuples comprising (data value, cumulative relative frequency) ordered by data value.'''\n \n sortedUniqueValues = sorted(list(set(myDataList)))\n freqs = [myDataList.count(i) for i in sortedUniqueValues]\n from pylab import cumsum\n cumFreqs = list(cumsum(freqs)) #\n cumRelFreqs = [ZZ(i)/len(myDataList) for i in cumFreqs] # get cumulative relative frequencies as rationals\n if offset > 0: # an upper band\n cumRelFreqs = [min(i ,1) for i in cumRelFreqs] # use a list comprehension\n if offset < 0: # a lower band\n cumRelFreqs = [max(i, 0) for i in cumFreqs] # use a list comprehension\n return zip(sortedUniqueValues, cumRelFreqs)\n \n# EPMF plot\ndef epmfPlot(samples):\n '''Returns an empirical probability mass function plot from samples data.'''\n \n epmf_pairs = makeEMFHidden(samples)\n epmf = point(epmf_pairs, rgbcolor = \"blue\", pointsize=\"20\")\n for k in epmf_pairs: # for each tuple in the list\n kkey, kheight = k # unpack tuple\n epmf += line([(kkey, 0),(kkey, kheight)], rgbcolor=\"blue\", linestyle=\":\")\n # padding\n epmf += point((0,1), rgbcolor=\"black\", pointsize=\"0\")\n return epmf\n \n\n# ECDF plot\ndef ecdfPlot(samples):\n '''Returns an empirical probability mass function plot from samples data.'''\n ecdf_pairs = makeEDFHidden(samples)\n ecdf = point(ecdf_pairs, rgbcolor = \"red\", faceted = false, pointsize=\"20\")\n for k in range(len(ecdf_pairs)):\n x, kheight = ecdf_pairs[k] # unpack tuple\n previous_x = 
0\n previous_height = 0\n if k > 0:\n previous_x, previous_height = ecdf_pairs[k-1] # unpack previous tuple\n ecdf += line([(previous_x, previous_height),(x, previous_height)], rgbcolor=\"grey\")\n ecdf += points((x, previous_height),rgbcolor = \"white\", faceted = true, pointsize=\"20\")\n ecdf += line([(x, previous_height),(x, kheight)], rgbcolor=\"grey\", linestyle=\":\")\n # padding\n ecdf += line([(ecdf_pairs[0][0]-0.2, 0),(ecdf_pairs[0][0], 0)], rgbcolor=\"grey\")\n max_index = len(ecdf_pairs)-1\n ecdf += line([(ecdf_pairs[max_index][0], ecdf_pairs[max_index][1]),(ecdf_pairs[max_index][0]+0.2, ecdf_pairs[max_index][1])],rgbcolor=\"grey\")\n return ecdf\n \ndef calcEpsilon(alphaE, nE):\n '''Return confidence band epsilon calculated from parameters alphaE > 0 and nE > 0.'''\n \n return sqrt(1/(2*nE)*log(2/alphaE))", "_____no_output_____" ] ], [ [ "### Let us continue with the concepts\n\nWe can remind ourselves of this for a small sample of $de\\,Moivre(k=5)$ RVs:", "_____no_output_____" ] ], [ [ "deMs=[randint(1,5) for i in range(20)] # randint can be used to uniformly sample integers in a specified range\ndeMs", "_____no_output_____" ], [ "sortedUniqueValues = sorted(list(set(deMs)))\nfreqs = [deMs.count(i) for i in sortedUniqueValues]\nfrom pylab import cumsum\ncumFreqs = list(cumsum(freqs)) #\ncumRelFreqs = [ZZ(i)/len(deMs) for i in cumFreqs] # get cumulative relative frequencies as rationals\nzip(sortedUniqueValues, cumRelFreqs)", "_____no_output_____" ], [ "show(ecdfPlot(deMs), figsize=[6,3]) # use hidden ecdfPlot function to plot", "_____no_output_____" ] ], [ [ "We can use the empirical cumulative distribution function $\\widehat{F}_n$ for our non-parametric estimate because this kind of estimation is possible in infinite-dimensional contexts due to the following two theorems:\n\n- Glivenko-Cantelli Theorem (*Fundamental Theorem of Statistics*)\n- Dvoretsky-Kiefer-Wolfowitz (DKW) Inequality\n\n# Glivenko-Cantelli Theorem\n\nLet $X_1, X_2, \\ldots, X_n \\overset{IID}{\\sim} F^* \\in \\{\\text{all DFs}\\}$\n\nand the empirical distribution function (EDF) is $\\widehat{F}_n(x) := \\displaystyle\\frac{1}{n} \\sum_{i=1}^n \\mathbf{1}_{[X_i,+\\infty)}(x)$, then\n\n$$\\sup_x { | \\widehat{F}_n(x) - F^*(x) | } \\overset{P}{\\rightarrow} 0$$\n\nRemember that the EDF is a statistic of the data, a statistic is an RV, and (from our work the convergence of random variables), $\\overset{P}{\\rightarrow}$ means \"converges in probability\". 
The proof is beyond the scope of this course, but we can gain an appreciation of what it means by looking at what happens to the ECDF for $n$ simulations from:\n\n- $de\\,Moivre(1/5,1/5,1/5,1/5,1/5)$ and \n- $Uniform(0,1)$ as $n$ increases:", "_____no_output_____" ] ], [ [ "@interact\ndef _(n=(10,(0..200))):\n '''Interactive function to plot ecdf for obs from de Moirve (5).'''\n if (n > 0):\n us = [randint(1,5) for i in range(n)]\n p=ecdfPlot(us) # use hidden ecdfPlot function to plot\n #p+=line([(-0.2,0),(0,0),(1,1),(1.2,1)],linestyle=':')\n p.show(figsize=[8,2])", "_____no_output_____" ], [ "@interact\ndef _(n=(10,(0..200))):\n '''Interactive function to plot ecdf for obs from Uniform(0,1).'''\n if (n > 0):\n us = [random() for i in range(n)]\n p=ecdfPlot(us) # use hidden ecdfPlot function to plot\n p+=line([(-0.2,0),(0,0),(1,1),(1.2,1)],linestyle='-')\n p.show(figsize=[3,3],aspect_ratio=1)", "_____no_output_____" ] ], [ [ "It is clear, that as $n$ increases, the ECDF $\\widehat{F}_n$ gets closer and closer to the true DF $F^*$, $\\displaystyle\\sup_x { | \\widehat{F}_n(x) - F^*(x) | } \\overset{P}{\\rightarrow} 0$.\n\nThis will hold no matter what the (possibly unknown) $F^*$ is. Thus, $\\widehat{F}_n$ is a point estimate of $F^*$.\n\nWe need to add the DKW Inequality be able to get confidence sets or a 'confidence band' that traps $F^*$ with high probability.\n\n# Dvoretsky-Kiefer-Wolfowitz (DKW) Inequality\n\nLet $X_1, X_2, \\ldots, X_n \\overset{IID}{\\sim} F^* \\in \\{\\text{all DFs}\\}$\n\nand the empirical distribution function (EDF) is $\\widehat{F}_n(x) := \\displaystyle\\frac{1}{n} \\sum_{i=1}^n \\mathbf{1}_{[X_i,+\\infty)}(x)$,\n\nthen, for any $\\varepsilon > 0$,\n\n$$P\\left( \\sup_x { | \\widehat{F}_n(x) - F^*(x) | > \\varepsilon }\\right) \\leq 2 \\exp(-2n\\varepsilon^2) $$\n\nWe can use this inequality to get a $1-\\alpha$ confidence band $C_n(x) := \\left[\\underline{C}_n(x), \\overline{C}_n(x)\\right]$ about our point estimate $\\widehat{F}_n$ of our possibly unknown $F^*$ such that the $F^*$ is 'trapped' by the band with probability at least $1-\\varepsilon$.\n\n$$\\begin{eqnarray} \\underline{C}_{\\, n}(x) &=& \\max \\{ \\widehat{F}_n(x)-\\varepsilon_n, 0 \\}, \\notag \\\\ \\overline{C}_{\\, n}(x) &=& \\min \\{ \\widehat{F}_n(x)+\\varepsilon_n, 1 \\}, \\notag \\\\ \\varepsilon_n &=& \\sqrt{ \\frac{1}{2n} \\log \\left( \\frac{2}{\\alpha}\\right)} \\\\ \\end{eqnarray}$$\n\nand\n\n$$P\\left(\\underline{C}_n(x) \\leq F^*(x) \\leq \\overline{C}_n(x)\\right) \\geq 1-\\alpha$$\n\n \n\n### YouTry in class\n\nTry this out for a simple sample from the $Uniform(0,1)$, which you can generate using random. First we will just make the point estimate for $F^*$, the EDF $\\widehat{F}_n$", "_____no_output_____" ] ], [ [ "n=10\nuniformSample = [random() for i in range(n)]\nprint(uniformSample)", "[0.7654959318439482, 0.021114879009168197, 0.8998365224229663, 0.2538000302621988, 0.6883820821527767, 0.7692120997125704, 0.5191011681497731, 0.4932782146299348, 0.2512953062848242, 0.8598821202558439]\n" ] ], [ [ "In one of the assessments, you did a question that took you through the steps for getting the list of points that you would plot for an empirical distribution function (EDF). 
We will do exactly the same thing here.\n\nFirst we find the unique values in the sample, in order from smallest to largest, and get the frequency with which each unique value occurs:", "_____no_output_____" ] ], [ [ "sortedUniqueValuesUniform = sorted(list(set(uniformSample)))\nprint(sortedUniqueValuesUniform)", "[0.021114879009168197, 0.2512953062848242, 0.2538000302621988, 0.4932782146299348, 0.5191011681497731, 0.6883820821527767, 0.7654959318439482, 0.7692120997125704, 0.8598821202558439, 0.8998365224229663]\n" ], [ "freqsUniform = [uniformSample.count(i) for i in sortedUniqueValuesUniform]\nfreqsUniform", "_____no_output_____" ] ], [ [ "Then we accumulate the frequences to get the cumulative frequencies:", "_____no_output_____" ] ], [ [ "from pylab import cumsum\ncumFreqsUniform = list(cumsum(freqsUniform)) # accumulate\ncumFreqsUniform", "_____no_output_____" ] ], [ [ "And the the relative cumlative frequencies:", "_____no_output_____" ] ], [ [ "# cumulative rel freqs as rationals\ncumRelFreqsUniform = [ZZ(i)/len(uniformSample) for i in cumFreqsUniform] \ncumRelFreqsUniform", "_____no_output_____" ] ], [ [ "And finally zip these up with the sorted unique values to get a list of points we can plot:", "_____no_output_____" ] ], [ [ "ecdfPointsUniform = zip(sortedUniqueValuesUniform, cumRelFreqsUniform)\necdfPointsUniform", "_____no_output_____" ] ], [ [ "Here is a function that you can just use to do a ECDF plot:", "_____no_output_____" ] ], [ [ "# ECDF plot given a list of points to plot\ndef ecdfPointsPlot(listOfPoints, colour='grey', lines_only=False):\n '''Returns an empirical probability mass function plot from a list of points to plot.\n \n Param listOfPoints is the list of points to plot.\n Param colour is used for plotting the lines, defaulting to grey.\n Param lines_only controls wether only lines are plotted (true) or points are added (false, the default value).\n Returns an ecdf plot graphic.'''\n \n ecdfP = point((0,0), pointsize=\"0\")\n if not lines_only: ecdfP = point(listOfPoints, rgbcolor = \"red\", faceted = false, pointsize=\"20\")\n for k in range(len(listOfPoints)):\n x, kheight = listOfPoints[k] # unpack tuple\n previous_x = 0\n previous_height = 0\n if k > 0:\n previous_x, previous_height = listOfPoints[k-1] # unpack previous tuple\n ecdfP += line([(previous_x, previous_height),(x, previous_height)], rgbcolor=colour)\n ecdfP += line([(x, previous_height),(x, kheight)], rgbcolor=colour, linestyle=\":\")\n if not lines_only: \n ecdfP += points((x, previous_height),rgbcolor = \"white\", faceted = true, pointsize=\"20\")\n # padding\n max_index = len(listOfPoints)-1\n ecdfP += line([(listOfPoints[0][0]-0.2, 0),(listOfPoints[0][0], 0)], rgbcolor=colour)\n ecdfP += line([(listOfPoints[max_index][0], listOfPoints[max_index][1]),(listOfPoints[max_index][0]+0.2, listOfPoints[max_index][1])],rgbcolor=colour)\n return ecdfP", "_____no_output_____" ] ], [ [ "This makes the plot of the $\\widehat{F}_{10}$, the point estimate for $F^*$ for these $n=10$ simulated samples.", "_____no_output_____" ] ], [ [ "show(ecdfPointsPlot(ecdfPointsUniform), figsize=[6,3])", "_____no_output_____" ] ], [ [ "What about adding those confidence bands? You will do essentially the same thing, but adjusting for the required $\\varepsilon$. First we need to decide on an $\\alpha$ and calculate the $\\varepsilon$ corresponding to this alpha. 
Here is some of our code to calculate the $\\varepsilon$ corresponding to $\\alpha=0.05$ (95% confidence bands), using a hidden function calcEpsilon: ", "_____no_output_____" ] ], [ [ "alpha = 0.05\nepsilon = calcEpsilon(alpha, n)\nepsilon", "_____no_output_____" ] ], [ [ "See if you can write your own code to do this calculation, $\\varepsilon_n = \\sqrt{ \\frac{1}{2n} \\log \\left( \\frac{2}{\\alpha}\\right)}$. For completeness, do the whole thing:assign the value 0.05 to a variable named alpha, and then use this and the variable called n that we have already declared to calculate a value for $\\varepsilon$. Call the variable to which you assign the value for $\\varepsilon$ epsilon so that it replaces the value we calculated in the cell above (you should get the same value as us!).", "_____no_output_____" ], [ "Now we need to use this to adjust the EDF plot. In the two cells below we first of all do the adjustment for $\\underline{C}_{\\,n}(x) =\\max \\{ \\widehat{F}_n(x)-\\varepsilon_n, 0 \\}$, and then use zip again to get the points to actually plot for the lower boundary of the 95% confidence band.\n\nNow we need to use this to adjust the EDF plot. In the two cells below we first of all do the adjustment for $\\overline{C}_{\\,n}(x) =\\min \\{ \\widehat{F}_n(x)+\\varepsilon_n, 1 \\}$, and then use zip again to get the points to actually plot for the lower boundary of the 95% confidence band.", "_____no_output_____" ] ], [ [ "# heights for the lower band\ncumRelFreqsUniformLower = [max(crf - epsilon, 0) for crf in cumRelFreqsUniform] \nprint(cumRelFreqsUniformLower)", "[0, 0, 0, 0, 0.0705305916532624, 0.170530591653262, 0.270530591653262, 0.370530591653262, 0.470530591653262, 0.570530591653262]\n" ], [ "ecdfPointsUniformLower = zip(sortedUniqueValuesUniform, cumRelFreqsUniformLower)\necdfPointsUniformLower", "_____no_output_____" ] ], [ [ "We carefully gave our `ecdfPointsPlo`t function the flexibility to be able to plot bands, by having a colour parameter (which defaults to 'grey') and a `lines_only` parameter (which defaults to `false`). Here we can plot the lower bound of the confidence interval by adding `ecdfPointsPlot(ecdfPointsUniformLower, colour='green', lines_only=true)` to the previous plot:", "_____no_output_____" ] ], [ [ "pointEstimate = ecdfPointsPlot(ecdfPointsUniform)\nlowerBound = ecdfPointsPlot(ecdfPointsUniformLower, colour='green', lines_only=true)\nshow(pointEstimate + lowerBound, figsize=[6,3])", "_____no_output_____" ] ], [ [ "### YouTry \nYou try writing the code to create the list of points needed for plotting the upper band $\\overline{C}_{\\,n}(x) =\\min \\{ \\widehat{F}_n(x)+\\varepsilon_n, 1 \\}$. You will need to first of all get the upper heights (call them say `cumRelFreqsUniformUpper`) and then `zip` them up with the `sortedUniqueValuesUniform` to get the points to plot.", "_____no_output_____" ] ], [ [ "# heights for the upper band\n", "_____no_output_____" ] ], [ [ "Once you have got done this you can add them to the plot by altering the code below:", "_____no_output_____" ] ], [ [ "pointEstimate = ecdfPointsPlot(ecdfPointsUniform)\nlowerBound = ecdfPointsPlot(ecdfPointsUniformLower,colour='green', lines_only=true)\nshow(pointEstimate + lowerBound, figsize=[6,3])", "_____no_output_____" ] ], [ [ "(end of YouTry)\n\n---", "_____no_output_____" ], [ "If we are doing lots of collections of EDF points we may as well define a function to do it, rather than repeating the same code again and again. 
We use an offset parameter to give us the flexibility to use this to make points for confidence bands as well.", "_____no_output_____" ] ], [ [ "def makeEDFPoints(myDataList, offset=0):\n '''Make a list empirical distribution plotting points from from a data list.\n \n Param myDataList, list of data to make ecdf from.\n Param offset is an offset to adjust the edf by, used for doing confidence bands.\n Return list of tuples comprising (data value, cumulative relative frequency(with offset)) ordered by data value.'''\n \n sortedUniqueValues = sorted(list(set(myDataList)))\n freqs = [myDataList.count(i) for i in sortedUniqueValues]\n from pylab import cumsum\n cumFreqs = list(cumsum(freqs)) \n cumRelFreqs = [ZZ(i)/len(myDataList) for i in cumFreqs] # get cumulative relative frequencies as rationals\n if offset > 0: # an upper band\n cumRelFreqs = [min(i+offset ,1) for i in cumRelFreqs]\n if offset < 0: # a lower band\n cumRelFreqs = [max(i+offset, 0) for i in cumRelFreqs] \n return zip(sortedUniqueValues, cumRelFreqs)", "_____no_output_____" ] ], [ [ "## NZ EartQuakes\n\nNow we will try looking at the Earthquakes data we have used before to get a confidence band around an EDF for that. We start by bringing in the data and the function we wrote earlier to parse that data.\n\nFirst check if you have already `unzip`-ped `data/earthquakes.csv.zip` file by dropping in shell via `%%sh`.", "_____no_output_____" ] ], [ [ "%%sh\nls data/", "NYPowerBall.csv\nco2_mm_mlo.txt\nearthquakes.csv.zip\nearthquakes.tgz\nearthquakes_small.csv\nfinal.csv\nfinal.csv.zip\nfinal.tgz\npride_and_prejudice.txt\nrainfallInChristchurch.csv\n" ], [ "%%sh\n# only do this once! So, you don't need to do this step if you see earthquakes.csv file above\ncd data\n# windows and mac users should first try to unzip\n# unzip earthquakes.csv.zip\n## if unzip is not found try tar by uncommenting next line and commenting the above line\n## tar zxvf earthquakes.tgz\nls -al", "total 13564\ndrwxr-xr-x 13 sage sage 416 Feb 18 06:20 .\ndrwxr-xr-x 19 sage sage 608 Feb 18 06:21 ..\n-rw-r--r-- 1 sage sage 29002 Feb 13 05:14 NYPowerBall.csv\n-rw-r--r-- 1 sage sage 50555 Jan 22 19:58 co2_mm_mlo.txt\n-rw-r--r-- 1 sage sage 4085555 Jan 23 14:06 earthquakes.csv\n-rw-r--r-- 1 sage sage 1344114 Jan 23 14:15 earthquakes.csv.zip\n-rw-r--r-- 1 sage sage 1344959 Jan 23 14:15 earthquakes.tgz\n-rw-r--r-- 1 sage sage 77786 Jan 21 13:41 earthquakes_small.csv\n-rw-r--r-- 1 sage sage 4894689 Nov 9 11:50 final.csv\n-rw-r--r-- 1 sage sage 467572 Jan 23 12:56 final.csv.zip\n-rw-r--r-- 1 sage sage 467611 Jan 23 12:56 final.tgz\n-rw-r--r-- 1 sage sage 724725 Jan 22 18:43 pride_and_prejudice.txt\n-rw-r--r-- 1 sage sage 376954 Jan 21 13:41 rainfallInChristchurch.csv\n" ], [ "def getLonLatMagDepTimes(NZEQCsvFileName):\n '''returns longitude, latitude, magnitude, depth and the origin time as unix time\n for each observed earthquake in the csv filr named NZEQCsvFileName'''\n from datetime import datetime\n import time\n from dateutil.parser import parse\n import numpy as np\n \n with open(NZEQCsvFileName) as f:\n reader = f.read() \n dataList = reader.split('\\n')\n \n myDataAccumulatorList =[]\n for data in dataList[1:-1]:\n dataRow = data.split(',')\n myTimeString = dataRow[2] # origintime\n # let's also grab longitude, latitude, magnitude, depth\n myDataString = [dataRow[4],dataRow[5],dataRow[6],dataRow[7]]\n try: \n myTypedTime = time.mktime(parse(myTimeString).timetuple())\n myFloatData = [float(x) for x in myDataString]\n myFloatData.append(myTypedTime) # append 
the processed timestamp\n myDataAccumulatorList.append(myFloatData)\n except TypeError, e: # error handling for type incompatibilities\n print 'Error: Error is ', e\n #return np.array(myDataAccumulatorList)\n return myDataAccumulatorList\n\nmyProcessedList = getLonLatMagDepTimes('data/earthquakes.csv')\n\ndef interQuakeTimes(quakeTimes):\n '''Return a list inter-earthquake times in seconds from earthquake origin times\n Date and time elements are expected to be in the 5th column of the array\n Return a list of inter-quake times in seconds. NEEDS sorted quakeTimes Data'''\n import numpy as np\n retList = []\n if len(quakeTimes) > 1:\n retList = [quakeTimes[i]-quakeTimes[i-1] for i in range(1,len(quakeTimes))]\n #return np.array(retList)\n return retList\n\ninterQuakesSecs = interQuakeTimes(sorted([x[4] for x in myProcessedList]))\nlen(interQuakesSecs)", "_____no_output_____" ], [ "interQuakesSecs[0:10]", "_____no_output_____" ] ], [ [ "There is a lot of data here, so let's use an interactive plot to do the non-parametric DF estimation just for some of the last data:", "_____no_output_____" ] ], [ [ "@interact\ndef _(takeLast=(500,(0..min(len(interQuakesSecs),1999))), alpha=(0.05)):\n '''Interactive function to plot the edf estimate and confidence bands for inter earthquake times.'''\n if takeLast > 0 and alpha > 0 and alpha < 1:\n lastInterQuakesSecs = interQuakesSecs[len(interQuakesSecs)-takeLast:len(interQuakesSecs)]\n interQuakePoints = makeEDFPoints(lastInterQuakesSecs)\n p=ecdfPointsPlot(interQuakePoints, lines_only=true)\n epQuakes = calcEpsilon(alpha, len(lastInterQuakesSecs))\n interQuakePointsLower = makeEDFPoints(lastInterQuakesSecs, offset=-epQuakes)\n lowerQuakesBound = ecdfPointsPlot(interQuakePointsLower, colour='green', lines_only=true)\n interQuakePointsUpper = makeEDFPoints(lastInterQuakesSecs, offset=epQuakes)\n upperQuakesBound = ecdfPointsPlot(interQuakePointsUpper, colour='green', lines_only=true)\n show(p + lowerQuakesBound + upperQuakesBound, figsize=[6,3])\n else:\n print \"check your input values\"", "_____no_output_____" ] ], [ [ "What if we are not interested in estimating $F^*$ itself, but we are interested in scientificially investigating whether two distributions are the same. For example, perhaps, whether the distribution of earthquake magnitudes was the same in April as it was in March. Then, we should attempt to reject a falsifiable hypothesis ...\n\n# Hypothesis Testing\n\n**Recall:** \nA formal definition of hypothesis testing is beyond our current scope. Here we will look in particular at a non-parametric hypothesis test called a permutation test. First, a quick review:\n\nThe outcomes of a hypothesis test, in general, are:\n\n<table border=\"1\" cellspacing=\"2\" cellpadding=\"2\" align=\"center\">\n<tbody>\n<tr>\n<td align=\"center\">'true state of nature'</td>\n<td align=\"center\"><strong>Do not reject $H_0$<br /></strong></td>\n<td align=\"center\"><strong>Reject $H_0$<br /></strong></td>\n</tr>\n<tr>\n<td>\n<p><strong>$H_0$ is true<br /></strong></p>\n<p>&nbsp;</p>\n</td>\n<td align=\"center\">\n<p>OK<span style=\"color: #3366ff;\">&nbsp;</span></p>\n</td>\n<td align=\"center\">\n<p>Type I error</p>\n</td>\n</tr>\n<tr>\n<td>\n<p><strong>$H_0$ is false</strong></p>\n</td>\n<td align=\"center\">Type II error</td>\n<td align=\"center\">OK</td>\n</tr>\n</tbody>\n</table>\n\nSo, we want a small probability that we reject $H_0$ when $H_0$ is true (minimise Type I error). 
Similarly, we want to minimise the probability that we fail to reject $H_0$ when $H_0$ is false (type II error). \n\nThe P-value is one way to conduct a desirable hypothesis test. The scale of the evidence against $H_0$ is stated in terms of the P-value. The following interpretation of P-values is commonly used:\n\n- P-value $\\in (0, 0.01]$: Very strong evidence against $H_0$\n- P-value $\\in (0.01, 0.05]$: Strong evidence against $H_0$\n- P-value $\\in (0.05, 0.1]$: Weak evidence against $H_0$\n- P-value $\\in (0.1, 1]$: Little or no evidence against $H_0$\n\n## Permutation Testing\n\nA Permuation Test is a **non-parametric exact** method for testing whether two distributions are the same based on samples from each of them. In industry analogs and variants of permutation testing is known as *A/B Testing*.\n\nWhat do we mean by \"non-parametric exact\"? It is non-parametric because we do not impose any parametric assumptions. It is exact because it works for any sample size.\n\nFormally, we suppose that: \n$$ X_1,X_2,\\ldots,X_m \\overset{IID}{\\sim} F^* \\quad \\text{and} \\quad X_{m+1}, X_{m+2},\\ldots,X_{m+n} \\overset{IID}{\\sim} G^* \\enspace , $$\nare two sets of independent samples where the possibly unknown DFs \n$F^*,\\,G^* \\in \\{ \\text{all DFs} \\}$.\n\n(Notice that we have written it so that the subscripts on the $X$s run from 1 to $m+n$.)\n\nNow, consider the following hypothesis test: \n$$H_0: F^*=G^* \\quad \\text{versus} \\quad H_1: F^* \\neq G^* \\enspace . $$\n\nOur test statistic uses the observations in both both samples. We want a test statistic that is a sensible one for the test, i.e., will be large when when $F^*$ is 'too different' from $G^*$\n\nSo, let our test statistic $T(X_1,\\ldots,X_m,X_{m+1},\\ldots,X_{m+n})$ be say: \n$$\nT:=T(X_1,\\ldots,X_m,X_{m+1},\\ldots,X_{m+n})= \\text{abs} \\left( \\frac{1}{m} \\sum_{i=1}^m X_i - \\frac{1}{n} \\sum_{i=m+1}^n X_i \\right) \\enspace .\n$$\n\n(In words, we have chosen a test statistic that is the absolute value of the difference in the sample means. Note the limitation of this: if $F^*$ and $G^*$ have the same mean but different variances, our test statistic $T$ will not be large.)\n\nThen the idea of a permutation test is as follows:\n\n- Let $N:=m+n$ be the pooled sample size and consider all $N!$ permutations of the observed data $x_{obs}:=(x_1,x_2,\\ldots,x_m,x_{m+1},x_{m+2},\\ldots,x_{m+n})$.\n- For each permutation of the data compute the statistic $T(\\text{permuted data } x)$ and denote these $N!$ values of $T$ by $t_1,t_2,\\ldots,t_{N!}$.\n- Under $H_0: X_1,\\ldots,X_m,X_{m+1},\\ldots,X_{m+n} \\overset{IID}{\\sim}F^*=G^*$, each of the permutations of $x= (x_1,x_2,\\ldots,x_m,x_{m+1},x_{m+2},\\ldots,x_{m+n})$ has the same joint probability $\\prod_{i=1}^{m+n} f(x_i)$, where $f(x_i)$ is the density function corresponding to $F^*=G^*$, $f(x_i)=dF(x_i)=dG(x_i)$. \n- Therefore, the transformation of the data by our statistic $T$ also has the same probability over the values of $T$, namely $\\{t_1,t_2,\\ldots,t_{N!}\\}$. Let $\\mathbf{P}_0$ be this permutation distribution under the null hypothesis. $\\mathbf{P}_0$ is discrete and uniform over $\\{t_1,t_2,\\ldots,t_{N!}\\}$. 
\n- Let $t_{obs} := T(x_{obs})$ be the observed value of the test statistic.\n- Assuming we reject $H_0$ when $T$ is large, the P-value = $\\mathbf{P}_0 \\left( T \\geq t_{obs} \\right)$\n- Saying that $\\mathbf{P}_0$ is discrete and uniform over $\\{t_1, t_2, \\ldots, t_{N!}\\}$ says that each possible permutation has an equal probabability of occuring (under the null hypothesis). There are $N!$ possible permutations and so the probability of any individual permutation is $\\frac{1}{N!}$\n\n$$\n\\text{P-value} = \\mathbf{P}_0 \\left( T \\geq t_{obs} \\right) = \\frac{1}{N!} \\left( \\sum_{j=1}^{N!} \\mathbf{1} (t_j \\geq t_{obs}) \\right), \\qquad \\mathbf{1} (t_j \\geq t_{obs}) = \\begin{cases} 1 & \\text{if } \\quad t_j \\geq t_{obs} \\\\ 0 & \\text{otherwise} \\end{cases}\n$$\n\nThis will make more sense if we look at some real data. \n\n## Permutation Testing with Shell Data\n\nIn 2008, Guo Yaozong and Chen Shun collected data on the diameters of coarse venus shells from New Brighton beach for a course project. They recorded the diameters for two samples of shells, one from each side of the New Brighton Pier. The data is given in the following two cells.", "_____no_output_____" ] ], [ [ "leftSide = [52, 54, 60, 60, 54, 47, 57, 58, 61, 57, 50, 60, 60, 60, 62, 44, 55, 58, 55,\\\n 60, 59, 65, 59, 63, 51, 61, 62, 61, 60, 61, 65, 43, 59, 58, 67, 56, 64, 47,\\\n 64, 60, 55, 58, 41, 53, 61, 60, 49, 48, 47, 42, 50, 58, 48, 59, 55, 59, 50, \\\n 47, 47, 33, 51, 61, 61, 52, 62, 64, 64, 47, 58, 58, 61, 50, 55, 47, 39, 59,\\\n 64, 63, 63, 62, 64, 61, 50, 62, 61, 65, 62, 66, 60, 59, 58, 58, 60, 59, 61,\\\n 55, 55, 62, 51, 61, 49, 52, 59, 60, 66, 50, 59, 64, 64, 62, 60, 65, 44, 58, 63]", "_____no_output_____" ], [ "rightSide = [58, 54, 60, 55, 56, 44, 60, 52, 57, 58, 61, 66, 56, 59, 49, 48, 69, 66, 49,\\\n 72, 49, 50, 59, 59, 59, 66, 62, 44, 49, 40, 59, 55, 61, 51, 62, 52, 63, 39,\\\n 63, 52, 62, 49, 48, 65, 68, 45, 63, 58, 55, 56, 55, 57, 34, 64, 66, 54, 65,\\\n 61, 56, 57, 59, 58, 62, 58, 40, 43, 62, 59, 64, 64, 65, 65, 59, 64, 63, 65,\\\n 62, 61, 47, 59, 63, 44, 43, 59, 67, 64, 60, 62, 64, 65, 59, 55, 38, 57, 61,\\\n 52, 61, 61, 60, 34, 62, 64, 58, 39, 63, 47, 55, 54, 48, 60, 55, 60, 65, 41,\\\n 61, 59, 65, 50, 54, 60, 48, 51, 68, 52, 51, 61, 57, 49, 51, 62, 63, 59, 62,\\\n 54, 59, 46, 64, 49, 61]", "_____no_output_____" ], [ "len(leftSide), len(rightSide)", "_____no_output_____" ] ], [ [ "$(115 + 139)!$ is a very big number. 
Lets start small, and take a subselection of the shell data to demonstrate the permutation test concept: the first two shells from the left of the pier and the first one from the right:", "_____no_output_____" ] ], [ [ "rightSub = [52, 54]\nleftSub = [58]\ntotalSample = rightSub + leftSub\ntotalSample", "_____no_output_____" ] ], [ [ "So now we are testing the hypotheses\n\n$$\\begin{array}{lcl}H_0&:& X_1,X_2,X_3 \\overset{IID}{\\sim} F^*=G^* \\\\H_1&:&X_1, X_2 \\overset{IID}{\\sim} F^*, \\,\\,X_3 \\overset{IID}{\\sim} G^*, F^* \\neq G^*\\end{array}$$ \n\nWith the test statistic\n$$\\begin{array}{lcl}T(X_1,X_2,X_3) &=& \\text{abs} \\left(\\displaystyle\\frac{1}{2}\\displaystyle\\sum_{i=1}^2X_i - \\displaystyle\\frac{1}{1}\\displaystyle\\sum_{i=2+1}^3X_i\\right) \\\\ &=&\\text{abs}\\left(\\displaystyle\\frac{X_1+ X_2}{2} - \\displaystyle\\frac{X_3}{1}\\right)\\end{array}$$\n\nOur observed data $x_{obs} = (x_1, x_2, x_3) = (52, 54, 58)$\n\nand the realisation of the test statistic for this data is $t_{obs} = \\text{abs}\\left(\\displaystyle\\frac{52+54}{2} - \\frac{58}{1}\\right) = \\text{abs}\\left(53 - 58\\right) = \\text{abs}(-5) = 5$\n\nNow we need to tabulate the permutations and their probabilities. There are 3! = 6 possible permutataions of three items. For larger samples, you could use the `factorial` function to calculate this:", "_____no_output_____" ] ], [ [ "factorial(3)", "_____no_output_____" ] ], [ [ "We said that under the null hypotheses (the samples have the same DF) each permutation is equally likely, so each permutation has probability $\\displaystyle\\frac{1}{6}$.\n\nThere is a way in Python (the language under the hood in Sage), to get all the permuations of a sequence:", "_____no_output_____" ] ], [ [ "list(Permutations(totalSample))", "_____no_output_____" ] ], [ [ "We can tabulate the permuations, their probabilities, and the value of the test statistic that would be associated with that permutation:\n\n<table border=\"1\" cellpadding=\"5\" align=\"center\">\n<tbody>\n<tr>\n<td style=\"text-align: center;\">Permutation</td>\n<td style=\"text-align: center;\">$t$</td>\n<td style=\"text-align: center;\">$\\mathbf{P}_0(T=t)$</td>\n</tr>\n<tr>\n<td style=\"text-align: center;\"> </td>\n<td style=\"text-align: center;\"> </td>\n<td style=\"text-align: center;\">Probability under Null</td>\n</tr>\n<tr>\n<td style=\"text-align: center;\">(52, 54, 58)</td>\n<td style=\"text-align: center;\">5</td>\n<td style=\"text-align: center;\">$\\frac{1}{6}$</td>\n</tr>\n<tr>\n<td style=\"text-align: center;\">(52, 58, 54)</td>\n<td style=\"text-align: center;\">&nbsp;1</td>\n<td style=\"text-align: center;\">$\\frac{1}{6}$</td>\n</tr>\n<tr>\n<td style=\"text-align: center;\">(54, 52, 58)</td>\n<td style=\"text-align: center;\">5</td>\n<td style=\"text-align: center;\">$\\frac{1}{6}$</td>\n</tr>\n<tr>\n<td style=\"text-align: center;\">(54, 58, 52)</td>\n<td style=\"text-align: center;\">4</td>\n<td style=\"text-align: center;\">$\\frac{1}{6}$</td>\n</tr>\n<tr>\n<td style=\"text-align: center;\">(58, 52, 54)</td>\n<td style=\"text-align: center;\">1</td>\n<td style=\"text-align: center;\">$\\frac{1}{6}$</td>\n</tr>\n<tr>\n<td style=\"text-align: center;\">(58, 54, 52)</td>\n<td style=\"text-align: center;\">4</td>\n<td style=\"text-align: center;\">$\\frac{1}{6}$</td>\n</tr>\n</tbody>\n</table>", "_____no_output_____" ] ], [ [ "allPerms = list(Permutations(totalSample))\nfor p in allPerms:\n t = abs((p[0] + p[1])/2 - p[2]/1)\n print p, \" has t = \", t", "[52, 54, 58] has t = 5\n[52, 
58, 54] has t = 1\n[54, 52, 58] has t = 5\n[54, 58, 52] has t = 4\n[58, 52, 54] has t = 1\n[58, 54, 52] has t = 4\n" ] ], [ [ "To calculate the P-value for our test statistic $t_{obs} = 5$, we need to look at how many permutations would give rise to test statistics that are at least as big, and add up their probabilities.\n\n$$\n\\begin{array}{lcl}\\text{P-value} &=& \\mathbf{P}_0(T \\geq t_{obs}) \\\\&=&\\mathbf{P}_0(T \\geq 5)\\\\&=&\\frac{1}{6} + \\frac {1}{6} \\\\&=&\\frac{2}{6}\\\\ &=&\\frac{1}{3} \\\\ &\\approx & 0.333\\end{array}\n$$\n\nWe could write ourselves a little bit of code to do this in SageMath. As you can see, we could easily improve this to make it more flexible so that we could use it for different numbers of samples, but it will do for now.", "_____no_output_____" ] ], [ [ "allPerms = list(Permutations(totalSample))\npProb = 1/len(allPerms)\npValue = 0\ntobs = 5\nfor p in allPerms:\n t = abs((p[0] + p[1])/2 - p[2]/1)\n if t >= tobs:\n pValue = pValue + pProb\npValue", "_____no_output_____" ] ], [ [ "This means that there is little or no evidence against the null hypothesis (that the shell diameter observations are from the same DF).\n\n### Pooled sample size\n\nThe lowest possible P-value for a pooled sample of size $N=m+n$ is $\\displaystyle\\frac{1}{N!}$. Can you see why this is? \n\nSo with our small sub-samples the smallest possible P-value would be $\\frac{1}{6} \\approx 0.167$. If we are looking for P-value $\\leq 0.01$ to constitute very strong evidence against $H_0$, then we have to have a large enough pooled sample for this to be possible. Since $5! = 5 \\times 4 \\times 3 \\times 2 \\times 1 = 120$, it is good to have $N \\geq 5$\n\n### YouTry in class\n\nTry copying and pasting our code and then adapting it to deal with a sub-sample (52, 54, 60) from the left of the pier and (58, 54) from the right side of the pier. 
", "_____no_output_____" ] ], [ [ "rightSub = [52, 54, 60]\nleftSub = [58, 54]\ntotalSample = rightSub + leftSub\ntotalSample", "_____no_output_____" ] ], [ [ "### You will have to think about:\n\n- calculating the value of the test statistic for the observed data and for all the permuations of the total sample\n- calculating the probability of each permutation\n- calculating the P-value by adding the probabilities for the permutations with test statistics at least as large as the observed value of the test statistic", "_____no_output_____" ], [ "(add more cells if you need them)\n\n(end of You Try)\n\n---", "_____no_output_____" ], [ "We can use the sample function and the Python method for making permutations to experiment with a larger sample, say 5 of each.", "_____no_output_____" ] ], [ [ "n, m = 5, 5\nleftSub = sample(leftSide, n)\nrightSub = sample(rightSide,m)\ntotalSample = leftSub + rightSub\nleftSub; rightSub; totalSample", "_____no_output_____" ], [ "tobs = abs(mean(leftSub) - mean(rightSub))\ntobs", "_____no_output_____" ] ], [ [ "We have met sample briefly already: it is part of the Python random module and it does exactly what you would expect from the name: it samples a specified number of elements randomly from a sequence.", "_____no_output_____" ] ], [ [ "#define a helper function for calculating the tstat from a permutation\ndef tForPerm(perm, samplesize1, samplesize2):\n '''Calculates the t statistic for a permutation of data given the sample sizes to split the permuation into.\n \n Param perm is the permutation of data to be split into the two samples.\n Param samplesize1, samplesize2 are the two sample sizes.\n Returns the absolute value of the difference in the means of the two samples split out from perm.'''\n sample1 = [perm[i] for i in range(samplesize1)]\n sample2 = [perm[samplesize1+j] for j in range(samplesize2)]\n return abs(mean(sample1) - mean(sample2))", "_____no_output_____" ], [ "allPerms = list(Permutations(totalSample))\npProb = 1/len(allPerms)\npValue = 0\ntobs = abs(mean(leftSub) - mean(rightSub))\nfor p in allPerms:\n t = tForPerm(p, n, m)\n if t >= tobs:\n pValue = pValue + pProb\npValue", "_____no_output_____" ], [ "n+m", "_____no_output_____" ], [ "factorial(n+m) # how many permutations is it checking", "_____no_output_____" ] ], [ [ "As you can see from the length of time it takes to do the calculation for $(5+5)! = 10!$ permutations, we will be here a long time if we try to this on all of both shell data sets. Monte Carlo methods to the rescue: we can use Monte Carlo integration to calculate an approximate P-value, and this will be our next topic. \n\n \n\n### You try\n\nTry working out the P-value for a sub-sample (58, 63) from the left of the pier and (61) from the right (the two last values in the left-side data set and the last value in the right-side one). Do it as you would if given a similar question in the exam: you choose how much you want to use Sage to help and how much you do just with pen and paper. ", "_____no_output_____" ], [ "# Plug-in Estimation and Bootstrap\n\n*Raaz needs 4-5 hours*", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ] ]
cb7947b3324e337523d437c2d9364adc6ee5a609
4,817
ipynb
Jupyter Notebook
ipynb/Slovenia.ipynb
oscovida/oscovida.github.io
c74d6da79feda1b5ccce107ad3acd48cf0e74c1c
[ "CC-BY-4.0" ]
2
2020-06-19T09:16:14.000Z
2021-01-24T17:47:56.000Z
ipynb/Slovenia.ipynb
oscovida/oscovida.github.io
c74d6da79feda1b5ccce107ad3acd48cf0e74c1c
[ "CC-BY-4.0" ]
8
2020-04-20T16:49:49.000Z
2021-12-25T16:54:19.000Z
ipynb/Slovenia.ipynb
oscovida/oscovida.github.io
c74d6da79feda1b5ccce107ad3acd48cf0e74c1c
[ "CC-BY-4.0" ]
4
2020-04-20T13:24:45.000Z
2021-01-29T11:12:12.000Z
28.844311
162
0.511314
[ [ [ "# Slovenia\n\n* Homepage of project: https://oscovida.github.io\n* Plots are explained at http://oscovida.github.io/plots.html\n* [Execute this Jupyter Notebook using myBinder](https://mybinder.org/v2/gh/oscovida/binder/master?filepath=ipynb/Slovenia.ipynb)", "_____no_output_____" ] ], [ [ "import datetime\nimport time\n\nstart = datetime.datetime.now()\nprint(f\"Notebook executed on: {start.strftime('%d/%m/%Y %H:%M:%S%Z')} {time.tzname[time.daylight]}\")", "_____no_output_____" ], [ "%config InlineBackend.figure_formats = ['svg']\nfrom oscovida import *", "_____no_output_____" ], [ "overview(\"Slovenia\", weeks=5);", "_____no_output_____" ], [ "overview(\"Slovenia\");", "_____no_output_____" ], [ "compare_plot(\"Slovenia\", normalise=True);\n", "_____no_output_____" ], [ "# load the data\ncases, deaths = get_country_data(\"Slovenia\")\n\n# get population of the region for future normalisation:\ninhabitants = population(\"Slovenia\")\nprint(f'Population of \"Slovenia\": {inhabitants} people')\n\n# compose into one table\ntable = compose_dataframe_summary(cases, deaths)\n\n# show tables with up to 1000 rows\npd.set_option(\"max_rows\", 1000)\n\n# display the table\ntable", "_____no_output_____" ] ], [ [ "# Explore the data in your web browser\n\n- If you want to execute this notebook, [click here to use myBinder](https://mybinder.org/v2/gh/oscovida/binder/master?filepath=ipynb/Slovenia.ipynb)\n- and wait (~1 to 2 minutes)\n- Then press SHIFT+RETURN to advance code cell to code cell\n- See http://jupyter.org for more details on how to use Jupyter Notebook", "_____no_output_____" ], [ "# Acknowledgements:\n\n- Johns Hopkins University provides data for countries\n- Robert Koch Institute provides data for within Germany\n- Atlo Team for gathering and providing data from Hungary (https://atlo.team/koronamonitor/)\n- Open source and scientific computing community for the data tools\n- Github for hosting repository and html files\n- Project Jupyter for the Notebook and binder service\n- The H2020 project Photon and Neutron Open Science Cloud ([PaNOSC](https://www.panosc.eu/))\n\n--------------------", "_____no_output_____" ] ], [ [ "print(f\"Download of data from Johns Hopkins university: cases at {fetch_cases_last_execution()} and \"\n f\"deaths at {fetch_deaths_last_execution()}.\")", "_____no_output_____" ], [ "# to force a fresh download of data, run \"clear_cache()\"", "_____no_output_____" ], [ "print(f\"Notebook execution took: {datetime.datetime.now()-start}\")\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ] ]
cb794bc0310fc103b5acc42b24f678bbf11bd41c
30,198
ipynb
Jupyter Notebook
colab/cell_type_ML_labelling.ipynb
davemcg/scEiaD
51a9028a6d6e57ef08ef38d55392927df2040c03
[ "CC0-1.0" ]
17
2020-11-18T20:59:34.000Z
2022-03-24T16:48:49.000Z
colab/cell_type_ML_labelling.ipynb
davemcg/scEiaD
51a9028a6d6e57ef08ef38d55392927df2040c03
[ "CC0-1.0" ]
8
2020-08-13T20:33:04.000Z
2021-03-12T02:00:10.000Z
colab/cell_type_ML_labelling.ipynb
davemcg/scEiaD
51a9028a6d6e57ef08ef38d55392927df2040c03
[ "CC0-1.0" ]
1
2020-11-18T21:01:12.000Z
2020-11-18T21:01:12.000Z
43.701881
268
0.499735
[ [ [ "<a href=\"https://colab.research.google.com/github/davemcg/scEiaD/blob/master/colab/cell_type_ML_labelling.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "# Auto Label Retinal Cell Types\n\n## tldr \n\nYou can take your (retina) scRNA data and fairly quickly use the scEiaD ML model\nto auto label your cell types. I say fairly quickly because it is *best* if you re-quantify your data with the same reference and counter (kallisto) that we use. You *could* try using your counts from cellranger/whatever....but uh...stuff might get weird.\n\n", "_____no_output_____" ], [ "# Install scvi and kallisto-bustools", "_____no_output_____" ] ], [ [ "import sys\nimport re\n#if True, will install via pypi, else will install from source\nstable = True\nIN_COLAB = \"google.colab\" in sys.modules\n\nif IN_COLAB and stable:\n !pip install --quiet scvi-tools[tutorials]==0.9.0\n\n#!pip install --quiet python==3.8 pandas numpy scikit-learn xgboost==1.3\n\n!pip install --quiet kb-python\n", "\u001b[K |████████████████████████████████| 184kB 17.9MB/s \n\u001b[K |████████████████████████████████| 849kB 37.0MB/s \n\u001b[K |████████████████████████████████| 133kB 57.5MB/s \n\u001b[K |████████████████████████████████| 245kB 42.2MB/s \n\u001b[K |████████████████████████████████| 634kB 57.1MB/s \n\u001b[K |████████████████████████████████| 81kB 11.2MB/s \n\u001b[K |████████████████████████████████| 204kB 58.9MB/s \n\u001b[K |████████████████████████████████| 10.3MB 27.3MB/s \n\u001b[K |████████████████████████████████| 51kB 7.9MB/s \n\u001b[K |████████████████████████████████| 8.7MB 27.2MB/s \n\u001b[K |████████████████████████████████| 3.2MB 58.7MB/s \n\u001b[K |████████████████████████████████| 1.4MB 50.8MB/s \n\u001b[K |████████████████████████████████| 184kB 63.1MB/s \n\u001b[K |████████████████████████████████| 829kB 57.1MB/s \n\u001b[K |████████████████████████████████| 276kB 30.7MB/s \n\u001b[K |████████████████████████████████| 112kB 58.7MB/s \n\u001b[K |████████████████████████████████| 51kB 8.4MB/s \n\u001b[K |████████████████████████████████| 81kB 11.7MB/s \n\u001b[K |████████████████████████████████| 112kB 60.5MB/s \n\u001b[K |████████████████████████████████| 1.3MB 50.8MB/s \n\u001b[K |████████████████████████████████| 71kB 11.2MB/s \n\u001b[K |████████████████████████████████| 1.2MB 50.3MB/s \n\u001b[K |████████████████████████████████| 296kB 61.1MB/s \n\u001b[K |████████████████████████████████| 143kB 60.5MB/s \n\u001b[?25h Building wheel for loompy (setup.py) ... \u001b[?25l\u001b[?25hdone\n Building wheel for future (setup.py) ... \u001b[?25l\u001b[?25hdone\n Building wheel for PyYAML (setup.py) ... \u001b[?25l\u001b[?25hdone\n Building wheel for sinfo (setup.py) ... \u001b[?25l\u001b[?25hdone\n Building wheel for umap-learn (setup.py) ... \u001b[?25l\u001b[?25hdone\n Building wheel for numpy-groupies (setup.py) ... \u001b[?25l\u001b[?25hdone\n Building wheel for pynndescent (setup.py) ... 
\u001b[?25l\u001b[?25hdone\n\u001b[K |████████████████████████████████| 59.1MB 46kB/s \n\u001b[K |████████████████████████████████| 13.2MB 253kB/s \n\u001b[?25h" ], [ "!pip install --quiet pandas numpy scikit-learn xgboost==1.3.1", "\u001b[K |████████████████████████████████| 157.5MB 90kB/s \n\u001b[?25h" ] ], [ [ "# Download our kallisto index\nAs our example set is mouse, we use the Gencode vM25 transcript reference.\n\nThe script that makes the idx and t2g file is [here](https://github.com/davemcg/scEiaD/raw/c3a9dd09a1a159b1f489065a3f23a753f35b83c9/src/build_idx_and_t2g_for_colab.sh). This is precomputed as it takes about 30 minutes and 32GB of memory.\n\nThere's one more wrinkle worth noting: as scEiaD was built across human, mouse, and macaque unified gene names are required. We chose to use the *human* ensembl ID (e.g. CRX is ENSG00000105392) as the base gene naming system. \n\n\n(Download links):\n```\n# Mouse\nhttps://hpc.nih.gov/~mcgaugheyd/scEiaD/colab/gencode.vM25.transcripts.idx\nhttps://hpc.nih.gov/~mcgaugheyd/scEiaD/colab/vM25.tr2gX.humanized.tsv\n# Human\nhttps://hpc.nih.gov/~mcgaugheyd/scEiaD/colab/gencode.v35.transcripts.idx\nhttps://hpc.nih.gov/~mcgaugheyd/scEiaD/colab/v35.tr2gX.tsv\n```\n", "_____no_output_____" ] ], [ [ "%%time\n!wget -O idx.idx https://hpc.nih.gov/~mcgaugheyd/scEiaD/colab/gencode.vM25.transcripts.idx\n!wget -O t2g.txt https://hpc.nih.gov/~mcgaugheyd/scEiaD/colab/vM25.tr2gX.humanized.tsv", "--2021-04-29 12:05:21-- https://hpc.nih.gov/~mcgaugheyd/scEiaD/colab/gencode.vM25.transcripts.idx\nResolving hpc.nih.gov (hpc.nih.gov)... 128.231.2.150, 2607:f220:418:4801::2:96\nConnecting to hpc.nih.gov (hpc.nih.gov)|128.231.2.150|:443... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 2662625893 (2.5G) [application/octet-stream]\nSaving to: ‘idx.idx’\n\nidx.idx 100%[===================>] 2.48G 36.3MB/s in 35s \n\n2021-04-29 12:05:58 (72.8 MB/s) - ‘idx.idx’ saved [2662625893/2662625893]\n\n--2021-04-29 12:05:58-- https://hpc.nih.gov/~mcgaugheyd/scEiaD/colab/vM25.tr2gX.humanized.tsv\nResolving hpc.nih.gov (hpc.nih.gov)... 128.231.2.150, 2607:f220:418:4801::2:96\nConnecting to hpc.nih.gov (hpc.nih.gov)|128.231.2.150|:443... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 22502749 (21M) [application/octet-stream]\nSaving to: ‘t2g.txt’\n\nt2g.txt 100%[===================>] 21.46M 90.2MB/s in 0.2s \n\n2021-04-29 12:06:00 (90.2 MB/s) - ‘t2g.txt’ saved [22502749/22502749]\n\nCPU times: user 377 ms, sys: 71.2 ms, total: 448 ms\nWall time: 39.2 s\n" ] ], [ [ "# Quantify with kbtools (Kallisto - Bustools wrapper) in one easy step.\n\nGoing into the vagaries of turning a SRA deposit into a non-borked pair of fastq files is beyond the scope of this document. Plus I would swear a lot. 
So we just give an example set from a Human organoid retina 10x (version 2) experiment.\n\nThe Pachter Lab has a discussion of how/where to get public data here: https://colab.research.google.com/github/pachterlab/kallistobustools/blob/master/notebooks/data_download.ipynb\n\nIf you have your own 10X bam file, then 10X provides a very nice and simple tool to turn it into fastq file here: https://github.com/10XGenomics/bamtofastq\n\nTo reduce run-time we have taken the first five million reads from this fastq pair.\n\nThis will take ~3 minutes, depending on the internet speed between Google and our server\n\nYou can also directly stream the file to improve wall-time, but I was getting periodic errors, so we are doing the simpler thing and downloading each fastq file here first.\n\n ", "_____no_output_____" ] ], [ [ "%%time\n!wget -O sample_1.fastq.gz https://hpc.nih.gov/~mcgaugheyd/scEiaD/colab/SRR11799731_1.head.fastq.gz\n!wget -O sample_2.fastq.gz https://hpc.nih.gov/~mcgaugheyd/scEiaD/colab/SRR11799731_2.head.fastq.gz\n!kb count --overwrite --h5ad -i idx.idx -g t2g.txt -x DropSeq -o output --filter bustools -t 2 \\\n sample_1.fastq.gz \\\n sample_2.fastq.gz", "--2021-04-29 12:06:31-- https://hpc.nih.gov/~mcgaugheyd/scEiaD/colab/SRR11799731_1.head.fastq.gz\nResolving hpc.nih.gov (hpc.nih.gov)... 128.231.2.150, 2607:f220:418:4801::2:96\nConnecting to hpc.nih.gov (hpc.nih.gov)|128.231.2.150|:443... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 103529059 (99M) [application/octet-stream]\nSaving to: ‘sample_1.fastq.gz’\n\nsample_1.fastq.gz 100%[===================>] 98.73M 90.3MB/s in 1.1s \n\n2021-04-29 12:06:33 (90.3 MB/s) - ‘sample_1.fastq.gz’ saved [103529059/103529059]\n\n--2021-04-29 12:06:33-- https://hpc.nih.gov/~mcgaugheyd/scEiaD/colab/SRR11799731_2.head.fastq.gz\nResolving hpc.nih.gov (hpc.nih.gov)... 128.231.2.150, 2607:f220:418:4801::2:96\nConnecting to hpc.nih.gov (hpc.nih.gov)|128.231.2.150|:443... connected.\nHTTP request sent, awaiting response... 
200 OK\nLength: 245302496 (234M) [application/octet-stream]\nSaving to: ‘sample_2.fastq.gz’\n\nsample_2.fastq.gz 100%[===================>] 233.94M 97.8MB/s in 2.4s \n\n2021-04-29 12:06:35 (97.8 MB/s) - ‘sample_2.fastq.gz’ saved [245302496/245302496]\n\n[2021-04-29 12:06:36,630] INFO Using index idx.idx to generate BUS file to output from\n[2021-04-29 12:06:36,630] INFO sample_1.fastq.gz\n[2021-04-29 12:06:36,630] INFO sample_2.fastq.gz\n[2021-04-29 12:07:17,592] INFO Sorting BUS file output/output.bus to output/tmp/output.s.bus\n[2021-04-29 12:07:20,908] INFO Whitelist not provided\n[2021-04-29 12:07:20,908] INFO Generating whitelist output/whitelist.txt from BUS file output/tmp/output.s.bus\n[2021-04-29 12:07:20,929] INFO Inspecting BUS file output/tmp/output.s.bus\n[2021-04-29 12:07:21,695] INFO Correcting BUS records in output/tmp/output.s.bus to output/tmp/output.s.c.bus with whitelist output/whitelist.txt\n[2021-04-29 12:07:21,900] INFO Sorting BUS file output/tmp/output.s.c.bus to output/output.unfiltered.bus\n[2021-04-29 12:07:24,360] INFO Generating count matrix output/counts_unfiltered/cells_x_genes from BUS file output/output.unfiltered.bus\n[2021-04-29 12:07:26,177] INFO Reading matrix output/counts_unfiltered/cells_x_genes.mtx\n[2021-04-29 12:07:26,915] INFO Writing matrix to h5ad output/counts_unfiltered/adata.h5ad\n[2021-04-29 12:07:27,075] INFO Filtering with bustools\n[2021-04-29 12:07:27,075] INFO Generating whitelist output/filter_barcodes.txt from BUS file output/output.unfiltered.bus\n[2021-04-29 12:07:27,088] INFO Correcting BUS records in output/output.unfiltered.bus to output/tmp/output.unfiltered.c.bus with whitelist output/filter_barcodes.txt\n[2021-04-29 12:07:27,180] INFO Sorting BUS file output/tmp/output.unfiltered.c.bus to output/output.filtered.bus\n[2021-04-29 12:07:29,651] INFO Generating count matrix output/counts_filtered/cells_x_genes from BUS file output/output.filtered.bus\n[2021-04-29 12:07:31,353] INFO Reading matrix output/counts_filtered/cells_x_genes.mtx\n[2021-04-29 12:07:32,041] INFO Writing matrix to h5ad output/counts_filtered/adata.h5ad\nCPU times: user 368 ms, sys: 60.4 ms, total: 428 ms\nWall time: 1min\n" ] ], [ [ "\n# Download models\n(and our xgboost functions for cell type labelling)\n\nThe scVI model is the same that we use to create the data for plae.nei.nih.gov\n\nThe xgboost model is a simplified version that *only* uses the scVI latent dims and omits the Early/Late/RPC cell types and collapses them all into \"RPC\"", "_____no_output_____" ] ], [ [ "!wget -O scVI_scEiaD.tgz https://hpc.nih.gov/~mcgaugheyd/scEiaD/2021_03_17/2021_03_17__scVI_scEiaD.tgz\n!tar -xzf scVI_scEiaD.tgz\n\n!wget -O celltype_ML_model.tar https://hpc.nih.gov/~mcgaugheyd/scEiaD/2021_03_17/2021_cell_type_ML_all.tar\n!tar -xf celltype_ML_model.tar\n\n!wget -O celltype_predictor.py https://raw.githubusercontent.com/davemcg/scEiaD/master/src/cell_type_predictor.py\n\n", "--2021-04-29 12:12:38-- https://hpc.nih.gov/~mcgaugheyd/scEiaD/2021_03_17/2021_03_17__scVI_scEiaD.tgz\nResolving hpc.nih.gov (hpc.nih.gov)... 128.231.2.150, 2607:f220:418:4801::2:96\nConnecting to hpc.nih.gov (hpc.nih.gov)|128.231.2.150|:443... connected.\nHTTP request sent, awaiting response... 
200 OK\nLength: 12851811 (12M) [application/octet-stream]\nSaving to: ‘scVI_scEiaD.tgz’\n\nscVI_scEiaD.tgz 100%[===================>] 12.26M 36.9MB/s in 0.3s \n\n2021-04-29 12:12:40 (36.9 MB/s) - ‘scVI_scEiaD.tgz’ saved [12851811/12851811]\n\n--2021-04-29 12:12:40-- https://hpc.nih.gov/~mcgaugheyd/scEiaD/2021_03_17/2021_cell_type_ML_all.tar\nResolving hpc.nih.gov (hpc.nih.gov)... 128.231.2.150, 2607:f220:418:4801::2:96\nConnecting to hpc.nih.gov (hpc.nih.gov)|128.231.2.150|:443... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 12359680 (12M) [application/octet-stream]\nSaving to: ‘celltype_ML_model.tar’\n\ncelltype_ML_model.t 100%[===================>] 11.79M 39.5MB/s in 0.3s \n\n2021-04-29 12:12:40 (39.5 MB/s) - ‘celltype_ML_model.tar’ saved [12359680/12359680]\n\n--2021-04-29 12:12:40-- https://raw.githubusercontent.com/davemcg/scEiaD/master/src/cell_type_predictor.py\nResolving raw.githubusercontent.com (raw.githubusercontent.com)... 185.199.108.133, 185.199.109.133, 185.199.110.133, ...\nConnecting to raw.githubusercontent.com (raw.githubusercontent.com)|185.199.108.133|:443... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 11534 (11K) [text/plain]\nSaving to: ‘celltype_predictor.py’\n\ncelltype_predictor. 100%[===================>] 11.26K --.-KB/s in 0s \n\n2021-04-29 12:12:41 (117 MB/s) - ‘celltype_predictor.py’ saved [11534/11534]\n\n" ] ], [ [ "# Python time", "_____no_output_____" ] ], [ [ "import anndata\nimport sys\nimport os\nimport numpy as np\nimport pandas as pd\nimport random\nimport scanpy as sc\nfrom scipy import sparse\nimport scvi\nimport torch\n# 2 cores\nsc.settings.n_jobs = 2\n# set seeds\nrandom.seed(234)\nscvi.settings.seed = 234\n\n# set some args\norg = 'mouse'\nn_epochs = 15\nconfidence = 0.5", "_____no_output_____" ] ], [ [ "# Load adata\nAnd process (mouse processing requires a bit more jiggling that can be skipped if you have human data)", "_____no_output_____" ] ], [ [ "# load query data\nadata_query = sc.read_h5ad('output/counts_filtered/adata.h5ad')\nadata_query.layers[\"counts\"] = adata_query.X.copy()\nadata_query.layers[\"counts\"] = sparse.csr_matrix(adata_query.layers[\"counts\"])\n\n\n# Set scVI model path\nscVI_model_dir_path = 'scVIprojectionSO_scEiaD_model/n_features-5000__transform-counts__partition-universe__covariate-batch__method-scVIprojectionSO__dims-8/' \n# Read in HVG genes used in scVI model\nvar_names = pd.read_csv(scVI_model_dir_path + '/var_names.csv', header = None)\n# cut down query adata object to use just the var_names used in the scVI model training\n\nif org.lower() == 'mouse':\n adata_query.var_names = adata_query.var['gene_name']\n n_missing_genes = sum(~var_names[0].isin(adata_query.var_names))\n dummy_adata = anndata.AnnData(X=sparse.csr_matrix((adata_query.shape[0], n_missing_genes)))\n dummy_adata.obs_names = adata_query.obs_names\n dummy_adata.var_names = var_names[0][~var_names[0].isin(adata_query.var_names)]\n adata_fixed = anndata.concat([adata_query, dummy_adata], axis=1)\n adata_query_HVG = adata_fixed[:, var_names[0]]\n", "_____no_output_____" ] ], [ [ "# Run scVI (trained on scEiaD data) \nGoal: get scEiaD batch corrected latent space for *your* data", "_____no_output_____" ] ], [ [ "adata_query_HVG.obs['batch'] = 'New Data'\n\nscvi.data.setup_anndata(adata_query_HVG, batch_key=\"batch\")\nvae_query = scvi.model.SCVI.load_query_data(\n adata_query_HVG, \n scVI_model_dir_path\n)\n# project scVI latent dims from scEiaD onto query data\nvae_query.train(max_epochs=n_epochs, 
plan_kwargs=dict(weight_decay=0.0))\n# get the latent dims into the adata\nadata_query_HVG.obsm[\"X_scVI\"] = vae_query.get_latent_representation()\n", "Trying to set attribute `.obs` of view, copying.\n" ] ], [ [ "# Get Cell Type predictions\n(this xgboost model does NOT use the organim or Age information, but as those field were often used by use, they got hard-coded in. So we will put dummy values in).", "_____no_output_____" ] ], [ [ "# extract latent dimensions\nobs=pd.DataFrame(adata_query_HVG.obs)\nobsm=pd.DataFrame(adata_query_HVG.obsm[\"X_scVI\"])\nfeatures = list(obsm.columns)\nobsm.index = obs.index.values\nobsm['Barcode'] = obsm.index\nobsm['Age'] = 1000\nobsm['organism'] = 'x'\n# xgboost ML time\nfrom celltype_predictor import *\n\n\nCT_predictions = scEiaD_classifier_predict(inputMatrix=obsm, \n labelIdCol='ID', \n labelNameCol='CellType', \n trainedModelFile= os.getcwd() + '/2021_cell_type_ML_all',\n featureCols=features, \n predProbThresh=confidence)", "\nLoading Data...\n\nPredicting Data...\n\n19 samples Failed to meet classification threshold of 0.5\n" ] ], [ [ "# What do we have?", "_____no_output_____" ] ], [ [ "CT_predictions['CellType'].value_counts()", "_____no_output_____" ] ] ]
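[Editorial illustration — not part of the stored notebook record above.] The cell-labelling notebook serialized above stops at inspecting `CT_predictions['CellType'].value_counts()`. A possible next step is to copy the predicted labels back onto the query AnnData object for plotting or downstream analysis. This sketch assumes `scEiaD_classifier_predict` returns a pandas DataFrame containing `Barcode` and `CellType` columns for the cells that were passed in (the actual return format should be checked before relying on this):

```python
import pandas as pd  # assumption: CT_predictions is a DataFrame with 'Barcode' and 'CellType' columns

pred = CT_predictions.copy()
if 'Barcode' in pred.columns:
    pred = pred.set_index('Barcode')

# Align predictions to the cells in the query object; barcodes that failed the
# confidence threshold (or are otherwise missing) become NaN.
adata_query_HVG.obs['CellType_scEiaD'] = (
    pred['CellType'].reindex(adata_query_HVG.obs_names).values
)
```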
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb7953831886093bad5d6a0d74bb7f989a534ab2
8,820
ipynb
Jupyter Notebook
NBS/emotion_detection/emotion_game.ipynb
LukePratt/Real-Time-Emotion-Reading
ee0fac4fafcd5c69dbfdd08f19057992d2844893
[ "MIT" ]
null
null
null
NBS/emotion_detection/emotion_game.ipynb
LukePratt/Real-Time-Emotion-Reading
ee0fac4fafcd5c69dbfdd08f19057992d2844893
[ "MIT" ]
null
null
null
NBS/emotion_detection/emotion_game.ipynb
LukePratt/Real-Time-Emotion-Reading
ee0fac4fafcd5c69dbfdd08f19057992d2844893
[ "MIT" ]
null
null
null
33.664122
134
0.538322
[ [ [ "from scipy.spatial import distance as dist\nimport numpy as np\nimport cv2\nfrom imutils import face_utils\nfrom imutils.video import VideoStream\nimport imutils\nfrom fastai.vision import *\nimport argparse\nimport time\nimport dlib\nfrom playsound import playsound\nfrom torch.serialization import SourceChangeWarning\nwarnings.filterwarnings(\"ignore\", category=SourceChangeWarning)\nwarnings.filterwarnings(\"ignore\", category=UserWarning)", "_____no_output_____" ], [ "path = './'\nprint(path + 'export.pkl')\nlearn = load_learner(path, 'export.pkl')", "./export.pkl\n" ], [ "face_cascade = cv2.CascadeClassifier(\"../haarcascade_frontalface_alt2.xml\")\nvs = VideoStream(src=0).start()\nstart = time.perf_counter()\ndata = []\ntime_value = 0\nEYE_AR_THRESH = 0.20\nEYE_AR_CONSEC_FRAMES = 10\nCOUNTER = 0\ndef eye_aspect_ratio(eye):\n A = dist.euclidean(eye[1], eye[5])\n B = dist.euclidean(eye[2], eye[4])\n C = dist.euclidean(eye[0], eye[3])\n ear = (A + B) / (2.0 * C)\n return ear\ndef data_time(time_value, prediction, probability, ear):\n current_time = int(time.perf_counter()-start)\n if current_time != time_value:\n data.append([current_time, prediction, probability, ear])\n time_value = current_time\n return time_value\npredictor = dlib.shape_predictor(\"shape_predictor_68_face_landmarks.dat\")\n(lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS[\"left_eye\"]\n(rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS[\"right_eye\"]", "_____no_output_____" ], [ "# Function to test the emotion\ndef test_emotion(emotion, vs):\n frame = vs.read()\n frame = imutils.resize(frame, width=450)\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n face_coord = face_cascade.detectMultiScale(gray, 1.1, 5, minSize=(30, 30))\n return \n\n# To start the game you will press q. Once pressed the first emotion you will\n# make is neutral. If you do make the emotion for 3 seconds you get a correct\n# noise and if not it will give an incorrect noise. There will be a noises\n# introducing all the emotions and noises to say you passed or you did not pass.\n# This will go in order after neutral to happy, sad, surprised, then angry. 
Once\n# you go through all emotions correctly you win.", "_____no_output_____" ], [ "emotions = ['neutral','happy', 'sad', 'surprise', 'angry'] # put the emotions\nFRAMES_TO_PASS = 6\nfor emotion in emotions:\n emotion_counter = 0\n first_pass = True\n while True:\n frame = vs.read()\n frame = imutils.resize(frame, width=450)\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n face_coord = face_cascade.detectMultiScale(gray, 1.1, 5, minSize=(30, 30))\n cv2.putText(frame, f\"Make a {emotion} face!\", (30, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2)\n for coords in face_coord:\n X, Y, w, h = coords\n H, W, _ = frame.shape\n X_1, X_2 = (max(0, X - int(w * 0.3)), min(X + int(1.3 * w), W))\n Y_1, Y_2 = (max(0, Y - int(0.3 * h)), min(Y + int(1.3 * h), H))\n img_cp = gray[Y_1:Y_2, X_1:X_2].copy()\n prediction, idx, probability = learn.predict(Image(pil2tensor(img_cp, np.float32).div_(225)))\n cv2.rectangle(\n img=frame,\n pt1=(X_1, Y_1),\n pt2=(X_2, Y_2),\n color=(128, 128, 0),\n thickness=2,\n )\n rect = dlib.rectangle(X, Y, X+w, Y+h)\n cv2.putText(frame, str(prediction), (10, frame.shape[0] - 25), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (225, 255, 255), 2)\n cv2.putText(frame, \"Press q to quit\", (250, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)\n shape = predictor(gray, rect)\n shape = face_utils.shape_to_np(shape)\n if str(prediction) == emotion:\n emotion_counter += 1\n if emotion_counter > FRAMES_TO_PASS:\n playsound('../sounds/correct.mp3')\n cv2.imshow(\"frame\", frame)\n cv2.putText(frame, f\"You passed the {emotion} round!\", (30, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)\n print(f'You passed the {emotion} round')\n break\n cv2.imshow(\"frame\", frame)\n if first_pass:\n playsound('../sounds/' + emotion + '.mp3')\n first_pass = False\n if cv2.waitKey(1) & 0xFF == ord(\"q\") :\n vs.stop()\n cv2.destroyAllWindows()\n cv2.waitKey(1)\n break\n# End the game\nplaysound('../sounds/win.mp3')\nwhile True:\n frame = vs.read()\n frame = imutils.resize(frame, width=450)\n cv2.putText(frame, \"You win!!!\", (30, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)\n cv2.imshow(\"frame\", frame)\n if cv2.waitKey(1) & 0xFF == ord(\"q\") :\n vs.stop()\n cv2.destroyAllWindows()\n cv2.waitKey(1)\n break", "You passed the neutral round\nYou passed the happy round\nYou passed the sad round\nYou passed the surprise round\nYou passed the angry round\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code" ] ]
cb79682da5a370a9b8708b28f61cb54aaee7b6a2
656,099
ipynb
Jupyter Notebook
Machine _Learning_and_Reinforcement_Learning_in_Finance/03_Reinforcement_Learning_in_Finance/02_QLBS Model Implementation/dp_qlbs_oneset_m3_ex2_v3.ipynb
cilsya/coursera
4a7896f3225cb84e2f15770409c1f18bfe529615
[ "MIT" ]
1
2021-03-15T13:57:04.000Z
2021-03-15T13:57:04.000Z
Machine _Learning_and_Reinforcement_Learning_in_Finance/03_Reinforcement_Learning_in_Finance/02_QLBS Model Implementation/dp_qlbs_oneset_m3_ex2_v3.ipynb
cilsya/coursera
4a7896f3225cb84e2f15770409c1f18bfe529615
[ "MIT" ]
5
2020-03-24T16:17:05.000Z
2021-06-01T22:49:40.000Z
Machine _Learning_and_Reinforcement_Learning_in_Finance/03_Reinforcement_Learning_in_Finance/02_QLBS Model Implementation/dp_qlbs_oneset_m3_ex2_v3.ipynb
cilsya/coursera
4a7896f3225cb84e2f15770409c1f18bfe529615
[ "MIT" ]
null
null
null
431.928242
159,484
0.931536
[ [ [ "## The QLBS model for a European option\n\nWelcome to your 2nd assignment in Reinforcement Learning in Finance. In this exercise you will arrive to an option price and the hedging portfolio via standard toolkit of Dynamic Pogramming (DP).\nQLBS model learns both the optimal option price and optimal hedge directly from trading data.\n\n**Instructions:**\n- You will be using Python 3.\n- Avoid using for-loops and while-loops, unless you are explicitly told to do so.\n- Do not modify the (# GRADED FUNCTION [function name]) comment in some cells. Your work would not be graded if you change this. Each cell containing that comment should only contain one function.\n- After coding your function, run the cell right below it to check if your result is correct.\n- When encountering **```# dummy code - remove```** please replace this code with your own \n\n\n**After this assignment you will:**\n - Re-formulate option pricing and hedging method using the language of Markov Decision Processes (MDP)\n - Setup foward simulation using Monte Carlo\n - Expand optimal action (hedge) $a_t^\\star(X_t)$ and optimal Q-function $Q_t^\\star(X_t, a_t^\\star)$ in basis functions with time-dependend coefficients\n\nLet's get started!", "_____no_output_____" ], [ "## About iPython Notebooks ##\n\niPython Notebooks are interactive coding environments embedded in a webpage. You will be using iPython notebooks in this class. You only need to write code between the ### START CODE HERE ### and ### END CODE HERE ### comments. After writing your code, you can run the cell by either pressing \"SHIFT\"+\"ENTER\" or by clicking on \"Run Cell\" (denoted by a play symbol) in the upper bar of the notebook. \n\nWe will often specify \"(≈ X lines of code)\" in the comments to tell you about how much code you need to write. It is just a rough estimate, so don't feel bad if your code is longer or shorter.", "_____no_output_____" ] ], [ [ "#import warnings\n#warnings.filterwarnings(\"ignore\")\n\nimport numpy as np\nimport pandas as pd\nfrom scipy.stats import norm\nimport random\nimport time\nimport matplotlib.pyplot as plt\nimport sys\n\nsys.path.append(\"..\")\nimport grading", "_____no_output_____" ], [ "### ONLY FOR GRADING. DO NOT EDIT ###\nsubmissions=dict()\nassignment_key=\"wLtf3SoiEeieSRL7rCBNJA\" \nall_parts=[\"15mYc\", \"h1P6Y\", \"q9QW7\",\"s7MpJ\",\"Pa177\"]\n### ONLY FOR GRADING. 
DO NOT EDIT ###", "_____no_output_____" ], [ "COURSERA_TOKEN = 'gF094cwtidz2YQpP' # the key provided to the Student under his/her email on submission page\nCOURSERA_EMAIL = '[email protected]' # the email", "_____no_output_____" ] ], [ [ "## Parameters for MC simulation of stock prices", "_____no_output_____" ] ], [ [ "S0 = 100 # initial stock price\nmu = 0.05 # drift\nsigma = 0.15 # volatility\nr = 0.03 # risk-free rate\nM = 1 # maturity\n\nT = 24 # number of time steps\nN_MC = 10000 # number of paths\n\ndelta_t = M / T # time interval\ngamma = np.exp(- r * delta_t) # discount factor", "_____no_output_____" ] ], [ [ "### Black-Sholes Simulation\nSimulate $N_{MC}$ stock price sample paths with $T$ steps by the classical Black-Sholes formula.\n\n$$dS_t=\\mu S_tdt+\\sigma S_tdW_t\\quad\\quad S_{t+1}=S_te^{\\left(\\mu-\\frac{1}{2}\\sigma^2\\right)\\Delta t+\\sigma\\sqrt{\\Delta t}Z}$$\n\nwhere $Z$ is a standard normal random variable.\n\nBased on simulated stock price $S_t$ paths, compute state variable $X_t$ by the following relation.\n\n$$X_t=-\\left(\\mu-\\frac{1}{2}\\sigma^2\\right)t\\Delta t+\\log S_t$$\n\nAlso compute\n\n$$\\Delta S_t=S_{t+1}-e^{r\\Delta t}S_t\\quad\\quad \\Delta\\hat{S}_t=\\Delta S_t-\\Delta\\bar{S}_t\\quad\\quad t=0,...,T-1$$\n\nwhere $\\Delta\\bar{S}_t$ is the sample mean of all values of $\\Delta S_t$.\n\nPlots of 5 stock price $S_t$ and state variable $X_t$ paths are shown below.", "_____no_output_____" ] ], [ [ "# make a dataset\nstarttime = time.time()\nnp.random.seed(42)\n\n# stock price\nS = pd.DataFrame([], index=range(1, N_MC+1), columns=range(T+1))\nS.loc[:,0] = S0\n\n# standard normal random numbers\nRN = pd.DataFrame(np.random.randn(N_MC,T), index=range(1, N_MC+1), columns=range(1, T+1))\n\nfor t in range(1, T+1):\n S.loc[:,t] = S.loc[:,t-1] * np.exp((mu - 1/2 * sigma**2) * delta_t + sigma * np.sqrt(delta_t) * RN.loc[:,t])\n\ndelta_S = S.loc[:,1:T].values - np.exp(r * delta_t) * S.loc[:,0:T-1]\ndelta_S_hat = delta_S.apply(lambda x: x - np.mean(x), axis=0)\n\n# state variable\nX = - (mu - 1/2 * sigma**2) * np.arange(T+1) * delta_t + np.log(S) # delta_t here is due to their conventions\n\nendtime = time.time()\nprint('\\nTime Cost:', endtime - starttime, 'seconds')", "\nTime Cost: 0.20280027389526367 seconds\n" ], [ "# plot 10 paths\nstep_size = N_MC // 10\nidx_plot = np.arange(step_size, N_MC, step_size)\n\nplt.plot(S.T.iloc[:,idx_plot])\nplt.xlabel('Time Steps')\nplt.title('Stock Price Sample Paths')\nplt.show()\n\nplt.plot(X.T.iloc[:,idx_plot])\nplt.xlabel('Time Steps')\nplt.ylabel('State Variable')\nplt.show()", "_____no_output_____" ] ], [ [ "Define function *terminal_payoff* to compute the terminal payoff of a European put option.\n\n$$H_T\\left(S_T\\right)=\\max\\left(K-S_T,0\\right)$$", "_____no_output_____" ] ], [ [ "def terminal_payoff(ST, K):\n # ST final stock price\n # K strike\n payoff = max(K - ST, 0)\n return payoff", "_____no_output_____" ], [ "type(delta_S)", "_____no_output_____" ] ], [ [ "## Define spline basis functions ", "_____no_output_____" ] ], [ [ "import bspline\nimport bspline.splinelab as splinelab\n\nX_min = np.min(np.min(X))\nX_max = np.max(np.max(X))\nprint('X.shape = ', X.shape)\nprint('X_min, X_max = ', X_min, X_max)\n\np = 4 # order of spline (as-is; 3 = cubic, 4: B-spline?)\nncolloc = 12\n\ntau = np.linspace(X_min,X_max,ncolloc) # These are the sites to which we would like to interpolate\n\n# k is a knot vector that adds endpoints repeats as appropriate for a spline of order p\n# To get meaninful results, one should have 
ncolloc >= p+1\nk = splinelab.aptknt(tau, p) \n \n# Spline basis of order p on knots k\nbasis = bspline.Bspline(k, p) \n \nf = plt.figure()\n# B = bspline.Bspline(k, p) # Spline basis functions \nprint('Number of points k = ', len(k))\nbasis.plot()\n\nplt.savefig('Basis_functions.png', dpi=600)", "X.shape = (10000, 25)\nX_min, X_max = 4.024923524903037 5.190802775129617\nNumber of points k = 17\n" ], [ "type(basis)", "_____no_output_____" ], [ "X.values.shape", "_____no_output_____" ] ], [ [ "### Make data matrices with feature values\n\n\"Features\" here are the values of basis functions at data points\nThe outputs are 3D arrays of dimensions num_tSteps x num_MC x num_basis", "_____no_output_____" ] ], [ [ "num_t_steps = T + 1\nnum_basis = ncolloc # len(k) #\n\ndata_mat_t = np.zeros((num_t_steps, N_MC,num_basis ))\nprint('num_basis = ', num_basis)\nprint('dim data_mat_t = ', data_mat_t.shape)\n\nt_0 = time.time()\n# fill it \nfor i in np.arange(num_t_steps):\n x = X.values[:,i]\n data_mat_t[i,:,:] = np.array([ basis(el) for el in x ])\n\nt_end = time.time()\nprint('Computational time:', t_end - t_0, 'seconds')", "num_basis = 12\ndim data_mat_t = (25, 10000, 12)\nComputational time: 55.5485999584198 seconds\n" ], [ "# save these data matrices for future re-use\nnp.save('data_mat_m=r_A_%d' % N_MC, data_mat_t)", "_____no_output_____" ], [ "print(data_mat_t.shape) # shape num_steps x N_MC x num_basis\nprint(len(k))", "(25, 10000, 12)\n17\n" ] ], [ [ "## Dynamic Programming solution for QLBS \n\nThe MDP problem in this case is to solve the following Bellman optimality equation for the action-value function.\n\n$$Q_t^\\star\\left(x,a\\right)=\\mathbb{E}_t\\left[R_t\\left(X_t,a_t,X_{t+1}\\right)+\\gamma\\max_{a_{t+1}\\in\\mathcal{A}}Q_{t+1}^\\star\\left(X_{t+1},a_{t+1}\\right)\\space|\\space X_t=x,a_t=a\\right],\\space\\space t=0,...,T-1,\\quad\\gamma=e^{-r\\Delta t}$$\n\nwhere $R_t\\left(X_t,a_t,X_{t+1}\\right)$ is the one-step time-dependent random reward and $a_t\\left(X_t\\right)$ is the action (hedge).\n\nDetailed steps of solving this equation by Dynamic Programming are illustrated below.", "_____no_output_____" ], [ "With this set of basis functions $\\left\\{\\Phi_n\\left(X_t^k\\right)\\right\\}_{n=1}^N$, expand the optimal action (hedge) $a_t^\\star\\left(X_t\\right)$ and optimal Q-function $Q_t^\\star\\left(X_t,a_t^\\star\\right)$ in basis functions with time-dependent coefficients.\n$$a_t^\\star\\left(X_t\\right)=\\sum_n^N{\\phi_{nt}\\Phi_n\\left(X_t\\right)}\\quad\\quad Q_t^\\star\\left(X_t,a_t^\\star\\right)=\\sum_n^N{\\omega_{nt}\\Phi_n\\left(X_t\\right)}$$\n\nCoefficients $\\phi_{nt}$ and $\\omega_{nt}$ are computed recursively backward in time for $t=T−1,...,0$. 
", "_____no_output_____" ], [ "Coefficients for expansions of the optimal action $a_t^\\star\\left(X_t\\right)$ are solved by\n\n$$\\phi_t=\\mathbf A_t^{-1}\\mathbf B_t$$\n\nwhere $\\mathbf A_t$ and $\\mathbf B_t$ are matrix and vector respectively with elements given by\n\n$$A_{nm}^{\\left(t\\right)}=\\sum_{k=1}^{N_{MC}}{\\Phi_n\\left(X_t^k\\right)\\Phi_m\\left(X_t^k\\right)\\left(\\Delta\\hat{S}_t^k\\right)^2}\\quad\\quad B_n^{\\left(t\\right)}=\\sum_{k=1}^{N_{MC}}{\\Phi_n\\left(X_t^k\\right)\\left[\\hat\\Pi_{t+1}^k\\Delta\\hat{S}_t^k+\\frac{1}{2\\gamma\\lambda}\\Delta S_t^k\\right]}$$\n\n$$\\Delta S_t=S_{t+1} - e^{-r\\Delta t} S_t\\space \\quad t=T-1,...,0$$\nwhere $\\Delta\\hat{S}_t$ is the sample mean of all values of $\\Delta S_t$.\n\nDefine function *function_A* and *function_B* to compute the value of matrix $\\mathbf A_t$ and vector $\\mathbf B_t$.", "_____no_output_____" ], [ "## Define the option strike and risk aversion parameter", "_____no_output_____" ] ], [ [ "risk_lambda = 0.001 # risk aversion\nK = 100 # option stike \n\n# Note that we set coef=0 below in function function_B_vec. This correspond to a pure risk-based hedging", "_____no_output_____" ] ], [ [ "### Part 1 Calculate coefficients $\\phi_{nt}$ of the optimal action $a_t^\\star\\left(X_t\\right)$\n\n**Instructions:**\n- implement function_A_vec() which computes $A_{nm}^{\\left(t\\right)}$ matrix\n- implement function_B_vec() which computes $B_n^{\\left(t\\right)}$ column vector", "_____no_output_____" ] ], [ [ "# functions to compute optimal hedges\ndef function_A_vec(t, delta_S_hat, data_mat, reg_param):\n \"\"\"\n function_A_vec - compute the matrix A_{nm} from Eq. (52) (with a regularization!)\n Eq. (52) in QLBS Q-Learner in the Black-Scholes-Merton article\n \n Arguments:\n t - time index, a scalar, an index into time axis of data_mat\n delta_S_hat - pandas.DataFrame of dimension N_MC x T\n data_mat - pandas.DataFrame of dimension T x N_MC x num_basis\n reg_param - a scalar, regularization parameter\n \n Return:\n - np.array, i.e. matrix A_{nm} of dimension num_basis x num_basis\n \"\"\"\n \n ### START CODE HERE ### (≈ 5-6 lines of code)\n # store result in A_mat for grading\n \n# # The cell above shows the equations we need\n# # Eq. (53) in QLBS Q-Learner in the Black-Scholes-Merton article we are trying to solve for\n# # Phi* = (At^-1)(Bt)\n# # \n# # This function solves for the A coeffecient, which is shown in the cell above, which is\n# # Eq. (52) in QLBS Q-Learner in the Black-Scholes-Merton article\n# #\n# # The article is located here\n# # https://papers.ssrn.com/sol3/papers.cfm?abstract_id=3087076\n \n# # Get the data matrix at this specific time index\n# Xt = data_mat[t,:,:]\n \n# # As shown in the description of the arguments in this function\n# # data_mat - pandas.DataFrame of dimension T x N_MC x num_basis\n# #\n# # We got Xt at a certain t time index, so \n# # Xt pandas.DataFrame of dimension N_MC x num_basis\n# #\n# # Therefore...\n# num_basis = Xt.shape[1]\n \n# # Now we need Delta S hat at this time index for the \n# # 'A' coefficient from the \n# # Eq. (52) in QLBS Q-Learner in the Black-Scholes-Merton article\n# # \n# # We are feed the parameter delta_S_hat into this function\n# # and \n# # delta_S_hat - pandas.DataFrame of dimension N_MC x T\n# #\n# # We what the delta_S_hat at this time index\n# #\n# # Therefore...\n# current_delta_S_hat = delta_S_hat.loc[:, t]\n \n# # The last term in the A coefficient calculation in the\n# # Eq. 
(52) in QLBS Q-Learner in the Black-Scholes-Merton article\n# # is delta_S_hat squared\n# #\n# # NOTE: There is .reshape(-1,1) which means that 1 for the columns\n# # MUST be respected, but the -1 for the rows means that whatever\n# # elements are left, fill it up to be whatever number.\n# current_delta_S_hat_squared = np.square(current_delta_S_hat).reshape( -1, 1)\n \n# # Now we have the terms to make up the equation.\n# # Eq. (52) in QLBS Q-Learner in the Black-Scholes-Merton article\n# # NOTE: The summation is not done in this function.\n# # NOTE: You do not see it in the equation \n# # Eq. (52) in QLBS Q-Learner in the Black-Scholes-Merton article\n# # but regularization is a technique used in Machine Learning.\n# # You add the term.\n# # np.eye() creates an identity matrix of size you specify.\n# #\n# # NOTE: When doing dot products, might have to transpose so the dimensions\n# # align.\n# A_mat = ( np.dot( Xt.T, Xt*current_delta_S_hat_squared )\n# +\n# reg_param * np.eye(num_basis) )\n\n X_mat = data_mat[t, :, :]\n num_basis_funcs = X_mat.shape[1]\n this_dS = delta_S_hat.loc[:, t]\n hat_dS2 = (this_dS ** 2).reshape(-1, 1)\n A_mat = np.dot(X_mat.T, X_mat * hat_dS2) + reg_param * np.eye(num_basis_funcs)\n \n ### END CODE HERE ###\n return A_mat\n \n \ndef function_B_vec(t, \n Pi_hat, \n delta_S_hat=delta_S_hat, \n S=S, \n data_mat=data_mat_t,\n gamma=gamma,\n risk_lambda=risk_lambda):\n \"\"\"\n function_B_vec - compute vector B_{n} from Eq. (52) QLBS Q-Learner in the Black-Scholes-Merton article\n \n Arguments:\n t - time index, a scalar, an index into time axis of delta_S_hat\n Pi_hat - pandas.DataFrame of dimension N_MC x T of portfolio values \n delta_S_hat - pandas.DataFrame of dimension N_MC x T\n S - pandas.DataFrame of simulated stock prices of dimension N_MC x T\n data_mat - pandas.DataFrame of dimension T x N_MC x num_basis\n gamma - one time-step discount factor $exp(-r \\delta t)$\n risk_lambda - risk aversion coefficient, a small positive number\n Return:\n np.array() of dimension num_basis x 1\n \"\"\"\n # coef = 1.0/(2 * gamma * risk_lambda)\n # override it by zero to have pure risk hedge\n \n ### START CODE HERE ### (≈ 5-6 lines of code)\n # store result in B_vec for grading\n \n# # Get the data matrix at this specific time index\n# Xt = data_mat[t,:,:]\n \n# # Computer the first term in the brackets.\n# first_term = Pi_hat[ :, t+1 ] * delta_S_hat.loc[:, t]\n \n# # NOTE: for the last term in the equation\n# # Eq. 
(52) QLBS Q-Learner in the Black-Scholes-Merton article\n# # \n# # would be\n# # last_term = 1.0/(2 * gamma * risk_lambda) * S.loc[:, t]\n# last_coefficient = 1.0/(2 * gamma * risk_lambda)\n# #\n# # But the instructions say make it equal override it by zero to have pure risk hedge\n# last_coefficient = 0\n# last_term = last_coefficient * S.loc[:, t]\n \n# # Compute \n# second_factor = first_term + last_term\n \n# # Compute the equation\n# # NOTE: When doing dot products, might have to transpose so the dimensions\n# # align.\n# B_vec = np.dot(Xt.T, second_factor)\n \n tmp = Pi_hat.loc[:,t+1] * delta_S_hat.loc[:, t]\n X_mat = data_mat[t, :, :] # matrix of dimension N_MC x num_basis\n B_vec = np.dot(X_mat.T, tmp)\n \n ### END CODE HERE ###\n return B_vec", "_____no_output_____" ], [ "### GRADED PART (DO NOT EDIT) ###\nreg_param = 1e-3\nnp.random.seed(42)\n\nA_mat = function_A_vec(T-1, delta_S_hat, data_mat_t, reg_param)\nidx_row = np.random.randint(low=0, high=A_mat.shape[0], size=50)\n\nnp.random.seed(42)\nidx_col = np.random.randint(low=0, high=A_mat.shape[1], size=50)\n\n\npart_1 = list(A_mat[idx_row, idx_col])\ntry:\n part1 = \" \".join(map(repr, part_1))\nexcept TypeError:\n part1 = repr(part_1)\n\n\nsubmissions[all_parts[0]]=part1\ngrading.submit(COURSERA_EMAIL, COURSERA_TOKEN, assignment_key,all_parts[:1],all_parts,submissions)\nA_mat[idx_row, idx_col]\n### GRADED PART (DO NOT EDIT) ###", "D:\\application\\Anaconda3\\envs\\pyalgo\\lib\\site-packages\\ipykernel_launcher.py:82: FutureWarning: reshape is deprecated and will raise in a subsequent release. Please use .values.reshape(...) instead\n" ], [ "### GRADED PART (DO NOT EDIT) ###\nnp.random.seed(42)\nrisk_lambda = 0.001\nPi = pd.DataFrame([], index=range(1, N_MC+1), columns=range(T+1))\nPi.iloc[:,-1] = S.iloc[:,-1].apply(lambda x: terminal_payoff(x, K))\n\nPi_hat = pd.DataFrame([], index=range(1, N_MC+1), columns=range(T+1))\nPi_hat.iloc[:,-1] = Pi.iloc[:,-1] - np.mean(Pi.iloc[:,-1])\nB_vec = function_B_vec(T-1, Pi_hat, delta_S_hat, S, data_mat_t, gamma, risk_lambda)\n\npart_2 = list(B_vec)\ntry:\n part2 = \" \".join(map(repr, part_2))\nexcept TypeError:\n part2 = repr(part_2)\n\n\nsubmissions[all_parts[1]]=part2\ngrading.submit(COURSERA_EMAIL, COURSERA_TOKEN, assignment_key,all_parts[:2],all_parts,submissions)\n\nB_vec\n### GRADED PART (DO NOT EDIT) ###", "Submission successful, please check on the coursera grader page for the status\n" ] ], [ [ "## Compute optimal hedge and portfolio value", "_____no_output_____" ], [ "Call *function_A* and *function_B* for $t=T-1,...,0$ together with basis function $\\Phi_n\\left(X_t\\right)$ to compute optimal action $a_t^\\star\\left(X_t\\right)=\\sum_n^N{\\phi_{nt}\\Phi_n\\left(X_t\\right)}$ backward recursively with terminal condition $a_T^\\star\\left(X_T\\right)=0$.\n\nOnce the optimal hedge $a_t^\\star\\left(X_t\\right)$ is computed, the portfolio value $\\Pi_t$ could also be computed backward recursively by \n\n$$\\Pi_t=\\gamma\\left[\\Pi_{t+1}-a_t^\\star\\Delta S_t\\right]\\quad t=T-1,...,0$$\n\ntogether with the terminal condition $\\Pi_T=H_T\\left(S_T\\right)=\\max\\left(K-S_T,0\\right)$ for a European put option.\n\nAlso compute $\\hat{\\Pi}_t=\\Pi_t-\\bar{\\Pi}_t$, where $\\bar{\\Pi}_t$ is the sample mean of all values of $\\Pi_t$.\n\nPlots of 5 optimal hedge $a_t^\\star$ and portfolio value $\\Pi_t$ paths are shown below.", "_____no_output_____" ] ], [ [ "starttime = time.time()\n\n# portfolio value\nPi = pd.DataFrame([], index=range(1, N_MC+1), columns=range(T+1))\nPi.iloc[:,-1] = 
S.iloc[:,-1].apply(lambda x: terminal_payoff(x, K))\n\nPi_hat = pd.DataFrame([], index=range(1, N_MC+1), columns=range(T+1))\nPi_hat.iloc[:,-1] = Pi.iloc[:,-1] - np.mean(Pi.iloc[:,-1])\n\n# optimal hedge\na = pd.DataFrame([], index=range(1, N_MC+1), columns=range(T+1))\na.iloc[:,-1] = 0\n\nreg_param = 1e-3 # free parameter\nfor t in range(T-1, -1, -1):\n A_mat = function_A_vec(t, delta_S_hat, data_mat_t, reg_param)\n B_vec = function_B_vec(t, Pi_hat, delta_S_hat, S, data_mat_t, gamma, risk_lambda)\n # print ('t = A_mat.shape = B_vec.shape = ', t, A_mat.shape, B_vec.shape)\n \n # coefficients for expansions of the optimal action\n phi = np.dot(np.linalg.inv(A_mat), B_vec)\n \n a.loc[:,t] = np.dot(data_mat_t[t,:,:],phi)\n Pi.loc[:,t] = gamma * (Pi.loc[:,t+1] - a.loc[:,t] * delta_S.loc[:,t])\n Pi_hat.loc[:,t] = Pi.loc[:,t] - np.mean(Pi.loc[:,t])\n \na = a.astype('float')\nPi = Pi.astype('float')\nPi_hat = Pi_hat.astype('float')\n\nendtime = time.time()\nprint('Computational time:', endtime - starttime, 'seconds')", "D:\\application\\Anaconda3\\envs\\pyalgo\\lib\\site-packages\\ipykernel_launcher.py:82: FutureWarning: reshape is deprecated and will raise in a subsequent release. Please use .values.reshape(...) instead\n" ], [ "# plot 10 paths\nplt.plot(a.T.iloc[:,idx_plot])\nplt.xlabel('Time Steps')\nplt.title('Optimal Hedge')\nplt.show()\n\nplt.plot(Pi.T.iloc[:,idx_plot])\nplt.xlabel('Time Steps')\nplt.title('Portfolio Value')\nplt.show()", "_____no_output_____" ] ], [ [ "## Compute rewards for all paths", "_____no_output_____" ], [ "Once the optimal hedge $a_t^\\star$ and portfolio value $\\Pi_t$ are all computed, the reward function $R_t\\left(X_t,a_t,X_{t+1}\\right)$ could then be computed by\n\n$$R_t\\left(X_t,a_t,X_{t+1}\\right)=\\gamma a_t\\Delta S_t-\\lambda Var\\left[\\Pi_t\\space|\\space\\mathcal F_t\\right]\\quad t=0,...,T-1$$\n\nwith terminal condition $R_T=-\\lambda Var\\left[\\Pi_T\\right]$.\n\nPlot of 5 reward function $R_t$ paths is shown below.", "_____no_output_____" ] ], [ [ "\n# Compute rewards for all paths\nstarttime = time.time()\n# reward function\nR = pd.DataFrame([], index=range(1, N_MC+1), columns=range(T+1))\nR.iloc[:,-1] = - risk_lambda * np.var(Pi.iloc[:,-1])\n\nfor t in range(T):\n R.loc[1:,t] = gamma * a.loc[1:,t] * delta_S.loc[1:,t] - risk_lambda * np.var(Pi.loc[1:,t])\n\nendtime = time.time()\nprint('\\nTime Cost:', endtime - starttime, 'seconds')\n \n# plot 10 paths\nplt.plot(R.T.iloc[:, idx_plot])\nplt.xlabel('Time Steps')\nplt.title('Reward Function')\nplt.show()", "\nTime Cost: 0.1530001163482666 seconds\n" ] ], [ [ "## Part 2: Compute the optimal Q-function with the DP approach \n", "_____no_output_____" ], [ "Coefficients for expansions of the optimal Q-function $Q_t^\\star\\left(X_t,a_t^\\star\\right)$ are solved by\n\n$$\\omega_t=\\mathbf C_t^{-1}\\mathbf D_t$$\n\nwhere $\\mathbf C_t$ and $\\mathbf D_t$ are matrix and vector respectively with elements given by\n\n$$C_{nm}^{\\left(t\\right)}=\\sum_{k=1}^{N_{MC}}{\\Phi_n\\left(X_t^k\\right)\\Phi_m\\left(X_t^k\\right)}\\quad\\quad D_n^{\\left(t\\right)}=\\sum_{k=1}^{N_{MC}}{\\Phi_n\\left(X_t^k\\right)\\left(R_t\\left(X_t,a_t^\\star,X_{t+1}\\right)+\\gamma\\max_{a_{t+1}\\in\\mathcal{A}}Q_{t+1}^\\star\\left(X_{t+1},a_{t+1}\\right)\\right)}$$", "_____no_output_____" ], [ "Define function *function_C* and *function_D* to compute the value of matrix $\\mathbf C_t$ and vector $\\mathbf D_t$.\n\n**Instructions:**\n- implement function_C_vec() which computes $C_{nm}^{\\left(t\\right)}$ matrix\n- implement 
function_D_vec() which computes $D_n^{\\left(t\\right)}$ column vector", "_____no_output_____" ] ], [ [ "def function_C_vec(t, data_mat, reg_param):\n \"\"\"\n function_C_vec - calculate C_{nm} matrix from Eq. (56) (with a regularization!)\n Eq. (56) in QLBS Q-Learner in the Black-Scholes-Merton article\n \n Arguments:\n t - time index, a scalar, an index into time axis of data_mat \n data_mat - pandas.DataFrame of values of basis functions of dimension T x N_MC x num_basis\n reg_param - regularization parameter, a scalar\n \n Return:\n C_mat - np.array of dimension num_basis x num_basis\n \"\"\"\n ### START CODE HERE ### (≈ 5-6 lines of code)\n # your code here ....\n # C_mat = your code here ...\n X_mat = data_mat[t, :, :]\n num_basis_funcs = X_mat.shape[1]\n C_mat = np.dot(X_mat.T, X_mat) + reg_param * np.eye(num_basis_funcs)\n ### END CODE HERE ###\n return C_mat\n \ndef function_D_vec(t, Q, R, data_mat, gamma=gamma):\n \"\"\"\n function_D_vec - calculate D_{nm} vector from Eq. (56) (with a regularization!)\n Eq. (56) in QLBS Q-Learner in the Black-Scholes-Merton article\n \n Arguments:\n t - time index, a scalar, an index into time axis of data_mat \n Q - pandas.DataFrame of Q-function values of dimension N_MC x T\n R - pandas.DataFrame of rewards of dimension N_MC x T\n data_mat - pandas.DataFrame of values of basis functions of dimension T x N_MC x num_basis\n gamma - one time-step discount factor $exp(-r \\delta t)$\n \n Return:\n D_vec - np.array of dimension num_basis x 1\n \"\"\"\n \n ### START CODE HERE ### (≈ 5-6 lines of code)\n # your code here ....\n # D_vec = your code here ...\n X_mat = data_mat[t, :, :]\n D_vec = np.dot(X_mat.T, R.loc[:,t] + gamma * Q.loc[:, t+1])\n ### END CODE HERE ###\n return D_vec", "_____no_output_____" ], [ "### GRADED PART (DO NOT EDIT) ###\nC_mat = function_C_vec(T-1, data_mat_t, reg_param)\nnp.random.seed(42)\nidx_row = np.random.randint(low=0, high=C_mat.shape[0], size=50)\n\nnp.random.seed(42)\nidx_col = np.random.randint(low=0, high=C_mat.shape[1], size=50)\n\npart_3 = list(C_mat[idx_row, idx_col])\ntry:\n part3 = \" \".join(map(repr, part_3))\nexcept TypeError:\n part3 = repr(part_3)\n\n\nsubmissions[all_parts[2]]=part3\ngrading.submit(COURSERA_EMAIL, COURSERA_TOKEN, assignment_key,all_parts[:3],all_parts,submissions)\n\nC_mat[idx_row, idx_col]\n### GRADED PART (DO NOT EDIT) ###", "Submission successful, please check on the coursera grader page for the status\n" ], [ "### GRADED PART (DO NOT EDIT) ###\nQ = pd.DataFrame([], index=range(1, N_MC+1), columns=range(T+1))\nQ.iloc[:,-1] = - Pi.iloc[:,-1] - risk_lambda * np.var(Pi.iloc[:,-1])\nD_vec = function_D_vec(T-1, Q, R, data_mat_t,gamma)\n\n\npart_4 = list(D_vec)\ntry:\n part4 = \" \".join(map(repr, part_4))\nexcept TypeError:\n part4 = repr(part_4)\n\n\nsubmissions[all_parts[3]]=part4\ngrading.submit(COURSERA_EMAIL, COURSERA_TOKEN, assignment_key,all_parts[:4],all_parts,submissions)\n\nD_vec\n### GRADED PART (DO NOT EDIT) ###", "Submission successful, please check on the coursera grader page for the status\n" ] ], [ [ "Call *function_C* and *function_D* for $t=T-1,...,0$ together with basis function $\\Phi_n\\left(X_t\\right)$ to compute optimal action Q-function $Q_t^\\star\\left(X_t,a_t^\\star\\right)=\\sum_n^N{\\omega_{nt}\\Phi_n\\left(X_t\\right)}$ backward recursively with terminal condition $Q_T^\\star\\left(X_T,a_T=0\\right)=-\\Pi_T\\left(X_T\\right)-\\lambda Var\\left[\\Pi_T\\left(X_T\\right)\\right]$.", "_____no_output_____" ] ], [ [ "starttime = time.time()\n\n# Q function\nQ = 
pd.DataFrame([], index=range(1, N_MC+1), columns=range(T+1))\nQ.iloc[:,-1] = - Pi.iloc[:,-1] - risk_lambda * np.var(Pi.iloc[:,-1])\n\nreg_param = 1e-3\nfor t in range(T-1, -1, -1):\n ######################\n C_mat = function_C_vec(t,data_mat_t,reg_param)\n D_vec = function_D_vec(t, Q,R,data_mat_t,gamma)\n omega = np.dot(np.linalg.inv(C_mat), D_vec)\n \n Q.loc[:,t] = np.dot(data_mat_t[t,:,:], omega)\n \nQ = Q.astype('float')\nendtime = time.time()\nprint('\\nTime Cost:', endtime - starttime, 'seconds')\n\n# plot 10 paths\nplt.plot(Q.T.iloc[:, idx_plot])\nplt.xlabel('Time Steps')\nplt.title('Optimal Q-Function')\nplt.show()", "\nTime Cost: 0.16299986839294434 seconds\n" ] ], [ [ "The QLBS option price is given by $C_t^{\\left(QLBS\\right)}\\left(S_t,ask\\right)=-Q_t\\left(S_t,a_t^\\star\\right)$\n", "_____no_output_____" ], [ "## Summary of the QLBS pricing and comparison with the BSM pricing ", "_____no_output_____" ], [ "Compare the QLBS price to European put price given by Black-Sholes formula.\n\n$$C_t^{\\left(BS\\right)}=Ke^{-r\\left(T-t\\right)}\\mathcal N\\left(-d_2\\right)-S_t\\mathcal N\\left(-d_1\\right)$$", "_____no_output_____" ] ], [ [ "# The Black-Scholes prices\ndef bs_put(t, S0=S0, K=K, r=r, sigma=sigma, T=M):\n d1 = (np.log(S0/K) + (r + 1/2 * sigma**2) * (T-t)) / sigma / np.sqrt(T-t)\n d2 = (np.log(S0/K) + (r - 1/2 * sigma**2) * (T-t)) / sigma / np.sqrt(T-t)\n price = K * np.exp(-r * (T-t)) * norm.cdf(-d2) - S0 * norm.cdf(-d1)\n return price\n\ndef bs_call(t, S0=S0, K=K, r=r, sigma=sigma, T=M):\n d1 = (np.log(S0/K) + (r + 1/2 * sigma**2) * (T-t)) / sigma / np.sqrt(T-t)\n d2 = (np.log(S0/K) + (r - 1/2 * sigma**2) * (T-t)) / sigma / np.sqrt(T-t)\n price = S0 * norm.cdf(d1) - K * np.exp(-r * (T-t)) * norm.cdf(d2)\n return price\n", "_____no_output_____" ] ], [ [ "## The DP solution for QLBS", "_____no_output_____" ] ], [ [ "# QLBS option price\nC_QLBS = - Q.copy()\n\nprint('-------------------------------------------')\nprint(' QLBS Option Pricing (DP solution) ')\nprint('-------------------------------------------\\n')\nprint('%-25s' % ('Initial Stock Price:'), S0)\nprint('%-25s' % ('Drift of Stock:'), mu)\nprint('%-25s' % ('Volatility of Stock:'), sigma)\nprint('%-25s' % ('Risk-free Rate:'), r)\nprint('%-25s' % ('Risk aversion parameter: '), risk_lambda)\nprint('%-25s' % ('Strike:'), K)\nprint('%-25s' % ('Maturity:'), M)\nprint('%-26s %.4f' % ('\\nQLBS Put Price: ', C_QLBS.iloc[0,0]))\nprint('%-26s %.4f' % ('\\nBlack-Sholes Put Price:', bs_put(0)))\nprint('\\n')\n\n# plot 10 paths\nplt.plot(C_QLBS.T.iloc[:,idx_plot])\nplt.xlabel('Time Steps')\nplt.title('QLBS Option Price')\nplt.show()", "-------------------------------------------\n QLBS Option Pricing (DP solution) \n-------------------------------------------\n\nInitial Stock Price: 100\nDrift of Stock: 0.05\nVolatility of Stock: 0.15\nRisk-free Rate: 0.03\nRisk aversion parameter: 0.001\nStrike: 100\nMaturity: 1\n\nQLBS Put Price: 4.9261\n\nBlack-Sholes Put Price: 4.5296\n\n\n" ], [ "### GRADED PART (DO NOT EDIT) ###\n\npart5 = str(C_QLBS.iloc[0,0])\nsubmissions[all_parts[4]]=part5\ngrading.submit(COURSERA_EMAIL, COURSERA_TOKEN, assignment_key,all_parts[:5],all_parts,submissions)\n\nC_QLBS.iloc[0,0]\n### GRADED PART (DO NOT EDIT) ###", "Submission successful, please check on the coursera grader page for the status\n" ] ], [ [ "### make a summary picture", "_____no_output_____" ] ], [ [ "# plot: Simulated S_t and X_t values\n# optimal hedge and portfolio values\n# rewards and optimal Q-function\n\nf, axarr = 
plt.subplots(3, 2)\nf.subplots_adjust(hspace=.5)\nf.set_figheight(8.0)\nf.set_figwidth(8.0)\n\naxarr[0, 0].plot(S.T.iloc[:,idx_plot]) \naxarr[0, 0].set_xlabel('Time Steps')\naxarr[0, 0].set_title(r'Simulated stock price $S_t$')\n\naxarr[0, 1].plot(X.T.iloc[:,idx_plot]) \naxarr[0, 1].set_xlabel('Time Steps')\naxarr[0, 1].set_title(r'State variable $X_t$')\n\naxarr[1, 0].plot(a.T.iloc[:,idx_plot]) \naxarr[1, 0].set_xlabel('Time Steps')\naxarr[1, 0].set_title(r'Optimal action $a_t^{\\star}$')\n\naxarr[1, 1].plot(Pi.T.iloc[:,idx_plot]) \naxarr[1, 1].set_xlabel('Time Steps')\naxarr[1, 1].set_title(r'Optimal portfolio $\\Pi_t$')\n\naxarr[2, 0].plot(R.T.iloc[:,idx_plot]) \naxarr[2, 0].set_xlabel('Time Steps')\naxarr[2, 0].set_title(r'Rewards $R_t$') \n\naxarr[2, 1].plot(Q.T.iloc[:,idx_plot]) \naxarr[2, 1].set_xlabel('Time Steps')\naxarr[2, 1].set_title(r'Optimal DP Q-function $Q_t^{\\star}$')\n\n\n# plt.savefig('QLBS_DP_summary_graphs_ATM_option_mu=r.png', dpi=600)\n# plt.savefig('QLBS_DP_summary_graphs_ATM_option_mu>r.png', dpi=600)\n#plt.savefig('QLBS_DP_summary_graphs_ATM_option_mu>r.png', dpi=600)\nplt.savefig('r.png', dpi=600)\n\nplt.show()", "_____no_output_____" ], [ "# plot convergence to the Black-Scholes values\n\n# lam = 0.0001, Q = 4.1989 +/- 0.3612 # 4.378\n# lam = 0.001: Q = 4.9004 +/- 0.1206 # Q=6.283\n# lam = 0.005: Q = 8.0184 +/- 0.9484 # Q = 14.7489\n# lam = 0.01: Q = 11.9158 +/- 2.2846 # Q = 25.33\n\nlam_vals = np.array([0.0001, 0.001, 0.005, 0.01])\n# Q_vals = np.array([3.77, 3.81, 4.57, 7.967,12.2051])\nQ_vals = np.array([4.1989, 4.9004, 8.0184, 11.9158])\nQ_std = np.array([0.3612,0.1206, 0.9484, 2.2846])\n\nBS_price = bs_put(0)\n\n# f, axarr = plt.subplots(1, 1)\nfig, ax = plt.subplots(1, 1)\n\nf.subplots_adjust(hspace=.5)\nf.set_figheight(4.0)\nf.set_figwidth(4.0)\n\n# ax.plot(lam_vals,Q_vals) \nax.errorbar(lam_vals, Q_vals, yerr=Q_std, fmt='o')\n\nax.set_xlabel('Risk aversion')\nax.set_ylabel('Optimal option price')\nax.set_title(r'Optimal option price vs risk aversion')\nax.axhline(y=BS_price,linewidth=2, color='r')\ntextstr = 'BS price = %2.2f'% (BS_price)\nprops = dict(boxstyle='round', facecolor='wheat', alpha=0.5) \n# place a text box in upper left in axes coords\nax.text(0.05, 0.95, textstr, fontsize=11,transform=ax.transAxes, verticalalignment='top', bbox=props)\nplt.savefig('Opt_price_vs_lambda_Markowitz.png')\nplt.show()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
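The QLBS notebook record above obtains the hedge coefficients phi_t = A_t^{-1} B_t and the Q-function coefficients omega_t = C_t^{-1} D_t by explicitly inverting the regularized matrices with np.linalg.inv. The sketch below shows the same ridge-regularized normal-equation step written with np.linalg.solve, which avoids forming the inverse and is usually better conditioned; the function name, array shapes, and random inputs are illustrative assumptions rather than the notebook's exact variables.

```python
import numpy as np

def fit_basis_coefficients(X_mat, target, reg_param=1e-3):
    """Solve the ridge-regularized normal equations (X'X + reg*I) w = X' target.

    X_mat  : (N_MC, num_basis) basis-function values at one time step
    target : (N_MC,) regression target, e.g. Pi_hat * delta_S_hat for the hedge,
             or R_t + gamma * Q_{t+1} for the Q-function update
    """
    num_basis = X_mat.shape[1]
    A = X_mat.T @ X_mat + reg_param * np.eye(num_basis)  # plays the role of A_t or C_t
    b = X_mat.T @ target                                  # plays the role of B_t or D_t
    # Same coefficients as np.dot(np.linalg.inv(A), b), up to floating-point error
    return np.linalg.solve(A, b)

# Illustrative usage on random data with notebook-like shapes
rng = np.random.default_rng(0)
X = rng.normal(size=(10_000, 12))
y = rng.normal(size=10_000)
print(fit_basis_coefficients(X, y).shape)  # (12,)
```

Either spelling yields the same phi and omega vectors; solve() is simply the numerically safer way to carry out the linear solve inside the backward recursion.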
cb796af67aca0a9eafc4b5487b524b8365a79214
25,819
ipynb
Jupyter Notebook
Groups/Group_ID_46/Feature Extraction from Image/Edge Features/Example_Edge_Features_Prewitt.ipynb
Vijayant7899/DataScience
4b80361652a5056dd7c1a97267dc1792cfa106ec
[ "MIT" ]
5
2020-12-13T07:53:22.000Z
2020-12-20T18:49:27.000Z
Groups/Group_ID_46/Feature Extraction from Image/Edge Features/Example_Edge_Features_Prewitt.ipynb
Vijayant7899/DataScience
4b80361652a5056dd7c1a97267dc1792cfa106ec
[ "MIT" ]
null
null
null
Groups/Group_ID_46/Feature Extraction from Image/Edge Features/Example_Edge_Features_Prewitt.ipynb
Vijayant7899/DataScience
4b80361652a5056dd7c1a97267dc1792cfa106ec
[ "MIT" ]
24
2020-12-12T11:23:28.000Z
2021-10-04T13:09:38.000Z
226.482456
22,812
0.92002
[ [ [ "# Feature Extraction from Image (Edge Detection)\n---\n\n### Package Name: Edge_Features\n---\n\n## Methods:\n\n### 1. horizontal_edge_prewitt(image):\nThis method returns the horizontal edge features extracted from a grayscale image using prewitt_h().\n\n#### Parameters:\n\n- **Image:** It takes a grayscale image having dimensions M x N\n\n### 2. vertical_edge_prewitt(image):\nThis method returns the vertical edge features extracted from a grayscale image using prewitt_v().\n\n#### Parameters:\n\n- **Image:** It takes a grayscale image having dimensions M x N\n---\n\n## Example:", "_____no_output_____" ] ], [ [ "import cv2\nimport matplotlib.pyplot as plt\nfrom Edge_Features import Edge_Features", "_____no_output_____" ], [ "# Creating object by calling class Edge_Features\nfeatures = Edge_Features()\n\n# Read input image as grayscale image\nimage = cv2.imread('flat_chessboard.png', cv2.IMREAD_GRAYSCALE)\n\n# Passing image to horizontal_edge_prewitt() to get horizontal edge features\nhorizontal_edges = features.horizontal_edge_prewitt(image)\n\n# Passing image to vertical_edge_prewitt() to get vertical edge features\nvertical_edges = features.vertical_edge_prewitt(image)\n\nall_edges = horizontal_edges + vertical_edges\n\n# Display input image and extracted edge features\nfig, axes = plt.subplots(nrows=1, ncols=4, figsize=(12,4))\naxes[0].imshow(image, cmap='gray')\naxes[0].set_title(\"Chessboard Image\")\naxes[1].imshow(horizontal_edges, cmap='gray')\naxes[1].set_title('Horizontal Edges')\naxes[2].imshow(vertical_edges, cmap='gray')\naxes[2].set_title('Vertical Edges')\naxes[3].imshow(all_edges, cmap='gray')\naxes[3].set_title('All Edges')\nplt.tight_layout()\nplt.show()", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ] ]
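The Example_Edge_Features_Prewitt record above relies on a custom Edge_Features class whose horizontal_edge_prewitt() and vertical_edge_prewitt() methods wrap prewitt_h() and prewitt_v(). A minimal sketch of the underlying operation, correlating a grayscale array with the classic 3x3 Prewitt kernels through SciPy, is shown below; the exact sign and scaling conventions of the packaged functions are an assumption, and a synthetic checkerboard stands in for the notebook's flat_chessboard.png.

```python
import numpy as np
from scipy.ndimage import convolve

# Classic 3x3 Prewitt kernels (sign and normalization conventions vary by library)
PREWITT_H = np.array([[ 1.0,  1.0,  1.0],
                      [ 0.0,  0.0,  0.0],
                      [-1.0, -1.0, -1.0]]) / 3.0   # responds to horizontal edges
PREWITT_V = PREWITT_H.T                            # responds to vertical edges

def prewitt_edges(gray):
    """Return (horizontal_edges, vertical_edges) for a 2-D grayscale array."""
    gray = np.asarray(gray, dtype=float)
    return convolve(gray, PREWITT_H), convolve(gray, PREWITT_V)

# Synthetic checkerboard used as a stand-in test image
board = np.kron((np.indices((8, 8)).sum(axis=0) % 2).astype(float), np.ones((16, 16)))
h_edges, v_edges = prewitt_edges(board)
print(h_edges.shape, v_edges.shape)  # (128, 128) (128, 128)
```

Summing the two responses, as the notebook does with all_edges, then highlights edges of both orientations at once.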
cb7982474ed2b7625a436dc96641022c8dc9d030
27,332
ipynb
Jupyter Notebook
notebooks/prep_visdata.ipynb
daviscvance/Airbnb-Listing-Recommendations
0286e521e6a4351688bd0b08a11f5405898d2448
[ "MIT" ]
3
2019-11-29T14:47:38.000Z
2020-12-16T03:04:14.000Z
notebooks/prep_visdata.ipynb
daviscvance/Airbnb-Listing-Recommendations
0286e521e6a4351688bd0b08a11f5405898d2448
[ "MIT" ]
null
null
null
notebooks/prep_visdata.ipynb
daviscvance/Airbnb-Listing-Recommendations
0286e521e6a4351688bd0b08a11f5405898d2448
[ "MIT" ]
1
2019-12-06T04:00:36.000Z
2019-12-06T04:00:36.000Z
60.335541
7,904
0.734487
[ [ [ "# Tableau data\n\n### NYC Lat Long data for plotting\n\nThis notebook is quick and dirty! Just preparing data for tableau visualization", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\ndf = pd.read_csv('data/newyork/NYClatlong.csv').iloc[:,1:]\ndf.rename({'neighbourhood_cleansed': 'neighborhood', \n 'host_is_superhost': 'superhost',\n 'is_business_travel_ready': 'business'\n }, axis='columns', inplace=True)\ndf.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 48852 entries, 0 to 48851\nData columns (total 12 columns):\nid 48852 non-null int64\nneighborhood 48852 non-null object\nprice 48852 non-null object\ncity 48791 non-null object\nstate 48851 non-null object\nzipcode 48172 non-null object\naccommodates 48852 non-null int64\nlatitude 48852 non-null float64\nlongitude 48852 non-null float64\nsuperhost 48746 non-null object\nproperty_type 48852 non-null object\nbusiness 48852 non-null object\ndtypes: float64(2), int64(2), object(8)\nmemory usage: 4.5+ MB\n" ], [ "def str2bool(x):\n return str(x).lower() == 't'\n\ndf['superhost'] = df.superhost.apply(str2bool)\ndf['business'] = df.business.apply(str2bool)\n\n\ndef clean_price(x):\n y = x.split('.')[0] \\\n .replace('$', '') \\\n .replace(',', '') \n return y\n\ndf['price'] = df.price.apply(clean_price).astype('uint16')", "_____no_output_____" ], [ "df['price_p_person'] = round(df.price / df.accommodates, 2)", "_____no_output_____" ], [ "df.price[(df.price < 1000)].plot(kind='hist', bins=40)\ndf.price.describe(percentiles=list(np.arange(0,1,.1)))", "_____no_output_____" ], [ "df.price_p_person[(df.price_p_person < 250)].plot(kind='hist', bins=35)\ndf.price_p_person.describe(percentiles=list(np.arange(0,1,.1)))", "_____no_output_____" ], [ "columns = ['price','superhost','business','price_p_person','latitude','longitude']\naggs = df.groupby('neighborhood')[columns].agg({'average':'mean', 'count':'count'})\naggs.columns = aggs.columns.get_level_values(1)", "/Users/user/Desktop/anaconda/lib/python3.6/site-packages/pandas/core/groupby.py:4291: FutureWarning: using a dict with renaming is deprecated and will be removed in a future version\n return super(DataFrameGroupBy, self).aggregate(arg, *args, **kwargs)\n" ], [ "colnames = {'price':'avg_price','superhost':'avg_superhost',\n 'business':'avg_business','price_p_person':'avg_price_p_person',\n 'latitude':'avg_lat','longitude':'avg_long'}\naggs = aggs.reset_index().iloc[:,:8].rename(colnames, axis='columns')\naggs['count'] = aggs.iloc[:,-1]", "_____no_output_____" ], [ "data = pd.merge(df, aggs, how='left', on='neighborhood')\ncnt = data.iloc[:, -1]\ndata = data.iloc[:, :19]\ndata['count'] = cnt\ndata.head(2)", "_____no_output_____" ], [ "data.to_csv('data/clean_latlong.csv')", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
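The prep_visdata record above builds its per-neighborhood summary with groupby(...).agg({'average': 'mean', 'count': 'count'}), a dict-renaming form that pandas already reports as deprecated in the captured FutureWarning. A small sketch of the same kind of summary using named aggregation (supported since pandas 0.25) follows; the toy DataFrame and reduced column set are illustrative, not the notebook's full listing data.

```python
import pandas as pd

df = pd.DataFrame({
    "neighborhood": ["Astoria", "Astoria", "Harlem", "Harlem"],
    "price": [120, 80, 95, 150],
    "accommodates": [2, 1, 3, 2],
})
df["price_p_person"] = (df["price"] / df["accommodates"]).round(2)

# Named aggregation replaces the deprecated dict-renaming syntax
aggs = (
    df.groupby("neighborhood")
      .agg(avg_price=("price", "mean"),
           avg_price_p_person=("price_p_person", "mean"),
           count=("price", "size"))
      .reset_index()
)

data = df.merge(aggs, on="neighborhood", how="left")
print(data)
```

This keeps the aggregate column names explicit and avoids the reset_index/iloc reshuffling the original uses to recover flattened headers.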
cb79838722cc91b2ed8ea7f8f296b3c6ddf522eb
61,807
ipynb
Jupyter Notebook
amazon_review/domain_shiift.ipynb
yochimonji/amazon_review
bcf08f6b028d2cd4d1d26d5dd6fa506eeba3b4bb
[ "MIT" ]
null
null
null
amazon_review/domain_shiift.ipynb
yochimonji/amazon_review
bcf08f6b028d2cd4d1d26d5dd6fa506eeba3b4bb
[ "MIT" ]
null
null
null
amazon_review/domain_shiift.ipynb
yochimonji/amazon_review
bcf08f6b028d2cd4d1d26d5dd6fa506eeba3b4bb
[ "MIT" ]
null
null
null
27.790917
154
0.46519
[ [ [ "import copy\nimport random\n\nimport numpy as np\nimport pandas as pd\nimport torch\nfrom scipy import stats\nfrom torch import nn\nfrom torchtext.legacy import data\nfrom torchtext.vocab import Vectors\nfrom tqdm import tqdm\n\nfrom util import calc_accuracy, calc_f1, init_device, load_params\nfrom util.model import MyClassifier\nfrom util.nlp_preprocessing import dataframe2dataset, tokenizer_ja", "_____no_output_____" ], [ "# ランダムシード初期化\nseed = 0\nrandom.seed(seed)\nnp.random.seed(seed)\ntorch.manual_seed(seed)\ntorch.backends.cudnn.benchmark = False\ntorch.backends.cudnn.deterministic = True\ndevice = init_device()\n\n# パラメータ読み込み\nprint(\"Loading parameters...\")\nparams = load_params(\"/workspace/amazon_review/config/params_mmd.json\")\n\n# データセット読み込み\ntrain_df = pd.read_json(params[\"ja_train_path\"], orient=\"record\", lines=True)\nif params[\"is_developing\"]:\n train_df = train_df.sample(n=200000, random_state=1)\ndev_df = pd.read_json(params[\"ja_dev_path\"], orient=\"record\", lines=True)\ntest_df = pd.read_json(params[\"ja_test_path\"], orient=\"record\", lines=True)\n\n# sourceカテゴリーとtargetカテゴリーを分ける\ntrain_source_df = train_df[train_df[\"product_category\"] == params[\"source_category\"]]\ndev_source_df = dev_df[dev_df[\"product_category\"] == params[\"source_category\"]]\ntest_source_df = test_df[test_df[\"product_category\"] == params[\"source_category\"]]\ntrain_target_df = train_df[train_df[\"product_category\"] == params[\"target_category\"]]\ndev_target_df = dev_df[dev_df[\"product_category\"] == params[\"target_category\"]]\ntest_target_df = test_df[test_df[\"product_category\"] == params[\"target_category\"]]\n\n# クラスラベル設定\nfor df in [train_source_df, dev_source_df, test_source_df, train_target_df, dev_target_df, test_target_df]:\n # 3以上かを予測する場合\n df[\"class\"] = 0\n df[\"class\"][df[\"stars\"] > 3] = 1\n\n # 5クラス分類する場合\n # df[\"class\"] = df[\"stars\"] - 1\n\n# フィールド作成\nprint(\"Building data iterator...\")\ntext_field = data.Field(\n sequential=True,\n tokenize=tokenizer_ja,\n use_vocab=True,\n lower=True,\n include_lengths=True,\n batch_first=True,\n fix_length=params[\"token_max_length\"],\n init_token=\"<cls>\",\n eos_token=\"<eos>\",\n)\nlabel_field = data.Field(sequential=False, use_vocab=False)\nfields = [(\"text\", text_field), (\"label\", label_field)]\n\n# データセット作成\ncolumns = [\"review_body\", \"class\"]\ntrain_source_dataset = dataframe2dataset(train_source_df, fields, columns)\ndev_source_dataset = dataframe2dataset(dev_source_df, fields, columns)\n# test_source_dataset = dataframe2dataset(test_source_df, fields, columns)\n# train_target_dataset = dataframe2dataset(train_target_df, fields, columns)\ndev_target_dataset = dataframe2dataset(dev_target_df, fields, columns)\ntest_target_dataset = dataframe2dataset(test_target_df, fields, columns)\nall_train_dataset = dataframe2dataset(pd.concat([train_source_df, train_target_df]), fields, columns)\n\n# embedding作成\nif params[\"use_pretrained_vector\"]:\n japanese_fasttext_vectors = Vectors(name=params[\"ja_vector_path\"])\n text_field.build_vocab(all_train_dataset, vectors=japanese_fasttext_vectors, min_freq=1)\nelse:\n text_field.build_vocab(all_train_dataset, min_freq=1)\n\n# データローダー作成\ntrain_source_iter = data.BucketIterator(dataset=train_source_dataset, batch_size=params[\"batch_size\"], train=True)\ndev_source_iter = data.BucketIterator(\n dataset=dev_source_dataset, batch_size=params[\"batch_size\"], train=False, sort=False\n)\n# test_source_iter = data.BucketIterator(\n# dataset=test_source_dataset, 
batch_size=params[\"batch_size\"], train=False, sort=False\n# )\n# train_target_iter = data.BucketIterator(dataset=train_target_dataset, batch_size=params[\"batch_size\"], train=True)\ndev_target_iter = data.BucketIterator(\n dataset=dev_target_dataset, batch_size=params[\"batch_size\"], train=False, sort=False\n)\ntest_target_iter = data.BucketIterator(\n dataset=test_target_dataset, batch_size=params[\"batch_size\"], train=False, sort=False\n)\n\n# モデル構築\nv_size = len(text_field.vocab.stoi)\nif params[\"use_pretrained_vector\"]:\n model = MyClassifier(params[\"emb_dim\"], v_size, params[\"token_max_length\"], params[\"class_num\"], text_field).to(\n device\n )\nelse:\n model = MyClassifier(params[\"emb_dim\"], v_size, params[\"token_max_length\"], params[\"class_num\"]).to(device)\n\ncriterion = getattr(nn, params[\"criterion\"])()\noptimizer = getattr(torch.optim, params[\"optimizer\"])(model.parameters(), lr=params[\"lr\"])\n\n# sourceで訓練\n# print(\"sourceで事前学習開始\")\n# for epoch in range(params[\"epochs\"]):\n# print(f\"\\nepoch {epoch+1} / {params['epochs']}\")\n# total_loss = 0\n\n# for i, batch in tqdm(enumerate(train_source_iter), total=len(train_source_iter)):\n# model.train()\n\n# x, y = batch.text[0].to(device), batch.label.to(device)\n# _, pred = model(x)\n# loss = criterion(pred, y)\n\n# optimizer.zero_grad()\n# loss.backward()\n# optimizer.step()\n# total_loss += loss.cpu()\n# print(f\"Train Source Loss: {total_loss / len(train_source_iter):.3f}\")\n\n# total_dev_accuracy = 0\n# total_dev_f1 = 0\n# model.eval()\n# for valid_batch in dev_source_iter:\n# x, y = valid_batch.text[0].to(device), valid_batch.label.to(device)\n# with torch.no_grad():\n# _, pred = model(x)\n# label_array = y.cpu().numpy()\n# logit_array = pred.cpu().numpy()\n# total_dev_accuracy += calc_accuracy(label_array, logit_array)\n# total_dev_f1 += calc_f1(label_array, logit_array)\n# print(f\"Dev Source Accuracy: {total_dev_accuracy / len(dev_source_iter):.2f}\")\n# print(f\"Dev Source F1 Score: {total_dev_f1 / len(dev_source_iter):.2f}\")", "GPU available: cuda\nLoading parameters...\n ja_train_path:\t /workspace/data/dataset_ja_train.json\n ja_dev_path:\t /workspace/data/dataset_ja_dev.json\n ja_test_path:\t /workspace/data/dataset_ja_test.json\n en_train_path:\t /workspace/data/dataset_en_train.json\n en_dev_path:\t /workspace/data/dataset_en_dev.json\n en_test_path:\t /workspace/data/dataset_en_test.json\n ja_vector_path:\t /workspace/amazon_review/weight/japanese_fasttext_vectors.vec\n is_developing:\t True\n source_category:\t home\n target_category:\t wireless\n target_ratio:\t 0.5\n lambda:\t 0.3\n use_pretrained_vector:\t False\n token_max_length:\t 256\n batch_size:\t 32\n emb_dim:\t 300\n class_num:\t 2\n criterion:\t CrossEntropyLoss\n lr:\t 1e-05\n optimizer:\t Adam\n epochs:\t 10\n trial_count:\t 10\n" ], [ "# ブートストラップで複数回実行する\nprint(\"\\ntargetでFineTuning開始\")\n# 事前学習したモデルを保持\n# メモリを共有しないためにdeepcopyを使用する\nmodel_pretrained = copy.deepcopy(model.cpu())", "\ntargetでFineTuning開始\n" ], [ "params[\"target_ratio\"] = [0.01, 0.05, 0.1, 0.3, 0.5]\naccuracy_list = []\nf1_list = []\n\nfor target_ratio in params[\"target_ratio\"]:\n print(\"------------------------------\")\n print(f\"target_ratio = {target_ratio}\")\n print(\"------------------------------\")\n\n for count in range(params[\"trial_count\"]):\n print(f\"\\n{count+1}回目の試行\")\n\n # targetでFineTuningする準備\n # target_ratioで指定した比率までtargetのデータ数を減らす\n source_num = train_source_df.shape[0]\n target_num = int(source_num * target_ratio)\n if 
target_num > train_target_df.shape[0]:\n print(\"Target ratio is too large.\")\n exit()\n train_target_df_sample = train_target_df.sample(target_num, replace=False)\n print(f\"Source num: {source_num}, Target num: {target_num}\")\n\n # targetのデータローダー作成\n train_target_dataset = dataframe2dataset(train_target_df_sample, fields, columns)\n train_target_iter = data.BucketIterator(\n dataset=train_target_dataset, batch_size=params[\"batch_size\"], train=True\n )\n\n # 事前学習したモデルをロード\n model = copy.deepcopy(model_pretrained).to(device)\n optimizer = getattr(torch.optim, params[\"optimizer\"])(model.parameters(), lr=params[\"lr\"])\n\n # targetでFineTuning\n for epoch in range(params[\"epochs\"]):\n print(f\"\\nepoch {epoch+1} / {params['epochs']}\")\n total_loss = 0\n\n # for i, batch in tqdm(enumerate(train_target_iter), total=len(train_target_iter)):\n for i, batch in enumerate(train_target_iter):\n model.train()\n\n x, y = batch.text[0].to(device), batch.label.to(device)\n _, pred = model(x)\n loss = criterion(pred, y)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n total_loss += loss.cpu()\n print(f\"Train Target Loss: {total_loss / len(train_target_iter):.3f}\")\n\n # total_dev_accuracy = 0\n # total_dev_f1 = 0\n # model.eval()\n # for valid_batch in dev_target_iter:\n # x, y = valid_batch.text[0].to(device), valid_batch.label.to(device)\n # with torch.no_grad():\n # _, pred = model(x)\n # label_array = y.cpu().numpy()\n # logit_array = pred.cpu().numpy()\n # total_dev_accuracy += calc_accuracy(label_array, logit_array)\n # total_dev_f1 += calc_f1(label_array, logit_array)\n # print(f\"Dev Target Accuracy: {total_dev_accuracy / len(dev_target_iter):.2f}\")\n # print(f\"Dev Target F1 Score: {total_dev_f1 / len(dev_target_iter):.2f}\")\n\n total_test_accuracy = 0\n total_test_f1 = 0\n model.eval()\n for test_batch in test_target_iter:\n x, y = test_batch.text[0].to(device), test_batch.label.to(device)\n with torch.no_grad():\n _, pred = model(x)\n\n label_array = y.cpu().numpy()\n logit_array = pred.cpu().numpy()\n total_test_accuracy += calc_accuracy(label_array, logit_array)\n total_test_f1 += calc_f1(label_array, logit_array)\n\n test_accuracy = total_test_accuracy / len(test_target_iter)\n test_f1 = total_test_f1 / len(test_target_iter)\n accuracy_list.append(test_accuracy)\n f1_list.append(test_f1)\n print(f\"\\nTest Target Accuracy: {test_accuracy:.2f}\")\n print(f\"Test Target F1 Score: {test_f1:.2f}\")\n\n accuracy_interval = stats.t.interval(\n alpha=0.95, df=len(accuracy_list) - 1, loc=np.mean(accuracy_list), scale=stats.sem(accuracy_list)\n )\n f1_interval = stats.t.interval(alpha=0.95, df=len(f1_list) - 1, loc=np.mean(f1_list), scale=stats.sem(f1_list))\n print(\"\\n\\t\\tMean, Std, 95% interval (bottom, up)\")\n print(\n f\"Accuracy\\t{np.mean(accuracy_list):.2f}, {np.std(accuracy_list, ddof=1):.2f}, {accuracy_interval[0]:.2f}, {accuracy_interval[1]:.2f}\"\n )\n print(\n f\"F1 Score\\t{np.mean(f1_list):.2f}, {np.std(f1_list, ddof=1):.2f}, {f1_interval[0]:.2f}, {f1_interval[1]:.2f}\"\n )", "------------------------------\ntarget_ratio = 0.01\n------------------------------\n\n1回目の試行\nSource num: 18662, Target num: 186\n\nepoch 1 / 10\nTrain Target Loss: 0.705\n\nepoch 2 / 10\nTrain Target Loss: 0.487\n\nepoch 3 / 10\nTrain Target Loss: 0.385\n\nepoch 4 / 10\nTrain Target Loss: 0.325\n\nepoch 5 / 10\nTrain Target Loss: 0.275\n\nepoch 6 / 10\nTrain Target Loss: 0.209\n\nepoch 7 / 10\nTrain Target Loss: 0.195\n\nepoch 8 / 10\nTrain Target Loss: 0.173\n\nepoch 9 / 
10\nTrain Target Loss: 0.147\n\nepoch 10 / 10\nTrain Target Loss: 0.134\n\nTest Target Accuracy: 47.36\nTest Target F1 Score: 32.52\n\n2回目の試行\nSource num: 18662, Target num: 186\n\nepoch 1 / 10\nTrain Target Loss: 0.715\n\nepoch 2 / 10\nTrain Target Loss: 0.498\n\nepoch 3 / 10\nTrain Target Loss: 0.405\n\nepoch 4 / 10\nTrain Target Loss: 0.315\n\nepoch 5 / 10\nTrain Target Loss: 0.267\n\nepoch 6 / 10\nTrain Target Loss: 0.237\n\nepoch 7 / 10\nTrain Target Loss: 0.207\n\nepoch 8 / 10\nTrain Target Loss: 0.170\n\nepoch 9 / 10\nTrain Target Loss: 0.156\n\nepoch 10 / 10\nTrain Target Loss: 0.132\n\nTest Target Accuracy: 52.45\nTest Target F1 Score: 34.36\n\n3回目の試行\nSource num: 18662, Target num: 186\n\nepoch 1 / 10\nTrain Target Loss: 0.715\n\nepoch 2 / 10\nTrain Target Loss: 0.510\n\nepoch 3 / 10\nTrain Target Loss: 0.418\n\nepoch 4 / 10\nTrain Target Loss: 0.344\n\nepoch 5 / 10\nTrain Target Loss: 0.272\n\nepoch 6 / 10\nTrain Target Loss: 0.237\n\nepoch 7 / 10\nTrain Target Loss: 0.197\n\nepoch 8 / 10\nTrain Target Loss: 0.175\n\nepoch 9 / 10\nTrain Target Loss: 0.172\n\nepoch 10 / 10\nTrain Target Loss: 0.143\n\nTest Target Accuracy: 44.51\nTest Target F1 Score: 27.53\n\n4回目の試行\nSource num: 18662, Target num: 186\n\nepoch 1 / 10\nTrain Target Loss: 0.728\n\nepoch 2 / 10\nTrain Target Loss: 0.492\n\nepoch 3 / 10\nTrain Target Loss: 0.400\n\nepoch 4 / 10\nTrain Target Loss: 0.319\n\nepoch 5 / 10\nTrain Target Loss: 0.272\n\nepoch 6 / 10\nTrain Target Loss: 0.243\n\nepoch 7 / 10\nTrain Target Loss: 0.213\n\nepoch 8 / 10\nTrain Target Loss: 0.172\n\nepoch 9 / 10\nTrain Target Loss: 0.160\n\nepoch 10 / 10\nTrain Target Loss: 0.166\n\nTest Target Accuracy: 50.92\nTest Target F1 Score: 33.80\n\n5回目の試行\nSource num: 18662, Target num: 186\n\nepoch 1 / 10\nTrain Target Loss: 0.766\n\nepoch 2 / 10\nTrain Target Loss: 0.522\n\nepoch 3 / 10\nTrain Target Loss: 0.424\n\nepoch 4 / 10\nTrain Target Loss: 0.334\n\nepoch 5 / 10\nTrain Target Loss: 0.296\n\nepoch 6 / 10\nTrain Target Loss: 0.252\n\nepoch 7 / 10\nTrain Target Loss: 0.232\n\nepoch 8 / 10\nTrain Target Loss: 0.181\n\nepoch 9 / 10\nTrain Target Loss: 0.166\n\nepoch 10 / 10\nTrain Target Loss: 0.153\n\nTest Target Accuracy: 51.38\nTest Target F1 Score: 34.11\n\n6回目の試行\nSource num: 18662, Target num: 186\n\nepoch 1 / 10\nTrain Target Loss: 0.734\n\nepoch 2 / 10\nTrain Target Loss: 0.555\n\nepoch 3 / 10\nTrain Target Loss: 0.441\n\nepoch 4 / 10\nTrain Target Loss: 0.389\n\nepoch 5 / 10\nTrain Target Loss: 0.323\n\nepoch 6 / 10\nTrain Target Loss: 0.281\n\nepoch 7 / 10\nTrain Target Loss: 0.266\n\nepoch 8 / 10\nTrain Target Loss: 0.226\n\nepoch 9 / 10\nTrain Target Loss: 0.220\n\nepoch 10 / 10\nTrain Target Loss: 0.192\n\nTest Target Accuracy: 49.43\nTest Target F1 Score: 30.66\n\n7回目の試行\nSource num: 18662, Target num: 186\n\nepoch 1 / 10\nTrain Target Loss: 0.698\n\nepoch 2 / 10\nTrain Target Loss: 0.480\n\nepoch 3 / 10\nTrain Target Loss: 0.410\n\nepoch 4 / 10\nTrain Target Loss: 0.329\n\nepoch 5 / 10\nTrain Target Loss: 0.264\n\nepoch 6 / 10\nTrain Target Loss: 0.243\n\nepoch 7 / 10\nTrain Target Loss: 0.209\n\nepoch 8 / 10\nTrain Target Loss: 0.184\n\nepoch 9 / 10\nTrain Target Loss: 0.153\n\nepoch 10 / 10\nTrain Target Loss: 0.147\n\nTest Target Accuracy: 39.82\nTest Target F1 Score: 26.54\n\n8回目の試行\nSource num: 18662, Target num: 186\n\nepoch 1 / 10\nTrain Target Loss: 0.762\n\nepoch 2 / 10\nTrain Target Loss: 0.543\n\nepoch 3 / 10\nTrain Target Loss: 0.437\n\nepoch 4 / 10\nTrain Target Loss: 0.358\n\nepoch 5 / 10\nTrain Target Loss: 
0.290\n\nepoch 6 / 10\nTrain Target Loss: 0.241\n\nepoch 7 / 10\nTrain Target Loss: 0.211\n\nepoch 8 / 10\nTrain Target Loss: 0.189\n\nepoch 9 / 10\nTrain Target Loss: 0.173\n\nepoch 10 / 10\nTrain Target Loss: 0.168\n\nTest Target Accuracy: 45.83\nTest Target F1 Score: 28.57\n\n9回目の試行\nSource num: 18662, Target num: 186\n\nepoch 1 / 10\nTrain Target Loss: 0.714\n\nepoch 2 / 10\nTrain Target Loss: 0.549\n\nepoch 3 / 10\nTrain Target Loss: 0.425\n\nepoch 4 / 10\nTrain Target Loss: 0.339\n\nepoch 5 / 10\nTrain Target Loss: 0.278\n\nepoch 6 / 10\nTrain Target Loss: 0.249\n\nepoch 7 / 10\nTrain Target Loss: 0.205\n\nepoch 8 / 10\nTrain Target Loss: 0.173\n\nepoch 9 / 10\nTrain Target Loss: 0.169\n\nepoch 10 / 10\nTrain Target Loss: 0.145\n\nTest Target Accuracy: 58.61\nTest Target F1 Score: 34.90\n\n10回目の試行\nSource num: 18662, Target num: 186\n\nepoch 1 / 10\nTrain Target Loss: 0.695\n\nepoch 2 / 10\nTrain Target Loss: 0.508\n\nepoch 3 / 10\nTrain Target Loss: 0.406\n\nepoch 4 / 10\nTrain Target Loss: 0.334\n\nepoch 5 / 10\nTrain Target Loss: 0.277\n\nepoch 6 / 10\nTrain Target Loss: 0.248\n\nepoch 7 / 10\nTrain Target Loss: 0.220\n\nepoch 8 / 10\nTrain Target Loss: 0.214\n\nepoch 9 / 10\nTrain Target Loss: 0.179\n\nepoch 10 / 10\nTrain Target Loss: 0.174\n\nTest Target Accuracy: 55.59\nTest Target F1 Score: 34.37\n\n\t\tMean, Std, 95% interval (bottom, up)\nAccuracy\t49.59, 5.49, 45.66, 53.51\nF1 Score\t31.74, 3.17, 29.47, 34.00\n------------------------------\ntarget_ratio = 0.05\n------------------------------\n\n1回目の試行\nSource num: 18662, Target num: 933\n\nepoch 1 / 10\nTrain Target Loss: 0.715\n\nepoch 2 / 10\nTrain Target Loss: 0.564\n\nepoch 3 / 10\nTrain Target Loss: 0.459\n\nepoch 4 / 10\nTrain Target Loss: 0.341\n\nepoch 5 / 10\nTrain Target Loss: 0.265\n\nepoch 6 / 10\nTrain Target Loss: 0.227\n\nepoch 7 / 10\nTrain Target Loss: 0.184\n\nepoch 8 / 10\nTrain Target Loss: 0.161\n\nepoch 9 / 10\nTrain Target Loss: 0.155\n\nepoch 10 / 10\nTrain Target Loss: 0.135\n\nTest Target Accuracy: 44.25\nTest Target F1 Score: 26.97\n\n2回目の試行\nSource num: 18662, Target num: 933\n\nepoch 1 / 10\nTrain Target Loss: 0.696\n\nepoch 2 / 10\nTrain Target Loss: 0.543\n\nepoch 3 / 10\nTrain Target Loss: 0.405\n\nepoch 4 / 10\nTrain Target Loss: 0.322\n\nepoch 5 / 10\nTrain Target Loss: 0.239\n\nepoch 6 / 10\nTrain Target Loss: 0.195\n\nepoch 7 / 10\nTrain Target Loss: 0.185\n\nepoch 8 / 10\nTrain Target Loss: 0.161\n\nepoch 9 / 10\nTrain Target Loss: 0.146\n\nepoch 10 / 10\nTrain Target Loss: 0.143\n\nTest Target Accuracy: 61.30\nTest Target F1 Score: 57.80\n\n3回目の試行\nSource num: 18662, Target num: 933\n\nepoch 1 / 10\nTrain Target Loss: 0.694\n\nepoch 2 / 10\nTrain Target Loss: 0.534\n\nepoch 3 / 10\nTrain Target Loss: 0.419\n\nepoch 4 / 10\nTrain Target Loss: 0.311\n\nepoch 5 / 10\nTrain Target Loss: 0.251\n\nepoch 6 / 10\nTrain Target Loss: 0.212\n\nepoch 7 / 10\nTrain Target Loss: 0.190\n\nepoch 8 / 10\nTrain Target Loss: 0.182\n\nepoch 9 / 10\nTrain Target Loss: 0.161\n\nepoch 10 / 10\nTrain Target Loss: 0.146\n\nTest Target Accuracy: 58.68\nTest Target F1 Score: 37.29\n\n4回目の試行\nSource num: 18662, Target num: 933\n\nepoch 1 / 10\nTrain Target Loss: 0.715\n\nepoch 2 / 10\nTrain Target Loss: 0.561\n\nepoch 3 / 10\nTrain Target Loss: 0.437\n\nepoch 4 / 10\nTrain Target Loss: 0.324\n\nepoch 5 / 10\nTrain Target Loss: 0.250\n\nepoch 6 / 10\nTrain Target Loss: 0.204\n\nepoch 7 / 10\nTrain Target Loss: 0.185\n\nepoch 8 / 10\nTrain Target Loss: 0.196\n\nepoch 9 / 10\nTrain Target Loss: 0.160\n\nepoch 10 / 
10\nTrain Target Loss: 0.149\n\nTest Target Accuracy: 41.35\nTest Target F1 Score: 27.35\n\n5回目の試行\nSource num: 18662, Target num: 933\n\nepoch 1 / 10\nTrain Target Loss: 0.700\n\nepoch 2 / 10\nTrain Target Loss: 0.556\n\nepoch 3 / 10\nTrain Target Loss: 0.462\n\nepoch 4 / 10\nTrain Target Loss: 0.341\n\nepoch 5 / 10\nTrain Target Loss: 0.260\n\nepoch 6 / 10\nTrain Target Loss: 0.236\n\nepoch 7 / 10\nTrain Target Loss: 0.202\n\nepoch 8 / 10\nTrain Target Loss: 0.172\n\nepoch 9 / 10\nTrain Target Loss: 0.144\n\nepoch 10 / 10\nTrain Target Loss: 0.128\n\nTest Target Accuracy: 62.22\nTest Target F1 Score: 35.86\n\n6回目の試行\nSource num: 18662, Target num: 933\n\nepoch 1 / 10\nTrain Target Loss: 0.715\n\nepoch 2 / 10\nTrain Target Loss: 0.559\n\nepoch 3 / 10\nTrain Target Loss: 0.441\n\nepoch 4 / 10\nTrain Target Loss: 0.351\n\nepoch 5 / 10\nTrain Target Loss: 0.276\n\nepoch 6 / 10\nTrain Target Loss: 0.227\n\nepoch 7 / 10\nTrain Target Loss: 0.195\n\nepoch 8 / 10\nTrain Target Loss: 0.182\n\nepoch 9 / 10\nTrain Target Loss: 0.166\n\nepoch 10 / 10\nTrain Target Loss: 0.173\n\nTest Target Accuracy: 47.99\nTest Target F1 Score: 29.76\n\n7回目の試行\nSource num: 18662, Target num: 933\n\nepoch 1 / 10\nTrain Target Loss: 0.688\n\nepoch 2 / 10\nTrain Target Loss: 0.558\n\nepoch 3 / 10\nTrain Target Loss: 0.439\n\nepoch 4 / 10\nTrain Target Loss: 0.338\n\nepoch 5 / 10\nTrain Target Loss: 0.265\n\nepoch 6 / 10\nTrain Target Loss: 0.251\n\nepoch 7 / 10\nTrain Target Loss: 0.198\n\nepoch 8 / 10\nTrain Target Loss: 0.186\n\nepoch 9 / 10\nTrain Target Loss: 0.193\n\nepoch 10 / 10\nTrain Target Loss: 0.179\n\nTest Target Accuracy: 58.57\nTest Target F1 Score: 36.79\n\n8回目の試行\nSource num: 18662, Target num: 933\n\nepoch 1 / 10\nTrain Target Loss: 0.711\n\nepoch 2 / 10\nTrain Target Loss: 0.572\n\nepoch 3 / 10\nTrain Target Loss: 0.433\n\nepoch 4 / 10\nTrain Target Loss: 0.304\n\nepoch 5 / 10\nTrain Target Loss: 0.245\n\nepoch 6 / 10\nTrain Target Loss: 0.202\n\nepoch 7 / 10\nTrain Target Loss: 0.182\n\nepoch 8 / 10\nTrain Target Loss: 0.153\n\nepoch 9 / 10\nTrain Target Loss: 0.126\n\nepoch 10 / 10\nTrain Target Loss: 0.134\n\nTest Target Accuracy: 61.17\nTest Target F1 Score: 43.27\n\n9回目の試行\nSource num: 18662, Target num: 933\n\nepoch 1 / 10\nTrain Target Loss: 0.693\n\nepoch 2 / 10\nTrain Target Loss: 0.539\n\nepoch 3 / 10\nTrain Target Loss: 0.423\n\nepoch 4 / 10\nTrain Target Loss: 0.324\n\nepoch 5 / 10\nTrain Target Loss: 0.246\n\nepoch 6 / 10\nTrain Target Loss: 0.224\n\nepoch 7 / 10\nTrain Target Loss: 0.194\n\nepoch 8 / 10\nTrain Target Loss: 0.192\n\nepoch 9 / 10\nTrain Target Loss: 0.185\n\nepoch 10 / 10\nTrain Target Loss: 0.154\n\nTest Target Accuracy: 60.93\nTest Target F1 Score: 50.26\n\n10回目の試行\nSource num: 18662, Target num: 933\n\nepoch 1 / 10\nTrain Target Loss: 0.712\n\nepoch 2 / 10\nTrain Target Loss: 0.566\n\nepoch 3 / 10\nTrain Target Loss: 0.447\n\nepoch 4 / 10\nTrain Target Loss: 0.331\n\nepoch 5 / 10\nTrain Target Loss: 0.254\n\nepoch 6 / 10\nTrain Target Loss: 0.231\n\nepoch 7 / 10\nTrain Target Loss: 0.177\n\nepoch 8 / 10\nTrain Target Loss: 0.150\n\nepoch 9 / 10\nTrain Target Loss: 0.144\n\nepoch 10 / 10\nTrain Target Loss: 0.128\n\nTest Target Accuracy: 55.00\nTest Target F1 Score: 35.14\n\n\t\tMean, Std, 95% interval (bottom, up)\nAccuracy\t52.37, 7.14, 49.03, 55.71\nF1 Score\t34.89, 7.87, 31.21, 38.58\n------------------------------\ntarget_ratio = 0.1\n------------------------------\n\n1回目の試行\nSource num: 18662, Target num: 1866\n\nepoch 1 / 10\nTrain Target Loss: 0.697\n\nepoch 2 
/ 10\nTrain Target Loss: 0.571\n\nepoch 3 / 10\nTrain Target Loss: 0.410\n\nepoch 4 / 10\nTrain Target Loss: 0.287\n\nepoch 5 / 10\nTrain Target Loss: 0.217\n\nepoch 6 / 10\nTrain Target Loss: 0.202\n\nepoch 7 / 10\nTrain Target Loss: 0.156\n\nepoch 8 / 10\nTrain Target Loss: 0.149\n\nepoch 9 / 10\nTrain Target Loss: 0.122\n\nepoch 10 / 10\nTrain Target Loss: 0.101\n\nTest Target Accuracy: 68.58\nTest Target F1 Score: 41.96\n\n2回目の試行\nSource num: 18662, Target num: 1866\n\nepoch 1 / 10\nTrain Target Loss: 0.694\n\nepoch 2 / 10\nTrain Target Loss: 0.573\n\nepoch 3 / 10\nTrain Target Loss: 0.436\n\nepoch 4 / 10\nTrain Target Loss: 0.332\n\nepoch 5 / 10\nTrain Target Loss: 0.254\n\nepoch 6 / 10\nTrain Target Loss: 0.200\n\nepoch 7 / 10\nTrain Target Loss: 0.179\n\nepoch 8 / 10\nTrain Target Loss: 0.143\n\nepoch 9 / 10\nTrain Target Loss: 0.137\n\nepoch 10 / 10\nTrain Target Loss: 0.127\n\nTest Target Accuracy: 59.35\nTest Target F1 Score: 34.11\n\n3回目の試行\nSource num: 18662, Target num: 1866\n\nepoch 1 / 10\nTrain Target Loss: 0.697\n\nepoch 2 / 10\nTrain Target Loss: 0.570\n\nepoch 3 / 10\nTrain Target Loss: 0.444\n\nepoch 4 / 10\nTrain Target Loss: 0.319\n\nepoch 5 / 10\nTrain Target Loss: 0.256\n\nepoch 6 / 10\nTrain Target Loss: 0.201\n\nepoch 7 / 10\nTrain Target Loss: 0.173\n\nepoch 8 / 10\nTrain Target Loss: 0.146\n\nepoch 9 / 10\nTrain Target Loss: 0.138\n\nepoch 10 / 10\nTrain Target Loss: 0.136\n\nTest Target Accuracy: 61.04\nTest Target F1 Score: 33.17\n\n4回目の試行\nSource num: 18662, Target num: 1866\n\nepoch 1 / 10\nTrain Target Loss: 0.690\n\nepoch 2 / 10\nTrain Target Loss: 0.553\n\nepoch 3 / 10\nTrain Target Loss: 0.429\n\nepoch 4 / 10\nTrain Target Loss: 0.315\n\nepoch 5 / 10\nTrain Target Loss: 0.247\n\nepoch 6 / 10\nTrain Target Loss: 0.189\n\nepoch 7 / 10\nTrain Target Loss: 0.152\n\nepoch 8 / 10\nTrain Target Loss: 0.139\n\nepoch 9 / 10\nTrain Target Loss: 0.123\n\nepoch 10 / 10\nTrain Target Loss: 0.115\n\nTest Target Accuracy: 63.37\nTest Target F1 Score: 36.40\n\n5回目の試行\nSource num: 18662, Target num: 1866\n\nepoch 1 / 10\nTrain Target Loss: 0.698\n\nepoch 2 / 10\nTrain Target Loss: 0.571\n\nepoch 3 / 10\nTrain Target Loss: 0.426\n\nepoch 4 / 10\nTrain Target Loss: 0.304\n\nepoch 5 / 10\nTrain Target Loss: 0.217\n\nepoch 6 / 10\nTrain Target Loss: 0.190\n\nepoch 7 / 10\nTrain Target Loss: 0.168\n\nepoch 8 / 10\nTrain Target Loss: 0.139\n\nepoch 9 / 10\nTrain Target Loss: 0.132\n\nepoch 10 / 10\nTrain Target Loss: 0.124\n\nTest Target Accuracy: 58.39\nTest Target F1 Score: 36.83\n\n6回目の試行\nSource num: 18662, Target num: 1866\n\nepoch 1 / 10\nTrain Target Loss: 0.690\n\nepoch 2 / 10\nTrain Target Loss: 0.569\n\nepoch 3 / 10\nTrain Target Loss: 0.449\n\nepoch 4 / 10\nTrain Target Loss: 0.316\n\nepoch 5 / 10\nTrain Target Loss: 0.235\n\nepoch 6 / 10\nTrain Target Loss: 0.205\n\nepoch 7 / 10\nTrain Target Loss: 0.167\n\nepoch 8 / 10\nTrain Target Loss: 0.159\n\nepoch 9 / 10\nTrain Target Loss: 0.142\n\nepoch 10 / 10\nTrain Target Loss: 0.122\n\nTest Target Accuracy: 60.75\nTest Target F1 Score: 38.25\n\n7回目の試行\nSource num: 18662, Target num: 1866\n\nepoch 1 / 10\nTrain Target Loss: 0.693\n\nepoch 2 / 10\nTrain Target Loss: 0.572\n\nepoch 3 / 10\nTrain Target Loss: 0.427\n\nepoch 4 / 10\nTrain Target Loss: 0.308\n\nepoch 5 / 10\nTrain Target Loss: 0.245\n\nepoch 6 / 10\nTrain Target Loss: 0.193\n\nepoch 7 / 10\nTrain Target Loss: 0.160\n\nepoch 8 / 10\nTrain Target Loss: 0.136\n\nepoch 9 / 10\nTrain Target Loss: 0.118\n\nepoch 10 / 10\nTrain Target Loss: 0.109\n\nTest Target 
Accuracy: 61.52\nTest Target F1 Score: 40.20\n\n8回目の試行\nSource num: 18662, Target num: 1866\n\nepoch 1 / 10\nTrain Target Loss: 0.700\n\nepoch 2 / 10\nTrain Target Loss: 0.571\n\nepoch 3 / 10\nTrain Target Loss: 0.437\n\nepoch 4 / 10\nTrain Target Loss: 0.314\n\nepoch 5 / 10\nTrain Target Loss: 0.244\n\nepoch 6 / 10\nTrain Target Loss: 0.186\n\nepoch 7 / 10\nTrain Target Loss: 0.146\n\nepoch 8 / 10\nTrain Target Loss: 0.131\n\nepoch 9 / 10\nTrain Target Loss: 0.121\n\nepoch 10 / 10\nTrain Target Loss: 0.119\n\nTest Target Accuracy: 61.12\nTest Target F1 Score: 37.03\n\n9回目の試行\nSource num: 18662, Target num: 1866\n\nepoch 1 / 10\nTrain Target Loss: 0.684\n\nepoch 2 / 10\nTrain Target Loss: 0.548\n\nepoch 3 / 10\nTrain Target Loss: 0.407\n\nepoch 4 / 10\nTrain Target Loss: 0.290\n\nepoch 5 / 10\nTrain Target Loss: 0.216\n\nepoch 6 / 10\nTrain Target Loss: 0.180\n\nepoch 7 / 10\nTrain Target Loss: 0.163\n\nepoch 8 / 10\nTrain Target Loss: 0.137\n\nepoch 9 / 10\nTrain Target Loss: 0.126\n\nepoch 10 / 10\nTrain Target Loss: 0.123\n\nTest Target Accuracy: 57.19\nTest Target F1 Score: 36.25\n\n10回目の試行\nSource num: 18662, Target num: 1866\n\nepoch 1 / 10\nTrain Target Loss: 0.696\n\nepoch 2 / 10\nTrain Target Loss: 0.540\n\nepoch 3 / 10\nTrain Target Loss: 0.413\n\nepoch 4 / 10\nTrain Target Loss: 0.305\n\nepoch 5 / 10\nTrain Target Loss: 0.235\n\nepoch 6 / 10\nTrain Target Loss: 0.181\n\nepoch 7 / 10\nTrain Target Loss: 0.179\n\nepoch 8 / 10\nTrain Target Loss: 0.145\n\nepoch 9 / 10\nTrain Target Loss: 0.142\n\nepoch 10 / 10\nTrain Target Loss: 0.141\n\nTest Target Accuracy: 44.73\nTest Target F1 Score: 27.43\n\n\t\tMean, Std, 95% interval (bottom, up)\nAccuracy\t54.78, 7.54, 51.96, 57.60\nF1 Score\t35.32, 6.78, 32.78, 37.85\n------------------------------\ntarget_ratio = 0.3\n------------------------------\n\n1回目の試行\nSource num: 18662, Target num: 5598\n\nepoch 1 / 10\nTrain Target Loss: 0.675\n\nepoch 2 / 10\nTrain Target Loss: 0.551\n\nepoch 3 / 10\nTrain Target Loss: 0.419\n\nepoch 4 / 10\nTrain Target Loss: 0.316\n\nepoch 5 / 10\nTrain Target Loss: 0.255\n\nepoch 6 / 10\nTrain Target Loss: 0.200\n\nepoch 7 / 10\nTrain Target Loss: 0.165\n\nepoch 8 / 10\nTrain Target Loss: 0.155\n\nepoch 9 / 10\nTrain Target Loss: 0.134\n\nepoch 10 / 10\nTrain Target Loss: 0.117\n\nTest Target Accuracy: 64.40\nTest Target F1 Score: 37.14\n\n2回目の試行\nSource num: 18662, Target num: 5598\n\nepoch 1 / 10\nTrain Target Loss: 0.670\n\nepoch 2 / 10\nTrain Target Loss: 0.559\n\nepoch 3 / 10\nTrain Target Loss: 0.439\n\nepoch 4 / 10\nTrain Target Loss: 0.331\n\nepoch 5 / 10\nTrain Target Loss: 0.259\n\nepoch 6 / 10\nTrain Target Loss: 0.204\n\nepoch 7 / 10\nTrain Target Loss: 0.175\n\nepoch 8 / 10\nTrain Target Loss: 0.146\n\nepoch 9 / 10\nTrain Target Loss: 0.141\n\nepoch 10 / 10\nTrain Target Loss: 0.123\n\nTest Target Accuracy: 63.94\nTest Target F1 Score: 38.46\n\n3回目の試行\nSource num: 18662, Target num: 5598\n\nepoch 1 / 10\nTrain Target Loss: 0.672\n\nepoch 2 / 10\nTrain Target Loss: 0.568\n\nepoch 3 / 10\nTrain Target Loss: 0.441\n\nepoch 4 / 10\nTrain Target Loss: 0.327\n\nepoch 5 / 10\nTrain Target Loss: 0.266\n\nepoch 6 / 10\nTrain Target Loss: 0.214\n\nepoch 7 / 10\nTrain Target Loss: 0.173\n\nepoch 8 / 10\nTrain Target Loss: 0.147\n\nepoch 9 / 10\nTrain Target Loss: 0.130\n\nepoch 10 / 10\nTrain Target Loss: 0.112\n\nTest Target Accuracy: 44.23\nTest Target F1 Score: 33.31\n\n4回目の試行\nSource num: 18662, Target num: 5598\n\nepoch 1 / 10\nTrain Target Loss: 0.675\n\nepoch 2 / 10\nTrain Target Loss: 
0.567\n\nepoch 3 / 10\nTrain Target Loss: 0.422\n\nepoch 4 / 10\nTrain Target Loss: 0.316\n\nepoch 5 / 10\nTrain Target Loss: 0.246\n\nepoch 6 / 10\nTrain Target Loss: 0.198\n\nepoch 7 / 10\nTrain Target Loss: 0.165\n\nepoch 8 / 10\nTrain Target Loss: 0.152\n\nepoch 9 / 10\nTrain Target Loss: 0.130\n\nepoch 10 / 10\nTrain Target Loss: 0.108\n\nTest Target Accuracy: 64.47\nTest Target F1 Score: 38.97\n\n5回目の試行\nSource num: 18662, Target num: 5598\n\nepoch 1 / 10\nTrain Target Loss: 0.676\n\nepoch 2 / 10\nTrain Target Loss: 0.562\n\nepoch 3 / 10\nTrain Target Loss: 0.426\n\nepoch 4 / 10\nTrain Target Loss: 0.320\n\nepoch 5 / 10\nTrain Target Loss: 0.247\n\nepoch 6 / 10\nTrain Target Loss: 0.198\n\nepoch 7 / 10\nTrain Target Loss: 0.164\n\nepoch 8 / 10\nTrain Target Loss: 0.141\n\nepoch 9 / 10\nTrain Target Loss: 0.122\n\nepoch 10 / 10\nTrain Target Loss: 0.121\n\nTest Target Accuracy: 56.91\nTest Target F1 Score: 35.74\n\n6回目の試行\nSource num: 18662, Target num: 5598\n\nepoch 1 / 10\nTrain Target Loss: 0.674\n\nepoch 2 / 10\nTrain Target Loss: 0.560\n\nepoch 3 / 10\nTrain Target Loss: 0.440\n\nepoch 4 / 10\nTrain Target Loss: 0.320\n\nepoch 5 / 10\nTrain Target Loss: 0.253\n\nepoch 6 / 10\nTrain Target Loss: 0.186\n\nepoch 7 / 10\nTrain Target Loss: 0.160\n\nepoch 8 / 10\nTrain Target Loss: 0.152\n\nepoch 9 / 10\nTrain Target Loss: 0.121\n\nepoch 10 / 10\nTrain Target Loss: 0.115\n\nTest Target Accuracy: 62.24\nTest Target F1 Score: 37.46\n\n7回目の試行\nSource num: 18662, Target num: 5598\n\nepoch 1 / 10\nTrain Target Loss: 0.677\n\nepoch 2 / 10\nTrain Target Loss: 0.557\n\nepoch 3 / 10\nTrain Target Loss: 0.435\n\nepoch 4 / 10\nTrain Target Loss: 0.314\n\nepoch 5 / 10\nTrain Target Loss: 0.247\n\nepoch 6 / 10\nTrain Target Loss: 0.205\n\nepoch 7 / 10\nTrain Target Loss: 0.162\n\nepoch 8 / 10\nTrain Target Loss: 0.144\n\nepoch 9 / 10\nTrain Target Loss: 0.113\n\nepoch 10 / 10\nTrain Target Loss: 0.109\n\nTest Target Accuracy: 63.42\nTest Target F1 Score: 37.05\n\n8回目の試行\nSource num: 18662, Target num: 5598\n\nepoch 1 / 10\nTrain Target Loss: 0.669\n\nepoch 2 / 10\nTrain Target Loss: 0.548\n\nepoch 3 / 10\nTrain Target Loss: 0.411\n\nepoch 4 / 10\nTrain Target Loss: 0.309\n\nepoch 5 / 10\nTrain Target Loss: 0.245\n\nepoch 6 / 10\nTrain Target Loss: 0.197\n\nepoch 7 / 10\nTrain Target Loss: 0.169\n\nepoch 8 / 10\nTrain Target Loss: 0.146\n\nepoch 9 / 10\nTrain Target Loss: 0.127\n\nepoch 10 / 10\nTrain Target Loss: 0.107\n\nTest Target Accuracy: 54.94\nTest Target F1 Score: 33.77\n\n9回目の試行\nSource num: 18662, Target num: 5598\n\nepoch 1 / 10\nTrain Target Loss: 0.678\n\nepoch 2 / 10\nTrain Target Loss: 0.567\n\nepoch 3 / 10\nTrain Target Loss: 0.434\n\nepoch 4 / 10\nTrain Target Loss: 0.325\n\nepoch 5 / 10\nTrain Target Loss: 0.250\n\nepoch 6 / 10\nTrain Target Loss: 0.193\n\nepoch 7 / 10\nTrain Target Loss: 0.159\n\nepoch 8 / 10\nTrain Target Loss: 0.146\n\nepoch 9 / 10\nTrain Target Loss: 0.122\n\nepoch 10 / 10\nTrain Target Loss: 0.126\n\nTest Target Accuracy: 61.17\nTest Target F1 Score: 36.40\n\n10回目の試行\nSource num: 18662, Target num: 5598\n\nepoch 1 / 10\nTrain Target Loss: 0.675\n\nepoch 2 / 10\nTrain Target Loss: 0.564\n\nepoch 3 / 10\nTrain Target Loss: 0.432\n\nepoch 4 / 10\nTrain Target Loss: 0.311\n\nepoch 5 / 10\nTrain Target Loss: 0.250\n\nepoch 6 / 10\nTrain Target Loss: 0.198\n\nepoch 7 / 10\nTrain Target Loss: 0.167\n\nepoch 8 / 10\nTrain Target Loss: 0.143\n\nepoch 9 / 10\nTrain Target Loss: 0.132\n\nepoch 10 / 10\nTrain Target Loss: 0.113\n\nTest Target Accuracy: 65.56\nTest 
Target F1 Score: 36.96\n\n\t\tMean, Std, 95% interval (bottom, up)\nAccuracy\t56.12, 7.60, 53.69, 58.55\nF1 Score\t35.62, 5.94, 33.72, 37.52\n------------------------------\ntarget_ratio = 0.5\n------------------------------\n\n1回目の試行\nSource num: 18662, Target num: 9331\n\nepoch 1 / 10\nTrain Target Loss: 0.662\n\nepoch 2 / 10\nTrain Target Loss: 0.557\n\nepoch 3 / 10\nTrain Target Loss: 0.438\n\nepoch 4 / 10\nTrain Target Loss: 0.345\n\nepoch 5 / 10\nTrain Target Loss: 0.259\n\nepoch 6 / 10\nTrain Target Loss: 0.209\n\nepoch 7 / 10\nTrain Target Loss: 0.176\n\nepoch 8 / 10\nTrain Target Loss: 0.151\n\nepoch 9 / 10\nTrain Target Loss: 0.133\n\nepoch 10 / 10\nTrain Target Loss: 0.130\n\nTest Target Accuracy: 63.57\nTest Target F1 Score: 38.34\n\n2回目の試行\nSource num: 18662, Target num: 9331\n\nepoch 1 / 10\nTrain Target Loss: 0.663\n\nepoch 2 / 10\nTrain Target Loss: 0.553\n\nepoch 3 / 10\nTrain Target Loss: 0.427\n\nepoch 4 / 10\nTrain Target Loss: 0.340\n\nepoch 5 / 10\nTrain Target Loss: 0.261\n\nepoch 6 / 10\nTrain Target Loss: 0.216\n\nepoch 7 / 10\nTrain Target Loss: 0.183\n\nepoch 8 / 10\nTrain Target Loss: 0.154\n\nepoch 9 / 10\nTrain Target Loss: 0.133\n\nepoch 10 / 10\nTrain Target Loss: 0.127\n\nTest Target Accuracy: 43.77\nTest Target F1 Score: 26.05\n\n3回目の試行\nSource num: 18662, Target num: 9331\n\nepoch 1 / 10\nTrain Target Loss: 0.664\n\nepoch 2 / 10\nTrain Target Loss: 0.554\n\nepoch 3 / 10\nTrain Target Loss: 0.427\n\nepoch 4 / 10\nTrain Target Loss: 0.335\n\nepoch 5 / 10\nTrain Target Loss: 0.257\n\nepoch 6 / 10\nTrain Target Loss: 0.211\n\nepoch 7 / 10\nTrain Target Loss: 0.183\n\nepoch 8 / 10\nTrain Target Loss: 0.155\n\nepoch 9 / 10\nTrain Target Loss: 0.134\n\nepoch 10 / 10\nTrain Target Loss: 0.129\n\nTest Target Accuracy: 62.17\nTest Target F1 Score: 38.24\n\n4回目の試行\nSource num: 18662, Target num: 9331\n\nepoch 1 / 10\nTrain Target Loss: 0.659\n\nepoch 2 / 10\nTrain Target Loss: 0.554\n\nepoch 3 / 10\nTrain Target Loss: 0.436\n\nepoch 4 / 10\nTrain Target Loss: 0.335\n\nepoch 5 / 10\nTrain Target Loss: 0.258\n\nepoch 6 / 10\nTrain Target Loss: 0.212\n\nepoch 7 / 10\nTrain Target Loss: 0.180\n\nepoch 8 / 10\nTrain Target Loss: 0.156\n\nepoch 9 / 10\nTrain Target Loss: 0.133\n\nepoch 10 / 10\nTrain Target Loss: 0.119\n\nTest Target Accuracy: 46.66\nTest Target F1 Score: 28.68\n\n5回目の試行\nSource num: 18662, Target num: 9331\n\nepoch 1 / 10\nTrain Target Loss: 0.662\n\nepoch 2 / 10\nTrain Target Loss: 0.548\n\nepoch 3 / 10\nTrain Target Loss: 0.427\n\nepoch 4 / 10\nTrain Target Loss: 0.335\n\nepoch 5 / 10\nTrain Target Loss: 0.258\n\nepoch 6 / 10\nTrain Target Loss: 0.215\n\nepoch 7 / 10\nTrain Target Loss: 0.175\n\nepoch 8 / 10\nTrain Target Loss: 0.154\n\nepoch 9 / 10\nTrain Target Loss: 0.135\n\nepoch 10 / 10\nTrain Target Loss: 0.114\n\nTest Target Accuracy: 63.85\nTest Target F1 Score: 38.04\n\n6回目の試行\nSource num: 18662, Target num: 9331\n\nepoch 1 / 10\nTrain Target Loss: 0.665\n\nepoch 2 / 10\nTrain Target Loss: 0.546\n\nepoch 3 / 10\nTrain Target Loss: 0.428\n\nepoch 4 / 10\nTrain Target Loss: 0.330\n\nepoch 5 / 10\nTrain Target Loss: 0.270\n\nepoch 6 / 10\nTrain Target Loss: 0.212\n\nepoch 7 / 10\nTrain Target Loss: 0.179\n\nepoch 8 / 10\nTrain Target Loss: 0.153\n\nepoch 9 / 10\nTrain Target Loss: 0.128\n\nepoch 10 / 10\nTrain Target Loss: 0.112\n\nTest Target Accuracy: 62.24\nTest Target F1 Score: 40.66\n\n7回目の試行\nSource num: 18662, Target num: 9331\n\nepoch 1 / 10\nTrain Target Loss: 0.662\n\nepoch 2 / 10\nTrain Target Loss: 0.556\n\nepoch 3 / 10\nTrain Target 
Loss: 0.437\n\nepoch 4 / 10\nTrain Target Loss: 0.341\n\nepoch 5 / 10\nTrain Target Loss: 0.269\n\nepoch 6 / 10\nTrain Target Loss: 0.223\n\nepoch 7 / 10\nTrain Target Loss: 0.192\n\nepoch 8 / 10\nTrain Target Loss: 0.167\n\nepoch 9 / 10\nTrain Target Loss: 0.136\n\nepoch 10 / 10\nTrain Target Loss: 0.120\n\nTest Target Accuracy: 68.58\nTest Target F1 Score: 40.29\n\n8回目の試行\nSource num: 18662, Target num: 9331\n\nepoch 1 / 10\nTrain Target Loss: 0.664\n\nepoch 2 / 10\nTrain Target Loss: 0.552\n\nepoch 3 / 10\nTrain Target Loss: 0.431\n\nepoch 4 / 10\nTrain Target Loss: 0.323\n\nepoch 5 / 10\nTrain Target Loss: 0.258\n\nepoch 6 / 10\nTrain Target Loss: 0.213\n\nepoch 7 / 10\nTrain Target Loss: 0.181\n\nepoch 8 / 10\nTrain Target Loss: 0.158\n\nepoch 9 / 10\nTrain Target Loss: 0.131\n\nepoch 10 / 10\nTrain Target Loss: 0.133\n\nTest Target Accuracy: 64.40\nTest Target F1 Score: 38.91\n\n9回目の試行\nSource num: 18662, Target num: 9331\n\nepoch 1 / 10\nTrain Target Loss: 0.661\n\nepoch 2 / 10\nTrain Target Loss: 0.547\n\nepoch 3 / 10\nTrain Target Loss: 0.425\n\nepoch 4 / 10\nTrain Target Loss: 0.338\n\nepoch 5 / 10\nTrain Target Loss: 0.263\n\nepoch 6 / 10\nTrain Target Loss: 0.205\n\nepoch 7 / 10\nTrain Target Loss: 0.178\n\nepoch 8 / 10\nTrain Target Loss: 0.164\n\nepoch 9 / 10\nTrain Target Loss: 0.130\n\nepoch 10 / 10\nTrain Target Loss: 0.122\n\nTest Target Accuracy: 61.52\nTest Target F1 Score: 43.20\n\n10回目の試行\nSource num: 18662, Target num: 9331\n\nepoch 1 / 10\nTrain Target Loss: 0.663\n\nepoch 2 / 10\nTrain Target Loss: 0.546\n\nepoch 3 / 10\nTrain Target Loss: 0.422\n\nepoch 4 / 10\nTrain Target Loss: 0.329\n\nepoch 5 / 10\nTrain Target Loss: 0.255\n\nepoch 6 / 10\nTrain Target Loss: 0.207\n\nepoch 7 / 10\nTrain Target Loss: 0.170\n\nepoch 8 / 10\nTrain Target Loss: 0.148\n\nepoch 9 / 10\nTrain Target Loss: 0.137\n\nepoch 10 / 10\nTrain Target Loss: 0.112\n\nTest Target Accuracy: 60.58\nTest Target F1 Score: 43.04\n\n\t\tMean, Std, 95% interval (bottom, up)\nAccuracy\t56.84, 7.73, 54.64, 59.04\nF1 Score\t36.00, 5.89, 34.33, 37.68\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code" ] ]
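Each target_ratio block in the domain_shiift record above ends with the mean, sample standard deviation, and a 95% Student-t interval computed over the repeated fine-tuning trials. The standalone sketch below isolates that summary step; the score list is rounded from the first block of trial accuracies logged above, and the confidence level is passed positionally because newer SciPy releases renamed the keyword from alpha to confidence.

```python
import numpy as np
from scipy import stats

def summarize_trials(scores, confidence=0.95):
    """Mean, sample std, and t-based confidence interval over repeated trial scores."""
    scores = np.asarray(scores, dtype=float)
    mean = scores.mean()
    std = scores.std(ddof=1)
    low, high = stats.t.interval(confidence, len(scores) - 1,
                                 loc=mean, scale=stats.sem(scores))
    return mean, std, low, high

# Test accuracies rounded from the ten target_ratio=0.01 trials above
accs = [47.4, 52.5, 44.5, 50.9, 51.4, 49.4, 39.8, 45.8, 58.6, 55.6]
mean, std, low, high = summarize_trials(accs)
print(f"Accuracy\t{mean:.2f}, {std:.2f}, {low:.2f}, {high:.2f}")
# Approximately reproduces the logged summary: 49.59, 5.49, 45.66, 53.51
```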
cb7995541bd4377235d2172b40a034eb4f3b18fe
4,692
ipynb
Jupyter Notebook
day25.ipynb
g2boojum/AdventOfCode2021
dea4af5a904dfeb395fc1657ca95f5ac35e3f9fe
[ "BSD-2-Clause" ]
null
null
null
day25.ipynb
g2boojum/AdventOfCode2021
dea4af5a904dfeb395fc1657ca95f5ac35e3f9fe
[ "BSD-2-Clause" ]
null
null
null
day25.ipynb
g2boojum/AdventOfCode2021
dea4af5a904dfeb395fc1657ca95f5ac35e3f9fe
[ "BSD-2-Clause" ]
null
null
null
22.887805
87
0.466965
[ [ [ "import itertools\nimport collections\nimport numpy as np\nimport pathlib", "_____no_output_____" ] ], [ [ "## part 1 ##", "_____no_output_____" ] ], [ [ "testlines = '''v...>>.vv>\n.vv>>.vv..\n>>.>v>...v\n>>v>>.>.v.\nv>v.vv.v..\n>.>>..v...\n.vv..>.>v.\nv.v..>>v.v\n....v..v.>'''.splitlines()", "_____no_output_____" ], [ "puzzlelines = pathlib.Path('day25.txt').read_text().splitlines()", "_____no_output_____" ], [ "def get_size(lines):\n xmax = len(lines[0]) \n zmax = len(lines)\n return xmax, zmax", "_____no_output_____" ], [ "def get_positions(lines):\n rights = set()\n downs = set()\n for z, line in enumerate(lines):\n for x, c in enumerate(line):\n if c == '>':\n rights.add((x,z))\n elif c == 'v':\n downs.add((x,z))\n return rights, downs", "_____no_output_____" ], [ "def step(rights, downs, xmax, zmax):\n newrights = set()\n newdowns = set()\n currpos = rights | downs\n num_moves = 0\n # right-moving herd\n for x,z in rights:\n xnext = (x + 1) % xmax\n if (xnext, z) not in currpos:\n # free to move\n newrights.add((xnext, z))\n num_moves += 1\n else:\n newrights.add((x,z))\n currpos = newrights | downs\n # down-moving herd\n for x,z in downs:\n znext = (z + 1) % zmax\n if (x, znext) not in currpos:\n # free to move\n newdowns.add((x,znext))\n num_moves += 1\n else:\n newdowns.add((x,z))\n return num_moves, newrights, newdowns\n ", "_____no_output_____" ], [ "def solve1(lines):\n xmax, zmax = get_size(lines)\n newrights, newdowns = get_positions(lines)\n steps = 0\n while True:\n num_moves, newrights, newdowns = step(newrights, newdowns, xmax, zmax)\n steps += 1\n if num_moves == 0:\n break\n print(steps)", "_____no_output_____" ], [ "solve1(testlines)", "58\n" ], [ "solve1(puzzlelines)", "523\n" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
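The day25 record above steps the two sea-cucumber herds with Python sets of (x, z) coordinates. The sketch below is an alternative, vectorized formulation of the same step rule using NumPy boolean grids and np.roll for the wrap-around moves; it is offered as an equivalent reformulation rather than the author's code, and the parse/solve names are illustrative.

```python
import numpy as np

def parse(lines):
    grid = np.array([list(line) for line in lines])
    return grid == '>', grid == 'v'

def step(rights, downs):
    """One step: the east-facing herd moves first, then the south-facing herd."""
    occupied = rights | downs
    east = rights & ~np.roll(occupied, -1, axis=1)      # east-movers whose next cell is free
    rights = (rights & ~east) | np.roll(east, 1, axis=1)

    occupied = rights | downs
    south = downs & ~np.roll(occupied, -1, axis=0)      # south-movers whose next cell is free
    downs = (downs & ~south) | np.roll(south, 1, axis=0)

    return rights, downs, bool(east.any() or south.any())

def solve(lines):
    rights, downs = parse(lines)
    steps, moved = 0, True
    while moved:
        rights, downs, moved = step(rights, downs)
        steps += 1
    return steps  # should match the set-based version, e.g. 58 on the test grid
```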
cb799a2365167c75f4080e5729dac08584bd5544
58,030
ipynb
Jupyter Notebook
Extra/Buenrostro_2018/test_blacklist/SCRAT_buenrostro2018-blacklist-rm.ipynb
tAndreani/scATAC-benchmarking
0b7167490aff986738985cdba688b48d4ed9988f
[ "MIT" ]
2
2019-10-28T09:27:51.000Z
2019-11-25T18:03:04.000Z
Extra/Buenrostro_2018/test_blacklist/SCRAT_buenrostro2018-blacklist-rm.ipynb
huidongchen/scATAC-benchmarking
31d71d60536e8f4bf5bae314630013feaff3caf2
[ "MIT" ]
null
null
null
Extra/Buenrostro_2018/test_blacklist/SCRAT_buenrostro2018-blacklist-rm.ipynb
huidongchen/scATAC-benchmarking
31d71d60536e8f4bf5bae314630013feaff3caf2
[ "MIT" ]
null
null
null
49.513652
339
0.507203
[ [ [ "### Installation", "_____no_output_____" ], [ "`devtools::install_github(\"zji90/SCRATdatahg19\")` \n`source(\"https://raw.githubusercontent.com/zji90/SCRATdata/master/installcode.R\")` ", "_____no_output_____" ], [ "### Import packages", "_____no_output_____" ] ], [ [ "library(devtools)\nlibrary(GenomicAlignments)\nlibrary(Rsamtools)\nlibrary(SCRATdatahg19)\nlibrary(SCRAT)", "Loading required package: BiocGenerics\nLoading required package: parallel\n\nAttaching package: ‘BiocGenerics’\n\nThe following objects are masked from ‘package:parallel’:\n\n clusterApply, clusterApplyLB, clusterCall, clusterEvalQ,\n clusterExport, clusterMap, parApply, parCapply, parLapply,\n parLapplyLB, parRapply, parSapply, parSapplyLB\n\nThe following objects are masked from ‘package:stats’:\n\n IQR, mad, sd, var, xtabs\n\nThe following objects are masked from ‘package:base’:\n\n anyDuplicated, append, as.data.frame, basename, cbind, colMeans,\n colnames, colSums, dirname, do.call, duplicated, eval, evalq,\n Filter, Find, get, grep, grepl, intersect, is.unsorted, lapply,\n lengths, Map, mapply, match, mget, order, paste, pmax, pmax.int,\n pmin, pmin.int, Position, rank, rbind, Reduce, rowMeans, rownames,\n rowSums, sapply, setdiff, sort, table, tapply, union, unique,\n unsplit, which, which.max, which.min\n\nLoading required package: S4Vectors\nLoading required package: stats4\n\nAttaching package: ‘S4Vectors’\n\nThe following object is masked from ‘package:base’:\n\n expand.grid\n\nLoading required package: IRanges\nLoading required package: GenomeInfoDb\nLoading required package: GenomicRanges\nLoading required package: SummarizedExperiment\nLoading required package: Biobase\nWelcome to Bioconductor\n\n Vignettes contain introductory material; view with\n 'browseVignettes()'. 
To cite Bioconductor, see\n 'citation(\"Biobase\")', and for packages 'citation(\"pkgname\")'.\n\nLoading required package: DelayedArray\nLoading required package: matrixStats\n\nAttaching package: ‘matrixStats’\n\nThe following objects are masked from ‘package:Biobase’:\n\n anyMissing, rowMedians\n\nLoading required package: BiocParallel\n\nAttaching package: ‘DelayedArray’\n\nThe following objects are masked from ‘package:matrixStats’:\n\n colMaxs, colMins, colRanges, rowMaxs, rowMins, rowRanges\n\nThe following objects are masked from ‘package:base’:\n\n aperm, apply\n\nLoading required package: Biostrings\nLoading required package: XVector\n\nAttaching package: ‘Biostrings’\n\nThe following object is masked from ‘package:DelayedArray’:\n\n type\n\nThe following object is masked from ‘package:base’:\n\n strsplit\n\nLoading required package: Rsamtools\nWarning message:\n“replacing previous import ‘DT::dataTableOutput’ by ‘shiny::dataTableOutput’ when loading ‘SCRAT’”Warning message:\n“replacing previous import ‘DT::renderDataTable’ by ‘shiny::renderDataTable’ when loading ‘SCRAT’”Warning message:\n“replacing previous import ‘mclust::em’ by ‘shiny::em’ when loading ‘SCRAT’”" ] ], [ [ "### Obtain Feature Matrix", "_____no_output_____" ] ], [ [ "start_time = Sys.time()", "_____no_output_____" ], [ "metadata <- read.table('./input/metadata.tsv',\n header = TRUE,\n stringsAsFactors=FALSE,quote=\"\",row.names=1)", "_____no_output_____" ], [ "SCRATsummary <- function (dir = \"\", genome, bamfile = NULL, singlepair = \"automated\", \n removeblacklist = T, log2transform = T, adjustlen = T, featurelist = c(\"GENE\", \n \"ENCL\", \"MOTIF_TRANSFAC\", \"MOTIF_JASPAR\", \"GSEA\"), customfeature = NULL, \n Genestarttype = \"TSSup\", Geneendtype = \"TSSdown\", Genestartbp = 3000, \n Geneendbp = 1000, ENCLclunum = 2000, Motifflank = 100, GSEAterm = \"c5.bp\", \n GSEAstarttype = \"TSSup\", GSEAendtype = \"TSSdown\", GSEAstartbp = 3000, \n GSEAendbp = 1000) \n{\n if (is.null(bamfile)) {\n bamfile <- list.files(dir, pattern = \".bam$\")\n }\n datapath <- system.file(\"extdata\", package = paste0(\"SCRATdata\", \n genome))\n bamdata <- list()\n for (i in bamfile) {\n filepath <- file.path(dir, i)\n if (singlepair == \"automated\") {\n bamfile <- BamFile(filepath)\n tmpsingle <- readGAlignments(bamfile)\n tmppair <- readGAlignmentPairs(bamfile)\n pairendtf <- testPairedEndBam(bamfile)\n if (pairendtf) {\n tmp <- tmppair\n startpos <- pmin(start(first(tmp)), start(last(tmp)))\n endpos <- pmax(end(first(tmp)), end(last(tmp)))\n id <- which(!is.na(as.character(seqnames(tmp))))\n tmp <- GRanges(seqnames=as.character(seqnames(tmp))[id],IRanges(start=startpos[id],end=endpos[id]))\n }\n else {\n tmp <- GRanges(tmpsingle)\n }\n }\n else if (singlepair == \"single\") {\n tmp <- GRanges(readGAlignments(filepath))\n }\n else if (singlepair == \"pair\") {\n tmp <- readGAlignmentPairs(filepath)\n startpos <- pmin(start(first(tmp)), start(last(tmp)))\n endpos <- pmax(end(first(tmp)), end(last(tmp)))\n id <- which(!is.na(as.character(seqnames(tmp))))\n tmp <- GRanges(seqnames=as.character(seqnames(tmp))[id],IRanges(start=startpos[id],end=endpos[id]))\n }\n if (removeblacklist) {\n load(paste0(datapath, \"/gr/blacklist.rda\"))\n tmp <- tmp[-as.matrix(findOverlaps(tmp, gr))[, 1], \n ]\n }\n bamdata[[i]] <- tmp\n }\n bamsummary <- sapply(bamdata, length)\n allres <- NULL\n datapath <- system.file(\"extdata\", package = paste0(\"SCRATdata\", \n genome))\n if (\"GENE\" %in% featurelist) {\n print(\"Processing GENE features\")\n 
load(paste0(datapath, \"/gr/generegion.rda\"))\n if (Genestarttype == \"TSSup\") {\n grstart <- ifelse(as.character(strand(gr)) == \"+\", \n start(gr) - as.numeric(Genestartbp), end(gr) + \n as.numeric(Genestartbp))\n }\n else if (Genestarttype == \"TSSdown\") {\n grstart <- ifelse(as.character(strand(gr)) == \"+\", \n start(gr) + as.numeric(Genestartbp), end(gr) - \n as.numeric(Genestartbp))\n }\n else if (Genestarttype == \"TESup\") {\n grstart <- ifelse(as.character(strand(gr)) == \"+\", \n end(gr) - as.numeric(Genestartbp), start(gr) + \n as.numeric(Genestartbp))\n }\n else if (Genestarttype == \"TESdown\") {\n grstart <- ifelse(as.character(strand(gr)) == \"+\", \n end(gr) + as.numeric(Genestartbp), start(gr) - \n as.numeric(Genestartbp))\n }\n if (Geneendtype == \"TSSup\") {\n grend <- ifelse(as.character(strand(gr)) == \"+\", \n start(gr) - as.numeric(Geneendbp), end(gr) + \n as.numeric(Geneendbp))\n }\n else if (Geneendtype == \"TSSdown\") {\n grend <- ifelse(as.character(strand(gr)) == \"+\", \n start(gr) + as.numeric(Geneendbp), end(gr) - \n as.numeric(Geneendbp))\n }\n else if (Geneendtype == \"TESup\") {\n grend <- ifelse(as.character(strand(gr)) == \"+\", \n end(gr) - as.numeric(Geneendbp), start(gr) + \n as.numeric(Geneendbp))\n }\n else if (Geneendtype == \"TESdown\") {\n grend <- ifelse(as.character(strand(gr)) == \"+\", \n end(gr) + as.numeric(Geneendbp), start(gr) - \n as.numeric(Geneendbp))\n }\n ngr <- names(gr)\n gr <- GRanges(seqnames = seqnames(gr), IRanges(start = pmin(grstart, \n grend), end = pmax(grstart, grend)))\n names(gr) <- ngr\n tmp <- sapply(bamdata, function(i) countOverlaps(gr, \n i))\n tmp <- sweep(tmp, 2, bamsummary, \"/\") * 10000\n if (log2transform) {\n tmp <- log2(tmp + 1)\n }\n if (adjustlen) {\n grrange <- end(gr) - start(gr) + 1\n tmp <- sweep(tmp, 1, grrange, \"/\") * 1e+06\n }\n tmp <- tmp[rowSums(tmp) > 0, , drop = F]\n allres <- rbind(allres, tmp)\n }\n if (\"ENCL\" %in% featurelist) {\n print(\"Processing ENCL features\")\n load(paste0(datapath, \"/gr/ENCL\", ENCLclunum, \".rda\"))\n tmp <- sapply(bamdata, function(i) countOverlaps(gr, \n i))\n tmp <- sweep(tmp, 2, bamsummary, \"/\") * 10000\n if (log2transform) {\n tmp <- log2(tmp + 1)\n }\n if (adjustlen) {\n grrange <- sapply(gr, function(i) sum(end(i) - start(i) + \n 1))\n tmp <- sweep(tmp, 1, grrange, \"/\") * 1e+06\n }\n tmp <- tmp[rowSums(tmp) > 0, , drop = F]\n allres <- rbind(allres, tmp)\n }\n if (\"MOTIF_TRANSFAC\" %in% featurelist) {\n print(\"Processing MOTIF_TRANSFAC features\")\n load(paste0(datapath, \"/gr/transfac1.rda\"))\n gr <- flank(gr, as.numeric(Motifflank), both = T)\n tmp <- sapply(bamdata, function(i) countOverlaps(gr, \n i))\n tmp <- sweep(tmp, 2, bamsummary, \"/\") * 10000\n if (log2transform) {\n tmp <- log2(tmp + 1)\n }\n if (adjustlen) {\n grrange <- sapply(gr, function(i) sum(end(i) - start(i) + \n 1))\n tmp <- sweep(tmp, 1, grrange, \"/\") * 1e+06\n }\n tmp <- tmp[rowSums(tmp) > 0, , drop = F]\n allres <- rbind(allres, tmp)\n load(paste0(datapath, \"/gr/transfac2.rda\"))\n gr <- flank(gr, as.numeric(Motifflank), both = T)\n tmp <- sapply(bamdata, function(i) countOverlaps(gr, \n i))\n tmp <- sweep(tmp, 2, bamsummary, \"/\") * 10000\n if (log2transform) {\n tmp <- log2(tmp + 1)\n }\n if (adjustlen) {\n grrange <- sapply(gr, function(i) sum(end(i) - start(i) + \n 1))\n tmp <- sweep(tmp, 1, grrange, \"/\") * 1e+06\n }\n tmp <- tmp[rowSums(tmp) > 0, , drop = F]\n allres <- rbind(allres, tmp)\n if (genome %in% c(\"hg19\", \"hg38\")) {\n load(paste0(datapath, 
\"/gr/transfac3.rda\"))\n gr <- flank(gr, as.numeric(Motifflank), both = T)\n tmp <- sapply(bamdata, function(i) countOverlaps(gr, \n i))\n tmp <- sweep(tmp, 2, bamsummary, \"/\") * 10000\n if (log2transform) {\n tmp <- log2(tmp + 1)\n }\n if (adjustlen) {\n grrange <- sapply(gr, function(i) sum(end(i) - \n start(i) + 1))\n tmp <- sweep(tmp, 1, grrange, \"/\") * 1e+06\n }\n tmp <- tmp[rowSums(tmp) > 0, , drop = F]\n allres <- rbind(allres, tmp)\n }\n }\n if (\"MOTIF_JASPAR\" %in% featurelist) {\n print(\"Processing MOTIF_JASPAR features\")\n load(paste0(datapath, \"/gr/jaspar1.rda\"))\n gr <- flank(gr, as.numeric(Motifflank), both = T)\n tmp <- sapply(bamdata, function(i) countOverlaps(gr, \n i))\n tmp <- sweep(tmp, 2, bamsummary, \"/\") * 10000\n if (log2transform) {\n tmp <- log2(tmp + 1)\n }\n if (adjustlen) {\n grrange <- sapply(gr, function(i) sum(end(i) - start(i) + \n 1))\n tmp <- sweep(tmp, 1, grrange, \"/\") * 1e+06\n }\n tmp <- tmp[rowSums(tmp) > 0, , drop = F]\n allres <- rbind(allres, tmp)\n load(paste0(datapath, \"/gr/jaspar2.rda\"))\n gr <- flank(gr, as.numeric(Motifflank), both = T)\n tmp <- sapply(bamdata, function(i) countOverlaps(gr, \n i))\n tmp <- sweep(tmp, 2, bamsummary, \"/\") * 10000\n if (log2transform) {\n tmp <- log2(tmp + 1)\n }\n if (adjustlen) {\n grrange <- sapply(gr, function(i) sum(end(i) - start(i) + \n 1))\n tmp <- sweep(tmp, 1, grrange, \"/\") * 1e+06\n }\n tmp <- tmp[rowSums(tmp) > 0, , drop = F]\n allres <- rbind(allres, tmp)\n }\n if (\"GSEA\" %in% featurelist) {\n print(\"Processing GSEA features\")\n for (i in GSEAterm) {\n load(paste0(datapath, \"/gr/GSEA\", i, \".rda\"))\n allgr <- gr\n for (sgrn in names(allgr)) {\n gr <- allgr[[sgrn]]\n if (GSEAstarttype == \"TSSup\") {\n grstart <- ifelse(as.character(strand(gr)) == \n \"+\", start(gr) - as.numeric(GSEAstartbp), \n end(gr) + as.numeric(GSEAstartbp))\n }\n else if (GSEAstarttype == \"TSSdown\") {\n grstart <- ifelse(as.character(strand(gr)) == \n \"+\", start(gr) + as.numeric(GSEAstartbp), \n end(gr) - as.numeric(GSEAstartbp))\n }\n else if (GSEAstarttype == \"TESup\") {\n grstart <- ifelse(as.character(strand(gr)) == \n \"+\", end(gr) - as.numeric(GSEAstartbp), start(gr) + \n as.numeric(GSEAstartbp))\n }\n else if (GSEAstarttype == \"TESdown\") {\n grstart <- ifelse(as.character(strand(gr)) == \n \"+\", end(gr) + as.numeric(GSEAstartbp), start(gr) - \n as.numeric(GSEAstartbp))\n }\n if (GSEAendtype == \"TSSup\") {\n grend <- ifelse(as.character(strand(gr)) == \n \"+\", start(gr) - as.numeric(GSEAendbp), end(gr) + \n as.numeric(GSEAendbp))\n }\n else if (GSEAendtype == \"TSSdown\") {\n grend <- ifelse(as.character(strand(gr)) == \n \"+\", start(gr) + as.numeric(GSEAendbp), end(gr) - \n as.numeric(GSEAendbp))\n }\n else if (GSEAendtype == \"TESup\") {\n grend <- ifelse(as.character(strand(gr)) == \n \"+\", end(gr) - as.numeric(GSEAendbp), start(gr) + \n as.numeric(GSEAendbp))\n }\n else if (GSEAendtype == \"TESdown\") {\n grend <- ifelse(as.character(strand(gr)) == \n \"+\", end(gr) + as.numeric(GSEAendbp), start(gr) - \n as.numeric(GSEAendbp))\n }\n ngr <- names(gr)\n gr <- GRanges(seqnames = seqnames(gr), IRanges(start = pmin(grstart, \n grend), end = pmax(grstart, grend)))\n names(gr) <- ngr\n allgr[[sgrn]] <- gr\n }\n gr <- allgr\n tmp <- sapply(bamdata, function(i) countOverlaps(gr, \n i))\n tmp <- sweep(tmp, 2, bamsummary, \"/\") * 10000\n if (log2transform) {\n tmp <- log2(tmp + 1)\n }\n if (adjustlen) {\n grrange <- sapply(gr, function(i) sum(end(i) - \n start(i) + 1))\n tmp <- sweep(tmp, 
1, grrange, \"/\") * 1e+06\n }\n tmp <- tmp[rowSums(tmp) > 0, , drop = F]\n allres <- rbind(allres, tmp)\n }\n }\n if (\"Custom\" %in% featurelist) {\n print(\"Processing custom features\")\n gr <- read.table(customfeature, as.is = T, sep = \"\\t\")\n gr <- GRanges(seqnames = gr[, 1], IRanges(start = gr[, \n 2], end = gr[, 3]))\n tmp <- sapply(bamdata, function(i) countOverlaps(gr, \n i))\n tmp <- sweep(tmp, 2, bamsummary, \"/\") * 10000\n if (log2transform) {\n tmp <- log2(tmp + 1)\n }\n if (adjustlen) {\n grrange <- end(gr) - start(gr) + 1\n tmp <- sweep(tmp, 1, grrange, \"/\") * 1e+06\n }\n tmp <- tmp[rowSums(tmp) > 0, , drop = F]\n allres <- rbind(allres, tmp)\n }\n allres\n}", "_____no_output_____" ], [ "df_out <- SCRATsummary(dir = \"./input/sc-bams_nodup/\", \n genome = \"hg19\",\n featurelist=\"MOTIF_JASPAR\",\n log2transform = FALSE, adjustlen = FALSE, removeblacklist=FALSE)", "[1] \"Processing MOTIF_JASPAR features\"\n" ], [ "end_time <- Sys.time()", "_____no_output_____" ], [ "end_time - start_time", "_____no_output_____" ], [ "dim(df_out)\ndf_out[1:5,1:5]", "_____no_output_____" ], [ "colnames(df_out) = sapply(strsplit(colnames(df_out), \"\\\\.\"),'[',1)\ndim(df_out)\ndf_out[1:5,1:5]", "_____no_output_____" ], [ "if(! all(colnames(df_out) == rownames(metadata))){\n df_out = df_out[,rownames(metadata)]\n dim(df_out)\n df_out[1:5,1:5]\n}", "_____no_output_____" ], [ "dim(df_out)\ndf_out[1:5,1:5]", "_____no_output_____" ], [ "saveRDS(df_out, file = './output/feature_matrices/FM_SCRAT_buenrostro2018_no_blacklist.rds')", "_____no_output_____" ], [ "sessionInfo()", "_____no_output_____" ], [ "save.image(file = 'SCRAT_buenrostro2018.RData')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb799dce1f6d0f53c80e5d36e83389ba2ddad71b
280,311
ipynb
Jupyter Notebook
neural_network_grains_classifier.ipynb
MaciejZj/Copper-grains-classification
81f4179d0928794f38196175ac3c3d30fd5dca8a
[ "MIT" ]
null
null
null
neural_network_grains_classifier.ipynb
MaciejZj/Copper-grains-classification
81f4179d0928794f38196175ac3c3d30fd5dca8a
[ "MIT" ]
null
null
null
neural_network_grains_classifier.ipynb
MaciejZj/Copper-grains-classification
81f4179d0928794f38196175ac3c3d30fd5dca8a
[ "MIT" ]
null
null
null
478.346416
143,028
0.93672
[ [ [ "# Copper grains classification based on thermal images\n\nThis demo shows construction and usage of a neural network that classifies\ncopper grains.\nThe grains are recorded with a thermal camera using active thermovision\napproach.\nThe network is fed with numbers of low emissivity spots on every stage of\ncooling down the grain samples.\nFor more information about tracking and counting these spots refer to other\nJupyter demos in this project.", "_____no_output_____" ] ], [ [ "from inspect import getsource\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\n\nfrom blob_series_tracker import count_blobs_with_all_methods\nfrom img_processing import decode_labels, default_img_set, full_prepare\nfrom neural_network import (default_grain_classifier_model,\n network_cross_validation)\n\n\n%matplotlib inline\n%config InlineBackend.figure_format = 'retina'", "_____no_output_____" ] ], [ [ "## Data preparation\n\nLet's visualize how a sample image of grains looks like.\nTo start with we will load the whole default dataset, then we will select a\nsample labeled as E5 class at the beginning of the colling process and show it.", "_____no_output_____" ] ], [ [ "X, y = default_img_set()\nplt.imshow(X[0][0]);", "_____no_output_____" ] ], [ [ "Next we will prepare the images for classification. They should be cropped to\nremove the FLIR UI overlay.\nThen they ought to be converted to grayscale and inverted.\nThe task is to track points with low thermal emissivity which are presented as\nblack spots on the image.\nHowever multiple image processing algorithms treat white spots as foreground\nfeatures and black space as background.\nThis is opposite to the way in which colors display temperature on our image so\nwe have to invert the photo.\nAll these preparations are completed by a single function.", "_____no_output_____" ] ], [ [ "X = [[full_prepare(img) for img in same_sample] for same_sample in X]", "_____no_output_____" ] ], [ [ "Subsequently we have to track and count low emissivity blobs on each stage of\ncooling the grains.\nThere are three ways of counting the spots:\n* counting all spots at every stage of cooling,\n* tracking blobs that are present from the begging of the cooling and ignoring\n blobs that appear later,\n* tracking blobs that are present from the begging of the cooling and\n calculating the ratio of remaining blobs to their initial number.\n\nTo inspect how these methods work relate to other Jupyter demos.\nIn this notebook we will simply implement a function that uses every one of\nthese approaches and compare results.", "_____no_output_____" ] ], [ [ "Xs = count_blobs_with_all_methods(X)", "_____no_output_____" ] ], [ [ "## Classification\n\nWe will write a function to classify given grains data.\nWe have three X datasets, one for every blob counting method.\nLater we will call the classification demo function on each of them.\n\nThe function turns datasets into NumPy arrays as most of the libraries use this\ncontainer for computation efficiency.\nThen the data is split into train and test sets.\nA function from Scikit-learn is used to perform the split.\nNotice that the function is given constant random seed to ensure repetitiveness\nof the demo.\nSo the call looks like this:\n```python\nX = np.array(X)\ny = np.array(y)\n\nX_train, X_test, y_train, y_test = train_test_split(\n X, y, stratify=y, test_size=0.33, random_state=1)\n```\n\n### Neural network model\n\nThe classifier model is returned 
by the `default_grain_classifier_model` function.\nLet's take a look at the model definition.", "_____no_output_____" ] ], [ [ "print(getsource(default_grain_classifier_model))", "def default_grain_classifier_model():\n '''\n Get default uncompiled model for grain classifcation,\n based on 5 step cooling process using number of blobs.\n '''\n model = keras.Sequential([\n keras.layers.Dense(5, activation='tanh'),\n keras.layers.Dense(256, activation='tanh'),\n keras.layers.Dense(128, activation='tanh'),\n keras.layers.Dense(4, activation='softmax')\n ])\n return model\n\n" ] ], [ [ "The model includes four layers, with two hidden ones.\nThe number of neurons in the input layer is equal to the size of the input\nvector, that is five.\nIf you wish to know why the vector has this size, please refer to the other demo\nnotebooks.\nThe input is equal to the number of classes, in this case it is four.\nOutputs of the network represent the probability of the input belonging to each\nof the classes.\nThe number of the neurons in hidden layers and their activation functions were\nchosen based on experiments.\nYou can examine the comparison of various network structures in the thesis.\n\n### Network training\n\nThe last step of constructing a network is the training.\nThe model compiler has to be given `optimizer`, `loss` and `metrics` parameters\nthat define the training process.\nThe best loss function that calculates error between output of the network and\ntraining label is the `sparse_categorical_crossentropy`.\nDuring comparisons adam optimiser proved to be the best one, as it provides\nadaptive learning rate.\nThe comparison of different network training parameters is included in the\nthesis.\n\nThe `classification_demo` function implements the ideas presented above.\nIt also prints model evaluation on test set and prints training history.", "_____no_output_____" ] ], [ [ "def classification_demo(X, y):\n '''\n Demo grain classification on given data.\n Train and test default model.\n '''\n X = np.array(X)\n y = np.array(y)\n\n X_train, X_test, y_train, y_test = train_test_split(\n X, y, stratify=y, test_size=0.33, random_state=3)\n\n model = default_grain_classifier_model()\n model.compile(\n optimizer='adam',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n history = model.fit(X_train, y_train, epochs=300, verbose=0)\n\n print(\"Test y:\", y_test)\n print(\"Test y:\", [decode_labels(y) for y in y_test])\n print('Test prediction outputs:')\n classes = ('E5R', 'E6R', 'E11R', 'E16R')\n print(pd.DataFrame(model.predict(X_test), columns=classes, index=classes))\n prediction = model.predict_classes(X_test)\n print('Test prediction classification:\\n', prediction)\n print('Test prediction classification:\\n',\n [decode_labels(y) for y in prediction])\n print('Model evaluation loss and accuracy:\\n',\n model.evaluate(X_test, y_test, verbose=0), '\\n')\n return history", "_____no_output_____" ] ], [ [ "## Test results\n\nNow we can use the function to print the results of classification.\nThe output is relatively lengthy, let's clarify it a bit.\nIt prints classes of test set in encoded and decoded form.\nThen the prediction output for each test is shown.\nThe closer the output is to one, the more sure the network is that a sample\nbelongs to given class.\nThen the predictions are presented in encoded and decoded format.\nEach demo is summarized by the loss and accuracy scores.\n\nThe method of tracking and calculating ratio of blobs yield the best results.\nNot only it has the 
highest scores, but also the output is more stable.\nThe first two methods of counting blobs cause the network to produce unstable\nresults.\nThe efficiency varies between each run of the training.\nThe last method of the data extraction yields the best and clearly visible\nclasses separation in the evaluation.\nHowever, this way of validating the classifier is unreliable especially on the\nextremely small data set.\nThis way of testing wastes one-third of the very limited data set and introduces\nnegative bias to the evaluation.\nA better way of judging the efficiency of the network is presented later in the\nnotebook.", "_____no_output_____" ] ], [ [ "demo_names = ('All blobs detection',\n 'Detect only remaining blobs',\n 'Ratio of remaining blobs')\ntraining_histories = []\nfor X, demo_name in zip(Xs, demo_names):\n print(demo_name)\n training_histories.append(classification_demo(X, y))", "All blobs detection\nTest y: [3 0 2 1]\nTest y: ['E16R', 'E5R', 'E11R', 'E6R']\nTest prediction outputs:\n E5R E6R E11R E16R\nE5R 0.000088 2.053345e-04 4.995573e-01 0.500149\nE6R 0.999228 1.578969e-12 1.760408e-08 0.000772\nE11R 0.999224 1.587597e-12 1.770039e-08 0.000776\nE16R 0.999052 1.980504e-12 2.209740e-08 0.000948\nTest prediction classification:\n [3 0 0 0]\nTest prediction classification:\n ['E16R', 'E5R', 'E5R', 'E5R']\nModel evaluation loss and accuracy:\n [11.372742748993915, 0.5] \n\nDetect only remaining blobs\nTest y: [3 0 2 1]\nTest y: ['E16R', 'E5R', 'E11R', 'E6R']\nTest prediction outputs:\n E5R E6R E11R E16R\nE5R 0.000001 4.603295e-10 5.660361e-01 4.339629e-01\nE6R 0.999137 8.625648e-04 2.193910e-07 5.237193e-07\nE11R 0.000001 4.773795e-10 5.795143e-01 4.204847e-01\nE16R 0.000163 9.962723e-01 1.495509e-05 3.549328e-03\nTest prediction classification:\n [2 0 2 1]\nTest prediction classification:\n ['E11R', 'E5R', 'E11R', 'E6R']\nModel evaluation loss and accuracy:\n [0.34623989785904996, 0.75] \n\nRatio of remaining blobs\nTest y: [3 0 2 1]\nTest y: ['E16R', 'E5R', 'E11R', 'E6R']\nTest prediction outputs:\n E5R E6R E11R E16R\nE5R 5.403517e-13 2.504711e-09 6.304197e-02 9.369580e-01\nE6R 9.998676e-01 1.324288e-04 8.938076e-19 5.112053e-14\nE11R 1.555779e-15 4.325757e-12 9.877077e-01 1.229231e-02\nE16R 4.235042e-04 9.847330e-01 1.477951e-02 6.399940e-05\nTest prediction classification:\n [3 0 2 1]\nTest prediction classification:\n ['E16R', 'E5R', 'E11R', 'E6R']\nModel evaluation loss and accuracy:\n [0.0232506170286797, 1.0] \n\n" ] ], [ [ "To investigate the network learning process and ensure it's propriety we can\nplot the training history.\nWe will write a small helper function and plot accuracy and loss versus epochs.", "_____no_output_____" ] ], [ [ "def plot_history(ax, history, demo_name):\n ax.title.set_text('Model training history. 
' + demo_name)\n ax.set_xlabel('Epoch')\n\n lns1 = ax.plot(history.history['accuracy'], c='b', label='Accuracy');\n ax.set_ylabel('Accuracy')\n ax.twinx()\n lns2 = ax.plot(history.history['loss'], c='r', label='Loss');\n plt.ylabel('Loss')\n\n lns = lns1 + lns2\n labs = [l.get_label() for l in lns]\n ax.legend(lns, labs)\n\n_, ax = plt.subplots(1, 3, figsize=(24, 6))\nfor a, history, name in zip(ax, training_histories, demo_names): \n plot_history(a, history, name)", "_____no_output_____" ] ], [ [ "The plots are generated for networks employing different methods of counting blobs for feature extraction.\nAs could have been foreseen, the latter methods that make a use of blob tracking mechanisms have more clear shape, greater accuracy and smaller loss.", "_____no_output_____" ], [ "# Network validation\n\nThe results of the test presented above are fully conclusive.\nThe best way to validate the network and check if the idea of grains\nclassification does work is to perform cross-validation of the model.\nThis method splits the data k-times and performs the validation on each part of\ntrain and test sets.\nThe ultimate result of validation is averaged.\n\nLet's crate a function that will validate perform the cross-validation for every\nway of blob counting.", "_____no_output_____" ] ], [ [ "def cross_val_demo(X, y):\n '''Demo cross validation of default grain classifier on given data.'''\n X = np.array(X)\n y = np.array(y)\n\n model = default_grain_classifier_model()\n model.compile(\n optimizer='adam',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\n scores = network_cross_validation(model, X, y, 3)\n\n print('Folds scores: (loss, acc)\\n', scores)\n scores = np.array(scores)\n print('Cross validation mean score (loss, acc):\\n',\n scores.mean(axis=0), '\\n')", "_____no_output_____" ] ], [ [ "The `network_cross_validation` takes the model to test, dataset and number of\nsplits to perform.\nWe can investigate it's implementation to see how it works.", "_____no_output_____" ] ], [ [ "print(getsource(network_cross_validation))", "def network_cross_validation(model, X, y, n_splits):\n '''Compute cross validation fold scores for given keras model.'''\n eval_scores = []\n\n folds = StratifiedKFold(n_splits=n_splits).split(X, y)\n for train_index, test_index in folds:\n x_train, x_test = X[train_index], X[test_index]\n y_train, y_test = y[train_index], y[test_index]\n\n model.fit(x_train, y_train, epochs=300, verbose=0)\n eval_scores.append(model.evaluate(x_test, y_test, verbose=0))\n return eval_scores\n\n" ] ], [ [ "Now we can perform the validation.\nNotice that the accuracy of the last blob tracking method becomes stable 0.92,\nwhich is a satisfactory result.\nIt should be preferred as the best way to classify copper grains.\nThe other methods of data extraction yield poor classification results and\nshould be rejected.", "_____no_output_____" ] ], [ [ "for X, demo_name in zip(Xs, demo_names):\n print(demo_name)\n cross_val_demo(X, y)", "All blobs detection\nFolds scores: (loss, acc)\n [[8.650588184595108, 0.25], [4.666504085063934, 0.25], [0.3054614175343886, 0.75]]\nCross validation mean score (loss, acc):\n [4.54085123 0.41666667] \n\nDetect only remaining blobs\nFolds scores: (loss, acc)\n [[3.9184877690277062, 0.25], [0.3508455941628199, 0.75], [2.5576355306353094, 0.5]]\nCross validation mean score (loss, acc):\n [2.2756563 0.5 ] \n\nRatio of remaining blobs\nFolds scores: (loss, acc)\n [[0.06772129192813736, 1.0], [4.375062700837148, 0.75], [0.030403532480931972, 
1.0]]\nCross validation mean score (loss, acc):\n [1.49106251 0.91666667] \n\n" ] ], [ [ "## Conclusion\n\nThe results of the validation proved that the copper grains can be classified\nusing active thermography approach.\nThe classifier has been tested and trained on a extremely small dataset.\nHowever the results show that the suggested idea is worth further investigation.\nThe next steps in the project may include creating larger dataset with enhanced\nmeasurement stand and development of a more advanced neural network, such as\nconvolutional neural network.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
cb79a2e5298abb8f8ce58411a0a05f335f614e2a
129,323
ipynb
Jupyter Notebook
assignments/assignment2/Neural Network.ipynb
INeedTractorPlz/dlcourse_ai
52c85f82ed567996667d761cfe2c1f8ab165380b
[ "MIT" ]
1
2019-07-22T09:50:54.000Z
2019-07-22T09:50:54.000Z
assignments/assignment2/Neural Network.ipynb
INeedTractorPlz/dlcourse_ai
52c85f82ed567996667d761cfe2c1f8ab165380b
[ "MIT" ]
null
null
null
assignments/assignment2/Neural Network.ipynb
INeedTractorPlz/dlcourse_ai
52c85f82ed567996667d761cfe2c1f8ab165380b
[ "MIT" ]
1
2020-11-30T18:38:21.000Z
2020-11-30T18:38:21.000Z
112.945852
52,832
0.837515
[ [ [ "# Задание 2.1 - Нейронные сети\n\nВ этом задании вы реализуете и натренируете настоящую нейроную сеть своими руками!\n\nВ некотором смысле это будет расширением прошлого задания - нам нужно просто составить несколько линейных классификаторов вместе!\n\n<img src=\"https://i.redd.it/n9fgba8b0qr01.png\" alt=\"Stack_more_layers\" width=\"400px\"/>", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as plt\n\n%matplotlib inline\n\n%load_ext autoreload\n%autoreload 2", "_____no_output_____" ], [ "from dataset import load_svhn, random_split_train_val\nfrom gradient_check import check_layer_gradient, check_layer_param_gradient, check_model_gradient\nfrom layers import FullyConnectedLayer, ReLULayer\nfrom model import TwoLayerNet\nfrom trainer import Trainer, Dataset\nfrom optim import SGD, MomentumSGD\nfrom metrics import multiclass_accuracy", "_____no_output_____" ] ], [ [ "# Загружаем данные\n\nИ разделяем их на training и validation.", "_____no_output_____" ] ], [ [ "def prepare_for_neural_network(train_X, test_X):\n train_flat = train_X.reshape(train_X.shape[0], -1).astype(np.float) / 255.0\n test_flat = test_X.reshape(test_X.shape[0], -1).astype(np.float) / 255.0\n \n # Subtract mean\n mean_image = np.mean(train_flat, axis = 0)\n train_flat -= mean_image\n test_flat -= mean_image\n \n return train_flat, test_flat\n \ntrain_X, train_y, test_X, test_y = load_svhn(\"data\", max_train=10000, max_test=1000) \ntrain_X, test_X = prepare_for_neural_network(train_X, test_X)\n# Split train into train and val\ntrain_X, train_y, val_X, val_y = random_split_train_val(train_X, train_y, num_val = 1000)", "_____no_output_____" ] ], [ [ "# Как всегда, начинаем с кирпичиков\n\nМы будем реализовывать необходимые нам слои по очереди. Каждый слой должен реализовать:\n- прямой проход (forward pass), который генерирует выход слоя по входу и запоминает необходимые данные\n- обратный проход (backward pass), который получает градиент по выходу слоя и вычисляет градиент по входу и по параметрам\n\nНачнем с ReLU, у которого параметров нет.", "_____no_output_____" ] ], [ [ "# TODO: Implement ReLULayer layer in layers.py\n# Note: you'll need to copy implementation of the gradient_check function from the previous assignment\n\nX = np.array([[1,-2,3],\n [-1, 2, 0.1]\n ])\n\nassert check_layer_gradient(ReLULayer(), X)", "Gradient check passed!\n" ] ], [ [ "А теперь реализуем полносвязный слой (fully connected layer), у которого будет два массива параметров: W (weights) и B (bias).\n\nВсе параметры наши слои будут использовать для параметров специальный класс `Param`, в котором будут храниться значения параметров и градиенты этих параметров, вычисляемые во время обратного прохода.\n\nЭто даст возможность аккумулировать (суммировать) градиенты из разных частей функции потерь, например, из cross-entropy loss и regularization loss.", "_____no_output_____" ] ], [ [ "# TODO: Implement FullyConnected layer forward and backward methods\nassert check_layer_gradient(FullyConnectedLayer(3, 4), X)\n# TODO: Implement storing gradients for W and B\nassert check_layer_param_gradient(FullyConnectedLayer(3, 4), X, 'W')\nassert check_layer_param_gradient(FullyConnectedLayer(3, 4), X, 'B')", "Gradient check passed!\nGradient check passed!\nGradient check passed!\n" ] ], [ [ "## Создаем нейронную сеть\n\nТеперь мы реализуем простейшую нейронную сеть с двумя полносвязным слоями и нелинейностью ReLU. 
Реализуйте функцию `compute_loss_and_gradients`, она должна запустить прямой и обратный проход через оба слоя для вычисления градиентов.\n\nНе забудьте реализовать очистку градиентов в начале функции.", "_____no_output_____" ] ], [ [ "# TODO: In model.py, implement compute_loss_and_gradients function\nmodel = TwoLayerNet(n_input = train_X.shape[1], n_output = 10, hidden_layer_size = 3, reg = 0)\nloss = model.compute_loss_and_gradients(train_X[:2], train_y[:2])\n\n# TODO Now implement backward pass and aggregate all of the params\ncheck_model_gradient(model, train_X[:2], train_y[:2])", "Checking gradient for W1\nGradient check passed!\nChecking gradient for B1\nGradient check passed!\nChecking gradient for W2\nGradient check passed!\nChecking gradient for B2\nGradient check passed!\n" ] ], [ [ "Теперь добавьте к модели регуляризацию - она должна прибавляться к loss и делать свой вклад в градиенты.", "_____no_output_____" ] ], [ [ "# TODO Now implement l2 regularization in the forward and backward pass\nmodel_with_reg = TwoLayerNet(n_input = train_X.shape[1], n_output = 10, hidden_layer_size = 3, reg = 1e1)\nloss_with_reg = model_with_reg.compute_loss_and_gradients(train_X[:2], train_y[:2])\nassert loss_with_reg > loss and not np.isclose(loss_with_reg, loss), \\\n \"Loss with regularization (%2.4f) should be higher than without it (%2.4f)!\" % (loss, loss_with_reg)\n\ncheck_model_gradient(model_with_reg, train_X[:2], train_y[:2])", "Checking gradient for W1\nGradient check passed!\nChecking gradient for B1\nGradient check passed!\nChecking gradient for W2\nGradient check passed!\nChecking gradient for B2\nGradient check passed!\n" ] ], [ [ "Также реализуем функцию предсказания (вычисления значения) модели на новых данных.\n\nКакое значение точности мы ожидаем увидеть до начала тренировки?", "_____no_output_____" ] ], [ [ "# Finally, implement predict function!\n\n# TODO: Implement predict function\n# What would be the value we expect?\nmulticlass_accuracy(model_with_reg.predict(train_X[:30]), train_y[:30]) ", "_____no_output_____" ] ], [ [ "# Допишем код для процесса тренировки", "_____no_output_____" ] ], [ [ "model = TwoLayerNet(n_input = train_X.shape[1], n_output = 10, hidden_layer_size = 100, reg = 2e-3);\ndataset = Dataset(train_X, train_y, val_X, val_y);\ntrainer = Trainer(model, dataset, SGD(), num_epochs=100, batch_size=100,\n learning_rate=5e-1, learning_rate_decay= 0.95);\n\n# TODO Implement missing pieces in Trainer.fit function\n# You should expect loss to go down and train and val accuracy go up for every epoch\nloss_history, train_history, val_history = trainer.fit()", "Loss: 2.057760, Train accuracy: 0.294444, val accuracy: 0.305000\nLoss: 1.719343, Train accuracy: 0.439889, val accuracy: 0.473000\nLoss: 1.991950, Train accuracy: 0.469556, val accuracy: 0.443000\nLoss: 1.644600, Train accuracy: 0.596556, val accuracy: 0.600000\nLoss: 1.604584, Train accuracy: 0.650667, val accuracy: 0.630000\nLoss: 1.226683, Train accuracy: 0.682444, val accuracy: 0.663000\nLoss: 1.366220, Train accuracy: 0.666444, val accuracy: 0.638000\nLoss: 1.416624, Train accuracy: 0.670222, val accuracy: 0.634000\nLoss: 1.186511, Train accuracy: 0.692667, val accuracy: 0.668000\nLoss: 1.162692, Train accuracy: 0.646556, val accuracy: 0.628000\nLoss: 1.279210, Train accuracy: 0.717444, val accuracy: 0.687000\nLoss: 1.391227, Train accuracy: 0.747000, val accuracy: 0.711000\nLoss: 1.244005, Train accuracy: 0.755333, val accuracy: 0.712000\nLoss: 1.657514, Train accuracy: 0.668444, val accuracy: 
0.656000\nLoss: 0.940288, Train accuracy: 0.772889, val accuracy: 0.738000\nLoss: 1.259768, Train accuracy: 0.760000, val accuracy: 0.714000\nLoss: 1.190087, Train accuracy: 0.765889, val accuracy: 0.727000\nLoss: 1.229948, Train accuracy: 0.773333, val accuracy: 0.727000\nLoss: 1.060366, Train accuracy: 0.763111, val accuracy: 0.722000\nLoss: 1.033964, Train accuracy: 0.803111, val accuracy: 0.752000\nLoss: 1.128697, Train accuracy: 0.793333, val accuracy: 0.733000\nLoss: 1.298550, Train accuracy: 0.770444, val accuracy: 0.725000\nLoss: 0.951812, Train accuracy: 0.797333, val accuracy: 0.745000\nLoss: 1.181184, Train accuracy: 0.791222, val accuracy: 0.739000\nLoss: 1.101036, Train accuracy: 0.813222, val accuracy: 0.762000\nLoss: 1.167422, Train accuracy: 0.830111, val accuracy: 0.765000\nLoss: 1.134875, Train accuracy: 0.802667, val accuracy: 0.745000\nLoss: 1.089486, Train accuracy: 0.827444, val accuracy: 0.764000\nLoss: 1.093590, Train accuracy: 0.823444, val accuracy: 0.752000\nLoss: 1.096978, Train accuracy: 0.825222, val accuracy: 0.771000\nLoss: 1.128563, Train accuracy: 0.832667, val accuracy: 0.760000\nLoss: 0.977432, Train accuracy: 0.823889, val accuracy: 0.755000\nLoss: 0.949909, Train accuracy: 0.836111, val accuracy: 0.774000\nLoss: 1.161520, Train accuracy: 0.836222, val accuracy: 0.763000\nLoss: 0.971681, Train accuracy: 0.828667, val accuracy: 0.760000\nLoss: 1.082617, Train accuracy: 0.841333, val accuracy: 0.761000\nLoss: 0.964552, Train accuracy: 0.833556, val accuracy: 0.763000\nLoss: 1.044889, Train accuracy: 0.839111, val accuracy: 0.762000\nLoss: 0.990786, Train accuracy: 0.845222, val accuracy: 0.773000\nLoss: 1.023046, Train accuracy: 0.840778, val accuracy: 0.767000\nLoss: 1.028572, Train accuracy: 0.849889, val accuracy: 0.774000\nLoss: 1.030232, Train accuracy: 0.851000, val accuracy: 0.776000\nLoss: 1.058268, Train accuracy: 0.852778, val accuracy: 0.767000\nLoss: 0.862025, Train accuracy: 0.853778, val accuracy: 0.766000\nLoss: 1.149796, Train accuracy: 0.848778, val accuracy: 0.773000\nLoss: 0.805140, Train accuracy: 0.855111, val accuracy: 0.769000\nLoss: 0.925925, Train accuracy: 0.853667, val accuracy: 0.775000\nLoss: 0.916124, Train accuracy: 0.855667, val accuracy: 0.768000\nLoss: 1.056986, Train accuracy: 0.859444, val accuracy: 0.769000\nLoss: 1.079134, Train accuracy: 0.855778, val accuracy: 0.780000\nLoss: 1.047524, Train accuracy: 0.855444, val accuracy: 0.767000\nLoss: 1.027197, Train accuracy: 0.858000, val accuracy: 0.771000\nLoss: 1.011628, Train accuracy: 0.858222, val accuracy: 0.776000\nLoss: 0.993568, Train accuracy: 0.860333, val accuracy: 0.774000\nLoss: 0.948556, Train accuracy: 0.862667, val accuracy: 0.774000\nLoss: 1.013963, Train accuracy: 0.862667, val accuracy: 0.771000\nLoss: 1.055716, Train accuracy: 0.861222, val accuracy: 0.780000\nLoss: 0.942298, Train accuracy: 0.859444, val accuracy: 0.775000\nLoss: 1.007253, Train accuracy: 0.858111, val accuracy: 0.769000\nLoss: 0.960938, Train accuracy: 0.862111, val accuracy: 0.771000\nLoss: 1.006923, Train accuracy: 0.859444, val accuracy: 0.776000\nLoss: 0.922071, Train accuracy: 0.862333, val accuracy: 0.775000\nLoss: 0.975018, Train accuracy: 0.862222, val accuracy: 0.773000\nLoss: 0.881515, Train accuracy: 0.863667, val accuracy: 0.775000\nLoss: 0.954346, Train accuracy: 0.864111, val accuracy: 0.773000\nLoss: 0.963030, Train accuracy: 0.863000, val accuracy: 0.767000\nLoss: 0.971395, Train accuracy: 0.866111, val accuracy: 0.777000\nLoss: 0.967604, Train accuracy: 0.864000, val 
accuracy: 0.778000\nLoss: 0.970403, Train accuracy: 0.866444, val accuracy: 0.777000\nLoss: 0.972550, Train accuracy: 0.864667, val accuracy: 0.777000\nLoss: 0.943565, Train accuracy: 0.864333, val accuracy: 0.776000\nLoss: 1.039262, Train accuracy: 0.865889, val accuracy: 0.775000\nLoss: 0.883383, Train accuracy: 0.865000, val accuracy: 0.774000\nLoss: 0.886400, Train accuracy: 0.866111, val accuracy: 0.779000\nLoss: 0.961355, Train accuracy: 0.868556, val accuracy: 0.770000\nLoss: 0.927675, Train accuracy: 0.866667, val accuracy: 0.773000\nLoss: 0.861297, Train accuracy: 0.866333, val accuracy: 0.777000\nLoss: 0.918022, Train accuracy: 0.866111, val accuracy: 0.778000\nLoss: 0.991923, Train accuracy: 0.867111, val accuracy: 0.772000\nLoss: 0.926146, Train accuracy: 0.866889, val accuracy: 0.780000\nLoss: 0.964977, Train accuracy: 0.866667, val accuracy: 0.774000\nLoss: 0.930693, Train accuracy: 0.867444, val accuracy: 0.776000\nLoss: 1.037888, Train accuracy: 0.867889, val accuracy: 0.777000\nLoss: 0.953677, Train accuracy: 0.867444, val accuracy: 0.770000\nLoss: 0.883129, Train accuracy: 0.868111, val accuracy: 0.775000\nLoss: 0.898530, Train accuracy: 0.867889, val accuracy: 0.779000\nLoss: 0.876747, Train accuracy: 0.868222, val accuracy: 0.778000\nLoss: 0.962279, Train accuracy: 0.868333, val accuracy: 0.779000\nLoss: 1.098237, Train accuracy: 0.867667, val accuracy: 0.779000\nLoss: 0.919174, Train accuracy: 0.868667, val accuracy: 0.777000\nLoss: 0.787728, Train accuracy: 0.867333, val accuracy: 0.777000\nLoss: 0.913122, Train accuracy: 0.868000, val accuracy: 0.776000\nLoss: 0.957498, Train accuracy: 0.869000, val accuracy: 0.773000\nLoss: 0.911084, Train accuracy: 0.868778, val accuracy: 0.778000\nLoss: 0.937171, Train accuracy: 0.868667, val accuracy: 0.778000\nLoss: 1.007211, Train accuracy: 0.869111, val accuracy: 0.777000\nLoss: 1.041831, Train accuracy: 0.868778, val accuracy: 0.776000\nLoss: 1.007212, Train accuracy: 0.868778, val accuracy: 0.778000\nLoss: 0.971168, Train accuracy: 0.867889, val accuracy: 0.778000\nLoss: 0.957733, Train accuracy: 0.869222, val accuracy: 0.776000\n" ], [ "train_X[model.predict(train_X) != 1]", "_____no_output_____" ], [ "train_y", "_____no_output_____" ], [ "def ReLU(x):\n if x <= 0:\n return 0;\n else:\n return x;\nReLU_vec = np.vectorize(ReLU);\ntrain_X[ReLU_vec(train_X) != 0]", "_____no_output_____" ], [ "val_X_W = model.first.forward(val_X)\nval_X_W", "_____no_output_____" ], [ "model.second.forward(model.ReLU.forward(val_X_W))", "_____no_output_____" ], [ "plt.plot(train_history)\nplt.plot(val_history)", "_____no_output_____" ], [ "plt.plot(loss_history)", "_____no_output_____" ] ], [ [ "# Улучшаем процесс тренировки\n\nМы реализуем несколько ключевых оптимизаций, необходимых для тренировки современных нейросетей.", "_____no_output_____" ], [ "## Уменьшение скорости обучения (learning rate decay)\n\nОдна из необходимых оптимизаций во время тренировки нейронных сетей - постепенное уменьшение скорости обучения по мере тренировки.\n\nОдин из стандартных методов - уменьшение скорости обучения (learning rate) каждые N эпох на коэффициент d (часто называемый decay). Значения N и d, как всегда, являются гиперпараметрами и должны подбираться на основе эффективности на проверочных данных (validation data). 
\n\nВ нашем случае N будет равным 1.", "_____no_output_____" ] ], [ [ "# TODO Implement learning rate decay inside Trainer.fit method\n# Decay should happen once per epoch\n\nmodel = TwoLayerNet(n_input = train_X.shape[1], n_output = 10, hidden_layer_size = 100, reg = 1e-3)\ndataset = Dataset(train_X, train_y, val_X, val_y)\ntrainer = Trainer(model, dataset, SGD(), num_epochs=10, batch_size=100,\n learning_rate=5e-1, learning_rate_decay=0.99)\n\ninitial_learning_rate = trainer.learning_rate\nloss_history, train_history, val_history = trainer.fit()\n\nassert trainer.learning_rate < initial_learning_rate, \"Learning rate should've been reduced\"\nassert trainer.learning_rate > 0.5*initial_learning_rate, \"Learning rate shouldn'tve been reduced that much!\"", "Loss: 2.023491, Train accuracy: 0.289778, val accuracy: 0.295000\nLoss: 1.598660, Train accuracy: 0.494333, val accuracy: 0.498000\nLoss: 1.480496, Train accuracy: 0.570556, val accuracy: 0.566000\nLoss: 1.411475, Train accuracy: 0.536444, val accuracy: 0.542000\nLoss: 1.341957, Train accuracy: 0.614778, val accuracy: 0.607000\nLoss: 1.621627, Train accuracy: 0.519889, val accuracy: 0.501000\nLoss: 1.347271, Train accuracy: 0.666444, val accuracy: 0.628000\nLoss: 1.264947, Train accuracy: 0.701778, val accuracy: 0.666000\nLoss: 1.380224, Train accuracy: 0.651778, val accuracy: 0.621000\nLoss: 1.116376, Train accuracy: 0.713000, val accuracy: 0.685000\n" ] ], [ [ "# Накопление импульса (Momentum SGD)\n\nДругой большой класс оптимизаций - использование более эффективных методов градиентного спуска. Мы реализуем один из них - накопление импульса (Momentum SGD).\n\nЭтот метод хранит скорость движения, использует градиент для ее изменения на каждом шаге, и изменяет веса пропорционально значению скорости.\n(Физическая аналогия: Вместо скорости градиенты теперь будут задавать ускорение, но будет присутствовать сила трения.)\n\n```\nvelocity = momentum * velocity - learning_rate * gradient \nw = w + velocity\n```\n\n`momentum` здесь коэффициент затухания, который тоже является гиперпараметром (к счастью, для него часто есть хорошее значение по умолчанию, типичный диапазон -- 0.8-0.99).\n\nНесколько полезных ссылок, где метод разбирается более подробно: \nhttp://cs231n.github.io/neural-networks-3/#sgd \nhttps://distill.pub/2017/momentum/", "_____no_output_____" ] ], [ [ "# TODO: Implement MomentumSGD.update function in optim.py\n\nmodel = TwoLayerNet(n_input = train_X.shape[1], n_output = 10, hidden_layer_size = 100, reg = 1e-3)\ndataset = Dataset(train_X, train_y, val_X, val_y)\ntrainer = Trainer(model, dataset, MomentumSGD(), num_epochs=10, batch_size=30,\n learning_rate=5e-2, learning_rate_decay=0.99)\n\n# You should see even better results than before!\nloss_history, train_history, val_history = trainer.fit()", "Loss: 1.577062, Train accuracy: 0.474111, val accuracy: 0.432000\nLoss: 1.921048, Train accuracy: 0.600444, val accuracy: 0.584000\nLoss: 1.278775, Train accuracy: 0.552778, val accuracy: 0.542000\nLoss: 1.560019, Train accuracy: 0.621778, val accuracy: 0.589000\nLoss: 1.317357, Train accuracy: 0.647000, val accuracy: 0.617000\nLoss: 1.640894, Train accuracy: 0.627444, val accuracy: 0.595000\nLoss: 1.192661, Train accuracy: 0.673333, val accuracy: 0.653000\nLoss: 1.173557, Train accuracy: 0.669444, val accuracy: 0.649000\nLoss: 1.703374, Train accuracy: 0.607556, val accuracy: 0.567000\nLoss: 1.195787, Train accuracy: 0.701778, val accuracy: 0.638000\n" ] ], [ [ "# Ну что, давайте уже тренировать сеть!", "_____no_output_____" ], [ 
"## Последний тест - переобучимся (overfit) на маленьком наборе данных\n\nХороший способ проверить, все ли реализовано корректно - переобучить сеть на маленьком наборе данных. \nНаша модель обладает достаточной мощностью, чтобы приблизить маленький набор данных идеально, поэтому мы ожидаем, что на нем мы быстро дойдем до 100% точности на тренировочном наборе. \n\nЕсли этого не происходит, то где-то была допущена ошибка!", "_____no_output_____" ] ], [ [ "data_size = 15\nmodel = TwoLayerNet(n_input = train_X.shape[1], n_output = 10, hidden_layer_size = 100, reg = 1e-1)\ndataset = Dataset(train_X[:data_size], train_y[:data_size], val_X[:data_size], val_y[:data_size])\ntrainer = Trainer(model, dataset, SGD(), learning_rate=1e-1, num_epochs=80, batch_size=5)\n\n# You should expect this to reach 1.0 training accuracy \nloss_history, train_history, val_history = trainer.fit()", "Loss: 5.159348, Train accuracy: 0.400000, val accuracy: 0.066667\nLoss: 4.833924, Train accuracy: 0.400000, val accuracy: 0.066667\nLoss: 4.471562, Train accuracy: 0.333333, val accuracy: 0.066667\nLoss: 4.171224, Train accuracy: 0.400000, val accuracy: 0.066667\nLoss: 3.780857, Train accuracy: 0.400000, val accuracy: 0.000000\nLoss: 3.386390, Train accuracy: 0.333333, val accuracy: 0.000000\nLoss: 3.043319, Train accuracy: 0.333333, val accuracy: 0.000000\nLoss: 2.915739, Train accuracy: 0.400000, val accuracy: 0.000000\nLoss: 3.152023, Train accuracy: 0.400000, val accuracy: 0.000000\nLoss: 2.747371, Train accuracy: 0.400000, val accuracy: 0.000000\nLoss: 2.351243, Train accuracy: 0.400000, val accuracy: 0.000000\nLoss: 2.829886, Train accuracy: 0.466667, val accuracy: 0.000000\nLoss: 2.646840, Train accuracy: 0.466667, val accuracy: 0.000000\nLoss: 2.369119, Train accuracy: 0.466667, val accuracy: 0.000000\nLoss: 2.203926, Train accuracy: 0.600000, val accuracy: 0.000000\nLoss: 1.852941, Train accuracy: 0.533333, val accuracy: 0.000000\nLoss: 2.300644, Train accuracy: 0.666667, val accuracy: 0.000000\nLoss: 1.940125, Train accuracy: 0.533333, val accuracy: 0.000000\nLoss: 2.320395, Train accuracy: 0.666667, val accuracy: 0.000000\nLoss: 1.971270, Train accuracy: 0.733333, val accuracy: 0.066667\nLoss: 2.011852, Train accuracy: 0.800000, val accuracy: 0.066667\nLoss: 2.052266, Train accuracy: 0.733333, val accuracy: 0.133333\nLoss: 2.263934, Train accuracy: 0.800000, val accuracy: 0.133333\nLoss: 2.084981, Train accuracy: 0.800000, val accuracy: 0.066667\nLoss: 1.673593, Train accuracy: 0.800000, val accuracy: 0.066667\nLoss: 1.436405, Train accuracy: 0.733333, val accuracy: 0.066667\nLoss: 1.401208, Train accuracy: 0.733333, val accuracy: 0.000000\nLoss: 1.200151, Train accuracy: 0.800000, val accuracy: 0.066667\nLoss: 1.280173, Train accuracy: 0.800000, val accuracy: 0.066667\nLoss: 1.434800, Train accuracy: 0.800000, val accuracy: 0.133333\nLoss: 1.497077, Train accuracy: 0.800000, val accuracy: 0.066667\nLoss: 1.300072, Train accuracy: 0.800000, val accuracy: 0.133333\nLoss: 1.985589, Train accuracy: 0.800000, val accuracy: 0.066667\nLoss: 1.027857, Train accuracy: 0.800000, val accuracy: 0.066667\nLoss: 1.225706, Train accuracy: 0.800000, val accuracy: 0.000000\nLoss: 1.279258, Train accuracy: 0.800000, val accuracy: 0.066667\nLoss: 1.772020, Train accuracy: 0.800000, val accuracy: 0.000000\nLoss: 0.992930, Train accuracy: 0.800000, val accuracy: 0.000000\nLoss: 1.452200, Train accuracy: 0.800000, val accuracy: 0.066667\nLoss: 1.516582, Train accuracy: 0.933333, val accuracy: 0.000000\nLoss: 1.208096, Train 
accuracy: 0.933333, val accuracy: 0.000000\nLoss: 0.844603, Train accuracy: 0.933333, val accuracy: 0.000000\nLoss: 1.197143, Train accuracy: 0.933333, val accuracy: 0.000000\nLoss: 1.526560, Train accuracy: 0.933333, val accuracy: 0.000000\nLoss: 0.969449, Train accuracy: 0.933333, val accuracy: 0.000000\nLoss: 1.227740, Train accuracy: 0.933333, val accuracy: 0.000000\nLoss: 1.236175, Train accuracy: 0.933333, val accuracy: 0.000000\nLoss: 1.219683, Train accuracy: 0.933333, val accuracy: 0.000000\nLoss: 1.209267, Train accuracy: 0.933333, val accuracy: 0.000000\nLoss: 1.458500, Train accuracy: 0.933333, val accuracy: 0.000000\nLoss: 1.107501, Train accuracy: 0.933333, val accuracy: 0.000000\nLoss: 1.508022, Train accuracy: 0.933333, val accuracy: 0.000000\nLoss: 1.194399, Train accuracy: 1.000000, val accuracy: 0.066667\nLoss: 1.216503, Train accuracy: 1.000000, val accuracy: 0.000000\nLoss: 1.152592, Train accuracy: 1.000000, val accuracy: 0.000000\nLoss: 1.325043, Train accuracy: 1.000000, val accuracy: 0.000000\nLoss: 1.217868, Train accuracy: 1.000000, val accuracy: 0.000000\nLoss: 1.387153, Train accuracy: 1.000000, val accuracy: 0.000000\nLoss: 1.221969, Train accuracy: 1.000000, val accuracy: 0.000000\nLoss: 1.219313, Train accuracy: 1.000000, val accuracy: 0.000000\nLoss: 1.144614, Train accuracy: 0.933333, val accuracy: 0.000000\nLoss: 1.433915, Train accuracy: 1.000000, val accuracy: 0.000000\nLoss: 1.072342, Train accuracy: 1.000000, val accuracy: 0.000000\nLoss: 1.109718, Train accuracy: 1.000000, val accuracy: 0.000000\nLoss: 1.501091, Train accuracy: 1.000000, val accuracy: 0.000000\nLoss: 1.354585, Train accuracy: 1.000000, val accuracy: 0.000000\nLoss: 1.316890, Train accuracy: 1.000000, val accuracy: 0.000000\nLoss: 1.085719, Train accuracy: 1.000000, val accuracy: 0.000000\nLoss: 1.228837, Train accuracy: 1.000000, val accuracy: 0.000000\nLoss: 1.099689, Train accuracy: 1.000000, val accuracy: 0.000000\nLoss: 1.181471, Train accuracy: 1.000000, val accuracy: 0.000000\nLoss: 1.310800, Train accuracy: 1.000000, val accuracy: 0.000000\nLoss: 1.267077, Train accuracy: 1.000000, val accuracy: 0.000000\nLoss: 1.246269, Train accuracy: 1.000000, val accuracy: 0.000000\nLoss: 1.183298, Train accuracy: 1.000000, val accuracy: 0.000000\nLoss: 1.394218, Train accuracy: 1.000000, val accuracy: 0.000000\nLoss: 1.305020, Train accuracy: 1.000000, val accuracy: 0.066667\nLoss: 1.184897, Train accuracy: 1.000000, val accuracy: 0.000000\nLoss: 1.072109, Train accuracy: 1.000000, val accuracy: 0.000000\nLoss: 1.151937, Train accuracy: 1.000000, val accuracy: 0.000000\n" ] ], [ [ "Теперь найдем гипепараметры, для которых этот процесс сходится быстрее.\nЕсли все реализовано корректно, то существуют параметры, при которых процесс сходится в **20** эпох или еще быстрее.\nНайдите их!", "_____no_output_____" ] ], [ [ "# Now, tweak some hyper parameters and make it train to 1.0 accuracy in 20 epochs or less\n\nmodel = TwoLayerNet(n_input = train_X.shape[1], n_output = 10, hidden_layer_size = 100, reg = 0)\ndataset = Dataset(train_X[:data_size], train_y[:data_size], val_X[:data_size], val_y[:data_size])\n# TODO: Change any hyperparamers or optimizators to reach training accuracy in 20 epochs\ntrainer = Trainer(model, dataset, SGD(), learning_rate=1e-1, num_epochs=20, batch_size=3)\n\nloss_history, train_history, val_history = trainer.fit()", "Loss: 2.291590, Train accuracy: 0.333333, val accuracy: 0.066667\nLoss: 2.239213, Train accuracy: 0.333333, val accuracy: 0.066667\nLoss: 2.380944, Train 
accuracy: 0.333333, val accuracy: 0.000000\nLoss: 2.440659, Train accuracy: 0.466667, val accuracy: 0.000000\nLoss: 2.070347, Train accuracy: 0.533333, val accuracy: 0.066667\nLoss: 1.450373, Train accuracy: 0.533333, val accuracy: 0.066667\nLoss: 1.413522, Train accuracy: 0.600000, val accuracy: 0.000000\nLoss: 2.065331, Train accuracy: 0.666667, val accuracy: 0.133333\nLoss: 1.050814, Train accuracy: 0.666667, val accuracy: 0.133333\nLoss: 1.151135, Train accuracy: 0.733333, val accuracy: 0.066667\nLoss: 1.062394, Train accuracy: 0.866667, val accuracy: 0.000000\nLoss: 0.504070, Train accuracy: 0.933333, val accuracy: 0.133333\nLoss: 0.473419, Train accuracy: 0.933333, val accuracy: 0.066667\nLoss: 0.611069, Train accuracy: 0.933333, val accuracy: 0.000000\nLoss: 0.639213, Train accuracy: 1.000000, val accuracy: 0.066667\nLoss: 0.410893, Train accuracy: 1.000000, val accuracy: 0.000000\nLoss: 0.219203, Train accuracy: 1.000000, val accuracy: 0.000000\nLoss: 0.066726, Train accuracy: 1.000000, val accuracy: 0.000000\nLoss: 0.162286, Train accuracy: 1.000000, val accuracy: 0.000000\nLoss: 0.045190, Train accuracy: 1.000000, val accuracy: 0.000000\n" ] ], [ [ "# Итак, основное мероприятие!\n\nНатренируйте лучшую нейросеть! Можно добавлять и изменять параметры, менять количество нейронов в слоях сети и как угодно экспериментировать. \n\nДобейтесь точности лучше **40%** на validation set.", "_____no_output_____" ] ], [ [ "# Let's train the best one-hidden-layer network we can\n\nlearning_rates = 1e-4\nreg_strength = 1e-3\nlearning_rate_decay = 0.999\nhidden_layer_size = 128\nnum_epochs = 200\nbatch_size = 64\n\nbest_classifier = TwoLayerNet(n_input = train_X.shape[1], n_output = 10, hidden_layer_size = 100, reg = 1e-3);\ndataset = Dataset(train_X, train_y, val_X, val_y);\ntrainer = Trainer(best_classifier, dataset, MomentumSGD(), num_epochs=100, batch_size=100,\n learning_rate=1e-1, learning_rate_decay= 0.99);\n\n# TODO Implement missing pieces in Trainer.fit function\n# You should expect loss to go down and train and val accuracy go up for every epoch\nloss_history, train_history, val_history = trainer.fit();\nbest_val_accuracy = val_history[-1];\n# TODO find the best hyperparameters to train the network\n# Don't hesitate to add new values to the arrays above, perform experiments, use any tricks you want\n# You should expect to get to at least 40% of valudation accuracy\n# Save loss/train/history of the best classifier to the variables above\n\nprint('best validation accuracy achieved: %f' % best_val_accuracy)", "Loss: 1.743276, Train accuracy: 0.373667, val accuracy: 0.392000\nLoss: 1.610870, Train accuracy: 0.542000, val accuracy: 0.530000\nLoss: 1.667678, Train accuracy: 0.621222, val accuracy: 0.614000\nLoss: 1.273217, Train accuracy: 0.669333, val accuracy: 0.633000\nLoss: 1.688855, Train accuracy: 0.656778, val accuracy: 0.622000\nLoss: 1.079257, Train accuracy: 0.705667, val accuracy: 0.673000\nLoss: 1.227188, Train accuracy: 0.720222, val accuracy: 0.686000\nLoss: 1.165690, Train accuracy: 0.724333, val accuracy: 0.681000\nLoss: 0.960097, Train accuracy: 0.741444, val accuracy: 0.687000\nLoss: 1.189171, Train accuracy: 0.737000, val accuracy: 0.693000\nLoss: 1.218992, Train accuracy: 0.760556, val accuracy: 0.713000\nLoss: 1.304661, Train accuracy: 0.736889, val accuracy: 0.688000\nLoss: 1.421084, Train accuracy: 0.746333, val accuracy: 0.676000\nLoss: 1.397406, Train accuracy: 0.757000, val accuracy: 0.698000\nLoss: 1.115847, Train accuracy: 0.740556, val accuracy: 0.678000\nLoss: 
1.150359, Train accuracy: 0.755889, val accuracy: 0.675000\nLoss: 1.238597, Train accuracy: 0.782667, val accuracy: 0.704000\nLoss: 1.445391, Train accuracy: 0.756111, val accuracy: 0.697000\nLoss: 1.221729, Train accuracy: 0.777667, val accuracy: 0.708000\nLoss: 1.227693, Train accuracy: 0.742444, val accuracy: 0.673000\nLoss: 1.215597, Train accuracy: 0.782889, val accuracy: 0.707000\nLoss: 1.039257, Train accuracy: 0.785000, val accuracy: 0.694000\nLoss: 1.162473, Train accuracy: 0.757222, val accuracy: 0.684000\nLoss: 1.156418, Train accuracy: 0.759778, val accuracy: 0.684000\nLoss: 1.151415, Train accuracy: 0.813000, val accuracy: 0.729000\nLoss: 1.236868, Train accuracy: 0.793667, val accuracy: 0.724000\nLoss: 1.268407, Train accuracy: 0.788778, val accuracy: 0.698000\nLoss: 1.278146, Train accuracy: 0.798111, val accuracy: 0.722000\nLoss: 0.994011, Train accuracy: 0.783889, val accuracy: 0.699000\nLoss: 1.234244, Train accuracy: 0.807111, val accuracy: 0.716000\nLoss: 1.243254, Train accuracy: 0.798556, val accuracy: 0.720000\nLoss: 1.074333, Train accuracy: 0.796667, val accuracy: 0.717000\nLoss: 1.145668, Train accuracy: 0.793444, val accuracy: 0.709000\nLoss: 1.030971, Train accuracy: 0.795778, val accuracy: 0.712000\nLoss: 0.995358, Train accuracy: 0.811667, val accuracy: 0.735000\nLoss: 1.129479, Train accuracy: 0.814222, val accuracy: 0.726000\nLoss: 1.400140, Train accuracy: 0.782222, val accuracy: 0.680000\nLoss: 1.035359, Train accuracy: 0.821111, val accuracy: 0.717000\nLoss: 1.218216, Train accuracy: 0.812444, val accuracy: 0.722000\nLoss: 1.168491, Train accuracy: 0.834333, val accuracy: 0.740000\nLoss: 0.989449, Train accuracy: 0.796000, val accuracy: 0.705000\nLoss: 0.928764, Train accuracy: 0.818222, val accuracy: 0.717000\nLoss: 1.106439, Train accuracy: 0.786111, val accuracy: 0.680000\nLoss: 0.949846, Train accuracy: 0.825333, val accuracy: 0.741000\nLoss: 1.114411, Train accuracy: 0.832556, val accuracy: 0.725000\nLoss: 1.038213, Train accuracy: 0.804444, val accuracy: 0.710000\nLoss: 1.079139, Train accuracy: 0.818778, val accuracy: 0.727000\nLoss: 1.034110, Train accuracy: 0.822444, val accuracy: 0.728000\nLoss: 0.858759, Train accuracy: 0.834778, val accuracy: 0.722000\nLoss: 0.963532, Train accuracy: 0.823000, val accuracy: 0.724000\nLoss: 0.989006, Train accuracy: 0.840667, val accuracy: 0.731000\nLoss: 0.894170, Train accuracy: 0.836667, val accuracy: 0.740000\nLoss: 1.048599, Train accuracy: 0.825111, val accuracy: 0.718000\nLoss: 1.215989, Train accuracy: 0.818000, val accuracy: 0.713000\nLoss: 0.982781, Train accuracy: 0.825222, val accuracy: 0.718000\nLoss: 1.037981, Train accuracy: 0.838556, val accuracy: 0.731000\nLoss: 0.953691, Train accuracy: 0.832889, val accuracy: 0.724000\nLoss: 1.075208, Train accuracy: 0.849111, val accuracy: 0.736000\nLoss: 0.913716, Train accuracy: 0.852222, val accuracy: 0.730000\nLoss: 0.979647, Train accuracy: 0.865222, val accuracy: 0.763000\nLoss: 1.044378, Train accuracy: 0.825889, val accuracy: 0.718000\nLoss: 0.912837, Train accuracy: 0.809000, val accuracy: 0.720000\nLoss: 1.007592, Train accuracy: 0.848667, val accuracy: 0.742000\nLoss: 0.828044, Train accuracy: 0.837333, val accuracy: 0.736000\nLoss: 0.895170, Train accuracy: 0.868444, val accuracy: 0.746000\nLoss: 0.928527, Train accuracy: 0.846000, val accuracy: 0.748000\nLoss: 0.904274, Train accuracy: 0.857333, val accuracy: 0.744000\nLoss: 0.871565, Train accuracy: 0.873444, val accuracy: 0.746000\nLoss: 0.918455, Train accuracy: 0.857333, val accuracy: 
0.741000\nLoss: 1.145318, Train accuracy: 0.811556, val accuracy: 0.716000\nLoss: 1.021040, Train accuracy: 0.852889, val accuracy: 0.744000\nLoss: 1.028453, Train accuracy: 0.859667, val accuracy: 0.760000\nLoss: 0.930704, Train accuracy: 0.871111, val accuracy: 0.745000\nLoss: 0.881517, Train accuracy: 0.866556, val accuracy: 0.754000\nLoss: 0.880905, Train accuracy: 0.886778, val accuracy: 0.763000\nLoss: 0.942066, Train accuracy: 0.847667, val accuracy: 0.738000\nLoss: 0.861154, Train accuracy: 0.877444, val accuracy: 0.750000\nLoss: 0.996219, Train accuracy: 0.869778, val accuracy: 0.746000\nLoss: 0.944638, Train accuracy: 0.860667, val accuracy: 0.742000\nLoss: 0.956469, Train accuracy: 0.872778, val accuracy: 0.750000\nLoss: 0.894449, Train accuracy: 0.877444, val accuracy: 0.755000\nLoss: 0.838482, Train accuracy: 0.872556, val accuracy: 0.747000\nLoss: 0.807193, Train accuracy: 0.875889, val accuracy: 0.749000\nLoss: 0.936702, Train accuracy: 0.878778, val accuracy: 0.761000\nLoss: 0.797635, Train accuracy: 0.876889, val accuracy: 0.758000\nLoss: 0.896070, Train accuracy: 0.884000, val accuracy: 0.747000\nLoss: 0.962982, Train accuracy: 0.856222, val accuracy: 0.743000\nLoss: 0.902790, Train accuracy: 0.871000, val accuracy: 0.738000\nLoss: 0.911869, Train accuracy: 0.889111, val accuracy: 0.759000\nLoss: 0.922656, Train accuracy: 0.878667, val accuracy: 0.764000\nLoss: 0.734755, Train accuracy: 0.869778, val accuracy: 0.744000\nLoss: 0.868990, Train accuracy: 0.878556, val accuracy: 0.743000\nLoss: 1.045162, Train accuracy: 0.855667, val accuracy: 0.724000\nLoss: 1.021613, Train accuracy: 0.875444, val accuracy: 0.742000\nLoss: 0.894410, Train accuracy: 0.889444, val accuracy: 0.760000\nLoss: 0.863364, Train accuracy: 0.879889, val accuracy: 0.747000\nLoss: 0.880927, Train accuracy: 0.895889, val accuracy: 0.771000\nLoss: 0.855832, Train accuracy: 0.892333, val accuracy: 0.777000\nLoss: 0.803813, Train accuracy: 0.887778, val accuracy: 0.753000\nLoss: 0.818788, Train accuracy: 0.887778, val accuracy: 0.758000\nbest validation accuracy achieved: 0.758000\n" ], [ "plt.figure(figsize=(15, 7))\nplt.subplot(211)\nplt.title(\"Loss\")\nplt.plot(loss_history)\nplt.subplot(212)\nplt.title(\"Train/validation accuracy\")\nplt.plot(train_history)\nplt.plot(val_history)", "_____no_output_____" ] ], [ [ "# Как обычно, посмотрим, как наша лучшая модель работает на тестовых данных", "_____no_output_____" ] ], [ [ "test_pred = best_classifier.predict(test_X)\ntest_accuracy = multiclass_accuracy(test_pred, test_y)\nprint('Neural net test set accuracy: %f' % (test_accuracy, ))", "Neural net test set accuracy: 0.715000\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
cb79acb7e7587d4f2948b5cded2e52822e215d66
16,403
ipynb
Jupyter Notebook
Part 2 - Regression/.ipynb_checkpoints/random-forest-regression-checkpoint.ipynb
AlexanderVanEck/machine-learning-fundamentals
d1de166640136e060f7d53ed9414b386c264b2d0
[ "MIT" ]
null
null
null
Part 2 - Regression/.ipynb_checkpoints/random-forest-regression-checkpoint.ipynb
AlexanderVanEck/machine-learning-fundamentals
d1de166640136e060f7d53ed9414b386c264b2d0
[ "MIT" ]
null
null
null
Part 2 - Regression/.ipynb_checkpoints/random-forest-regression-checkpoint.ipynb
AlexanderVanEck/machine-learning-fundamentals
d1de166640136e060f7d53ed9414b386c264b2d0
[ "MIT" ]
null
null
null
123.330827
13,444
0.886606
[ [ [ "# Random forest regression is basically Tree Decision Regression on steriods.\n# We take a random amount of data points from our set and create Tree Decision Regression for\n# those. We do this > 250 times for different random data points, then when we want to predict\n# a new value we look at every averaged value of every decision tree and we average those\n# averaged values. This means we supress noise and outliers.", "_____no_output_____" ], [ "import numpy as np\nimport pandas as pd\nfrom matplotlib import pyplot as plot\nfrom sklearn.ensemble import RandomForestRegressor", "_____no_output_____" ], [ "dataset = pd.read_csv('datasets/position_salaries.csv')\nX = dataset.iloc[:, 1:2].values\ny = dataset.iloc[:, 2].values", "_____no_output_____" ], [ "regressor = RandomForestRegressor(n_estimators=500, random_state=0)\nregressor = regressor.fit(X, y)", "_____no_output_____" ], [ "X_grid = np.arange(min(X), max(X), 0.01)\nX_grid = X_grid.reshape(len(X_grid), 1)\n\nplot.title('Salaries vs. Position level')\nplot.xlabel('Position level')\nplot.ylabel('Yearly salary')\nplot.scatter(X, y, color='red')\nplot.plot(X_grid, regressor.predict(X_grid), color='blue')\nplot.show()", "_____no_output_____" ], [ "regressor.predict(6.5)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code" ] ]
cb79b123894900fcf0aba3da020c7f45d99e1e8a
1,993
ipynb
Jupyter Notebook
notes/W-Standalone_Programs.ipynb
agill8781/python
7207b168c1aa3a5cc978812d22a5a2633f8b7a7f
[ "CC0-1.0" ]
28
2019-03-01T23:42:41.000Z
2022-03-29T01:01:00.000Z
notes/W-Standalone_Programs.ipynb
agill8781/python
7207b168c1aa3a5cc978812d22a5a2633f8b7a7f
[ "CC0-1.0" ]
1
2019-04-18T18:29:42.000Z
2019-04-18T18:29:42.000Z
notes/W-Standalone_Programs.ipynb
agill8781/python
7207b168c1aa3a5cc978812d22a5a2633f8b7a7f
[ "CC0-1.0" ]
43
2018-12-12T20:11:01.000Z
2022-03-29T01:45:22.000Z
24.604938
129
0.560963
[ [ [ "### Creating a Python program (not on the notebook) -- _Optional_\n\n\n* Use SSH and connect to your Unix shell\n* Type `nano helloworld.py` to open `nano`, a (relatively easy to use) text editor, and create the file `helloworld.py`\n* In the text editor, type:\n `print('Hello my dear friend!')`\n* Type Ctrl+X, to save the file and close the editor.\n* In the shell prompt, type `python helloworld.py` to see the outcome\n\n", "_____no_output_____" ], [ "#### Exercise\n\nOpen and change the file `helloworld.py` to print your own message.\n", "_____no_output_____" ] ], [ [ "# execute your steps in the shell", "_____no_output_____" ] ], [ [ "### Creating a self-executing Python program -- _Optional_\n\n* Repeat the steps as above, but with the following change: In the first line of your program, type: `#!/usr/bin/python`\n* Save the file with the name `hello`\n* In the shell, type `chmod 700 hello`. This makes our file _executable_.\n* Finally type `./hello` and see what happens.", "_____no_output_____" ] ], [ [ "# execute your steps in the shell", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb79c90efd08f348c79c65921caf77329ebd99d6
324,023
ipynb
Jupyter Notebook
lecture-5.ipynb
oseledets/NLA
d16d47bc8e20df478d98b724a591d33d734ec74b
[ "MIT" ]
14
2015-01-20T13:24:38.000Z
2022-02-03T05:54:09.000Z
lecture-5.ipynb
oseledets/NLA
d16d47bc8e20df478d98b724a591d33d734ec74b
[ "MIT" ]
null
null
null
lecture-5.ipynb
oseledets/NLA
d16d47bc8e20df478d98b724a591d33d734ec74b
[ "MIT" ]
4
2015-09-10T09:14:10.000Z
2019-10-09T04:36:07.000Z
422.45502
98,881
0.92132
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
cb79dbad815d0760dae7118447d546c8458e954f
1,451
ipynb
Jupyter Notebook
1684. Count the Number of Consistent Strings.ipynb
SurajpratapsinghSayar/LeetCode-Solutions
1a4c0a803d54aa77e801eb1052ce92fafc2d1ca8
[ "MIT" ]
null
null
null
1684. Count the Number of Consistent Strings.ipynb
SurajpratapsinghSayar/LeetCode-Solutions
1a4c0a803d54aa77e801eb1052ce92fafc2d1ca8
[ "MIT" ]
null
null
null
1684. Count the Number of Consistent Strings.ipynb
SurajpratapsinghSayar/LeetCode-Solutions
1a4c0a803d54aa77e801eb1052ce92fafc2d1ca8
[ "MIT" ]
null
null
null
18.367089
56
0.459683
[ [ [ "allowed = 'ab'\nwords = [\"ad\",\"bd\",\"aaab\",\"baa\",\"badab\"]", "_____no_output_____" ], [ "def solution(a,b):\n check = [i for i in a]\n for word in b:\n s = [i for i in word if i not in check]\n if len(s)!=0:\n words.remove(word)\n print(b)", "_____no_output_____" ], [ "solution(allowed,words)", "['aaab', 'baa']\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code" ] ]
cb7a0618f7ce69cbf3dae12eb8b2e09941f74d51
3,286
ipynb
Jupyter Notebook
InClassAssignment.ipynb
bcb25/IS-465G
4b751d9d24aeb1e260fc853cf06c29d96d8823f7
[ "MIT" ]
null
null
null
InClassAssignment.ipynb
bcb25/IS-465G
4b751d9d24aeb1e260fc853cf06c29d96d8823f7
[ "MIT" ]
null
null
null
InClassAssignment.ipynb
bcb25/IS-465G
4b751d9d24aeb1e260fc853cf06c29d96d8823f7
[ "MIT" ]
null
null
null
37.770115
279
0.509434
[ [ [ "<a href=\"https://colab.research.google.com/github/bcb25/IS-465-BB/blob/master/InClassAssignment.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ] ], [ [ "# Author: William Gleason <[email protected]>\n# Ben Berman <[email protected]>\n# Iveth Contreras <[email protected]>\n# Class: IS 465-002\n\nimport nltk \nimport string\nnltk.download('wordnet')\nfrom nltk.corpus import wordnet\nsynonyms = [] \nantonyms = [] \nsample_set=['day','danish','dare','death','night','name','life','liberty','justice','journey','flight','fruit']\n#Checking Synonyms and Antonyms from nltk wordnet against input word\ndef Thesaurus(x): \n for syn in wordnet.synsets(x): \n for l in syn.lemmas(): \n synonyms.append(l.name()) \n if l.antonyms(): \n antonyms.append(l.antonyms()[0].name())\n print(\"Synonyms: \" + str(synonyms)) \n print(\"Antonyms: \" + str(antonyms))\n#Checking a word against a word list for the first two letters of the word, prints each word that matches\ndef isSimilar(x):\n first= x[0]+x[1]\n Check = [idx for idx in sample_set if idx[0].lower()+idx[1].lower()== first.lower()] \n print(\"Words starting with \"+first+\": \" + str(Check))\n#Has user input a word and passes it to the methods\nx = input('Please Enter a Word: ') \nThesaurus(x)\nisSimilar(x)\n", "[nltk_data] Downloading package wordnet to /root/nltk_data...\n[nltk_data] Package wordnet is already up-to-date!\nPlease Enter a Word: day\nSynonyms: ['day', 'twenty-four_hours', 'twenty-four_hour_period', '24-hour_interval', 'solar_day', 'mean_solar_day', 'day', 'day', 'day', 'daytime', 'daylight', 'day', 'day', 'day', 'sidereal_day', 'day', 'day', 'Day', 'Clarence_Day', 'Clarence_Shepard_Day_Jr.']\nAntonyms: ['night']\nWords starting with da: ['day', 'danish', 'dare']\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code" ] ]
cb7a075460de4cf7acb2d0f1614bef3d54d1124d
1,015,333
ipynb
Jupyter Notebook
Properati_EDA.ipynb
sebacastrocba/real-state-project
d6283602029f03dda1697c505286b5a502baad7e
[ "MIT" ]
null
null
null
Properati_EDA.ipynb
sebacastrocba/real-state-project
d6283602029f03dda1697c505286b5a502baad7e
[ "MIT" ]
null
null
null
Properati_EDA.ipynb
sebacastrocba/real-state-project
d6283602029f03dda1697c505286b5a502baad7e
[ "MIT" ]
null
null
null
288.775028
551,172
0.913404
[ [ [ "# Project: Valuing real estate properties using machine learning", "_____no_output_____" ], [ "## Part 1: From EDA to data preparation", "_____no_output_____" ], [ "The objective of this project is to create a machine learning model that values real estate properties in Argentina.", "_____no_output_____" ], [ "For this we will use the dataset available at https://www.properati.com.ar.\nThis dataset contains the following features:\n\n- **id**: Identifier of the ad. It is not unique: if the notice is updated by the real estate agency (new version of the notice) a new record is created with the same id but different dates: registration and cancellation.\n- **ad_type**: Type of ad (Property, Development/Project).\n- **start_date**: Date of entry of the ad.\n- **end_date**: Date of cancellation of the ad.\n- **created_on**: Date of registration of the first version of the ad.\n- **lat**: Latitude.\n- **lon**: Longitude.\n- **l1**: Administrative level 1: country.\n- **l2**: Administrative level 2: usually province.\n- **l3**: Administrative level 3: usually city.\n- **l4**: Administrative level 4: usually neighborhood.\n- **l5**: Administrative level 5: not defined.\n- **l6**: Administrative level 6: not defined.\n- **rooms**: Number of environments (useful in Argentina).\n- **bedrooms**: Number of bedrooms (useful in the rest of the countries).\n- **bathrooms**: Number of bathrooms.\n- **surface_total**: Total area in m².\n- **surface_covered**: Covered area in m².\n- **price**: Price published in the ad.\n- **currency**: Currency of the published price.\n- **price_period**: Price Period (Daily, Weekly, Monthly)\n- **title**: Title of the ad.\n- **description**: Ad Description.\n- **property_type**: Type of property (House, Apartment, PH, plot of land, etc.).\n- **operation_type**: Type of operation (Sale, Rent).\n", "_____no_output_____" ] ], [ [ "#Importings\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport missingno as msno\nfrom sklearn.impute import SimpleImputer\n%matplotlib inline", "_____no_output_____" ], [ "#Helper function\ndef cat_plot(data, col):\n total_ads = len(data)\n temp_df = pd.Series((data[col].value_counts() / total_ads) * 100)\n fig = temp_df.sort_values(ascending=False).plot.bar()\n fig.set_xlabel(col)\n fig.axhline(y=5, color='red')\n fig.set_ylabel('Percentage of ads')\n plt.show()\n return fig", "_____no_output_____" ], [ "#Loding the data\ndataset = pd.read_csv(\"ar_properties.csv\")", "_____no_output_____" ] ], [ [ "## Exploratory Data Analysis", "_____no_output_____" ] ], [ [ "dataset.head()", "_____no_output_____" ], [ "print(\"The dataframe contains {} rows and {} columns\".format(dataset.shape[0], dataset.shape[1]))", "The dataframe contains 1000000 rows and 25 columns\n" ], [ "dataset.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 1000000 entries, 0 to 999999\nData columns (total 25 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 id 1000000 non-null object \n 1 ad_type 1000000 non-null object \n 2 start_date 1000000 non-null object \n 3 end_date 1000000 non-null object \n 4 created_on 1000000 non-null object \n 5 lat 863498 non-null float64\n 6 lon 862598 non-null float64\n 7 l1 1000000 non-null object \n 8 l2 1000000 non-null object \n 9 l3 959375 non-null object \n 10 l4 260280 non-null object \n 11 l5 5237 non-null object \n 12 l6 0 non-null float64\n 13 rooms 583440 non-null float64\n 14 bedrooms 518261 non-null float64\n 15 bathrooms 775634 
non-null float64\n 16 surface_total 398606 non-null float64\n 17 surface_covered 407021 non-null float64\n 18 price 959629 non-null float64\n 19 currency 958099 non-null object \n 20 price_period 385258 non-null object \n 21 title 1000000 non-null object \n 22 description 999968 non-null object \n 23 property_type 1000000 non-null object \n 24 operation_type 1000000 non-null object \ndtypes: float64(9), object(16)\nmemory usage: 190.7+ MB\n" ], [ "#Checking duplicate ads\ndataset.duplicated(subset=\"id\").sum()", "_____no_output_____" ] ], [ [ "This dataframe has no duplicated values but we can see that it has NaNs values. Besides, taking in account the objective of this project we will evaluate which columns are necessary. Those not necessary will be discarded. ", "_____no_output_____" ] ], [ [ "#Drop not necessary columns\ndf = dataset.drop([\"ad_type\",\"id\", \"start_date\", \"end_date\", \"created_on\", \"lat\", \"lon\", \"l6\", \"title\", \"description\"], axis=1)", "_____no_output_____" ], [ "df.shape", "_____no_output_____" ] ], [ [ "For some reason, there are publications of properties that have a larger covered surface area than the total surface area. This is not correct, so this dataset will be filtered.", "_____no_output_____" ] ], [ [ "#Restriction\nmask = df.surface_covered <= df.surface_total\ndf = df[mask]\ndf.reset_index(inplace=True, drop=True)", "_____no_output_____" ] ], [ [ "### Categorical features analysis", "_____no_output_____" ] ], [ [ "#Categorical features analysis:\ncategorical = df.select_dtypes(include=\"object\")\ncategorical_list = categorical.columns.to_list()\ncategorical.isna().sum()", "_____no_output_____" ] ], [ [ "The dataset contains several features with NaN values. Therefore, handling missing values will be necessary.", "_____no_output_____" ], [ "### Feature analysis", "_____no_output_____" ], [ "**l1 column**", "_____no_output_____" ] ], [ [ "df.l1.value_counts()", "_____no_output_____" ], [ "cat_plot(df, \"l1\")", "_____no_output_____" ] ], [ [ "We can see that most ads are from Argentina. It is our objective to construct a model that predict real state values from Argentina. So we will discard those ads from other countries. We chose a country since the real estate market will vary strongly according to state policies that are foreign to the data provided. In addition, the other countries represent less than 5% of the ads in the dataset.", "_____no_output_____" ] ], [ [ "mask1 = df.l1 == \"Argentina\"\ndf = df[mask1]", "_____no_output_____" ] ], [ [ "**l2 column**", "_____no_output_____" ] ], [ [ "cat_plot(df, \"l2\")", "_____no_output_____" ] ], [ [ "We note that most of the publications belong to the regions of Buenos Aires and the province of Santa Fe. The other regions have publications that account for less than 5% of the total number of publications. 
", "_____no_output_____" ] ], [ [ "list(set(df.l2))", "_____no_output_____" ], [ "bsas = [x for x in list(set(df.l2)) if x.startswith('B')]", "_____no_output_____" ], [ "bsas", "_____no_output_____" ], [ "interior = [x for x in list(set(df.l2)) if (x != \"Capital Federal\") and (x not in bsas)]", "_____no_output_____" ], [ "interior", "_____no_output_____" ], [ "df.l2.replace(to_replace=interior, value= \"Interior\", inplace=True)\ndf.l2.replace(to_replace=bsas, value= \"Buenos Aires\", inplace=True)", "_____no_output_____" ], [ "cat_plot(df, \"l2\")", "_____no_output_____" ] ], [ [ "**l3 column**", "_____no_output_____" ] ], [ [ "len(df.l3.unique())", "_____no_output_____" ], [ "cat_plot(df, \"l3\")", "_____no_output_____" ] ], [ [ "This feature will introduce high cardinality into the model. There are 698 different location, most of them with low prevalence. This would lead to overfitting problems.", "_____no_output_____" ] ], [ [ "df = df.drop(\"l3\", axis=1)", "_____no_output_____" ] ], [ [ "**currency column**", "_____no_output_____" ] ], [ [ "df.currency.fillna(\"unknown\", inplace=True)\ndf.currency.value_counts()", "_____no_output_____" ], [ "cat_plot(df, \"currency\")", "_____no_output_____" ] ], [ [ "We can observe that for most advertisements currency is dollars. As this data is directly related to the target \"precio\" (price), we should unify the unit of the paper currency used. One option would be to convert the price in argentine pesos to dollars. Since the Argentine economy is very unstable and the value of the dollar is so variable and dependent on several factors, it is difficult to follow this option to obtain a reliable model. Therefore, we will choose to eliminate those publications made in currencies other than dollars.", "_____no_output_____" ] ], [ [ "to_replace = {\"unknown\": np.nan}\ndf.currency.replace(to_replace, value=None, inplace=True)", "_____no_output_____" ], [ "mask2 = df.currency == \"USD\"\ndf = df[mask2]", "_____no_output_____" ] ], [ [ "**property_type column**", "_____no_output_____" ] ], [ [ "df.property_type.value_counts()", "_____no_output_____" ], [ "cat_plot(df, \"property_type\")", "_____no_output_____" ] ], [ [ "Some property type categories represent less than 5% of the published ads. We will only retain those whose number of publications is greater than 5%. But we can group those categories under \"Otro\" (others). We already have an \"Otro\" category so one solution could be to append those ads whose property_type is under 5% to \"Otro\" category.", "_____no_output_____" ] ], [ [ "df.property_type.replace([\"Casa de campo\", \"Cochera\", \"Depósito\", \"Lote\", \"Local comercial\", \"Oficina\"], value= \"Otro\", inplace=True)", "_____no_output_____" ], [ "cat_plot(df, \"property_type\")", "_____no_output_____" ] ], [ [ "**price_period column**", "_____no_output_____" ] ], [ [ "df.price_period.fillna(\"unknown\", inplace=True)\ndf.price_period.value_counts()", "_____no_output_____" ], [ "cat_plot(df, \"price_period\")", "_____no_output_____" ] ], [ [ "Most publications are unknown and monthly ads. Daily and weekly publications do not exceed 5% of the publications. 
", "_____no_output_____" ] ], [ [ "print(\"Percentage of unknown =\" ,round(df[(df['price_period'] =='unknown')].shape[0]/df.shape[0]*100,2),'%')", "Percentage of unknown = 49.99 %\n" ] ], [ [ "Here we have two options: discard unknown values and select only those monthly posted or impute unknown values as monthly posted ads.", "_____no_output_____" ] ], [ [ "df.price_period.replace(to_replace, value=None, inplace=True)", "_____no_output_____" ], [ "#We select only those ads that are monthly paid\nmask3 = df.price_period == \"Mensual\"\ndf = df[mask3]", "_____no_output_____" ] ], [ [ "**operation_type column**", "_____no_output_____" ] ], [ [ "df.operation_type.value_counts()", "_____no_output_____" ], [ "cat_plot(df, \"operation_type\")", "_____no_output_____" ] ], [ [ "The type of operation of most of the publications is sale. Sale and rent are very different operations that would definitely influence the target. As most operations are sale we will only take those.", "_____no_output_____" ] ], [ [ "#df.operation_type.replace(to_replace=\"Alquiler temporal\", value=\"Alquiler\", inplace=True)", "_____no_output_____" ], [ "#df = df.drop(\"operation_type\", axis=1)", "_____no_output_____" ] ], [ [ "### Outliers detection and elimination", "_____no_output_____" ] ], [ [ "df.describe()", "_____no_output_____" ] ], [ [ "We can see that there are indeed outliers. It is unlikely that there are properties with 35 rooms, 123 bedrooms, or 20 bathrooms, for example. ", "_____no_output_____" ] ], [ [ "#Numeric features\nnumeric_cols = df.select_dtypes(include=[\"int\", \"float\"]).columns.tolist()", "_____no_output_____" ], [ "plt.figure(figsize = (20,20))\nplt.subplot(3, 2, 1)\nsns.boxplot(data = df, x= 'rooms', y = 'property_type', palette = 'colorblind')\nplt.title('Rooms boxplot')\nplt.xlabel('Rooms')\nplt.ylabel('Property types')\n\nplt.subplot(3, 2, 2)\nsns.boxplot(data = df, x= 'bedrooms', y = 'property_type', palette = 'colorblind')\nplt.title('Bedrooms boxplot')\nplt.xlabel('Bedrooms')\nplt.ylabel('Property types')\n\nplt.subplot(3, 2, 3)\nsns.boxplot(data = df, x= 'bathrooms', y = 'property_type', palette = 'colorblind')\nplt.title('Bathrooms boxplot')\nplt.xlabel('Bathrooms')\nplt.ylabel('Property types')\n\nplt.subplot(3, 2, 4)\nsns.boxplot(data = df, x= 'surface_total', y = 'property_type', palette = 'colorblind')\nplt.title('Total area boxplot')\nplt.xlabel('Total area')\nplt.ylabel('Property type')\n\nplt.subplot(3, 2, 5)\nsns.boxplot(data = df, x= 'surface_covered', y = 'property_type', palette = 'colorblind')\nplt.title('Covered area boxplot')\nplt.xlabel('Covered area')\nplt.ylabel('Property type')\n\nplt.subplot(3, 2, 6)\nsns.boxplot(data = df, x= 'price', y = 'property_type', palette = 'colorblind')\nplt.title('Price boxplot')\nplt.xlabel('Price')\nplt.ylabel('Property type')\n\nplt.show()", "_____no_output_____" ], [ "def remove_outliers(dfx):\n q1 = dfx.quantile(0.25)\n q3 = dfx.quantile(0.75)\n iqr = q3 - q1\n cut_off = iqr*1.5\n \n df_filtred = dfx[~((dfx < (dfx.quantile(0.25) - cut_off)) | (dfx > (dfx.quantile(0.75) + cut_off))).any(axis=1)]\n \n return df_filtred", "_____no_output_____" ], [ "df2 = remove_outliers(df)", "_____no_output_____" ], [ "df2.shape", "_____no_output_____" ] ], [ [ "### Missing values analysis", "_____no_output_____" ] ], [ [ "num_nans = df2.isna().sum()\nnum_nans", "_____no_output_____" ], [ "total_ads = len(df2)\ncols_df2 = df2.columns.to_list()\nnum_nans = df2.isna().sum()\n\nfig = 
df2.isna().sum().sort_values(ascending=False).plot.bar()\nfig.set_xlabel(\"Column\")\nfig.set_ylabel(\"Number of NaNs\")\n\nplt.show()", "_____no_output_____" ], [ "for col in cols_df2:\n print(f\"Percentage of NaNs in {col} =\", round(df2[(df2[col].isna())].shape[0] / df2.shape[0]*100,2), '%')", "Percentage of NaNs in l1 = 0.0 %\nPercentage of NaNs in l2 = 0.0 %\nPercentage of NaNs in l4 = 67.55 %\nPercentage of NaNs in l5 = 99.4 %\nPercentage of NaNs in rooms = 13.6 %\nPercentage of NaNs in bedrooms = 16.66 %\nPercentage of NaNs in bathrooms = 5.09 %\nPercentage of NaNs in surface_total = 0.0 %\nPercentage of NaNs in surface_covered = 0.0 %\nPercentage of NaNs in price = 0.0 %\nPercentage of NaNs in currency = 0.0 %\nPercentage of NaNs in price_period = 0.0 %\nPercentage of NaNs in property_type = 0.0 %\nPercentage of NaNs in operation_type = 0.0 %\n" ] ], [ [ "A large number of missing values are observed in columns l4 and l5 features (more than 20% of NaN values). Those features refers to regions or neighborhoods that would be difficult to impute. Also imputing them would create a great bias. Therefore, this features will be eliminated.", "_____no_output_____" ] ], [ [ "df3 = df2.drop([\"l4\", \"l5\"], axis=1) # Eliminate NaNs values where %NaNs > 20%", "_____no_output_____" ], [ "df3.reset_index(inplace=True, drop=True)", "_____no_output_____" ] ], [ [ "### **MCAR, MAR or MNAR**", "_____no_output_____" ] ], [ [ "msno.matrix(df3, figsize=(15,5))", "_____no_output_____" ], [ "df3_sorted = df3.sort_values(\"property_type\")\nmsno.matrix(df3_sorted, figsize=(15,5))", "_____no_output_____" ], [ "df3_sorted.property_type.unique()", "_____no_output_____" ] ], [ [ "Even when we sort by \"property_type\", the values of l3 still look random. This could prove these values are missing completely at random (MCAR). For \"rooms\", \"bedrooms\" and \"bathrooms\" NaNs values could be missing at random (MAR). When the dataset is sorted by \"property_type\" it is observed a grouping behaviour in NaNs values from those features. Maybe this is due to property types that do not have a room, bedroom or a bathrooms like \"Lote\" or \"Cochera\". \nAnother way would be by plotting a heat map to see if the missingness has any correlation:", "_____no_output_____" ] ], [ [ "fig = msno.heatmap(df3, figsize=(15,5))\nplt.show()", "_____no_output_____" ] ], [ [ "Missing observations in rooms, bedrooms and bathrooms have little correlation. 
", "_____no_output_____" ], [ "### Missing values imputation", "_____no_output_____" ] ], [ [ "#Missing values by property type \ndf3.set_index(\"property_type\")[[\"bedrooms\", \"rooms\", \"bathrooms\"]].isnull().groupby(level=0).sum()", "_____no_output_____" ], [ "#imputation by property type\n\nproperty_types = df3.property_type.unique().tolist()\n\n# Buscamos los valores más frecuentes\nmost_frequent_bath = df3.groupby(['property_type'])['bathrooms'].agg(pd.Series.mode)\nmost_frequent_bath = dict(most_frequent_bath)\nprint(most_frequent_bath)\nmost_frequent_rooms = df3.groupby(['property_type'])['rooms'].agg(pd.Series.mode)\nmost_frequent_rooms = dict(most_frequent_rooms)\nprint(most_frequent_rooms)\nmost_frequent_bed = df3.groupby('property_type')['bedrooms'].agg(pd.Series.mode)\nmost_frequent_bed = dict(most_frequent_bed)\nprint(most_frequent_bed)", "{'Casa': 2.0, 'Departamento': 1.0, 'Otro': 1.0, 'PH': 1.0}\n{'Casa': 4.0, 'Departamento': 2.0, 'Otro': 1.0, 'PH': 3.0}\n{'Casa': 3.0, 'Departamento': 1.0, 'Otro': 2.0, 'PH': 2.0}\n" ], [ "df3.bathrooms = df3.bathrooms.fillna(df3.property_type.map(most_frequent_bath)) \ndf3.rooms = df3.rooms.fillna(df3.property_type.map(most_frequent_rooms))\ndf3.bedrooms = df3.bedrooms.fillna(df3.property_type.map(most_frequent_bed))", "_____no_output_____" ], [ "df3.shape", "_____no_output_____" ] ], [ [ "### Numerical features analysis", "_____no_output_____" ] ], [ [ "#Numeric features\nnum_cols = df3.select_dtypes(include=[\"int\", \"float\"]).columns.tolist()", "_____no_output_____" ], [ "print(num_cols)", "['rooms', 'bedrooms', 'bathrooms', 'surface_total', 'surface_covered', 'price']\n" ] ], [ [ "**rooms**", "_____no_output_____" ] ], [ [ "df3.groupby(\"property_type\")[\"rooms\"].describe()", "_____no_output_____" ] ], [ [ "**bedrooms**", "_____no_output_____" ] ], [ [ "df3.groupby(\"property_type\")[\"bedrooms\"].describe()", "_____no_output_____" ] ], [ [ "Notice that department have negative values. 
This is not correct, so need to be fixed.", "_____no_output_____" ] ], [ [ "df3[df3.property_type == \"Departamento\"][\"bedrooms\"].hist()", "_____no_output_____" ] ], [ [ "**bathrooms**", "_____no_output_____" ] ], [ [ "df3.groupby(\"property_type\")[\"bathrooms\"].describe()", "_____no_output_____" ] ], [ [ "**surface_total**", "_____no_output_____" ] ], [ [ "df3.groupby(\"property_type\")[\"surface_total\"].describe()", "_____no_output_____" ] ], [ [ "**surface_covered**", "_____no_output_____" ] ], [ [ "df3.groupby(\"property_type\")[\"surface_covered\"].describe()", "_____no_output_____" ] ], [ [ "**price**", "_____no_output_____" ] ], [ [ "df3.groupby(\"property_type\")[\"price\"].describe()", "_____no_output_____" ] ], [ [ "### Target distribution", "_____no_output_____" ] ], [ [ "#Target distribution\nsns.histplot(data = df3, x=\"price\", bins=10)", "_____no_output_____" ], [ "# create a copy of data\ndata_copy = df3.copy()# create a new feature Log_Price\ndata_copy['Log_Price'] = np.log(df3['price'])", "_____no_output_____" ], [ "from sklearn.preprocessing import PowerTransformer\n\npt = PowerTransformer(method='box-cox')\na = pt.fit_transform(df3[['price']])\ndata_copy['box_cox_price'] = a", "_____no_output_____" ], [ "sns.displot(data=data_copy, x=\"box_cox_price\", bins=10)\nplt.title(\"Histogram of Price\")\nplt.show()", "_____no_output_____" ] ], [ [ "### Final dataset:", "_____no_output_____" ] ], [ [ "df3.reset_index(inplace=True, drop=True)", "_____no_output_____" ], [ "final_df = df3.drop([\"l1\", \"currency\"], axis=1)", "_____no_output_____" ], [ "final_df.head()", "_____no_output_____" ], [ "final_df.describe()", "_____no_output_____" ], [ "sns.pairplot(final_df)", "_____no_output_____" ], [ "final_df.corr()", "_____no_output_____" ], [ "final_df.to_csv('Properaty_data_clean.csv', index=False)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ] ]
cb7a085c1e1864bc48867e1da016149909833563
4,838
ipynb
Jupyter Notebook
Tips/2016-03-08-Functional-Programming-in-Python.ipynb
guoweikuang/pytips
cb4c53385c2986122a711dfc15ef05272db70f86
[ "MIT" ]
606
2016-03-07T11:57:43.000Z
2022-03-24T08:31:26.000Z
Tips/2016-03-08-Functional-Programming-in-Python.ipynb
guoweikuang/pytips
cb4c53385c2986122a711dfc15ef05272db70f86
[ "MIT" ]
19
2016-03-10T16:02:00.000Z
2020-11-19T15:31:14.000Z
Tips/2016-03-08-Functional-Programming-in-Python.ipynb
guoweikuang/pytips
cb4c53385c2986122a711dfc15ef05272db70f86
[ "MIT" ]
122
2016-03-07T11:59:02.000Z
2021-07-15T04:36:52.000Z
24.069652
340
0.541753
[ [ [ "### Python 中的函数式编程\n\n> 函数式编程(英语:functional programming)或称函数程序设计,又称泛函编程,是一种编程范型,它将电脑运算视为数学上的函数计算,并且避免使用程序状态以及易变对象。函数编程语言最重要的基础是λ演算(lambda calculus)。而且λ演算的函数可以接受函数当作输入(引数)和输出(传出值)。(维基百科:函数式编程)\n\n所谓编程范式(Programming paradigm)是指编程风格、方法或模式,比如面向过程编程(C语言)、面向对象编程(C++)、面向函数式编程(Haskell),并不是说某种编程语言一定属于某种范式,例如 Python 就是多范式编程语言。\n\n#### 函数式编程\n\n函数式编程具有以下特点:\n\n1. 避免状态变量\n2. 函数也是变量(一等公民,First-Class Citizen)\n3. 高阶函数\n4. 面向问题描述而不是面向问题解决步骤\n\n值得一提的是,函数式编程的这些特点在实践过程中可能并不是那么 Pythonic,甚至与**[0x00](https://github.com/rainyear/pytips/blob/master/Tips/2016-03-06-The-Zen-of-Python.ipynb)**中提到的 The Zen of Python 相悖。例如函数式编程面向问题描述的特点可能让你更快地写出更简洁的代码,但可读性却也大打折扣(可参考这一段[Haskell代码](https://gist.github.com/rainyear/94b5d9a865601f075719))。不过,虽然 Pythonic 很重要但并不是唯一的准则,_The Choice Is Yours_。", "_____no_output_____" ], [ "#### `map(function, iterable, ...)`/`filter(function, iterable)`", "_____no_output_____" ] ], [ [ "# map 函数的模拟实现\ndef myMap(func, iterable):\n for arg in iterable:\n yield func(arg)\n\nnames = [\"ana\", \"bob\", \"dogge\"]\n\nprint(map(lambda x: x.capitalize(), names)) # Python 2.7 中直接返回列表\nfor name in myMap(lambda x: x.capitalize(), names):\n print(name)", "<map object at 0x11185c9b0>\nAna\nBob\nDogge\n" ], [ "# filter 函数的模拟实现\ndef myFilter(func, iterable):\n for arg in iterable:\n if func(arg):\n yield arg\n \nprint(filter(lambda x: x % 2 == 0, range(10))) # Python 2.7 中直接返回列表\nfor i in myFilter(lambda x: x % 2 == 0, range(10)):\n print(i)", "<filter object at 0x11185cbe0>\n0\n2\n4\n6\n8\n" ] ], [ [ "#### `functools.reduce(function, iterable[, initializer])`\n\nPython 3.5 中`reduce` 被降格到标准库`functools`,`reduce` 也是遍历可迭代对象元素作为第一个函数的参数,并将结果累计:", "_____no_output_____" ] ], [ [ "from functools import reduce\n\nprint(reduce(lambda a, b: a*b, range(1,5)))", "24\n" ] ], [ [ "#### `functools.partial(func, *args, **keywords)`\n\n偏应用函数(Partial Application)让我们可以固定函数的某些参数:", "_____no_output_____" ] ], [ [ "from functools import partial\n\nadd = lambda a, b: a + b\nadd1024 = partial(add, 1024)\n\nadd1024(1)", "_____no_output_____" ] ], [ [ "这里简单介绍了一些常用函数式编程的方法和概念,实际上要传达的一个最重要的观念就是**函数本身也可以作为变量被返回、传递给高阶函数**,这使得我们可以更灵活地运用函数解决问题。但是这并不意味着一定要使用上面这些方法来简化代码,例如更 Pythonic 的方法推荐尽可能使用 List Comprehension 替代`map`/`filter`(关于 List Comprehension 后面会再单独介绍)。如果一定想要用函数式编程的方法来写 Python,也可以尝试[Fn.py](https://github.com/kachayev/fn.py),或者,试试 [Haskell](https://www.haskell.org/)。", "_____no_output_____" ], [ "#### 参考\n\n1. [维基百科:函数式编程](https://zh.wikipedia.org/wiki/%E5%87%BD%E6%95%B8%E7%A8%8B%E5%BC%8F%E8%AA%9E%E8%A8%80)\n2. [byvoid:APIO讲稿——函数式编程](http://byvoid.github.io/slides/apio-fp/index.html)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
cb7a10721ff98446ecc13195b190cf419ce20905
1,039,629
ipynb
Jupyter Notebook
data analysis/Buldyrev_vs_RGG/mann whitney.ipynb
junohpark221/BSc_individual_project
44f49d3cbb93298880f046551056185b72324d17
[ "MIT" ]
1
2021-07-04T15:38:52.000Z
2021-07-04T15:38:52.000Z
data analysis/Buldyrev_vs_RGG/mann whitney.ipynb
junohpark221/BSc_individual_project
44f49d3cbb93298880f046551056185b72324d17
[ "MIT" ]
null
null
null
data analysis/Buldyrev_vs_RGG/mann whitney.ipynb
junohpark221/BSc_individual_project
44f49d3cbb93298880f046551056185b72324d17
[ "MIT" ]
null
null
null
193.707658
46,072
0.854151
[ [ [ "import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport statistics\n\nfrom scipy import stats", "_____no_output_____" ], [ "buldy_RGG_50_rep100_045 = pd.read_csv('Raw_data/Processed/proc_buldy_RGG_50_rep100_045.csv')\n\ndel buldy_RGG_50_rep100_045['Unnamed: 0']", "_____no_output_____" ], [ "buldy_RGG_50_rep100_045", "_____no_output_____" ], [ "buldy_RGG_50_rep100_067 = pd.read_csv('proc_buldy_RGG_50_rep100_067.csv')\n\ndel buldy_RGG_50_rep100_067['Unnamed: 0']", "_____no_output_____" ], [ "buldy_RGG_50_rep100_067", "_____no_output_____" ], [ "buldy_RGG_200_rep100_0685 = pd.read_csv('proc_buldy_RGG_200_rep100_0685.csv')\n\ndel buldy_RGG_200_rep100_0685['Unnamed: 0']", "_____no_output_____" ], [ "buldy_RGG_200_rep100_0685 ", "_____no_output_____" ], [ "buldy_RGG_200_rep100_095 = pd.read_csv('proc_buldy_RGG_200_rep100_095.csv')\n\ndel buldy_RGG_200_rep100_095['Unnamed: 0']", "_____no_output_____" ], [ "buldy_RGG_200_rep100_095", "_____no_output_____" ], [ "buldy_RGG_50_rep100_045_rgg_rgg_data = buldy_RGG_50_rep100_045.copy()\nbuldy_RGG_50_rep100_045_rgg_rand_data = buldy_RGG_50_rep100_045.copy()\nbuldy_RGG_50_rep100_045_rand_rgg_data = buldy_RGG_50_rep100_045.copy()\nbuldy_RGG_50_rep100_045_rand_rand_data = buldy_RGG_50_rep100_045.copy()\n\nrgg_rgg_drop_list = []\nrgg_rand_drop_list = []\nrand_rgg_drop_list = []\nrand_rand_drop_list = []\n\nfor i in range(400):\n if i % 4 == 0:\n rgg_rand_drop_list.append(i)\n rand_rgg_drop_list.append(i)\n rand_rand_drop_list.append(i)\n elif i % 4 == 1:\n rgg_rgg_drop_list.append(i)\n rand_rgg_drop_list.append(i)\n rand_rand_drop_list.append(i)\n elif i % 4 == 2:\n rgg_rgg_drop_list.append(i)\n rgg_rand_drop_list.append(i)\n rand_rand_drop_list.append(i)\n elif i % 4 == 3:\n rgg_rgg_drop_list.append(i)\n rgg_rand_drop_list.append(i)\n rand_rgg_drop_list.append(i) \n\nbuldy_RGG_50_rep100_045_rgg_rgg_data = buldy_RGG_50_rep100_045_rgg_rgg_data.drop(rgg_rgg_drop_list)\nbuldy_RGG_50_rep100_045_rgg_rand_data = buldy_RGG_50_rep100_045_rgg_rand_data.drop(rgg_rand_drop_list)\nbuldy_RGG_50_rep100_045_rand_rgg_data = buldy_RGG_50_rep100_045_rand_rgg_data.drop(rand_rgg_drop_list)\nbuldy_RGG_50_rep100_045_rand_rand_data = buldy_RGG_50_rep100_045_rand_rand_data.drop(rand_rand_drop_list)\n\nbuldy_RGG_50_rep100_045_rgg_rgg_data = buldy_RGG_50_rep100_045_rgg_rgg_data.reset_index(drop=True)\nbuldy_RGG_50_rep100_045_rgg_rand_data = buldy_RGG_50_rep100_045_rgg_rand_data.reset_index(drop=True)\nbuldy_RGG_50_rep100_045_rand_rgg_data = buldy_RGG_50_rep100_045_rand_rgg_data.reset_index(drop=True)\nbuldy_RGG_50_rep100_045_rand_rand_data = buldy_RGG_50_rep100_045_rand_rand_data.reset_index(drop=True)", "_____no_output_____" ], [ "buldy_RGG_50_rep100_045_rgg_rgg_data", "_____no_output_____" ], [ "buldy_RGG_50_rep100_067_rgg_rgg_data = buldy_RGG_50_rep100_067.copy()\nbuldy_RGG_50_rep100_067_rgg_rand_data = buldy_RGG_50_rep100_067.copy()\nbuldy_RGG_50_rep100_067_rand_rgg_data = buldy_RGG_50_rep100_067.copy()\nbuldy_RGG_50_rep100_067_rand_rand_data = buldy_RGG_50_rep100_067.copy()\n\nrgg_rgg_drop_list = []\nrgg_rand_drop_list = []\nrand_rgg_drop_list = []\nrand_rand_drop_list = []\n\nfor i in range(400):\n if i % 4 == 0:\n rgg_rand_drop_list.append(i)\n rand_rgg_drop_list.append(i)\n rand_rand_drop_list.append(i)\n elif i % 4 == 1:\n rgg_rgg_drop_list.append(i)\n rand_rgg_drop_list.append(i)\n rand_rand_drop_list.append(i)\n elif i % 4 == 2:\n rgg_rgg_drop_list.append(i)\n rgg_rand_drop_list.append(i)\n rand_rand_drop_list.append(i)\n elif i % 4 == 3:\n 
rgg_rgg_drop_list.append(i)\n rgg_rand_drop_list.append(i)\n rand_rgg_drop_list.append(i) \n\nbuldy_RGG_50_rep100_067_rgg_rgg_data = buldy_RGG_50_rep100_067_rgg_rgg_data.drop(rgg_rgg_drop_list)\nbuldy_RGG_50_rep100_067_rgg_rand_data = buldy_RGG_50_rep100_067_rgg_rand_data.drop(rgg_rand_drop_list)\nbuldy_RGG_50_rep100_067_rand_rgg_data = buldy_RGG_50_rep100_067_rand_rgg_data.drop(rand_rgg_drop_list)\nbuldy_RGG_50_rep100_067_rand_rand_data = buldy_RGG_50_rep100_067_rand_rand_data.drop(rand_rand_drop_list)\n\nbuldy_RGG_50_rep100_067_rgg_rgg_data = buldy_RGG_50_rep100_067_rgg_rgg_data.reset_index(drop=True)\nbuldy_RGG_50_rep100_067_rgg_rand_data = buldy_RGG_50_rep100_067_rgg_rand_data.reset_index(drop=True)\nbuldy_RGG_50_rep100_067_rand_rgg_data = buldy_RGG_50_rep100_067_rand_rgg_data.reset_index(drop=True)\nbuldy_RGG_50_rep100_067_rand_rand_data = buldy_RGG_50_rep100_067_rand_rand_data.reset_index(drop=True)", "_____no_output_____" ], [ "buldy_RGG_50_rep100_067_rgg_rgg_data", "_____no_output_____" ], [ "buldy_RGG_200_rep100_0685_rgg_rgg_data = buldy_RGG_200_rep100_0685.copy()\nbuldy_RGG_200_rep100_0685_rgg_rand_data = buldy_RGG_200_rep100_0685.copy()\nbuldy_RGG_200_rep100_0685_rand_rgg_data = buldy_RGG_200_rep100_0685.copy()\nbuldy_RGG_200_rep100_0685_rand_rand_data = buldy_RGG_200_rep100_0685.copy()\n\nrgg_rgg_drop_list = []\nrgg_rand_drop_list = []\nrand_rgg_drop_list = []\nrand_rand_drop_list = []\n\nfor i in range(400):\n if i % 4 == 0:\n rgg_rand_drop_list.append(i)\n rand_rgg_drop_list.append(i)\n rand_rand_drop_list.append(i)\n elif i % 4 == 1:\n rgg_rgg_drop_list.append(i)\n rand_rgg_drop_list.append(i)\n rand_rand_drop_list.append(i)\n elif i % 4 == 2:\n rgg_rgg_drop_list.append(i)\n rgg_rand_drop_list.append(i)\n rand_rand_drop_list.append(i)\n elif i % 4 == 3:\n rgg_rgg_drop_list.append(i)\n rgg_rand_drop_list.append(i)\n rand_rgg_drop_list.append(i) \n\nbuldy_RGG_200_rep100_0685_rgg_rgg_data = buldy_RGG_200_rep100_0685_rgg_rgg_data.drop(rgg_rgg_drop_list)\nbuldy_RGG_200_rep100_0685_rgg_rand_data = buldy_RGG_200_rep100_0685_rgg_rand_data.drop(rgg_rand_drop_list)\nbuldy_RGG_200_rep100_0685_rand_rgg_data = buldy_RGG_200_rep100_0685_rand_rgg_data.drop(rand_rgg_drop_list)\nbuldy_RGG_200_rep100_0685_rand_rand_data = buldy_RGG_200_rep100_0685_rand_rand_data.drop(rand_rand_drop_list)\n\nbuldy_RGG_200_rep100_0685_rgg_rgg_data = buldy_RGG_200_rep100_0685_rgg_rgg_data.reset_index(drop=True)\nbuldy_RGG_200_rep100_0685_rgg_rand_data = buldy_RGG_200_rep100_0685_rgg_rand_data.reset_index(drop=True)\nbuldy_RGG_200_rep100_0685_rand_rgg_data = buldy_RGG_200_rep100_0685_rand_rgg_data.reset_index(drop=True)\nbuldy_RGG_200_rep100_0685_rand_rand_data = buldy_RGG_200_rep100_0685_rand_rand_data.reset_index(drop=True)", "_____no_output_____" ], [ "buldy_RGG_200_rep100_0685_rgg_rgg_data", "_____no_output_____" ], [ "buldy_RGG_200_rep100_095_rgg_rgg_data = buldy_RGG_200_rep100_095.copy()\nbuldy_RGG_200_rep100_095_rgg_rand_data = buldy_RGG_200_rep100_095.copy()\nbuldy_RGG_200_rep100_095_rand_rgg_data = buldy_RGG_200_rep100_095.copy()\nbuldy_RGG_200_rep100_095_rand_rand_data = buldy_RGG_200_rep100_095.copy()\n\nrgg_rgg_drop_list = []\nrgg_rand_drop_list = []\nrand_rgg_drop_list = []\nrand_rand_drop_list = []\n\nfor i in range(400):\n if i % 4 == 0:\n rgg_rand_drop_list.append(i)\n rand_rgg_drop_list.append(i)\n rand_rand_drop_list.append(i)\n elif i % 4 == 1:\n rgg_rgg_drop_list.append(i)\n rand_rgg_drop_list.append(i)\n rand_rand_drop_list.append(i)\n elif i % 4 == 2:\n rgg_rgg_drop_list.append(i)\n 
rgg_rand_drop_list.append(i)\n rand_rand_drop_list.append(i)\n elif i % 4 == 3:\n rgg_rgg_drop_list.append(i)\n rgg_rand_drop_list.append(i)\n rand_rgg_drop_list.append(i) \n\nbuldy_RGG_200_rep100_095_rgg_rgg_data = buldy_RGG_200_rep100_095_rgg_rgg_data.drop(rgg_rgg_drop_list)\nbuldy_RGG_200_rep100_095_rgg_rand_data = buldy_RGG_200_rep100_095_rgg_rand_data.drop(rgg_rand_drop_list)\nbuldy_RGG_200_rep100_095_rand_rgg_data = buldy_RGG_200_rep100_095_rand_rgg_data.drop(rand_rgg_drop_list)\nbuldy_RGG_200_rep100_095_rand_rand_data = buldy_RGG_200_rep100_095_rand_rand_data.drop(rand_rand_drop_list)\n\nbuldy_RGG_200_rep100_095_rgg_rgg_data = buldy_RGG_200_rep100_095_rgg_rgg_data.reset_index(drop=True)\nbuldy_RGG_200_rep100_095_rgg_rand_data = buldy_RGG_200_rep100_095_rgg_rand_data.reset_index(drop=True)\nbuldy_RGG_200_rep100_095_rand_rgg_data = buldy_RGG_200_rep100_095_rand_rgg_data.reset_index(drop=True)\nbuldy_RGG_200_rep100_095_rand_rand_data = buldy_RGG_200_rep100_095_rand_rand_data.reset_index(drop=True)", "_____no_output_____" ], [ "buldy_RGG_200_rep100_095_rgg_rgg_data", "_____no_output_____" ], [ "stats.kstest(buldy_RGG_200_rep100_0685_rand_rgg_data['alive_nodes'], 'norm')", "_____no_output_____" ], [ "stats.kstest(buldy_RGG_200_rep100_0685_rand_rand_data['alive_nodes'], 'norm')", "_____no_output_____" ], [ "stats.mannwhitneyu(buldy_RGG_200_rep100_0685_rand_rgg_data['alive_nodes'], buldy_RGG_200_rep100_0685_rand_rand_data['alive_nodes'])", "_____no_output_____" ], [ "stats.kstest(buldy_RGG_200_rep100_095_rgg_rgg_data['alive_nodes'], 'norm')", "_____no_output_____" ], [ "stats.kstest(buldy_RGG_200_rep100_095_rgg_rand_data['alive_nodes'], 'norm')", "_____no_output_____" ], [ "stats.mannwhitneyu(buldy_RGG_200_rep100_095_rgg_rgg_data['alive_nodes'], buldy_RGG_200_rep100_095_rgg_rand_data['alive_nodes'])", "_____no_output_____" ] ], [ [ "# Data Dividing Done\n\n\n\n\n# -----------------------------------------------------------------------------------------------\n\n\n\n\n\n# Plotting Starts", "_____no_output_____" ], [ "## find_inter_thres", "_____no_output_____" ] ], [ [ "find_inter_thres_list = []\n\nfor col in find_inter_thres.columns:\n if col != 'rep':\n find_inter_thres_list.append(statistics.mean(find_inter_thres[col].values.tolist()))\n\nprint(find_inter_thres_list)", "[413.29333333333335, 479.46, 483.4266666666667, 477.70666666666665, 472.5733333333333, 469.29333333333335, 469.82666666666665, 473.26666666666665, 485.8933333333333, 494.93333333333334, 497.52, 499.28666666666663, 499.87333333333333, 500.0, 500.0, 500.0, 500.0, 500.0, 500.0, 500.0]\n" ], [ "Xs = [0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1.0]\n\nplt.plot(Xs, [i/500 for i in find_inter_thres_list])\nplt.xticks([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0])\nplt.axvline(x=0.7, color='r', linestyle='--')\nplt.savefig('find_inter_thres.png')", "_____no_output_____" ] ], [ [ "## rep5_04_002", "_____no_output_____" ] ], [ [ "rgg_rgg_dict = {}\nrgg_rand_dict = {}\nrand_rgg_dict = {}\nrand_rand_dict = {}\n\nfor i in range(20):\n target = [i*5 + 0, i*5 + 1, i*5 + 2, i*5 + 3, i*5 + 4]\n \n temp_rgg_rgg = rgg_rgg_data[i*5 + 0 : i*5 + 5]\n temp_rgg_rand = rgg_rand_data[i*5 + 0 : i*5 + 5]\n temp_rand_rgg = rand_rgg_data[i*5 + 0 : i*5 + 5]\n temp_rand_rand = rand_rand_data[i*5 + 0 : i*5 + 5]\n \n if i == 0:\n rgg_rgg_dict['intra_thres'] = [statistics.mean(temp_rgg_rgg['intra_thres'].values.tolist())]\n rgg_rgg_dict['alive_nodes'] = 
[statistics.mean(temp_rgg_rgg['alive_nodes'].values.tolist())]\n rgg_rgg_dict['init_mean_deg'] = [statistics.mean(temp_rgg_rgg['init_mean_deg'].values.tolist())]\n \n rgg_rand_dict['intra_thres'] = [statistics.mean(temp_rgg_rand['intra_thres'].values.tolist())]\n rgg_rand_dict['alive_nodes'] = [statistics.mean(temp_rgg_rand['alive_nodes'].values.tolist())]\n rgg_rand_dict['init_mean_deg'] = [statistics.mean(temp_rgg_rand['init_mean_deg'].values.tolist())]\n \n rand_rgg_dict['intra_thres'] = [statistics.mean(temp_rand_rgg['intra_thres'].values.tolist())]\n rand_rgg_dict['alive_nodes'] = [statistics.mean(temp_rand_rgg['alive_nodes'].values.tolist())]\n rand_rgg_dict['init_mean_deg'] = [statistics.mean(temp_rand_rgg['init_mean_deg'].values.tolist())]\n \n rand_rand_dict['intra_thres'] = [statistics.mean(temp_rand_rand['intra_thres'].values.tolist())]\n rand_rand_dict['alive_nodes'] = [statistics.mean(temp_rand_rand['alive_nodes'].values.tolist())]\n rand_rand_dict['init_mean_deg'] = [statistics.mean(temp_rand_rand['init_mean_deg'].values.tolist())]\n else:\n rgg_rgg_dict['intra_thres'].append(statistics.mean(temp_rgg_rgg['intra_thres'].values.tolist()))\n rgg_rgg_dict['alive_nodes'].append(statistics.mean(temp_rgg_rgg['alive_nodes'].values.tolist()))\n rgg_rgg_dict['init_mean_deg'].append(statistics.mean(temp_rgg_rgg['init_mean_deg'].values.tolist()))\n \n rgg_rand_dict['intra_thres'].append(statistics.mean(temp_rgg_rand['intra_thres'].values.tolist()))\n rgg_rand_dict['alive_nodes'].append(statistics.mean(temp_rgg_rand['alive_nodes'].values.tolist()))\n rgg_rand_dict['init_mean_deg'].append(statistics.mean(temp_rgg_rand['init_mean_deg'].values.tolist()))\n \n rand_rgg_dict['intra_thres'].append(statistics.mean(temp_rand_rgg['intra_thres'].values.tolist()))\n rand_rgg_dict['alive_nodes'].append(statistics.mean(temp_rand_rgg['alive_nodes'].values.tolist()))\n rand_rgg_dict['init_mean_deg'].append(statistics.mean(temp_rand_rgg['init_mean_deg'].values.tolist()))\n \n rand_rand_dict['intra_thres'].append(statistics.mean(temp_rand_rand['intra_thres'].values.tolist()))\n rand_rand_dict['alive_nodes'].append(statistics.mean(temp_rand_rand['alive_nodes'].values.tolist()))\n rand_rand_dict['init_mean_deg'].append(statistics.mean(temp_rand_rand['init_mean_deg'].values.tolist()))", "_____no_output_____" ], [ "plt.plot(rgg_rgg_dict['intra_thres'], rgg_rgg_dict['alive_nodes'])\nplt.plot(rgg_rgg_dict['intra_thres'], rgg_rand_dict['alive_nodes'])\nplt.plot(rgg_rgg_dict['intra_thres'], rand_rgg_dict['alive_nodes'])\nplt.plot(rgg_rgg_dict['intra_thres'], rand_rand_dict['alive_nodes'])\nplt.legend(['RGG-RGG', 'RGG-Rand', 'Rand-RGG', 'Rand-Rand'])\nplt.title('Mean Alive nodes')\nplt.show()", "_____no_output_____" ], [ "p = 0.9\n\nplt.plot([p * i for i in rgg_rgg_dict['init_mean_deg']], rgg_rgg_dict['alive_nodes'])\nplt.plot([p * i for i in rgg_rgg_dict['init_mean_deg']], rgg_rand_dict['alive_nodes'])\nplt.plot([p * i for i in rgg_rgg_dict['init_mean_deg']], rand_rgg_dict['alive_nodes'])\nplt.plot([p * i for i in rgg_rgg_dict['init_mean_deg']], rand_rand_dict['alive_nodes'])\nplt.legend(['RGG-RGG', 'RGG-Rand', 'Rand-RGG', 'Rand-Rand'])\nplt.title('Mean Alive nodes')\nplt.show()", "_____no_output_____" ] ], [ [ "## att30_rep5_04_002", "_____no_output_____" ] ], [ [ "rgg_rgg_2_dict = {}\nrgg_rand_2_dict = {}\nrand_rgg_2_dict = {}\nrand_rand_2_dict = {}\n\nfor i in range(50):\n target = [i*5 + 0, i*5 + 1, i*5 + 2, i*5 + 3, i*5 + 4]\n \n temp_rgg_rgg = rgg_rgg_2_data[i*5 + 0 : i*5 + 5]\n temp_rgg_rand = 
rgg_rand_2_data[i*5 + 0 : i*5 + 5]\n temp_rand_rgg = rand_rgg_2_data[i*5 + 0 : i*5 + 5]\n temp_rand_rand = rand_rand_2_data[i*5 + 0 : i*5 + 5]\n \n if i == 0:\n rgg_rgg_2_dict['intra_thres'] = [statistics.mean(temp_rgg_rgg['intra_thres'].values.tolist())]\n rgg_rgg_2_dict['alive_nodes'] = [statistics.mean(temp_rgg_rgg['alive_nodes'].values.tolist())]\n rgg_rgg_2_dict['init_mean_deg'] = [statistics.mean(temp_rgg_rgg['init_mean_deg'].values.tolist())]\n \n rgg_rand_2_dict['intra_thres'] = [statistics.mean(temp_rgg_rand['intra_thres'].values.tolist())]\n rgg_rand_2_dict['alive_nodes'] = [statistics.mean(temp_rgg_rand['alive_nodes'].values.tolist())]\n rgg_rand_2_dict['init_mean_deg'] = [statistics.mean(temp_rgg_rand['init_mean_deg'].values.tolist())]\n \n rand_rgg_2_dict['intra_thres'] = [statistics.mean(temp_rand_rgg['intra_thres'].values.tolist())]\n rand_rgg_2_dict['alive_nodes'] = [statistics.mean(temp_rand_rgg['alive_nodes'].values.tolist())]\n rand_rgg_2_dict['init_mean_deg'] = [statistics.mean(temp_rand_rgg['init_mean_deg'].values.tolist())]\n \n rand_rand_2_dict['intra_thres'] = [statistics.mean(temp_rand_rand['intra_thres'].values.tolist())]\n rand_rand_2_dict['alive_nodes'] = [statistics.mean(temp_rand_rand['alive_nodes'].values.tolist())]\n rand_rand_2_dict['init_mean_deg'] = [statistics.mean(temp_rand_rand['init_mean_deg'].values.tolist())]\n else:\n rgg_rgg_2_dict['intra_thres'].append(statistics.mean(temp_rgg_rgg['intra_thres'].values.tolist()))\n rgg_rgg_2_dict['alive_nodes'].append(statistics.mean(temp_rgg_rgg['alive_nodes'].values.tolist()))\n rgg_rgg_2_dict['init_mean_deg'].append(statistics.mean(temp_rgg_rgg['init_mean_deg'].values.tolist()))\n \n rgg_rand_2_dict['intra_thres'].append(statistics.mean(temp_rgg_rand['intra_thres'].values.tolist()))\n rgg_rand_2_dict['alive_nodes'].append(statistics.mean(temp_rgg_rand['alive_nodes'].values.tolist()))\n rgg_rand_2_dict['init_mean_deg'].append(statistics.mean(temp_rgg_rand['init_mean_deg'].values.tolist()))\n \n rand_rgg_2_dict['intra_thres'].append(statistics.mean(temp_rand_rgg['intra_thres'].values.tolist()))\n rand_rgg_2_dict['alive_nodes'].append(statistics.mean(temp_rand_rgg['alive_nodes'].values.tolist()))\n rand_rgg_2_dict['init_mean_deg'].append(statistics.mean(temp_rand_rgg['init_mean_deg'].values.tolist()))\n \n rand_rand_2_dict['intra_thres'].append(statistics.mean(temp_rand_rand['intra_thres'].values.tolist()))\n rand_rand_2_dict['alive_nodes'].append(statistics.mean(temp_rand_rand['alive_nodes'].values.tolist()))\n rand_rand_2_dict['init_mean_deg'].append(statistics.mean(temp_rand_rand['init_mean_deg'].values.tolist()))", "_____no_output_____" ], [ "plt.plot(rgg_rgg_2_dict['intra_thres'], rgg_rgg_2_dict['alive_nodes'])\nplt.plot(rgg_rgg_2_dict['intra_thres'], rgg_rand_2_dict['alive_nodes'])\nplt.plot(rgg_rgg_2_dict['intra_thres'], rand_rgg_2_dict['alive_nodes'])\nplt.plot(rgg_rgg_2_dict['intra_thres'], rand_rand_2_dict['alive_nodes'])\nplt.legend(['RGG-RGG', 'RGG-Rand', 'Rand-RGG', 'Rand-Rand'])\nplt.title('Mean Alive nodes')\nplt.show()", "_____no_output_____" ], [ "p = 0.9\n\nplt.plot([p * i for i in rgg_rgg_2_dict['init_mean_deg']], rgg_rgg_2_dict['alive_nodes'])\nplt.plot([p * i for i in rgg_rgg_2_dict['init_mean_deg']], rgg_rand_2_dict['alive_nodes'])\nplt.plot([p * i for i in rgg_rgg_2_dict['init_mean_deg']], rand_rgg_2_dict['alive_nodes'])\nplt.plot([p * i for i in rgg_rgg_2_dict['init_mean_deg']], rand_rand_2_dict['alive_nodes'])\nplt.legend(['RGG-RGG', 'RGG-Rand', 'Rand-RGG', 'Rand-Rand'])\nplt.title('Mean 
Alive nodes')\nplt.show()", "_____no_output_____" ] ], [ [ "## buldy_RGG_rep30_03_0005", "_____no_output_____" ] ], [ [ "buldy_RGG_rep30_03_0005_rgg_rgg_dict = {}\nbuldy_RGG_rep30_03_0005_rgg_rand_dict = {}\nbuldy_RGG_rep30_03_0005_rand_rgg_dict = {}\nbuldy_RGG_rep30_03_0005_rand_rand_dict = {}\n\nfor i in range(100):\n target = list(range(i*30, (i+1)*30))\n \n temp_rgg_rgg = buldy_RGG_rep30_03_0005_rgg_rgg_data[i*30 : (i+1)*30]\n temp_rgg_rand = buldy_RGG_rep30_03_0005_rgg_rand_data[i*30 : (i+1)*30]\n temp_rand_rgg = buldy_RGG_rep30_03_0005_rand_rgg_data[i*30 : (i+1)*30]\n temp_rand_rand = buldy_RGG_rep30_03_0005_rand_rand_data[i*30 : (i+1)*30]\n \n rgg_rgg_alive = 0\n rgg_rand_alive = 0\n rand_rgg_alive = 0\n rand_rand_alive = 0\n for index in target:\n if (temp_rgg_rgg['alive_nodes'][index] != 0) and (temp_rgg_rgg['fin_larg_comp'][index] != 0):\n rgg_rgg_alive += 1\n if (temp_rgg_rand['alive_nodes'][index] != 0) and (temp_rgg_rand['fin_larg_comp'][index] != 0):\n rgg_rand_alive += 1\n if (temp_rand_rgg['alive_nodes'][index] != 0) and (temp_rand_rgg['fin_larg_comp'][index] != 0):\n rand_rgg_alive += 1\n if (temp_rand_rand['alive_nodes'][index] != 0) and (temp_rand_rand['fin_larg_comp'][index] != 0):\n rand_rand_alive += 1\n \n if i == 0:\n buldy_RGG_rep30_03_0005_rgg_rgg_dict['intra_thres'] = [statistics.mean(temp_rgg_rgg['intra_thres'].values.tolist())]\n buldy_RGG_rep30_03_0005_rgg_rgg_dict['alive_nodes'] = [statistics.mean(temp_rgg_rgg['alive_nodes'].values.tolist())]\n buldy_RGG_rep30_03_0005_rgg_rgg_dict['init_mean_deg'] = [statistics.mean(temp_rgg_rgg['init_mean_deg'].values.tolist())]\n buldy_RGG_rep30_03_0005_rgg_rgg_dict['alive ratio'] = [rgg_rgg_alive / 30]\n \n buldy_RGG_rep30_03_0005_rgg_rand_dict['intra_thres'] = [statistics.mean(temp_rgg_rand['intra_thres'].values.tolist())]\n buldy_RGG_rep30_03_0005_rgg_rand_dict['alive_nodes'] = [statistics.mean(temp_rgg_rand['alive_nodes'].values.tolist())]\n buldy_RGG_rep30_03_0005_rgg_rand_dict['init_mean_deg'] = [statistics.mean(temp_rgg_rand['init_mean_deg'].values.tolist())]\n buldy_RGG_rep30_03_0005_rgg_rand_dict['alive ratio'] = [rgg_rand_alive / 30]\n \n buldy_RGG_rep30_03_0005_rand_rgg_dict['intra_thres'] = [statistics.mean(temp_rand_rgg['intra_thres'].values.tolist())]\n buldy_RGG_rep30_03_0005_rand_rgg_dict['alive_nodes'] = [statistics.mean(temp_rand_rgg['alive_nodes'].values.tolist())]\n buldy_RGG_rep30_03_0005_rand_rgg_dict['init_mean_deg'] = [statistics.mean(temp_rand_rgg['init_mean_deg'].values.tolist())]\n buldy_RGG_rep30_03_0005_rand_rgg_dict['alive ratio'] = [rand_rgg_alive / 30]\n \n buldy_RGG_rep30_03_0005_rand_rand_dict['intra_thres'] = [statistics.mean(temp_rand_rand['intra_thres'].values.tolist())]\n buldy_RGG_rep30_03_0005_rand_rand_dict['alive_nodes'] = [statistics.mean(temp_rand_rand['alive_nodes'].values.tolist())]\n buldy_RGG_rep30_03_0005_rand_rand_dict['init_mean_deg'] = [statistics.mean(temp_rand_rand['init_mean_deg'].values.tolist())]\n buldy_RGG_rep30_03_0005_rand_rand_dict['alive ratio'] = [rand_rand_alive / 30]\n else:\n buldy_RGG_rep30_03_0005_rgg_rgg_dict['intra_thres'].append(statistics.mean(temp_rgg_rgg['intra_thres'].values.tolist()))\n buldy_RGG_rep30_03_0005_rgg_rgg_dict['alive_nodes'].append(statistics.mean(temp_rgg_rgg['alive_nodes'].values.tolist()))\n buldy_RGG_rep30_03_0005_rgg_rgg_dict['init_mean_deg'].append(statistics.mean(temp_rgg_rgg['init_mean_deg'].values.tolist()))\n buldy_RGG_rep30_03_0005_rgg_rgg_dict['alive ratio'].append(rgg_rgg_alive / 30)\n \n 
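# 'alive ratio' above is the fraction of the 30 replicates at this threshold whose final state kept both a non-zero node count and a non-zero mutual giant component.\n # Hedged alternative sketch (assuming numpy is imported as np and the column names match): the same per-threshold\n # means could be taken with a pandas groupby instead of the if/else bookkeeping, e.g.\n # buldy_RGG_rep30_03_0005_rgg_rgg_data.groupby(np.arange(len(buldy_RGG_rep30_03_0005_rgg_rgg_data)) // 30)[['intra_thres', 'alive_nodes', 'init_mean_deg']].mean()\n # (the survival fraction would still need its own reduction); kept as a comment so this cell's behaviour is unchanged.\n 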
buldy_RGG_rep30_03_0005_rgg_rand_dict['intra_thres'].append(statistics.mean(temp_rgg_rand['intra_thres'].values.tolist()))\n buldy_RGG_rep30_03_0005_rgg_rand_dict['alive_nodes'].append(statistics.mean(temp_rgg_rand['alive_nodes'].values.tolist()))\n buldy_RGG_rep30_03_0005_rgg_rand_dict['init_mean_deg'].append(statistics.mean(temp_rgg_rand['init_mean_deg'].values.tolist()))\n buldy_RGG_rep30_03_0005_rgg_rand_dict['alive ratio'].append(rgg_rand_alive / 30)\n \n buldy_RGG_rep30_03_0005_rand_rgg_dict['intra_thres'].append(statistics.mean(temp_rand_rgg['intra_thres'].values.tolist()))\n buldy_RGG_rep30_03_0005_rand_rgg_dict['alive_nodes'].append(statistics.mean(temp_rand_rgg['alive_nodes'].values.tolist()))\n buldy_RGG_rep30_03_0005_rand_rgg_dict['init_mean_deg'].append(statistics.mean(temp_rand_rgg['init_mean_deg'].values.tolist()))\n buldy_RGG_rep30_03_0005_rand_rgg_dict['alive ratio'].append(rand_rgg_alive / 30)\n \n buldy_RGG_rep30_03_0005_rand_rand_dict['intra_thres'].append(statistics.mean(temp_rand_rand['intra_thres'].values.tolist()))\n buldy_RGG_rep30_03_0005_rand_rand_dict['alive_nodes'].append(statistics.mean(temp_rand_rand['alive_nodes'].values.tolist()))\n buldy_RGG_rep30_03_0005_rand_rand_dict['init_mean_deg'].append(statistics.mean(temp_rand_rand['init_mean_deg'].values.tolist()))\n buldy_RGG_rep30_03_0005_rand_rand_dict['alive ratio'].append(rand_rand_alive / 30)", "_____no_output_____" ], [ "plt.plot(buldy_RGG_rep30_03_0005_rgg_rgg_dict['intra_thres'], buldy_RGG_rep30_03_0005_rgg_rgg_dict['alive_nodes'])\nplt.plot(buldy_RGG_rep30_03_0005_rgg_rgg_dict['intra_thres'], buldy_RGG_rep30_03_0005_rgg_rand_dict['alive_nodes'])\nplt.plot(buldy_RGG_rep30_03_0005_rgg_rgg_dict['intra_thres'], buldy_RGG_rep30_03_0005_rand_rgg_dict['alive_nodes'])\nplt.plot(buldy_RGG_rep30_03_0005_rgg_rgg_dict['intra_thres'], buldy_RGG_rep30_03_0005_rand_rand_dict['alive_nodes'])\nplt.legend(['RGG-RGG', 'RGG-Rand', 'Rand-RGG', 'Rand-Rand'])\nplt.title('Mean Alive nodes')\nplt.show()", "_____no_output_____" ], [ "p = 0.9\n\nplt.plot([p * i for i in buldy_RGG_rep30_03_0005_rgg_rgg_dict['init_mean_deg']], buldy_RGG_rep30_03_0005_rgg_rgg_dict['alive_nodes'])\nplt.plot([p * i for i in buldy_RGG_rep30_03_0005_rgg_rgg_dict['init_mean_deg']], buldy_RGG_rep30_03_0005_rgg_rand_dict['alive_nodes'])\nplt.plot([p * i for i in buldy_RGG_rep30_03_0005_rgg_rgg_dict['init_mean_deg']], buldy_RGG_rep30_03_0005_rand_rgg_dict['alive_nodes'])\nplt.plot([p * i for i in buldy_RGG_rep30_03_0005_rgg_rgg_dict['init_mean_deg']], buldy_RGG_rep30_03_0005_rand_rand_dict['alive_nodes'])\nplt.legend(['RGG-RGG', 'RGG-Rand', 'Rand-RGG', 'Rand-Rand'])\nplt.title('500 Nodes, 2 Layers, 50 attack size')\nplt.xlabel('p<k>')\nplt.ylabel('mean alive nodes')\nplt.savefig('buldy_RGG_rep30_03_0005.png')\nplt.show()", "_____no_output_____" ], [ "p = 0.9\n\nplt.plot([p * i for i in buldy_RGG_rep30_03_0005_rgg_rgg_dict['init_mean_deg']], buldy_RGG_rep30_03_0005_rgg_rgg_dict['alive ratio'])\nplt.plot([p * i for i in buldy_RGG_rep30_03_0005_rgg_rgg_dict['init_mean_deg']], buldy_RGG_rep30_03_0005_rgg_rand_dict['alive ratio'])\nplt.plot([p * i for i in buldy_RGG_rep30_03_0005_rgg_rgg_dict['init_mean_deg']], buldy_RGG_rep30_03_0005_rand_rgg_dict['alive ratio'])\nplt.plot([p * i for i in buldy_RGG_rep30_03_0005_rgg_rgg_dict['init_mean_deg']], buldy_RGG_rep30_03_0005_rand_rand_dict['alive ratio'])\nplt.legend(['RGG-RGG', 'RGG-Rand', 'Rand-RGG', 'Rand-Rand'])\nplt.title('500 Nodes, 2 Layers, 50 attack size')\nplt.xlabel('p<k>')\nplt.ylabel('alive 
ratio')\nplt.savefig('buldy_RGG_rep30_03_0005_ratio.png')\nplt.show()", "_____no_output_____" ] ], [ [ "## buldy_RGG_100_rep30_03_0005", "_____no_output_____" ] ], [ [ "buldy_RGG_100_rep30_03_0005_rgg_rgg_dict = {}\nbuldy_RGG_100_rep30_03_0005_rgg_rand_dict = {}\nbuldy_RGG_100_rep30_03_0005_rand_rgg_dict = {}\nbuldy_RGG_100_rep30_03_0005_rand_rand_dict = {}\n\nfor i in range(100):\n target = list(range(i*30, (i+1)*30))\n \n temp_rgg_rgg = buldy_RGG_100_rep30_03_0005_rgg_rgg_data[i*30 : (i+1)*30]\n temp_rgg_rand = buldy_RGG_100_rep30_03_0005_rgg_rand_data[i*30 : (i+1)*30]\n temp_rand_rgg = buldy_RGG_100_rep30_03_0005_rand_rgg_data[i*30 : (i+1)*30]\n temp_rand_rand = buldy_RGG_100_rep30_03_0005_rand_rand_data[i*30 : (i+1)*30]\n \n rgg_rgg_alive = 0\n rgg_rand_alive = 0\n rand_rgg_alive = 0\n rand_rand_alive = 0\n for index in target:\n if (temp_rgg_rgg['alive_nodes'][index] != 0) and (temp_rgg_rgg['fin_larg_comp'][index] != 0):\n rgg_rgg_alive += 1\n if (temp_rgg_rand['alive_nodes'][index] != 0) and (temp_rgg_rand['fin_larg_comp'][index] != 0):\n rgg_rand_alive += 1\n if (temp_rand_rgg['alive_nodes'][index] != 0) and (temp_rand_rgg['fin_larg_comp'][index] != 0):\n rand_rgg_alive += 1\n if (temp_rand_rand['alive_nodes'][index] != 0) and (temp_rand_rand['fin_larg_comp'][index] != 0):\n rand_rand_alive += 1\n \n if i == 0:\n buldy_RGG_100_rep30_03_0005_rgg_rgg_dict['intra_thres'] = [statistics.mean(temp_rgg_rgg['intra_thres'].values.tolist())]\n buldy_RGG_100_rep30_03_0005_rgg_rgg_dict['alive_nodes'] = [statistics.mean(temp_rgg_rgg['alive_nodes'].values.tolist())]\n buldy_RGG_100_rep30_03_0005_rgg_rgg_dict['init_mean_deg'] = [statistics.mean(temp_rgg_rgg['init_mean_deg'].values.tolist())]\n buldy_RGG_100_rep30_03_0005_rgg_rgg_dict['alive ratio'] = [rgg_rgg_alive / 30]\n \n buldy_RGG_100_rep30_03_0005_rgg_rand_dict['intra_thres'] = [statistics.mean(temp_rgg_rand['intra_thres'].values.tolist())]\n buldy_RGG_100_rep30_03_0005_rgg_rand_dict['alive_nodes'] = [statistics.mean(temp_rgg_rand['alive_nodes'].values.tolist())]\n buldy_RGG_100_rep30_03_0005_rgg_rand_dict['init_mean_deg'] = [statistics.mean(temp_rgg_rand['init_mean_deg'].values.tolist())]\n buldy_RGG_100_rep30_03_0005_rgg_rand_dict['alive ratio'] = [rgg_rand_alive / 30]\n \n buldy_RGG_100_rep30_03_0005_rand_rgg_dict['intra_thres'] = [statistics.mean(temp_rand_rgg['intra_thres'].values.tolist())]\n buldy_RGG_100_rep30_03_0005_rand_rgg_dict['alive_nodes'] = [statistics.mean(temp_rand_rgg['alive_nodes'].values.tolist())]\n buldy_RGG_100_rep30_03_0005_rand_rgg_dict['init_mean_deg'] = [statistics.mean(temp_rand_rgg['init_mean_deg'].values.tolist())]\n buldy_RGG_100_rep30_03_0005_rand_rgg_dict['alive ratio'] = [rand_rgg_alive / 30]\n \n buldy_RGG_100_rep30_03_0005_rand_rand_dict['intra_thres'] = [statistics.mean(temp_rand_rand['intra_thres'].values.tolist())]\n buldy_RGG_100_rep30_03_0005_rand_rand_dict['alive_nodes'] = [statistics.mean(temp_rand_rand['alive_nodes'].values.tolist())]\n buldy_RGG_100_rep30_03_0005_rand_rand_dict['init_mean_deg'] = [statistics.mean(temp_rand_rand['init_mean_deg'].values.tolist())]\n buldy_RGG_100_rep30_03_0005_rand_rand_dict['alive ratio'] = [rand_rand_alive / 30]\n else:\n buldy_RGG_100_rep30_03_0005_rgg_rgg_dict['intra_thres'].append(statistics.mean(temp_rgg_rgg['intra_thres'].values.tolist()))\n buldy_RGG_100_rep30_03_0005_rgg_rgg_dict['alive_nodes'].append(statistics.mean(temp_rgg_rgg['alive_nodes'].values.tolist()))\n 
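# Same running aggregation as in the 50-attack-size section above: per-threshold means of the raw columns plus the\n # survival fraction. A run counts as 'alive' only if both alive_nodes and fin_larg_comp are non-zero after the\n # cascade, so 'alive ratio' estimates the survival probability over the 30 replicates at each threshold.\n 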
buldy_RGG_100_rep30_03_0005_rgg_rgg_dict['init_mean_deg'].append(statistics.mean(temp_rgg_rgg['init_mean_deg'].values.tolist()))\n buldy_RGG_100_rep30_03_0005_rgg_rgg_dict['alive ratio'].append(rgg_rgg_alive / 30)\n \n buldy_RGG_100_rep30_03_0005_rgg_rand_dict['intra_thres'].append(statistics.mean(temp_rgg_rand['intra_thres'].values.tolist()))\n buldy_RGG_100_rep30_03_0005_rgg_rand_dict['alive_nodes'].append(statistics.mean(temp_rgg_rand['alive_nodes'].values.tolist()))\n buldy_RGG_100_rep30_03_0005_rgg_rand_dict['init_mean_deg'].append(statistics.mean(temp_rgg_rand['init_mean_deg'].values.tolist()))\n buldy_RGG_100_rep30_03_0005_rgg_rand_dict['alive ratio'].append(rgg_rand_alive / 30)\n \n buldy_RGG_100_rep30_03_0005_rand_rgg_dict['intra_thres'].append(statistics.mean(temp_rand_rgg['intra_thres'].values.tolist()))\n buldy_RGG_100_rep30_03_0005_rand_rgg_dict['alive_nodes'].append(statistics.mean(temp_rand_rgg['alive_nodes'].values.tolist()))\n buldy_RGG_100_rep30_03_0005_rand_rgg_dict['init_mean_deg'].append(statistics.mean(temp_rand_rgg['init_mean_deg'].values.tolist()))\n buldy_RGG_100_rep30_03_0005_rand_rgg_dict['alive ratio'].append(rand_rgg_alive / 30)\n \n buldy_RGG_100_rep30_03_0005_rand_rand_dict['intra_thres'].append(statistics.mean(temp_rand_rand['intra_thres'].values.tolist()))\n buldy_RGG_100_rep30_03_0005_rand_rand_dict['alive_nodes'].append(statistics.mean(temp_rand_rand['alive_nodes'].values.tolist()))\n buldy_RGG_100_rep30_03_0005_rand_rand_dict['init_mean_deg'].append(statistics.mean(temp_rand_rand['init_mean_deg'].values.tolist()))\n buldy_RGG_100_rep30_03_0005_rand_rand_dict['alive ratio'].append(rand_rand_alive / 30)", "_____no_output_____" ], [ "plt.plot(buldy_RGG_100_rep30_03_0005_rgg_rgg_dict['intra_thres'], buldy_RGG_100_rep30_03_0005_rgg_rgg_dict['alive_nodes'])\nplt.plot(buldy_RGG_100_rep30_03_0005_rgg_rgg_dict['intra_thres'], buldy_RGG_100_rep30_03_0005_rgg_rand_dict['alive_nodes'])\nplt.plot(buldy_RGG_100_rep30_03_0005_rgg_rgg_dict['intra_thres'], buldy_RGG_100_rep30_03_0005_rand_rgg_dict['alive_nodes'])\nplt.plot(buldy_RGG_100_rep30_03_0005_rgg_rgg_dict['intra_thres'], buldy_RGG_100_rep30_03_0005_rand_rand_dict['alive_nodes'])\nplt.legend(['RGG-RGG', 'RGG-Rand', 'Rand-RGG', 'Rand-Rand'])\nplt.title('Mean Alive nodes')\nplt.show()", "_____no_output_____" ], [ "p = 0.8\n\nplt.plot([p * i for i in buldy_RGG_100_rep30_03_0005_rgg_rgg_dict['init_mean_deg']], buldy_RGG_100_rep30_03_0005_rgg_rgg_dict['alive_nodes'])\nplt.plot([p * i for i in buldy_RGG_100_rep30_03_0005_rgg_rgg_dict['init_mean_deg']], buldy_RGG_100_rep30_03_0005_rgg_rand_dict['alive_nodes'])\nplt.plot([p * i for i in buldy_RGG_100_rep30_03_0005_rgg_rgg_dict['init_mean_deg']], buldy_RGG_100_rep30_03_0005_rand_rgg_dict['alive_nodes'])\nplt.plot([p * i for i in buldy_RGG_100_rep30_03_0005_rgg_rgg_dict['init_mean_deg']], buldy_RGG_100_rep30_03_0005_rand_rand_dict['alive_nodes'])\nplt.legend(['RGG-RGG', 'RGG-Rand', 'Rand-RGG', 'Rand-Rand'])\nplt.title('500 Nodes, 2 Layers, 100 attack size')\nplt.xlabel('p<k>')\nplt.ylabel('mean alive nodes')\nplt.savefig('buldy_RGG_100_rep30_03_0005.png')\nplt.show()", "_____no_output_____" ], [ "p = 0.8\n\nplt.plot([p * i for i in buldy_RGG_100_rep30_03_0005_rgg_rgg_dict['init_mean_deg']], buldy_RGG_100_rep30_03_0005_rgg_rgg_dict['alive ratio'])\nplt.plot([p * i for i in buldy_RGG_100_rep30_03_0005_rgg_rgg_dict['init_mean_deg']], buldy_RGG_100_rep30_03_0005_rgg_rand_dict['alive ratio'])\nplt.plot([p * i for i in buldy_RGG_100_rep30_03_0005_rgg_rgg_dict['init_mean_deg']], 
buldy_RGG_100_rep30_03_0005_rand_rgg_dict['alive ratio'])\nplt.plot([p * i for i in buldy_RGG_100_rep30_03_0005_rgg_rgg_dict['init_mean_deg']], buldy_RGG_100_rep30_03_0005_rand_rand_dict['alive ratio'])\nplt.legend(['RGG-RGG', 'RGG-Rand', 'Rand-RGG', 'Rand-Rand'])\nplt.title('500 Nodes, 2 Layers, 100 attack size')\nplt.xlabel('p<k>')\nplt.ylabel('alive ratio')\nplt.savefig('buldy_RGG_100_rep30_03_0005_ratio.png')\nplt.show()", "_____no_output_____" ] ], [ [ "## buldy_RGG_200_rep30_03_0005", "_____no_output_____" ] ], [ [ "buldy_RGG_200_rep30_03_0005_rgg_rgg_dict = {}\nbuldy_RGG_200_rep30_03_0005_rgg_rand_dict = {}\nbuldy_RGG_200_rep30_03_0005_rand_rgg_dict = {}\nbuldy_RGG_200_rep30_03_0005_rand_rand_dict = {}\n\nfor i in range(100):\n target = list(range(i*30, (i+1)*30))\n \n temp_rgg_rgg = buldy_RGG_200_rep30_03_0005_rgg_rgg_data[i*30 : (i+1)*30]\n temp_rgg_rand = buldy_RGG_200_rep30_03_0005_rgg_rand_data[i*30 : (i+1)*30]\n temp_rand_rgg = buldy_RGG_200_rep30_03_0005_rand_rgg_data[i*30 : (i+1)*30]\n temp_rand_rand = buldy_RGG_200_rep30_03_0005_rand_rand_data[i*30 : (i+1)*30]\n \n rgg_rgg_alive = 0\n rgg_rand_alive = 0\n rand_rgg_alive = 0\n rand_rand_alive = 0\n for index in target:\n if (temp_rgg_rgg['alive_nodes'][index] != 0) and (temp_rgg_rgg['fin_larg_comp'][index] != 0):\n rgg_rgg_alive += 1\n if (temp_rgg_rand['alive_nodes'][index] != 0) and (temp_rgg_rand['fin_larg_comp'][index] != 0):\n rgg_rand_alive += 1\n if (temp_rand_rgg['alive_nodes'][index] != 0) and (temp_rand_rgg['fin_larg_comp'][index] != 0):\n rand_rgg_alive += 1\n if (temp_rand_rand['alive_nodes'][index] != 0) and (temp_rand_rand['fin_larg_comp'][index] != 0):\n rand_rand_alive += 1\n \n if i == 0:\n buldy_RGG_200_rep30_03_0005_rgg_rgg_dict['intra_thres'] = [statistics.mean(temp_rgg_rgg['intra_thres'].values.tolist())]\n buldy_RGG_200_rep30_03_0005_rgg_rgg_dict['alive_nodes'] = [statistics.mean(temp_rgg_rgg['alive_nodes'].values.tolist())]\n buldy_RGG_200_rep30_03_0005_rgg_rgg_dict['init_mean_deg'] = [statistics.mean(temp_rgg_rgg['init_mean_deg'].values.tolist())]\n buldy_RGG_200_rep30_03_0005_rgg_rgg_dict['alive ratio'] = [rgg_rgg_alive / 30]\n \n buldy_RGG_200_rep30_03_0005_rgg_rand_dict['intra_thres'] = [statistics.mean(temp_rgg_rand['intra_thres'].values.tolist())]\n buldy_RGG_200_rep30_03_0005_rgg_rand_dict['alive_nodes'] = [statistics.mean(temp_rgg_rand['alive_nodes'].values.tolist())]\n buldy_RGG_200_rep30_03_0005_rgg_rand_dict['init_mean_deg'] = [statistics.mean(temp_rgg_rand['init_mean_deg'].values.tolist())]\n buldy_RGG_200_rep30_03_0005_rgg_rand_dict['alive ratio'] = [rgg_rand_alive / 30]\n \n buldy_RGG_200_rep30_03_0005_rand_rgg_dict['intra_thres'] = [statistics.mean(temp_rand_rgg['intra_thres'].values.tolist())]\n buldy_RGG_200_rep30_03_0005_rand_rgg_dict['alive_nodes'] = [statistics.mean(temp_rand_rgg['alive_nodes'].values.tolist())]\n buldy_RGG_200_rep30_03_0005_rand_rgg_dict['init_mean_deg'] = [statistics.mean(temp_rand_rgg['init_mean_deg'].values.tolist())]\n buldy_RGG_200_rep30_03_0005_rand_rgg_dict['alive ratio'] = [rand_rgg_alive / 30]\n \n buldy_RGG_200_rep30_03_0005_rand_rand_dict['intra_thres'] = [statistics.mean(temp_rand_rand['intra_thres'].values.tolist())]\n buldy_RGG_200_rep30_03_0005_rand_rand_dict['alive_nodes'] = [statistics.mean(temp_rand_rand['alive_nodes'].values.tolist())]\n buldy_RGG_200_rep30_03_0005_rand_rand_dict['init_mean_deg'] = [statistics.mean(temp_rand_rand['init_mean_deg'].values.tolist())]\n buldy_RGG_200_rep30_03_0005_rand_rand_dict['alive ratio'] = [rand_rand_alive / 
30]\n else:\n buldy_RGG_200_rep30_03_0005_rgg_rgg_dict['intra_thres'].append(statistics.mean(temp_rgg_rgg['intra_thres'].values.tolist()))\n buldy_RGG_200_rep30_03_0005_rgg_rgg_dict['alive_nodes'].append(statistics.mean(temp_rgg_rgg['alive_nodes'].values.tolist()))\n buldy_RGG_200_rep30_03_0005_rgg_rgg_dict['init_mean_deg'].append(statistics.mean(temp_rgg_rgg['init_mean_deg'].values.tolist()))\n buldy_RGG_200_rep30_03_0005_rgg_rgg_dict['alive ratio'].append(rgg_rgg_alive / 30)\n \n buldy_RGG_200_rep30_03_0005_rgg_rand_dict['intra_thres'].append(statistics.mean(temp_rgg_rand['intra_thres'].values.tolist()))\n buldy_RGG_200_rep30_03_0005_rgg_rand_dict['alive_nodes'].append(statistics.mean(temp_rgg_rand['alive_nodes'].values.tolist()))\n buldy_RGG_200_rep30_03_0005_rgg_rand_dict['init_mean_deg'].append(statistics.mean(temp_rgg_rand['init_mean_deg'].values.tolist()))\n buldy_RGG_200_rep30_03_0005_rgg_rand_dict['alive ratio'].append(rgg_rand_alive / 30)\n \n buldy_RGG_200_rep30_03_0005_rand_rgg_dict['intra_thres'].append(statistics.mean(temp_rand_rgg['intra_thres'].values.tolist()))\n buldy_RGG_200_rep30_03_0005_rand_rgg_dict['alive_nodes'].append(statistics.mean(temp_rand_rgg['alive_nodes'].values.tolist()))\n buldy_RGG_200_rep30_03_0005_rand_rgg_dict['init_mean_deg'].append(statistics.mean(temp_rand_rgg['init_mean_deg'].values.tolist()))\n buldy_RGG_200_rep30_03_0005_rand_rgg_dict['alive ratio'].append(rand_rgg_alive / 30)\n \n buldy_RGG_200_rep30_03_0005_rand_rand_dict['intra_thres'].append(statistics.mean(temp_rand_rand['intra_thres'].values.tolist()))\n buldy_RGG_200_rep30_03_0005_rand_rand_dict['alive_nodes'].append(statistics.mean(temp_rand_rand['alive_nodes'].values.tolist()))\n buldy_RGG_200_rep30_03_0005_rand_rand_dict['init_mean_deg'].append(statistics.mean(temp_rand_rand['init_mean_deg'].values.tolist()))\n buldy_RGG_200_rep30_03_0005_rand_rand_dict['alive ratio'].append(rand_rand_alive / 30)", "_____no_output_____" ], [ "plt.plot(buldy_RGG_200_rep30_03_0005_rgg_rgg_dict['intra_thres'], buldy_RGG_200_rep30_03_0005_rgg_rgg_dict['alive_nodes'])\nplt.plot(buldy_RGG_200_rep30_03_0005_rgg_rgg_dict['intra_thres'], buldy_RGG_200_rep30_03_0005_rgg_rand_dict['alive_nodes'])\nplt.plot(buldy_RGG_200_rep30_03_0005_rgg_rgg_dict['intra_thres'], buldy_RGG_200_rep30_03_0005_rand_rgg_dict['alive_nodes'])\nplt.plot(buldy_RGG_200_rep30_03_0005_rgg_rgg_dict['intra_thres'], buldy_RGG_200_rep30_03_0005_rand_rand_dict['alive_nodes'])\nplt.legend(['RGG-RGG', 'RGG-Rand', 'Rand-RGG', 'Rand-Rand'])\nplt.title('Mean Alive nodes')\nplt.show()", "_____no_output_____" ], [ "p = 0.6\n\nplt.plot([p * i for i in buldy_RGG_200_rep30_03_0005_rgg_rgg_dict['init_mean_deg']], buldy_RGG_200_rep30_03_0005_rgg_rgg_dict['alive_nodes'])\nplt.plot([p * i for i in buldy_RGG_200_rep30_03_0005_rgg_rgg_dict['init_mean_deg']], buldy_RGG_200_rep30_03_0005_rgg_rand_dict['alive_nodes'])\nplt.plot([p * i for i in buldy_RGG_200_rep30_03_0005_rgg_rgg_dict['init_mean_deg']], buldy_RGG_200_rep30_03_0005_rand_rgg_dict['alive_nodes'])\nplt.plot([p * i for i in buldy_RGG_200_rep30_03_0005_rgg_rgg_dict['init_mean_deg']], buldy_RGG_200_rep30_03_0005_rand_rand_dict['alive_nodes'])\nplt.legend(['RGG-RGG', 'RGG-Rand', 'Rand-RGG', 'Rand-Rand'])\nplt.title('500 Nodes, 2 Layers, 200 attack size')\nplt.xlabel('p<k>')\nplt.ylabel('mean alive nodes')\nplt.savefig('buldy_RGG_200_rep30_03_0005.png')\nplt.show()", "_____no_output_____" ], [ "p = 0.6\n\nplt.plot([p * i for i in buldy_RGG_200_rep30_03_0005_rgg_rgg_dict['init_mean_deg']], 
buldy_RGG_200_rep30_03_0005_rgg_rgg_dict['alive ratio'])\nplt.plot([p * i for i in buldy_RGG_200_rep30_03_0005_rgg_rgg_dict['init_mean_deg']], buldy_RGG_200_rep30_03_0005_rgg_rand_dict['alive ratio'])\nplt.plot([p * i for i in buldy_RGG_200_rep30_03_0005_rgg_rgg_dict['init_mean_deg']], buldy_RGG_200_rep30_03_0005_rand_rgg_dict['alive ratio'])\nplt.plot([p * i for i in buldy_RGG_200_rep30_03_0005_rgg_rgg_dict['init_mean_deg']], buldy_RGG_200_rep30_03_0005_rand_rand_dict['alive ratio'])\nplt.legend(['RGG-RGG', 'RGG-Rand', 'Rand-RGG', 'Rand-Rand'])\nplt.title('500 Nodes, 2 Layers, 200 attack size')\nplt.xlabel('p<k>')\nplt.ylabel('alive ratio')\nplt.savefig('buldy_RGG_200_rep30_03_0005_ratio.png')\nplt.show()", "_____no_output_____" ] ], [ [ "## buldy_RGG_30_rep30_04_0007", "_____no_output_____" ] ], [ [ "buldy_RGG_30_rep30_04_0007_rgg_rgg_dict = {}\nbuldy_RGG_30_rep30_04_0007_rgg_rand_dict = {}\nbuldy_RGG_30_rep30_04_0007_rand_rgg_dict = {}\nbuldy_RGG_30_rep30_04_0007_rand_rand_dict = {}\n\nfor i in range(100):\n target = list(range(i*30, (i+1)*30))\n \n temp_rgg_rgg = buldy_RGG_30_rep30_04_0007_rgg_rgg_data[i*30 : (i+1)*30]\n temp_rgg_rand = buldy_RGG_30_rep30_04_0007_rgg_rand_data[i*30 : (i+1)*30]\n temp_rand_rgg = buldy_RGG_30_rep30_04_0007_rand_rgg_data[i*30 : (i+1)*30]\n temp_rand_rand = buldy_RGG_30_rep30_04_0007_rand_rand_data[i*30 : (i+1)*30]\n \n rgg_rgg_alive = 0\n rgg_rand_alive = 0\n rand_rgg_alive = 0\n rand_rand_alive = 0\n for index in target:\n if (temp_rgg_rgg['alive_nodes'][index] != 0) and (temp_rgg_rgg['fin_larg_comp'][index] != 0):\n rgg_rgg_alive += 1\n if (temp_rgg_rand['alive_nodes'][index] != 0) and (temp_rgg_rand['fin_larg_comp'][index] != 0):\n rgg_rand_alive += 1\n if (temp_rand_rgg['alive_nodes'][index] != 0) and (temp_rand_rgg['fin_larg_comp'][index] != 0):\n rand_rgg_alive += 1\n if (temp_rand_rand['alive_nodes'][index] != 0) and (temp_rand_rand['fin_larg_comp'][index] != 0):\n rand_rand_alive += 1\n \n if i == 0:\n buldy_RGG_30_rep30_04_0007_rgg_rgg_dict['intra_thres'] = [statistics.mean(temp_rgg_rgg['intra_thres'].values.tolist())]\n buldy_RGG_30_rep30_04_0007_rgg_rgg_dict['alive_nodes'] = [statistics.mean(temp_rgg_rgg['alive_nodes'].values.tolist())]\n buldy_RGG_30_rep30_04_0007_rgg_rgg_dict['init_mean_deg'] = [statistics.mean(temp_rgg_rgg['init_mean_deg'].values.tolist())]\n buldy_RGG_30_rep30_04_0007_rgg_rgg_dict['alive ratio'] = [rgg_rgg_alive / 30]\n \n buldy_RGG_30_rep30_04_0007_rgg_rand_dict['intra_thres'] = [statistics.mean(temp_rgg_rand['intra_thres'].values.tolist())]\n buldy_RGG_30_rep30_04_0007_rgg_rand_dict['alive_nodes'] = [statistics.mean(temp_rgg_rand['alive_nodes'].values.tolist())]\n buldy_RGG_30_rep30_04_0007_rgg_rand_dict['init_mean_deg'] = [statistics.mean(temp_rgg_rand['init_mean_deg'].values.tolist())]\n buldy_RGG_30_rep30_04_0007_rgg_rand_dict['alive ratio'] = [rgg_rand_alive / 30]\n \n buldy_RGG_30_rep30_04_0007_rand_rgg_dict['intra_thres'] = [statistics.mean(temp_rand_rgg['intra_thres'].values.tolist())]\n buldy_RGG_30_rep30_04_0007_rand_rgg_dict['alive_nodes'] = [statistics.mean(temp_rand_rgg['alive_nodes'].values.tolist())]\n buldy_RGG_30_rep30_04_0007_rand_rgg_dict['init_mean_deg'] = [statistics.mean(temp_rand_rgg['init_mean_deg'].values.tolist())]\n buldy_RGG_30_rep30_04_0007_rand_rgg_dict['alive ratio'] = [rand_rgg_alive / 30]\n \n buldy_RGG_30_rep30_04_0007_rand_rand_dict['intra_thres'] = [statistics.mean(temp_rand_rand['intra_thres'].values.tolist())]\n buldy_RGG_30_rep30_04_0007_rand_rand_dict['alive_nodes'] = 
[statistics.mean(temp_rand_rand['alive_nodes'].values.tolist())]\n buldy_RGG_30_rep30_04_0007_rand_rand_dict['init_mean_deg'] = [statistics.mean(temp_rand_rand['init_mean_deg'].values.tolist())]\n buldy_RGG_30_rep30_04_0007_rand_rand_dict['alive ratio'] = [rand_rand_alive / 30]\n else:\n buldy_RGG_30_rep30_04_0007_rgg_rgg_dict['intra_thres'].append(statistics.mean(temp_rgg_rgg['intra_thres'].values.tolist()))\n buldy_RGG_30_rep30_04_0007_rgg_rgg_dict['alive_nodes'].append(statistics.mean(temp_rgg_rgg['alive_nodes'].values.tolist()))\n buldy_RGG_30_rep30_04_0007_rgg_rgg_dict['init_mean_deg'].append(statistics.mean(temp_rgg_rgg['init_mean_deg'].values.tolist()))\n buldy_RGG_30_rep30_04_0007_rgg_rgg_dict['alive ratio'].append(rgg_rgg_alive / 30)\n \n buldy_RGG_30_rep30_04_0007_rgg_rand_dict['intra_thres'].append(statistics.mean(temp_rgg_rand['intra_thres'].values.tolist()))\n buldy_RGG_30_rep30_04_0007_rgg_rand_dict['alive_nodes'].append(statistics.mean(temp_rgg_rand['alive_nodes'].values.tolist()))\n buldy_RGG_30_rep30_04_0007_rgg_rand_dict['init_mean_deg'].append(statistics.mean(temp_rgg_rand['init_mean_deg'].values.tolist()))\n buldy_RGG_30_rep30_04_0007_rgg_rand_dict['alive ratio'].append(rgg_rand_alive / 30)\n \n buldy_RGG_30_rep30_04_0007_rand_rgg_dict['intra_thres'].append(statistics.mean(temp_rand_rgg['intra_thres'].values.tolist()))\n buldy_RGG_30_rep30_04_0007_rand_rgg_dict['alive_nodes'].append(statistics.mean(temp_rand_rgg['alive_nodes'].values.tolist()))\n buldy_RGG_30_rep30_04_0007_rand_rgg_dict['init_mean_deg'].append(statistics.mean(temp_rand_rgg['init_mean_deg'].values.tolist()))\n buldy_RGG_30_rep30_04_0007_rand_rgg_dict['alive ratio'].append(rand_rgg_alive / 30)\n \n buldy_RGG_30_rep30_04_0007_rand_rand_dict['intra_thres'].append(statistics.mean(temp_rand_rand['intra_thres'].values.tolist()))\n buldy_RGG_30_rep30_04_0007_rand_rand_dict['alive_nodes'].append(statistics.mean(temp_rand_rand['alive_nodes'].values.tolist()))\n buldy_RGG_30_rep30_04_0007_rand_rand_dict['init_mean_deg'].append(statistics.mean(temp_rand_rand['init_mean_deg'].values.tolist()))\n buldy_RGG_30_rep30_04_0007_rand_rand_dict['alive ratio'].append(rand_rand_alive / 30)", "_____no_output_____" ], [ "plt.plot(buldy_RGG_30_rep30_04_0007_rgg_rgg_dict['intra_thres'], buldy_RGG_30_rep30_04_0007_rgg_rgg_dict['alive_nodes'])\nplt.plot(buldy_RGG_30_rep30_04_0007_rgg_rgg_dict['intra_thres'], buldy_RGG_30_rep30_04_0007_rgg_rand_dict['alive_nodes'])\nplt.plot(buldy_RGG_30_rep30_04_0007_rgg_rgg_dict['intra_thres'], buldy_RGG_30_rep30_04_0007_rand_rgg_dict['alive_nodes'])\nplt.plot(buldy_RGG_30_rep30_04_0007_rgg_rgg_dict['intra_thres'], buldy_RGG_30_rep30_04_0007_rand_rand_dict['alive_nodes'])\nplt.legend(['RGG-RGG', 'RGG-Rand', 'Rand-RGG', 'Rand-Rand'])\nplt.title('Mean Alive nodes')\nplt.show()", "_____no_output_____" ], [ "p = 0.9\n\nplt.plot([p * i for i in buldy_RGG_30_rep30_04_0007_rgg_rgg_dict['init_mean_deg']], buldy_RGG_30_rep30_04_0007_rgg_rgg_dict['alive_nodes'])\nplt.plot([p * i for i in buldy_RGG_30_rep30_04_0007_rgg_rgg_dict['init_mean_deg']], buldy_RGG_30_rep30_04_0007_rgg_rand_dict['alive_nodes'])\nplt.plot([p * i for i in buldy_RGG_30_rep30_04_0007_rgg_rgg_dict['init_mean_deg']], buldy_RGG_30_rep30_04_0007_rand_rgg_dict['alive_nodes'])\nplt.plot([p * i for i in buldy_RGG_30_rep30_04_0007_rgg_rgg_dict['init_mean_deg']], buldy_RGG_30_rep30_04_0007_rand_rand_dict['alive_nodes'])\nplt.title('300 Nodes, 2 Layers, 30 attack size')\nplt.xlabel('p<k>')\nplt.ylabel('mean alive nodes')\nplt.legend(['RGG-RGG', 
'RGG-Rand', 'Rand-RGG', 'Rand-Rand'])\nplt.savefig('buldy_RGG_30_rep30_04_0007')\nplt.show()", "_____no_output_____" ], [ "p = 0.9\n\nplt.plot([p * i for i in buldy_RGG_30_rep30_04_0007_rgg_rgg_dict['init_mean_deg']], buldy_RGG_30_rep30_04_0007_rgg_rgg_dict['alive ratio'])\nplt.plot([p * i for i in buldy_RGG_30_rep30_04_0007_rgg_rgg_dict['init_mean_deg']], buldy_RGG_30_rep30_04_0007_rgg_rand_dict['alive ratio'])\nplt.plot([p * i for i in buldy_RGG_30_rep30_04_0007_rgg_rgg_dict['init_mean_deg']], buldy_RGG_30_rep30_04_0007_rand_rgg_dict['alive ratio'])\nplt.plot([p * i for i in buldy_RGG_30_rep30_04_0007_rgg_rgg_dict['init_mean_deg']], buldy_RGG_30_rep30_04_0007_rand_rand_dict['alive ratio'])\nplt.legend(['RGG-RGG', 'RGG-Rand', 'Rand-RGG', 'Rand-Rand'])\nplt.title('300 Nodes, 2 Layers, 30 attack size')\nplt.xlabel('p<k>')\nplt.ylabel('alive ratio')\nplt.savefig('buldy_RGG_30_rep30_04_0007_ratio')\nplt.show()", "_____no_output_____" ] ], [ [ "## buldy_RGG_50_rep100_045", "_____no_output_____" ] ], [ [ "buldy_RGG_50_rep100_045_far_dead_node = {}\ncum_far_dead_node = {'rgg_rgg': [],\n 'rgg_rand': [],\n 'rand_rgg': [],\n 'rand_rand': []}\n\nfor index in range(len(buldy_RGG_50_rep100_045_rgg_rgg_data.columns) - 21):\n for j in range(100):\n if buldy_RGG_50_rep100_045_rgg_rgg_data['step%d_far_dead_node' % index][j] != 0:\n \n \n if i == 0:\n buldy_RGG_50_rep100_045_far_dead_node['rgg_rgg'] = [statistics.mean(buldy_RGG_50_rep100_045_rgg_rgg_data['step%d_far_dead_node' % index].values.tolist())]\n buldy_RGG_50_rep100_045_far_dead_node['rgg_rand'] = [statistics.mean(buldy_RGG_50_rep100_045_rgg_rand_data['step%d_far_dead_node' % index].values.tolist())]\n buldy_RGG_50_rep100_045_far_dead_node['rand_rgg'] = [statistics.mean(buldy_RGG_50_rep100_045_rand_rgg_data['step%d_far_dead_node' % index].values.tolist())]\n buldy_RGG_50_rep100_045_far_dead_node['rand_rand'] = [statistics.mean(buldy_RGG_50_rep100_045_rand_rand_data['step%d_far_dead_node' % index].values.tolist())]\n else:\n buldy_RGG_50_rep100_045_far_dead_node['rgg_rgg'].append(statistics.mean(buldy_RGG_50_rep100_045_rgg_rgg_data['step%d_far_dead_node' % index].values.tolist()))\n buldy_RGG_50_rep100_045_far_dead_node['rgg_rand'].append(statistics.mean(buldy_RGG_50_rep100_045_rgg_rand_data['step%d_far_dead_node' % index].values.tolist()))\n buldy_RGG_50_rep100_045_far_dead_node['rand_rgg'].append(statistics.mean(buldy_RGG_50_rep100_045_rand_rgg_data['step%d_far_dead_node' % index].values.tolist()))\n buldy_RGG_50_rep100_045_far_dead_node['rand_rand'].append(statistics.mean(buldy_RGG_50_rep100_045_rand_rand_data['step%d_far_dead_node' % index].values.tolist()))\n ", "_____no_output_____" ], [ "cum_far_dead_node = {'rgg_rgg': [],\n 'rgg_rand': [],\n 'rand_rgg': [],\n 'rand_rand': []}\n\nfor index, row in buldy_RGG_50_rep100_045_rgg_rgg_data.iterrows():\n cur_row = row.tolist()\n \n length = int((len(buldy_RGG_50_rep100_045_rgg_rgg_data.columns) - 21) / 3)\n \n temp = []\n for i in range(length):\n if cur_row[(3*i) + 23] != 0:\n temp.append(cur_row[(3*i) + 23])\n else:\n temp.append(temp[i-2])\n \n cum_far_dead_node['rgg_rgg'].append(temp)\n \nprint(cum_far_dead_node['rgg_rgg'])", "[[0.6879706789051951, 0.5505739703384914, 0.5338971958657229, 0.5139744869480464, 0.5338971958657229, 0.5139744869480464, 0.5338971958657229, 0.5139744869480464, 0.5338971958657229, 0.5139744869480464, 0.5338971958657229, 0.5139744869480464, 0.5338971958657229, 0.5139744869480464, 0.5338971958657229, 0.5139744869480464, 0.5338971958657229, 0.5139744869480464, 
0.5338971958657229], [0.6960127715310351, 0.5837410892936934, 0.51608809631184, 0.4675166249630603, 0.4470879808901324, 0.4675166249630603, 0.4470879808901324, 0.4675166249630603, 0.4470879808901324, 0.4675166249630603, 0.4470879808901324, 0.4675166249630603, 0.4470879808901324, 0.4675166249630603, 0.4470879808901324, 0.4675166249630603, 0.4470879808901324, 0.4675166249630603, 0.4470879808901324], [0.6888664840477402, 0.5460979608018127, 0.4992577185176286, 0.4754801286633672, 0.4875410130890059, 0.4754801286633672, 0.4875410130890059, 0.4754801286633672, 0.4875410130890059, 0.4754801286633672, 0.4875410130890059, 0.4754801286633672, 0.4875410130890059, 0.4754801286633672, 0.4875410130890059, 0.4754801286633672, 0.4875410130890059, 0.4754801286633672, 0.4875410130890059], [0.7040561090615646, 0.4305654085942861, 0.4184920688253442, 0.06165909897940485, 0.4184920688253442, 0.06165909897940485, 0.4184920688253442, 0.06165909897940485, 0.4184920688253442, 0.06165909897940485, 0.4184920688253442, 0.06165909897940485, 0.4184920688253442, 0.06165909897940485, 0.4184920688253442, 0.06165909897940485, 0.4184920688253442, 0.06165909897940485, 0.4184920688253442], [0.6985928786067628, 0.6034143187545158, 0.6283258470599116, 0.5440376381763153, 0.6283258470599116, 0.5440376381763153, 0.6283258470599116, 0.5440376381763153, 0.6283258470599116, 0.5440376381763153, 0.6283258470599116, 0.5440376381763153, 0.6283258470599116, 0.5440376381763153, 0.6283258470599116, 0.5440376381763153, 0.6283258470599116, 0.5440376381763153, 0.6283258470599116], [0.6954233029575586, 0.6454214312050546, 0.6061868643665901, 0.4683305919845574, 0.2757375417759079, 0.4493809934580124, 0.2757375417759079, 0.4493809934580124, 0.2757375417759079, 0.4493809934580124, 0.2757375417759079, 0.4493809934580124, 0.2757375417759079, 0.4493809934580124, 0.2757375417759079, 0.4493809934580124, 0.2757375417759079, 0.4493809934580124, 0.2757375417759079], [0.6923069165179774, 0.5376106228869935, 0.5185346487445525, 0.5376106228869935, 0.5185346487445525, 0.5376106228869935, 0.5185346487445525, 0.5376106228869935, 0.5185346487445525, 0.5376106228869935, 0.5185346487445525, 0.5376106228869935, 0.5185346487445525, 0.5376106228869935, 0.5185346487445525, 0.5376106228869935, 0.5185346487445525, 0.5376106228869935, 0.5185346487445525], [0.6859454719121254, 0.6713787406463817, 0.6522721432724671, 0.5257667800946747, 0.5424410988500873, 0.5257667800946747, 0.5424410988500873, 0.5257667800946747, 0.5424410988500873, 0.5257667800946747, 0.5424410988500873, 0.5257667800946747, 0.5424410988500873, 0.5257667800946747, 0.5424410988500873, 0.5257667800946747, 0.5424410988500873, 0.5257667800946747, 0.5424410988500873], [0.6974423005281336, 0.4983465867770809, 0.4641029960333676, 0.4124264455737112, 0.4641029960333676, 0.4124264455737112, 0.4641029960333676, 0.4124264455737112, 0.4641029960333676, 0.4124264455737112, 0.4641029960333676, 0.4124264455737112, 0.4641029960333676, 0.4124264455737112, 0.4641029960333676, 0.4124264455737112, 0.4641029960333676, 0.4124264455737112, 0.4641029960333676], [0.6955920196029797, 0.6739741391236936, 0.5369606834980764, 0.4549563806188126, 0.4771769117501855, 0.5231706558429261, 0.4771769117501855, 0.5231706558429261, 0.4771769117501855, 0.5231706558429261, 0.4771769117501855, 0.5231706558429261, 0.4771769117501855, 0.5231706558429261, 0.4771769117501855, 0.5231706558429261, 0.4771769117501855, 0.5231706558429261, 0.4771769117501855], [0.6888728282893095, 0.4986658330109473, 0.6888728282893095, 0.4986658330109473, 
0.6888728282893095, 0.4986658330109473, 0.6888728282893095, 0.4986658330109473, 0.6888728282893095, 0.4986658330109473, 0.6888728282893095, 0.4986658330109473, 0.6888728282893095, 0.4986658330109473, 0.6888728282893095, 0.4986658330109473, 0.6888728282893095, 0.4986658330109473, 0.6888728282893095], [0.6845326723567468, 0.5039170248943298, 0.3818698548567837, 0.5039170248943298, 0.3818698548567837, 0.5039170248943298, 0.3818698548567837, 0.5039170248943298, 0.3818698548567837, 0.5039170248943298, 0.3818698548567837, 0.5039170248943298, 0.3818698548567837, 0.5039170248943298, 0.3818698548567837, 0.5039170248943298, 0.3818698548567837, 0.5039170248943298, 0.3818698548567837], [0.6821062477155988, 0.5243391371918984, 0.438675749538082, 0.5243391371918984, 0.438675749538082, 0.5243391371918984, 0.438675749538082, 0.5243391371918984, 0.438675749538082, 0.5243391371918984, 0.438675749538082, 0.5243391371918984, 0.438675749538082, 0.5243391371918984, 0.438675749538082, 0.5243391371918984, 0.438675749538082, 0.5243391371918984, 0.438675749538082], [0.7018678396316862, 0.5522986050377134, 0.466283251941794, 0.3993469894457807, 0.466283251941794, 0.3993469894457807, 0.466283251941794, 0.3993469894457807, 0.466283251941794, 0.3993469894457807, 0.466283251941794, 0.3993469894457807, 0.466283251941794, 0.3993469894457807, 0.466283251941794, 0.3993469894457807, 0.466283251941794, 0.3993469894457807, 0.466283251941794], [0.683425070172236, 0.6794862920243604, 0.4250083415473181, 0.6794862920243604, 0.4250083415473181, 0.6794862920243604, 0.4250083415473181, 0.6794862920243604, 0.4250083415473181, 0.6794862920243604, 0.4250083415473181, 0.6794862920243604, 0.4250083415473181, 0.6794862920243604, 0.4250083415473181, 0.6794862920243604, 0.4250083415473181, 0.6794862920243604, 0.4250083415473181], [0.6914926119057713, 0.5559571943390375, 0.5042542215925021, 0.4610908278615228, 0.5042542215925021, 0.4610908278615228, 0.5042542215925021, 0.4610908278615228, 0.5042542215925021, 0.4610908278615228, 0.5042542215925021, 0.4610908278615228, 0.5042542215925021, 0.4610908278615228, 0.5042542215925021, 0.4610908278615228, 0.5042542215925021, 0.4610908278615228, 0.5042542215925021], [0.7016254311032887, 0.6604674341172346, 0.318158047899655, 0.6604674341172346, 0.318158047899655, 0.6604674341172346, 0.318158047899655, 0.6604674341172346, 0.318158047899655, 0.6604674341172346, 0.318158047899655, 0.6604674341172346, 0.318158047899655, 0.6604674341172346, 0.318158047899655, 0.6604674341172346, 0.318158047899655, 0.6604674341172346, 0.318158047899655], [0.6749264364380183, 0.6498191853462889, 0.6177892404862021, 0.4675987898196354, 0.4499296902825712, 0.4675987898196354, 0.4499296902825712, 0.4675987898196354, 0.4499296902825712, 0.4675987898196354, 0.4499296902825712, 0.4675987898196354, 0.4499296902825712, 0.4675987898196354, 0.4499296902825712, 0.4675987898196354, 0.4499296902825712, 0.4675987898196354, 0.4499296902825712], [0.6947781050391095, 0.683207572579, 0.5682029481530008, 0.4868357198868119, 0.4742546874602357, 0.4868357198868119, 0.4742546874602357, 0.4868357198868119, 0.4742546874602357, 0.4868357198868119, 0.4742546874602357, 0.4868357198868119, 0.4742546874602357, 0.4868357198868119, 0.4742546874602357, 0.4868357198868119, 0.4742546874602357, 0.4868357198868119, 0.4742546874602357], [0.6740812072423552, 0.6618122825837863, 0.4641538700274452, 0.3094238234600202, 0.4641538700274452, 0.3094238234600202, 0.4641538700274452, 0.3094238234600202, 0.4641538700274452, 0.3094238234600202, 0.4641538700274452, 
0.3094238234600202, 0.4641538700274452, 0.3094238234600202, 0.4641538700274452, 0.3094238234600202, 0.4641538700274452, 0.3094238234600202, 0.4641538700274452], [0.7022291073412669, 0.5293429885367102, 0.4777381791351661, 0.3674608054750634, 0.3931317104649806, 0.3674608054750634, 0.3931317104649806, 0.3674608054750634, 0.3931317104649806, 0.3674608054750634, 0.3931317104649806, 0.3674608054750634, 0.3931317104649806, 0.3674608054750634, 0.3931317104649806, 0.3674608054750634, 0.3931317104649806, 0.3674608054750634, 0.3931317104649806], [0.6950708119445599, 0.5336777878974175, 0.525825384019172, 0.31686891740658035, 0.3007243082646641, 0.31686891740658035, 0.3007243082646641, 0.31686891740658035, 0.3007243082646641, 0.31686891740658035, 0.3007243082646641, 0.31686891740658035, 0.3007243082646641, 0.31686891740658035, 0.3007243082646641, 0.31686891740658035, 0.3007243082646641, 0.31686891740658035, 0.3007243082646641], [0.6825233674817666, 0.5369225134631442, 0.54788145473833, 0.2594733928188212, 0.54788145473833, 0.2594733928188212, 0.54788145473833, 0.2594733928188212, 0.54788145473833, 0.2594733928188212, 0.54788145473833, 0.2594733928188212, 0.54788145473833, 0.2594733928188212, 0.54788145473833, 0.2594733928188212, 0.54788145473833, 0.2594733928188212, 0.54788145473833], [0.7019971505169456, 0.6149761817732257, 0.5774232316592243, 0.5282007671745257, 0.5168398954453652, 0.5282007671745257, 0.5168398954453652, 0.5282007671745257, 0.5168398954453652, 0.5282007671745257, 0.5168398954453652, 0.5282007671745257, 0.5168398954453652, 0.5282007671745257, 0.5168398954453652, 0.5282007671745257, 0.5168398954453652, 0.5282007671745257, 0.5168398954453652], [0.6735436853556123, 0.6652605716112322, 0.433414761625117, 0.6652605716112322, 0.433414761625117, 0.6652605716112322, 0.433414761625117, 0.6652605716112322, 0.433414761625117, 0.6652605716112322, 0.433414761625117, 0.6652605716112322, 0.433414761625117, 0.6652605716112322, 0.433414761625117, 0.6652605716112322, 0.433414761625117, 0.6652605716112322, 0.433414761625117], [0.7015836687556579, 0.4576973369006489, 0.3908075508804981, 0.4576973369006489, 0.3908075508804981, 0.4576973369006489, 0.3908075508804981, 0.4576973369006489, 0.3908075508804981, 0.4576973369006489, 0.3908075508804981, 0.4576973369006489, 0.3908075508804981, 0.4576973369006489, 0.3908075508804981, 0.4576973369006489, 0.3908075508804981, 0.4576973369006489, 0.3908075508804981], [0.6969251605982767, 0.5439156588967174, 0.5282278708197945, 0.4896285929990201, 0.5282278708197945, 0.4896285929990201, 0.5282278708197945, 0.4896285929990201, 0.5282278708197945, 0.4896285929990201, 0.5282278708197945, 0.4896285929990201, 0.5282278708197945, 0.4896285929990201, 0.5282278708197945, 0.4896285929990201, 0.5282278708197945, 0.4896285929990201, 0.5282278708197945], [0.6862043366440287, 0.6551125589877719, 0.5552209940536255, 0.3734005932730842, 0.3286042059393291, 0.3734005932730842, 0.3286042059393291, 0.3734005932730842, 0.3286042059393291, 0.3734005932730842, 0.3286042059393291, 0.3734005932730842, 0.3286042059393291, 0.3734005932730842, 0.3286042059393291, 0.3734005932730842, 0.3286042059393291, 0.3734005932730842, 0.3286042059393291], [0.6963093497917389, 0.6099850269793455, 0.6282922425872703, 0.41803834403529816, 0.6282922425872703, 0.41803834403529816, 0.6282922425872703, 0.41803834403529816, 0.6282922425872703, 0.41803834403529816, 0.6282922425872703, 0.41803834403529816, 0.6282922425872703, 0.41803834403529816, 0.6282922425872703, 0.41803834403529816, 0.6282922425872703, 
0.41803834403529816, 0.6282922425872703], [0.6946620348664078, 0.6970975192420017, 0.6946620348664078, 0.6970975192420017, 0.6946620348664078, 0.6970975192420017, 0.6946620348664078, 0.6970975192420017, 0.6946620348664078, 0.6970975192420017, 0.6946620348664078, 0.6970975192420017, 0.6946620348664078, 0.6970975192420017, 0.6946620348664078, 0.6970975192420017, 0.6946620348664078, 0.6970975192420017, 0.6946620348664078], [0.6950012139409489, 0.6428834684984386, 0.6950012139409489, 0.6428834684984386, 0.6950012139409489, 0.6428834684984386, 0.6950012139409489, 0.6428834684984386, 0.6950012139409489, 0.6428834684984386, 0.6950012139409489, 0.6428834684984386, 0.6950012139409489, 0.6428834684984386, 0.6950012139409489, 0.6428834684984386, 0.6950012139409489, 0.6428834684984386, 0.6950012139409489], [0.691949882811923, 0.5880453426455354, 0.483841608595268, 0.4067445904436966, 0.483841608595268, 0.4067445904436966, 0.483841608595268, 0.4067445904436966, 0.483841608595268, 0.4067445904436966, 0.483841608595268, 0.4067445904436966, 0.483841608595268, 0.4067445904436966, 0.483841608595268, 0.4067445904436966, 0.483841608595268, 0.4067445904436966, 0.483841608595268], [0.695803928431308, 0.6201270918634525, 0.5020712758868663, 0.5068584449657428, 0.5139934341613572, 0.410272265220885, 0.5139934341613572, 0.410272265220885, 0.5139934341613572, 0.410272265220885, 0.5139934341613572, 0.410272265220885, 0.5139934341613572, 0.410272265220885, 0.5139934341613572, 0.410272265220885, 0.5139934341613572, 0.410272265220885, 0.5139934341613572], [0.695904690605074, 0.5247159509380371, 0.4844486936878866, 0.5247159509380371, 0.4844486936878866, 0.5247159509380371, 0.4844486936878866, 0.5247159509380371, 0.4844486936878866, 0.5247159509380371, 0.4844486936878866, 0.5247159509380371, 0.4844486936878866, 0.5247159509380371, 0.4844486936878866, 0.5247159509380371, 0.4844486936878866, 0.5247159509380371, 0.4844486936878866], [0.6963062424753824, 0.5092377924462526, 0.5222664366907246, 0.4124178562317873, 0.4240231141886939, 0.4124178562317873, 0.4240231141886939, 0.4124178562317873, 0.4240231141886939, 0.4124178562317873, 0.4240231141886939, 0.4124178562317873, 0.4240231141886939, 0.4124178562317873, 0.4240231141886939, 0.4124178562317873, 0.4240231141886939, 0.4124178562317873, 0.4240231141886939], [0.7009011864314222, 0.5271494970756266, 0.4485388862186677, 0.4794179892003194, 0.4565032621941688, 0.4794179892003194, 0.4565032621941688, 0.4794179892003194, 0.4565032621941688, 0.4794179892003194, 0.4565032621941688, 0.4794179892003194, 0.4565032621941688, 0.4794179892003194, 0.4565032621941688, 0.4794179892003194, 0.4565032621941688, 0.4794179892003194, 0.4565032621941688], [0.6973796780986028, 0.6396998889284151, 0.5641737992751887, 0.2792845610764548, 0.5641737992751887, 0.2792845610764548, 0.5641737992751887, 0.2792845610764548, 0.5641737992751887, 0.2792845610764548, 0.5641737992751887, 0.2792845610764548, 0.5641737992751887, 0.2792845610764548, 0.5641737992751887, 0.2792845610764548, 0.5641737992751887, 0.2792845610764548, 0.5641737992751887], [0.684500797077741, 0.6160912081927781, 0.684500797077741, 0.6160912081927781, 0.684500797077741, 0.6160912081927781, 0.684500797077741, 0.6160912081927781, 0.684500797077741, 0.6160912081927781, 0.684500797077741, 0.6160912081927781, 0.684500797077741, 0.6160912081927781, 0.684500797077741, 0.6160912081927781, 0.684500797077741, 0.6160912081927781, 0.684500797077741], [0.7036120603464935, 0.6535603834287494, 0.41964696061037504, 0.2955331514171224, 0.41964696061037504, 
0.2955331514171224, 0.41964696061037504, 0.2955331514171224, 0.41964696061037504, 0.2955331514171224, 0.41964696061037504, 0.2955331514171224, 0.41964696061037504, 0.2955331514171224, 0.41964696061037504, 0.2955331514171224, 0.41964696061037504, 0.2955331514171224, 0.41964696061037504], [0.6941377115347077, 0.6072255830339559, 0.554195946964856, 0.6072255830339559, 0.554195946964856, 0.6072255830339559, 0.554195946964856, 0.6072255830339559, 0.554195946964856, 0.6072255830339559, 0.554195946964856, 0.6072255830339559, 0.554195946964856, 0.6072255830339559, 0.554195946964856, 0.6072255830339559, 0.554195946964856, 0.6072255830339559, 0.554195946964856], [0.6991782274684111, 0.5946511097849558, 0.5256927562735794, 0.33134296046472106, 0.5256927562735794, 0.33134296046472106, 0.5256927562735794, 0.33134296046472106, 0.5256927562735794, 0.33134296046472106, 0.5256927562735794, 0.33134296046472106, 0.5256927562735794, 0.33134296046472106, 0.5256927562735794, 0.33134296046472106, 0.5256927562735794, 0.33134296046472106, 0.5256927562735794], [0.6894002051537155, 0.6381092581414407, 0.3433983219213077, 0.39619842706841535, 0.3433983219213077, 0.39619842706841535, 0.3433983219213077, 0.39619842706841535, 0.3433983219213077, 0.39619842706841535, 0.3433983219213077, 0.39619842706841535, 0.3433983219213077, 0.39619842706841535, 0.3433983219213077, 0.39619842706841535, 0.3433983219213077, 0.39619842706841535, 0.3433983219213077], [0.6922783247926294, 0.5114254437032989, 0.5127138249412901, 0.471121920616252, 0.5127138249412901, 0.471121920616252, 0.5127138249412901, 0.471121920616252, 0.5127138249412901, 0.471121920616252, 0.5127138249412901, 0.471121920616252, 0.5127138249412901, 0.471121920616252, 0.5127138249412901, 0.471121920616252, 0.5127138249412901, 0.471121920616252, 0.5127138249412901], [0.6957969519829709, 0.5182520434034958, 0.4721152284013603, 0.4723620393500952, 0.4721152284013603, 0.4723620393500952, 0.4721152284013603, 0.4723620393500952, 0.4721152284013603, 0.4723620393500952, 0.4721152284013603, 0.4723620393500952, 0.4721152284013603, 0.4723620393500952, 0.4721152284013603, 0.4723620393500952, 0.4721152284013603, 0.4723620393500952, 0.4721152284013603], [0.6863405307604971, 0.6175375567061727, 0.5466002872920425, 0.4624770252747264, 0.4436176659129512, 0.4624770252747264, 0.4436176659129512, 0.4624770252747264, 0.4436176659129512, 0.4624770252747264, 0.4436176659129512, 0.4624770252747264, 0.4436176659129512, 0.4624770252747264, 0.4436176659129512, 0.4624770252747264, 0.4436176659129512, 0.4624770252747264, 0.4436176659129512], [0.6991214751099586, 0.5630217663369113, 0.5637369351421101, 0.4197059903835531, 0.5637369351421101, 0.4197059903835531, 0.5637369351421101, 0.4197059903835531, 0.5637369351421101, 0.4197059903835531, 0.5637369351421101, 0.4197059903835531, 0.5637369351421101, 0.4197059903835531, 0.5637369351421101, 0.4197059903835531, 0.5637369351421101, 0.4197059903835531, 0.5637369351421101], [0.6979285560343439, 0.5526425677968737, 0.5433620455921996, 0.4644441646198918, 0.4127919690526404, 0.4644441646198918, 0.4127919690526404, 0.4644441646198918, 0.4127919690526404, 0.4644441646198918, 0.4127919690526404, 0.4644441646198918, 0.4127919690526404, 0.4644441646198918, 0.4127919690526404, 0.4644441646198918, 0.4127919690526404, 0.4644441646198918, 0.4127919690526404], [0.6997732319053848, 0.5247524642729471, 0.499748728372113, 0.5247524642729471, 0.499748728372113, 0.5247524642729471, 0.499748728372113, 0.5247524642729471, 0.499748728372113, 0.5247524642729471, 
0.499748728372113, 0.5247524642729471, 0.499748728372113, 0.5247524642729471, 0.499748728372113, 0.5247524642729471, 0.499748728372113, 0.5247524642729471, 0.499748728372113], [0.6986828323610524, 0.6593541011087035, 0.5427927272171523, 0.454847450274056, 0.5427927272171523, 0.454847450274056, 0.5427927272171523, 0.454847450274056, 0.5427927272171523, 0.454847450274056, 0.5427927272171523, 0.454847450274056, 0.5427927272171523, 0.454847450274056, 0.5427927272171523, 0.454847450274056, 0.5427927272171523, 0.454847450274056, 0.5427927272171523], [0.6966156114726909, 0.5558333108607599, 0.5152086024927595, 0.4887969950352127, 0.5297767670226967, 0.4887969950352127, 0.5297767670226967, 0.4887969950352127, 0.5297767670226967, 0.4887969950352127, 0.5297767670226967, 0.4887969950352127, 0.5297767670226967, 0.4887969950352127, 0.5297767670226967, 0.4887969950352127, 0.5297767670226967, 0.4887969950352127, 0.5297767670226967], [0.691334077021333, 0.5402307131519618, 0.4992833205372719, 0.4491009605877672, 0.4992833205372719, 0.4491009605877672, 0.4992833205372719, 0.4491009605877672, 0.4992833205372719, 0.4491009605877672, 0.4992833205372719, 0.4491009605877672, 0.4992833205372719, 0.4491009605877672, 0.4992833205372719, 0.4491009605877672, 0.4992833205372719, 0.4491009605877672, 0.4992833205372719], [0.6858527613785327, 0.5869972702171914, 0.5050281515909179, 0.5518862810895899, 0.5050281515909179, 0.5518862810895899, 0.5050281515909179, 0.5518862810895899, 0.5050281515909179, 0.5518862810895899, 0.5050281515909179, 0.5518862810895899, 0.5050281515909179, 0.5518862810895899, 0.5050281515909179, 0.5518862810895899, 0.5050281515909179, 0.5518862810895899, 0.5050281515909179], [0.6944078769569297, 0.5084909073414394, 0.4967977906963274, 0.3819507414215797, 0.4967977906963274, 0.3819507414215797, 0.4967977906963274, 0.3819507414215797, 0.4967977906963274, 0.3819507414215797, 0.4967977906963274, 0.3819507414215797, 0.4967977906963274, 0.3819507414215797, 0.4967977906963274, 0.3819507414215797, 0.4967977906963274, 0.3819507414215797, 0.4967977906963274], [0.7015864989108767, 0.5266668044888348, 0.5184901449346866, 0.4774691700510902, 0.4513730593183062, 0.4774691700510902, 0.4513730593183062, 0.4774691700510902, 0.4513730593183062, 0.4774691700510902, 0.4513730593183062, 0.4774691700510902, 0.4513730593183062, 0.4774691700510902, 0.4513730593183062, 0.4774691700510902, 0.4513730593183062, 0.4774691700510902, 0.4513730593183062], [0.6912307817979866, 0.6879247883479779, 0.5214792963040804, 0.33898955955254706, 0.5214792963040804, 0.33898955955254706, 0.5214792963040804, 0.33898955955254706, 0.5214792963040804, 0.33898955955254706, 0.5214792963040804, 0.33898955955254706, 0.5214792963040804, 0.33898955955254706, 0.5214792963040804, 0.33898955955254706, 0.5214792963040804, 0.33898955955254706, 0.5214792963040804], [0.6967608895344005, 0.5806867353532738, 0.5108314517410555, 0.4464224677745727, 0.5108314517410555, 0.4464224677745727, 0.5108314517410555, 0.4464224677745727, 0.5108314517410555, 0.4464224677745727, 0.5108314517410555, 0.4464224677745727, 0.5108314517410555, 0.4464224677745727, 0.5108314517410555, 0.4464224677745727, 0.5108314517410555, 0.4464224677745727, 0.5108314517410555], [0.6999891851761568, 0.5895141840441459, 0.454817156387496, 0.4746382325513713, 0.454817156387496, 0.4746382325513713, 0.454817156387496, 0.4746382325513713, 0.454817156387496, 0.4746382325513713, 0.454817156387496, 0.4746382325513713, 0.454817156387496, 0.4746382325513713, 0.454817156387496, 0.4746382325513713, 
0.454817156387496, 0.4746382325513713, 0.454817156387496], [0.6970773210839201, 0.6173568549790077, 0.6970773210839201, 0.6173568549790077, 0.6970773210839201, 0.6173568549790077, 0.6970773210839201, 0.6173568549790077, 0.6970773210839201, 0.6173568549790077, 0.6970773210839201, 0.6173568549790077, 0.6970773210839201, 0.6173568549790077, 0.6970773210839201, 0.6173568549790077, 0.6970773210839201, 0.6173568549790077, 0.6970773210839201], [0.6927274348804148, 0.6020232963087443, 0.5038249596378639, 0.4508640523534584, 0.5038249596378639, 0.4508640523534584, 0.5038249596378639, 0.4508640523534584, 0.5038249596378639, 0.4508640523534584, 0.5038249596378639, 0.4508640523534584, 0.5038249596378639, 0.4508640523534584, 0.5038249596378639, 0.4508640523534584, 0.5038249596378639, 0.4508640523534584, 0.5038249596378639], [0.6878098637344641, 0.5743179721814952, 0.3470836156427941, 0.5743179721814952, 0.3470836156427941, 0.5743179721814952, 0.3470836156427941, 0.5743179721814952, 0.3470836156427941, 0.5743179721814952, 0.3470836156427941, 0.5743179721814952, 0.3470836156427941, 0.5743179721814952, 0.3470836156427941, 0.5743179721814952, 0.3470836156427941, 0.5743179721814952, 0.3470836156427941], [0.7000115652256631, 0.6651536790125869, 0.5694093457622159, 0.4856095779444335, 0.4843564332445619, 0.4856095779444335, 0.4843564332445619, 0.4856095779444335, 0.4843564332445619, 0.4856095779444335, 0.4843564332445619, 0.4856095779444335, 0.4843564332445619, 0.4856095779444335, 0.4843564332445619, 0.4856095779444335, 0.4843564332445619, 0.4856095779444335, 0.4843564332445619], [0.7070148657706851, 0.6888494397292434, 0.5823689386193517, 0.4551016342284527, 0.5823689386193517, 0.4551016342284527, 0.5823689386193517, 0.4551016342284527, 0.5823689386193517, 0.4551016342284527, 0.5823689386193517, 0.4551016342284527, 0.5823689386193517, 0.4551016342284527, 0.5823689386193517, 0.4551016342284527, 0.5823689386193517, 0.4551016342284527, 0.5823689386193517], [0.6913524534524622, 0.4971911095196237, 0.4142692361915032, 0.3058209601813079, 0.4142692361915032, 0.3058209601813079, 0.4142692361915032, 0.3058209601813079, 0.4142692361915032, 0.3058209601813079, 0.4142692361915032, 0.3058209601813079, 0.4142692361915032, 0.3058209601813079, 0.4142692361915032, 0.3058209601813079, 0.4142692361915032, 0.3058209601813079, 0.4142692361915032], [0.684154542938714, 0.5563651071080006, 0.4740591455264303, 0.3865233091286582, 0.3017430165011092, 0.3865233091286582, 0.3017430165011092, 0.3865233091286582, 0.3017430165011092, 0.3865233091286582, 0.3017430165011092, 0.3865233091286582, 0.3017430165011092, 0.3865233091286582, 0.3017430165011092, 0.3865233091286582, 0.3017430165011092, 0.3865233091286582, 0.3017430165011092], [0.7010746361949705, 0.6067682236366402, 0.4520401253943576, 0.6067682236366402, 0.4520401253943576, 0.6067682236366402, 0.4520401253943576, 0.6067682236366402, 0.4520401253943576, 0.6067682236366402, 0.4520401253943576, 0.6067682236366402, 0.4520401253943576, 0.6067682236366402, 0.4520401253943576, 0.6067682236366402, 0.4520401253943576, 0.6067682236366402, 0.4520401253943576], [0.6862243991074943, 0.5603098578810664, 0.4561420059101754, 0.5603098578810664, 0.4561420059101754, 0.5603098578810664, 0.4561420059101754, 0.5603098578810664, 0.4561420059101754, 0.5603098578810664, 0.4561420059101754, 0.5603098578810664, 0.4561420059101754, 0.5603098578810664, 0.4561420059101754, 0.5603098578810664, 0.4561420059101754, 0.5603098578810664, 0.4561420059101754], [0.6929887691752347, 0.5864318711363624, 
0.4849341457113994, 0.28513443874208416, 0.4849341457113994, 0.28513443874208416, 0.4849341457113994, 0.28513443874208416, 0.4849341457113994, 0.28513443874208416, 0.4849341457113994, 0.28513443874208416, 0.4849341457113994, 0.28513443874208416, 0.4849341457113994, 0.28513443874208416, 0.4849341457113994, 0.28513443874208416, 0.4849341457113994], [0.698790405350689, 0.4416002589148517, 0.25344799094959336, 0.4416002589148517, 0.25344799094959336, 0.4416002589148517, 0.25344799094959336, 0.4416002589148517, 0.25344799094959336, 0.4416002589148517, 0.25344799094959336, 0.4416002589148517, 0.25344799094959336, 0.4416002589148517, 0.25344799094959336, 0.4416002589148517, 0.25344799094959336, 0.4416002589148517, 0.25344799094959336], [0.6749558383082833, 0.5508333233983855, 0.4806906291352432, 0.2831705081556237, 0.4806906291352432, 0.2831705081556237, 0.4806906291352432, 0.2831705081556237, 0.4806906291352432, 0.2831705081556237, 0.4806906291352432, 0.2831705081556237, 0.4806906291352432, 0.2831705081556237, 0.4806906291352432, 0.2831705081556237, 0.4806906291352432, 0.2831705081556237, 0.4806906291352432], [0.6851422847009387, 0.6145425824967771, 0.5989084977354423, 0.6145425824967771, 0.5989084977354423, 0.6145425824967771, 0.5989084977354423, 0.6145425824967771, 0.5989084977354423, 0.6145425824967771, 0.5989084977354423, 0.6145425824967771, 0.5989084977354423, 0.6145425824967771, 0.5989084977354423, 0.6145425824967771, 0.5989084977354423, 0.6145425824967771, 0.5989084977354423], [0.6901706784214362, 0.5254020267377152, 0.5090255913812833, 0.4445955743347255, 0.5090255913812833, 0.4445955743347255, 0.5090255913812833, 0.4445955743347255, 0.5090255913812833, 0.4445955743347255, 0.5090255913812833, 0.4445955743347255, 0.5090255913812833, 0.4445955743347255, 0.5090255913812833, 0.4445955743347255, 0.5090255913812833, 0.4445955743347255, 0.5090255913812833], [0.7010826251551174, 0.553745099645963, 0.527832398018632, 0.497862044340754, 0.4957670985037351, 0.497862044340754, 0.4957670985037351, 0.497862044340754, 0.4957670985037351, 0.497862044340754, 0.4957670985037351, 0.497862044340754, 0.4957670985037351, 0.497862044340754, 0.4957670985037351, 0.497862044340754, 0.4957670985037351, 0.497862044340754, 0.4957670985037351], [0.6989137960168493, 0.6111570585535373, 0.4860300016297214, 0.5055288298473712, 0.4860300016297214, 0.5055288298473712, 0.4860300016297214, 0.5055288298473712, 0.4860300016297214, 0.5055288298473712, 0.4860300016297214, 0.5055288298473712, 0.4860300016297214, 0.5055288298473712, 0.4860300016297214, 0.5055288298473712, 0.4860300016297214, 0.5055288298473712, 0.4860300016297214], [0.6956931846560663, 0.6618330163565916, 0.6759317624361679, 0.5363658979431096, 0.6759317624361679, 0.5363658979431096, 0.6759317624361679, 0.5363658979431096, 0.6759317624361679, 0.5363658979431096, 0.6759317624361679, 0.5363658979431096, 0.6759317624361679, 0.5363658979431096, 0.6759317624361679, 0.5363658979431096, 0.6759317624361679, 0.5363658979431096, 0.6759317624361679], [0.6773721599948151, 0.6574888772952684, 0.4877221989739642, 0.6087151511081192, 0.4877221989739642, 0.6087151511081192, 0.4877221989739642, 0.6087151511081192, 0.4877221989739642, 0.6087151511081192, 0.4877221989739642, 0.6087151511081192, 0.4877221989739642, 0.6087151511081192, 0.4877221989739642, 0.6087151511081192, 0.4877221989739642, 0.6087151511081192, 0.4877221989739642], [0.6947427343062257, 0.5851631463870516, 0.5698401284282295, 0.5054944579999211, 0.453129335514308, 0.5054944579999211, 0.453129335514308, 
0.5054944579999211, 0.453129335514308, 0.5054944579999211, 0.453129335514308, 0.5054944579999211, 0.453129335514308, 0.5054944579999211, 0.453129335514308, 0.5054944579999211, 0.453129335514308, 0.5054944579999211, 0.453129335514308], [0.6972917520665518, 0.5842247701063386, 0.51225455421499, 0.5842247701063386, 0.51225455421499, 0.5842247701063386, 0.51225455421499, 0.5842247701063386, 0.51225455421499, 0.5842247701063386, 0.51225455421499, 0.5842247701063386, 0.51225455421499, 0.5842247701063386, 0.51225455421499, 0.5842247701063386, 0.51225455421499, 0.5842247701063386, 0.51225455421499], [0.6992011131035915, 0.4809898921235877, 0.4401451486400377, 0.3537370101598489, 0.4401451486400377, 0.3537370101598489, 0.4401451486400377, 0.3537370101598489, 0.4401451486400377, 0.3537370101598489, 0.4401451486400377, 0.3537370101598489, 0.4401451486400377, 0.3537370101598489, 0.4401451486400377, 0.3537370101598489, 0.4401451486400377, 0.3537370101598489, 0.4401451486400377], [0.68296071901907, 0.6142246855142789, 0.6403379313332239, 0.4637079748486686, 0.3269552110478282, 0.4637079748486686, 0.3269552110478282, 0.4637079748486686, 0.3269552110478282, 0.4637079748486686, 0.3269552110478282, 0.4637079748486686, 0.3269552110478282, 0.4637079748486686, 0.3269552110478282, 0.4637079748486686, 0.3269552110478282, 0.4637079748486686, 0.3269552110478282], [0.699549160697535, 0.6486172200620084, 0.5354406807017216, 0.4972549645102342, 0.4938451287685625, 0.4972549645102342, 0.4938451287685625, 0.4972549645102342, 0.4938451287685625, 0.4972549645102342, 0.4938451287685625, 0.4972549645102342, 0.4938451287685625, 0.4972549645102342, 0.4938451287685625, 0.4972549645102342, 0.4938451287685625, 0.4972549645102342, 0.4938451287685625], [0.6989179118441295, 0.5192204425391803, 0.5183496408394862, 0.4135439116852031, 0.5183496408394862, 0.4135439116852031, 0.5183496408394862, 0.4135439116852031, 0.5183496408394862, 0.4135439116852031, 0.5183496408394862, 0.4135439116852031, 0.5183496408394862, 0.4135439116852031, 0.5183496408394862, 0.4135439116852031, 0.5183496408394862, 0.4135439116852031, 0.5183496408394862], [0.6942130583832699, 0.6220300282353817, 0.4336940526173929, 0.46279307390795593, 0.4336940526173929, 0.46279307390795593, 0.4336940526173929, 0.46279307390795593, 0.4336940526173929, 0.46279307390795593, 0.4336940526173929, 0.46279307390795593, 0.4336940526173929, 0.46279307390795593, 0.4336940526173929, 0.46279307390795593, 0.4336940526173929, 0.46279307390795593, 0.4336940526173929], [0.6834264578060677, 0.6293084061350368, 0.5988305644417296, 0.5053248698582646, 0.485069876576681, 0.5053248698582646, 0.485069876576681, 0.5053248698582646, 0.485069876576681, 0.5053248698582646, 0.485069876576681, 0.5053248698582646, 0.485069876576681, 0.5053248698582646, 0.485069876576681, 0.5053248698582646, 0.485069876576681, 0.5053248698582646, 0.485069876576681], [0.6991429834391265, 0.6730392275254578, 0.5107091412287892, 0.442518616969494, 0.5107091412287892, 0.442518616969494, 0.5107091412287892, 0.442518616969494, 0.5107091412287892, 0.442518616969494, 0.5107091412287892, 0.442518616969494, 0.5107091412287892, 0.442518616969494, 0.5107091412287892, 0.442518616969494, 0.5107091412287892, 0.442518616969494, 0.5107091412287892], [0.6959788422829933, 0.6045213174250268, 0.5182837076405664, 0.5913543907435773, 0.5699748079260035, 0.5913543907435773, 0.5699748079260035, 0.5913543907435773, 0.5699748079260035, 0.5913543907435773, 0.5699748079260035, 0.5913543907435773, 0.5699748079260035, 0.5913543907435773, 
0.5699748079260035, 0.5913543907435773, 0.5699748079260035, 0.5913543907435773, 0.5699748079260035], [0.6851608046146426, 0.5546011140797111, 0.5644412554246679, 0.4944495096400878, 0.5644412554246679, 0.4944495096400878, 0.5644412554246679, 0.4944495096400878, 0.5644412554246679, 0.4944495096400878, 0.5644412554246679, 0.4944495096400878, 0.5644412554246679, 0.4944495096400878, 0.5644412554246679, 0.4944495096400878, 0.5644412554246679, 0.4944495096400878, 0.5644412554246679], [0.6815184582526442, 0.6659148840586889, 0.6815184582526442, 0.6659148840586889, 0.6815184582526442, 0.6659148840586889, 0.6815184582526442, 0.6659148840586889, 0.6815184582526442, 0.6659148840586889, 0.6815184582526442, 0.6659148840586889, 0.6815184582526442, 0.6659148840586889, 0.6815184582526442, 0.6659148840586889, 0.6815184582526442, 0.6659148840586889, 0.6815184582526442], [0.691260746530931, 0.5533510044880253, 0.5700118164286143, 0.5551209297342701, 0.5700118164286143, 0.5551209297342701, 0.5700118164286143, 0.5551209297342701, 0.5700118164286143, 0.5551209297342701, 0.5700118164286143, 0.5551209297342701, 0.5700118164286143, 0.5551209297342701, 0.5700118164286143, 0.5551209297342701, 0.5700118164286143, 0.5551209297342701, 0.5700118164286143], [0.6980939221899535, 0.6398403038341338, 0.4866328674071781, 0.4747444289150589, 0.471817658808565, 0.4747444289150589, 0.471817658808565, 0.4747444289150589, 0.471817658808565, 0.4747444289150589, 0.471817658808565, 0.4747444289150589, 0.471817658808565, 0.4747444289150589, 0.471817658808565, 0.4747444289150589, 0.471817658808565, 0.4747444289150589, 0.471817658808565], [0.7006810997355021, 0.5361012394357227, 0.4694089264203878, 0.3472503421485294, 0.4694089264203878, 0.3472503421485294, 0.4694089264203878, 0.3472503421485294, 0.4694089264203878, 0.3472503421485294, 0.4694089264203878, 0.3472503421485294, 0.4694089264203878, 0.3472503421485294, 0.4694089264203878, 0.3472503421485294, 0.4694089264203878, 0.3472503421485294, 0.4694089264203878], [0.693384971244155, 0.6452460613939784, 0.6669400805785973, 0.20061363280813307, 0.6669400805785973, 0.20061363280813307, 0.6669400805785973, 0.20061363280813307, 0.6669400805785973, 0.20061363280813307, 0.6669400805785973, 0.20061363280813307, 0.6669400805785973, 0.20061363280813307, 0.6669400805785973, 0.20061363280813307, 0.6669400805785973, 0.20061363280813307, 0.6669400805785973], [0.6925797694588199, 0.3230214064375756, 0.6925797694588199, 0.3230214064375756, 0.6925797694588199, 0.3230214064375756, 0.6925797694588199, 0.3230214064375756, 0.6925797694588199, 0.3230214064375756, 0.6925797694588199, 0.3230214064375756, 0.6925797694588199, 0.3230214064375756, 0.6925797694588199, 0.3230214064375756, 0.6925797694588199, 0.3230214064375756, 0.6925797694588199], [0.6881079289384403, 0.5397042959238989, 0.4525167722749829, 0.1492730492362932, 0.4525167722749829, 0.1492730492362932, 0.4525167722749829, 0.1492730492362932, 0.4525167722749829, 0.1492730492362932, 0.4525167722749829, 0.1492730492362932, 0.4525167722749829, 0.1492730492362932, 0.4525167722749829, 0.1492730492362932, 0.4525167722749829, 0.1492730492362932, 0.4525167722749829], [0.6966030971184889, 0.4936816972171006, 0.4699005840473242, 0.4936816972171006, 0.4699005840473242, 0.4936816972171006, 0.4699005840473242, 0.4936816972171006, 0.4699005840473242, 0.4936816972171006, 0.4699005840473242, 0.4936816972171006, 0.4699005840473242, 0.4936816972171006, 0.4699005840473242, 0.4936816972171006, 0.4699005840473242, 0.4936816972171006, 0.4699005840473242], 
[0.6791244019906811, 0.4950108503605495, 0.4154613509719154, 0.4950108503605495, 0.4154613509719154, 0.4950108503605495, 0.4154613509719154, 0.4950108503605495, 0.4154613509719154, 0.4950108503605495, 0.4154613509719154, 0.4950108503605495, 0.4154613509719154, 0.4950108503605495, 0.4154613509719154, 0.4950108503605495, 0.4154613509719154, 0.4950108503605495, 0.4154613509719154], [0.7031878751900261, 0.460037188945789, 0.4640051961640838, 0.2776866346922805, 0.29787769092492233, 0.2776866346922805, 0.29787769092492233, 0.2776866346922805, 0.29787769092492233, 0.2776866346922805, 0.29787769092492233, 0.2776866346922805, 0.29787769092492233, 0.2776866346922805, 0.29787769092492233, 0.2776866346922805, 0.29787769092492233, 0.2776866346922805, 0.29787769092492233], [0.6919985385697099, 0.6224349580103637, 0.4803520433500968, 0.6224349580103637, 0.4803520433500968, 0.6224349580103637, 0.4803520433500968, 0.6224349580103637, 0.4803520433500968, 0.6224349580103637, 0.4803520433500968, 0.6224349580103637, 0.4803520433500968, 0.6224349580103637, 0.4803520433500968, 0.6224349580103637, 0.4803520433500968, 0.6224349580103637, 0.4803520433500968], [0.6916810693061993, 0.6438101018393572, 0.4456462409019724, 0.6438101018393572, 0.4456462409019724, 0.6438101018393572, 0.4456462409019724, 0.6438101018393572, 0.4456462409019724, 0.6438101018393572, 0.4456462409019724, 0.6438101018393572, 0.4456462409019724, 0.6438101018393572, 0.4456462409019724, 0.6438101018393572, 0.4456462409019724, 0.6438101018393572, 0.4456462409019724], [0.6978231942417638, 0.6320931389750707, 0.6594632809314187, 0.285737920387288, 0.3000826573763886, 0.285737920387288, 0.3000826573763886, 0.285737920387288, 0.3000826573763886, 0.285737920387288, 0.3000826573763886, 0.285737920387288, 0.3000826573763886, 0.285737920387288, 0.3000826573763886, 0.285737920387288, 0.3000826573763886, 0.285737920387288, 0.3000826573763886], [0.6909578874021928, 0.5878533382672341, 0.5659054189820935, 0.5236622621149383, 0.5323898679446387, 0.5236622621149383, 0.5323898679446387, 0.5236622621149383, 0.5323898679446387, 0.5236622621149383, 0.5323898679446387, 0.5236622621149383, 0.5323898679446387, 0.5236622621149383, 0.5323898679446387, 0.5236622621149383, 0.5323898679446387, 0.5236622621149383, 0.5323898679446387]]\n" ], [ "step_nums = []\nstep_nums.append(statistics.mean(rgg_rgg_data['cas_steps'].values.tolist()))\nstep_nums.append(statistics.mean(rgg_rand_data['cas_steps'].values.tolist()))\nstep_nums.append(statistics.mean(rand_rgg_data['cas_steps'].values.tolist()))\nstep_nums.append(statistics.mean(rand_rand_data['cas_steps'].values.tolist()))\n\nindex = np.arange(4)\ngraph_types = ['RGG-RGG', 'RGG-Rand', 'Rand-RGG', 'Rand-Rand']\n\nplt.bar(index, step_nums, width=0.3, color='gray')\nplt.xticks(index, graph_types)\nplt.title('Number of steps')\nplt.savefig('The number of steps.png')\nplt.show()", "_____no_output_____" ], [ "rgg_rgg_isol = []\nrgg_rgg_unsupp = []\nrgg_rand_isol = []\nrgg_rand_unsupp = []\nrand_rgg_isol = []\nrand_rgg_unsupp = []\nrand_rand_isol = []\nrand_rand_unsupp =[]", "_____no_output_____" ], [ "index = 1\nfor col_name in rgg_rgg_data:\n if col_name == ('step%d_isol' % index):\n rgg_rgg_isol.append(statistics.mean(rgg_rgg_data[col_name].values.tolist()))\n if col_name == ('step%d_unsupp' % index):\n rgg_rgg_unsupp.append(statistics.mean(rgg_rgg_data[col_name].values.tolist()))\n index += 1\n \nindex = 1\nfor col_name in rgg_rand_data:\n if col_name == ('step%d_isol' % index):\n 
rgg_rand_isol.append(statistics.mean(rgg_rand_data[col_name].values.tolist()))\n if col_name == ('step%d_unsupp' % index):\n rgg_rand_unsupp.append(statistics.mean(rgg_rand_data[col_name].values.tolist()))\n index += 1\n \nindex = 1\nfor col_name in rand_rgg_data:\n if col_name == ('step%d_isol' % index):\n rand_rgg_isol.append(statistics.mean(rand_rgg_data[col_name].values.tolist()))\n if col_name == ('step%d_unsupp' % index):\n rand_rgg_unsupp.append(statistics.mean(rand_rgg_data[col_name].values.tolist()))\n index += 1\n \nindex = 1\nfor col_name in rand_rand_data:\n if col_name == ('step%d_isol' % index):\n rand_rand_isol.append(statistics.mean(rand_rand_data[col_name].values.tolist()))\n if col_name == ('step%d_unsupp' % index):\n rand_rand_unsupp.append(statistics.mean(rand_rand_data[col_name].values.tolist()))\n index += 1", "_____no_output_____" ], [ "print(len(rgg_rgg_isol))\nprint(len(rgg_rgg_unsupp))\nprint(len(rgg_rand_isol))\nprint(len(rgg_rand_unsupp))\nprint(len(rand_rgg_isol))\nprint(len(rand_rgg_unsupp))\nprint(len(rand_rand_isol))\nprint(len(rand_rand_unsupp))", "62\n62\n14\n14\n65\n65\n27\n27\n" ], [ "cum_rgg_rgg_isol = []\ncum_rgg_rgg_unsupp = []\ncum_rgg_rand_isol = []\ncum_rgg_rand_unsupp = []\ncum_rand_rgg_isol = []\ncum_rand_rgg_unsupp = []\ncum_rand_rand_isol = []\ncum_rand_rand_unsupp = []\n\ntotal = []\nfor i in range(len(rgg_rgg_isol)):\n if i == 0:\n total.append(rgg_rgg_isol[i])\n total.append(rgg_rgg_unsupp[i])\n else:\n total[0] += rgg_rgg_isol[i]\n total[1] += rgg_rgg_unsupp[i]\n cum_rgg_rgg_isol.append(total[0])\n cum_rgg_rgg_unsupp.append(total[1])\n \ntotal = []\nfor i in range(len(rgg_rand_isol)):\n if i == 0:\n total.append(rgg_rand_isol[i])\n total.append(rgg_rand_unsupp[i])\n else:\n total[0] += rgg_rand_isol[i]\n total[1] += rgg_rand_unsupp[i]\n cum_rgg_rand_isol.append(total[0])\n cum_rgg_rand_unsupp.append(total[1])\n \ntotal = []\nfor i in range(len(rand_rgg_isol)):\n if i == 0:\n total.append(rand_rgg_isol[i])\n total.append(rand_rgg_unsupp[i])\n else:\n total[0] += rand_rgg_isol[i]\n total[1] += rand_rgg_unsupp[i]\n cum_rand_rgg_isol.append(total[0])\n cum_rand_rgg_unsupp.append(total[1])\n \ntotal = []\nfor i in range(len(rand_rand_isol)):\n if i == 0:\n total.append(rand_rand_isol[i])\n total.append(rand_rand_unsupp[i])\n else:\n total[0] += rand_rand_isol[i]\n total[1] += rand_rand_unsupp[i]\n cum_rand_rand_isol.append(total[0])\n cum_rand_rand_unsupp.append(total[1])", "_____no_output_____" ] ], [ [ "## Isolation vs Unsupport", "_____no_output_____" ] ], [ [ "plt.plot(range(len(cum_rgg_rgg_isol)), cum_rgg_rgg_isol)\nplt.plot(range(len(cum_rgg_rgg_isol)), cum_rgg_rgg_unsupp)\nplt.legend(['rgg_rgg_isol','rgg_rgg_unsupp'])\nplt.title('Isolation vs Unsupport: RGG-RGG')\nplt.savefig('Isolation vs Unsupport_RGG-RGG.png')\nplt.show()", "_____no_output_____" ], [ "plt.plot(range(len(cum_rgg_rand_isol)), cum_rgg_rand_isol)\nplt.plot(range(len(cum_rgg_rand_isol)), cum_rgg_rand_unsupp)\nplt.legend(['rgg_rand_isol','rgg_rand_unsupp'])\nplt.title('Isolation vs Unsupport: RGG-Rand')\nplt.savefig('Isolation vs Unsupport_RGG-Rand.png')\nplt.show()", "_____no_output_____" ], [ "plt.plot(range(len(cum_rand_rgg_isol)), cum_rand_rgg_isol)\nplt.plot(range(len(cum_rand_rgg_isol)), cum_rand_rgg_unsupp)\nplt.legend(['rand_rgg_isol','rand_rgg_unsupp'])\nplt.title('Isolation vs Unsupport: Rand-RGG')\nplt.savefig('Isolation vs Unsupport_Rand-RGG.png')\nplt.show()", "_____no_output_____" ], [ "plt.plot(range(len(cum_rand_rand_isol)), 
cum_rand_rand_isol)\nplt.plot(range(len(cum_rand_rand_isol)), cum_rand_rand_unsupp)\nplt.legend(['rand_rand_isol','rand_rand_unsupp'])\nplt.title('Isolation vs Unsupport: Rand-Rand')\nplt.savefig('Isolation vs Unsupport_Rand-Rand.png')\nplt.show()", "_____no_output_____" ], [ "df_len = []\n\ndf_len.append(list(rgg_rgg_isol))\ndf_len.append(list(rgg_rand_isol))\ndf_len.append(list(rand_rgg_isol))\ndf_len.append(list(rand_rand_isol))\n\nmax_df_len = max(df_len, key=len)\n\nx_val = list(range(len(max_df_len)))", "_____no_output_____" ], [ "proc_isol = []\nproc_unsupp = []\n\nproc_isol.append(cum_rgg_rgg_isol)\nproc_isol.append(cum_rgg_rand_isol)\nproc_isol.append(cum_rand_rgg_isol)\nproc_isol.append(cum_rand_rand_isol)\n\nproc_unsupp.append(cum_rgg_rgg_unsupp)\nproc_unsupp.append(cum_rgg_rand_unsupp)\nproc_unsupp.append(cum_rand_rgg_unsupp)\nproc_unsupp.append(cum_rand_rand_unsupp)\n\nfor x in x_val:\n if len(rgg_rgg_isol) <= x:\n proc_isol[0].append(cum_rgg_rgg_isol[len(rgg_rgg_isol) - 1])\n proc_unsupp[0].append(cum_rgg_rgg_unsupp[len(rgg_rgg_isol) - 1])\n if len(rgg_rand_isol) <= x:\n proc_isol[1].append(cum_rgg_rand_isol[len(rgg_rand_isol) - 1])\n proc_unsupp[1].append(cum_rgg_rand_unsupp[len(rgg_rand_isol) - 1])\n if len(rand_rgg_isol) <= x:\n proc_isol[2].append(cum_rand_rgg_isol[len(rand_rgg_isol) - 1])\n proc_unsupp[2].append(cum_rand_rgg_unsupp[len(rand_rgg_isol) - 1])\n if len(rand_rand_isol) <= x:\n proc_isol[3].append(cum_rand_rand_isol[len(rand_rand_isol) - 1])\n proc_unsupp[3].append(cum_rand_rand_unsupp[len(rand_rand_isol) - 1])", "_____no_output_____" ], [ "plt.plot(x_val, proc_isol[0])\nplt.plot(x_val, proc_isol[1])\nplt.plot(x_val, proc_isol[2])\nplt.plot(x_val, proc_isol[3])\nplt.legend(['rgg_rgg_isol','rgg_rand_isol', 'rand_rgg_isol', 'rand_rand_isol'])\nplt.title('Isolation trend')\nplt.show()", "_____no_output_____" ], [ "plt.plot(x_val, proc_unsupp[0])\nplt.plot(x_val, proc_unsupp[1])\nplt.plot(x_val, proc_unsupp[2])\nplt.plot(x_val, proc_unsupp[3])\nplt.legend(['rgg_rgg_unsupp','rgg_rand_unsupp', 'rand_rgg_unsupp', 'rand_rand_unsupp'])\nplt.title('Unsupport trend')\nplt.show()", "_____no_output_____" ] ], [ [ "## Pie Chart", "_____no_output_____" ] ], [ [ "init_death = 150\nlabels = ['Alive nodes', 'Initial death', 'Dead nodes from isolation', 'Dead nodes from unsupport']\n\nalive = []\nalive.append(statistics.mean(rgg_rgg_data['alive_nodes']))\nalive.append(statistics.mean(rgg_rand_data['alive_nodes']))\nalive.append(statistics.mean(rand_rgg_data['alive_nodes']))\nalive.append(statistics.mean(rand_rand_data['alive_nodes']))\n\ntot_isol = []\ntot_isol.append(statistics.mean(rgg_rgg_data['tot_isol_node']))\ntot_isol.append(statistics.mean(rgg_rand_data['tot_isol_node']))\ntot_isol.append(statistics.mean(rand_rgg_data['tot_isol_node']))\ntot_isol.append(statistics.mean(rand_rand_data['tot_isol_node']))\n\ntot_unsupp = []\ntot_unsupp.append(statistics.mean(rgg_rgg_data['tot_unsupp_node']))\ntot_unsupp.append(statistics.mean(rgg_rand_data['tot_unsupp_node']))\ntot_unsupp.append(statistics.mean(rand_rgg_data['tot_unsupp_node']))\ntot_unsupp.append(statistics.mean(rand_rand_data['tot_unsupp_node']))", "_____no_output_____" ], [ "deaths = [alive[0], init_death, tot_isol[0], tot_unsupp[0]]\n\nplt.pie(deaths, labels=labels, autopct='%.1f%%')\nplt.title('RGG-RGG death trend')\nplt.show()", "_____no_output_____" ], [ "deaths = [alive[1], init_death, tot_isol[1], tot_unsupp[1]]\n\nplt.pie(deaths, labels=labels, autopct='%.1f%%')\nplt.title('RGG-Rand death trend')\nplt.show()", 
"_____no_output_____" ], [ "deaths = [alive[2], init_death, tot_isol[2], tot_unsupp[2]]\n\nplt.pie(deaths, labels=labels, autopct='%.1f%%')\nplt.title('Rand-RGG death trend')\nplt.show()", "_____no_output_____" ], [ "deaths = [alive[3], init_death, tot_isol[3], tot_unsupp[3]]\n\nplt.pie(deaths, labels=labels, autopct='%.1f%%')\nplt.title('Rand-Rand death trend')\nplt.show()", "_____no_output_____" ] ], [ [ "## Compute the number of nodes", "_____no_output_____" ] ], [ [ "x_val = np.arange(4)\nlabels = ['initial', 'final']\n\nplt.bar(x_val, alive)\nplt.xticks(x_val, graph_types)\nplt.title('Alive nodes')\nplt.savefig('alive nodes.png')\nplt.show()", "_____no_output_____" ] ], [ [ "## Compare the number of edges", "_____no_output_____" ] ], [ [ "init_intra = []\ninit_intra.append(statistics.mean(rgg_rgg_data['init_intra_edge']))\ninit_intra.append(statistics.mean(rgg_rand_data['init_intra_edge']))\ninit_intra.append(statistics.mean(rand_rgg_data['init_intra_edge']))\ninit_intra.append(statistics.mean(rand_rand_data['init_intra_edge']))\n\ninit_inter = []\ninit_inter.append(statistics.mean(rgg_rgg_data['init_inter_edge']))\ninit_inter.append(statistics.mean(rgg_rand_data['init_inter_edge']))\ninit_inter.append(statistics.mean(rand_rgg_data['init_inter_edge']))\ninit_inter.append(statistics.mean(rand_rand_data['init_inter_edge']))\n\ninit_supp = []\ninit_supp.append(statistics.mean(rgg_rgg_data['init_supp_edge']))\ninit_supp.append(statistics.mean(rgg_rand_data['init_supp_edge']))\ninit_supp.append(statistics.mean(rand_rgg_data['init_supp_edge']))\ninit_supp.append(statistics.mean(rand_rand_data['init_supp_edge']))\n\nfin_intra = []\nfin_intra.append(statistics.mean(rgg_rgg_data['fin_intra_edge']))\nfin_intra.append(statistics.mean(rgg_rand_data['fin_intra_edge']))\nfin_intra.append(statistics.mean(rand_rgg_data['fin_intra_edge']))\nfin_intra.append(statistics.mean(rand_rand_data['fin_intra_edge']))\n\nfin_inter = []\nfin_inter.append(statistics.mean(rgg_rgg_data['fin_inter_edge']))\nfin_inter.append(statistics.mean(rgg_rand_data['fin_inter_edge']))\nfin_inter.append(statistics.mean(rand_rgg_data['fin_inter_edge']))\nfin_inter.append(statistics.mean(rand_rand_data['fin_inter_edge']))\n\nfin_supp = []\nfin_supp.append(statistics.mean(rgg_rgg_data['fin_supp_edge']))\nfin_supp.append(statistics.mean(rgg_rand_data['fin_supp_edge']))\nfin_supp.append(statistics.mean(rand_rgg_data['fin_supp_edge']))\nfin_supp.append(statistics.mean(rand_rand_data['fin_supp_edge']))", "_____no_output_____" ], [ "plt.bar(x_val-0.1, init_intra, width=0.2)\nplt.bar(x_val+0.1, fin_intra, width=0.2)\nplt.legend(labels)\nplt.xticks(x_val, graph_types)\nplt.title('Initial_intra_edge vs Final_intra_edge')\nplt.show()", "_____no_output_____" ], [ "plt.bar(x_val-0.1, init_inter, width=0.2)\nplt.bar(x_val+0.1, fin_inter, width=0.2)\nplt.legend(labels)\nplt.xticks(x_val, graph_types)\nplt.title('Initial_inter_edge vs Final_inter_edge')\nplt.show()", "_____no_output_____" ], [ "plt.bar(x_val-0.1, init_supp, width=0.2)\nplt.bar(x_val+0.1, fin_supp, width=0.2)\nplt.legend(labels)\nplt.xticks(x_val, graph_types)\nplt.title('Initial_support_edge vs Final_support_edge')\nplt.show()", "_____no_output_____" ] ], [ [ "## Network Analysis", "_____no_output_____" ] ], [ [ "init_far = 
[]\ninit_far.append(statistics.mean(rgg_rgg_data['init_far_node']))\ninit_far.append(statistics.mean(rgg_rand_data['init_far_node']))\ninit_far.append(statistics.mean(rand_rgg_data['init_far_node']))\ninit_far.append(statistics.mean(rand_rand_data['init_far_node']))\n\nfin_far = []\nfin_far.append(statistics.mean(rgg_rgg_data['fin_far_node']))\nfin_far.append(statistics.mean(rgg_rand_data['fin_far_node']))\nfin_far.append(statistics.mean(rand_rgg_data['fin_far_node']))\nfin_far.append(statistics.mean(rand_rand_data['fin_far_node']))", "_____no_output_____" ], [ "plt.bar(x_val-0.1, init_far, width=0.2)\nplt.bar(x_val+0.1, fin_far, width=0.2)\nplt.legend(labels)\nplt.xticks(x_val, graph_types)\nplt.title('Initial_far_node vs Final_far_node')\nplt.show()", "_____no_output_____" ], [ "init_clust = []\ninit_clust.append(statistics.mean(rgg_rgg_data['init_clust']))\ninit_clust.append(statistics.mean(rgg_rand_data['init_clust']))\ninit_clust.append(statistics.mean(rand_rgg_data['init_clust']))\ninit_clust.append(statistics.mean(rand_rand_data['init_clust']))\n\nfin_clust = []\nfin_clust.append(statistics.mean(rgg_rgg_data['fin_clust']))\nfin_clust.append(statistics.mean(rgg_rand_data['fin_clust']))\nfin_clust.append(statistics.mean(rand_rgg_data['fin_clust']))\nfin_clust.append(statistics.mean(rand_rand_data['fin_clust']))", "_____no_output_____" ], [ "plt.bar(x_val-0.1, init_clust, width=0.2)\nplt.bar(x_val+0.1, fin_clust, width=0.2)\nplt.legend(labels)\nplt.xticks(x_val, graph_types)\nplt.title('Initial_clustering_coefficient vs Final_clustering_coefficient')\nplt.show()", "_____no_output_____" ], [ "init_mean_deg = []\ninit_mean_deg.append(statistics.mean(rgg_rgg_data['init_mean_deg']))\ninit_mean_deg.append(statistics.mean(rgg_rand_data['init_mean_deg']))\ninit_mean_deg.append(statistics.mean(rand_rgg_data['init_mean_deg']))\ninit_mean_deg.append(statistics.mean(rand_rand_data['init_mean_deg']))\n\nfin_mean_deg = []\nfin_mean_deg.append(statistics.mean(rgg_rgg_data['fin_mean_deg']))\nfin_mean_deg.append(statistics.mean(rgg_rand_data['fin_mean_deg']))\nfin_mean_deg.append(statistics.mean(rand_rgg_data['fin_mean_deg']))\nfin_mean_deg.append(statistics.mean(rand_rand_data['fin_mean_deg']))", "_____no_output_____" ], [ "plt.bar(x_val-0.1, init_mean_deg, width=0.2)\nplt.bar(x_val+0.1, fin_mean_deg, width=0.2)\nplt.legend(labels)\nplt.xticks(x_val, graph_types)\nplt.title('Initial_mean_degree vs Final_mean_degree')\nplt.show()", "_____no_output_____" ], [ "init_larg_comp = []\ninit_larg_comp.append(statistics.mean(rgg_rgg_data['init_larg_comp']))\ninit_larg_comp.append(statistics.mean(rgg_rand_data['init_larg_comp']))\ninit_larg_comp.append(statistics.mean(rand_rgg_data['init_larg_comp']))\ninit_larg_comp.append(statistics.mean(rand_rand_data['init_larg_comp']))\n\nfin_larg_comp = []\nfin_larg_comp.append(statistics.mean(rgg_rgg_data['fin_larg_comp']))\nfin_larg_comp.append(statistics.mean(rgg_rand_data['fin_larg_comp']))\nfin_larg_comp.append(statistics.mean(rand_rgg_data['fin_larg_comp']))\nfin_larg_comp.append(statistics.mean(rand_rand_data['fin_larg_comp']))", "_____no_output_____" ], [ "plt.bar(x_val-0.1, init_larg_comp, width=0.2)\nplt.bar(x_val+0.1, fin_larg_comp, width=0.2)\nplt.legend(labels)\nplt.xticks(x_val, graph_types)\nplt.title('Initial_largest_component_size vs Final_largest_component_size')\nplt.show()", "_____no_output_____" ], [ "deg_assort = []\n\na = rgg_rgg_data['deg_assort'].fillna(0)\nb = rgg_rand_data['deg_assort'].fillna(0)\nc = rand_rgg_data['deg_assort'].fillna(0)\nd = 
rand_rand_data['deg_assort'].fillna(0)\n\ndeg_assort.append(statistics.mean(a))\ndeg_assort.append(statistics.mean(b))\ndeg_assort.append(statistics.mean(c))\ndeg_assort.append(statistics.mean(d))", "_____no_output_____" ], [ "plt.bar(x_val, deg_assort)\nplt.xticks(x_val, graph_types)\nplt.title('Degree Assortativity')\nplt.show()", "_____no_output_____" ], [ "dist_deg_cent = []\ndist_deg_cent.append(statistics.mean(rgg_rgg_data['dist_deg_cent']))\ndist_deg_cent.append(statistics.mean(rgg_rand_data['dist_deg_cent']))\ndist_deg_cent.append(statistics.mean(rand_rgg_data['dist_deg_cent']))\ndist_deg_cent.append(statistics.mean(rand_rand_data['dist_deg_cent']))", "_____no_output_____" ], [ "plt.bar(x_val, dist_deg_cent)\nplt.xticks(x_val, graph_types)\nplt.title('Distance to degree centre from the attack point')\nplt.show()", "_____no_output_____" ], [ "dist_bet_cent = []\ndist_bet_cent.append(statistics.mean(rgg_rgg_data['dist_bet_cent']))\ndist_bet_cent.append(statistics.mean(rgg_rand_data['dist_bet_cent']))\ndist_bet_cent.append(statistics.mean(rand_rgg_data['dist_bet_cent']))\ndist_bet_cent.append(statistics.mean(rand_rand_data['dist_bet_cent']))", "_____no_output_____" ], [ "plt.bar(x_val, dist_bet_cent)\nplt.xticks(x_val, graph_types)\nplt.title('Distance to betweenes centre from the attack point')\nplt.show()", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb7a150b66b46cc61e8fe41226194318c8e511dd
15,418
ipynb
Jupyter Notebook
notebooks/Orbit Computation/Access Computation.ipynb
open-space-collective/open-space-toolk
2a97d94612b82cd58a7d4c6b2bb014f2f29f65b0
[ "Apache-2.0" ]
18
2020-01-24T23:27:07.000Z
2022-02-02T15:23:29.000Z
notebooks/Orbit Computation/Access Computation.ipynb
open-space-collective/libraries
2a97d94612b82cd58a7d4c6b2bb014f2f29f65b0
[ "Apache-2.0" ]
4
2018-11-12T00:34:22.000Z
2018-11-15T06:04:00.000Z
notebooks/Orbit Computation/Access Computation.ipynb
open-space-collective/libraries
2a97d94612b82cd58a7d4c6b2bb014f2f29f65b0
[ "Apache-2.0" ]
5
2020-03-05T00:35:54.000Z
2022-01-02T22:21:49.000Z
24.867742
299
0.546828
[ [ [ "# Access Computation", "_____no_output_____" ], [ "This tutorial demonstrates how to compute access.", "_____no_output_____" ], [ "## Setup", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\n\nimport plotly.graph_objs as go\n\nfrom ostk.mathematics.objects import RealInterval\n\nfrom ostk.physics.units import Length\nfrom ostk.physics.units import Angle\nfrom ostk.physics.time import Scale\nfrom ostk.physics.time import Instant\nfrom ostk.physics.time import Duration\nfrom ostk.physics.time import Interval\nfrom ostk.physics.time import DateTime\nfrom ostk.physics.time import Time\nfrom ostk.physics.coordinate.spherical import LLA\nfrom ostk.physics.coordinate.spherical import AER\nfrom ostk.physics.coordinate import Position\nfrom ostk.physics.coordinate import Frame\nfrom ostk.physics import Environment\nfrom ostk.physics.environment.objects.celestial_bodies import Earth\n\nfrom ostk.astrodynamics import Trajectory\nfrom ostk.astrodynamics.trajectory import Orbit\nfrom ostk.astrodynamics.trajectory.orbit.models import Kepler\nfrom ostk.astrodynamics.trajectory.orbit.models.kepler import COE\nfrom ostk.astrodynamics.trajectory.orbit.models import SGP4\nfrom ostk.astrodynamics.trajectory.orbit.models.sgp4 import TLE\nfrom ostk.astrodynamics import Access\nfrom ostk.astrodynamics.access import Generator as AccessGenerator", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "## Access", "_____no_output_____" ], [ "An access represents an object-to-object visibility period.", "_____no_output_____" ], [ "In this example, let's compute accesses between a fixed position on the ground and a satellite in LEO.", "_____no_output_____" ], [ "## Environment", "_____no_output_____" ], [ "Let's setup an environment (which describes where planets are, etc...):", "_____no_output_____" ] ], [ [ "environment = Environment.default() ;", "_____no_output_____" ] ], [ [ "### Origin", "_____no_output_____" ], [ "Let's define a fixed ground position, using its geographic coordinates:", "_____no_output_____" ] ], [ [ "latitude = Angle.degrees(50.0)\nlongitude = Angle.degrees(20.0)\naltitude = Length.meters(30.0)", "_____no_output_____" ], [ "from_lla = LLA(latitude, longitude, altitude)", "_____no_output_____" ], [ "from_position = Position.meters(from_lla.to_cartesian(Earth.equatorial_radius, Earth.flattening), Frame.ITRF())", "_____no_output_____" ] ], [ [ "And derive a trajectory, fixed at that position:", "_____no_output_____" ] ], [ [ "from_trajectory = Trajectory.position(from_position)", "_____no_output_____" ] ], [ [ "### Target", "_____no_output_____" ], [ "Let's consider a satellite in **Low-Earth Orbit**.", "_____no_output_____" ] ], [ [ "earth = environment.access_celestial_object_with_name(\"Earth\")", "_____no_output_____" ] ], [ [ "We can define its orbit with **Classical Orbital Elements**:", "_____no_output_____" ] ], [ [ "a = Earth.equatorial_radius + Length.kilometers(500.0)\ne = 0.000\ni = Angle.degrees(97.8893)\nraan = Angle.degrees(100.372)\naop = Angle.degrees(0.0)\nnu = Angle.degrees(0.0201851)\n\ncoe = COE(a, e, i, raan, aop, nu)", "_____no_output_____" ] ], [ [ "... 
and by using a **Keplerian** orbital model:", "_____no_output_____" ] ], [ [ "epoch = Instant.date_time(DateTime(2018, 1, 1, 0, 0, 0), Scale.UTC)\n\nkeplerian_model = Kepler(coe, epoch, earth, Kepler.PerturbationType.J2)", "_____no_output_____" ] ], [ [ "Or with a **Two-Line Element** (TLE) set:", "_____no_output_____" ] ], [ [ "tle = TLE(\n \"ISS (ZARYA)\",\n \"1 25544U 98067A 18268.86272795 .00002184 00000-0 40781-4 0 9990\",\n \"2 25544 51.6405 237.0010 0003980 205.4375 242.3358 15.53733046134172\"\n)", "_____no_output_____" ] ], [ [ "... along with its associated **SGP4** orbital model:", "_____no_output_____" ] ], [ [ "sgp4_model = SGP4(tle)", "_____no_output_____" ] ], [ [ "Below, we select which orbital model to use:", "_____no_output_____" ] ], [ [ "orbital_model = keplerian_model\n# orbital_model = sgp4_model", "_____no_output_____" ] ], [ [ "We then obtain the satellite orbit (which is a **Trajectory** object):", "_____no_output_____" ] ], [ [ "satellite_orbit = Orbit(orbital_model, earth)", "_____no_output_____" ] ], [ [ "Alternatively, the **Orbit** class can provide some useful shortcuts (for usual orbit types):", "_____no_output_____" ] ], [ [ "epoch = Instant.date_time(DateTime(2018, 1, 1, 0, 0, 0), Scale.UTC)\n\nsatellite_orbit = Orbit.sun_synchronous(epoch, Length.kilometers(500.0), Time(12, 0, 0), earth)", "_____no_output_____" ] ], [ [ "### Access", "_____no_output_____" ], [ "Now that the origin and the target trajectories are well defined, we can compute the **Access**.", "_____no_output_____" ], [ "Let's first define an **analysis interval**:", "_____no_output_____" ] ], [ [ "start_instant = Instant.date_time(DateTime.parse(\"2018-01-01 00:00:00\"), Scale.UTC) ;\nend_instant = Instant.date_time(DateTime.parse(\"2018-01-10 00:00:00\"), Scale.UTC) ;\n\ninterval = Interval.closed(start_instant, end_instant) ;", "_____no_output_____" ] ], [ [ "Then, using an **Access Generator**, we can compute the accesses within the intervals of interest:", "_____no_output_____" ] ], [ [ "azimuth_range = RealInterval.closed(0.0, 360.0) # [deg]\nelevation_range = RealInterval.closed(20.0, 90.0) # [deg]\nrange_range = RealInterval.closed(0.0, 10000e3) # [m]\n\n# Access generator with Azimuth-Range-Elevation constraints\n\naccess_generator = AccessGenerator.aer_ranges(azimuth_range, elevation_range, range_range, environment)", "_____no_output_____" ], [ "accesses = access_generator.compute_accesses(interval, from_trajectory, satellite_orbit)", "_____no_output_____" ] ], [ [ "And format the output using a dataframe:", "_____no_output_____" ] ], [ [ "accesses_df = pd.DataFrame([[str(access.get_type()), repr(access.get_acquisition_of_signal()), repr(access.get_time_of_closest_approach()), repr(access.get_loss_of_signal()), float(access.get_duration().in_seconds())] for access in accesses], columns=['Type', 'AOS', 'TCA', 'LOS', 'Duration'])", "_____no_output_____" ] ], [ [ "### Output", "_____no_output_____" ], [ "Print accesses:", "_____no_output_____" ] ], [ [ "accesses_df", "_____no_output_____" ] ], [ [ "Let's calculate the geographic coordinate of the satellite, during access:", "_____no_output_____" ] ], [ [ "def compute_lla (state):\n \n lla = LLA.cartesian(state.get_position().in_frame(Frame.ITRF(), state.get_instant()).get_coordinates(), Earth.equatorial_radius, Earth.flattening)\n\n return [float(lla.get_latitude().in_degrees()), float(lla.get_longitude().in_degrees()), float(lla.get_altitude().in_meters())]\n\ndef compute_aer (instant, from_lla, to_position):\n \n nedFrame = 
earth.get_frame_at(from_lla, Earth.FrameType.NED)\n\n fromPosition_NED = from_position.in_frame(nedFrame, instant)\n sunPosition_NED = to_position.in_frame(nedFrame, instant)\n\n aer = AER.from_position_to_position(fromPosition_NED, sunPosition_NED, True)\n \n return [float(aer.get_azimuth().in_degrees()), float(aer.get_elevation().in_degrees()), float(aer.get_range().in_meters())]\n\ndef compute_time_lla_aer_state (state):\n \n instant = state.get_instant()\n \n lla = compute_lla(state)\n aer = compute_aer(instant, from_lla, state.get_position().in_frame(Frame.ITRF(), state.get_instant()))\n\n return [instant, lla[0], lla[1], lla[2], aer[0], aer[1], aer[2]]\n\ndef compute_trajectory_geometry (aTrajectory, anInterval):\n\n return [compute_lla(state) for state in aTrajectory.get_states_at(anInterval.generate_grid(Duration.minutes(1.0)))]\n\ndef compute_access_geometry (access):\n\n return [compute_time_lla_aer_state(state) for state in satellite_orbit.get_states_at(access.get_interval().generate_grid(Duration.seconds(1.0)))]", "_____no_output_____" ], [ "satellite_orbit_geometry_df = pd.DataFrame(compute_trajectory_geometry(satellite_orbit, interval), columns=['Latitude', 'Longitude', 'Altitude'])", "_____no_output_____" ], [ "satellite_orbit_geometry_df.head()", "_____no_output_____" ], [ "access_geometry_dfs = [pd.DataFrame(compute_access_geometry(access), columns=['Time', 'Latitude', 'Longitude', 'Altitude', 'Azimuth', 'Elevation', 'Range']) for access in accesses] ;", "_____no_output_____" ], [ "def get_max_elevation (df):\n \n return df.loc[df['Elevation'].idxmax()]['Elevation']", "_____no_output_____" ] ], [ [ "And plot the geometries onto a map:", "_____no_output_____" ] ], [ [ "data = []\n\n# Target geometry\n\ndata.append(\n dict(\n type = 'scattergeo',\n lon = [float(longitude.in_degrees())],\n lat = [float(latitude.in_degrees())],\n mode = 'markers',\n marker = dict(\n size = 10,\n color = 'orange'\n )\n )\n)\n\n# Orbit geometry\n\ndata.append(\n dict(\n type = 'scattergeo',\n lon = satellite_orbit_geometry_df['Longitude'],\n lat = satellite_orbit_geometry_df['Latitude'],\n mode = 'lines',\n line = dict(\n width = 1,\n color = 'rgba(0, 0, 0, 0.1)',\n )\n )\n)\n\n# Access geometry\n\nfor access_geometry_df in access_geometry_dfs:\n \n data.append(\n dict(\n type = 'scattergeo',\n lon = access_geometry_df['Longitude'],\n lat = access_geometry_df['Latitude'],\n mode = 'lines',\n line = dict(\n width = 1,\n color = 'red',\n )\n )\n )\n \nlayout = dict(\n title = None,\n showlegend = False,\n height = 1000,\n geo = dict(\n showland = True,\n landcolor = 'rgb(243, 243, 243)',\n countrycolor = 'rgb(204, 204, 204)',\n ),\n )\n \nfigure = go.Figure(data = data, layout = layout)\n\nfigure.show()", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]