repo_name: stringlengths (6 to 77)
path: stringlengths (8 to 215)
license: stringclasses (15 values)
content: stringlengths (335 to 154k)
Aryan-Barbarian/bigbang
examples/Corr between centrality and community 0.1.ipynb
gpl-2.0
%matplotlib inline from bigbang.archive import Archive import bigbang.parse as parse import bigbang.graph as graph import bigbang.mailman as mailman import bigbang.process as process import networkx as nx import matplotlib.pyplot as plt import pandas as pd from pprint import pprint as pp import pytz import numpy as np import math from itertools import repeat urls = ["http://mail.scipy.org/pipermail/ipython-dev/", "http://mail.scipy.org/pipermail/ipython-user/", "http://mail.scipy.org/pipermail/scipy-dev/", "http://mail.scipy.org/pipermail/scipy-user/", "http://mail.scipy.org/pipermail/numpy-discussion/"] archives= [Archive(url,archive_dir="../archives") for url in urls] """ Explanation: An IPython notebook that explores the relationship(correlation) between betweenness centrality and community membership of a number of mailing-lists in a given time period. End of explanation """ date_from_whole = [2010,1] #Include June(Start month) date_to_whole = [2012,12] #Include December(End month) total_month = (date_to_whole[0] - date_from_whole[0])*12 + (date_to_whole[1]-date_from_whole[1]+1) date_from = [] date_to = [] temp_year = date_from_whole[0] temp_month = date_from_whole[1] for i in range(total_month): date_from.append(pd.datetime(temp_year,temp_month,1,tzinfo=pytz.utc)) if temp_month == 12: temp_year += 1 temp_month = 0 date_to.append(pd.datetime(temp_year,temp_month+1,1,tzinfo=pytz.utc)) temp_month += 1 def filter_by_date(df,d_from,d_to): return df[(df['Date'] > d_from) & (df['Date'] < d_to)] IG = [] for k in range(total_month): dfs = [filter_by_date(arx.data, date_from[k], date_to[k]) for arx in archives] bdf = pd.concat(dfs) IG.append(graph.messages_to_interaction_graph(bdf)) #RG = graph.messages_to_reply_graph(messages) #IG = graph.messages_to_interaction_graph(bdf) bc = [] for j in range(total_month): bc.append(pd.Series(nx.betweenness_centrality(IG[j]))) len(bc) """ Explanation: The following sets start month and end month, both inclusive. End of explanation """ new_dict = [{} for i in repeat(None, total_month)] new_dict1 = [{} for i in repeat(None, total_month)] for t in range(total_month): filtered_activity = [] for i in range(5): df = archives[i].data fdf = filter_by_date(df,date_from[t],date_to[t]) filtered_activity.append(Archive(fdf).get_activity().sum()) for k in range(len(filtered_activity)): for g in range(len(filtered_activity[k])): original_key = filtered_activity[k].keys()[g] new_key = (original_key[original_key.index("(") + 1:original_key.rindex(")")]) if new_key not in new_dict[t]: new_dict[t][new_key] = 0 new_dict1[t][new_key] = 0 new_dict[t][new_key] += math.log(filtered_activity[k].get_values()[g]+1) #can define community membership by changing the above line. #example, direct sum of emails would be new_dict1[t][new_key] += filtered_activity[k].get_values()[g] for i in range(len(new_dict1)): [x+1 for x in new_dict1[i].values()] [np.log(x) for x in new_dict1[i].values()] #check if there's name difference, return nothing if perfect. 
for i in range(total_month): set(new_dict[i].keys()).difference(bc[i].index.values) set(bc[i].index.values).difference(new_dict[i].keys()) set(new_dict1[i].keys()).difference(bc[i].index.values) set(bc[i].index.values).difference(new_dict1[i].keys()) #A list of corresponding betweenness centrality and community membership for all users, monthly comparison = [] comparison1 = [] for i in range(len(new_dict)): comparison.append(pd.DataFrame([new_dict[i], bc[i]])) comparison1.append(pd.DataFrame([new_dict1[i], bc[i]])) corr = [] corr1 = [] for i in range(len(new_dict)): corr.append(np.corrcoef(comparison[i].get_values()[0],comparison[i].get_values()[1])[0,1]) corr1.append(np.corrcoef(comparison1[i].get_values()[0],comparison1[i].get_values()[1])[0,1]) corr1 #Blue as sum of log, red as log of sum, respect to community membership x = range(1,total_month+1) y = corr plt.plot(x, y, marker='o') z = corr1 plt.plot(x, z, marker='o', linestyle='--', color='r') """ Explanation: new_dict is a dictionary with keys as users' names, and values of their community membership(can have different interpretation) Here the community membership for a user is defined as sum of log(Ni + 1), with Ni corresponds to the number of emails a user sent to Mailing list i. End of explanation """
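A note on the notebook above: the two list comprehensions applied to `new_dict1` (`[x+1 for x in new_dict1[i].values()]` and `[np.log(x) for x in new_dict1[i].values()]`) build throwaway lists and never write the results back, so `corr1` is computed from raw email sums rather than their logarithm. Below is a minimal sketch of how the intended "log of sum" transformation could be applied; the function name is hypothetical, and it assumes the per-month membership dicts have the same shape as `new_dict1` in the notebook.

```python
import numpy as np

def log_of_sum(membership_dicts):
    """Return copies of the per-month membership dicts with each raw email
    count replaced by log(count + 1), i.e. the intended 'log of sum' score."""
    out = []
    for month in membership_dicts:
        out.append({user: np.log(count + 1) for user, count in month.items()})
    return out

# e.g. new_dict1 = log_of_sum(new_dict1) before building `comparison1` and `corr1`
```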
aimalz/qp
docs/desc-0000-qp-photo-z_approximation/research/data_exploration.ipynb
mit
%load_ext autoreload %autoreload 2 from __future__ import print_function import hickle import numpy as np from pathos.multiprocessing import ProcessingPool as Pool import random import cProfile import pstats import StringIO import timeit import psutil import sys import os import timeit import pandas as pd pd.set_option('display.max_columns', None) import matplotlib.pyplot as plt %matplotlib inline import qp from qp.utils import calculate_kl_divergence as make_kld np.random.seed(seed=42) random.seed(a=42) """ Explanation: Exploring BPZ Test Data Alex Malz (NYU) & Phil Marshall (SLAC) In this notebook we develop machinery to evaluate our approximations on whole datasets in "survey mode." End of explanation """ # choose one of these: # dataset_key = 'Euclid'# Melissa Graham's data dataset_key = 'LSST'# Sam Schmidt's data dataname = dataset_key dataset_info = {} dataset_info[dataset_key] = {} """ Explanation: Set-up, Ingest There are two datasets available: $10^{5}$ LSST-like mock data provided by Sam Schmidt (UC Davis, LSST $10^{4}$ Euclid-like mock data provided by Melissa Graham (UW, LSST) End of explanation """ if dataset_key == 'Euclid': datafilename = 'bpz_euclid_test_10_2.probs' elif dataset_key == 'LSST': datafilename = 'test_magscat_trainingfile_probs.out' dataset_info[dataset_key]['filename'] = datafilename """ Explanation: Both datasets are fit with BPZ. End of explanation """ if dataset_key == 'Euclid': z_low = 0.01 z_high = 3.51 elif dataset_key == 'LSST': z_low = 0.005 z_high = 2.11 dataset_info[dataset_key]['z_lim'] = (z_low, z_high) z_grid = np.arange(z_low, z_high, 0.01, dtype='float') z_range = z_high - z_low delta_z = z_range / len(z_grid) dataset_info[dataset_key]['z_grid'] = z_grid dataset_info[dataset_key]['delta_z'] = delta_z """ Explanation: The data files don't appear to come with information about the native format or metaparameters, but we are told they're evaluations on a regular grid of redshifts with given endpoints and number of parameters. End of explanation """ ## Warning: reading in the data is slow for Sam Schmidt's dataset! with open(dataset_info[dataset_key]['filename'], 'rb') as data_file: lines = (line.split(None) for line in data_file) lines.next() pdfs = np.array([[float(line[k]) for k in range(1,len(line))] for line in lines]) # dataset_info[dataset_key]['native_pdfs'] = pdfs print('storage footprint '+str(sys.getsizeof(pdfs))+' bytes') """ Explanation: Let's read in the catalog data. Note that it has a sizeable footprint even for a "small" number of galaxies. End of explanation """ # colors = ['red','green','blue','cyan','magenta','yellow'] # n_plot = len(colors) # # if dataset_key == 'mg': # # indices = [1, 3, 14, 16, 19, 21] # # elif dataset_key == 'ss': # n_gals_tot = len(pdfs) # full_gal_range = range(n_gals_tot) # indices = np.random.choice(full_gal_range, n_plot) # for i in range(n_plot): # plt.plot(dataset_info[dataset_key]['z_grid'], pdfs[indices[i]], # color=colors[i], label=dataset_key+' #'+str(indices[i])) # plt.xlabel(r'$z$', fontsize=16) # plt.ylabel(r'$p(z)$', fontsize=16) # plt.title(dataset_key+' mock catalog') # plt.legend() # plt.savefig('pz_placeholder_'+dataset_key+'.pdf', dpi=250) """ Explanation: Visualizing the BPZ $p(z)$'s Let's plot a few interesting PDFs from the dataset. 
End of explanation """ if dataset_key == 'Euclid': chosen = 5390 elif dataset_key == 'LSST': # chosen = 108019 indices = [ 12543, 52661, 46216, 53296, 95524, 84574 , 2607 ,56017 , 64794, 7600] chosen = indices[9] start_time = timeit.default_timer() G = qp.PDF(gridded=(dataset_info[dataset_key]['z_grid'], pdfs[chosen])) print(timeit.default_timer() - start_time) G.plot() """ Explanation: Note: BPZ PDFs are not properly normalized. In order to be true PDFs, we want $\int_{-\infty}^{\infty} p(z) dz = 1$, but the data file entries satisfy $\sum {z=z_min}^{z{max}} p(z) = 1$, which is not in general the same. qp approximates the desired integral as $1 = \int p(z) dz \approx \Delta_{z} \sum_{z=z_{min}}^{z_{max}} p(z)$ where $\Delta_{z} = \frac{z_{max} - z_{min}}{N_{ff}}$, where the native format PDF is evaluated at $N_{ff}$ redshifts. Approximating the BPZ $p(z)'s$ Let's pick out a galaxy with an interesting $p(z)$ to turn into a qp.PDF object initialized with a gridded parametrization. End of explanation """ if dataset_key == 'Euclid': nc_needed = 3 elif dataset_key == 'LSST': nc_needed = 5 dataset_info[dataset_key]['N_GMM'] = nc_needed """ Explanation: qp cannot currently convert gridded PDFs to histograms or quantiles - we need to make a GMM first, and use this to instantiate a qp.PDF object using a qp.composite object based on that GMM as qp.PDF.truth. The number of parameters necessary for a qualitatively good fit depends on the characteristics of the dataset. End of explanation """ start_time = timeit.default_timer() G.mix_mod_fit(n_components=dataset_info[dataset_key]['N_GMM'], using='gridded', vb=True) time = timeit.default_timer() - start_time print(str(time)+' for GMM fit to gridded') G.plot() """ Explanation: We can fit a GMM directly to the gridded PDF (via an internal interpolation). The direct fit, however, is not guaranteed to converge, particularly if the underlying distribution is not actually well-described by a weighted sum of Gaussians -- this is why storing the GMM parameters instead of a non-parametric format can be dangerous. End of explanation """ high_res = 1000 start_time = timeit.default_timer() G.sample(high_res, vb=False) G.mix_mod_fit(n_components=dataset_info[dataset_key]['N_GMM'], using='samples', vb=True) time = timeit.default_timer() - start_time print(str(time)+' for GMM fit to samples') G.plot() """ Explanation: The alternative is to take a large number of samples and fit a GMM to those (via the same internal interpolation). We can check that the fits are very similar. Though it is slower, we will sample before fitting to guarantee convergence. End of explanation """ N_f = 7 M = qp.PDF(truth=G.mix_mod, limits=dataset_info[dataset_key]['z_lim']) M.quantize(N=N_f, vb=False) M.histogramize(N=N_f, binrange=dataset_info[dataset_key]['z_lim'], vb=False) M.sample(N=N_f, using='truth', vb=False) M.plot(loc=dataset_key+'_example_pz.pdf', vb=True) """ Explanation: The qp.composite object can be used as the qp.PDF.truth to initialize a new qp.PDF object that doesn't have any information about the gridded or sample approximations but has a qualitatively similar shape and is thus "realistically complex" enough to draw conclusions about real data. Now we can approximate it any way we like! Consider this example for $N_f=7$ parameters. 
End of explanation """ formats = ['quantiles', 'histogram', 'samples'] parametrizations = {} for f in formats: parametrizations[f] = {} for ff in formats: parametrizations[f][ff] = None parametrizations['quantiles']['quantiles'] = M.quantiles parametrizations['histogram']['histogram'] = M.histogram parametrizations['samples']['samples'] = M.samples dataset_info[dataset_key]['inits'] = parametrizations klds = {} P = qp.PDF(truth=M.truth) for f in formats: Q = qp.PDF(quantiles=dataset_info[dataset_key]['inits'][f]['quantiles'], histogram=dataset_info[dataset_key]['inits'][f]['histogram'], samples=dataset_info[dataset_key]['inits'][f]['samples']) klds[f] = make_kld(P, Q) print(klds) """ Explanation: Quantifying the Accuracy of the Approximation We can also calculate the KLD metric on this qp.PDF. The KLD quantifies the information loss of an approximation of a PDF relative to the true PDF in units of nats. Thus, a lower KLD corresponds to more information being preserved in the approximation. End of explanation """ n_gals_tot = len(pdfs) n_gals_use = 100 full_gal_range = range(n_gals_tot) subset = np.random.choice(full_gal_range, n_gals_use) pdfs_use = pdfs[subset] # using the same grid for output as the native format, but doesn't need to be so dataset_info[dataset_key]['in_z_grid'] = dataset_info[dataset_key]['z_grid'] dataset_info[dataset_key]['metric_z_grid'] = dataset_info[dataset_key]['z_grid'] n_floats_use = 10 if dataset_key == 'Euclid': dataset_info[dataset_key]['N_GMM'] = 3 elif dataset_key == 'LSST': dataset_info[dataset_key]['N_GMM'] = 5 fit_components = dataset_info[dataset_key]['N_GMM'] n_moments_use = 3 colors = {'quantiles':'b', 'histogram':'r', 'samples':'g'} """ Explanation: Survey Mode We want to compare parametrizations for large catalogs, so we'll need to be more efficient. The qp.Ensemble object is a wrapper for qp.PDF objects enabling conversions to be performed and metrics to be calculated in parallel. We'll experiment on a subsample of 100 galaxies. 
End of explanation """ def setup_from_grid(in_pdfs, z_grid, N_comps, high_res=1000): #read in the data, happens to be gridded zlim = (min(z_grid), max(z_grid)) N_pdfs = len(in_pdfs) # plot_examples(N_pdfs, z_grid, pdfs) print('making the initial ensemble of '+str(N_pdfs)+' PDFs') E0 = qp.Ensemble(N_pdfs, gridded=(z_grid, in_pdfs), vb=True) print('made the initial ensemble of '+str(N_pdfs)+' PDFs') #fit GMMs to gridded pdfs based on samples (faster than fitting to gridded) print('sampling for the GMM fit') samparr = E0.sample(high_res, vb=False) print('took '+str(high_res)+' samples') print('making a new ensemble from samples') Ei = qp.Ensemble(N_pdfs, samples=samparr, vb=False) print('made a new ensemble from samples') print('fitting the GMM to samples') GMMs = Ei.mix_mod_fit(comps=N_comps, vb=False) print('fit the GMM to samples') #set the GMMS as the truth print('making the final ensemble') Ef = qp.Ensemble(N_pdfs, truth=GMMs, vb=False) print('made the final ensemble') return(Ef) # return def plot_examples(z_grid, pdfs, n_plot=6): N_pdfs =len(pdfs) randos = np.random.choice(range(N_pdfs), n_plot) for i in range(n_plot): plt.plot(z_grid, pdfs[randos[i]], label=dataset_key+r'\#'+str(randos[i])) plt.xlabel(r'$z$', fontsize=16) plt.ylabel(r'$p(z)$', fontsize=16) plt.title(dataset_key+' mock catalog') plt.legend() plt.savefig('pz_placeholder_'+dataset_key+'.png', dpi=250) # pr = cProfile.Profile() # pr.enable() catalog = setup_from_grid(pdfs_use, dataset_info[dataset_key]['in_z_grid'], fit_components) # pr.disable() # s = StringIO.StringIO() # sortby = 'cumtime' # ps = pstats.Stats(pr, stream=s).sort_stats(sortby) # ps.print_stats() # print(s.getvalue()) plot_examples(dataset_info[dataset_key]['in_z_grid'], pdfs_use, n_plot=6) """ Explanation: We'll start by reading in our catalog of gridded PDFs, sampling them, fitting GMMs to the samples, and establishing a new qp.Ensemble object where each meber qp.PDF object has qp.PDF.truth$\neq$None. 
End of explanation """ def analyze_individual(E, z_grid, N_floats, N_moments=4): zlim = (min(z_grid), max(z_grid)) z_range = zlim[-1] - zlim[0] delta_z = z_range / len(z_grid) Eq, Eh, Es = E, E, E inits = {} for f in formats: inits[f] = {} for ff in formats: inits[f][ff] = None print('performing quantization') inits['quantiles']['quantiles'] = Eq.quantize(N=N_floats, vb=False) print('performing histogramization') inits['histogram']['histogram'] = Eh.histogramize(N=N_floats, binrange=zlim, vb=False) print('performing sampling') inits['samples']['samples'] = Es.sample(samps=N_floats, vb=False) print('making the approximate ensembles') Eo ={} for f in formats: Eo[f] = qp.Ensemble(E.n_pdfs, truth=E.truth, quantiles=inits[f]['quantiles'], histogram=inits[f]['histogram'], samples=inits[f]['samples']) print('made the approximate ensembles') print('calculating the individual metrics') klds = {} metrics = {} moments = {} for key in Eo.keys(): print('starting '+key) klds[key] = Eo[key].kld(using=key, limits=zlim, dx=delta_z) samp_metric = qp.PDF(samples=klds[key]) gmm_metric = samp_metric.mix_mod_fit(n_components=dataset_info[dataset_key]['N_GMM'], using='samples') metrics[key] = qp.PDF(truth=gmm_metric) moments[key] = [] for n in range(N_moments+1): moments[key].append([qp.utils.calculate_moment(metrics[key], n, using=key, limits=zlim, dx=delta_z, vb=False)]) print('finished with '+key) print('calculated the individual metrics') # plot_individual(klds, N_floats) return(Eo, klds, moments) def plot_individual(pz_klds, N_floats): colors = {'quantiles':'b', 'histogram':'r', 'samples':'g'} plot_bins = np.linspace(-3., 3., 20) for key in pz_klds.keys(): plt.hist(np.log(pz_klds[key]), color=colors[key], alpha=0.5, label=key, normed=True, bins=plot_bins) plt.legend() plt.ylabel('frequency') plt.xlabel(r'$\log[KLD]$') plt.title(dataset_key+r' dataset with $N_{f}='+str(N_floats)+r'$') plt.savefig(dataset_key+'_metric_histogram_placeholder.png', dpi=250) # pr = cProfile.Profile() # pr.enable() (ensembles, pz_klds, metric_moments) = analyze_individual(catalog, dataset_info[dataset_key]['metric_z_grid'], n_floats_use, n_moments_use) dataset_info[dataset_key]['pz_klds'] = pz_klds dataset_info[dataset_key]['pz_kld_moments'] = metric_moments plot_individual(pz_klds, n_floats_use) # pr.disable() # s = StringIO.StringIO() # sortby = 'cumtime' # ps = pstats.Stats(pr, stream=s).sort_stats(sortby) # ps.print_stats() # print(s.getvalue()) """ Explanation: Next, we compute the KLD between each approximation and the truth for every member of the ensemble. We make the qp.Ensemble.kld into a qp.PDF object of its own to compare the moments of the KLD distributions for different parametrizations. 
End of explanation """ def analyze_stacked(E0, E, z_grid): zlim = (min(z_grid), max(z_grid)) z_range = zlim[-1] - zlim[0] delta_z = z_range / len(z_grid) parametrizations = E.keys() print('stacking the ensembles') stacked_pdfs = {} for key in formats: stacked_pdfs[key] = qp.PDF(gridded=E[key].stack(z_grid, using=key, vb=False)[key]) stacked_pdfs['truth'] = qp.PDF(gridded=E0.stack(z_grid, using='truth', vb=False)['truth']) print('stacked the ensembles') print('calculating the metrics') klds = {} for key in parametrizations: klds[key] = qp.utils.calculate_kl_divergence(stacked_pdfs['truth'], stacked_pdfs[key], limits=zlim, dx=delta_z) print('calculated the metrics') # plot_estimators(z_grid, stacked_pdfs, klds) return(stacked_pdfs, klds) def plot_estimators(z_grid, stacked_pdfs, klds): colors = {'quantiles':'b', 'histogram':'r', 'samples':'g'} plt.title(r'$\hat{n}(z)$ for '+str(n_floats_use)+' numbers') plt.plot(z_grid, stacked_pdfs['truth'].evaluate(z_grid, vb=False)[1], color='black', lw=4, alpha=0.3, label='truth') for key in formats: plt.plot(z_grid, stacked_pdfs[key].evaluate(z_grid, vb=False)[1], label=key+' KLD='+str(klds[key]), color=colors[key]) plt.xlabel(r'$z$') plt.ylabel(r'$\hat{n}(z)$') plt.legend() plt.title(r'$\hat{n}(z)$ for '+str(n_floats_use)+' numbers') plt.savefig(dataset_key+'_nz_comparison.png', dpi=250) # pr = cProfile.Profile() # pr.enable() (stack_evals, nz_klds) = analyze_stacked(catalog, ensembles, dataset_info[dataset_key]['metric_z_grid']) dataset_info[dataset_key]['nz_ests'] = stack_evals dataset_info[dataset_key]['nz_klds'] = nz_klds plot_estimators(dataset_info[dataset_key]['metric_z_grid'], stack_evals, nz_klds) # pr.disable() # s = StringIO.StringIO() # sortby = 'cumtime' # ps = pstats.Stats(pr, stream=s).sort_stats(sortby) # ps.print_stats() # print(s.getvalue()) """ Explanation: Finally, we calculate metrics on the stacked estimator $\hat{n}(z)$ that is the average of all members of the ensemble. End of explanation """ if os.path.exists('nz_metrics.hkl'): with open('nz_metrics.hkl', 'r') as nz_file: #read in content of list/dict nz_stats = hickle.load(nz_file) else: nz_stats = {} nz_stats['N_f'] = [] if N_f not in nz_stats['N_f']: nz_stats['N_f'].append(N_f) where_N_f = nz_stats['N_f'].index(N_f) if dataset_key not in nz_stats.keys(): nz_stats[dataset_key] = {} for f in parametrizations:#change this name to formats nz_stats[dataset_key][f] = [[]] for f in parametrizations: nz_stats[dataset_key][f][where_N_f].append(dataset_info[dataset_key]['nz_klds'][f]) with open('nz_metrics.hkl', 'w') as nz_file: hickle.dump(nz_stats, nz_file) """ Explanation: We save the data so we can remake the plots later without running everything again. Scaling We'd like to do this for many values of $N_{f}$ as well as larger catalog subsamples, repeating the analysis many times to establish error bars on the KLD as a function of format, $N_{f}$, and dataset. The things we want to plot across multiple datasets/number of parametes are: KLD of stacked estimator, i.e. N_f vs. nz_output[dataset][format][instantiation][KLD_val_for_N_f] moments of KLD of individual PDFs, i.e. n_moment, N_f vs. pz_output[dataset][format][n_moment][instantiation][moment_val_for_N_f] So, we ned to make sure these are saved! 
End of explanation """ with open('nz_metrics.hkl', 'r') as nz_file: nz_stats = hickle.load(nz_file) colors = {'quantiles':'b', 'histogram':'r', 'samples':'g'} # need to get some version of this working from nz_klds plt.figure(figsize=(5, 5)) for f in parametrizations.keys(): data_arr = np.swapaxes(np.array(nz_stats[dataset_key][f]), 0, 1)#turn N_f * instantiations into instantiations * N_f n_i = len(data_arr) a = 1./n_i plt.plot([2 * max(nz_stats['N_f']), 2 * max(nz_stats['N_f'])], [1., 10.], color=colors[f], alpha=a, label=f) for i in data_arr: # will be regular plot not scatter with more N_f options plt.plot(nz_stats['N_f'], i[0], color=colors[f], alpha=a) plt.semilogy() plt.semilogx() plt.xlim(min(nz_stats['N_f'])-1, max(nz_stats['N_f'])+1) plt.ylim(1., 10.) plt.xlabel(r'number of parameters') plt.ylabel(r'KLD') plt.legend() plt.title(r'$\hat{n}(z)$ KLD on '+str(n_gals_use)+' from '+dataset_key) plt.savefig(dataset_key+'_nz_metrics_placeholder.png', dpi=250) # won't really know how this looks without more N_f tested """ Explanation: We want to plot the KLD on $\hat{n}(z)$ for all formats as $N_{f}$ changes. We want to repeat this for many subsamples of the catalog to establush error bars on the KLD values. End of explanation """ if os.path.exists('pz_metrics.hkl'): with open('pz_metrics.hkl', 'r') as pz_file: #read in content of list/dict pz_stats = hickle.load(pz_file) else: pz_stats = {} pz_stats['N_f'] = [] if N_f not in pz_stats['N_f']: pz_stats['N_f'].append(N_f) where_N_f = pz_stats['N_f'].index(N_f) if dataset_key not in pz_stats.keys(): pz_stats[dataset_key] = {} for f in parametrizations:#change this name to formats pz_stats[dataset_key][f] = [] for m in range(n_moments_use + 1): pz_stats[dataset_key][f].append([[]]) if N_f not in pz_stats['N_f']: pz_stats[dataset_key][f][m].append([]) for f in parametrizations: for m in range(n_moments_use + 1): pz_stats[dataset_key][f][m][where_N_f].append(dataset_info[dataset_key]['pz_kld_moments'][f][m]) with open('pz_metrics.hkl', 'w') as pz_file: hickle.dump(pz_stats, pz_file) with open('pz_metrics.hkl', 'r') as pz_file: pz_stats = hickle.load(pz_file) def make_patch_spines_invisible(ax): ax.set_frame_on(True) ax.patch.set_visible(False) for sp in ax.spines.values(): sp.set_visible(False) shapes = ['o','+','x','v','^','<','>'] fig, ax = plt.subplots() fig.subplots_adjust(right=1.) ax_n = ax for key in parametrizations.keys(): ax_n.plot([-1], [0], color=colors[key], label=key) for n in range(1, 4): ax.scatter([-1], [0], color='k', marker=shapes[n-1], label='moment '+str(n)) if n>1: ax_n = ax.twinx() if n>2: ax_n.spines["right"].set_position(("axes", 1. + 0.1 * (n-1))) make_patch_spines_invisible(ax_n) ax_n.spines["right"].set_visible(True) for f in parametrizations.keys(): data_arr = np.swapaxes(np.array(pz_stats[dataset_key][f][n]), 0, 1) n_i = len(data_arr) a = 1./n_i for i in data_arr: ax_n.scatter(pz_stats['N_f'], i, marker=shapes[n-1], color=colors[f], alpha=a) ax_n.set_ylabel('moment '+str(n)) ax.set_xlim(1,1000)#should be N_f range and logged ax.semilogx() ax.set_xlabel('number of parameters') ax.legend() fig.suptitle('KLD moments on '+str(n_gals_use)+' from '+dataset_key) fig.savefig(dataset_key+'_pz_metrics_placeholder.png', dpi=250) """ Explanation: We want to plot the moments of the KLD distribution for each format as $N_{f}$ changes. End of explanation """ ## everything works above here! now it's time to make plots from this output! # # Function to test the experimental qp.Ensemble object! 
# def analyze():#(pdfs, N_comps, z, N_floats): # #read in the data, happens to be gridded # z_low, z_high = min(z), max(z) # N_pdfs = len(pdfs) # out_E = {} # E0 = qp.Ensemble(N_pdfs, gridded=(z, pdfs), vb=False) # #fit gridded pdfs as GMMs based on samples # samparr = E0.sample(1000, vb=False) # print(np.shape(samparr)) # Ei = qp.Ensemble(N_pdfs, samples=samparr, vb=False) # GMMs = Ei.mix_mod_fit(comps=N_comps, using='samples', vb=False) # # out_E['GMMs'] = [] # # for GMM in GMMs: # # out_E['GMMs'].append(GMM.functions[0].stats()) # #set the GMMS as the truth # Ef = qp.Ensemble(N_pdfs, truth=GMMs, vb=False) # #stack them and save the output # out_E['truth'] = Ef.stack(z, using='mix_mod', vb=False) # # #evaluate as gridded and save the output # # Et = qp.Ensemble(N_pdfs, gridded=Ef.evaluate(z)) # # out_E['gridded'] = Et.stack(z, using='gridded') # #evaluate as quantiles and save the output # Eq = qp.Ensemble(N_pdfs, quantiles=Ef.quantize(N=N_floats), vb=False) # #q_stack = Eq.stack(z, using='quantiles') # out_E['quantiles'] = Eq.stack(z, using='quantiles', vb=False) # # #evaluate as histogram and save the output # # Eh = qp.Ensemble(N_pdfs, histogram=Ef.histogramize(N=N_floats, binrange=(z_low, z_high))) # # #h_stack = Eh.stack(z, using='histogram') # # out_E['histogram'] = Eh.stack(z, using='histogram') # # #evaluate as samples and save the output # # Es = qp.Ensemble(N_pdfs, samples=Ef.sample(samps=N_floats)) # # #s_stack = Es.stack(z, using='samples') # # out_E['samples'] = Es.stack(z, using='samples') # return(out_E)#, KLDs, RMSEs) """ Explanation: Okay, now all I have to do is have this loop over both datasets, number of galaxies, and number of floats! Everything after here is scratch. That's all, folks! End of explanation """ # print(n_gals_use, n_floats_use, s.getvalue()) """ Explanation: Let's run a test with 100 galaxies and 10 parameters. This should take about 5 minutes or so. End of explanation """ # print(results.keys()) # print(results['truth']['mix_mod']) # KLDs, RMSEs = {}, {} # P = qp.PDF(gridded=results['truth']['mix_mod']) # metric_keys = results.keys() # metric_keys.remove('truth') # for est in metric_keys: # Q = qp.PDF(gridded=results[est][est]) # KLDs[est] = qp.utils.calculate_kl_divergence(P, Q, vb=False) # RMSEs[est] = qp.utils.calculate_rmse(P, Q, vb=False) # plt.plot(results[est][est][0], results[est][est][1], label=est) # plt.legend() # print(KLDs, RMSEs) """ Explanation: Let's show the stacked versions and compute metrics. End of explanation """ # P = qp.PDF(gridded=stack_ests['truth']) # KLDs, RMSEs = {}, {} # for est in .keys(): # Q = qp.PDF(gridded=stack_ests[est]) # KLDs[est] = qp.utils.calculate_kl_divergence(P, Q, vb=False) # RMSEs[est] = qp.utils.calculate_rmse(P, Q, vb=False) """ Explanation: Things are quite broken after this point! End of explanation """ # moments = np.array(results['stats']).T # fit_stats = moments[1] # plt.hist(np.log(fit_stats)) """ Explanation: Let's plot the log standard deviations of the first component of the mixture models. End of explanation """ # D = qp.PDF(samples = np.log(fit_stats)) # T = D.mix_mod_fit(n_components=1) # D.plot() # print(np.exp(T.functions[0].stats())) """ Explanation: Let's check the distribution of standard deviations of the ensemble. 
End of explanation """ # this ends the test of the experimental qp.Ensemble object # you may now return to your regularly scheduled programming # def analyze_one(index, N_comps, z, N_floats, logfilename='logfile.txt', vb=False): # """ # Model the input BPZ P(z) as a GMM, approximate that GMM in # various ways, and assess the quality of each approximation. # Parameters # ---------- # index : int # ID of galaxy # N_comps : int # Number of components used in GMM # N_floats : int # Number of floats used to parametrize the P(z) # z : float, ndarr # Redshift array for input gridded "truth". Used for # evaluating n(z) too # logfilename: string # where to put logging information # vb : boolean # Verbose output? # Returns # ------- # result : dict # Dictionary containing metric values, n(z) on standard # grid, samples, "true" GMM gridded p(z). # Notes # ----- # In some cases the GMM does not fit well, leading to bad KLD and # RMSE values when it is compared to the truth. # """ # # # Make z array if we don't already have it: # # if z is None: # # z = np.arange(0.01, 3.51, 0.01, dtype='float') # dz = (max(z) - min(z)) / len(z) # zlimits = [min(z), max(z)] # # Make a dictionary to contain the results: # result = {} # # Make a GMM model of the input BPZ p(z) (which are stored # # in the global 'pdfs' variable: # G = qp.PDF(gridded=(z, pdfs[index]), vb=vb) # # Draw 1000 samples, fit a GMM model to them, and make a true PDF: # G.sample(1000, vb=vb) # GMM = G.mix_mod_fit(n_components=N_comps, vb=vb) # P = qp.PDF(truth=GMM, vb=vb) # # Evaluate the GMM on the z grid, and store in the result dictionary. We'll # # need this to make our "true" n(z) estimator. We don't need to keep the # # z array, as we passed that in. # result['truth'] = P.evaluate(z, using='truth', vb=vb)[1] # # Now approximate P in various ways, and assess: # Q, KLD, RMSE, approximation = {}, {}, {}, {} # Q['quantiles'] = qp.PDF(quantiles=P.quantize(N=N_floats, vb=vb), vb=vb) # Q['histogram'] = qp.PDF(histogram=P.histogramize(N=N_floats, binrange=zlimits, vb=vb), vb=vb) # Q['samples'] = qp.PDF(samples=P.sample(N=N_floats, vb=vb), vb=vb) # for k in Q.keys(): # KLD[k] = qp.calculate_kl_divergence(P, Q[k], limits=zlimits, dx=dz, vb=vb) # RMSE[k] = qp.calculate_rmse(P, Q[k], limits=zlimits, dx=dz, vb=vb) # approximation[k] = Q[k].evaluate(z, using=k, vb=vb)[1] # # Store approximations: # result['KLD'] = KLD # result['RMSE'] = RMSE # result['approximation'] = approximation # result['samples'] = Q['samples'].samples # with open(logfilename, 'a') as logfile: # logfile.write(str((index, timeit.default_timer() - start_time))+'\n') # return result """ Explanation: Now enough of the qp.Ensemble functionality has been implemented to merge into the master branch! 
End of explanation """ # def one_analysis(N): # all_results[str(N)] = [] # pr = cProfile.Profile() # pr.enable() # # with qp.Ensemble # n_gals_tot = len(pdfs) # full_gal_range = range(n_gals_tot) # subset = np.random.choice(full_gal_range, n_gals) # pdfs_use = pdfs[subset] # all_results[str(N)] = analyze(pdfs_use, nc_needed, z, N) # # # if multiprocessing: # # logfilename = dataname + str(n_gals) + 'multi' + str(N)+'.txt' # # def help_analyze(i): # # return analyze_one(i, nc_needed, z, N, logfilename=logfilename) # # pool = Pool(psutil.cpu_count() - 1) # # results = pool.map(help_analyze, range(n_gals)) # # all_results[str(N)] = results # # # tl;dr Tmax=270s for N_floats=3, 100 galaxies, 3 processors # # # if looping: # # logfilename = dataname + str(n_gals) + 'loop' + str(N)+'.txt' # # for i in range(100): # # all_results[str(N)].append(analyze_one(i, 2, z, N, logfilename=logfilename)) # # if i%10 == 0: print('.', end='') # # # tl;dr Tmax=352s for N_floats=3, 100 galaxies # pr.disable() # s = StringIO.StringIO() # sortby = 'cumtime' # ps = pstats.Stats(pr, stream=s).sort_stats(sortby) # ps.print_stats() # print(N, s.getvalue()) # return # #%%time # float_numbers = [3]#, 10, 30, 100] # n_float_numbers = len(float_numbers) # # gal_numbers = [100]#, 1000, 10000] # # n_gal_numbers = len(gal_numbers) # # total_results ={} # # for M in gal_numbers: # # n_gals = M # n_gals = 100 # all_results = {} # for N in float_numbers: # start_time = timeit.default_timer() # one_analysis(N) # # total_results[str(n_gals)] = all_results """ Explanation: OK, now lets's collate the metrics for the first 100 galaxies over a variable number of parameters, and look at the distribution of metric values. We're using multiprocessing because the for loop is slow; the rate-limiting step is the optimization routine for finding quantiles of a GMM. End of explanation """ # with open('all_results.hkl', 'w') as result_file: # hickle.dump(all_results, result_file) # with open('all_results.hkl', 'r') as result_file: # all_results = hickle.load(result_file) # all_results = total_results[str(gal_numbers[0])] # all_KLD, all_RMSE = [], [] # for n in range(n_float_numbers): # KLD, RMSE = {}, {} # for approximation in all_results[str(float_numbers[n])][0]['KLD'].keys(): # x = np.array([]) # for k in range(len(all_results[str(float_numbers[n])])): # x = np.append(x, all_results[str(float_numbers[n])][k]['KLD'][approximation]) # KLD[approximation] = x # x = np.array([]) # for k in range(len(all_results[str(float_numbers[n])])): # x = np.append(x, all_results[str(float_numbers[n])][k]['RMSE'][approximation]) # RMSE[approximation] = x # all_KLD.append(KLD) # all_RMSE.append(RMSE) """ Explanation: Since the previous step is quite slow (on the order of 5 minutes per test of different numbers of parameters for my laptop), this is a good point to save the results. We can load them from the file later and not remake them if we only want to do the rest of the analysis. 
End of explanation """ # colors = {'samples':'green', 'quantiles':'blue', 'histogram':'red'} # plt.figure(figsize=(12, 5 * n_float_numbers)) # i=0 # for n in range(n_float_numbers): # i += 1 # # Lefthand panel: KLD # plt.subplot(n_float_numbers, 2, i) # plt.title('KLD for '+str(float_numbers[n])+' stored numbers') # bins = np.linspace(0.0, 5., 25) # for k in ['samples', 'quantiles', 'histogram']: # plt.hist(all_KLD[n][k], bins, label=k, fc=colors[k], ec=colors[k], alpha=0.3, normed=True) # #plt.semilogx() # plt.xlabel('KL Divergence Metric', fontsize=16) # plt.ylim(0., 5.0) # plt.xlim(0., 5.0) # plt.legend() # i += 1 # # Righthand panel: RMSE # plt.subplot(n_float_numbers, 2, i)#+n_numbers) # plt.title('RMSE for '+str(float_numbers[n])+' stored numbers') # bins = np.linspace(0.0, 5., 25) # for k in ['samples', 'quantiles', 'histogram']: # plt.hist(all_RMSE[n][k], bins, label=k, fc=colors[k], ec=colors[k], alpha=0.3, normed=True) # #plt.semilogx() # plt.xlabel('RMS Error Metric', fontsize=16) # plt.ylim(0., 5.0) # plt.xlim(0., 5.0) # plt.legend(); # plt.savefig('money.png') """ Explanation: Now let's plot histograms of the metric values. End of explanation """ # plt.figure(figsize=(6, 5 * n_float_numbers)) # all_n = [] # all_x = [] # all_y = [] # for i in range(n_float_numbers): # results = all_results[str(float_numbers[i])] # n = {} # # Pull out all truths and compute the average at each z: # x = np.zeros([len(z), len(results)]) # y = {} # for approx in ['samples', 'quantiles', 'histogram']: # y[approx] = np.zeros([len(z), len(results)]) # for k in range(len(results)): # y[approx][:,k] = results[k]['approximation'][approx] # for k in range(len(results)): # x[:,k] = results[k]['truth'] # # Now do the averaging to make the estimators: # n['truth'] = np.mean(x, axis=1) # n['truth'] /= np.sum(n['truth']) * delta_z # for approx in ['samples', 'quantiles', 'histogram']: # n[approx] = np.mean(y[approx], axis=1) # n[approx] /= np.sum(n[approx]) * delta_z # all_n.append(n) # all_x.append(x) # all_y.append(y) # # Note: this uses the samples' KDE to make the approximation. We could (and # # should!) also try simply concatenating the samples and histogramming them. # # Plot truth and all the approximations. # # The NaNs in the histogram approximation make that unplottable for now. # plt.subplot(n_float_numbers, 1, i+1)#+n_numbers) # plt.title(r'$n(z)$ for '+str(float_numbers[i])+' numbers') # plt.plot(z, n['truth'], color='black', lw=4, alpha=0.3, label='truth') # for k in ['samples', 'quantiles', 'histogram']: # plt.plot(z, n[k], label=k, color=colors[k]) # plt.xlabel('redshift z') # plt.ylabel('n(z)') # plt.legend(); # plt.savefig('nz_comparison.png', dpi=300) """ Explanation: Interestingly, the metrics don't agree, nor is the behavior consistent across different numbers of parameters. However, as the number of parameters increases, the distribution of the metrics converge to lower numbers. KLD seems to flag more "bad" approximations than RMSE. How do we know where to set the threshold in each metric? We should think of the right way to get a summary statistic (first moment?) on the ensemble of KLD or RMSE values so we can make the plot of number of parameters vs. quality of approximation. Now lets compute the estimated $n(z)$. We'll do this with the GMM "truth", and then using each of our approximations. And we'll normalize the $n(z)$ to account for lost systems with bad approximations. 
End of explanation """ # all_p = [] # for i in range(n_float_numbers): # n = all_n[i] # p = {} # for k in ['samples', 'quantiles', 'histogram']: # p[k] = qp.PDF(gridded=(z,n[k]), vb=False) # p['truth'] = qp.PDF(gridded=(z,n['truth']), vb=False) # all_p.append(p) # all_KLD_nz, all_RMSE_nz = {}, {} # zlimits, dz = [z_low, z_high], 0.01 # for k in ['samples', 'quantiles', 'histogram']: # p = all_p[i] # KLD_nz, RMSE_nz = [], [] # for i in range(n_float_numbers): # KLD_nz.append(qp.calculate_kl_divergence(all_p[i]['truth'], all_p[i][k], limits=zlimits, dx=dz, vb=False)) # RMSE_nz.append(qp.calculate_rmse(all_p[i]['truth'], all_p[i][k], limits=zlimits, dx=dz, vb=False)) # all_KLD_nz[k] = KLD_nz # all_RMSE_nz[k] = RMSE_nz # plt.figure(figsize=(12, 5)) # both = [plt.subplot(1, 2, i+1) for i in range(2)] # KLD_plot = both[0] # RMSE_plot = both[1] # KLD_plot.set_title(r'KLD for $n(z)$') # RMSE_plot.set_title(r'RMSE for $n(z)$') # KLD_plot.set_xlabel('number of parameters') # RMSE_plot.set_xlabel('number of parameters') # KLD_plot.set_ylabel('KLD') # RMSE_plot.set_ylabel('RMSE') # # KLD_plot.semilogx() # # KLD_plot.semilogy() # # RMSE_plot.semilogx() # # RMSE_plot.semilogy() # for k in ['samples', 'quantiles', 'histogram']: # KLD_plot.plot(float_numbers, all_KLD_nz[k], color=colors[k], label=k) # RMSE_plot.plot(float_numbers, all_RMSE_nz[k], color=colors[k], label=k) # KLD_plot.semilogy() # KLD_plot.semilogx() # RMSE_plot.semilogy() # RMSE_plot.semilogx() # KLD_plot.legend() # RMSE_plot.legend() # plt.savefig('summary.png') # print('KLD metrics for n(z) estimator: ', all_KLD_nz) # print('RMSE metrics for n(z) estimator: ', all_RMSE_nz) """ Explanation: The "samples" approximation gives the best result for the $n(z)$ estimator even with a small number of samples. However, once the number of parameters increases slightly, the "quantiles" approximation performs similarly. It takes a large number of parameters before the "histogram" approximation approaches the other options. Let's use the qp.PDF object to compare them quantitatively (since $n(z)$ can be normalized to give the global $p(z)$). End of explanation """
superbobry/pymc3
pymc3/examples/rolling_regression.ipynb
apache-2.0
%matplotlib inline import pandas as pd from pandas_datareader import data import numpy as np import pymc3 as pm import matplotlib.pyplot as plt """ Explanation: Bayesian Rolling Regression in PyMC3 Author: Thomas Wiecki Pairs trading is a famous technique in algorithmic trading that plays two stocks against each other. For this to work, stocks must be correlated (cointegrated). One common example is the price of gold (GLD) and the price of gold mining operations (GDX). End of explanation """ prices = data.YahooDailyReader(symbols=['GLD', 'GDX'], end='2014-8-1').read().loc['Adj Close', :, :].iloc[:1000] prices.head() """ Explanation: Lets load the prices of GDX and GLD. End of explanation """ fig = plt.figure(figsize=(9, 6)) ax = fig.add_subplot(111, xlabel='Price GDX in \$', ylabel='Price GLD in \$') colors = np.linspace(0.1, 1, len(prices)) mymap = plt.get_cmap("winter") sc = ax.scatter(prices.GDX, prices.GLD, c=colors, cmap=mymap, lw=0) cb = plt.colorbar(sc) cb.ax.set_yticklabels([str(p.date()) for p in prices[::len(prices)//10].index]); """ Explanation: Plotting the prices over time suggests a strong correlation. However, the correlation seems to change over time. End of explanation """ with pm.Model() as model_reg: pm.glm.glm('GLD ~ GDX', prices) trace_reg = pm.sample(2000) """ Explanation: A naive approach would be to estimate a linear model and ignore the time domain. End of explanation """ fig = plt.figure(figsize=(9, 6)) ax = fig.add_subplot(111, xlabel='Price GDX in \$', ylabel='Price GLD in \$', title='Posterior predictive regression lines') sc = ax.scatter(prices.GDX, prices.GLD, c=colors, cmap=mymap, lw=0) pm.glm.plot_posterior_predictive(trace_reg[100:], samples=100, label='posterior predictive regression lines', lm=lambda x, sample: sample['Intercept'] + sample['GDX'] * x, eval=np.linspace(prices.GDX.min(), prices.GDX.max(), 100)) cb = plt.colorbar(sc) cb.ax.set_yticklabels([str(p.date()) for p in prices[::len(prices)//10].index]); ax.legend(loc=0); """ Explanation: The posterior predictive plot shows how bad the fit is. End of explanation """ model_randomwalk = pm.Model() with model_randomwalk: # std of random walk, best sampled in log space. sigma_alpha = pm.Exponential('sigma_alpha', 1./.02, testval = .1) sigma_beta = pm.Exponential('sigma_beta', 1./.02, testval = .1) """ Explanation: Rolling regression Next, we will build an improved model that will allow for changes in the regression coefficients over time. Specifically, we will assume that intercept and slope follow a random-walk through time. That idea is similar to the stochastic volatility model. $$ \alpha_t \sim \mathcal{N}(\alpha_{t-1}, \sigma_\alpha^2) $$ $$ \beta_t \sim \mathcal{N}(\beta_{t-1}, \sigma_\beta^2) $$ First, lets define the hyper-priors for $\sigma_\alpha^2$ and $\sigma_\beta^2$. This parameter can be interpreted as the volatility in the regression coefficients. 
End of explanation """ import theano.tensor as T # To make the model simpler, we will apply the same coefficient for 50 data points at a time subsample_alpha = 50 subsample_beta = 50 with model_randomwalk: alpha = pm.GaussianRandomWalk('alpha', sigma_alpha**-2, shape=len(prices) // subsample_alpha) beta = pm.GaussianRandomWalk('beta', sigma_beta**-2, shape=len(prices) // subsample_beta) # Make coefficients have the same length as prices alpha_r = T.repeat(alpha, subsample_alpha) beta_r = T.repeat(beta, subsample_beta) """ Explanation: Next, we define the regression parameters that are not a single random variable but rather a random vector with the above stated dependence structure. So as not to fit a coefficient to a single data point, we will chunk the data into bins of 50 and apply the same coefficients to all data points in a single bin. End of explanation """ with model_randomwalk: # Define regression regression = alpha_r + beta_r * prices.GDX.values # Assume prices are Normally distributed, the mean comes from the regression. sd = pm.Uniform('sd', 0, 20) likelihood = pm.Normal('y', mu=regression, sd=sd, observed=prices.GLD.values) """ Explanation: Perform the regression given coefficients and data and link to the data via the likelihood. End of explanation """ from scipy import optimize with model_randomwalk: # First optimize random walk start = pm.find_MAP(vars=[alpha, beta], fmin=optimize.fmin_l_bfgs_b) # Sample step = pm.NUTS(scaling=start) trace_rw = pm.sample(2000, step, start=start) """ Explanation: Inference. Despite this being quite a complex model, NUTS handles it wells. End of explanation """ fig = plt.figure(figsize=(8, 6)) ax = plt.subplot(111, xlabel='time', ylabel='alpha', title='Change of alpha over time.') ax.plot(trace_rw[-1000:][alpha].T, 'r', alpha=.05); ax.set_xticklabels([str(p.date()) for p in prices[::len(prices)//5].index]); """ Explanation: Analysis of results $\alpha$, the intercept, does not seem to change over time. End of explanation """ fig = plt.figure(figsize=(8, 6)) ax = fig.add_subplot(111, xlabel='time', ylabel='beta', title='Change of beta over time') ax.plot(trace_rw[-1000:][beta].T, 'b', alpha=.05); ax.set_xticklabels([str(p.date()) for p in prices[::len(prices)//5].index]); """ Explanation: However, the slope does. End of explanation """ fig = plt.figure(figsize=(8, 6)) ax = fig.add_subplot(111, xlabel='Price GDX in \$', ylabel='Price GLD in \$', title='Posterior predictive regression lines') colors = np.linspace(0.1, 1, len(prices)) colors_sc = np.linspace(0.1, 1, len(trace_rw[-500::10]['alpha'].T)) mymap = plt.get_cmap('winter') mymap_sc = plt.get_cmap('winter') xi = np.linspace(prices.GDX.min(), prices.GDX.max(), 50) for i, (alpha, beta) in enumerate(zip(trace_rw[-500::10]['alpha'].T, trace_rw[-500::10]['beta'].T)): for a, b in zip(alpha, beta): ax.plot(xi, a + b*xi, alpha=.05, lw=1, c=mymap_sc(colors_sc[i])) sc = ax.scatter(prices.GDX, prices.GLD, label='data', cmap=mymap, c=colors) cb = plt.colorbar(sc) cb.ax.set_yticklabels([str(p.date()) for p in prices[::len(prices)//10].index]); """ Explanation: The posterior predictive plot shows that we capture the change in regression over time much better. Note that we should have used returns instead of prices. The model would still work the same, but the visualisations would not be quite as clear. End of explanation """
noammor/coursera-machinelearning-python
ex6/ml-ex6.ipynb
mit
import numpy as np import matplotlib.pyplot as plt import scipy.io import sklearn.svm %matplotlib inline """ Explanation: Exercise 6 | Support Vector Machines End of explanation """ ex6data1 = scipy.io.loadmat('ex6data1.mat') X = ex6data1['X'] y = ex6data1['y'][:, 0] def plot_data(X, y, ax=None): if ax == None: fig, ax = plt.subplots(figsize=(7,5)) pos = y==1 neg = y==0 ax.scatter(X[pos,0], X[pos,1], marker='+', color='b') ax.scatter(X[neg,0], X[neg,1], marker='o', color='r', s=5) plot_data(X, y) """ Explanation: Part 1: Loading and Visualizing Data We start the exercise by first loading and visualizing the dataset. The following code will load the dataset into your environment and plot the data. End of explanation """ svm = sklearn.svm.SVC(C=1, kernel='linear') svm.fit(X, y) np.mean(svm.predict(X) == y) svm.coef_ fig, ax = plt.subplots(figsize=(7,5)) def draw_contour(X, model): x1 = np.linspace(np.min(X[:,0]), np.max(X[:,0]), 200) x2 = np.linspace(np.min(X[:,1]), np.max(X[:,1]), 200) xx1, xx2 = np.meshgrid(x1, x2) yy = model.predict(np.c_[xx1.flat, xx2.flat]).reshape(xx1.shape) ax.contour(x1, x2, yy, levels=[0.5]) plot_data(X, y, ax) draw_contour(X, svm) """ Explanation: Part 2: Training Linear SVM The following code will train a linear SVM on the dataset and plot the decision boundary learned. You should try to change the C value below and see how the decision boundary varies (e.g., try C = 1000) End of explanation """ def gaussianKernel(x1, x2, sigma): # ====================== YOUR CODE HERE ====================== # Instructions: Fill in this function to return the similarity between x1 # and x2 computed using a Gaussian kernel with bandwidth # sigma # # return 0 # ============================================================= """ Explanation: Part 3: Implementing Gaussian Kernel You will now implement the Gaussian kernel to use with the SVM. You should complete the code in gaussianKernel. This notebook will not use it, however. An sklearn custom kernel should return a matrix of all kernel values. Feel free to implement gaussianKernel in the sklearn way, and later call svm.SVC(kernel=gaussianKernel). End of explanation """ gaussianKernel(x1=np.array([1, 2, 1]), x2=np.array([0, 4, -1]), sigma=2) """ Explanation: The Gaussian Kernel between x1 = [1; 2; 1], x2 = [0; 4; -1], sigma = 2 should be about 0.324652. End of explanation """ ex6data2 = scipy.io.loadmat('ex6data2.mat') X = ex6data2['X'] y = ex6data2['y'][:,0] print(X.shape, y.shape) plot_data(X, y) """ Explanation: Part 4: Visualizing Dataset 2 The following code will load the next dataset into your environment and plot the data. End of explanation """ model = sklearn.svm.SVC(C=1, gamma=100, kernel='rbf') model.fit(X, y) np.mean((model.predict(X) == y)) fig, ax = plt.subplots() plot_data(X, y, ax) draw_contour(X, model) """ Explanation: Part 5: Training SVM with RBF Kernel (Dataset 2) After you have implemented the kernel, we can now use it to train the SVM classier. Note that this doesn't do this, it simply uses the built-in gaussian kernel in sklearn. End of explanation """ ex6data3 = scipy.io.loadmat('ex6data3.mat') X = ex6data3['X'] y = ex6data3['y'][:, 0] Xval = ex6data3['Xval'] yval = ex6data3['yval'][:, 0] print(X.shape, y.shape, Xval.shape, yval.shape) plot_data(X, y) plot_data(Xval, yval) """ Explanation: Part 6: Visualizing Dataset 3 The following code will load the next dataset into your environment and plot the data. 
End of explanation """ import itertools possible_C = [0.01, 0.03, 0.1, 0.3, 1, 3, 10, 30, 100, 300, 1000] possible_gamma = [0.01, 0.03, 0.1, 0.3, 1, 3, 10, 30, 100, 300, 1000] cv_errors = np.zeros((len(possible_C), len(possible_gamma))) # YOUR CODE GOES HERE C = 7 gamma = 7 # ================== model = sklearn.svm.SVC(C=C, gamma=gamma, kernel='rbf') model.fit(X, y) fig, ax = plt.subplots() plot_data(X, y, ax) draw_contour(X, model) """ Explanation: Part 7: Training SVM with RBF Kernel (Dataset 3) This is a different dataset that you can use to experiment with. Try different values of C and sigma here, train a classifier on your training data, measure the cross validation error and find the values for C and sigma that minimize the cross validation error. End of explanation """
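The `gaussianKernel` stub in the exercise above is left for the reader to fill in. A minimal sketch of one possible completion is given below; it computes the single-pair similarity described in the stub's docstring (not the full Gram-matrix form that sklearn expects from a custom kernel), and the printed value can be checked against the 0.324652 quoted in the notebook.

```python
import numpy as np

def gaussianKernel(x1, x2, sigma):
    # similarity = exp(-||x1 - x2||^2 / (2 * sigma^2))
    diff = np.asarray(x1, dtype=float) - np.asarray(x2, dtype=float)
    return np.exp(-np.dot(diff, diff) / (2.0 * sigma ** 2))

# Should print roughly 0.324652, matching the check value in the notebook.
print(gaussianKernel(np.array([1, 2, 1]), np.array([0, 4, -1]), sigma=2))
```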
ES-DOC/esdoc-jupyterhub
notebooks/nerc/cmip6/models/ukesm1-0-ll/atmoschem.ipynb
gpl-3.0
# DO NOT EDIT ! from pyesdoc.ipython.model_topic import NotebookOutput # DO NOT EDIT ! DOC = NotebookOutput('cmip6', 'nerc', 'ukesm1-0-ll', 'atmoschem') """ Explanation: ES-DOC CMIP6 Model Properties - Atmoschem MIP Era: CMIP6 Institute: NERC Source ID: UKESM1-0-LL Topic: Atmoschem Sub-Topics: Transport, Emissions Concentrations, Gas Phase Chemistry, Stratospheric Heterogeneous Chemistry, Tropospheric Heterogeneous Chemistry, Photo Chemistry. Properties: 84 (39 required) Model descriptions: Model description details Initialized From: -- Notebook Help: Goto notebook help page Notebook Initialised: 2018-02-15 16:54:26 Document Setup IMPORTANT: to be executed each time you run the notebook End of explanation """ # Set as follows: DOC.set_author("name", "email") # TODO - please enter value(s) """ Explanation: Document Authors Set document authors End of explanation """ # Set as follows: DOC.set_contributor("name", "email") # TODO - please enter value(s) """ Explanation: Document Contributors Specify document contributors End of explanation """ # Set publication status: # 0=do not publish, 1=publish. DOC.set_publication_status(0) """ Explanation: Document Publication Specify document publication status End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.model_overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: Document Table of Contents 1. Key Properties 2. Key Properties --&gt; Software Properties 3. Key Properties --&gt; Timestep Framework 4. Key Properties --&gt; Timestep Framework --&gt; Split Operator Order 5. Key Properties --&gt; Tuning Applied 6. Grid 7. Grid --&gt; Resolution 8. Transport 9. Emissions Concentrations 10. Emissions Concentrations --&gt; Surface Emissions 11. Emissions Concentrations --&gt; Atmospheric Emissions 12. Emissions Concentrations --&gt; Concentrations 13. Gas Phase Chemistry 14. Stratospheric Heterogeneous Chemistry 15. Tropospheric Heterogeneous Chemistry 16. Photo Chemistry 17. Photo Chemistry --&gt; Photolysis 1. Key Properties Key properties of the atmospheric chemistry 1.1. Model Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of atmospheric chemistry model. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.model_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 1.2. Model Name Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Name of atmospheric chemistry model code. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.chemistry_scheme_scope') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "troposhere" # "stratosphere" # "mesosphere" # "mesosphere" # "whole atmosphere" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 1.3. Chemistry Scheme Scope Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Atmospheric domains covered by the atmospheric chemistry model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.basic_approximations') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 1.4. 
Basic Approximations Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Basic approximations made in the atmospheric chemistry model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.prognostic_variables_form') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "3D mass/mixing ratio for gas" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 1.5. Prognostic Variables Form Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Form of prognostic variables in the atmospheric chemistry component. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.number_of_tracers') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 1.6. Number Of Tracers Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Number of advected tracers in the atmospheric chemistry model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.family_approach') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 1.7. Family Approach Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Atmospheric chemistry calculations (not advection) generalized into families of species? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.coupling_with_chemical_reactivity') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 1.8. Coupling With Chemical Reactivity Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Atmospheric chemistry transport scheme turbulence is couple with chemical reactivity? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.software_properties.repository') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 2. Key Properties --&gt; Software Properties Software properties of aerosol code 2.1. Repository Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Location of code for this component. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_version') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 2.2. Code Version Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Code version identifier. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_languages') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 2.3. Code Languages Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Code language(s). End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Operator splitting" # "Integrated" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 3. Key Properties --&gt; Timestep Framework Timestepping in the atmospheric chemistry model 3.1. Method Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Mathematical method deployed to solve the evolution of a given variable End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_advection_timestep') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 3.2. Split Operator Advection Timestep Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Timestep for chemical species advection (in seconds) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_physical_timestep') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 3.3. Split Operator Physical Timestep Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Timestep for physics (in seconds). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_chemistry_timestep') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 3.4. Split Operator Chemistry Timestep Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Timestep for chemistry (in seconds). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_alternate_order') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 3.5. Split Operator Alternate Order Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_timestep') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 3.6. Integrated Timestep Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Timestep for the atmospheric chemistry model (in seconds) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_scheme_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Explicit" # "Implicit" # "Semi-implicit" # "Semi-analytic" # "Impact solver" # "Back Euler" # "Newton Raphson" # "Rosenbrock" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 3.7. Integrated Scheme Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Specify the type of timestep scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.turbulence') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 4. 
Key Properties --&gt; Timestep Framework --&gt; Split Operator Order ** 4.1. Turbulence Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Call order for turbulence scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.convection') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 4.2. Convection Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Call order for convection scheme This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.precipitation') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 4.3. Precipitation Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Call order for precipitation scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.emissions') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 4.4. Emissions Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Call order for emissions scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.deposition') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 4.5. Deposition Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Call order for deposition scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.gas_phase_chemistry') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 4.6. Gas Phase Chemistry Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Call order for gas phase chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.tropospheric_heterogeneous_phase_chemistry') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 4.7. 
Tropospheric Heterogeneous Phase Chemistry Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Call order for tropospheric heterogeneous phase chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.stratospheric_heterogeneous_phase_chemistry') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 4.8. Stratospheric Heterogeneous Phase Chemistry Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Call order for stratospheric heterogeneous phase chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.photo_chemistry') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 4.9. Photo Chemistry Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Call order for photo chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.aerosols') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 4.10. Aerosols Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Call order for aerosols scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5. Key Properties --&gt; Tuning Applied Tuning methodology for atmospheric chemistry component 5.1. Description Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 General overview description of tuning: explain and motivate the main targets and metrics retained. &amp;Document the relative weight given to climate performance metrics versus process oriented metrics, &amp;and on the possible conflicts with parameterization level tuning. In particular describe any struggle &amp;with a parameter value that required pushing it to its limits to solve a particular model deficiency. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.global_mean_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5.2. Global Mean Metrics Used Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N List set of metrics of the global mean state used in tuning model/component End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
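# Illustrative example only (hypothetical metric, not actual tuning information):
# a completed cell might read, e.g.
# DOC.set_value("Regional mean tropospheric ozone column compared against observations")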
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.regional_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5.3. Regional Metrics Used Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N List of regional metrics of mean state used in tuning model/component End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.trend_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5.4. Trend Metrics Used Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N List observed trend metrics used in tuning model/component End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.grid.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6. Grid Atmospheric chemistry grid 6.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe the general structure of the atmopsheric chemistry grid End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.grid.matches_atmosphere_grid') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 6.2. Matches Atmosphere Grid Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 * Does the atmospheric chemistry grid match the atmosphere grid?* End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.grid.resolution.name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7. Grid --&gt; Resolution Resolution in the atmospheric chemistry grid 7.1. Name Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 This is a string usually used by the modelling group to describe the resolution of this grid, e.g. ORCA025, N512L180, T512L70 etc. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.grid.resolution.canonical_horizontal_resolution') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7.2. Canonical Horizontal Resolution Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Expression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_horizontal_gridpoints') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 7.3. Number Of Horizontal Gridpoints Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Total number of horizontal (XY) points (or degrees of freedom) on computational grid. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_vertical_levels') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 7.4. Number Of Vertical Levels Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Number of vertical levels resolved on computational grid. End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
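# Illustrative example only (hypothetical, not a statement about this grid):
# BOOLEAN cells take an unquoted Python bool rather than a quoted string, e.g.
# DOC.set_value(False)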
DOC.set_id('cmip6.atmoschem.grid.resolution.is_adaptive_grid') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 7.5. Is Adaptive Grid Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Default is False. Set true if grid resolution changes during execution. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.transport.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8. Transport Atmospheric chemistry transport 8.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 General overview of transport implementation End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.transport.use_atmospheric_transport') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 8.2. Use Atmospheric Transport Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is transport handled by the atmosphere, rather than within atmospheric cehmistry? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.transport.transport_details') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.3. Transport Details Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If transport is handled within the atmospheric chemistry scheme, describe it. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.emissions_concentrations.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9. Emissions Concentrations Atmospheric chemistry emissions 9.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview atmospheric chemistry emissions End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.sources') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Vegetation" # "Soil" # "Sea surface" # "Anthropogenic" # "Biomass burning" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 10. Emissions Concentrations --&gt; Surface Emissions ** 10.1. Sources Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Sources of the chemical species emitted at the surface that are taken into account in the emissions scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.method') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Climatology" # "Spatially uniform mixing ratio" # "Spatially uniform concentration" # "Interactive" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 10.2. Method Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Methods used to define chemical species emitted directly into model layers above the surface (several methods allowed because the different species may not use the same method). End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
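# Illustrative example only, mirroring the example quoted in the property
# description below: e.g. DOC.set_value("CO (monthly), C2H6 (constant)")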
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_climatology_emitted_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 10.3. Prescribed Climatology Emitted Species Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of chemical species emitted at the surface and prescribed via a climatology, and the nature of the climatology (E.g. CO (monthly), C2H6 (constant)) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_spatially_uniform_emitted_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 10.4. Prescribed Spatially Uniform Emitted Species Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of chemical species emitted at the surface and prescribed as spatially uniform End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.interactive_emitted_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 10.5. Interactive Emitted Species Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of chemical species emitted at the surface and specified via an interactive method End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.other_emitted_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 10.6. Other Emitted Species Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of chemical species emitted at the surface and specified via any other method End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.sources') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Aircraft" # "Biomass burning" # "Lightning" # "Volcanos" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 11. Emissions Concentrations --&gt; Atmospheric Emissions TO DO 11.1. Sources Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Sources of chemical species emitted in the atmosphere that are taken into account in the emissions scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.method') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Climatology" # "Spatially uniform mixing ratio" # "Spatially uniform concentration" # "Interactive" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 11.2. Method Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Methods used to define the chemical species emitted in the atmosphere (several methods allowed because the different species may not use the same method). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_climatology_emitted_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 11.3. 
Prescribed Climatology Emitted Species Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of chemical species emitted in the atmosphere and prescribed via a climatology (E.g. CO (monthly), C2H6 (constant)) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_spatially_uniform_emitted_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 11.4. Prescribed Spatially Uniform Emitted Species Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of chemical species emitted in the atmosphere and prescribed as spatially uniform End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.interactive_emitted_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 11.5. Interactive Emitted Species Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of chemical species emitted in the atmosphere and specified via an interactive method End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.other_emitted_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 11.6. Other Emitted Species Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of chemical species emitted in the atmosphere and specified via an &quot;other method&quot; End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_lower_boundary') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 12. Emissions Concentrations --&gt; Concentrations TO DO 12.1. Prescribed Lower Boundary Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of species prescribed at the lower boundary. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_upper_boundary') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 12.2. Prescribed Upper Boundary Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of species prescribed at the upper boundary. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 13. Gas Phase Chemistry Atmospheric chemistry transport 13.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview gas phase atmospheric chemistry End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.species') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "HOx" # "NOy" # "Ox" # "Cly" # "HSOx" # "Bry" # "VOCs" # "isoprene" # "H2O" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 13.2. 
Species Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Species included in the gas phase chemistry scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_bimolecular_reactions') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 13.3. Number Of Bimolecular Reactions Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 The number of bi-molecular reactions in the gas phase chemistry scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_termolecular_reactions') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 13.4. Number Of Termolecular Reactions Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 The number of ter-molecular reactions in the gas phase chemistry scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_tropospheric_heterogenous_reactions') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 13.5. Number Of Tropospheric Heterogenous Reactions Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 The number of reactions in the tropospheric heterogeneous chemistry scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_stratospheric_heterogenous_reactions') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 13.6. Number Of Stratospheric Heterogenous Reactions Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 The number of reactions in the stratospheric heterogeneous chemistry scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_advected_species') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 13.7. Number Of Advected Species Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 The number of advected species in the gas phase chemistry scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_steady_state_species') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 13.8. Number Of Steady State Species Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 The number of gas phase species for which the concentration is updated in the chemical solver assuming photochemical steady state End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.interactive_dry_deposition') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 13.9. Interactive Dry Deposition Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is dry deposition interactive (as opposed to prescribed)? Dry deposition describes the dry processes by which gaseous species deposit themselves on solid surfaces thus decreasing their concentration in the air. 
End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_deposition') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 13.10. Wet Deposition Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is wet deposition included? Wet deposition describes the moist processes by which gaseous species deposit themselves on solid surfaces thus decreasing their concentration in the air. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_oxidation') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 13.11. Wet Oxidation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is wet oxidation included? Oxidation describes the loss of electrons or an increase in oxidation state by a molecule End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 14. Stratospheric Heterogeneous Chemistry Atmospheric chemistry startospheric heterogeneous chemistry 14.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview stratospheric heterogenous atmospheric chemistry End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.gas_phase_species') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Cly" # "Bry" # "NOy" # TODO - please enter value(s) """ Explanation: 14.2. Gas Phase Species Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Gas phase species included in the stratospheric heterogeneous chemistry scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.aerosol_species') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Sulphate" # "Polar stratospheric ice" # "NAT (Nitric acid trihydrate)" # "NAD (Nitric acid dihydrate)" # "STS (supercooled ternary solution aerosol particule))" # TODO - please enter value(s) """ Explanation: 14.3. Aerosol Species Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Aerosol species included in the stratospheric heterogeneous chemistry scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.number_of_steady_state_species') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 14.4. Number Of Steady State Species Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 The number of steady state species in the stratospheric heterogeneous chemistry scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.sedimentation') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 14.5. 
Sedimentation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is sedimentation is included in the stratospheric heterogeneous chemistry scheme or not? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.coagulation') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 14.6. Coagulation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is coagulation is included in the stratospheric heterogeneous chemistry scheme or not? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 15. Tropospheric Heterogeneous Chemistry Atmospheric chemistry tropospheric heterogeneous chemistry 15.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview tropospheric heterogenous atmospheric chemistry End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.gas_phase_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 15.2. Gas Phase Species Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of gas phase species included in the tropospheric heterogeneous chemistry scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.aerosol_species') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Sulphate" # "Nitrate" # "Sea salt" # "Dust" # "Ice" # "Organic" # "Black carbon/soot" # "Polar stratospheric ice" # "Secondary organic aerosols" # "Particulate organic matter" # TODO - please enter value(s) """ Explanation: 15.3. Aerosol Species Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Aerosol species included in the tropospheric heterogeneous chemistry scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.number_of_steady_state_species') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 15.4. Number Of Steady State Species Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 The number of steady state species in the tropospheric heterogeneous chemistry scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.interactive_dry_deposition') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 15.5. Interactive Dry Deposition Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is dry deposition interactive (as opposed to prescribed)? Dry deposition describes the dry processes by which gaseous species deposit themselves on solid surfaces thus decreasing their concentration in the air. End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
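# Illustrative example only (hypothetical, not actual model information): e.g.
# DOC.set_value(True) if coagulation is included, DOC.set_value(False) if it is not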
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.coagulation') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 15.6. Coagulation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is coagulation is included in the tropospheric heterogeneous chemistry scheme or not? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.photo_chemistry.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 16. Photo Chemistry Atmospheric chemistry photo chemistry 16.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview atmospheric photo chemistry End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.photo_chemistry.number_of_reactions') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 16.2. Number Of Reactions Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 The number of reactions in the photo-chemistry scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Offline (clear sky)" # "Offline (with clouds)" # "Online" # TODO - please enter value(s) """ Explanation: 17. Photo Chemistry --&gt; Photolysis Photolysis scheme 17.1. Method Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Photolysis scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.environmental_conditions') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 17.2. Environmental Conditions Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe any environmental conditions taken into account by the photolysis scheme (e.g. whether pressure- and temperature-sensitive cross-sections and quantum yields in the photolysis calculations are modified to reflect the modelled conditions.) End of explanation """
ES-DOC/esdoc-jupyterhub
notebooks/cas/cmip6/models/sandbox-2/toplevel.ipynb
gpl-3.0
# DO NOT EDIT ! from pyesdoc.ipython.model_topic import NotebookOutput # DO NOT EDIT ! DOC = NotebookOutput('cmip6', 'cas', 'sandbox-2', 'toplevel') """ Explanation: ES-DOC CMIP6 Model Properties - Toplevel MIP Era: CMIP6 Institute: CAS Source ID: SANDBOX-2 Sub-Topics: Radiative Forcings. Properties: 85 (42 required) Model descriptions: Model description details Initialized From: -- Notebook Help: Goto notebook help page Notebook Initialised: 2018-02-15 16:53:45 Document Setup IMPORTANT: to be executed each time you run the notebook End of explanation """ # Set as follows: DOC.set_author("name", "email") # TODO - please enter value(s) """ Explanation: Document Authors Set document authors End of explanation """ # Set as follows: DOC.set_contributor("name", "email") # TODO - please enter value(s) """ Explanation: Document Contributors Specify document contributors End of explanation """ # Set publication status: # 0=do not publish, 1=publish. DOC.set_publication_status(0) """ Explanation: Document Publication Specify document publication status End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.model_overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: Document Table of Contents 1. Key Properties 2. Key Properties --&gt; Flux Correction 3. Key Properties --&gt; Genealogy 4. Key Properties --&gt; Software Properties 5. Key Properties --&gt; Coupling 6. Key Properties --&gt; Tuning Applied 7. Key Properties --&gt; Conservation --&gt; Heat 8. Key Properties --&gt; Conservation --&gt; Fresh Water 9. Key Properties --&gt; Conservation --&gt; Salt 10. Key Properties --&gt; Conservation --&gt; Momentum 11. Radiative Forcings 12. Radiative Forcings --&gt; Greenhouse Gases --&gt; CO2 13. Radiative Forcings --&gt; Greenhouse Gases --&gt; CH4 14. Radiative Forcings --&gt; Greenhouse Gases --&gt; N2O 15. Radiative Forcings --&gt; Greenhouse Gases --&gt; Tropospheric O3 16. Radiative Forcings --&gt; Greenhouse Gases --&gt; Stratospheric O3 17. Radiative Forcings --&gt; Greenhouse Gases --&gt; CFC 18. Radiative Forcings --&gt; Aerosols --&gt; SO4 19. Radiative Forcings --&gt; Aerosols --&gt; Black Carbon 20. Radiative Forcings --&gt; Aerosols --&gt; Organic Carbon 21. Radiative Forcings --&gt; Aerosols --&gt; Nitrate 22. Radiative Forcings --&gt; Aerosols --&gt; Cloud Albedo Effect 23. Radiative Forcings --&gt; Aerosols --&gt; Cloud Lifetime Effect 24. Radiative Forcings --&gt; Aerosols --&gt; Dust 25. Radiative Forcings --&gt; Aerosols --&gt; Tropospheric Volcanic 26. Radiative Forcings --&gt; Aerosols --&gt; Stratospheric Volcanic 27. Radiative Forcings --&gt; Aerosols --&gt; Sea Salt 28. Radiative Forcings --&gt; Other --&gt; Land Use 29. Radiative Forcings --&gt; Other --&gt; Solar 1. Key Properties Key properties of the model 1.1. Model Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Top level overview of coupled model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.model_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 1.2. Model Name Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Name of coupled model. End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
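# Illustrative example only (hypothetical wording, not a description of this model):
# free-text STRING cells take a quoted description, e.g.
# DOC.set_value("No flux corrections are applied to the coupled fields")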
DOC.set_id('cmip6.toplevel.key_properties.flux_correction.details') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 2. Key Properties --&gt; Flux Correction Flux correction properties of the model 2.1. Details Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe if/how flux corrections are applied in the model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.genealogy.year_released') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 3. Key Properties --&gt; Genealogy Genealogy and history of the model 3.1. Year Released Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Year the model was released End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP3_parent') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 3.2. CMIP3 Parent Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 CMIP3 parent if any End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP5_parent') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 3.3. CMIP5 Parent Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 CMIP5 parent if any End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.genealogy.previous_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 3.4. Previous Name Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Previously known as End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.software_properties.repository') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4. Key Properties --&gt; Software Properties Software properties of model 4.1. Repository Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Location of code for this component. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_version') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4.2. Code Version Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Code version identifier. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_languages') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4.3. Code Languages Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Code language(s). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.software_properties.components_structure') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4.4. 
Components Structure Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe how model realms are structured into independent software components (coupled via a coupler) and internal software components. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.software_properties.coupler') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "OASIS" # "OASIS3-MCT" # "ESMF" # "NUOPC" # "Bespoke" # "Unknown" # "None" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 4.5. Coupler Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Overarching coupling framework for model. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.coupling.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5. Key Properties --&gt; Coupling ** 5.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of coupling in the model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_double_flux') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 5.2. Atmosphere Double Flux Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is the atmosphere passing a double flux to the ocean and sea ice (as opposed to a single one)? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_fluxes_calculation_grid') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Atmosphere grid" # "Ocean grid" # "Specific coupler grid" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 5.3. Atmosphere Fluxes Calculation Grid Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Where are the air-sea fluxes calculated End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_relative_winds') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 5.4. Atmosphere Relative Winds Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Are relative or absolute winds used to compute the flux? I.e. do ocean surface currents enter the wind stress calculation? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6. Key Properties --&gt; Tuning Applied Tuning methodology for model 6.1. Description Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 General overview description of tuning: explain and motivate the main targets and metrics/diagnostics retained. Document the relative weight given to climate performance metrics/diagnostics versus process oriented metrics/diagnostics, and on the possible conflicts with parameterization level tuning. In particular describe any struggle with a parameter value that required pushing it to its limits to solve a particular model deficiency. 
End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.global_mean_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.2. Global Mean Metrics Used Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N List set of metrics/diagnostics of the global mean state used in tuning model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.regional_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.3. Regional Metrics Used Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N List of regional metrics/diagnostics of mean state (e.g THC, AABW, regional means etc) used in tuning model/component End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.trend_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.4. Trend Metrics Used Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N List observed trend metrics/diagnostics used in tuning model/component (such as 20th century) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.energy_balance') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.5. Energy Balance Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe how energy balance was obtained in the full system: in the various components independently or at the components coupling stage? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.fresh_water_balance') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.6. Fresh Water Balance Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe how fresh_water balance was obtained in the full system: in the various components independently or at the components coupling stage? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.global') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7. Key Properties --&gt; Conservation --&gt; Heat Global heat convervation properties of the model 7.1. Global Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe if/how heat is conserved globally End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_ocean_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7.2. Atmos Ocean Interface Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how heat is conserved at the atmosphere/ocean coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_land_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7.3. 
Atmos Land Interface Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe if/how heat is conserved at the atmosphere/land coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_sea-ice_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7.4. Atmos Sea-ice Interface Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how heat is conserved at the atmosphere/sea-ice coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.ocean_seaice_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7.5. Ocean Seaice Interface Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how heat is conserved at the ocean/sea-ice coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.land_ocean_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7.6. Land Ocean Interface Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how heat is conserved at the land/ocean coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.global') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8. Key Properties --&gt; Conservation --&gt; Fresh Water Global fresh water convervation properties of the model 8.1. Global Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe if/how fresh_water is conserved globally End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_ocean_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.2. Atmos Ocean Interface Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how fresh_water is conserved at the atmosphere/ocean coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_land_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.3. Atmos Land Interface Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe if/how fresh water is conserved at the atmosphere/land coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_sea-ice_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.4. Atmos Sea-ice Interface Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how fresh water is conserved at the atmosphere/sea-ice coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
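# Illustrative example only (hypothetical wording, not actual model information):
# e.g. DOC.set_value("Freshwater fluxes from sea-ice melt and growth are passed to
# the ocean without loss, so fresh water is conserved at this interface")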
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.ocean_seaice_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.5. Ocean Seaice Interface Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how fresh water is conserved at the ocean/sea-ice coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.runoff') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.6. Runoff Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe how runoff is distributed and conserved End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.iceberg_calving') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.7. Iceberg Calving Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how iceberg calving is modeled and conserved End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.endoreic_basins') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.8. Endoreic Basins Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how endoreic basins (no ocean access) are treated End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.snow_accumulation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.9. Snow Accumulation Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe how snow accumulation over land and over sea-ice is treated End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.salt.ocean_seaice_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9. Key Properties --&gt; Conservation --&gt; Salt Global salt convervation properties of the model 9.1. Ocean Seaice Interface Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how salt is conserved at the ocean/sea-ice coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.momentum.details') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 10. Key Properties --&gt; Conservation --&gt; Momentum Global momentum convervation properties of the model 10.1. Details Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how momentum is conserved in the model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 11. Radiative Forcings Radiative forcings of the model for historical and scenario (aka Table 12.1 IPCC AR5) 11.1. 
Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of radiative forcings (GHG and aerosols) implementation in model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 12. Radiative Forcings --&gt; Greenhouse Gases --&gt; CO2 Carbon dioxide forcing 12.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 12.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 13. Radiative Forcings --&gt; Greenhouse Gases --&gt; CH4 Methane forcing 13.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 13.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 14. Radiative Forcings --&gt; Greenhouse Gases --&gt; N2O Nitrous oxide forcing 14.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 14.2. 
Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 15. Radiative Forcings --&gt; Greenhouse Gases --&gt; Tropospheric O3 Troposheric ozone forcing 15.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 15.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 16. Radiative Forcings --&gt; Greenhouse Gases --&gt; Stratospheric O3 Stratospheric ozone forcing 16.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 16.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 17. Radiative Forcings --&gt; Greenhouse Gases --&gt; CFC Ozone-depleting and non-ozone-depleting fluorinated gases forcing 17.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.equivalence_concentration') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "Option 1" # "Option 2" # "Option 3" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 17.2. Equivalence Concentration Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Details of any equivalence concentrations used End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 17.3. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 18. Radiative Forcings --&gt; Aerosols --&gt; SO4 SO4 aerosol forcing 18.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 18.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 19. Radiative Forcings --&gt; Aerosols --&gt; Black Carbon Black carbon aerosol forcing 19.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 19.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 20. Radiative Forcings --&gt; Aerosols --&gt; Organic Carbon Organic carbon aerosol forcing 20.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 20.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 21. Radiative Forcings --&gt; Aerosols --&gt; Nitrate Nitrate forcing 21.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 21.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 22. Radiative Forcings --&gt; Aerosols --&gt; Cloud Albedo Effect Cloud albedo effect forcing (RFaci) 22.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.aerosol_effect_on_ice_clouds') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 22.2. Aerosol Effect On Ice Clouds Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Radiative effects of aerosols on ice clouds are represented? End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 22.3. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 23. Radiative Forcings --&gt; Aerosols --&gt; Cloud Lifetime Effect Cloud lifetime effect forcing (ERFaci) 23.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.aerosol_effect_on_ice_clouds') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 23.2. Aerosol Effect On Ice Clouds Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Radiative effects of aerosols on ice clouds are represented? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.RFaci_from_sulfate_only') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 23.3. RFaci From Sulfate Only Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Radiative forcing from aerosol cloud interactions from sulfate aerosol only? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 23.4. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 24. Radiative Forcings --&gt; Aerosols --&gt; Dust Dust forcing 24.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 24.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 25. Radiative Forcings --&gt; Aerosols --&gt; Tropospheric Volcanic Tropospheric volcanic forcing 25.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.historical_explosive_volcanic_aerosol_implementation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Type A" # "Type B" # "Type C" # "Type D" # "Type E" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 25.2. Historical Explosive Volcanic Aerosol Implementation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 How explosive volcanic aerosol is implemented in historical simulations End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.future_explosive_volcanic_aerosol_implementation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Type A" # "Type B" # "Type C" # "Type D" # "Type E" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 25.3. Future Explosive Volcanic Aerosol Implementation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 How explosive volcanic aerosol is implemented in future simulations End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 25.4. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 26. Radiative Forcings --&gt; Aerosols --&gt; Stratospheric Volcanic Stratospheric volcanic forcing 26.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. 
via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.historical_explosive_volcanic_aerosol_implementation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Type A" # "Type B" # "Type C" # "Type D" # "Type E" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 26.2. Historical Explosive Volcanic Aerosol Implementation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 How explosive volcanic aerosol is implemented in historical simulations End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.future_explosive_volcanic_aerosol_implementation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Type A" # "Type B" # "Type C" # "Type D" # "Type E" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 26.3. Future Explosive Volcanic Aerosol Implementation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 How explosive volcanic aerosol is implemented in future simulations End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 26.4. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 27. Radiative Forcings --&gt; Aerosols --&gt; Sea Salt Sea salt forcing 27.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 27.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 28. Radiative Forcings --&gt; Other --&gt; Land Use Land use forcing 28.1. 
Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.crop_change_only') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 28.2. Crop Change Only Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Land use change represented via crop change only? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 28.3. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "irradiance" # "proton" # "electron" # "cosmic ray" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 29. Radiative Forcings --&gt; Other --&gt; Solar Solar forcing 29.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How solar forcing is provided End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 29.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """
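As a concrete illustration of how these property cells are completed (the values below are placeholders for demonstration, not a real model description), the two solar forcing properties above might be filled in as follows, reusing only the DOC.set_id / DOC.set_value calls and the valid choices already listed in those cells:
# Illustrative example only -- placeholder values, not an actual model description.
DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.provision')
DOC.set_value("irradiance")   # one of the valid choices listed for 29.1 (cardinality 1.N)
DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.additional_information')
DOC.set_value("Free-text description of the solar irradiance dataset and citation.")   # 29.2 is an optional string (cardinality 0.1)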
ES-DOC/esdoc-jupyterhub
notebooks/pcmdi/cmip6/models/sandbox-2/ocnbgchem.ipynb
gpl-3.0
# DO NOT EDIT ! from pyesdoc.ipython.model_topic import NotebookOutput # DO NOT EDIT ! DOC = NotebookOutput('cmip6', 'pcmdi', 'sandbox-2', 'ocnbgchem') """ Explanation: ES-DOC CMIP6 Model Properties - Ocnbgchem MIP Era: CMIP6 Institute: PCMDI Source ID: SANDBOX-2 Topic: Ocnbgchem Sub-Topics: Tracers. Properties: 65 (37 required) Model descriptions: Model description details Initialized From: -- Notebook Help: Goto notebook help page Notebook Initialised: 2018-02-15 16:54:36 Document Setup IMPORTANT: to be executed each time you run the notebook End of explanation """ # Set as follows: DOC.set_author("name", "email") # TODO - please enter value(s) """ Explanation: Document Authors Set document authors End of explanation """ # Set as follows: DOC.set_contributor("name", "email") # TODO - please enter value(s) """ Explanation: Document Contributors Specify document contributors End of explanation """ # Set publication status: # 0=do not publish, 1=publish. DOC.set_publication_status(0) """ Explanation: Document Publication Specify document publication status End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.model_overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: Document Table of Contents 1. Key Properties 2. Key Properties --&gt; Time Stepping Framework --&gt; Passive Tracers Transport 3. Key Properties --&gt; Time Stepping Framework --&gt; Biology Sources Sinks 4. Key Properties --&gt; Transport Scheme 5. Key Properties --&gt; Boundary Forcing 6. Key Properties --&gt; Gas Exchange 7. Key Properties --&gt; Carbon Chemistry 8. Tracers 9. Tracers --&gt; Ecosystem 10. Tracers --&gt; Ecosystem --&gt; Phytoplankton 11. Tracers --&gt; Ecosystem --&gt; Zooplankton 12. Tracers --&gt; Disolved Organic Matter 13. Tracers --&gt; Particules 14. Tracers --&gt; Dic Alkalinity 1. Key Properties Ocean Biogeochemistry key properties 1.1. Model Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of ocean biogeochemistry model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.model_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 1.2. Model Name Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Name of ocean biogeochemistry model code (PISCES 2.0,...) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.model_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Geochemical" # "NPZD" # "PFT" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 1.3. Model Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of ocean biogeochemistry model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.elemental_stoichiometry') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Fixed" # "Variable" # "Mix of both" # TODO - please enter value(s) """ Explanation: 1.4. Elemental Stoichiometry Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe elemental stoichiometry (fixed, variable, mix of the two) End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.ocnbgchem.key_properties.elemental_stoichiometry_details') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 1.5. Elemental Stoichiometry Details Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe which elements have fixed/variable stoichiometry End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.prognostic_variables') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 1.6. Prognostic Variables Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N List of all prognostic tracer variables in the ocean biogeochemistry component End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.diagnostic_variables') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 1.7. Diagnostic Variables Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N List of all diagnotic tracer variables in the ocean biogeochemistry component End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.damping') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 1.8. Damping Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe any tracer damping used (such as artificial correction or relaxation to climatology,...) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.passive_tracers_transport.method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "use ocean model transport time step" # "use specific time step" # TODO - please enter value(s) """ Explanation: 2. Key Properties --&gt; Time Stepping Framework --&gt; Passive Tracers Transport Time stepping method for passive tracers transport in ocean biogeochemistry 2.1. Method Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Time stepping framework for passive tracers End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.passive_tracers_transport.timestep_if_not_from_ocean') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 2.2. Timestep If Not From Ocean Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Time step for passive tracers (if different from ocean) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.biology_sources_sinks.method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "use ocean model transport time step" # "use specific time step" # TODO - please enter value(s) """ Explanation: 3. Key Properties --&gt; Time Stepping Framework --&gt; Biology Sources Sinks Time stepping framework for biology sources and sinks in ocean biogeochemistry 3.1. Method Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Time stepping framework for biology sources and sinks End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.biology_sources_sinks.timestep_if_not_from_ocean') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 3.2. Timestep If Not From Ocean Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Time step for biology sources and sinks (if different from ocean) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.transport_scheme.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Offline" # "Online" # TODO - please enter value(s) """ Explanation: 4. Key Properties --&gt; Transport Scheme Transport scheme in ocean biogeochemistry 4.1. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of transport scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.transport_scheme.scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Use that of ocean model" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 4.2. Scheme Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Transport scheme used End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.transport_scheme.use_different_scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4.3. Use Different Scheme Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Decribe transport scheme if different than that of ocean model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.atmospheric_deposition') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "from file (climatology)" # "from file (interannual variations)" # "from Atmospheric Chemistry model" # TODO - please enter value(s) """ Explanation: 5. Key Properties --&gt; Boundary Forcing Properties of biogeochemistry boundary forcing 5.1. Atmospheric Deposition Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe how atmospheric deposition is modeled End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.river_input') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "from file (climatology)" # "from file (interannual variations)" # "from Land Surface model" # TODO - please enter value(s) """ Explanation: 5.2. River Input Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe how river input is modeled End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.sediments_from_boundary_conditions') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5.3. Sediments From Boundary Conditions Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List which sediments are speficied from boundary condition End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.sediments_from_explicit_model') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5.4. Sediments From Explicit Model Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List which sediments are speficied from explicit sediment model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CO2_exchange_present') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 6. Key Properties --&gt; Gas Exchange *Properties of gas exchange in ocean biogeochemistry * 6.1. CO2 Exchange Present Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is CO2 gas exchange modeled ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CO2_exchange_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "OMIP protocol" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 6.2. CO2 Exchange Type Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe CO2 gas exchange End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.O2_exchange_present') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 6.3. O2 Exchange Present Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is O2 gas exchange modeled ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.O2_exchange_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "OMIP protocol" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 6.4. O2 Exchange Type Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe O2 gas exchange End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.DMS_exchange_present') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 6.5. DMS Exchange Present Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is DMS gas exchange modeled ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.DMS_exchange_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.6. DMS Exchange Type Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Specify DMS gas exchange scheme type End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2_exchange_present') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 6.7. N2 Exchange Present Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is N2 gas exchange modeled ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2_exchange_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.8. N2 Exchange Type Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Specify N2 gas exchange scheme type End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2O_exchange_present') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 6.9. N2O Exchange Present Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is N2O gas exchange modeled ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2O_exchange_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.10. N2O Exchange Type Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Specify N2O gas exchange scheme type End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC11_exchange_present') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 6.11. CFC11 Exchange Present Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is CFC11 gas exchange modeled ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC11_exchange_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.12. CFC11 Exchange Type Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Specify CFC11 gas exchange scheme type End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC12_exchange_present') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 6.13. CFC12 Exchange Present Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is CFC12 gas exchange modeled ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC12_exchange_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.14. CFC12 Exchange Type Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Specify CFC12 gas exchange scheme type End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.SF6_exchange_present') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 6.15. SF6 Exchange Present Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is SF6 gas exchange modeled ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.SF6_exchange_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.16. 
SF6 Exchange Type Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Specify SF6 gas exchange scheme type End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.13CO2_exchange_present') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 6.17. 13CO2 Exchange Present Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is 13CO2 gas exchange modeled ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.13CO2_exchange_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.18. 13CO2 Exchange Type Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Specify 13CO2 gas exchange scheme type End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.14CO2_exchange_present') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 6.19. 14CO2 Exchange Present Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is 14CO2 gas exchange modeled ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.14CO2_exchange_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.20. 14CO2 Exchange Type Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Specify 14CO2 gas exchange scheme type End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.other_gases') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.21. Other Gases Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Specify any other gas exchange End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.carbon_chemistry.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "OMIP protocol" # "Other protocol" # TODO - please enter value(s) """ Explanation: 7. Key Properties --&gt; Carbon Chemistry Properties of carbon chemistry biogeochemistry 7.1. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe how carbon chemistry is modeled End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.carbon_chemistry.pH_scale') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Sea water" # "Free" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 7.2. PH Scale Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If NOT OMIP protocol, describe pH scale. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.carbon_chemistry.constants_if_not_OMIP') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7.3. Constants If Not OMIP Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If NOT OMIP protocol, list carbon chemistry constants. 
End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8. Tracers Ocean biogeochemistry tracers 8.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of tracers in ocean biogeochemistry End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.sulfur_cycle_present') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 8.2. Sulfur Cycle Present Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is sulfur cycle modeled ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.nutrients_present') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Nitrogen (N)" # "Phosphorous (P)" # "Silicium (S)" # "Iron (Fe)" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 8.3. Nutrients Present Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N List nutrient species present in ocean biogeochemistry model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.nitrous_species_if_N') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Nitrates (NO3)" # "Amonium (NH4)" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 8.4. Nitrous Species If N Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N If nitrogen present, list nitrous species. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.nitrous_processes_if_N') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Dentrification" # "N fixation" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 8.5. Nitrous Processes If N Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N If nitrogen present, list nitrous processes. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.upper_trophic_levels_definition') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9. Tracers --&gt; Ecosystem Ecosystem properties in ocean biogeochemistry 9.1. Upper Trophic Levels Definition Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Definition of upper trophic level (e.g. based on size) ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.upper_trophic_levels_treatment') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9.2. Upper Trophic Levels Treatment Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Define how upper trophic level are treated End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.phytoplankton.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "None" # "Generic" # "PFT including size based (specify both below)" # "Size based only (specify below)" # "PFT only (specify below)" # TODO - please enter value(s) """ Explanation: 10. 
Tracers --&gt; Ecosystem --&gt; Phytoplankton Phytoplankton properties in ocean biogeochemistry 10.1. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of phytoplankton End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.phytoplankton.pft') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Diatoms" # "Nfixers" # "Calcifiers" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 10.2. Pft Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Phytoplankton functional types (PFT) (if applicable) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.phytoplankton.size_classes') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Microphytoplankton" # "Nanophytoplankton" # "Picophytoplankton" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 10.3. Size Classes Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Phytoplankton size classes (if applicable) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.zooplankton.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "None" # "Generic" # "Size based (specify below)" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 11. Tracers --&gt; Ecosystem --&gt; Zooplankton Zooplankton properties in ocean biogeochemistry 11.1. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of zooplankton End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.zooplankton.size_classes') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Microzooplankton" # "Mesozooplankton" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 11.2. Size Classes Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Zooplankton size classes (if applicable) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.disolved_organic_matter.bacteria_present') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 12. Tracers --&gt; Disolved Organic Matter Disolved organic matter properties in ocean biogeochemistry 12.1. Bacteria Present Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is there bacteria representation ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.disolved_organic_matter.lability') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "None" # "Labile" # "Semi-labile" # "Refractory" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 12.2. Lability Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe treatment of lability in dissolved organic matter End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.ocnbgchem.tracers.particules.method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Diagnostic" # "Diagnostic (Martin profile)" # "Diagnostic (Balast)" # "Prognostic" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 13. Tracers --&gt; Particules Particulate carbon properties in ocean biogeochemistry 13.1. Method Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 How is particulate carbon represented in ocean biogeochemistry? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.particules.types_if_prognostic') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "POC" # "PIC (calcite)" # "PIC (aragonite" # "BSi" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 13.2. Types If Prognostic Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N If prognostic, type(s) of particulate matter taken into account End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.particules.size_if_prognostic') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "No size spectrum used" # "Full size spectrum" # "Discrete size classes (specify which below)" # TODO - please enter value(s) """ Explanation: 13.3. Size If Prognostic Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If prognostic, describe if a particule size spectrum is used to represent distribution of particules in water volume End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.particules.size_if_discrete') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 13.4. Size If Discrete Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If prognostic and discrete size, describe which size classes are used End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.particules.sinking_speed_if_prognostic') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Constant" # "Function of particule size" # "Function of particule type (balast)" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 13.5. Sinking Speed If Prognostic Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If prognostic, method for calculation of sinking speed of particules End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.dic_alkalinity.carbon_isotopes') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "C13" # "C14)" # TODO - please enter value(s) """ Explanation: 14. Tracers --&gt; Dic Alkalinity DIC and alkalinity properties in ocean biogeochemistry 14.1. Carbon Isotopes Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Which carbon isotopes are modelled (C13, C14)? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.dic_alkalinity.abiotic_carbon') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 14.2. Abiotic Carbon Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is abiotic carbon modelled ? 
End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.dic_alkalinity.alkalinity') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Prognostic" # "Diagnostic)" # TODO - please enter value(s) """ Explanation: 14.3. Alkalinity Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 How is alkalinity modelled ? End of explanation """
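To make the fill-in pattern concrete, here is a sketch of how the three DIC/alkalinity cells above might be completed. The values are placeholders taken from the valid choices listed in those cells, not a description of the SANDBOX-2 model:
# Sketch with placeholder values only -- not a real model description.
DOC.set_id('cmip6.ocnbgchem.tracers.dic_alkalinity.carbon_isotopes')
DOC.set_value("C13")          # ENUM, cardinality 1.N
DOC.set_id('cmip6.ocnbgchem.tracers.dic_alkalinity.abiotic_carbon')
DOC.set_value(False)          # BOOLEAN, cardinality 1.1
DOC.set_id('cmip6.ocnbgchem.tracers.dic_alkalinity.alkalinity')
DOC.set_value("Prognostic")   # ENUM, cardinality 1.1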
tensorflow/decision-forests
documentation/tutorials/intermediate_colab.ipynb
apache-2.0
#@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Explanation: Copyright 2020 The TensorFlow Authors. End of explanation """ # Install TensorFlow Dececision Forests !pip install tensorflow_decision_forests """ Explanation: Using text and neural network features <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/decision_forests/tutorials/intermediate_colab"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/decision-forests/blob/main/documentation/tutorials/intermediate_colab.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/decision-forests/blob/main/documentation/tutorials/intermediate_colab.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View on GitHub</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/decision-forests/documentation/tutorials/intermediate_colab.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> </td> <td> <a href="https://tfhub.dev/google/universal-sentence-encoder/4"><img src="https://www.tensorflow.org/images/hub_logo_32px.png" />See TF Hub model</a> </td> </table> Welcome to the Intermediate Colab for TensorFlow Decision Forests (TF-DF). In this colab, you will learn about some more advanced capabilities of TF-DF, including how to deal with natural language features. This colab assumes you are familiar with the concepts presented the Beginner colab, notably about the installation about TF-DF. In this colab, you will: Train a Random Forest that consumes text features natively as categorical sets. Train a Random Forest that consumes text features using a TensorFlow Hub module. In this setting (transfer learning), the module is already pre-trained on a large text corpus. Train a Gradient Boosted Decision Trees (GBDT) and a Neural Network together. The GBDT will consume the output of the Neural Network. Setup End of explanation """ !pip install wurlitzer """ Explanation: Wurlitzer is needed to display the detailed training logs in Colabs (when using verbose=2 in the model constructor). End of explanation """ import tensorflow_decision_forests as tfdf import os import numpy as np import pandas as pd import tensorflow as tf import math """ Explanation: Import the necessary libraries. End of explanation """ #@title from IPython.core.magic import register_line_magic from IPython.display import Javascript from IPython.display import display as ipy_display # Some of the model training logs can cover the full # screen if not compressed to a smaller viewport. # This magic allows setting a max height for a cell. 
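# Usage example (seen later in this notebook): `%set_cell_height 300`
# limits the rendered output of a cell to 300 pixels.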
@register_line_magic def set_cell_height(size): ipy_display( Javascript("google.colab.output.setIframeHeight(0, true, {maxHeight: " + str(size) + "})")) """ Explanation: The hidden code cell limits the output height in colab. End of explanation """ # Install the nightly TensorFlow Datasets package # TODO: Remove when the release package is fixed. !pip install tfds-nightly -U --quiet # Load the dataset import tensorflow_datasets as tfds all_ds = tfds.load("glue/sst2") # Display the first 3 examples of the test fold. for example in all_ds["test"].take(3): print({attr_name: attr_tensor.numpy() for attr_name, attr_tensor in example.items()}) """ Explanation: Use raw text as features TF-DF can consume categorical-set features natively. Categorical-sets represent text features as bags of words (or n-grams). For example: "The little blue dog" → {"the", "little", "blue", "dog"} In this example, you will train a Random Forest on the Stanford Sentiment Treebank (SST) dataset. The objective of this dataset is to classify sentences as carrying a positive or negative sentiment. You will use the binary classification version of the dataset curated in TensorFlow Datasets. Note: Categorical-set features can be expensive to train. In this colab, we will train a small Random Forest with 30 trees. End of explanation """ def prepare_dataset(example): label = (example["label"] + 1) // 2 return {"sentence" : tf.strings.split(example["sentence"])}, label train_ds = all_ds["train"].batch(100).map(prepare_dataset) test_ds = all_ds["validation"].batch(100).map(prepare_dataset) """ Explanation: The dataset is modified as follows: The raw labels are integers in {-1, 1}, but the learning algorithm expects positive integer labels e.g. {0, 1}. Therefore, the labels are transformed as follows: new_labels = (original_labels + 1) / 2. A batch size of 100 is applied to make reading the dataset more efficient. The sentence attribute needs to be tokenized, i.e. "hello world" -> ["hello", "world"]. Note: This example doesn't use the test split of the dataset as it does not have labels. If the test split had labels, you could concatenate the validation fold into the train one (e.g. all_ds["train"].concatenate(all_ds["validation"])). Details: Some decision forest learning algorithms do not need a validation dataset (e.g. Random Forests) while others do (e.g. Gradient Boosted Trees in some cases). Since each learning algorithm under TF-DF can use validation data differently, TF-DF handles train/validation splits internally. As a result, when you have training and validation sets, they can always be concatenated as input to the learning algorithm. End of explanation """ %set_cell_height 300 # Specify the model. model_1 = tfdf.keras.RandomForestModel(num_trees=30) # Train the model. model_1.fit(x=train_ds) """ Explanation: Finally, train and evaluate the model as usual. TF-DF automatically detects multi-valued categorical features as categorical-set. End of explanation """ model_1.compile(metrics=["accuracy"]) evaluation = model_1.evaluate(test_ds) print(f"BinaryCrossentropyloss: {evaluation[0]}") print(f"Accuracy: {evaluation[1]}") """ Explanation: In the previous logs, note that sentence is a CATEGORICAL_SET feature. 
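If you want to confirm what the model actually consumes, an optional sanity check (a sketch, not part of the original tutorial) is to pull a single batch from the tokenized dataset and look at the ragged tensor of tokens:
# Illustrative check: inspect one batch of tokenized sentences and labels.
for features, labels in train_ds.take(1):
    print(features["sentence"][:3])  # RaggedTensor of word tokens
    print(labels[:3])                # 0/1 sentiment labels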
The model is evaluated as usual: End of explanation """ import matplotlib.pyplot as plt logs = model_1.make_inspector().training_logs() plt.plot([log.num_trees for log in logs], [log.evaluation.accuracy for log in logs]) plt.xlabel("Number of trees") plt.ylabel("Out-of-bag accuracy") pass """ Explanation: The training logs look as follows: End of explanation """ !pip install --upgrade tensorflow-hub """ Explanation: More trees would probably be beneficial (I am sure of it because I tried :p). Use a pretrained text embedding The previous example trained a Random Forest using raw text features. This example will use a pre-trained TF-Hub embedding to convert text features into a dense embedding, and then train a Random Forest on top of it. In this situation, the Random Forest will only "see" the numerical output of the embedding (i.e. it will not see the raw text). In this experiment, we will use the Universal-Sentence-Encoder. Different pre-trained embeddings might be suited for different types of text (e.g. different language, different task) but also for other types of structured features (e.g. images). Note: This embedding is large (1GB) and therefore the final model will be slow to run (compared to classical decision tree inference). The embedding module can be applied in one of two places: During the dataset preparation. In the pre-processing stage of the model. The second option is often preferable: Packaging the embedding in the model makes the model easier to use (and harder to misuse). First install TF-Hub: End of explanation """ def prepare_dataset(example): label = (example["label"] + 1) // 2 return {"sentence" : example["sentence"]}, label train_ds = all_ds["train"].batch(100).map(prepare_dataset) test_ds = all_ds["validation"].batch(100).map(prepare_dataset) %set_cell_height 300 import tensorflow_hub as hub # NNLM (https://tfhub.dev/google/nnlm-en-dim128/2) is also a good choice. hub_url = "http://tfhub.dev/google/universal-sentence-encoder/4" embedding = hub.KerasLayer(hub_url) sentence = tf.keras.layers.Input(shape=(), name="sentence", dtype=tf.string) embedded_sentence = embedding(sentence) raw_inputs = {"sentence": sentence} processed_inputs = {"embedded_sentence": embedded_sentence} preprocessor = tf.keras.Model(inputs=raw_inputs, outputs=processed_inputs) model_2 = tfdf.keras.RandomForestModel( preprocessing=preprocessor, num_trees=100) model_2.fit(x=train_ds) model_2.compile(metrics=["accuracy"]) evaluation = model_2.evaluate(test_ds) print(f"BinaryCrossentropyloss: {evaluation[0]}") print(f"Accuracy: {evaluation[1]}") """ Explanation: Unlike before, you don't need to tokenize the text. End of explanation """ !wget -q https://storage.googleapis.com/download.tensorflow.org/data/palmer_penguins/penguins.csv -O /tmp/penguins.csv """ Explanation: Note that categorical sets represent text differently from a dense embedding, so it may be useful to use both strategies jointly. Train a decision tree and neural network together The previous example used a pre-trained Neural Network (NN) to process the text features before passing them to the Random Forest. This example will train both the Neural Network and the Random Forest from scratch. TF-DF's Decision Forests do not back-propagate gradients (although this is the subject of ongoing research). Therefore, the training happens in two stages: Train the neural-network as a standard classification task: example → [Normalize] → [Neural Network*] → [classification head] → prediction *: Training. 
Replace the Neural Network's head (the last layer and the soft-max) with a Random Forest. Train the Random Forest as usual: example → [Normalize] → [Neural Network] → [Random Forest*] → prediction *: Training. Prepare the dataset This example uses the Palmer's Penguins dataset. See the Beginner colab for details. First, download the raw data: End of explanation """ dataset_df = pd.read_csv("/tmp/penguins.csv") # Display the first 3 examples. dataset_df.head(3) """ Explanation: Load a dataset into a Pandas Dataframe. End of explanation """ label = "species" # Replaces numerical NaN (representing missing values in Pandas Dataframe) with 0s. # ...Neural Nets don't work well with numerical NaNs. for col in dataset_df.columns: if dataset_df[col].dtype not in [str, object]: dataset_df[col] = dataset_df[col].fillna(0) # Split the dataset into a training and testing dataset. def split_dataset(dataset, test_ratio=0.30): """Splits a pandas dataframe in two.""" test_indices = np.random.rand(len(dataset)) < test_ratio return dataset[~test_indices], dataset[test_indices] train_ds_pd, test_ds_pd = split_dataset(dataset_df) print("{} examples in training, {} examples for testing.".format( len(train_ds_pd), len(test_ds_pd))) # Convert the datasets into tensorflow datasets train_ds = tfdf.keras.pd_dataframe_to_tf_dataset(train_ds_pd, label=label) test_ds = tfdf.keras.pd_dataframe_to_tf_dataset(test_ds_pd, label=label) """ Explanation: Prepare the dataset for training. End of explanation """ input_1 = tf.keras.Input(shape=(1,), name="bill_length_mm", dtype="float") input_2 = tf.keras.Input(shape=(1,), name="island", dtype="string") nn_raw_inputs = [input_1, input_2] """ Explanation: Build the models Next create the neural network model using Keras' functional style. To keep the example simple, this model only uses two inputs. End of explanation """ # Normalization. Normalization = tf.keras.layers.Normalization CategoryEncoding = tf.keras.layers.CategoryEncoding StringLookup = tf.keras.layers.StringLookup values = train_ds_pd["bill_length_mm"].values[:, tf.newaxis] input_1_normalizer = Normalization() input_1_normalizer.adapt(values) values = train_ds_pd["island"].values input_2_indexer = StringLookup(max_tokens=32) input_2_indexer.adapt(values) input_2_onehot = CategoryEncoding(output_mode="binary", max_tokens=32) normalized_input_1 = input_1_normalizer(input_1) normalized_input_2 = input_2_onehot(input_2_indexer(input_2)) nn_processed_inputs = [normalized_input_1, normalized_input_2] """ Explanation: Use preprocessing layers to convert the raw inputs to inputs appropriate for the neural network. End of explanation """ y = tf.keras.layers.Concatenate()(nn_processed_inputs) y = tf.keras.layers.Dense(16, activation=tf.nn.relu6)(y) last_layer = tf.keras.layers.Dense(8, activation=tf.nn.relu, name="last")(y) # "3" for the three label classes. If it were a binary classification, the # output dim would be 1. classification_output = tf.keras.layers.Dense(3)(y) nn_model = tf.keras.models.Model(nn_raw_inputs, classification_output) """ Explanation: This nn_model directly produces classification logits. Next create a decision forest model. 
This will operate on the high level features that the neural network extracts in the last layer before that classification head. End of explanation """ %set_cell_height 300 nn_model.compile( optimizer=tf.keras.optimizers.Adam(), loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=["accuracy"]) nn_model.fit(x=train_ds, validation_data=test_ds, epochs=10) nn_model.summary() """ Explanation: Train and evaluate the models The model will be trained in two stages. First train the neural network with its own classification head: End of explanation """ %set_cell_height 300 df_and_nn_model.fit(x=train_ds) """ Explanation: The neural network layers are shared between the two models. So now that the neural network is trained the decision forest model will be fit to the trained output of the neural network layers: End of explanation """ df_and_nn_model.compile(metrics=["accuracy"]) print("Evaluation:", df_and_nn_model.evaluate(test_ds)) """ Explanation: Now evaluate the composed model: End of explanation """ print("Evaluation :", nn_model.evaluate(test_ds)) """ Explanation: Compare it to the Neural Network alone: End of explanation """
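As an optional follow-up (a sketch, not part of the original tutorial), the out-of-bag accuracy plot used earlier for model_1 can be reused to see how the forest sitting on top of the neural network features converged:
# Illustrative: plot the out-of-bag accuracy of the composed model's forest.
import matplotlib.pyplot as plt
logs = df_and_nn_model.make_inspector().training_logs()
plt.plot([log.num_trees for log in logs], [log.evaluation.accuracy for log in logs])
plt.xlabel("Number of trees")
plt.ylabel("Out-of-bag accuracy")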
imatge-upc/activitynet-2016-cvprw
notebooks/16 Visualization of Results.ipynb
mit
import random import os import numpy as np from work.dataset.activitynet import ActivityNetDataset dataset = ActivityNetDataset( videos_path='../dataset/videos.json', labels_path='../dataset/labels.txt' ) videos = dataset.get_subset_videos('validation') videos = random.sample(videos, 8) examples = [] for v in videos: file_dir = os.path.join('../downloads/features/', v.features_file_name) if not os.path.isfile(file_dir): os.system('scp imatge:~/work/datasets/ActivityNet/v1.3/features/{} ../downloads/features/'.format(v.features_file_name)) features = np.load(file_dir) examples.append((v, features)) """ Explanation: Generate some validation videos random, download them from the server and then use them to visualize the results. End of explanation """ from keras.layers import Input, BatchNormalization, LSTM, TimeDistributed, Dense from keras.models import Model input_features = Input(batch_shape=(1, 1, 4096,), name='features') input_normalized = BatchNormalization(mode=1)(input_features) lstm1 = LSTM(512, return_sequences=True, stateful=True, name='lstm1')(input_normalized) lstm2 = LSTM(512, return_sequences=True, stateful=True, name='lstm2')(lstm1) output = TimeDistributed(Dense(201, activation='softmax'), name='fc')(lstm2) model = Model(input=input_features, output=output) model.load_weights('../work/scripts/training/lstm_activity_classification/model_snapshot/lstm_activity_classification_02_e100.hdf5') model.summary() model.compile(loss='categorical_crossentropy', optimizer='rmsprop') """ Explanation: Load the trained model with its weigths End of explanation """ predictions = [] for v, features in examples: nb_instances = features.shape[0] X = features.reshape((nb_instances, 1, 4096)) model.reset_states() prediction = model.predict(X, batch_size=1) prediction = prediction.reshape(nb_instances, 201) class_prediction = np.argmax(prediction, axis=1) predictions.append((v, prediction, class_prediction)) """ Explanation: Extract the predictions for each video and print the scoring End of explanation """ from IPython.display import YouTubeVideo, display for v, prediction, class_prediction in predictions: print('Video ID: {}\t\tGround truth: {}'.format(v.video_id, v.get_activity())) class_means = np.mean(prediction, axis=0) top_3 = np.argsort(class_means[1:])[::-1][:3] + 1 scores = class_means[top_3]/np.sum(class_means[1:]) for index, score in zip(top_3, scores): if score == 0.: continue label = dataset.labels[index][1] print('{:.4f}\t{}'.format(score, label)) vid = YouTubeVideo(v.video_id) display(vid) print('\n') """ Explanation: Print the global classification results End of explanation """ import matplotlib.pyplot as plt %matplotlib inline import matplotlib normalize = matplotlib.colors.Normalize(vmin=0, vmax=201) for v, prediction, class_prediction in predictions: v.get_video_instances(16, 0) ground_truth = np.array([instance.output for instance in v.instances]) nb_instances = len(v.instances) print('Video ID: {}\nMain Activity: {}'.format(v.video_id, v.get_activity())) plt.figure(num=None, figsize=(18, 1), dpi=100) plt.contourf(np.broadcast_to(ground_truth, (2, nb_instances)), norm=normalize, interpolation='nearest') plt.title('Ground Truth') plt.show() plt.figure(num=None, figsize=(18, 1), dpi=100) plt.contourf(np.broadcast_to(class_prediction, (2, nb_instances)), norm=normalize, interpolation='nearest') plt.title('Prediction') plt.show() print('\n') normalize = matplotlib.colors.Normalize(vmin=0, vmax=1) for v, prediction, class_prediction in predictions: v.get_video_instances(16, 0) 
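# For each sampled video: rebuild its instances, then plot the ground-truth class timeline against the per-instance predictions below.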
ground_truth = np.array([instance.output for instance in v.instances]) nb_instances = len(v.instances) output_index = dataset.get_output_index(v.label) print('Video ID: {}\nMain Activity: {}'.format(v.video_id, v.get_activity())) class_means = np.mean(prediction, axis=0) top_3 = np.argsort(class_means[1:])[::-1][:3] + 1 scores = class_means[top_3]/np.sum(class_means[1:]) for index, score in zip(top_3, scores): if score == 0.: continue label = dataset.labels[index][1] print('{:.4f}\t{}'.format(score, label)) plt.figure(num=None, figsize=(18, 1), dpi=100) plt.contourf(np.broadcast_to(ground_truth/output_index, (2, nb_instances)), norm=normalize, interpolation='nearest') plt.title('Ground Truth') plt.show() # print only the positions that predicted the global ground truth category temp = np.zeros((nb_instances)) temp[class_prediction==output_index] = 1 plt.figure(num=None, figsize=(18, 1), dpi=100) plt.contourf(np.broadcast_to(temp, (2, nb_instances)), norm=normalize, interpolation='nearest') plt.title('Prediction of the ground truth class') plt.show() plt.figure(num=None, figsize=(18, 1), dpi=100) plt.contourf(np.broadcast_to(prediction[:,output_index], (2, nb_instances)), norm=normalize, interpolation='nearest') plt.title('Probability for ground truth') plt.show() print('\n') """ Explanation: Now show the temporal prediction for the activity happening at the video. End of explanation """
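A compact numeric companion to the plots above (an optional sketch that reuses only objects already defined in this notebook) is the fraction of instances in each video that were assigned the video's main activity:
# Illustrative summary: per-video fraction of instances predicted as the ground-truth class.
for v, prediction, class_prediction in predictions:
    output_index = dataset.get_output_index(v.label)
    frac = np.mean(class_prediction == output_index)
    print('{}\t{}\t{:.2%} of instances match the main activity'.format(
        v.video_id, v.get_activity(), frac))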
jbmuir/SeismoTeaching
task_1.ipynb
mit
#A fdsn client allow us to connect with web services for obtaining data from obspy.clients.fdsn import Client #The UTCDateTime module specifies times in a consistent fashion - useful for specifying dates precisely from obspy import UTCDateTime """ we can add a "keyword argument" like "timeout" below to certain functions - keyword arguments allow Python functions support variable numbers of arguments easily; once the keyword arguments start, their order doesn't matter we create a new client connected to the IRIS webservice, and increase the timeout value from its default 120s because 2 minutes is often not enough to download all the data we want. Note: this is a multiline comment (marked by the triple quotes); these comments can be placed in functions to automatically document them - but that is a topic for another day. """ iris_client = Client("IRIS", timeout=600) """ Explanation: Task 1.0 example answer: Name: Jack B. Muir, Preferred Name: Jack, Email: [email protected], Pronouns: he/him/his Lab # 1 - Introduction to Observational Seismology in Python Rationale: This lab session is pitched as a piece of formative assessment for the students. They should hopefully feel confident enough to use basic Python + Obspy and to be able to read documentation so that they can create their own analyses after the end of the lab course. In this first lab, students are provided with explicit locations for which to seek out answers to their technical questions but are expected to construct the answers themselves rather than changing values in a procedure that they have been given to memorize rote. In an attempt to promote mastery of the material in this lab, the class builds up from a worked example and slowly introduces new concepts so that students are not given an undue cognitive burden. Experimentation in a real teaching environment will obviously be required to calibrate the tasks to fulfill this goal optimally. Welcome to the first laboratory task of observational seismology. In this set of laboratories, we will learn the skills of modern observational seismology via the Python/Obspy workflow. We will be skipping over some parts of the scientific python ecosystem in favor of jumping right into the seismology - these will be saved for the second lab session. Using the Jupyter Notebook Jupyter notebooks are a mixture of code cells and markdown text cells. Markdown is a light markup language that allows some flexibility in specifying textual effects (headers, italics etc.) without making the user choose different fonts / sizes etc - for reference, see https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet. Code blocks are executed using a Python kernel. Jupyter is a modal editor; it has a command mode, entered by the esc key, and an editing mode, entered by double clicking a cell. In the command mode, you can make a cell markdown by pressing m, and code by pressing y. To execute a cell, press shift + enter. You can create a new cell above by pressing a, and below by pressing b, whilst in command mode. Try to keep code cells short to keep the logic of your worksheet simple. There are many other keyboard shortcuts that you can find in the Jupyter documentation, and you can also use the menus to perform these tasks. Since this document is a Jupyter notebook itself, you can always double click on one of these markdown text cells to see it in action. 
Task 1.0 - Let's get to know you (1 mark) Create a cell above the notebook title containing your name and email address like so: Name: your-name, Email: your-email. Feel free to add any extra biographical information (like a preferred name / preferred pronouns etc) here. Note - hopefully students realize that they can double click on this cell to edit it and then just copy the formatting that they see here. Rationale: The idea here is to simultaneously attempt to promote an inclusive classroom, and to get students to perform basic comprehension about how Markdown works. As the class continues, we will attempt to slide up Bloom's taxonomy and set up students with the skills to create their own material. Importing Obspy In the Python ecosystem, most interesting pieces of code are kept in modules that must be imported. By a quirk of the Obspy module, we have to import different submodules separately - see below: End of explanation """ #we use a UTCDateTime object to set the starting time of our search st = UTCDateTime("2010-02-27T00:00:00Z") #we can add a time offset in seconds to get the end time of the search et = st + 24*3600 #or equivalently just use another UTCDateTime object et = UTCDateTime("2010-02-28T00:00:00Z") #we can then use the fdsn client to download an event catalogue, #specifying a large minimum magnitude to restrict the results maule_catalogue = iris_client.get_events(starttime=st, endtime=et, minmagnitude=8) #we can let obspy automatically plot the results in a nice way - the orthographic "ortho" projection shows the location #in a continental context. maule_catalogue.plot(projection='ortho') #we can print the catalogue like so (obspy automatically formats it for us) print(maule_catalogue) """ Explanation: Searching for Events One of the first things we are often interested in is the location and magnitude of an event. Whilst deriving these from seismograms is nontrivial, for historical events we can often fetch them from a catalogue. Let's look at a recent large earthquake in the Americas, the 2010 Maule earthquake. End of explanation """ # Start your code here: #A possible acceptable answer st = UTCDateTime("1975-01-01T00:00:00Z") et = UTCDateTime("2017-01-01T00:00:00Z") big_catalogue = iris_client.get_events(starttime=st, endtime=et, minmagnitude=6.5, minlongitude=-85, maxlongitude=-30, minlatitude=-30, maxlatitude=0) big_catalogue.plot(projection='local', label=None); """ Explanation: *Rationale: This section provides a template or worked example for students to work from in the coming questions. Hopefully they will be able to use the example here to build their own answers.* Regional Earthquake Catalogues Rationale: This coming task is technically simple, but it asks much more from students in terms of geophysical interpretation. The idea here is to establish value - this class is pitched at senior undergraduates / early graduate students who probably know a fair bit of content but do not have the technical knowledge to explore this data themselves. By asking them qualitative questions at a higher level than the technical task, we both improve geophysical understanding and also create an authentic technical task that reflects a real geophysical problem - hopefully this motivates the students to perform well and fully understand the material. The solution to this problem requires expansion on the worked example, but we have explicitly provided the resources required to do this. Therefore, the step up the learning curve should hopefully not be too difficult. 
Task 1.1 - Our Earthquake in Context (10 marks) Lets put the 2010 Maule earthquake in context. Make a plot of all > 6.5 magnitude earthquakes in South America between the latitudes of 30ยฐS and 0ยฐS, and longitudes of 85ยฐW and 30ยฐW, from 1975 to the beginning of 2017. Make the projection local & turn the magnitude label off so that you can see the spatial variation clearly. Questions: What spatial features do you see in the data? From your knowledge of plate tectonics, what are the likely causes of these features? What sort of earthquake was Maule likely to be? Hint: You should look at the Obspy documentation to observe how to restrict the spatial range of your search appropriately, and to turn the magnitude plotting off. The url for the catalogue search is https://docs.obspy.org/packages/autogen/obspy.clients.fdsn.client.Client.get_events.html and for the catalogue plotting https://docs.obspy.org/packages/autogen/obspy.core.event.catalog.Catalog.plot.html#obspy.core.event.catalog.Catalog.plot Your Code: End of explanation """ #The main plotting functionality is in the matplotlib submodule pyplot, which is idiomatically imported as plt import matplotlib.pyplot as plt #In order to tell matplotlib to generate its plots inside the Jupyter notebook instead of a seperate window, we need #to tell it to plot inline as shown %matplotlib inline """ Explanation: Your Answer to the questions: Example good answer: The earthquakes are concentrated on the western coast of South America. Furthermore, we see that the earthquakes generally increase in depth away from the coastline. We know that the Atlantic coast of South America is a passive continental margin, which explains the lack of large seismicity on the East coast. Additionally, we know that the Nazca plate subducts beneath the South American plate on the Pacific Coast; the deepening seismicity tracks the subducting slab as it penetrates into the mantle. Given the tectonic setting, Maule was likely a subduction associated thrust earthquake. Task 1.2 - Exploring the Depth Distribution of Earthquakes (11 marks total over two subtasks) Rationale: This task requires students to break out of Obspy. It requires significantly more understanding of the wider Python ecosystem to execute correctly. Furthermore, the documentation reference required (matplotlib) is more difficult to parse than the previous Obspy documentation. Eventually these documentation hints would go away in further labs; the expectation being that students be able to construct the whole workflow by themselves, consulting with relevent resources. The trickiest techical / intellectual point here is the use of a Python list comprehension, which is an essential skill to master. I've attempted to the similar worked example -> application pattern using a real-world task to promote student mastery, however it is unlikely that this single lab will prove sufficient. Unfortunately, I think that the current lab by itself will is as long as possible whilst keeping within the 1.5hr time limit specified in the project proposal, so there are no more opportunities to get students to practice this skill, but future lab sessions would definitely rely heavily on the technique. Now that we have some feel for the 3D spatial distribution of large earthquakes in South America, lets focus in on their depth behaviour specifically. 
Obspy does not have much support for statistical plots, so in this section we will have to pull information from Obspy data structures and use that information to create our own plots. The basic & most flexible plotting API in Python is Matplotlib, which we will use in this task. End of explanation """ #Your code: st = UTCDateTime("1975-01-01T00:00:00Z") et = UTCDateTime("2017-01-01T00:00:00Z") your_catalogue = iris_client.get_events(starttime=st, endtime=et, minmagnitude=3, minlongitude=-80, maxlongitude=-60, minlatitude=-30, maxlatitude=0) """ Explanation: Task 1.2.0 - Downloading a Big Catalogue (1 mark) Now that we have set up Matplotlib, we need to get a more comprehensive dataset to look at. Download a catalogue containing all earthquakes with magnitude greater than 3 between the start of 1975 & the start of 2017, between the latitudes of 30ยฐS and 0ยฐS, and longitudes of 80ยฐW and 60ยฐW (this longitude range restricts our focus to the western coast of South America). This catalogue may take a while to download, so you should take this time to discuss with your neighbours and the instructor your thoughts. Rationale: this sub-task repeats a skill that we have earlier used - we can't answer it explicilty or students will simply have the answer for the last question if they read ahead! End of explanation """ #Make sure that you replace "your_catalogue" with whatever you have, in fact, called your catalogue depths = [ev.origins[0].depth for ev in your_catalogue if ev.origins[0].depth != None] """ The plt.hist function makes this plot, but also returns a lot of information about the plot, namely the number of counts in each bin, the position of the bins """ counts, bins, patches = plt.hist(depths, bins=10) """ Explanation: Once you have your catalogue, we will use a construct known as a list comprehension to pull out the event depths from the somewhat complicated catalogue data structure as shown. List comprehensions are one of Python's most useful features; they allow us to do away with many operations involving loops and provide a user friendly, readable syntax. They become powerful because they can be combined with iterator objects and logical predicates to sift through data efficiently - in this case, some of the events don't have associated depth data for their origins. Python represents this by a None type, that can't be plotted for obvious reasons. We filter out the data by using a logical predicate as shown. In Python 3 we can also use comprehensions for sets and dictionaries (dictionaries are collections of key-value pairs in Python). They require some practice to use, but there are plenty of good tutorials on the internet. Once we have the depth information, we can make a histogram of the depths as shown. End of explanation """ #Your Code: #Example student code: km_depths = [d/1000 for d in depths] counts, bins, patches = plt.hist(km_depths, bins=30, histtype='step', color='k') counts, bins, patches = plt.hist(km_depths, bins=30, histtype='stepfilled', color='k', alpha=0.2) plt.yscale('log') plt.xlabel('Depth (km)', fontsize=14) plt.ylabel('Counts', fontsize=14) plt.title('Earthquake counts as a function of depth', fontsize=14) """ Explanation: Task 1.2.1 - Making Nicer Plots & Interpreting their Features (10 marks) Let's make this plot look a bit more professional! 
Take the data used to generate the above plot and give it the following (then answer the questions) Required Plot Features: A title (fontsize 14) Labels for the x & y axes (fontsize 14) Log scale for the y axis so that the surface features don't dominate the plot X scale in km rather than m At least 20 histogram bins since we have lots of earthquakes A "step" rather than "bar" type histogram (bonus if you can fill in the steps with a complementary light color whilst still seeing the edge of the steps) Questions: How does the distribution of earthquakes (seismicity) change with depth? How reliable do you think inferences about seismicity generated from your plot are? What factors contribute to any uncertainties? Hint: In order to create the plot specified above, the documentation for pyplot, found at http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.hist, will be very helpful. In order to set the x-scale to km, try creating a new dataset of depths using a comprehension. End of explanation """
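As extra practice with the comprehension syntax used above, two further one-liners are sketched here (illustrative only; they assume the your_catalogue object from Task 1.2.0 exists and they reuse the same ObsPy event attributes accessed earlier in this lab):
#a set comprehension collecting the unique event depths, converted to km and rounded
depth_set = {round(ev.origins[0].depth/1000) for ev in your_catalogue if ev.origins[0].depth != None}
#a dictionary comprehension mapping each event's origin time to its depth in km
depth_by_time = {ev.origins[0].time: ev.origins[0].depth/1000 for ev in your_catalogue if ev.origins[0].depth != None}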
param411singh/inf1340-2015-notebooks
Week 11.ipynb
mit
import json list1 = ["Monday", 6, "pumpkin", 3.1415] dict1 = { "latitude_degree": 43.6617, "latitude_direction": "N", "longitude_degree": 79.3950, "longitude_direction": "W" } json_encoded = json.dumps(dict1) print json_encoded """ Explanation: Overview Hour 1 JSON Regular Expressions Hour 2 Q&A Hour 3 Work Time JSON Format Strings can be further arranged, so they can encode more complex information. For example, CSV (Comma-Separated Values) can be used to share spreadsheet data between applications. JSON (Javascript Object Notation) is used to transmit ordered lists (arrays) and name-value pairs (objects). It can be sent in a file or through a web request. Syntax The syntax for a JSON array is similar to the syntax for a Python list. ["Monday", 6, "pumpkin", 3.1415] The quotation marks around strings are mandatory. The syntax for a JSON object is similar to the syntax for Python dictionaries. { "latitude_degree": 43.6617, "latitude_direction": "N", "longitude_degree": 79.3950, "longitude_direction": "W" } Arrays and objects can appear as values within arrays and objects. Here is an example of a complex JSON string, formatted for readability by humans. [ { "customer": { "last": "Ball", "first": "Maldonado" }, "rating": 2, "specialOrder": true, "tags": [ "ullamco", "aute", "mollit", "ex" ], "greeting": "Happy Birthday", "filling": "chocolate", "batter": "vanilla", "frosting": "chocolate" } ] As you can see, it's a cake-order dictionary with a nested dictionary for the customer's name and a list of strings for the tags. The formatting is similar to Python. A useful tool when working with JSON is a viewer. JSON that is being transmitted usually has all of the whitespace removed. It makes the total transmission smaller, but difficult for humans to read. It's common to use a JSON viewer, in your IDE or on a web page. You can find one by Googling. Another common tool is a JSON validator. It checks whether your syntax is correct and helps to localize errors. Sometimes you can use the same tool to view and validate JSON. Writing JSON Any Python data structure can be saved as JSON. End of explanation """ import json with open("cake.json", "r") as file_reader: file_contents = file_reader.read() json_contents = json.loads(file_contents) # print json_contents print json.dumps(json_contents, indent=1) """ Explanation: Reading JSON There are Python functions for encoding and decoding JSON into data structures. End of explanation """ for order in json_contents: print("Customer: " + order['customer']['last']) for word in order['tags']: print("\t" + word) """ Explanation: json_contents is typically a list, a dictionary, or a combination. You already know how to use these. You can use a for to iterate over all the elements in a list. You can use dictionary syntax to access key value pairs. End of explanation """ import re phone_regex = re.compile(r'\d\d\d-\d\d\d-\d\d\d\d') """ Explanation: Regular Expressions Regular expressions, called regexes for short, are descriptions for a pattern of text. These are useful for validating inputs and formatting. For example, a \d in a regex stands for a digit character, that is, any single numeral 0 to 9. The regex \d\d\d-\d\d\d-\d\d\d\d is used by Python to match a telephone number as a string of three numbers, a hyphen, three more numbers, another hyphen, and four numbers. Any other string would not match the \d\d\d-\d\d\d-\d\d\d\d regex. But regular expressions can be much more sophisticated. 
For example, adding a 3 in curly brackets ({3}) after a pattern is like saying, "Match this pattern three times." So the slightly shorter regex \d{3}-\d{3}-\d{4} also matches the correct phone number format. Resources Chapter 7 of "Automate the Boring Stuff with Python: Practical Programming for Total Beginners" by Al Sweigart. Python Regular Expressions by Google for Education Creating Regex Objects End of explanation """ phone_match = phone_regex.search('My number is 647-970-9425.') if phone_match is None: print ("No match found") else: print('Phone number found: ' + phone_match.group()) """ Explanation: Matching Regex Objects End of explanation """ phone_regex = re.compile(r'(\d\d\d)-(\d\d\d-\d\d\d\d)') phone_match = phone_regex.search('My number is 415-555-4242.') print phone_match.group() print phone_match.group(0) print phone_match.group(1) print phone_match.group(2) print phone_match.groups() area_code, main_number = phone_match.groups() print area_code print main_number """ Explanation: Search moves through the string from start to end, stopping at the first match found All of the pattern must match Grouping with Parentheses Parentheses are used to group parts of an expression Groups can be used to retrieve parts of the match End of explanation """ cheese_regex = re.compile(r'^Cheese') cheese_match = cheese_regex.search('Cheese Shop sketch') print cheese_match.group() cheese_match = cheese_regex.search('Not much of a Cheese Shop really, is it?') cheese_match == None monty_regex = re.compile(r"Terry|Michael|Graham|John|Eric") monty_match = monty_regex.search("Eric Palin, John Cleese, and Eric Idle") print monty_match.group(), "at" , monty_match.start() monty_match = monty_regex.findall("Eric Palin, John Cleese, and Eric Idle") print monty_match parrot_regex = re.compile(r'Parrot(man|mobile|copter|bat)') parrot_match = parrot_regex.search('Parrotcopter lost a blade') parrot_match.group() parrot_regex = re.compile(r'Parrot(wo)?man') parrot_match = parrot_regex.search('The Adventures of Parrotman') print parrot_match.group() parrot_match = parrot_regex.search('The Adventures of Parrotwoman') print parrot_match.group() """ Explanation: Special Operators ^ = start, $ = end -- match the start or end of the string \ -- inhibit the "specialness" of a character. So, for example, use . to match a period or \ to match a slash. If you are unsure if a character has special meaning, such as '@', you can put a slash in front of it, \@, to make sure it is treated just as a character. | (pipe, on slash key) --alternation, for combining patterns ? --optional matching End of explanation """ parrot_regex = re.compile(r'Parrot(wo)*man') parrot_match = parrot_regex.search('The Adventures of Parrotman') print parrot_match.group() parrot_match = parrot_regex.search('The Adventures of Parrotwowowoman') print parrot_match.group() parrot_regex = re.compile(r'Parrot(wo)+man') parrot_match = parrot_regex.search('The Adventures of Parrotwoman') print parrot_match.group() parrot_match = parrot_regex.search('The Adventures of Parrotman') parrot_match == None # Match everything with .* name_regex = re.compile(r'First Name: (.*) Last Name: (.*)') name_match = name_regex.search("First Name: Tarquin Last Name: Fin-tim-lim-bim-whin-bim-lim-bus-stop-F'tang-F'tang-Ole-Biscuitbarrel") name_match.groups() """ Explanation: Wildcard Characters . 
(a period) -- matches any single character except newline '\n' * (asterisk) -- match zero or more, or optional + -- match one or more End of explanation """ ha_regex = re.compile(r'(Ha){3}') ha_match = ha_regex.search('HaHaHa') ha_match.group() """ Explanation: Matching Specific Repetitions Another way of writing (Ha)(Ha)(Ha) is (Ha){3} End of explanation """ greedy_regex = re.compile(r'(Ha){3,5}') greedy_match = greedy_regex.search('HaHaHaHaHa') print greedy_match.group() nongreedy_regex = re.compile(r'(Ha){3,5}?') nongreedy_match = nongreedy_regex.search('HaHaHaHaHa') print nongreedy_match.group() """ Explanation: (Ha){3} means exactly 3 repetitions (Ha){,5} means up to 5 repetitions (Ha){3,5} means 3 to 5 repetitions Python regular matching is greedy by default. It takes the first match and makes it as long as possible. In ambiguous situations, the longest string possible is matched. To suppress this use a ? (question mark) after the pattern End of explanation """ gbs_regex = re.compile(r'\d+\s\w+') gbs_regex.findall("12 apostles, 11 went straight to heaven, \\ 10 commandments, 9 bright eyed shiners, \\ 8 Gabriel angels") """ Explanation: Character Classes Another way of writing (0|1|2|3|4|5|6|7|8|9) is \d, which is a shorthand character class. <img src="images/character_classes.png" width=600> End of explanation """ vowel_regex = re.compile(r'[aeiouAEIOU]') vowel_regex.findall('This is a dead parrot. DEAD.') """ Explanation: Custom Character Classes Use square brackets Use a caret (^) to negate It's on the 6 key End of explanation """ vowel_regex = re.compile(r'[aeiou]', re.I) vowel_regex.findall('This is a dead parrot. DEAD.') # Exercise # Joke: What do you call a pig with three eyes? Piiig! """ Explanation: Compilation Flags DOTALL, S --Make . match any character, including newlines IGNORECASE, I --Do case-insensitive matches LOCALE, L --Do a locale-aware match MULTILINE, M --Multi-line matching, affecting ^ and $ VERBOSE, X --Enable verbose REs, which can be organized more cleanly and understandably. UNICODE, U --Makes several escapes like \w, \b, \s and \d dependent on the Unicode character database. End of explanation """
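The flags listed above can also be combined with the bitwise-or operator |; a quick illustrative example (not from the original lecture notes):
dead_regex = re.compile(r'dead parrot.*shop', re.IGNORECASE | re.DOTALL)
print dead_regex.search('This DEAD PARROT\ncame from the Cheese Shop').group()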
csiu/datasci
text/2015-07-26_document-classification_nb-2cat.ipynb
mit
import glob import pandas as pd samples = { 'train':{}, 'test':{} } files = glob.glob('20news-bydate-*/rec.sport*/*') for s in samples.keys(): for c in ['baseball', 'hockey']: samples[s][c] = samples[s].get(c, len(filter(lambda x: s in x and c in x, files))) print 'Number of training documents:\t', sum(samples['train'].values()) print 'Number of testing documents:\t', sum(samples['test'].values()) pd.DataFrame.from_dict(samples) """ Explanation: Inspiration & Reference: http://blog.yhathq.com/posts/naive-bayes-in-python.html Document collection This dataset is taken from 20Newsgroups; we only use 2 categories: CATEGORIES rec.sport.baseball rec.sport.hockey In total there is 1993 documents -- we take 60% for training and 40% for testing: End of explanation """ import nltk from nltk.corpus import stopwords from nltk.stem import SnowballStemmer import string import glob import string import math import operator def count_words(words): wc = {} for word in words: wc[word] = wc.get(word, 0.0) + 1.0 return wc table = string.maketrans("","") stop = stopwords.words("english") snowballstemmer = SnowballStemmer("english") def preprocess(f): ## will need 'table', 'stop', and 'snowballstemmer' predefined text = open(f).read().translate(table, string.punctuation).lower() text = filter(lambda x: x in string.printable, text) words = nltk.word_tokenize(text) words = [i for i in words if i not in stop] words = [snowballstemmer.stem(i) for i in words] return words vocab = {} word_counts = { "baseball":{}, "hockey":{}, } priors = { "baseball":0., "hockey":0., } docs = [] for f in glob.glob('20news-bydate-train/rec.sport*/*'): if 'baseball' in f: category = 'baseball' else: category = 'hockey' docs.append((category, f)) priors[category] += 1 words = preprocess(f) counts = count_words(words) for word, count in counts.items(): if word not in vocab: vocab[word] = 0.0 if word not in word_counts[category]: word_counts[category][word] = 0.0 vocab[word] += count word_counts[category][word] += count print 'Number of features (i.e. 
terms): ', len(vocab) """ Explanation: Model: Naive Bayes $P(C|D) = \frac{P(C) P(D|C)}{P(D)}$ Independence assumption Training End of explanation """ results = { "baseball":{ "idx":0, "results":{0:0.0, 1:0.0} }, "hockey":{ "idx":1, "results":{0:0.0, 1:0.0} } } docfail = [] ## P(category) prior_baseball = priors["baseball"] / sum(priors.values()) prior_hockey = priors["hockey"] / sum(priors.values()) for new_doc in glob.glob('20news-bydate-test/rec.sport*/*'): if 'baseball' in new_doc: category = 'baseball' else: category = 'hockey' words = preprocess(new_doc) counts = count_words(words) ## To prevent computational errors, will perform operations in logspace, log(probabilities) log_prob_baseball = 0.0 log_prob_hockey = 0.0 for w, cnt in counts.items(): ## heuristic: skip words not seen before, or words < 3 letters long if not w in vocab or len(w) <= 3: continue ## calculate prob that the word occurs at all p_word = vocab[w] / sum(vocab.values()) ## calculate P(word|category) p_w_given_baseball = word_counts["baseball"].get(w, 0.0) / sum(word_counts["baseball"].values()) p_w_given_hockey = word_counts["hockey"].get(w, 0.0) / sum(word_counts["hockey"].values()) if p_w_given_baseball > 0: log_prob_baseball += math.log(cnt * p_w_given_baseball / p_word) if p_w_given_hockey > 0: log_prob_hockey += math.log(cnt * p_w_given_hockey / p_word) try: max_index, max_value = max(enumerate([ math.exp(log_prob_baseball + math.log(prior_baseball)), #p_baseball_given_w math.exp(log_prob_hockey + math.log(prior_hockey)), #p_hockey_given_w ]), key=operator.itemgetter(1)) except: docfail.append(new_doc) continue results[category]["results"][max_index] = results[category]["results"].get(max_index, 0.0) + 1.0 ## OUPUT: documents which fail testing for i in docfail: print i print results """ Explanation: Testing End of explanation """ import json with open('dc-results/naivebayes2.json') as f: results = json.load(f) import pandas as pd import numpy as np %load_ext rpy2.ipython %R library(ggplot2) %R library(reshape) %R library(gplots) %R library('grid') %R library('gridExtra') %matplotlib inline from copy import deepcopy import matplotlib.pyplot as plt print r = {k:v['results'] for k,v in results.iteritems()} df = pd.DataFrame.from_dict(r)#, orient="index") df.index = ['predict_baseball', 'predict_hockey'] dfcounts = deepcopy(df) print dfcounts if (sum(df.baseball) != 0): df.baseball = df.baseball / sum(df.baseball) if (sum(df.hockey) != 0): df.hockey = df.hockey / sum(df.hockey) df _total = sum(sum(dfcounts.values)) print 'Number of test samples: %d' % _total print 'Percent of test set labelled correctly: %0.1f%%' % (sum(np.diagonal(dfcounts)) / _total * 100) """ Explanation: Because I don't want to re-run the training & testing everything time I come back to this project, we will save the result to a file. import json with open('dc-results/naivebayes2.json', 'w') as out: json.dump(results, out) Visualizing the results End of explanation """ %%R -i df df = melt(df) colnames(df) = c("expected", "value") df = cbind(df, classification=rep(c('baseball', 'hockey'), 2)) ggplot(df, aes(x=expected, y=value, fill=classification)) + geom_bar(stat="identity") + xlab("Actual label") + ylab("Proportion") """ Explanation: The size of the test set is 796 documents. Overall, the classifier has an accuracy of 88.6%. 
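The same confusion table gives per-class accuracy directly; a small illustrative check (reusing the dfcounts frame defined above) is shown below, and it matches the per-class figures quoted in the next section:
#fraction of each true class that was labelled correctly
per_class_accuracy = np.diagonal(dfcounts) / dfcounts.sum(axis=0)
print per_class_accuracy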
End of explanation """ %%R -i dfcounts dat = cbind(expected=colSums(dfcounts), predicted=rowSums(dfcounts)) dat = melt(dat) colnames(dat) <- c("Label", "Type", "Count") ggplot(dat, aes(x=Label, y=Count, fill=Type)) + geom_bar(stat="identity", position="dodge") """ Explanation: Given the actual labels, we find that our classifier performs well for the baseball labels (99% correct) and okay for the hockey labels (78% correct). End of explanation """ terms_baseball = word_counts['baseball'].keys() terms_hockey = word_counts['hockey'].keys() print "Total # terms for baseball: ", len(terms_baseball) print "Total # terms for hockey: ", len(terms_hockey) %%R -i terms_baseball,terms_hockey -w 380 -h 300 -u px venn(list(baseball=terms_baseball, hockey=terms_hockey)) import re _digits = re.compile('\d') def contains_digits(d): return bool(_digits.search(d)) unique_baseball = set(terms_baseball).difference(set(terms_hockey)) print len(unique_baseball) unique_baseball = filter(lambda x: len(x) > 3, unique_baseball) print len(unique_baseball) unique_baseball = filter(lambda x: not x.isdigit(), unique_baseball) print len(unique_baseball) unique_baseball = filter(lambda x: not contains_digits(x), unique_baseball) print len(unique_baseball) unique_hockey = set(terms_hockey).difference(set(terms_baseball)) print len(unique_hockey) unique_hockey = filter(lambda x: len(x) > 3, unique_hockey) print len(unique_hockey) unique_hockey = filter(lambda x: not x.isdigit(), unique_hockey) print len(unique_hockey) unique_hockey = filter(lambda x: not contains_digits(x), unique_hockey) print len(unique_hockey) """ Explanation: A possible explanation for why the classifier did so well for classifying "baseball" documents is because the classifier tends to predict more "baseball" than expected. Investigating the features End of explanation """ vocab = {} word_counts = { "baseball":{}, "hockey":{}, } priors = { "baseball":0., "hockey":0., } docs = [] for f in glob.glob('20news-bydate-train/rec.sport*/*'): if 'baseball' in f: category = 'baseball' else: category = 'hockey' docs.append((category, f)) priors[category] += 1 words = preprocess(f) counts = count_words(words) for word, count in counts.items(): if contains_digits(word): continue if word not in vocab: vocab[word] = 0.0 if word not in word_counts[category]: word_counts[category][word] = 0.0 vocab[word] += count word_counts[category][word] += count print 'Number of features (i.e. 
terms): ', len(vocab) results = { "baseball":{ "idx":0, "results":{0:0.0, 1:0.0} }, "hockey":{ "idx":1, "results":{0:0.0, 1:0.0} } } docfail = [] ## P(category) prior_baseball = priors["baseball"] / sum(priors.values()) prior_hockey = priors["hockey"] / sum(priors.values()) for new_doc in glob.glob('20news-bydate-test/rec.sport*/*'): if 'baseball' in new_doc: category = 'baseball' else: category = 'hockey' words = preprocess(new_doc) counts = count_words(words) ## To prevent computational errors, will perform operations in logspace, log(probabilities) log_prob_baseball = 0.0 log_prob_hockey = 0.0 for w, cnt in counts.items(): ## heuristic: skip words not seen before, or words < 3 letters long if not w in vocab or len(w) <= 3: continue ## calculate prob that the word occurs at all p_word = vocab[w] / sum(vocab.values()) ## calculate P(word|category) p_w_given_baseball = word_counts["baseball"].get(w, 0.0) / sum(word_counts["baseball"].values()) p_w_given_hockey = word_counts["hockey"].get(w, 0.0) / sum(word_counts["hockey"].values()) if p_w_given_baseball > 0: log_prob_baseball += math.log(cnt * p_w_given_baseball / p_word) if p_w_given_hockey > 0: log_prob_hockey += math.log(cnt * p_w_given_hockey / p_word) try: max_index, max_value = max(enumerate([ math.exp(log_prob_baseball + math.log(prior_baseball)), #p_baseball_given_w math.exp(log_prob_hockey + math.log(prior_hockey)), #p_hockey_given_w ]), key=operator.itemgetter(1)) except: docfail.append(new_doc) continue results[category]["results"][max_index] = results[category]["results"].get(max_index, 0.0) + 1.0 ## OUPUT: documents which fail testing for i in docfail: print i print results r = {k:v['results'] for k,v in results.iteritems()} df = pd.DataFrame.from_dict(r)#, orient="index") df.index = ['predict_baseball', 'predict_hockey'] dfcounts = deepcopy(df) print dfcounts if (sum(df.baseball) != 0): df.baseball = df.baseball / sum(df.baseball) if (sum(df.hockey) != 0): df.hockey = df.hockey / sum(df.hockey) print df print _total = sum(sum(dfcounts.values)) print 'Number of test samples: %d' % _total print 'Percent of test set labelled correctly: %0.1f%%' % (sum(np.diagonal(dfcounts)) / _total * 100) %%R -i df,dfcounts -w 700 -h 300 -u px df = melt(df) colnames(df) = c("expected", "value") df = cbind(df, classification=rep(c('baseball', 'hockey'), 2)) p1 <- ggplot(df, aes(x=expected, y=value, fill=classification)) + geom_bar(stat="identity") + xlab("Actual label") + ylab("Proportion") dat = cbind(expected=colSums(dfcounts), predicted=rowSums(dfcounts)) dat = melt(dat) colnames(dat) <- c("Label", "Type", "Count") p2 <- ggplot(dat, aes(x=Label, y=Count, fill=Type)) + geom_bar(stat="identity", position="dodge") grid.arrange(p1, p2, ncol=2) terms_baseball = word_counts['baseball'].keys() terms_hockey = word_counts['hockey'].keys() print "Total # terms for baseball: ", len(terms_baseball) print "Total # terms for hockey: ", len(terms_hockey) %%R -i terms_baseball,terms_hockey -w 380 -h 300 -u px venn(list(baseball=terms_baseball, hockey=terms_hockey)) """ Explanation: Redo training/testing ... but this time remove terms which contain a digit End of explanation """
jinntrance/MOOC
coursera/ml-classification/assignments/module-10-online-learning-assignment-blank.ipynb
cc0-1.0
from __future__ import division import graphlab """ Explanation: Training Logistic Regression via Stochastic Gradient Ascent The goal of this notebook is to implement a logistic regression classifier using stochastic gradient ascent. You will: Extract features from Amazon product reviews. Convert an SFrame into a NumPy array. Write a function to compute the derivative of log likelihood function with respect to a single coefficient. Implement stochastic gradient ascent. Compare convergence of stochastic gradient ascent with that of batch gradient ascent. Fire up GraphLab Create Make sure you have the latest version of GraphLab Create. Upgrade by pip install graphlab-create --upgrade See this page for detailed instructions on upgrading. End of explanation """ products = graphlab.SFrame('amazon_baby_subset.gl/') """ Explanation: Load and process review dataset For this assignment, we will use the same subset of the Amazon product review dataset that we used in Module 3 assignment. The subset was chosen to contain similar numbers of positive and negative reviews, as the original dataset consisted of mostly positive reviews. End of explanation """ import json with open('important_words.json', 'r') as f: important_words = json.load(f) important_words = [str(s) for s in important_words] # Remote punctuation def remove_punctuation(text): import string return text.translate(None, string.punctuation) products['review_clean'] = products['review'].apply(remove_punctuation) # Split out the words into individual columns for word in important_words: products[word] = products['review_clean'].apply(lambda s : s.split().count(word)) """ Explanation: Just like we did previously, we will work with a hand-curated list of important words extracted from the review data. We will also perform 2 simple data transformations: Remove punctuation using Python's built-in string manipulation functionality. Compute word counts (only for the important_words) Refer to Module 3 assignment for more details. End of explanation """ products """ Explanation: The SFrame products now contains one column for each of the 193 important_words. End of explanation """ train_data, validation_data = products.random_split(.9, seed=1) print 'Training set : %d data points' % len(train_data) print 'Validation set: %d data points' % len(validation_data) """ Explanation: Split data into training and validation sets We will now split the data into a 90-10 split where 90% is in the training set and 10% is in the validation set. We use seed=1 so that everyone gets the same result. End of explanation """ import numpy as np def get_numpy_data(data_sframe, features, label): data_sframe['intercept'] = 1 features = ['intercept'] + features features_sframe = data_sframe[features] feature_matrix = features_sframe.to_numpy() label_sarray = data_sframe[label] label_array = label_sarray.to_numpy() return(feature_matrix, label_array) """ Explanation: Convert SFrame to NumPy array Just like in the earlier assignments, we provide you with a function that extracts columns from an SFrame and converts them into a NumPy array. Two arrays are returned: one representing features and another representing class labels. Note: The feature matrix includes an additional column 'intercept' filled with 1's to take account of the intercept term. 
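Once the conversion cell below has run, a quick illustrative sanity check (not part of the graded assignment) is to confirm the shapes - there should be 194 columns, i.e. the 193 important words plus the intercept:
print feature_matrix_train.shape   # expect (number of training reviews, 194)
print sentiment_train.shape        # expect (number of training reviews,)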
End of explanation """ feature_matrix_train, sentiment_train = get_numpy_data(train_data, important_words, 'sentiment') feature_matrix_valid, sentiment_valid = get_numpy_data(validation_data, important_words, 'sentiment') """ Explanation: Note that we convert both the training and validation sets into NumPy arrays. Warning: This may take a few minutes. End of explanation """ ''' produces probablistic estimate for P(y_i = +1 | x_i, w). estimate ranges between 0 and 1. ''' def predict_probability(feature_matrix, coefficients): # Take dot product of feature_matrix and coefficients score = np.dot(feature_matrix, coefficients) # Compute P(y_i = +1 | x_i, w) using the link function predictions = 1. / (1.+np.exp(-score)) return predictions """ Explanation: Are you running this notebook on an Amazon EC2 t2.micro instance? (If you are using your own machine, please skip this section) It has been reported that t2.micro instances do not provide sufficient power to complete the conversion in acceptable amount of time. For interest of time, please refrain from running get_numpy_data function. Instead, download the binary file containing the four NumPy arrays you'll need for the assignment. To load the arrays, run the following commands: arrays = np.load('module-10-assignment-numpy-arrays.npz') feature_matrix_train, sentiment_train = arrays['feature_matrix_train'], arrays['sentiment_train'] feature_matrix_valid, sentiment_valid = arrays['feature_matrix_valid'], arrays['sentiment_valid'] Quiz question: In Module 3 assignment, there were 194 features (an intercept + one feature for each of the 193 important words). In this assignment, we will use stochastic gradient ascent to train the classifier using logistic regression. How does the changing the solver to stochastic gradient ascent affect the number of features? Building on logistic regression Let us now build on Module 3 assignment. Recall from lecture that the link function for logistic regression can be defined as: $$ P(y_i = +1 | \mathbf{x}_i,\mathbf{w}) = \frac{1}{1 + \exp(-\mathbf{w}^T h(\mathbf{x}_i))}, $$ where the feature vector $h(\mathbf{x}_i)$ is given by the word counts of important_words in the review $\mathbf{x}_i$. We will use the same code as in Module 3 assignment to make probability predictions, since this part is not affected by using stochastic gradient ascent as a solver. Only the way in which the coefficients are learned is affected by using stochastic gradient ascent as a solver. End of explanation """ def feature_derivative(errors, feature): # Compute the dot product of errors and feature ## YOUR CODE HERE derivative = np.dot(errors, feature) return derivative """ Explanation: Derivative of log likelihood with respect to a single coefficient Let us now work on making minor changes to how the derivative computation is performed for logistic regression. Recall from the lectures and Module 3 assignment that for logistic regression, the derivative of log likelihood with respect to a single coefficient is as follows: $$ \frac{\partial\ell}{\partial w_j} = \sum_{i=1}^N h_j(\mathbf{x}_i)\left(\mathbf{1}[y_i = +1] - P(y_i = +1 | \mathbf{x}_i, \mathbf{w})\right) $$ In Module 3 assignment, we wrote a function to compute the derivative of log likelihood with respect to a single coefficient $w_j$. 
The function accepts the following two parameters: * errors vector containing $(\mathbf{1}[y_i = +1] - P(y_i = +1 | \mathbf{x}_i, \mathbf{w}))$ for all $i$ * feature vector containing $h_j(\mathbf{x}_i)$ for all $i$ Complete the following code block: End of explanation """ def compute_avg_log_likelihood(feature_matrix, sentiment, coefficients): indicator = (sentiment==+1) scores = np.dot(feature_matrix, coefficients) logexp = np.log(1. + np.exp(-scores)) # Simple check to prevent overflow mask = np.isinf(logexp) logexp[mask] = -scores[mask] lp = np.sum((indicator-1)*scores - logexp)/len(feature_matrix) return lp """ Explanation: Note. We are not using regularization in this assignment, but, as discussed in the optional video, stochastic gradient can also be used for regularized logistic regression. To verify the correctness of the gradient computation, we provide a function for computing average log likelihood (which we recall from the last assignment was a topic detailed in an advanced optional video, and used here for its numerical stability). To track the performance of stochastic gradient ascent, we provide a function for computing average log likelihood. $$\ell\ell_A(\mathbf{w}) = \color{red}{\frac{1}{N}} \sum_{i=1}^N \Big( (\mathbf{1}[y_i = +1] - 1)\mathbf{w}^T h(\mathbf{x}_i) - \ln\left(1 + \exp(-\mathbf{w}^T h(\mathbf{x}_i))\right) \Big) $$ Note that we made one tiny modification to the log likelihood function (called compute_log_likelihood) in our earlier assignments. We added a $\color{red}{1/N}$ term which averages the log likelihood accross all data points. The $\color{red}{1/N}$ term makes it easier for us to compare stochastic gradient ascent with batch gradient ascent. We will use this function to generate plots that are similar to those you saw in the lecture. End of explanation """ j = 1 # Feature number i = 10 # Data point number coefficients = np.zeros(194) # A point w at which we are computing the gradient. predictions = predict_probability(feature_matrix_train[i:i+1,:], coefficients) indicator = (sentiment_train[i:i+1]==+1) errors = indicator - predictions gradient_single_data_point = feature_derivative(errors, feature_matrix_train[i:i+1,j]) print "Gradient single data point: %s" % gradient_single_data_point print " --> Should print 0.0" """ Explanation: Quiz Question: Recall from the lecture and the earlier assignment, the log likelihood (without the averaging term) is given by $$\ell\ell(\mathbf{w}) = \sum_{i=1}^N \Big( (\mathbf{1}[y_i = +1] - 1)\mathbf{w}^T h(\mathbf{x}_i) - \ln\left(1 + \exp(-\mathbf{w}^T h(\mathbf{x}_i))\right) \Big) $$ How are the functions $\ell\ell(\mathbf{w})$ and $\ell\ell_A(\mathbf{w})$ related? Modifying the derivative for stochastic gradient ascent Recall from the lecture that the gradient for a single data point $\color{red}{\mathbf{x}_i}$ can be computed using the following formula: $$ \frac{\partial\ell_{\color{red}{i}}(\mathbf{w})}{\partial w_j} = h_j(\color{red}{\mathbf{x}i})\left(\mathbf{1}[y\color{red}{i} = +1] - P(y_\color{red}{i} = +1 | \color{red}{\mathbf{x}_i}, \mathbf{w})\right) $$ Computing the gradient for a single data point Do we really need to re-write all our code to modify $\partial\ell(\mathbf{w})/\partial w_j$ to $\partial\ell_{\color{red}{i}}(\mathbf{w})/{\partial w_j}$? Thankfully No!. Using NumPy, we access $\mathbf{x}i$ in the training data using feature_matrix_train[i:i+1,:] and $y_i$ in the training data using sentiment_train[i:i+1]. 
We can compute $\partial\ell{\color{red}{i}}(\mathbf{w})/\partial w_j$ by re-using all the code written in feature_derivative and predict_probability. We compute $\partial\ell_{\color{red}{i}}(\mathbf{w})/\partial w_j$ using the following steps: * First, compute $P(y_i = +1 | \mathbf{x}_i, \mathbf{w})$ using the predict_probability function with feature_matrix_train[i:i+1,:] as the first parameter. * Next, compute $\mathbf{1}[y_i = +1]$ using sentiment_train[i:i+1]. * Finally, call the feature_derivative function with feature_matrix_train[i:i+1, j] as one of the parameters. Let us follow these steps for j = 1 and i = 10: End of explanation """ j = 1 # Feature number i = 10 # Data point start B = 10 # Mini-batch size coefficients = np.zeros(194) # A point w at which we are computing the gradient. predictions = predict_probability(feature_matrix_train[i:i+B,:], coefficients) indicator = (sentiment_train[i:i+B]==+1) errors = indicator - predictions gradient_mini_batch = feature_derivative(errors, feature_matrix_train[i:i+B,j]) print "Gradient mini-batch data points: %s" % gradient_mini_batch print " --> Should print 1.0" """ Explanation: Quiz Question: The code block above computed $\partial\ell_{\color{red}{i}}(\mathbf{w})/{\partial w_j}$ for j = 1 and i = 10. Is $\partial\ell_{\color{red}{i}}(\mathbf{w})/{\partial w_j}$ a scalar or a 194-dimensional vector? Modifying the derivative for using a batch of data points Stochastic gradient estimates the ascent direction using 1 data point, while gradient uses $N$ data points to decide how to update the the parameters. In an optional video, we discussed the details of a simple change that allows us to use a mini-batch of $B \leq N$ data points to estimate the ascent direction. This simple approach is faster than regular gradient but less noisy than stochastic gradient that uses only 1 data point. Although we encorage you to watch the optional video on the topic to better understand why mini-batches help stochastic gradient, in this assignment, we will simply use this technique, since the approach is very simple and will improve your results. Given a mini-batch (or a set of data points) $\mathbf{x}{i}, \mathbf{x}{i+1} \ldots \mathbf{x}{i+B}$, the gradient function for this mini-batch of data points is given by: $$ \color{red}{\sum{s = i}^{i+B}} \frac{\partial\ell_{s}}{\partial w_j} = \color{red}{\sum_{s = i}^{i + B}} h_j(\mathbf{x}_s)\left(\mathbf{1}[y_s = +1] - P(y_s = +1 | \mathbf{x}_s, \mathbf{w})\right) $$ Computing the gradient for a "mini-batch" of data points Using NumPy, we access the points $\mathbf{x}i, \mathbf{x}{i+1} \ldots \mathbf{x}_{i+B}$ in the training data using feature_matrix_train[i:i+B,:] and $y_i$ in the training data using sentiment_train[i:i+B]. 
We can compute $\color{red}{\sum_{s = i}^{i+B}} \partial\ell_{s}/\partial w_j$ easily as follows: End of explanation """ from math import sqrt def logistic_regression_SG(feature_matrix, sentiment, initial_coefficients, step_size, batch_size, max_iter): log_likelihood_all = [] # make sure it's a numpy array coefficients = np.array(initial_coefficients) # set seed=1 to produce consistent results np.random.seed(seed=1) # Shuffle the data before starting permutation = np.random.permutation(len(feature_matrix)) feature_matrix = feature_matrix[permutation,:] sentiment = sentiment[permutation] i = 0 # index of current batch # Do a linear scan over data for itr in xrange(max_iter): # Predict P(y_i = +1|x_i,w) using your predict_probability() function # Make sure to slice the i-th row of feature_matrix with [i:i+batch_size,:] ### YOUR CODE HERE predictions = predict_probability(feature_matrix[i:i+batch_size, :], coefficients) if len(predictions) <= 0: break; # Compute indicator value for (y_i = +1) # Make sure to slice the i-th entry with [i:i+batch_size] ### YOUR CODE HERE indicator = (sentiment[i:i+batch_size] == +1) # Compute the errors as indicator - predictions errors = indicator - predictions for j in xrange(len(coefficients)): # loop over each coefficient # Recall that feature_matrix[:,j] is the feature column associated with coefficients[j] # Compute the derivative for coefficients[j] and save it to derivative. # Make sure to slice the i-th row of feature_matrix with [i:i+batch_size,j] ### YOUR CODE HERE derivative = feature_derivative(errors, feature_matrix[i:i+batch_size, j]) # compute the product of the step size, the derivative, and the **normalization constant** (1./batch_size) ### YOUR CODE HERE coefficients[j] += step_size*derivative * 1.0 / batch_size # Checking whether log likelihood is increasing # Print the log likelihood over the *current batch* lp = compute_avg_log_likelihood(feature_matrix[i:i+batch_size,:], sentiment[i:i+batch_size], coefficients) log_likelihood_all.append(lp) if itr <= 15 or (itr <= 1000 and itr % 100 == 0) or (itr <= 10000 and itr % 1000 == 0) \ or itr % 10000 == 0 or itr == max_iter-1: data_size = len(feature_matrix) print 'Iteration %*d: Average log likelihood (of data points in batch [%0*d:%0*d]) = %.8f' % \ (int(np.ceil(np.log10(max_iter))), itr, \ int(np.ceil(np.log10(data_size))), i, \ int(np.ceil(np.log10(data_size))), i+batch_size, lp) # if we made a complete pass over data, shuffle and restart i += batch_size if i+batch_size > len(feature_matrix): permutation = np.random.permutation(len(feature_matrix)) feature_matrix = feature_matrix[permutation,:] sentiment = sentiment[permutation] i = 0 # We return the list of log likelihoods for plotting purposes. return coefficients, log_likelihood_all """ Explanation: Quiz Question: The code block above computed $\color{red}{\sum_{s = i}^{i+B}}\partial\ell_{s}(\mathbf{w})/{\partial w_j}$ for j = 10, i = 10, and B = 10. Is this a scalar or a 194-dimensional vector? Quiz Question: For what value of B is the term $\color{red}{\sum_{s = 1}^{B}}\partial\ell_{s}(\mathbf{w})/\partial w_j$ the same as the full gradient $\partial\ell(\mathbf{w})/{\partial w_j}$? 
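Before moving on, a quick optional sanity check (added here, it is not part of the original assignment): the mini-batch derivative should equal an explicit sum of single-data-point derivatives over the same ten points. The sketch below reuses i, j, B, coefficients and gradient_mini_batch exactly as defined in the mini-batch cell above, so the two printed numbers should match up to floating-point error.
End of explanation
"""
# Added sanity check (not part of the original assignment): accumulate the ten
# single-data-point derivatives and compare the total to the mini-batch derivative.
sum_of_single_point_derivatives = 0.
for s in range(i, i + B):
    predictions_s = predict_probability(feature_matrix_train[s:s+1, :], coefficients)
    indicator_s = (sentiment_train[s:s+1] == +1)
    errors_s = indicator_s - predictions_s
    sum_of_single_point_derivatives += feature_derivative(errors_s, feature_matrix_train[s:s+1, j])
print "Sum of single-point derivatives: %s" % sum_of_single_point_derivatives
print "Mini-batch derivative          : %s" % gradient_mini_batch
"""
Explanation: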
Averaging the gradient across a batch It is a common practice to normalize the gradient update rule by the batch size B: $$ \frac{\partial\ell_{\color{red}{A}}(\mathbf{w})}{\partial w_j} \approx \color{red}{\frac{1}{B}} {\sum_{s = i}^{i + B}} h_j(\mathbf{x}_s)\left(\mathbf{1}[y_s = +1] - P(y_s = +1 | \mathbf{x}_s, \mathbf{w})\right) $$ In other words, we update the coefficients using the average gradient over data points (instead of using a summation). By using the average gradient, we ensure that the magnitude of the gradient is approximately the same for all batch sizes. This way, we can more easily compare various batch sizes of stochastic gradient ascent (including a batch size of all the data points), and study the effect of batch size on the algorithm as well as the choice of step size. Implementing stochastic gradient ascent Now we are ready to implement our own logistic regression with stochastic gradient ascent. Complete the following function to fit a logistic regression model using gradient ascent: End of explanation """ sample_feature_matrix = np.array([[1.,2.,-1.], [1.,0.,1.]]) sample_sentiment = np.array([+1, -1]) coefficients, log_likelihood = logistic_regression_SG(sample_feature_matrix, sample_sentiment, np.zeros(3), step_size=1., batch_size=2, max_iter=2) print '-------------------------------------------------------------------------------------' print 'Coefficients learned :', coefficients print 'Average log likelihood per-iteration :', log_likelihood if np.allclose(coefficients, np.array([-0.09755757, 0.68242552, -0.7799831]), atol=1e-3)\ and np.allclose(log_likelihood, np.array([-0.33774513108142956, -0.2345530939410341])): # pass if elements match within 1e-3 print '-------------------------------------------------------------------------------------' print 'Test passed!' else: print '-------------------------------------------------------------------------------------' print 'Test failed' """ Explanation: Note. In practice, the final set of coefficients is rarely used; it is better to use the average of the last K sets of coefficients instead, where K should be adjusted depending on how fast the log likelihood oscillates around the optimum. Checkpoint The following cell tests your stochastic gradient ascent function using a toy dataset consisting of two data points. If the test does not pass, make sure you are normalizing the gradient update rule correctly. End of explanation """ coefficients, log_likelihood = logistic_regression_SG(feature_matrix_train, sentiment_train, initial_coefficients=np.zeros(194), step_size=5e-1, batch_size=1, max_iter=10) """ Explanation: Compare convergence behavior of stochastic gradient ascent For the remainder of the assignment, we will compare stochastic gradient ascent against batch gradient ascent. For this, we need a reference implementation of batch gradient ascent. But do we need to implement this from scratch? Quiz Question: For what value of batch size B above is the stochastic gradient ascent function logistic_regression_SG act as a standard gradient ascent algorithm? Running gradient ascent using the stochastic gradient ascent implementation Instead of implementing batch gradient ascent separately, we save time by re-using the stochastic gradient ascent function we just wrote &mdash; to perform gradient ascent, it suffices to set batch_size to the number of data points in the training data. Yes, we did answer above the quiz question for you, but that is an important point to remember in the future :) Small Caveat. 
The batch gradient ascent implementation here is slightly different than the one in the earlier assignments, as we now normalize the gradient update rule. We now run stochastic gradient ascent over the feature_matrix_train for 10 iterations using: * initial_coefficients = np.zeros(194) * step_size = 5e-1 * batch_size = 1 * max_iter = 10 End of explanation """ # YOUR CODE HERE coefficients_batch, log_likelihood_batch = logistic_regression_SG(feature_matrix_train, sentiment_train, initial_coefficients=np.zeros(194), step_size=5e-1, batch_size=len(feature_matrix_train), max_iter=200) """ Explanation: Quiz Question. When you set batch_size = 1, as each iteration passes, how does the average log likelihood in the batch change? * Increases * Decreases * Fluctuates Now run batch gradient ascent over the feature_matrix_train for 200 iterations using: * initial_coefficients = np.zeros(194) * step_size = 5e-1 * batch_size = len(feature_matrix_train) * max_iter = 200 End of explanation """ 2*50000/100 """ Explanation: Quiz Question. When you set batch_size = len(train_data), as each iteration passes, how does the average log likelihood in the batch change? * Increases * Decreases * Fluctuates Make "passes" over the dataset To make a fair comparison betweeen stochastic gradient ascent and batch gradient ascent, we measure the average log likelihood as a function of the number of passes (defined as follows): $$ [\text{# of passes}] = \frac{[\text{# of data points touched so far}]}{[\text{size of dataset}]} $$ Quiz Question Suppose that we run stochastic gradient ascent with a batch size of 100. How many gradient updates are performed at the end of two passes over a dataset consisting of 50000 data points? End of explanation """ step_size = 1e-1 batch_size = 100 num_passes = 10 num_iterations = num_passes * int(len(feature_matrix_train)/batch_size) coefficients_sgd, log_likelihood_sgd = logistic_regression_SG(feature_matrix_train, sentiment_train, initial_coefficients=np.zeros(194), step_size=1e-1, batch_size=100, max_iter=num_iterations) """ Explanation: Log likelihood plots for stochastic gradient ascent With the terminology in mind, let us run stochastic gradient ascent for 10 passes. We will use * step_size=1e-1 * batch_size=100 * initial_coefficients to all zeros. End of explanation """ import matplotlib.pyplot as plt %matplotlib inline def make_plot(log_likelihood_all, len_data, batch_size, smoothing_window=1, label=''): plt.rcParams.update({'figure.figsize': (9,5)}) log_likelihood_all_ma = np.convolve(np.array(log_likelihood_all), \ np.ones((smoothing_window,))/smoothing_window, mode='valid') plt.plot(np.array(range(smoothing_window-1, len(log_likelihood_all)))*float(batch_size)/len_data, log_likelihood_all_ma, linewidth=4.0, label=label) plt.rcParams.update({'font.size': 16}) plt.tight_layout() plt.xlabel('# of passes over data') plt.ylabel('Average log likelihood per data point') plt.legend(loc='lower right', prop={'size':14}) make_plot(log_likelihood_sgd, len_data=len(feature_matrix_train), batch_size=100, label='stochastic gradient, step_size=1e-1') """ Explanation: We provide you with a utility function to plot the average log likelihood as a function of the number of passes. 
End of explanation """ make_plot(log_likelihood_sgd, len_data=len(feature_matrix_train), batch_size=100, smoothing_window=30, label='stochastic gradient, step_size=1e-1') """ Explanation: Smoothing the stochastic gradient ascent curve The plotted line oscillates so much that it is hard to see whether the log likelihood is improving. In our plot, we apply a simple smoothing operation using the parameter smoothing_window. The smoothing is simply a moving average of log likelihood over the last smoothing_window "iterations" of stochastic gradient ascent. End of explanation """ step_size = 1e-1 batch_size = 100 num_passes = 200 num_iterations = num_passes * int(len(feature_matrix_train)/batch_size) ## YOUR CODE HERE coefficients_sgd, log_likelihood_sgd = logistic_regression_SG(feature_matrix_train, sentiment_train, initial_coefficients=np.zeros(194), step_size=step_size, batch_size=batch_size, max_iter=num_iterations) """ Explanation: Checkpoint: The above plot should look smoother than the previous plot. Play around with smoothing_window. As you increase it, you should see a smoother plot. Stochastic gradient ascent vs batch gradient ascent To compare convergence rates for stochastic gradient ascent with batch gradient ascent, we call make_plot() multiple times in the same cell. We are comparing: * stochastic gradient ascent: step_size = 0.1, batch_size=100 * batch gradient ascent: step_size = 0.5, batch_size=len(feature_matrix_train) Write code to run stochastic gradient ascent for 200 passes using: * step_size=1e-1 * batch_size=100 * initial_coefficients to all zeros. End of explanation """ make_plot(log_likelihood_sgd, len_data=len(feature_matrix_train), batch_size=100, smoothing_window=30, label='stochastic, step_size=1e-1') make_plot(log_likelihood_batch, len_data=len(feature_matrix_train), batch_size=len(feature_matrix_train), smoothing_window=1, label='batch, step_size=5e-1') """ Explanation: We compare the convergence of stochastic gradient ascent and batch gradient ascent in the following cell. Note that we apply smoothing with smoothing_window=30. End of explanation """ batch_size = 100 num_passes = 10 num_iterations = num_passes * int(len(feature_matrix_train)/batch_size) coefficients_sgd = {} log_likelihood_sgd = {} for step_size in np.logspace(-4, 2, num=7): coefficients_sgd[step_size], log_likelihood_sgd[step_size] = logistic_regression_SG(feature_matrix_train, sentiment_train, initial_coefficients=np.zeros(194), step_size=step_size, batch_size=batch_size, max_iter=num_iterations) """ Explanation: Quiz Question: In the figure above, how many passes does batch gradient ascent need to achieve a similar log likelihood as stochastic gradient ascent? It's always better 10 passes 20 passes 150 passes or more Explore the effects of step sizes on stochastic gradient ascent In previous sections, we chose step sizes for you. In practice, it helps to know how to choose good step sizes yourself. To start, we explore a wide range of step sizes that are equally spaced in the log space. Run stochastic gradient ascent with step_size set to 1e-4, 1e-3, 1e-2, 1e-1, 1e0, 1e1, and 1e2. Use the following set of parameters: * initial_coefficients=np.zeros(194) * batch_size=100 * max_iter initialized so as to run 10 passes over the data. 
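As a quick optional aside (added here, not part of the original assignment), the seven step sizes listed above are exactly what np.logspace(-4, 2, num=7) generates, which is what the step-size loop in this notebook iterates over:
print np.logspace(-4, 2, num=7)   # seven values, evenly spaced in log space from 1e-4 to 1e2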
End of explanation """ for step_size in np.logspace(-4, 2, num=7): make_plot(log_likelihood_sgd[step_size], len_data=len(train_data), batch_size=100, smoothing_window=30, label='step_size=%.1e'%step_size) """ Explanation: Plotting the log likelihood as a function of passes for each step size Now, we will plot the change in log likelihood using the make_plot for each of the following values of step_size: step_size = 1e-4 step_size = 1e-3 step_size = 1e-2 step_size = 1e-1 step_size = 1e0 step_size = 1e1 step_size = 1e2 For consistency, we again apply smoothing_window=30. End of explanation """ for step_size in np.logspace(-4, 2, num=7)[0:6]: make_plot(log_likelihood_sgd[step_size], len_data=len(train_data), batch_size=100, smoothing_window=30, label='step_size=%.1e'%step_size) """ Explanation: Now, let us remove the step size step_size = 1e2 and plot the rest of the curves. End of explanation """
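# Added sketch (not part of the original assignment): turn the coefficients learned
# for each explored step size into hard class predictions and measure accuracy on
# the held-out validation set. A prediction is +1 when P(y=+1|x,w) > 0.5, else -1.
for step_size in sorted(coefficients_sgd.keys()):
    probabilities = predict_probability(feature_matrix_valid, coefficients_sgd[step_size])
    class_predictions = np.where(probabilities > 0.5, +1, -1)
    accuracy = np.mean(class_predictions == sentiment_valid)
    print 'step_size = %.1e : validation accuracy = %.4f' % (step_size, accuracy)
"""
Explanation: This closing cell is an added sketch rather than part of the original assignment: it gives a quick sense of which of the explored step sizes actually generalise, by scoring each set of learned coefficients on the validation data prepared earlier in the notebook.
End of explanation
"""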
usc-isi-i2/etk
examples/excel_extractor/excel extractor.ipynb
mit
import pprint from etk.extractors.excel_extractor import ExcelExtractor ee = ExcelExtractor() variables = { 'value': '$col,$row' } raw_extractions = ee.extract('alabama.xls', '16tbl08al', ['C,7', 'M,33'], variables) pprint.pprint(raw_extractions[:10]) # print first 10 """ Explanation: Excel Extractor ETK's Excel Extractor is a cell-based extractor for extracting data from compatible spreadsheets. Source spreadsheet The example spreadsheet file is named alabama.xls and it has a sheet named 16tbl08al, in which rows 1 to 5 and rows 60 to 62 are metadata, and A6 to M59 is a table (which has row and column headers). For this example, I'm going to extract data from C7 to M33 (see the picture attached below). Define where and how to extract data Excel Extractor scans cell by cell within the region that you specify and populates the variables that you define. Define variable In this particular example, I want to extract the value of every cell in the region (C7, M33), so I defined a variable called value. Its value is extracted from the cell located at $col,$row, where $col and $row refer to the column id and row id of the cell the scanner is currently visiting. The return value is a list of objects, each containing the user-defined variables. End of explanation """ variables = { 'value': '$col,$row', 'county': '$B,$row', 'category': '$col,$6' } raw_extractions = ee.extract('alabama.xls', '16tbl08al', ['C,7', 'M,33'], variables) pprint.pprint(raw_extractions[:10]) # print first 10 """ Explanation: Coordinate variable Excel Extractor allows you to define multiple variables. This is useful if you want to extract data from other cells which are associated with the current cell. In this example, I also need the column header (category) and the county name for every cell in the region. It supports constant coordinates like ($B,$1) (which means the cell at column B, row 1) as well as relative coordinates calculated with + and -, like ($B-1,$row+1) (which means the cell at column A whose row id is the current row id + 1).
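For example, a hypothetical variables dictionary mixing an absolute column, the current cell, and a relative offset could look like the sketch below; the extra keys are made up purely for illustration and simply follow the coordinate syntax described above:
variables = {
    'value': '$col,$row',         # the cell currently being scanned
    'row_label': '$B,$row',       # absolute column B, current row
    'value_above': '$col,$row-1'  # the cell one row above the current one
}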
End of explanation """ import os, sys from etk.etk import ETK from etk.etk_module import ETKModule from etk.extractors.excel_extractor import ExcelExtractor from etk.utilities import Utility class ExampleETKModule(ETKModule): """ Abstract class for extraction module """ def __init__(self, etk): ETKModule.__init__(self, etk) self.ee = ExcelExtractor() def document_selector(self, doc): return 'file_path' in doc.cdr_document def process_document(self, doc): """ Add your code for processing the document """ variables = { 'value': '$col,$row', 'county': '$B,$row', 'category': '$col,$6', 'from_row': '$row', 'from_col': '$col' } raw_extractions = self.ee.extract(doc.cdr_document['file_path'], '16tbl08al', ['C,7', 'M,33'], variables) extracted_docs = [] for d in raw_extractions: # post processing d['category'] = d['category'].replace('\n', ' ').strip() d['county'] = d['county'].replace('\n', ' ').strip() d['from_row'] = int(d['from_row']) d['from_col'] = int(d['from_col']) # create sub document d['doc_id'] = Utility.create_doc_id_from_json(d) extracted_docs.append(etk.create_document(d)) return extracted_docs # if __name__ == "__main__": etk = ETK(modules=ExampleETKModule) doc = etk.create_document({'file_path': 'alabama.xls'}) docs = etk.process_ems(doc) for d in docs[1:11]: # print first 10 print(d.value) """ Explanation: Wrap them up in ETK module and post processing The below example shows how to use this extractor in ETK module. The extractor's variable syntax only supports using a single builtin variable or a coordinate. All the post processings need to be done after extraction. End of explanation """
barjacks/foundations-homework
14_Analyzing_Text/14 - TF-IDF Homework.ipynb
mit
# If you'd like to download it through the command line... !curl -O http://www.cs.cornell.edu/home/llee/data/convote/convote_v1.1.tar.gz # And then extract it through the command line... !tar -zxf convote_v1.1.tar.gz """ Explanation: Homework 14 (or so): TF-IDF text analysis and clustering Hooray, we kind of figured out how text analysis works! Some of it is still magic, but at least the TF and IDF parts make a little sense. Kind of. Somewhat. No, just kidding, we're professionals now. Investigating the Congressional Record The Congressional Record is more or less what happened in Congress every single day. Speeches and all that. A good large source of text data, maybe? Let's pretend it's totally secret but we just got it leaked to us in a data dump, and we need to check it out. It was leaked from this page here. End of explanation """ # glob finds files matching a certain filename pattern import glob # Give me all the text files paths = glob.glob('convote_v1.1/data_stage_one/development_set/*') paths[:5] len(paths) """ Explanation: You can explore the files if you'd like, but we're going to get the ones from convote_v1.1/data_stage_one/development_set/. It's a bunch of text files. End of explanation """ speeches = [] for path in paths: with open(path) as speech_file: speech = { 'pathname': path, 'filename': path.split('/')[-1], 'content': speech_file.read() } speeches.append(speech) speeches_df = pd.DataFrame(speeches) speeches_df.head() """ Explanation: So great, we have 702 of them. Now let's import them. End of explanation """ All_speeches = speeches_df['content'] First_five_speeches = speeches_df['content'].head(5) First_five_speeches """ Explanation: In class we had the texts variable. For the homework can just do speeches_df['content'] to get the same sort of list of stuff. Take a look at the contents of the first 5 speeches End of explanation """ count_vectorizer = CountVectorizer(stop_words='english') speech_tokens = count_vectorizer.fit_transform(All_speeches) count_vectorizer.get_feature_names() All_tokens = pd.DataFrame(speech_tokens.toarray(), columns=count_vectorizer.get_feature_names()) #All_tokens """ Explanation: Doing our analysis Use the sklearn package and a plain boring CountVectorizer to get a list of all of the tokens used in the speeches. If it won't list them all, that's ok! Make a dataframe with those terms as columns. Be sure to include English-language stopwords End of explanation """ count_vectorizer_100 = CountVectorizer(max_features=100, stop_words='english') speech_tokens_top100 = count_vectorizer_100.fit_transform(speeches_df['content']) """ Explanation: Okay, it's far too big to even look at. Let's try to get a list of features from a new CountVectorizer that only takes the top 100 words. End of explanation """ Top_100_tokens = pd.DataFrame(speech_tokens_top100.toarray(), columns=count_vectorizer_100.get_feature_names()) Top_100_tokens.head() """ Explanation: Now let's push all of that into a dataframe with nicely named columns. End of explanation """ speeches_df.info() Top_100_tokens['No_chairman'] = Top_100_tokens['chairman'] == 0 Top_100_tokens[Top_100_tokens['No_chairman'] == True].count().head(1) Top_100_tokens['no_mr'] = Top_100_tokens['mr'] == 0 Top_100_tokens[Top_100_tokens['no_mr'] == True].count().head(1) """ Explanation: Everyone seems to start their speeches with "mr chairman" - how many speeches are there total, and many don't mention "chairman" and how many mention neither "mr" nor "chairman"? 
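One way to get the "neither" count directly is a single boolean combination (an added sketch, not part of the original homework):
neither = (Top_100_tokens['mr'] == 0) & (Top_100_tokens['chairman'] == 0)
print(neither.sum())   # number of speeches mentioning neither "mr" nor "chairman"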
End of explanation """ Top_100_tokens['thank'].sort_values(ascending=False).head(1) """ Explanation: What is the index of the speech thank is the most thankful, a.k.a. includes the word 'thank' the most times? End of explanation """ Top_100_tokens['china trade'] = Top_100_tokens['china'] + Top_100_tokens['trade'] Top_100_tokens['china trade'].sort_values(ascending=False).head(3) """ Explanation: If I'm searching for China and trade, what are the top 3 speeches to read according to the CountVectoriser? End of explanation """ idf_vectorizer = TfidfVectorizer(stop_words='english', use_idf=True) Top_100_tokens_idf = idf_vectorizer.fit_transform(All_speeches) idf_df = pd.DataFrame(Top_100_tokens_idf.toarray(), columns=idf_vectorizer.get_feature_names()) idf_df['china trade'] = idf_df['china'] + idf_df['trade'] idf_df['china trade'].sort_values(ascending=False).head(3) """ Explanation: Now what if I'm using a TfidfVectorizer? End of explanation """ # index 0 is the first speech, which was the first one imported. paths[402] # Pass that into 'cat' using { } which lets you put variables in shell commands # that way you can pass the path to cat !cat {paths[577]} """ Explanation: What's the content of the speeches? Here's a way to get them: End of explanation """ All_tokens['chaos'] = All_tokens['chaos'].sort_values(ascending=False) >= 1 All_tokens[All_tokens['chaos'] == True].count().head(1) """ Explanation: Now search for something else! Another two terms that might show up. elections and chaos? Whatever you thnik might be interesting. End of explanation """ #simple counting vectorizer, from sklearn.cluster import KMeans number_of_clusters = 8 km = KMeans(n_clusters=number_of_clusters) count_vectorizer = CountVectorizer(stop_words='english') X = count_vectorizer.fit_transform(All_speeches) km.fit(X) print("Top terms per cluster:") order_centroids = km.cluster_centers_.argsort()[:, ::-1] terms = count_vectorizer.get_feature_names() for i in range(number_of_clusters): top_ten_words = [terms[ind] for ind in order_centroids[i, :5]] print("Cluster {}: {}".format(i, ' '.join(top_ten_words))) # term frequency vectorizer, vectorizer = TfidfVectorizer(use_idf=True, stop_words='english') X = vectorizer.fit_transform(All_speeches) number_of_clusters = 8 km = KMeans(n_clusters=number_of_clusters) km.fit(X) print("Top terms per cluster:") order_centroids = km.cluster_centers_.argsort()[:, ::-1] terms = count_vectorizer.get_feature_names() for i in range(number_of_clusters): top_ten_words = [terms[ind] for ind in order_centroids[i, :10]] print("Cluster {}: {}".format(i, ' '.join(top_ten_words))) #term frequency inverse document frequency vectorizer def oh_tokenizer(str_input): words = re.sub(r"[^A-Za-z0-9\-]", " ", str_input).lower().split() return words l2_vectorizer = TfidfVectorizer(use_idf=True, stop_words='english', tokenizer=oh_tokenizer) X = l2_vectorizer.fit_transform(speeches_df['content']) l2_df = pd.DataFrame(X.toarray(), columns=l2_vectorizer.get_feature_names()) for i in range(number_of_clusters): top_ten_words = [l2_df[ind] for ind in order_centroids[i, :9]] print("Cluster {}: {}".format(i, ' '.join(top_ten_words))) """ Explanation: Enough of this garbage, let's cluster Using a simple counting vectorizer, cluster the documents into eight categories, telling me what the top terms are per category. Using a term frequency vectorizer, cluster the documents into eight categories, telling me what the top terms are per category. 
Using a term frequency inverse document frequency vectorizer, cluster the documents into eight categories, telling me what the top terms are per category. End of explanation """ !curl -O https://github.com/ledeprogram/courses/raw/master/algorithms/data/hp.zip !unzip hp.zip import glob paths = glob.glob('hp/*.txt') paths[:5] len(paths) Harry_Potter_fiction = [] for path in paths: with open(path) as Harry_file: speech = { 'pathname': path, 'filename': path.split('/')[-1], 'content': Harry_file.read() } Harry_Potter_fiction.append(speech) Harry_df = pd.DataFrame(Harry_Potter_fiction) Harry_df.head() All_of_Harry = Harry_df['content'] All_of_Harry.head() """ Explanation: Which one do you think works the best? Not sure. The last one term frequency inverse I can't get to work. So I am going with number 2. Harry Potter time I have a scraped collection of Harry Potter fanfiction at https://github.com/ledeprogram/courses/raw/master/algorithms/data/hp.zip. I want you to read them in, vectorize them and cluster them. Use this process to find out the two types of Harry Potter fanfiction. What is your hypothesis? End of explanation """ vectorizer = TfidfVectorizer(use_idf=True, stop_words='english') X = vectorizer.fit_transform(All_of_Harry) # KMeans clustering is a method of clustering. from sklearn.cluster import KMeans number_of_clusters = 2 km = KMeans(n_clusters=number_of_clusters) km.fit(X) print("Top terms per cluster:") order_centroids = km.cluster_centers_.argsort()[:, ::-1] terms = vectorizer.get_feature_names() for i in range(number_of_clusters): top_ten_words = [terms[ind] for ind in order_centroids[i, :10]] print("Cluster {}: {}".format(i, ' '.join(top_ten_words))) #Cluster 1 is about Lily and James, whoever they are. Wait: His parents. #Cluster 2 is about Harry and Hermione. """ Explanation: Term Frequency Vectorizer End of explanation """ from sklearn.cluster import KMeans number_of_clusters = 2 km = KMeans(n_clusters=number_of_clusters) count_vectorizer = CountVectorizer(stop_words='english') X = count_vectorizer.fit_transform(All_of_Harry) km.fit(X) print("Top terms per cluster:") order_centroids = km.cluster_centers_.argsort()[:, ::-1] terms = count_vectorizer.get_feature_names() for i in range(number_of_clusters): top_ten_words = [terms[ind] for ind in order_centroids[i, :10]] print("Cluster {}: {}".format(i, ' '.join(top_ten_words))) """ Explanation: Simple Counting Vectorizer End of explanation """
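# Added sketch (not part of the original homework): count how many fan-fiction texts
# fall into each of the two clusters found above, and peek at a few filenames per cluster.
import numpy as np
print(np.bincount(km.labels_))
print(Harry_df['filename'][km.labels_ == 0].head())
print(Harry_df['filename'][km.labels_ == 1].head())
"""
Explanation: An added sketch, not part of the original homework: looking at the size of each cluster and at a handful of filenames from each is a quick way to sanity-check the "two types of Harry Potter fanfiction" hypothesis before interpreting the top terms.
End of explanation
"""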
tensorflow/docs-l10n
site/zh-cn/datasets/overview.ipynb
apache-2.0
!pip install -q tfds-nightly tensorflow matplotlib import matplotlib.pyplot as plt import numpy as np import tensorflow as tf import tensorflow_datasets as tfds """ Explanation: TensorFlow Datasets TFDS provides a collection of ready-to-use datasets for use with TensorFlow, Jax, and other Machine Learning frameworks. It handles downloading and preparing the data deterministically and constructing a tf.data.Dataset (or np.array). Note: Do not confuse TFDS (this library) with tf.data (TensorFlow API to build efficient data pipelines). TFDS is a high level wrapper around tf.data. If you're not familiar with this API, we encourage you to read the official tf.data guide first. Copyright 2018 The TensorFlow Datasets Authors, Licensed under the Apache License, Version 2.0 <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://tensorflow.google.cn/datasets/overview"><img src="https://tensorflow.google.cn/images/tf_logo_32px.png"> ๅœจ TensorFlow.org ไธŠๆŸฅ็œ‹</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/zh-cn/datasets/overview.ipynb"><img src="https://tensorflow.google.cn/images/colab_logo_32px.png">ๅœจ Google Colab ไธญ่ฟ่กŒ</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/zh-cn/datasets/overview.ipynb"><img src="https://tensorflow.google.cn/images/GitHub-Mark-32px.png">ๅœจ Github ไธŠๆŸฅ็œ‹ๆบไปฃ็ </a> </td> <td><a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/zh-cn/datasets/overview.ipynb"><img src="https://tensorflow.google.cn/images/download_logo_32px.png">ไธ‹่ฝฝ็ฌ”่ฎฐๆœฌ</a></td> </table> ๅฎ‰่ฃ… TFDS ๅญ˜ๅœจไบŽไธคไธช่ฝฏไปถๅŒ…ไธญ๏ผš pip install tensorflow-datasets๏ผš็จณๅฎš็‰ˆ๏ผŒๆ•ฐๆœˆๅ‘่กŒไธ€ๆฌกใ€‚ pip install tfds-nightly๏ผšๆฏๅคฉๅ‘่กŒ๏ผŒๅŒ…ๅซๆœ€่ฟ‘็‰ˆๆœฌ็š„ๆ•ฐๆฎ้›†ใ€‚ ๆญค colab ไฝฟ็”จ tfds-nightly๏ผš End of explanation """ tfds.list_builders() """ Explanation: ๆŸฅๆ‰พๅฏ็”จ็š„ๆ•ฐๆฎ้›† ๆ‰€ๆœ‰ๆ•ฐๆฎ้›†ๆž„ๅปบๅ™จ้ƒฝๆ˜ฏ tfds.core.DatasetBuilder ็š„ๅญ็ฑปใ€‚่ฆ่Žทๅ–ๅฏ็”จๆž„ๅปบๅ™จ็š„ๅˆ—่กจ๏ผŒ่ฏทไฝฟ็”จ tfds.list_builders() ๆˆ–ๆŸฅ็œ‹ๆˆ‘ไปฌ็š„็›ฎๅฝ•ใ€‚ End of explanation """ ds = tfds.load('mnist', split='train', shuffle_files=True) assert isinstance(ds, tf.data.Dataset) print(ds) """ Explanation: ๅŠ ่ฝฝๆ•ฐๆฎ้›† tfds.load ๅŠ ่ฝฝๆ•ฐๆฎ้›†ๆœ€็ฎ€ๅ•็š„ๆ–นๆณ•ๆ˜ฏ tfds.loadใ€‚ๅฎƒๅฐ†ๆ‰ง่กŒไปฅไธ‹ๆ“ไฝœ๏ผš ไธ‹่ฝฝๆ•ฐๆฎๅนถๅฐ†ๅ…ถๅญ˜ๅ‚จไธบ tfrecord ๆ–‡ไปถใ€‚ ๅŠ ่ฝฝ tfrecord ๅนถๅˆ›ๅปบ tf.data.Datasetใ€‚ End of explanation """ builder = tfds.builder('mnist') # 1. Create the tfrecord files (no-op if already exists) builder.download_and_prepare() # 2. 
Load the `tf.data.Dataset` ds = builder.as_dataset(split='train', shuffle_files=True) print(ds) """ Explanation: ไธ€ไบ›ๅธธ่ง็š„ๅ‚ๆ•ฐ๏ผš split=๏ผš่ฆ่ฏปๅ–็š„ๆ‹†ๅˆ†๏ผˆไพ‹ๅฆ‚ 'train'ใ€['train', 'test']ใ€'train[80%:]'โ€ฆ๏ผ‰ใ€‚่ฏทๅ‚้˜…ๆˆ‘ไปฌ็š„ๆ‹†ๅˆ† API ๆŒ‡ๅ—ใ€‚ shuffle_files=๏ผšๆŽงๅˆถๆ˜ฏๅฆๆ‰“ไนฑๆฏไธชๅ‘จๆœŸ้—ด็š„ๆ–‡ไปถ้กบๅบ๏ผˆTFDS ไปฅๅคšไธช่พƒๅฐ็š„ๆ–‡ไปถๅญ˜ๅ‚จๅคงๆ•ฐๆฎ้›†๏ผ‰ data_dir=๏ผšๆ•ฐๆฎ้›†ๅญ˜ๅ‚จ็š„ไฝ็ฝฎ๏ผˆ้ป˜่ฎคไธบ ~/tensorflow_datasets/๏ผ‰ with_info=True๏ผš่ฟ”ๅ›žๅŒ…ๅซๆ•ฐๆฎ้›†ๅ…ƒๆ•ฐๆฎ็š„ tfds.core.DatasetInfo download=False๏ผšๅœ็”จไธ‹่ฝฝ tfds.builder tfds.load ๆ˜ฏ tfds.core.DatasetBuilder ็š„็˜ฆๅฐ่ฃ…ๅฎนๅ™จใ€‚ๆ‚จๅฏไปฅไฝฟ็”จ tfds.core.DatasetBuilder API ่Žทๅพ—็›ธๅŒ็š„่พ“ๅ‡บ๏ผš End of explanation """ ds = tfds.load('mnist', split='train') ds = ds.take(1) # Only take a single example for example in ds: # example is `{'image': tf.Tensor, 'label': tf.Tensor}` print(list(example.keys())) image = example["image"] label = example["label"] print(image.shape, label) """ Explanation: tfds build CLI ๅฆ‚ๆžœๆ‚จๅธŒๆœ›็”Ÿๆˆไธ€ไธช็‰นๅฎš็š„ๆ•ฐๆฎ้›†๏ผŒๅฏไปฅไฝฟ็”จ tfds ๅ‘ฝไปค่กŒใ€‚ไพ‹ๅฆ‚๏ผš sh tfds build mnist ่ฏทๅ‚้˜…ๆ–‡ๆกฃๆŸฅ็œ‹ๅฏ็”จๆ ‡ๅฟ—ใ€‚ ่ฟญไปฃๆ•ฐๆฎ้›† ไฝœไธบๅญ—ๅ…ธ ้ป˜่ฎคๆƒ…ๅ†ตไธ‹๏ผŒtf.data.Dataset ๅฏน่ฑกๅŒ…ๅซ tf.Tensor ็š„ dict๏ผš End of explanation """ ds = tfds.load('mnist', split='train', as_supervised=True) ds = ds.take(1) for image, label in ds: # example is (image, label) print(image.shape, label) """ Explanation: ่ฆๆ‰พๅ‡บ dict ้”ฎๅๅ’Œ็ป“ๆž„๏ผŒ่ฏทๆŸฅ็œ‹ๆˆ‘ไปฌ็›ฎๅฝ•ไธญ็š„ๆ•ฐๆฎ้›†ๆ–‡ๆกฃใ€‚ไพ‹ๅฆ‚๏ผšmnist ๆ–‡ๆกฃใ€‚ ไฝœไธบๅ…ƒ็ป„๏ผˆas_supervised=True๏ผ‰ ไฝฟ็”จ as_supervised=True๏ผŒๆ‚จๅฏไปฅ่Žทๅ– (features, label) ๅ…ƒ็ป„ไฝœไธบๆ›ฟไปฃ็š„็›‘็ฃๆ•ฐๆฎ้›†ใ€‚ End of explanation """ ds = tfds.load('mnist', split='train', as_supervised=True) ds = ds.take(1) for image, label in tfds.as_numpy(ds): print(type(image), type(label), label) """ Explanation: ไฝœไธบ numpy๏ผˆtfds.as_numpy๏ผ‰ ไฝฟ็”จ tfds.as_numpy ่ฟ›่กŒไปฅไธ‹่ฝฌๆข๏ผš tf.Tensor -&gt; np.array tf.data.Dataset -&gt; Iterator[Tree[np.array]]๏ผˆTree ๅฏ่ƒฝๆ˜ฏไปปๆ„ๅตŒๅฅ—็š„ Dictใ€Tuple๏ผ‰ End of explanation """ image, label = tfds.as_numpy(tfds.load( 'mnist', split='test', batch_size=-1, as_supervised=True, )) print(type(image), image.shape) """ Explanation: ไฝœไธบ batched tf.Tensor๏ผˆbatch_size=-1๏ผ‰ ไฝฟ็”จ batch_size=-1๏ผŒๆ‚จๅฏไปฅๅœจๅ•ไธชๆ‰นๆฌกไธญๅŠ ่ฝฝๅฎŒๆ•ด็š„ๆ•ฐๆฎ้›†ใ€‚ ่ฟ™ๅฏไธŽ as_supervised=True ๅ’Œ tfds.as_numpy ็ป“ๅˆไฝฟ็”จไปฅ่Žทๅ– (np.array, np.array) ๅฝขๅผ็š„ๆ•ฐๆฎ๏ผš End of explanation """ ds = tfds.load('mnist', split='train') ds = ds.batch(32).prefetch(1) tfds.benchmark(ds, batch_size=32) tfds.benchmark(ds, batch_size=32) # Second epoch much faster due to auto-caching """ Explanation: ่ฏทๆณจๆ„๏ผŒๆ‚จ็š„ๆ•ฐๆฎ้›†ๅฏไปฅๆ”พๅ…ฅๅ†…ๅญ˜๏ผŒๅนถไธ”ๆ‰€ๆœ‰ๆ ทๆœฌ้ƒฝๅ…ทๆœ‰็›ธๅŒ็š„ๅฝข็Šถใ€‚ ๅฏนๆ‚จ็š„ๆ•ฐๆฎ้›†่ฟ›่กŒๅŸบๅ‡†ๅˆ†ๆž ๅฏนๆ•ฐๆฎ้›†่ฟ›่กŒๅŸบๅ‡†ๅˆ†ๆžๆ˜ฏๅฏนไปปไฝ•ๅฏ่ฟญไปฃๅฏน่ฑก๏ผˆไพ‹ๅฆ‚ tf.data.Datasetใ€tfds.as_numpyโ€ฆ๏ผ‰็š„็ฎ€ๅ• tfds.benchmark ่ฐƒ็”จใ€‚ End of explanation """ ds, info = tfds.load('mnist', split='train', with_info=True) tfds.as_dataframe(ds.take(4), info) """ Explanation: ไธ่ฆๅฟ˜่ฎฐไฝฟ็”จ batch_size= kwarg ๅฏนๆฏไธชๆ‰นๆฌกๅคงๅฐ็š„็ป“ๆžœ่ฟ›่กŒๅฝ’ไธ€ๅŒ–ใ€‚ ๆ€ปไน‹๏ผŒ็ฌฌไธ€ไธช้ข„็ƒญๆ‰นๆฌกไธŽๅ…ถไป–้ข„็ƒญๆ‰นๆฌกๅˆ†ๅผ€ไปฅๆ•่Žท tf.data.Dataset ้ขๅค–็š„่ฎพ็ฝฎๆ—ถ้—ด๏ผˆไพ‹ๅฆ‚็ผ“ๅ†ฒๅŒบๅˆๅง‹ๅŒ–โ€ฆ๏ผ‰ใ€‚ ่ฏทๆณจๆ„๏ผŒ็”ฑไบŽ TFDS ่‡ชๅŠจ็ผ“ๅญ˜ๅŠŸ่ƒฝ๏ผŒ็ฌฌไบŒๆฌก่ฟญไปฃ็š„้€Ÿๅบฆ่ฆๅฟซๅพ—ๅคšใ€‚ tfds.benchmark ไผš่ฟ”ๅ›ž tfds.core.BenchmarkResult ๏ผŒๅฏไปฅๆฃ€ๆŸฅๅฎƒไปฅ่ฟ›่กŒ่ฟ›ไธ€ๆญฅๅˆ†ๆžใ€‚ 
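Before the pointers in the next section, here is a minimal input-pipeline sketch (an added example, not taken from the original guide): it shuffles, batches and prefetches the training split before it would be fed to a model.
End of explanation
"""
# Added sketch (not from the original guide): a minimal tf.data input pipeline on top
# of a TFDS dataset - shuffle, batch and prefetch before feeding it to a model.
ds = tfds.load('mnist', split='train', as_supervised=True)
ds = ds.shuffle(1024).batch(32).prefetch(tf.data.AUTOTUNE)
for images, labels in ds.take(1):
    print(images.shape, labels.shape)
"""
Explanation: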
ๆž„ๅปบ็ซฏๅˆฐ็ซฏๆตๆฐด็บฟ ่ฆๆƒณๆทฑๅ…ฅไธ€็‚น๏ผŒๆ‚จๅฏไปฅๆŸฅ็œ‹๏ผš ๆˆ‘ไปฌ็š„็ซฏๅˆฐ็ซฏ Keras ็คบไพ‹ๆฅไบ†่งฃๅฎŒๆ•ด็š„่ฎญ็ปƒๆตๆฐด็บฟ๏ผˆๅŒ…ๆ‹ฌๆ‰นๅค„็†ใ€ๆ‰“ไนฑ้กบๅบโ€ฆ๏ผ‰ใ€‚ ๆœ‰ๅŠฉไบŽๆ้ซ˜ๆตๆฐด็บฟ้€Ÿๅบฆ็š„ๆ€ง่ƒฝๆŒ‡ๅ—๏ผˆๆ็คบ๏ผšไฝฟ็”จ tfds.benchmark(ds) ๅฏนๆ•ฐๆฎ้›†่ฟ›่กŒๅŸบๅ‡†ๅˆ†ๆž๏ผ‰ใ€‚ ๅ‘ˆ็Žฐ tfds.as_dataframe ไฝฟ็”จ tfds.as_dataframe๏ผŒๅฏไปฅๅฐ† tf.data.Dataset ๅฏน่ฑก่ฝฌๆขไธบ pandas.DataFrame ไปฅๅœจ Colab ไธŠๅ‘ˆ็Žฐใ€‚ ๆทปๅŠ  tfds.core.DatasetInfo ไฝœไธบ tfds.as_dataframe ็š„็ฌฌไบŒไธชๅ‚ๆ•ฐไปฅๅ‘ˆ็Žฐๅ›พๅƒใ€้Ÿณ้ข‘ใ€ๆ–‡ๆœฌใ€่ง†้ข‘โ€ฆ ไฝฟ็”จ ds.take(x) ไป…ๆ˜พ็คบๅ‰ x ไธชๆ ทๆœฌใ€‚pandas.DataFrame ๅฐ†ๅœจๅ†…ๅญ˜ไธญๅŠ ่ฝฝๅฎŒๆ•ดๆ•ฐๆฎ้›†๏ผŒๅนถไธ”ๆ˜พ็คบๅผ€้”€ๅฏ่ƒฝ้žๅธธ้ซ˜ใ€‚ End of explanation """ ds, info = tfds.load('mnist', split='train', with_info=True) fig = tfds.show_examples(ds, info) """ Explanation: tfds.show_examples tfds.show_examples ่ฟ”ๅ›ž matplotlib.figure.Figure๏ผˆ็Žฐๅœจๅชๆ”ฏๆŒๅ›พๅƒๆ•ฐๆฎ้›†๏ผ‰๏ผš End of explanation """ ds, info = tfds.load('mnist', with_info=True) """ Explanation: ่ฎฟ้—ฎๆ•ฐๆฎ้›†ๅ…ƒๆ•ฐๆฎ ๆ‰€ๆœ‰ๆž„ๅปบๅ™จ้ƒฝๅŒ…ๆ‹ฌไธ€ไธชๅŒ…ๅซๆ•ฐๆฎ้›†ๅ…ƒๆ•ฐๆฎ็š„ tfds.core.DatasetInfo ๅฏน่ฑกใ€‚ ๅฏไปฅ้€š่ฟ‡ไปฅไธ‹ๆ–นๅผ่ฎฟ้—ฎ๏ผš tfds.load API๏ผš End of explanation """ builder = tfds.builder('mnist') info = builder.info """ Explanation: tfds.core.DatasetBuilder API๏ผš End of explanation """ print(info) """ Explanation: ๆ•ฐๆฎ้›†ไฟกๆฏๅŒ…ๅซๆœ‰ๅ…ณๆ•ฐๆฎ้›†็š„้™„ๅŠ ไฟกๆฏ๏ผˆ็‰ˆๆœฌใ€ๅผ•็”จใ€้ฆ–้กตใ€ๆ่ฟฐโ€ฆ๏ผ‰ใ€‚ End of explanation """ info.features """ Explanation: ็‰นๅพๅ…ƒๆ•ฐๆฎ๏ผˆๆ ‡็ญพๅ็งฐใ€ๅ›พๅƒๅฝข็Šถโ€ฆ๏ผ‰ ่ฎฟ้—ฎ tfds.features.FeatureDict๏ผš End of explanation """ print(info.features["label"].num_classes) print(info.features["label"].names) print(info.features["label"].int2str(7)) # Human readable version (8 -> 'cat') print(info.features["label"].str2int('7')) """ Explanation: ็ฑปใ€ๆ ‡็ญพๅ็š„ๆ•ฐ้‡๏ผš End of explanation """ print(info.features.shape) print(info.features.dtype) print(info.features['image'].shape) print(info.features['image'].dtype) """ Explanation: ๅฝข็Šถใ€ๆ•ฐๆฎ็ฑปๅž‹๏ผš End of explanation """ print(info.splits) """ Explanation: ๆ‹†ๅˆ†ๅ…ƒๆ•ฐๆฎ๏ผˆไพ‹ๅฆ‚ๆ‹†ๅˆ†ๅ็งฐใ€ๆ ทๆœฌๆ•ฐ้‡โ€ฆ๏ผ‰ ่ฎฟ้—ฎ tfds.core.SplitDict๏ผš End of explanation """ print(list(info.splits.keys())) """ Explanation: ๅฏ็”จๆ‹†ๅˆ†๏ผš End of explanation """ print(info.splits['train'].num_examples) print(info.splits['train'].filenames) print(info.splits['train'].num_shards) """ Explanation: ่Žทๅ–ๆœ‰ๅ…ณไธชๅˆซๆ‹†ๅˆ†็š„ไฟกๆฏ๏ผš End of explanation """ print(info.splits['train[15%:75%]'].num_examples) print(info.splits['train[15%:75%]'].file_instructions) """ Explanation: ๅฎƒไนŸ้€‚็”จไบŽ subsplit API๏ผš End of explanation """
nansencenter/nansat
docs/source/notebooks/nansat-introduction.ipynb
gpl-3.0
import os import shutil import nansat idir = os.path.join(os.path.dirname(nansat.__file__), 'tests', 'data/') """ Explanation: Nansat: First Steps Overview The NANSAT package contains several classes: Nansat - open and read satellite data Domain - define grid for the region of interest Figure - create raster images (PNG, TIF) NSR - define spatial reference (SR) Copy sample data End of explanation """ import matplotlib.pyplot as plt %matplotlib inline from nansat import Nansat n = Nansat(idir+'gcps.tif') """ Explanation: Open file with Nansat End of explanation """ print(n) """ Explanation: Read information ABOUT the data (METADATA) End of explanation """ b1 = n[1] """ Explanation: Read the actual DATA End of explanation """ %whos plt.imshow(b1);plt.colorbar() plt.show() """ Explanation: Check what kind of data we have End of explanation """ n.write_figure('map.png', pltshow=True) """ Explanation: Find where the image is taken End of explanation """
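# Added sketch (not part of the original notebook): since the band returned by n[1]
# is an ordinary NumPy array, plain NumPy and Matplotlib calls can be used to
# summarise it numerically.
import numpy as np
print('band 1: min=%s max=%s mean=%s' % (np.nanmin(b1), np.nanmax(b1), np.nanmean(b1)))
plt.hist(b1[np.isfinite(b1)], bins=50)
plt.show()
"""
Explanation: A small added extension, not in the original notebook: basic statistics and a histogram of the band values give a quick feel for the data before any further processing; NaN-aware NumPy functions are used here just in case the band contains missing values.
End of explanation
"""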
martinjrobins/hobo
examples/plotting/residuals-autocorrelation-diagnostics.ipynb
bsd-3-clause
import pints import pints.toy as toy import pints.plot import numpy as np import matplotlib.pyplot as plt # Use the toy logistic model model = toy.LogisticModel() real_parameters = [0.015, 500] times = np.linspace(0, 1000, 100) org_values = model.simulate(real_parameters, times) # Add independent Gaussian noise noise = 50 values = org_values + np.random.normal(0, noise, org_values.shape) # Set up the problem and run the optimisation problem = pints.SingleOutputProblem(model, times, values) score = pints.SumOfSquaresError(problem) boundaries = pints.RectangularBoundaries([0, 200], [1, 1000]) x0 = np.array([0.5, 500]) found_parameters, found_value = pints.optimise( score, x0, boundaries=boundaries, method=pints.XNES, ) print('Score at true solution: ') print(score(real_parameters)) print('Found solution: True parameters:' ) for k, x in enumerate(found_parameters): print(pints.strfloat(x) + ' ' + pints.strfloat(real_parameters[k])) fig, ax = pints.plot.series(np.array([found_parameters]), problem, ref_parameters=real_parameters) fig.set_size_inches(15, 7.5) plt.show() """ Explanation: Noise model diagnostics: residuals autocorrelation over time This example introduces two noise model diagnostics which are useful for studying the autocorrelation in time series noise. The general procedure we follow in this notebook is to start by performing a fit assuming an IID noise process. Next, we generate the diagnostic plots from the IID residuals, and see if they suggest that a correlated noise process would be applicable. The two diagnostics demonstrated in this notebook are pints.residuals_diagnostics.plot_residuals_distance and pints.residuals_diagnostics.plot_residuals_binned_autocorrelation. Both methods can take either a single best fit parameter or an MCMC chain of posterior samples (when the MCMC chain is provided, the posterior median of the residuals will be used). Another diagnostic plot for autocorrelation in the noise process is shown in Evaluating noise models using autocorrelation plots of the residuals. Pints also includes diagnostic plots to study the magnitude of a noise process, which are demonstrated in Noise model variance diagnostic plots. Residuals distance matrix This diagnostic plot is a distance matrix of the residuals. Correlated noise, such as AR(1) (autoregressive order 1), will cause the distance matrix to exhibit a banded appearance. First, we generate synthetic data according to the logistic model and add IID noise. End of explanation """ # Add independent Gaussian noise rho = 0.85 sigma = 50 values = org_values + pints.noise.ar1(rho, sigma, len(org_values)) # Set up the problem and run the optimisation problem_ar1 = pints.SingleOutputProblem(model, times, values) score = pints.SumOfSquaresError(problem_ar1) boundaries = pints.RectangularBoundaries([0, 200], [1, 1000]) x0 = np.array([0.5, 500]) found_parameters_ar1, found_value_ar1 = pints.optimise( score, x0, boundaries=boundaries, method=pints.XNES, ) fig, ax = pints.plot.series(np.array([found_parameters_ar1]), problem_ar1, ref_parameters=real_parameters) fig.set_size_inches(15, 7.5) plt.show() """ Explanation: Next, we generate a similar time series, this time with AR(1) noise. 
End of explanation """ from pints.residuals_diagnostics import plot_residuals_distance # Plot the distance matrix of the residuals for IID noise fig = plot_residuals_distance(np.array([found_parameters]), problem) plt.show() # Plot the distance matrix of the residuals for AR(1) noise fig = plot_residuals_distance(np.array([found_parameters_ar1]), problem_ar1) plt.show() """ Explanation: Finally, we plot the distance matrix between the residuals for the time series generated above. The plot can be created using the pints.residuals_diagnostics.plot_residuals_distance function, which takes as input the fitted parameters and the Pints problem. End of explanation """ from pints.residuals_diagnostics import plot_residuals_binned_autocorrelation fig = plot_residuals_binned_autocorrelation( np.array([found_parameters]), problem, n_bins=5 ) fig = plot_residuals_binned_autocorrelation( np.array([found_parameters_ar1]), problem_ar1, n_bins=5 ) plt.show() """ Explanation: Comparing the two figures above, the matrix for the AR(1) noise time series clearly shows a banded appearance. This plot suggests that the IID noise assumption we made when fitting the model is probably inadequate, and we should rerun the fit using a correlated noise model. Binned residuals autocorrelation The next diagnostic plot divides the time series into consecutive bins, and displays the lag 1 autocorrelation of the residuals calculated within each bin over time. This function is available from Pints using pints.residuals_diagnostics.plot_residuals_binned_autocorrelation. End of explanation """ import pints import pints.toy as toy import pints.plot import numpy as np import matplotlib.pyplot as plt # Use the toy logistic model model = toy.LogisticModel() real_parameters = [0.015, 500] times = np.linspace(0, 1000, 500) org_values = model.simulate(real_parameters, times) # Make half IID noise and half AR(1) noise noise = 50 rho = 0.9 sigma = 50 values = org_values + \ np.concatenate((np.random.normal(0, noise, len(org_values)//2), pints.noise.ar1(rho, sigma, len(org_values)//2))) # Set up the problem and run the optimisation problem_mixed = pints.SingleOutputProblem(model, times, values) score = pints.SumOfSquaresError(problem_mixed) boundaries = pints.RectangularBoundaries([0, 200], [1, 1000]) x0 = np.array([0.5, 500]) found_parameters_mixed, found_value_mixed = pints.optimise( score, x0, boundaries=boundaries, method=pints.XNES, ) fig, ax = pints.plot.series(np.array([found_parameters_mixed]), problem_mixed, ref_parameters=real_parameters) fig.set_size_inches(15, 7.5) plt.show() fig = plot_residuals_distance(np.array([found_parameters_mixed]), problem_mixed) plt.show() fig = plot_residuals_binned_autocorrelation( np.array([found_parameters_mixed]), problem_mixed, n_bins=15 ) plt.show() """ Explanation: Similar to the distance matrix diagnostic, these plots indicate a high autocorrelation over time for the problem with AR(1) noise. The binned autocorrelation diagnostic is particularly helpful for detecting noise processes in which the level of correlation is changing over time. For a final example, we generate a time series with IID noise in the first half, and AR(1) noise in the second half. As before, we then fit the parameters assuming IID noise, and look at the diagnostic plots to evaluate the noise model. End of explanation """
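# Added sketch (not part of the original example): the lag-1 autocorrelation of the
# IID-fit residuals for the mixed-noise series can also be computed directly with
# NumPy, which is convenient inside automated model checks.
residuals = values - model.simulate(found_parameters_mixed, times)
lag1_autocorrelation = np.corrcoef(residuals[:-1], residuals[1:])[0, 1]
print(lag1_autocorrelation)
"""
Explanation: This final cell is an added sketch rather than part of the original example: it condenses the residual diagnostics into a single number, the overall lag-1 autocorrelation of the residuals. A value close to zero is consistent with roughly independent noise, while a clearly positive value points to correlated noise, complementing the binned plot above.
End of explanation
"""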
prashantas/MyDataScience
DeepNetwork/Keras/MnistKerasModelsGood.ipynb
bsd-2-clause
from keras.datasets import mnist # subroutines for fetching the MNIST dataset from keras.models import Model # basic class for specifying and training a neural network from keras.layers import Input, Dense # the two types of neural network layer we will be using from keras.utils import np_utils # utilities for one-hot encoding of ground truth values batch_size = 128 # in each iteration, we consider 128 training examples at once num_epochs = 20 # we iterate twenty times over the entire training set hidden_size = 512 # there will be 512 neurons in both hidden layers """ Explanation: https://cambridgespark.com/content/tutorials/deep-learning-for-complete-beginners-recognising-handwritten-digits/index.html End of explanation """ (X_train, y_train), (X_test, y_test) = mnist.load_data() # fetch MNIST data ## https://keras.io/datasets/ X_train.shape y_train.shape X_test.shape num_train = 60000 # there are 60000 training examples in MNIST num_test = 10000 # there are 10000 test examples in MNIST height, width, depth = 28, 28, 1 # MNIST images are 28x28 and greyscale num_classes = 10 # there are 10 classes (1 per digit) X_train = X_train.reshape(num_train, height * width) # Flatten data to 1D X_test = X_test.reshape(num_test, height * width) # Flatten data to 1D X_train = X_train.astype('float32') X_test = X_test.astype('float32') X_train /= 255 # Normalise data to [0, 1] range X_test /= 255 # Normalise data to [0, 1] range Y_train = np_utils.to_categorical(y_train, num_classes) # One-hot encode the labels Y_test = np_utils.to_categorical(y_test, num_classes) # One-hot encode the labels """ Explanation: Now it is time to load and preprocess the MNIST data set. Keras makes this extremely simple, with a fixed interface for fetching and extracting the data from the remote server, directly into NumPy arrays. To preprocess the input data, we will first flatten the images into 1D (as we will consider each pixel as a separate input feature), and we will then force the pixel intensity values to be in the [0, 1] range by dividing them by 255. This is a very simple way to "normalise" the data, and I will be discussing other ways in future tutorials in this series. A good approach to a classification problem is to use probabilistic classification, i.e. to have a single output neuron for each class, outputting a value which corresponds to the probability of the input being of that particular class. This implies a need to transform the training output data into a "one-hot" encoding: for example, if the desired output class is 3, and there are five classes overall (labelled 0 to 4), then an appropriate one-hot encoding is: [0 0 0 1 0]. Keras, once again, provides us with an out-of-the-box functionality for doing just that.
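If you want to see what that out-of-the-box functionality is doing under the hood, the same encoding can be reproduced by hand with NumPy (a small added check, not part of the original tutorial):
import numpy as np
Y_train_manual = np.eye(num_classes)[y_train]   # row k of the identity matrix is the one-hot vector for class k
print((Y_train_manual == Y_train).all())        # should print True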
End of explanation """ inp = Input(shape=(height * width,)) # Our input is a 1D vector of size 784 hidden_1 = Dense(hidden_size, activation='relu')(inp) # First hidden ReLU layer hidden_2 = Dense(hidden_size, activation='relu')(hidden_1) # Second hidden ReLU layer out = Dense(num_classes, activation='softmax')(hidden_2) # Output softmax layer model = Model(inputs=inp, outputs=out) # To define a model, just specify its input and output layers model.compile(loss='categorical_crossentropy', # using the cross-entropy loss function optimizer='adam', # using the Adam optimiser metrics=['accuracy']) # reporting the accuracy """ Explanation: An excellent feature of Keras, that sets it apart from frameworks such as TensorFlow, is automatic inference of shapes; we only need to specify the shape of the input layer, and afterwards Keras will take care of initialising the weight variables with proper shapes. Once all the layers have been defined, we simply need to identify the input(s) and the output(s) in order to define our model, as illustrated below. End of explanation """ history = model.fit(X_train, Y_train, # Train the model using the training set... batch_size=batch_size, epochs=num_epochs, verbose=1, validation_split=0.1) # ...holding out 10% of the data for validation history.params history.history score = model.evaluate(X_test, Y_test, verbose=1) # Evaluate the trained model on the test set! print('score::',score) print('Test score:', score[0]) print('Test accuracy:',score[1]) """ Explanation: Finally, we call the training algorithm with the determined batch size and epoch count. It is good practice to set aside a fraction of the training data to be used just for verification that our algorithm is (still) properly generalising (this is commonly referred to as the validation set); here we will hold out 10%10% of the data for this purpose. An excellent out-of-the-box feature of Keras is verbosity; it's able to provide detailed real-time pretty-printing of the training algorithm's progress. End of explanation """ import matplotlib.pyplot as plt %matplotlib inline X_test_0 = X_test[0,:].reshape(1,height * width) Y_test_0 = Y_test[0,:] print(Y_test_0) plt.imshow(X_test_0.reshape(28,28)) import numpy as np pred = model.predict(X_test_0) print('Label of testing sample:', np.argmax(Y_test_0)) print('\nOutput of the softmax layer:',pred[0]) print('\nNeural Network prediction:', np.argmax(pred[0])) """ Explanation: Now lets predict one single sample End of explanation """
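# Added sketch (not part of the original tutorial): recompute the test accuracy from
# the raw softmax outputs as a cross-check against model.evaluate() above.
predictions = model.predict(X_test)
predicted_classes = np.argmax(predictions, axis=1)
print('Recomputed test accuracy:', np.mean(predicted_classes == y_test))
"""
Explanation: This closing cell is an added sketch rather than part of the original tutorial: it converts the softmax outputs into predicted digit classes with argmax and compares them against the integer labels in y_test, which should reproduce the accuracy reported by model.evaluate above.
End of explanation
"""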
AndreySheka/dl_ekb
hw9/music_binder/music_dnn_task.ipynb
mit
plt.figure(figsize=(20,4)) pylab.plot(np.arange(len(y)) * 1.0 /sr, y, 'k') pylab.xlim([0, 10]) pylab.show() """ Explanation: Sound as 1D-Signal End of explanation """ S = librosa.feature.melspectrogram(y, sr=sr, n_mels=128) log_S = librosa.logamplitude(S, ref_power=np.max) plt.figure(figsize=(20,4)) librosa.display.specshow(log_S, sr=sr, x_axis='time', y_axis='mel', cmap='hot') plt.title('mel power spectrogram') plt.colorbar(format='%+02.0f dB') plt.tight_layout() def get_spectgorgamm(fname): y, sr = librosa.load(fname) S = librosa.feature.melspectrogram(y, sr=sr, n_mels=128) log_S = librosa.logamplitude(S, ref_power=np.max) return log_S[:, :1200] def plot_spectrogramm(log_S): plt.figure(figsize=(20,4)) librosa.display.specshow(log_S, sr=sr, x_axis='time', y_axis='mel', cmap='hot') plt.title('mel power spectrogram') plt.colorbar(format='%+02.0f dB') plt.tight_layout() """ Explanation: Sound as 2D-Signal End of explanation """ geners = ['blues', 'country', 'hiphop', 'metal', 'reggae', 'classical', 'disco', 'jazz', 'pop', 'rock'] id2gener = dict() X_names, y = [], [] for gener_id, gener in enumerate(geners): id2gener[gener_id] = gener for track in os.listdir('./genres/' + gener): if '.mp3' in track or '.au' in track and '_' not in track: trackfile = os.path.join('./genres/', gener, track) X_names.append(trackfile) y.append(gener_id) from multiprocessing import Pool ncpu = 4 X = Pool(ncpu).map(get_spectgorgamm, X_names) """ Explanation: Prepare a data End of explanation """ perm = np.random.permutation(len(y)) X, X_names, y = np.array(X)[perm].astype('float32'), np.array(X_names)[perm], np.array(y)[perm] Xreshape = X.reshape(X.shape[0], X.shape[1], X.shape[2]) X_train, X_valid = Xreshape[:800], Xreshape[800:] y_train, y_valid = y[:800], y[800:] from sklearn.metrics import accuracy_score from sklearn.neighbors import KNeighborsClassifier clf = KNeighborsClassifier(n_jobs=ncpu) clf = <train clf> y_val_pred = <make prediction on valid set> print accuracy_score(y_valid, y_val_pred) """ Explanation: Nearest Neighbors genre classification End of explanation """ import theano import lasagne import theano.tensor as T perm = np.random.permutation(len(y)) X, y = np.array(X)[perm].astype('float32'), np.array(y)[perm] Xreshape = X.reshape(X.shape[0], X.shape[1], X.shape[2]) X_train, X_valid = Xreshape[:800], Xreshape[800:] y_train, y_valid = y[:800], y[800:] input_X, target_y = T.tensor3("X", dtype='float64'), T.vector("y", dtype='int32') nn = lasagne.layers.InputLayer(shape=(None, X.shape[1], X.shape[2]), input_var=input_X) nn = <Build convnet using Conv1DLayer MaxPool1DLayer> nn = <Add several DenseLayers and DropoutLayer> nn = lasagne.layers.DenseLayer(nn, 10, nonlinearity=lasagne.nonlinearities.softmax) y_predicted = lasagne.layers.get_output(nn) all_weights = lasagne.layers.get_all_params(nn) loss = lasagne.objectives.categorical_crossentropy(y_predicted, target_y).mean() accuracy = lasagne.objectives.categorical_accuracy(y_predicted, target_y).mean() updates_sgd = <Your favorite optimizer> train_fun = theano.function([input_X, target_y], [loss, accuracy], allow_input_downcast=True, updates=updates_sgd) test_fun = theano.function([input_X, target_y], [loss, accuracy], allow_input_downcast=True) %%time conv_nn = train_net(nn, train_fun, test_fun, X_train, y_train, X_valid, y_valid, num_epochs=10, batch_size=50) plt.figure(figsize=(5, 5), dpi=500) W = lasagne.layers.get_all_params(nn)[0].get_value() W[::2, :, :] = 0.2 W = np.hstack(W) pylab.imshow(W, cmap='hot', interpolation="nearest") 
pylab.axis('off') pylab.show() """ Explanation: Convolutional Neural Nets http://benanne.github.io/2014/08/05/spotify-cnns.html End of explanation """ from sklearn.neighbors import NearestNeighbors represent = <Get features from last but one layer> represent_fun = theano.function([input_X], [represent], allow_input_downcast=True) f = lambda x: np.array(represent_fun([x])[0]) track_vectors = map(f, X_train) + map(f, X_valid) track_vectors = np.concatenate(track_vectors, axis=0) nn_pred = NearestNeighbors(metric='cosine', algorithm='brute') nn_pred = nn_pred.fit(track_vectors) X_names[0] ans = list(X_names[nn_pred.kneighbors(track_vectors[0])[1][0]]) ans sound_file = ans[0] y, sr = librosa.load(sound_file) librosa.output.write_wav('./genres/tmp.wav', y, sr, norm=True) Audio(url='./genres/tmp.wav') sound_file = ans[1] y, sr = librosa.load(sound_file) librosa.output.write_wav('./genres/tmp.wav', y, sr, norm=True) Audio(url='./genres/tmp.wav') """ Explanation: Find Similar Tracks <img src="./img/cnn_gr.png" width="500"> End of explanation """ from sklearn.manifold import TSNE represent = lasagne.layers.get_output(nn.input_layer) represent_fun = theano.function([input_X], [represent], allow_input_downcast=True) f = lambda x: np.array(represent_fun([x])[0]) track_vectors = map(f, X_train) + map(f, X_valid) track_vectors = np.concatenate(track_vectors, axis=0) track_labels = np.array(list(y_train) + list(y_valid)) X_tsne = <Make TSNE Features> plt.figure(figsize=(10,10), dpi=500) colors = cm.hot(np.linspace(0, 1, len(id2gener))) for idx, gener in id2gener.items(): idx_ = np.where(track_labels == idx) pylab.scatter(X_tsne[:, 0][idx_], X_tsne[:, 1][idx_], c=colors[idx], cmap=cm.hot, label=gener,s=50) pylab.legend(loc=0, ncol=5) """ Explanation: Maps of tracks by SVD and t-SNE Help: https://lts2.epfl.ch/blog/perekres/category/visualizing-hidden-structures-in-datasets-using-deep-learning/ End of explanation """
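One possible way to fill in the angle-bracket placeholders above is sketched here (an editor's addition, not part of the original assignment). The layer sizes, the Adam optimizer and the 2-D reshape for the k-NN classifier are assumptions; only the variable names (clf, nn, loss, all_weights, track_vectors) come from the notebook itself.

# Possible completion of the k-NN cells: KNeighborsClassifier expects 2-D input,
# so the spectrograms are flattened before fitting and predicting.
clf = clf.fit(X_train.reshape(len(X_train), -1), y_train)
y_val_pred = clf.predict(X_valid.reshape(len(X_valid), -1))

# Possible completion of the Lasagne placeholders: a small Conv1D/MaxPool1D stack,
# one dropout-regularised dense layer, and Adam updates (sizes are guesses, not the reference solution).
nn = lasagne.layers.Conv1DLayer(nn, num_filters=32, filter_size=8,
                                nonlinearity=lasagne.nonlinearities.rectify)
nn = lasagne.layers.MaxPool1DLayer(nn, pool_size=4)
nn = lasagne.layers.Conv1DLayer(nn, num_filters=64, filter_size=8,
                                nonlinearity=lasagne.nonlinearities.rectify)
nn = lasagne.layers.MaxPool1DLayer(nn, pool_size=4)
nn = lasagne.layers.DenseLayer(lasagne.layers.DropoutLayer(nn, p=0.5), 256,
                               nonlinearity=lasagne.nonlinearities.rectify)
updates_sgd = lasagne.updates.adam(loss, all_weights, learning_rate=1e-3)

# Possible completion of the feature-extraction and t-SNE placeholders; the notebook
# itself later uses lasagne.layers.get_output(nn.input_layer) as the last-but-one layer.
represent = lasagne.layers.get_output(nn.input_layer)
X_tsne = TSNE(n_components=2).fit_transform(track_vectors)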
najeeb97khan/Random_Acts_Of_Pizza
Quality Of Features.ipynb
mit
%pylab inline import pandas as pd import seaborn as sns import matplotlib.pyplot as plt import numpy as np import re from nltk.corpus import stopwords from collections import Counter from nltk.corpus import wordnet as wn nouns = {x.name().split('.', 1)[0] for x in wn.all_synsets('n')} import warnings warnings.filterwarnings('ignore') from pylab import rcParams rcParams['figure.figsize'] = 20,10 data_train = pd.read_json('../Dataset/Random Acts Of Pizza/train.json') data_train['data_type'] = 'train' y = data_train.pop('requester_received_pizza') data_train.head(2) data_test = pd.read_json('../Dataset/Random Acts Of Pizza/test.json') data_test['data_type'] = 'test' data_test.head(2) not_present = [] for i in data_train.columns: if i not in data_test.columns: not_present.append(i) data_train.drop(labels=not_present,axis=1,inplace=True) ## Combining the training and testing data data = pd.concat([data_train,data_test],ignore_index=True) data_copy = data.copy() data.shape data.head(2) # Author: Olivier Grisel <[email protected]> # Lars Buitinck <[email protected]> # Chyi-Kwei Yau <[email protected]> # License: BSD 3 clause from time import time from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer from sklearn.decomposition import NMF n_samples = 2000 n_features = 1000 n_topics = 20 n_top_words = 20 def applyNMF(data_samples): print("Extracting tf-idf features for NMF...") tfidf_vectorizer = TfidfVectorizer(max_df=1.0,min_df=1,stop_words='english') tfidf = tfidf_vectorizer.fit_transform(data_samples) print("Fitting the NMF model with tf-idf features," "n_samples=%d and n_features=%d..." % (n_samples, n_features)) nmf = NMF(n_components=n_topics, random_state=1, alpha=.1, l1_ratio=.5).fit(tfidf) return nmf.transform(tfidf) topics = applyNMF(data['request_text_edit_aware']) print(topics.shape) topics_vec = np.argmax(topics,axis=1) data['topics'] = topics_vec data['topics'].head() """ Explanation: Assessing the quality of features Features of the ongoing research project : This Research Work comprises of finding which features contribute to the success of a request. The dataset is obtained from http://www/kaggle.com and comes from the Random Acts Of Pizza subreddit at http://reddit.com. This dataset comes with 5671 textual requests for pizza from the Reddit community with their outcome (successful/unsuccessful) and meta-data. End of explanation """ ## Finding the nature of the topics generated from collections import Counter imp_topics = Counter(topics_vec).most_common(10) print imp_topics def find_topic(topic,remove_verbs=True): requests = data_copy[data['topics'] == imp_topics[topic][0]]['request_text_edit_aware'] chain_requests = '' for request in requests: chain_requests += ('. 
'+request) chain_requests = re.sub('^[a-zA-Z]',' ',chain_requests) words = [word for word in chain_requests.split() if word not in stopwords.words("english")] if remove_verbs: words = [word for word in words if word in nouns] return Counter(words).most_common(100) topic_words = [] for i in range(len(imp_topics)): words = find_topic(i) words = ' '.join([word[0] for word in words]) topic_words.append(words) train = data[data['data_type'] == 'train'] train.head(2) train['received'] = y train.head(2) topic_df = [] for i in range(len(imp_topics)): topic_df.append([imp_topics[i][0],topic_words[i],100*float(train[train['topics'] == imp_topics[i][0]]['received'].sum())\ /len(train[train['topics'] == imp_topics[i][0]]['received'])]) topic_df = pd.DataFrame(topic_df,columns = ['Topic','Words','Success Rate']) topic_df """ Explanation: Finding the Success Rate of Important Features End of explanation """ topic_df.plot(kind='bar',y='Success Rate',x='Topic') plt.xlabel('Topic') plt.ylabel('Success Rate') plt.title('Success Rate vs Topics') plt.show() """ Explanation: Success of Different Topics End of explanation """ train.dropna(inplace=True,axis=0) train.tail(1) train['request_length'] = [len(x.split()) for x in train['request_text_edit_aware']] train.head(2) length = [] def length_success(topic): max_length = train[train['topics'] == topic]['request_length'].max() min_length = train[train['topics'] == topic]['request_length'].min() bin_size = (max_length - min_length)/20 df = train[train['topics'] == topic] for i in range(10): df_one = df[(df['request_length'] >= min_length) & (df['request_length'] < min_length+bin_size)] df_new = df_one[df_one['received'] == True] if(len(df_one) == 0): df_one = ['a'] length.append([topic,min_length,min_length+bin_size,float(len(df_new))/len(df_one)]) min_length = min_length + bin_size for topic in imp_topics: print 'Calculating length probabilities for {} topic..'.format(topic[0]) length_success(topic[0]) df_length = pd.DataFrame(length,columns=['Topic','Lower Bound','Upper Bound','Probability Success']) df_length.head(5) df_length.to_csv('LengthCorrelation.csv',sep=',',columns=df_length.columns) topic_points = [] for topic in imp_topics: points = [] df_new = df_length[df_length['Topic'] == topic[0]] for i in range(8): points.append(((df_new.iloc[i,1] + df_new.iloc[i,2]/2),df_new.iloc[i,3])) topic_points.append(points) """ Explanation: Success correlation with the length of request End of explanation """ i = 1 for points in topic_points: plt.subplot(3,4,i) plt.plot([point[0] for point in points],[point[1] for point in points]) plt.ylabel('Probability of Success') plt.title('Topic {}'.format(imp_topics[i-1][0])) i += 1 if i > 10: i = 1 plt.show() """ Explanation: Length and Topic relation in view of the successful requests End of explanation """ import re regex = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+' evidentiality,i = np.ones((len(train['request_text_edit_aware']))),0 for request in train["request_text_edit_aware"]: url = re.findall(regex,request) if len(url) <= 0: evidentiality[i] = 0 i += 1 train['evidentiality'] = evidentiality train.head(2) ## How evidentiality affects the success rate total = train[train['evidentiality'] == 1].received success = len(total[total == True]) print 'Percentage of successful requests with evidence: {}%'.format(round(float(success)*100/len(total),3)) total = train[train['evidentiality'] == 0].received success = len(total[total == True]) print 'Percentage of successful requests without 
evidence: {}%'.format(round(float(success)*100/len(total),3)) """ Explanation: Finding 'Evidentiality' in each of the request End of explanation """ evidence_relation = pd.Series({'Success with evidence':35.192,'Success without evidence':23.794}) evidence_relation.to_csv('evidenceRelation.csv',sep=',') evidence_relation.plot(kind='bar',rot=0) plt.ylabel('Percentage of Successful request') plt.title('How evidence effects a successful request') plt.show() """ Explanation: Relation of Evidentiality with Success of a request End of explanation """ reciprocity,i = np.zeros((len(train['request_text_edit_aware']),)),0 regex = 'return the favor|pay it forward|pay it back' for request in train['request_text_edit_aware']: match = re.search(regex,request) if match: reciprocity[i] = 1 i += 1 train['reciprocity'] = reciprocity train.head(2) ## Finding percentage of successful request with reciprocity and without it total = train[train['reciprocity'] == 1].received success = len(total[total == True]) print 'Percentage of successful requests with reciprocity: {}%'.format(round(float(success)*100/len(total),3)) total = train[train['reciprocity'] == 0].received success = len(total[total == True]) print 'Percentage of successful requests with reciprocity: {}%'.format(round(float(success)*100/len(total),3)) """ Explanation: Finding reciprocity in the request End of explanation """ reciprocity_relation = pd.Series({'Success with reciprocity':30.058,'Success without reciprocity':23.8}) reciprocity_relation.to_csv('reciprocity_relation.csv',sep=',') reciprocity_relation.plot(kind='bar',rot=0) plt.ylabel('Percentage of Successful request') plt.title('How reciprocity effects a successful request') plt.show() """ Explanation: Relation of Reciprocity with Successful request End of explanation """ train.head(2) narrative = {'Money': 'money now broke week until time last \ day when today tonight paid next first night after tomorrow \ month while account before long Friday rent buy bank still \ bills bills ago cash due due soon past never paycheck check \ spent years poor till yesterday morning dollars financial \ hour bill evening credit budget loan bucks deposit dollar \ current payed'.split(),'Job':'work job paycheck unemployment\ interview fired employment hired hire'.split(),'Student':'college\ student school roommate studying university finals semester class\ study project dorm tuition'.split(),'Family':'family mom wife parents\ mother hus- band dad son daughter father parent mum'.split(),'Craving':'friend \ girlfriend craving birthday boyfriend celebrate party game games movie\ date drunk beer celebrating invited drinks crave wasted invite'.split()} request_narrative = [] narration = [] for request in train['request_text_edit_aware']: word_count = {'Money':0,'Job':0,'Student':0,'Family':0,'Craving':0} n = 0 for word in request.split(): for lexicon in narrative: if word in narrative[lexicon]: word_count[lexicon] += 1 for lexicon in word_count: n += word_count[lexicon] request_narrative.append(word_count) try: narration.append(float(n)/len(request.split())) except: narration.append(0) train['narrative'] = narration """ Explanation: Introducing new features in the dataset Narratives End of explanation """ train.head(2) from nltk.parse.stanford import StanfordDependencyParser import string path_to_jar = '../../../Downloads/stanford-parser-full-2014-08-27/stanford-parser.jar' path_to_models_jar = '../../../Downloads/stanford-parser-full-2014-08-27/stanford-parser-3.4.1-models.jar' dependency_parser = 
StanfordDependencyParser(path_to_jar=path_to_jar, path_to_models_jar=path_to_models_jar) def dep_parse(phrase): words = [word for word in set(word_tokenize(phrase)) if word not in string.punctuation] result = dependency_parser.raw_parse(phrase) dep = result.next() if dep == None: return '' triplet = list(dep.triples()) if triplet == None: return '' parse = [] for i in triplet: try: parse.append("{}({}-{}, {}-{})".format(i[1],i[0][0],words.index(i[0][0])+1,i[2][0],words.index(i[2][0])+1)) except: pass return parse from nltk.tokenize import word_tokenize import nltk.data tokenizer = nltk.data.load('nltk:tokenizers/punkt/english.pickle') ## Warning : TAKES A LONG TIME TO RUN ## DON'T RUN UNTIL REQUIRED text_documents,i = [],2501 for text in train: i += 1 parsed_sents = {} try: parsed_sents['text'] = text parsed_sents['sentences'] = [sents for sents in np.asarray(tokenizer.tokenize(text)) if len(sents.split()) > 1] temp = [] for sentence in parsed_sents['sentences']: try: temp.append(dep_parse(sentence)) except: pass parsed_sents['parses'] = temp except: print text break text_documents.append(parsed_sents) print '{} requests parsed...'.format(i) if i%100 == 0: %store text_documents >> test_documents_new.py filename = str(raw_input('Enter the filename: ')) %store text_documents >> filename """ Explanation: Dependency Parsing Using Stanford Parser End of explanation """ def statistical_sim(sent1, sent2): ''' Statistical similarity between sentences based on the cosine method Returns: float (the cosine similarity b/w sent1 and sent2) ''' sent_token1 = Counter(sent1) sent_token2 = Counter(sent2) intxn = set(sent_token1) & set(sent_token2) numerator = sum([sent_token1[x] * sent_token2[x] for x in intxn]) mod1 = sum([sent_token1[x]**2 for x in sent_token1.keys()]) mod2 = sum([sent_token2[x]**2 for x in sent_token2.keys()]) denominator = sqrt(mod1)*sqrt(mod2) if not denominator: return 0.0 return float(numerator)/denominator ## Sanity check for statistical similarity sent1 = 'Hello my name is Najeeb Khan' sent2 = 'Hello my name is Najeeb Khan' statistical_sim(sent1,sent2) ## Warning : Takes a long time to RUN ## Do not RUN until required i = 0 similarity = [] for request1 in data_train['request_text_edit_aware']: cosine_sim = [] for request2 in data_train['request_text_edit_aware']: if request1 != request2: cosine_sim.append(statistical_sim(request1,request2)) similarity.append([np.argmax(np.asarray(cosine_sim)),np.max(np.asarray(cosine_sim))]) i += 1 if i%100 == 0: %store similarity >> similarity.py print 'Finding similarity in request {}'.format(i) """ Explanation: Finding Redundancy in requests using Cosine Similarity End of explanation """ train['similarity'] = pd.read_json('../../Dataset/Random Acts Of Pizza/data_train.json')['similarity'] """ Explanation: The data is saved in ../../Datasets/Random Acts Of Pizza/data_train.json End of explanation """ politeness_data = pd.read_csv('../../Dataset/Random Acts Of Pizza/politeness_one.csv',index_col=0) part_two = pd.read_csv('../../Dataset/Random Acts Of Pizza/politeness_two.csv',index_col=0) ## One data is missing... 
## So appending missing data politeness_data = politeness_data.append({'text':data_train.iloc[2500,2],'polite':0.5,'impolite':0.5},ignore_index=True) ## Sanity Check for the size of dataset print politeness_data.shape[0] + part_two.shape[0] == train.shape[0] ## Adding the politeness data into 'master' dataframe politeness_data = politeness_data.append(part_two,ignore_index=True) train['polite'] = politeness_data['polite'] train['impolite'] = politeness_data['impolite'] """ Explanation: Including the politeness feature End of explanation """ train.head(2) train.to_json('../../Dataset/Random Acts Of Pizza/trainingData.json',orient='columns') if train.isnull().values.any() == False: print 'Huzzah... No NaNs.. Mission Accomplished :)' """ Explanation: Creating the target .json file End of explanation """
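Referring back to the cosine-similarity step above (which the notebook warns takes a long time to run): a possible vectorised alternative is sketched below. It is an editor's addition, not part of the original analysis; it mirrors the character-level Counter used in statistical_sim, and scikit-learn/numpy are the only assumed extra dependencies.

import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity

# Character counts per request, matching the Counter(sent) behaviour of statistical_sim
counts = CountVectorizer(analyzer='char', lowercase=False).fit_transform(
    data_train['request_text_edit_aware'])
sim = cosine_similarity(counts)
np.fill_diagonal(sim, -1.0)            # exclude self-matches
most_similar_idx = sim.argmax(axis=1)  # index of the most similar other request
most_similar_val = sim.max(axis=1)     # and its cosine similarity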
psychemedia/parlihacks
notebooks/Apache Drill - JSON Written Questions.ipynb
mit
import pandas as pd from pydrill.client import PyDrill %matplotlib inline #Get a connection to the Apache Drill server drill = PyDrill(host='localhost', port=8047) """ Explanation: Using Apache Drill to Query Parliament Written Questions Data A bit of a play to try to get to grips with Apache Drill, querying over JSON and CSV for Parliament data that doesn't quite link up as it should do... End of explanation """ #Get Written questions data - may take some time! stub='http://lda.data.parliament.uk'.strip('/') #We're going to have to call the API somehow import requests ##To make thinks more efficient if we do this again, cache requests #!pip3 install requests_cache #import requests_cache #requests_cache.install_cache('parlidata_cache', backend='sqlite') #Get data from URL def getURL(url): print(url) r=requests.get(url) print(r.status_code) return r #Download data - if there is more, get it def loader(url): items=[] done=False r=getURL(url) while not done: items=items+r.json()['result']['items'] if 'next' in r.json()['result']: r=getURL(r.json()['result']['next']+'&_pageSize=500') else: done=True return items url='{}/{}.json?session={}'.format(stub,'commonswrittenquestions','2015/16') items=loader(url) #Save the data import json with open('writtenQuestions.json', 'w') as outfile: json.dump(items, outfile) """ Explanation: Download Written Questions Data for a Session This is a faff, because there is no bulk download... It also makes more sense to use requests-cache along the way in case things break midway through the download, so you don't then have to reload everything again... End of explanation """ #What does the whole table look like? q=''' SELECT * from dfs.`/Users/ajh59/Dropbox/parlidata/notebooks/writtenQuestions.json` LIMIT 3''' drill.query(q).to_dataframe() #Try to select a column q=''' SELECT j.tablingMember._about AS memberURL FROM dfs.`/Users/ajh59/Dropbox/parlidata/notebooks/writtenQuestions.json` j LIMIT 3 ''' drill.query(q).to_dataframe() #Try to select an item from a list in a column q=''' SELECT tablingMemberPrinted[0]._value AS Name FROM dfs.`/Users/ajh59/Dropbox/parlidata/notebooks/writtenQuestions.json` LIMIT 3 ''' drill.query(q).to_dataframe() #Get a dataframe of all the member URLs - so we can get the data fro each from the Parliament data API q=''' SELECT DISTINCT j.tablingMember._about AS memberURL FROM dfs.`/Users/ajh59/Dropbox/parlidata/notebooks/writtenQuestions.json` j ''' memberIds = drill.query(q).to_dataframe() memberIds.head() #The URLs in the written question data donlt actually resolve - we need to tweak them #Generate a set of members who have tabled questions that have been answered #Note that the identifier Linked Data URL doesn't link... so patch it... members= ['{}.json'.format(i.replace('http://','http://lda.')) for i in memberIds['memberURL']] #Preview the links members[:3] #Download the data files into a data directory !mkdir -p data/members for member in members: !wget -quiet -P data/members {member} !ls data/members #Preview one of the files !head data/members/1474.json """ Explanation: We should now have all the data in a single JSON file (writtenQuestions.json). (Actually, if we had downloaded the data into the same directory as separately and uniquely named JSON files, Apache Drill should be able to query over them...) Let's see if we can query it... 
End of explanation """ q=''' SELECT j.`result`.primaryTopic.gender._value AS gender, j.`result`._about AS url FROM dfs.`/Users/ajh59/Dropbox/parlidata/notebooks/data/members` j''' membersdf=drill.query(q).to_dataframe() membersdf.head() """ Explanation: Apache Drill can query over multiple files in the same directory, so let's try that... Query over all the downloaded member JSON files to create a dataframe to pull out the gender for each member ID URL. End of explanation """ #Lets reverse the URL to the same form as in the written questions - then we can use this for a JOIN membersdf['fixedurl']=membersdf['url'].str.replace('http://lda.','http://').str.replace('.json','') #Save the data as a CSV file membersdf.to_csv('data/members.csv',index=False) !head data/members.csv """ Explanation: Now we need to remap those URLs onto URLs of the form used in the Written Questions data. End of explanation """ #Now find the gender of a question asker - join a query over the monolithic JSON file with the CSV file q=''' SELECT DISTINCT j.tablingMember._about AS memberURL, m.gender FROM dfs.`/Users/ajh59/Dropbox/parlidata/notebooks/writtenQuestions.json` j JOIN dfs.`/Users/ajh59/Dropbox/parlidata/notebooks/data/members.csv` m ON j.tablingMember._about = m.fixedurl LIMIT 3''' drill.query(q).to_dataframe() """ Explanation: Querying Over JOINed JSON and CSV Files Let's see if we can now run a query over the joined monolithic wirtten questions JSON data file and the members CSV data file we created. End of explanation """ #Let's see if we can modify the URL in the spearate JSON files so we can join with the monolithic file q=''' SELECT DISTINCT j.tablingMember._about AS memberURL, m.`result`.primaryTopic.gender._value AS gender, m.`result`._about AS url FROM dfs.`{path}/writtenQuestions.json` j JOIN dfs.`{path}/data/members` m ON j.tablingMember._about = REGEXP_REPLACE(REGEXP_REPLACE(m.`result`._about,'http://lda.','http://'),'\.json','') LIMIT 3'''.format(path='/Users/ajh59/Dropbox/parlidata/notebooks') drill.query(q).to_dataframe() """ Explanation: JOINing Across A Monolithic JSON file and a Directory of Files with Regularly Mismatched Keys That's a clunky route round though... Can we actually do a JOIN between the monolithc written answers JSON file and the separate members JSON files, hacking the member ID URL into the correct form as part of the ON condition? End of explanation """ q=''' SELECT COUNT(*) AS Number, m.`result`.primaryTopic.gender._value AS gender FROM dfs.`{path}/writtenQuestions.json` j JOIN dfs.`{path}/data/members` m ON j.tablingMember._about = REGEXP_REPLACE(REGEXP_REPLACE(m.`result`._about,'http://lda.','http://'),'\.json','') GROUP BY m.`result`.primaryTopic.gender._value'''.format(path='/Users/ajh59/Dropbox/parlidata/notebooks') drill.query(q).to_dataframe() """ Explanation: Now let's do some counting... in the session for which we downloaded the data, how many written questions were tabled by gender, in total? End of explanation """ q=''' SELECT COUNT(*) AS Number, j.tablingMemberPrinted[0]._value AS Name, m.`result`.primaryTopic.gender._value AS gender FROM dfs.`{path}/writtenQuestions.json` j JOIN dfs.`{path}/data/members` m ON j.tablingMember._about = REGEXP_REPLACE(REGEXP_REPLACE(m.`result`._about,'http://lda.','http://'),'\.json','') GROUP BY m.`result`.primaryTopic.gender._value, j.tablingMemberPrinted[0]._value '''.format(path='/Users/ajh59/Dropbox/parlidata/notebooks') drill.query(q).to_dataframe().head() """ Explanation: How many per person, by gender? 
End of explanation """ q=''' SELECT AVG(Number) AS average, gender FROM (SELECT COUNT(*) AS Number, j.tablingMemberPrinted[0]._value AS Name, m.`result`.primaryTopic.gender._value AS gender FROM dfs.`{path}/writtenQuestions.json` j JOIN dfs.`{path}/data/members` m ON j.tablingMember._about = REGEXP_REPLACE(REGEXP_REPLACE(m.`result`._about,'http://lda.','http://'),'\.json','') GROUP BY m.`result`.primaryTopic.gender._value, j.tablingMemberPrinted[0]._value ) GROUP BY gender '''.format(path='/Users/ajh59/Dropbox/parlidata/notebooks') drill.query(q).to_dataframe() """ Explanation: Can we do the average too? End of explanation """ q=''' SELECT AVG(Number) AS average, party FROM (SELECT COUNT(*) AS Number, j.tablingMemberPrinted[0]._value AS Name, m.`result`.primaryTopic.party._value AS party FROM dfs.`{path}/writtenQuestions.json` j JOIN dfs.`{path}/data/members` m ON j.tablingMember._about = REGEXP_REPLACE(REGEXP_REPLACE(m.`result`._about,'http://lda.','http://'),'\.json','') GROUP BY m.`result`.primaryTopic.party._value, j.tablingMemberPrinted[0]._value ) GROUP BY party '''.format(path='/Users/ajh59/Dropbox/parlidata/notebooks') dq=drill.query(q).to_dataframe() dq['average']=dq['average'].astype(float) dq dq.set_index('party').sort_values(by='average').plot(kind="barh"); """ Explanation: How about by party? End of explanation """ q=''' SELECT AVG(Number) AS average, party, gender FROM (SELECT COUNT(*) AS Number, j.tablingMemberPrinted[0]._value AS Name, m.`result`.primaryTopic.party._value AS party, m.`result`.primaryTopic.gender._value AS gender FROM dfs.`{path}/writtenQuestions.json` j JOIN dfs.`{path}/data/members` m ON j.tablingMember._about = REGEXP_REPLACE(REGEXP_REPLACE(m.`result`._about,'http://lda.','http://'),'\.json','') GROUP BY m.`result`.primaryTopic.party._value, m.`result`.primaryTopic.gender._value, j.tablingMemberPrinted[0]._value ) GROUP BY party, gender '''.format(path='/Users/ajh59/Dropbox/parlidata/notebooks') dq=drill.query(q).to_dataframe() dq['average']=dq['average'].astype(float) dq dq.set_index(['party','gender']).sort_values(by='average').plot(kind="barh"); dq.sort_values(by=['gender','average']).set_index(['party','gender']).plot(kind="barh"); dqp=dq.pivot(index='party',columns='gender') dqp.columns = dqp.columns.get_level_values(1) dqp dqp.plot(kind='barh'); """ Explanation: Party and gender? End of explanation """
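The JOIN with the REGEXP_REPLACE patch is repeated in every aggregate query above, so one possible tidy-up (an editor's sketch, not from the original notebook) is to wrap it in a small helper; the path, the drill connection and the column expressions are the ones already used above, while the function name itself is made up.

def member_join_query(select_clause, group_by_clause,
                      path='/Users/ajh59/Dropbox/parlidata/notebooks'):
    # Build the repeated JOIN-plus-REGEXP_REPLACE query once and return a dataframe
    q = '''
    SELECT {select_clause}
    FROM dfs.`{path}/writtenQuestions.json` j
    JOIN dfs.`{path}/data/members` m
    ON j.tablingMember._about = REGEXP_REPLACE(REGEXP_REPLACE(m.`result`._about,'http://lda.','http://'),'\.json','')
    GROUP BY {group_by_clause}
    '''.format(select_clause=select_clause, group_by_clause=group_by_clause, path=path)
    return drill.query(q).to_dataframe()

# Same gender count as above, via the helper
member_join_query("COUNT(*) AS Number, m.`result`.primaryTopic.gender._value AS gender",
                  "m.`result`.primaryTopic.gender._value")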
MadsJensen/intro_to_scientific_computing
notebooks/23-Single-logfile-parser.ipynb
bsd-3-clause
import string """ Explanation: Parsing a single log file parse: to examine in a minute way In this notebook we'll extract the information on reaction time and accuracy from a single log file, and generalise our code to apply to any log file (written with the same structure). It is considered good practice to import all the modules you use in a notebook in the beginning, so we'll start with that: End of explanation """ print(string.ascii_lowercase) print(type(string.ascii_lowercase)) print(string.digits) """ Explanation: We'll be using two lists defined in the string-module: the list of all lowercase (ASCII) letters the list of all digits (as string a string, not numbers) End of explanation """ logfile_name = '../src/logs/0023_FCA_2017-03-09.log' """ Explanation: Read lines of a single log-file into a list Assign the path to one of the logfiles to the variable logfile_name. You will need to adjust the path to wherever you placed the logs-directory containing them! End of explanation """ fp = open(logfile_name, 'r') all_lines = fp.readlines() fp.close() """ Explanation: Open the file, read the lines & close the file. End of explanation """ all_lines[:10] """ Explanation: Display the first ten lines. For this, you can use the slice-syntax [:10], which reads: 'from the start to index 10'. End of explanation """ len(all_lines[5:]) """ Explanation: The first five lines are comments, which we'll want to skip over. How many events are there in the file (how many rows after the comments)? End of explanation """ field_sep = '\t' # COMPLETE THIS LINE """ Explanation: Splitting the lines From the above, determine the field-separator character used in the file. End of explanation """ line = all_lines[5] split_line = line.split(field_sep) print(split_line) """ Explanation: Split the 6th line and display: End of explanation """ # what is the index of the stimulus? # Try changing the relevant value below until you get 'x' split_line[2][5] idx = 5 # which index gives you the letter/digit? """ Explanation: The 1st value of the split list is the time, the 3rd value contains information on whether the event was a stimulus presentation, or a response. Since the data is consistent, to get the actual stimulus presented (letter or digit), we can simply count how many characters 'in' the equal-sign is: the index of the stimulus is: End of explanation """ # 6th line: STIM line = all_lines[5] split_line = line.split(field_sep) print(split_line) stim_time = split_line[0] # replace XXX! cur_stim = split_line[2][idx] # replace YYY! print(stim_time, cur_stim) # 7th line: RESP line = all_lines[6] split_line = line.split(field_sep) print(split_line) resp_time = split_line[0] # replace XXX! cur_resp = split_line[2][idx] # replace YYY! print(resp_time, cur_resp) # calculate RT RT = int(resp_time) - int(stim_time) # formula here print('reaction time: ', RT) """ Explanation: Note that this index is also the one we need for getting to the response (1 or 2). split the 6th line & print the stimulus delivery time and stimulus presented split the 7th line & print the response time and button number pressed calculate the reaction time NB: the contents of the file we are reading from is textual arithmetic on text is very different from that on numbers... 
(you'll need to convert the string to a number; use the int-function) assign the reaction time to a variable ('RT') and print it End of explanation """ 'STIM=x\n'.startswith('STIM') for line in all_lines[5:]: split_line = line.split(field_sep) # does the 3rd element of the list start with 'STIM'? if split_line[2].startswith('STIM'): stim_time = split_line[0] cur_stim = split_line[2][idx] # print(stim_time, cur_stim) else: # nope; it starts with something other than 'STIM' resp_time = split_line[0] # replace XXX! cur_resp = split_line[2][idx] # replace YYY! # print(resp_time, cur_resp) # calculate RT RT = int(resp_time) - int(stim_time) # formula here # print('reaction time: ', RT) """ Explanation: Loop over the lines Convert the above into something that can be used to loop over the list. Start by just looping over the 6th and 7th rows: you should arrive at the same answer as above. You'll need logic for determining whether the current line starts with the string STIM. Strings have a method startswith for this! Use an if-else-construct. End of explanation """ # empty lists for reaction times rt_freq = [] rt_rare = [] for line in all_lines[5:]: split_line = line.split(field_sep) # does the 3rd element of the list start with 'STIM'? if split_line[2].startswith('STIM'): stim_time = split_line[0] cur_stim = split_line[2][idx] else: # nope; it starts with something other than 'STIM' resp_time = split_line[0] # replace XXX! cur_resp = split_line[2][idx] # replace YYY! # calculate RT RT = int(resp_time) - int(stim_time) # formula here # test if the current stimulus is in the `ascii_lowercase`-list if cur_stim in string.ascii_lowercase: rt_freq.append(RT) # else test if the current stimulus is in the `digits`-list elif cur_stim in string.digits: rt_rare.append(RT) """ Explanation: Saving the reaction times into lists Instead of printing out 1280 RT values, we want to save them into memory for later use (we need to calculate mean and median values over them). Start with two empty lists for reaction times: one for the frequent category of stimuli (letter) one for the rare category of stimuli (digit) and use the .append-method to add the values to the lists. End of explanation """ rt_freq = [] rt_rare = [] n_corr_freq = 0 n_corr_rare = 0 for line in all_lines[5:]: split_line = line.split(field_sep) # does the 3rd element of the list start with 'STIM'? if split_line[2].startswith('STIM'): stim_time = split_line[0] cur_stim = split_line[2][idx] else: # nope; it starts with something other than 'STIM' resp_time = split_line[0] # replace XXX! cur_resp = split_line[2][idx] # replace YYY! # calculate RT RT = int(resp_time) - int(stim_time) # formula here # test if the current stimulus is in the `ascii_lowercase`-list if cur_stim in string.ascii_lowercase: rt_freq.append(RT) if int(cur_resp) == 1: n_corr_freq = n_corr_freq + 1 # else test if the current stimulus is in the `digits`-list elif cur_stim in string.digits: rt_rare.append(RT) if cur_resp == '2': n_corr_rare = n_corr_rare + 1 rt_freq[:10] """ Explanation: Accuracy: is each response correct or incorrect? Modify the above code to also include logic for determining whether the response in correct or not. Initialise two counters for the number of correct responses. 
End of explanation """ # copy-paste your mean- and median-function here: def mean(values): return(sum(values)/len(values)) def median(values): return(sorted(values)[len(values) // 2]) # freq mean_rt_freq = 0.1 * mean(rt_freq) median_rt_freq = 0.1 * median(rt_freq) accuracy_freq = 100 * n_corr_freq / len(rt_freq) # rare mean_rt_rare = 100e-3 * mean(rt_rare) median_rt_rare = 100e-3 * median(rt_rare) accuracy_rare = 100 * n_corr_rare / len(rt_rare) print('Frequent category:') print('------------------') print('Mean:', mean_rt_freq) print('Median:', median_rt_freq) print('Accuracy:', accuracy_freq) print('Rare category:') print('--------------') print('Mean:', mean_rt_rare) print('Median:', median_rt_rare) print('Accuracy:', accuracy_rare) """ Explanation: Print out the mean and median RTs and the accuracies for frequent and rare stimuli use the functions you previously wrote as an exercise you'll have to copy the code into the present notebook and execute recall that times are given in the odd unit of '100's of microseconds' multiply by 100e-3 (i.e 0.1) to obtain milliseconds accuracy is simply the number of correct responses divided by the total number of responses End of explanation """ def read_log_file(logfile_name, field_sep='\t'): '''Read a single log file The default field-separator is set to be the tab-character (\t) Return the mean and median RT, and the accuracy, separately for the frequent and rare categories. This is done as a list (tuple) of 6 return values, in the order: (mean_rt_freq, median_rt_freq, accuracy_freq, mean_rt_rare, median_rt_rare, accuracy_rare) ''' # initialise rt_freq = [] rt_rare = [] n_corr_freq = 0 n_corr_rare = 0 # open file and read all its lines into a list fp = open(logfile_name, 'r') all_lines = fp.readlines() fp.close() # hard-code the index of the stimulus/response type/number idx = 5 # loop over lines from 6th onwards for line in all_lines[5:]: split_line = line.split(field_sep) # does the 3rd element of the list start with 'STIM'? if split_line[2].startswith('STIM'): stim_time = split_line[0] cur_stim = split_line[2][idx] else: # nope; it starts with something other than 'STIM' resp_time = split_line[0] # replace XXX! cur_resp = split_line[2][idx] # replace YYY! # calculate RT RT = int(resp_time) - int(stim_time) # formula here # test if the current stimulus is in the `ascii_lowercase`-list if cur_stim in string.ascii_lowercase: rt_freq.append(RT) if int(cur_resp) == 1: n_corr_freq = n_corr_freq + 1 # else test if the current stimulus is in the `digits`-list elif cur_stim in string.digits: rt_rare.append(RT) if cur_resp == '2': n_corr_rare = n_corr_rare + 1 # freq mean_rt_freq = 0.1 * mean(rt_freq) median_rt_freq = 0.1 * median(rt_freq) accuracy_freq = 100 * n_corr_freq / len(rt_freq) # rare mean_rt_rare = 100e-3 * mean(rt_rare) median_rt_rare = 100e-3 * median(rt_rare) accuracy_rare = 100 * n_corr_rare / len(rt_rare) return(mean_rt_freq, median_rt_freq, accuracy_freq, mean_rt_rare, median_rt_rare, accuracy_rare) """ Explanation: Convert all of the above into a function Now that we have code that works for one file, we can make it into a function and apply it on the other files (hoping they 'behave' the same way as the file we used to develop the code on...). 
End of explanation """ (mean_rt_freq, median_rt_freq, accuracy_freq, mean_rt_rare, median_rt_rare, accuracy_rare) = read_log_file(logfile_name) print('Frequent category:') print('------------------') print('Mean:', mean_rt_freq) print('Median:', median_rt_freq) print('Accuracy:', accuracy_freq) print('Rare category:') print('--------------') print('Mean:', mean_rt_rare) print('Median:', median_rt_rare) print('Accuracy:', accuracy_rare) logfile_name = '../src/logs/0048_MSB_2016-09-23.log' (mean_rt_freq, median_rt_freq, accuracy_freq, mean_rt_rare, median_rt_rare, accuracy_rare) = read_log_file(logfile_name) print('Frequent category:') print('------------------') print('Mean:', mean_rt_freq) print('Median:', median_rt_freq) print('Accuracy:', accuracy_freq) print('Rare category:') print('--------------') print('Mean:', mean_rt_rare) print('Median:', median_rt_rare) print('Accuracy:', accuracy_rare) """ Explanation: Test the function on the same file, then on a new one End of explanation """
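As a natural next step (an editor's sketch, not part of the original exercise), the function can be applied to every log file in the folder; the '../src/logs' path is the one used earlier in the notebook, and pandas is an extra dependency assumed only for the summary table.

from glob import glob
import pandas as pd

rows = []
for fname in sorted(glob('../src/logs/*.log')):
    # read_log_file returns the 6-tuple documented in its docstring
    results = read_log_file(fname)
    rows.append((fname,) + tuple(results))

columns = ['filename', 'mean_rt_freq', 'median_rt_freq', 'accuracy_freq',
           'mean_rt_rare', 'median_rt_rare', 'accuracy_rare']
summary = pd.DataFrame(rows, columns=columns)
print(summary.head())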
hadim/public_notebooks
Analysis/Fit_Ellipse/notebook.ipynb
mit
# Do some import %matplotlib inline import matplotlib.pyplot as plt import numpy as np from tifffile import TiffFile # Load the image tf = TiffFile("binary_cell.tif") # Get the numpy array a = tf.asarray() # Replace all 255 to 1 so the image is now made of "0" and "1" a[a == 255] = 1 print(np.unique(a)) _ = plt.imshow(a, interpolation="none", cmap="gray", origin='lower') """ Explanation: What is the image moment (or how to fit and ellipse) A moment (M) is a specific quantitative measure of a given distribution. The distribution can be one or multi-dimensional (wikipedia definition. Now let's try to have an intuitive view of what it is in the case of an image. Remember that an image is only a 2D distribution of a set of points. Each point being localized by two coordinates $x$ and $y$. So fo each pixel we have the following $f(x, y) = I_{x, y}$. Where $I_{x, y}$ corresponds to the intensity at the coordinate $x$ and $y$. Before going to intuition let's do some easy math. The formal definition of the image moment (2D and continuous) is: $$M_{pq} = \int_{-\infty}^{\infty} x^p y^q f(x, y) dx dy$$ And the 2D discrete equation is : $$M_{pq} = \sum_x \sum_y x^p y^q I(x,y)$$ So we see here the moment (M) depends on two parameters $p$ and $q$. They are called the order of the moment. Different order will give a different measure of the distribution we are looking at. For example let's define a simple 1D distribution of length $N$ $f(x) = A$. the zero order ($p=0$) moment of $A$ is the number of element : $M_0 = \sum_x x^0 = len(A)$ (for pythonist) the first order ($p=1$) moment of a $A$ if the sum of all the element : $M_1 = \sum_x x^1 = np.sum(A)$ (for pythonist) Now if we divide the first order moment by the zero order moment we have the mean : $$\bar{A} = \frac{1}{N} \sum_x x = \frac{M_1}{M_0}$$ The image moment Now let's try to apply this 1D example to an image. End of explanation """ # The sum of all value of 1 M_00 = np.sum(a) M_00 """ Explanation: The goal here is to use the image moment to fit an ellipse. The ellipse can be defined by four parameters : the center, the orientation, the major axis and the minor axis. Note it exists many different method to fit an ellipse. Find the centroid First we are going to detect the centroid (the center of the ellipse) of our object by computing the zero and first order of the image. Remember the discrete equation : $$M_{pq} = \sum_x \sum_y x^p y^q I(x,y)$$ Since we are working on a binary image (only $0$ and $1$) values we remove $I(x,y)$. The zero order moment is then : $$M_{00} = \sum_x \sum_y x^0 y^0$$ Let's do it in python. End of explanation """ # Here we get all the coordinates of pixel equal to 1 xx, yy = np.where(a == 1) M_10 = np.sum(xx) M_10 """ Explanation: Now let's compute the first order moment for $x$ : $$M_{10} = \sum_x \sum_y x^1 y^0$$ End of explanation """ M_01 = np.sum(yy) M_01 """ Explanation: Now let's compute the first order moment for $y$ : End of explanation """ C_x = M_10 / M_00 C_y = M_01 / M_00 print("C =", (C_x, C_y)) """ Explanation: So the centroid $C$ is given by : $$C_x = \bar{x} = \frac{M_{10}}{M_{00}}$$ $$C_y = \bar{y} = \frac{M_{01}}{M_{00}}$$ End of explanation """ plt.scatter(C_x, C_y, color='red', marker='+', s=100) _ = plt.imshow(a, interpolation="none", cmap="gray", origin='lower') """ Explanation: Let's verify it visually : End of explanation """ M_20 = np.sum(xx ** 2) M_02 = np.sum(yy ** 2) M_11 = np.sum(xx * yy) """ Explanation: Well it seems to be exact ! 
Find the major and minor axes Here it becomes a little bit tricky. The information about the object orientation can be derived using the second order central moments to construct a covariance matrix (see Wikipedia for more details). Here is a new concept : the central moment ($\mu$), which describes the distribution relative to the mean (unlike the raw moment (M), which describes the distribution relative to the origin). Note that the moment (M) is sometimes also called the raw moment. The discrete equation of the central moment is : $$\mu_{pq} = \sum_{x} \sum_{y} (x - \bar{x})^p(y - \bar{y})^q f(x,y)$$ with $\bar{x}=\frac{M_{10}}{M_{00}}$ and $\bar{y}=\frac{M_{01}}{M_{00}}$ Now it becomes difficult to get the intuition of what's going on next. From Wikipedia : The covariance matrix of the image $I(x,y)$ is : $$\operatorname{cov}[I(x,y)] = \begin{bmatrix} \mu'_{20} & \mu'_{11} \\ \mu'_{11} & \mu'_{02} \end{bmatrix}$$ The eigenvectors of the covariance matrix of the image correspond to the major and minor axes of the image intensity, so the orientation can thus be extracted from the angle of the eigenvector associated with the largest eigenvalue. It can be shown that this angle $\Theta$ is given by the following formula: $$\Theta = \frac{1}{2} \arctan \left( \frac{2\mu'_{11}}{\mu'_{20} - \mu'_{02}} \right)$$ Where : $$\mu'_{20} = \mu_{20} / \mu_{00} = M_{20}/M_{00} - \bar{x}^2$$ $$\mu'_{02} = \mu_{02} / \mu_{00} = M_{02}/M_{00} - \bar{y}^2$$ $$\mu'_{11} = \mu_{11} / \mu_{00} = M_{11}/M_{00} - \bar{x}\bar{y}$$ Let's first compute the second order raw moments ($M_{20}$, $M_{02}$ and $M_{11}$) : End of explanation """ mu_20 = M_20 / M_00 - C_x ** 2 mu_02 = M_02 / M_00 - C_y ** 2 mu_11 = M_11 / M_00 - C_x * C_y """ Explanation: Compute $\mu'_{20}$, $\mu'_{02}$ and $\mu'_{11}$ : End of explanation """ theta = 1/2 * np.arctan((2 * mu_11) / (mu_20 - mu_02)) # Convert it to degrees angle = np.rad2deg(theta) print("angle = {}°".format(angle)) """ Explanation: Get the orientation $\Theta$ : $$\Theta = \frac{1}{2} \arctan \left( \frac{2\mu'_{11}}{\mu'_{20} - \mu'_{02}} \right)$$ End of explanation """ delta = np.sqrt(4 * mu_11 ** 2 + (mu_20 - mu_02) ** 2) lambda_1 = ((mu_20 + mu_02) + delta) / 2 lambda_2 = ((mu_20 + mu_02) - delta) / 2 """ Explanation: Get the eigenvalues with this equation : $$\Delta = \sqrt{4{\mu'}_{11}^2 + ({\mu'}_{20}-{\mu'}_{02})^2}$$ $$\lambda_i = \frac{\mu'_{20} + \mu'_{02}}{2} \pm \frac{\Delta}{2}$$ End of explanation """ semi_major_length = np.sqrt(np.abs(lambda_1)) * 2 semi_minor_length = np.sqrt(np.abs(lambda_2)) * 2 major_length = semi_major_length * 2 minor_length = semi_minor_length * 2 print("Major axis = {}".format(major_length)) print("Minor axis = {}".format(minor_length)) """ Explanation: Get the major and minor axes from the eigenvalues : End of explanation """ cov = np.asarray([[mu_20, mu_11], [mu_11, mu_02]]) eigvalues, eigvectors = np.linalg.eig(cov) # Get the associated eigenvectors and eigenvalues eigval_1, eigval_2 = eigvalues eigvec_1, eigvec_2 = eigvectors[:, 0], eigvectors[:, 1] """ Explanation: Alternatively we can also compute (with Numpy) the orientation and axes length from the eigenvectors of the covariance matrix. 
$$\operatorname{cov}[I(x,y)] = \begin{bmatrix} \mu'_{20} & \mu'_{11} \\ \mu'_{11} & \mu'_{02} \end{bmatrix}$$ End of explanation """ theta = np.arctan2(eigvec_1[1], eigvec_1[0]) angle = np.rad2deg(theta) print("angle = {}°".format(angle)) semi_major_length = np.sqrt(np.abs(eigval_1)) * 2 semi_minor_length = np.sqrt(np.abs(eigval_2)) * 2 major_length = semi_major_length * 2 minor_length = semi_minor_length * 2 print("Major axis = {}".format(major_length)) print("Minor axis = {}".format(minor_length)) """ Explanation: Get the orientation from the first eigenvector with $ \Theta = \operatorname{atan2}(y, x) \quad$ and the axes length from the eigenvalues. End of explanation """ plt.figure() # Plot the centroid in red plt.scatter(C_x, C_y, color='red', marker='+', s=100) # Plot the first eigenvector scale = 100 x1, x2 = [C_x, eigvec_1[0] * scale] y1, y2 = [C_y, eigvec_1[1] * scale] plt.arrow(x1, y1, x2, y2, color='green', lw=1, head_width=20) # Show the image _ = plt.imshow(a, interpolation="none", cmap="gray", origin='lower') """ Explanation: We find the same value as above. Now let's see how our vector looks (which is supposed to define the major orientation of our object). End of explanation """ fig, ax = plt.subplots() # Plot the centroid in red ax.scatter(C_x, C_y, color='red', marker='+', s=100) # Plot the major axis x1 = C_x + semi_major_length * np.cos(theta) y1 = C_y + semi_major_length * np.sin(theta) x2 = C_x - semi_major_length * np.cos(theta) y2 = C_y - semi_major_length * np.sin(theta) ax.plot([x1, x2], [y1, y2], color='green', lw=1) # Plot the minor axis x1 = C_x + semi_minor_length * np.cos(theta + np.pi/2) y1 = C_y + semi_minor_length * np.sin(theta + np.pi/2) x2 = C_x - semi_minor_length * np.cos(theta + np.pi/2) y2 = C_y - semi_minor_length * np.sin(theta + np.pi/2) ax.plot([x1, x2], [y1, y2], color='green', lw=1) # Plot the ellipse angles = np.arange(0, 360, 1) * np.pi / 180 x = 0.5 * major_length * np.cos(angles) y = 0.5 * minor_length * np.sin(angles) R = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]) x, y = np.dot(R, np.array([x, y])) x += C_x y += C_y ax.plot(x, y, lw=1, color='red') # Show the image _ = ax.imshow(a, interpolation="none", cmap="gray", origin='lower', aspect='equal') """ Explanation: The orientation seems to be correct. Now we draw the major and minor axes. End of explanation """
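A possible cross-check of the manual moment calculation (an editor's addition, not in the original notebook) is scikit-image's regionprops, which computes the same quantities from image moments; note that it works in row/column order, so its orientation convention may differ from the derivation above by a sign or an axis swap.

from skimage.measure import label, regionprops

# First (and only) labelled region of the binary cell image
props = regionprops(label(a))[0]
print("centroid (row, col):", props.centroid)
print("orientation (radians):", props.orientation)
print("major axis length:", props.major_axis_length)
print("minor axis length:", props.minor_axis_length)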
JorisBolsens/PYNQ
Pynq-Z1/notebooks/examples/pmod_grove_tmp.ipynb
bsd-3-clause
from pynq.pl import Overlay Overlay("base.bit").download() """ Explanation: Grove Temperature Sensor 1.2 This example shows how to use the Grove Temperature Sensor v1.2 on the Pynq-Z1 board. You will also see how to plot a graph using matplotlib. The Grove Temperature sensor produces an analog signal, and requires an ADC. A Grove Temperature sensor and Pynq Grove Adapter, or Pynq Shield is required. The Grove Temperature Sensor, Pynq Grove Adapter, and Grove I2C ADC are used for this example. You can read a single value of temperature or read multiple values at regular intervals for a desired duration. At the end of this notebook, a Python only solution with single-sample read functionality is provided. 1. Load overlay End of explanation """ import math from pynq.iop import Grove_TMP from pynq.iop import PMODB from pynq.iop import PMOD_GROVE_G4 tmp = Grove_TMP(PMODB, PMOD_GROVE_G4) temperature = tmp.read() print(float("{0:.2f}".format(temperature)),'degree Celsius') """ Explanation: 2. Read single temperature This example shows on how to get a single temperature sample from the Grove TMP sensor. The Grove ADC is assumed to be attached to the GR4 connector of the StickIt. The StickIt module is assumed to be plugged in the 1st PMOD labeled JB. The Grove TMP sensor is connected to the other connector of the Grove ADC. Grove ADC provides a raw sample which is converted into resistance first and then converted into temperature. End of explanation """ import time %matplotlib inline import matplotlib.pyplot as plt tmp.set_log_interval_ms(100) tmp.start_log() # Change input during this time time.sleep(10) tmp_log = tmp.get_log() plt.plot(range(len(tmp_log)), tmp_log, 'ro') plt.title('Grove Temperature Plot') min_tmp_log = min(tmp_log) max_tmp_log = max(tmp_log) plt.axis([0, len(tmp_log), min_tmp_log, max_tmp_log]) plt.show() """ Explanation: 3. Start logging once every 100ms for 10 seconds Executing the next cell will start logging the temperature sensor values every 100ms, and will run for 10s. You can try touch/hold the temperature sensor to vary the measured temperature. You can vary the logging interval and the duration by changing the values 100 and 10 in the cellbelow. The raw samples are stored in the internal memory, and converted into temperature values. End of explanation """ from time import sleep from math import log from pynq.iop import PMOD_GROVE_G3 from pynq.iop import PMOD_GROVE_G4 from pynq.iop.pmod_iic import Pmod_IIC class Python_Grove_TMP(Pmod_IIC): """This class controls the grove temperature sensor. This class inherits from the PMODIIC class. Attributes ---------- iop : _IOP The _IOP object returned from the DevMode. scl_pin : int The SCL pin number. sda_pin : int The SDA pin number. iic_addr : int The IIC device address. """ def __init__(self, pmod_id, gr_pins, model = 'v1.2'): """Return a new instance of a grove OLED object. Parameters ---------- pmod_id : int The PMOD ID (1, 2) corresponding to (PMODA, PMODB). gr_pins: list The group pins on Grove Adapter. G3 or G4 is valid. model : string Temperature sensor model (can be found on the device). 
""" if gr_pins in [PMOD_GROVE_G3, PMOD_GROVE_G4]: [scl_pin,sda_pin] = gr_pins else: raise ValueError("Valid group numbers are G3 and G4.") # Each revision has its own B value if model == 'v1.2': # v1.2 uses thermistor NCP18WF104F03RC self.bValue = 4250 elif model == 'v1.1': # v1.1 uses thermistor NCP18WF104F03RC self.bValue = 4250 else: # v1.0 uses thermistor TTC3A103*39H self.bValue = 3975 super().__init__(pmod_id, scl_pin, sda_pin, 0x50) # Initialize the Grove ADC self.send([0x2,0x20]); def read(self): """Read temperature in Celsius from grove temperature sensor. Parameters ---------- None Returns ------- float Temperature reading in Celsius. """ val = self._read_grove_adc() R = 4095.0/val - 1.0 temp = 1.0/(log(R)/self.bValue + 1/298.15)-273.15 return temp def _read_grove_adc(self): self.send([0]) bytes = self.receive(2) return 2*(((bytes[0] & 0x0f) << 8) | bytes[1]) from pynq import PL # Flush IOP state PL.reset() py_tmp = Python_Grove_TMP(PMODB, PMOD_GROVE_G4) temperature = py_tmp.read() print(float("{0:.2f}".format(temperature)),'degree Celsius') """ Explanation: 4. A Pure Python class to exercise the AXI IIC Controller inheriting from PMOD_IIC This class is ported from http://www.seeedstudio.com/wiki/Grove_-_Temperature_Sensor. End of explanation """
Hvass-Labs/TensorFlow-Tutorials
12_Adversarial_Noise_MNIST.ipynb
mit
%matplotlib inline import matplotlib.pyplot as plt import tensorflow as tf import numpy as np from sklearn.metrics import confusion_matrix import time from datetime import timedelta import math """ Explanation: TensorFlow Tutorial #12 Adversarial Noise for MNIST by Magnus Erik Hvass Pedersen / GitHub / Videos on YouTube WARNING! This tutorial does not work with TensorFlow v.2 and it would take too much effort to update this tutorial to the new API. Introduction The previous Tutorial #11 showed how to find so-called adversarial examples for a state-of-the-art neural network, which caused the network to mis-classify images even though they looked identical to the human eye. For example, an image of a parrot became mis-classified as a bookcase when adding the adversarial noise, but the image looked completely unchanged to the human eye. The adversarial noise in Tutorial #11 was found through an optimization process for each individual image. Because the noise was specialized for each image, it may not generalize and have any effect on other images. In this tutorial we will instead find adversarial noise that causes nearly all input images to become mis-classified as a desired target-class. The MNIST data-set of hand-written digits is used as an example. The adversarial noise is now clearly visible to the human eye, but the digits are still easily identified by a human, while the neural network mis-classifies nearly all the images. In this tutorial we will also try and make the neural network immune to adversarial noise. Tutorial #11 used NumPy for the adversarial optimization. In this tutorial we will show how to implement the optimization process directly in TensorFlow. This might be faster, especially when using a GPU, because it does not need to copy data to and from the GPU in each iteration. It is recommended that you first study Tutorial #11. You should also be familiar with TensorFlow in general, see e.g. Tutorials #01 and #02. Flowchart The following chart shows roughly how the data flows in the Convolutional Neural Network that is implemented below. This example shows an input image with a hand-written 7-digit. The adversarial noise is then added to the image. Red noise-pixels are positive and make the input image darker in those pixels, while blue noise-pixels are negative and make the input lighter in those pixels. The noisy image is then fed to the neural network which results in a predicted class-number. In this case the adversarial noise fools the network into believing that the 7-digit shows a 3-digit. The noise is clearly visible to humans, but the 7-digit is still easily identified by a human. The remarkable thing here, is that a single noise-pattern causes the neural network to mis-classify almost all input images to a desired target-class. There are two separate optimization procedures in this neural network. First we optimize the variables of the neural network so as to classify images in the training-set. This is the normal optimization procedure for neural networks. Once the classification accuracy is good enough, we switch to the second optimization procedure, which tries to find a single pattern of adversarial noise, that causes all input images to be mis-classified as the given target-class. The two optimization procedures are completely separate. The first procedure only modifies the variables of the neural network, while the second procedure only modifies the adversarial noise. 
Imports End of explanation """ tf.__version__ """ Explanation: This was developed using Python 3.6 (Anaconda) and TensorFlow version: End of explanation """ from mnist import MNIST data = MNIST(data_dir="data/MNIST/") """ Explanation: Load Data The MNIST data-set is about 12 MB and will be downloaded automatically if it is not located in the given path. End of explanation """ print("Size of:") print("- Training-set:\t\t{}".format(data.num_train)) print("- Validation-set:\t{}".format(data.num_val)) print("- Test-set:\t\t{}".format(data.num_test)) """ Explanation: The MNIST data-set has now been loaded and consists of 70.000 images and class-numbers for the images. The data-set is split into 3 mutually exclusive sub-sets. We will only use the training and test-sets in this tutorial. End of explanation """ # The number of pixels in each dimension of an image. img_size = data.img_size # The images are stored in one-dimensional arrays of this length. img_size_flat = data.img_size_flat # Tuple with height and width of images used to reshape arrays. img_shape = data.img_shape # Number of classes, one class for each of 10 digits. num_classes = data.num_classes # Number of colour channels for the images: 1 channel for gray-scale. num_channels = data.num_channels """ Explanation: Copy some of the data-dimensions for convenience. End of explanation """ def plot_images(images, cls_true, cls_pred=None, noise=0.0): assert len(images) == len(cls_true) == 9 # Create figure with 3x3 sub-plots. fig, axes = plt.subplots(3, 3) fig.subplots_adjust(hspace=0.3, wspace=0.3) for i, ax in enumerate(axes.flat): # Get the i'th image and reshape the array. image = images[i].reshape(img_shape) # Add the adversarial noise to the image. image += noise # Ensure the noisy pixel-values are between 0 and 1. image = np.clip(image, 0.0, 1.0) # Plot image. ax.imshow(image, cmap='binary', interpolation='nearest') # Show true and predicted classes. if cls_pred is None: xlabel = "True: {0}".format(cls_true[i]) else: xlabel = "True: {0}, Pred: {1}".format(cls_true[i], cls_pred[i]) # Show the classes as the label on the x-axis. ax.set_xlabel(xlabel) # Remove ticks from the plot. ax.set_xticks([]) ax.set_yticks([]) # Ensure the plot is shown correctly with multiple plots # in a single Notebook cell. plt.show() """ Explanation: Helper-function for plotting images Function used to plot 9 images in a 3x3 grid, and writing the true and predicted classes below each image. If the noise is supplied then it is added to all images. End of explanation """ # Get the first images from the test-set. images = data.x_test[0:9] # Get the true classes for those images. cls_true = data.y_test_cls[0:9] # Plot the images and labels using our helper-function above. plot_images(images=images, cls_true=cls_true) """ Explanation: Plot a few images to see if data is correct End of explanation """ x = tf.placeholder(tf.float32, shape=[None, img_size_flat], name='x') """ Explanation: TensorFlow Graph The computational graph for the neural network will now be constructed using TensorFlow and PrettyTensor. As usual, we need to create placeholder variables for feeding images into the graph and then we add the adversarial noise to the images. The noisy images are then used as input to a convolutional neural network. There are two separate optimization procedures for this network. A normal optimization procedure for the variables of the neural network itself, and another optimization procedure for the adversarial noise. 
Both optimization procedures are implemented directly in TensorFlow. Placeholder variables Placeholder variables provide the input to the computational graph in TensorFlow that we may change each time we execute the graph. We call this feeding the placeholder variables. First we define the placeholder variable for the input images. This allows us to change the images that are input to the TensorFlow graph. This is a so-called tensor, which just means that it is a multi-dimensional array. The data-type is set to float32 and the shape is set to [None, img_size_flat], where None means that the tensor may hold an arbitrary number of images with each image being a vector of length img_size_flat. End of explanation """ x_image = tf.reshape(x, [-1, img_size, img_size, num_channels]) """ Explanation: The convolutional layers expect x to be encoded as a 4-dim tensor so we have to reshape it so its shape is instead [num_images, img_height, img_width, num_channels]. Note that img_height == img_width == img_size and num_images can be inferred automatically by using -1 for the size of the first dimension. So the reshape operation is: End of explanation """ y_true = tf.placeholder(tf.float32, shape=[None, num_classes], name='y_true') """ Explanation: Next we have the placeholder variable for the true labels associated with the images that were input in the placeholder variable x. The shape of this placeholder variable is [None, num_classes] which means it may hold an arbitrary number of labels and each label is a vector of length num_classes which is 10 in this case. End of explanation """ y_true_cls = tf.argmax(y_true, axis=1) """ Explanation: We could also have a placeholder variable for the class-number, but we will instead calculate it using argmax. Note that this is a TensorFlow operator so nothing is calculated at this point. End of explanation """ noise_limit = 0.35 """ Explanation: Adversarial Noise The pixels in the input image are float-values between 0.0 and 1.0. The adversarial noise is a number that is added or subtracted from the pixels in the input image. The limit of the adversarial noise is set to 0.35 so the noise will be between &plusmn;0.35. End of explanation """ noise_l2_weight = 0.02 """ Explanation: The optimizer for the adversarial noise will try and minimize two loss-measures: (1) The normal loss-measure for the neural network, so we will find the noise that gives the best classification accuracy for the adversarial target-class; and (2) the so-called L2-loss-measure which tries to keep the noise as low as possible. The following weight determines how important the L2-loss is compared to the normal loss-measure. An L2-weight close to zero usually works best. End of explanation """ ADVERSARY_VARIABLES = 'adversary_variables' """ Explanation: When we create the new variable for the noise, we must inform TensorFlow which variable-collections that it belongs to, so we can later inform the two optimizers which variables to update. First we define a name for our new variable-collection. This is just a string. End of explanation """ collections = [tf.GraphKeys.GLOBAL_VARIABLES, ADVERSARY_VARIABLES] """ Explanation: Then we create a list of the collections that we want the new noise-variable to belong to. If we add the noise-variable to the collection tf.GraphKeys.VARIABLES then it will also get initialized with all the other variables in the TensorFlow graph, but it will not get optimized. This is a bit confusing. 
End of explanation """ x_noise = tf.Variable(tf.zeros([img_size, img_size, num_channels]), name='x_noise', trainable=False, collections=collections) """ Explanation: Now we can create the new variable for the adversarial noise. It will be initialized to zero. It will not be trainable, so it will not be optimized along with the other variables of the neural network. This allows us to create two separate optimization procedures. End of explanation """ x_noise_clip = tf.assign(x_noise, tf.clip_by_value(x_noise, -noise_limit, noise_limit)) """ Explanation: The adversarial noise will be limited / clipped to the given &plusmn; noise-limit that we set above. Note that this is actually not executed at this point in the computational graph, but will instead be executed after the optimization-step, see further below. End of explanation """ x_noisy_image = x_image + x_noise """ Explanation: The noisy image is just the sum of the input image and the adversarial noise. End of explanation """ x_noisy_image = tf.clip_by_value(x_noisy_image, 0.0, 1.0) """ Explanation: When adding the noise to the input image, it may overflow the boundaries for a valid image, so we clip / limit the noisy image to ensure its pixel-values are between 0 and 1. End of explanation """ # Start the network with the noisy input image. net = x_noisy_image # 1st convolutional layer. net = tf.layers.conv2d(inputs=net, name='layer_conv1', padding='same', filters=16, kernel_size=5, activation=tf.nn.relu) net = tf.layers.max_pooling2d(inputs=net, pool_size=2, strides=2) # 2nd convolutional layer. net = tf.layers.conv2d(inputs=net, name='layer_conv2', padding='same', filters=36, kernel_size=5, activation=tf.nn.relu) net = tf.layers.max_pooling2d(inputs=net, pool_size=2, strides=2) # Flatten layer.This should eventually be replaced by: # net = tf.layers.flatten(net) net = tf.contrib.layers.flatten(net) # 1st fully-connected / dense layer. net = tf.layers.dense(inputs=net, name='layer_fc1', units=128, activation=tf.nn.relu) # 2nd fully-connected / dense layer. net = tf.layers.dense(inputs=net, name='layer_fc_out', units=num_classes, activation=None) # Unscaled output of the network. logits = net # Softmax output of the network. y_pred = tf.nn.softmax(logits=logits) # Loss measure to be optimized. cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_true, logits=logits) loss = tf.reduce_mean(cross_entropy) """ Explanation: Convolutional Neural Network We will use the Layers API to construct the convolutional neural network, see Tutorial #03-B. End of explanation """ [var.name for var in tf.trainable_variables()] """ Explanation: Optimizer for Normal Training This is a list of the variables for the neural network that will be trained during the normal optimization procedure. Note that 'x_noise:0' is not in the list, so the adversarial noise is not being optimized in the normal procedure. End of explanation """ optimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(loss) """ Explanation: Optimization of these variables in the neural network is done with the Adam-optimizer using the loss-measure that was returned from PrettyTensor when we constructed the neural network above. Note that optimization is not performed at this point. In fact, nothing is calculated at all, we just add the optimizer-object to the TensorFlow graph for later execution. 
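A side note that may clarify why the two optimization procedures do not interfere (an illustration, not an extra step in the tutorial): minimize() defaults to optimizing the variables in the trainable-variables collection, so the non-trainable x_noise variable is automatically left alone here. The optimizer above could equivalently have been written with the variable list spelled out:

# Equivalent, more explicit form of the normal optimizer (sketch only).
optimizer_explicit = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(
    loss, var_list=tf.trainable_variables())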
End of explanation """ adversary_variables = tf.get_collection(ADVERSARY_VARIABLES) """ Explanation: Optimizer for Adversarial Noise Get the list of variables that must be optimized in the second procedure for the adversarial noise. End of explanation """ [var.name for var in adversary_variables] """ Explanation: Show the list of variable-names. There is only one, which is the adversarial noise variable that we created above. End of explanation """ l2_loss_noise = noise_l2_weight * tf.nn.l2_loss(x_noise) """ Explanation: We will combine the loss-function for the normal optimization with a so-called L2-loss for the noise-variable. This should result in the minimum values for the adversarial noise along with the best classification accuracy. The L2-loss is scaled by a weight that is typically set close to zero. End of explanation """ loss_adversary = loss + l2_loss_noise """ Explanation: Combine the normal loss-function with the L2-loss for the adversarial noise. End of explanation """ optimizer_adversary = tf.train.AdamOptimizer(learning_rate=1e-2).minimize(loss_adversary, var_list=adversary_variables) """ Explanation: We can now create the optimizer for the adversarial noise. Because this optimizer is not supposed to update all the variables of the neural network, we must give it a list of the variables that we want updated, which is the variable for the adversarial noise. Also note the learning-rate is much greater than for the normal optimizer above. End of explanation """ y_pred_cls = tf.argmax(y_pred, axis=1) """ Explanation: We have now created two optimizers for the neural network, one for the variables of the neural network and another for the single variable with the adversarial noise. Performance Measures We need a few more operations in the TensorFlow graph which will make it easier for us to display the progress to the user during optimization. First we calculate the predicted class number from the output of the Neural Network y_pred, which is a vector with 10 elements. The class number is the index of the largest element. End of explanation """ correct_prediction = tf.equal(y_pred_cls, y_true_cls) """ Explanation: Then we create a vector of booleans telling us whether the predicted class equals the true class of each image. End of explanation """ accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) """ Explanation: The classification accuracy is calculated by first type-casting the vector of booleans to floats, so that False becomes 0 and True becomes 1, and then taking the average of these numbers. End of explanation """ session = tf.Session() """ Explanation: TensorFlow Run Create TensorFlow session Once the TensorFlow graph has been created, we have to create a TensorFlow session which is used to execute the graph. End of explanation """ session.run(tf.global_variables_initializer()) """ Explanation: Initialize variables The variables for weights and biases must be initialized before we start optimizing them. End of explanation """ def init_noise(): session.run(tf.variables_initializer([x_noise])) """ Explanation: This is a helper-function for initializing / resetting the adversarial noise to zero. End of explanation """ init_noise() """ Explanation: Call the function to initialize the adversarial noise. End of explanation """ train_batch_size = 64 """ Explanation: Helper-function to perform optimization iterations There are 55,000 images in the training-set. It takes a long time to calculate the gradient of the model using all these images. 
We therefore only use a small batch of images in each iteration of the optimizer. If your computer crashes or becomes very slow because you run out of RAM, then you may try and lower this number, but you may then need to perform more optimization iterations. End of explanation """ def optimize(num_iterations, adversary_target_cls=None): # Start-time used for printing time-usage below. start_time = time.time() for i in range(num_iterations): # Get a batch of training examples. # x_batch now holds a batch of images and # y_true_batch are the true labels for those images. x_batch, y_true_batch, _ = data.random_batch(batch_size=train_batch_size) # If we are searching for the adversarial noise, then # use the adversarial target-class instead. if adversary_target_cls is not None: # The class-labels are One-Hot encoded. # Set all the class-labels to zero. y_true_batch = np.zeros_like(y_true_batch) # Set the element for the adversarial target-class to 1. y_true_batch[:, adversary_target_cls] = 1.0 # Put the batch into a dict with the proper names # for placeholder variables in the TensorFlow graph. feed_dict_train = {x: x_batch, y_true: y_true_batch} # If doing normal optimization of the neural network. if adversary_target_cls is None: # Run the optimizer using this batch of training data. # TensorFlow assigns the variables in feed_dict_train # to the placeholder variables and then runs the optimizer. session.run(optimizer, feed_dict=feed_dict_train) else: # Run the adversarial optimizer instead. # Note that we have 'faked' the class above to be # the adversarial target-class instead of the true class. session.run(optimizer_adversary, feed_dict=feed_dict_train) # Clip / limit the adversarial noise. This executes # another TensorFlow operation. It cannot be executed # in the same session.run() as the optimizer, because # it may run in parallel so the execution order is not # guaranteed. We need the clip to run after the optimizer. session.run(x_noise_clip) # Print status every 100 iterations. if (i % 100 == 0) or (i == num_iterations - 1): # Calculate the accuracy on the training-set. acc = session.run(accuracy, feed_dict=feed_dict_train) # Message for printing. msg = "Optimization Iteration: {0:>6}, Training Accuracy: {1:>6.1%}" # Print it. print(msg.format(i, acc)) # Ending time. end_time = time.time() # Difference between start and end-times. time_dif = end_time - start_time # Print the time-usage. print("Time usage: " + str(timedelta(seconds=int(round(time_dif))))) """ Explanation: Below is the function for performing a number of optimization iterations so as to gradually improve the variables of the neural network. In each iteration, a new batch of data is selected from the training-set and then TensorFlow executes the optimizer using those training samples. The progress is printed every 100 iterations. This function is similar to the previous tutorials, except that it now takes an argument for the adversarial target-class. When this target-class is set to an integer, it will be used instead of the true class-number for the training-data. The adversarial optimizer is also used instead of the normal optimizer, and after each step of the adversarial optimizer, the noise will be limited / clipped to the allowed range. This optimizes the adversarial noise and ignores the other variables of the neural network. End of explanation """ def get_noise(): # Run the TensorFlow session to retrieve the contents of # the x_noise variable inside the graph. 
noise = session.run(x_noise) return np.squeeze(noise) """ Explanation: Helper-functions for getting and plotting the noise This function gets the adversarial noise from inside the TensorFlow graph. End of explanation """ def plot_noise(): # Get the adversarial noise from inside the TensorFlow graph. noise = get_noise() # Print statistics. print("Noise:") print("- Min:", noise.min()) print("- Max:", noise.max()) print("- Std:", noise.std()) # Plot the noise. plt.imshow(noise, interpolation='nearest', cmap='seismic', vmin=-1.0, vmax=1.0) """ Explanation: This function plots the adversarial noise and prints some statistics. End of explanation """ def plot_example_errors(cls_pred, correct): # This function is called from print_test_accuracy() below. # cls_pred is an array of the predicted class-number for # all images in the test-set. # correct is a boolean array whether the predicted class # is equal to the true class for each image in the test-set. # Negate the boolean array. incorrect = (correct == False) # Get the images from the test-set that have been # incorrectly classified. images = data.x_test[incorrect] # Get the predicted classes for those images. cls_pred = cls_pred[incorrect] # Get the true classes for those images. cls_true = data.y_test_cls[incorrect] # Get the adversarial noise from inside the TensorFlow graph. noise = get_noise() # Plot the first 9 images. plot_images(images=images[0:9], cls_true=cls_true[0:9], cls_pred=cls_pred[0:9], noise=noise) """ Explanation: Helper-function to plot example errors Function for plotting examples of images from the test-set that have been mis-classified. End of explanation """ def plot_confusion_matrix(cls_pred): # This is called from print_test_accuracy() below. # cls_pred is an array of the predicted class-number for # all images in the test-set. # Get the true classifications for the test-set. cls_true = data.y_test_cls # Get the confusion matrix using sklearn. cm = confusion_matrix(y_true=cls_true, y_pred=cls_pred) # Print the confusion matrix as text. print(cm) """ Explanation: Helper-function to plot confusion matrix End of explanation """ # Split the test-set into smaller batches of this size. test_batch_size = 256 def print_test_accuracy(show_example_errors=False, show_confusion_matrix=False): # Number of images in the test-set. num_test = data.num_test # Allocate an array for the predicted classes which # will be calculated in batches and filled into this array. cls_pred = np.zeros(shape=num_test, dtype=np.int) # Now calculate the predicted classes for the batches. # We will just iterate through all the batches. # There might be a more clever and Pythonic way of doing this. # The starting index for the next batch is denoted i. i = 0 while i < num_test: # The ending index for the next batch is denoted j. j = min(i + test_batch_size, num_test) # Get the images from the test-set between index i and j. images = data.x_test[i:j, :] # Get the associated labels. labels = data.y_test[i:j, :] # Create a feed-dict with these images and labels. feed_dict = {x: images, y_true: labels} # Calculate the predicted class using TensorFlow. cls_pred[i:j] = session.run(y_pred_cls, feed_dict=feed_dict) # Set the start-index for the next batch to the # end-index of the current batch. i = j # Convenience variable for the true class-numbers of the test-set. cls_true = data.y_test_cls # Create a boolean array whether each image is correctly classified. correct = (cls_true == cls_pred) # Calculate the number of correctly classified images. 
# When summing a boolean array, False means 0 and True means 1. correct_sum = correct.sum() # Classification accuracy is the number of correctly classified # images divided by the total number of images in the test-set. acc = float(correct_sum) / num_test # Print the accuracy. msg = "Accuracy on Test-Set: {0:.1%} ({1} / {2})" print(msg.format(acc, correct_sum, num_test)) # Plot some examples of mis-classifications, if desired. if show_example_errors: print("Example errors:") plot_example_errors(cls_pred=cls_pred, correct=correct) # Plot the confusion matrix, if desired. if show_confusion_matrix: print("Confusion Matrix:") plot_confusion_matrix(cls_pred=cls_pred) """ Explanation: Helper-function for showing the performance Function for printing the classification accuracy on the test-set. It takes a while to compute the classification for all the images in the test-set, that's why the results are re-used by calling the above functions directly from this function, so the classifications don't have to be recalculated by each function. Note that this function can use a lot of computer memory, which is why the test-set is split into smaller batches. If you have little RAM in your computer and it crashes, then you can try and lower the batch-size. End of explanation """ optimize(num_iterations=1000) """ Explanation: Normal optimization of neural network First we perform 1000 optimization iterations with the normal optimizer. This finds the variables that makes the neural network perform well on the training-set. The adversarial noise is not effective yet because it has only been initialized to zero above and it is not being updated during this optimization. End of explanation """ print_test_accuracy(show_example_errors=True) """ Explanation: The classification accuracy is now about 96-97% on the test-set. (This will vary each time you run this Python Notebook). End of explanation """ init_noise() """ Explanation: Find the adversarial noise Before we start optimizing the adversarial noise, we first initialize it to zero. This was already done above but it is repeated here in case you want to re-run this code with another target-class. End of explanation """ optimize(num_iterations=1000, adversary_target_cls=3) """ Explanation: Now perform optimization of the adversarial noise. This uses the adversarial optimizer instead of the normal optimizer, which means that it only optimizes the variable for the adversarial noise, while ignoring all the other variables of the neural network. End of explanation """ plot_noise() """ Explanation: The adversarial noise has now been optimized and it can be shown in a plot. The red pixels show positive noise-values and the blue pixels show negative noise-values. This noise-pattern is added to every input image. The positive (red) noise-values makes the pixels darker and the negative (blue) noise-values makes the pixels brighter. Examples of this are shown below. End of explanation """ print_test_accuracy(show_example_errors=True, show_confusion_matrix=True) """ Explanation: When this noise is added to all the images in the test-set, the result is typically a classification accuracy of 10-15% depending on the target-class that was chosen. We can also see from the confusion matrix that most images in the test-set are now classified as the desired target-class - although some of the target-classes require more adversarial noise than others. 
So we have found adversarial noise that makes the neural network mis-classify almost all images in the test-set as our desired target-class. We can also show some examples of mis-classified images with the adversarial noise. The noise is clearly visible but the digits are still easily identified by the human eye. End of explanation """ def find_all_noise(num_iterations=1000): # Adversarial noise for all target-classes. all_noise = [] # For each target-class. for i in range(num_classes): print("Finding adversarial noise for target-class:", i) # Reset the adversarial noise to zero. init_noise() # Optimize the adversarial noise. optimize(num_iterations=num_iterations, adversary_target_cls=i) # Get the adversarial noise from inside the TensorFlow graph. noise = get_noise() # Append the noise to the array. all_noise.append(noise) # Print newline. print() return all_noise all_noise = find_all_noise(num_iterations=300) """ Explanation: Adversarial noise for all target-classes This is a helper-function for finding the adversarial noise for all target-classes. The function loops over all the class-numbers from 0 to 9 and runs the optimization above. The results are then stored in an array. End of explanation """ def plot_all_noise(all_noise): # Create figure with 10 sub-plots. fig, axes = plt.subplots(2, 5) fig.subplots_adjust(hspace=0.2, wspace=0.1) # For each sub-plot. for i, ax in enumerate(axes.flat): # Get the adversarial noise for the i'th target-class. noise = all_noise[i] # Plot the noise. ax.imshow(noise, cmap='seismic', interpolation='nearest', vmin=-1.0, vmax=1.0) # Show the classes as the label on the x-axis. ax.set_xlabel(i) # Remove ticks from the plot. ax.set_xticks([]) ax.set_yticks([]) # Ensure the plot is shown correctly with multiple plots # in a single Notebook cell. plt.show() plot_all_noise(all_noise) """ Explanation: Plot the adversarial noise for all target-classes This is a helper-function for plotting a grid with the adversarial noise for all target-classes 0 to 9. End of explanation """ def make_immune(target_cls, num_iterations_adversary=500, num_iterations_immune=200): print("Target-class:", target_cls) print("Finding adversarial noise ...") # Find the adversarial noise. optimize(num_iterations=num_iterations_adversary, adversary_target_cls=target_cls) # Newline. print() # Print classification accuracy. print_test_accuracy(show_example_errors=False, show_confusion_matrix=False) # Newline. print() print("Making the neural network immune to the noise ...") # Try and make the neural network immune to this noise. # Note that the adversarial noise has not been reset to zero # so the x_noise variable still holds the noise. # So we are training the neural network to ignore the noise. optimize(num_iterations=num_iterations_immune) # Newline. print() # Print classification accuracy. print_test_accuracy(show_example_errors=False, show_confusion_matrix=False) """ Explanation: Red pixels show positive noise values, and blue pixels show negative noise values. In some of these noise-images you can see traces of the numbers. For example, the noise for target-class 0 shows a red circle surrounded by blue. This means that a little noise will be added to the input image in the shape of a circle, and it will dampen the other pixels. This is sufficient for most input images in the MNIST data-set to be mis-classified as a 0. Another example is the noise for 3 which also shows traces of the number 3 with red pixels. But the noise for the other classes is less obvious. 
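As a quick optional check (a sketch that is not part of the original notebook), the visual impression can be backed up with numbers by comparing the L2-norms of the noise patterns found above, which shows directly how much noise each target-class required:

# Compare how much adversarial noise each target-class needed.
for cls in range(num_classes):
    noise_norm = np.sqrt(np.sum(all_noise[cls] ** 2))
    print("Target-class {0}: L2-norm of noise = {1:.2f}".format(cls, noise_norm))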
Immunity to adversarial noise We will now try and make the neural network immune to adversarial noise. We do this by re-training the neural network to ignore the adversarial noise. This process can be repeated a number of times. Helper-function to make a neural network immune to noise This is the helper-function for making the neural network immune to adversarial noise. First it runs the optimization to find the adversarial noise. Then it runs the normal optimization to make the neural network immune to that noise. End of explanation """ make_immune(target_cls=3) """ Explanation: Make immune to noise for target-class 3 First try and make the neural network immune to the adverserial noise for targer-class 3. First we find the adversarial noise that causes the neural network to mis-classify most of the images in the test-set. Then we run the normal optimization which fine-tunes the variables of the neural network to ignore this noise and this brings the classification accuracy for the noisy images up to 95-97% again. End of explanation """ make_immune(target_cls=3) """ Explanation: Now try and run it again. It is now more difficult to find adversarial noise for the target-class 3. The neural network seems to have become somewhat immune to adversarial noise. End of explanation """ for i in range(10): make_immune(target_cls=i) # Print newline. print() """ Explanation: Make immune to noise for all target-classes Now try and make the neural network immune to adversarial noise for all target-classes. Unfortunately this does not seem to work so well. End of explanation """ for i in range(10): make_immune(target_cls=i) # Print newline. print() make_immune(target_cls=i) # Print newline. print() """ Explanation: Make immune to all target-classes (double runs) Now try and use double-runs to make the neural network immune to adversarial noise for all target-classes. Unfortunately this does not seem to work so well either. Making the neural network immune to one adversarial target-class appears to cancel the immunity towards the other target-classes. End of explanation """ plot_noise() """ Explanation: Plot the adversarial noise We have now performed many optimizations of both the neural network and the adversarial noise. Let us see how the adversarial noise looks now. End of explanation """ print_test_accuracy(show_example_errors=True, show_confusion_matrix=True) """ Explanation: Interestingly, the neural network now has a higher classification accuracy on noisy images than we had on clean images before all these optimizations. End of explanation """ init_noise() """ Explanation: Performance on clean images Now let us see how the neural network performs on clean images so we reset the adversarial noise to zero. End of explanation """ print_test_accuracy(show_example_errors=True, show_confusion_matrix=True) """ Explanation: The neural network now performs worse on clean images compared to noisy images. End of explanation """ # This has been commented out in case you want to modify and experiment # with the Notebook without having to restart it. # session.close() """ Explanation: Close TensorFlow Session We are now done using TensorFlow, so we close the session to release its resources. End of explanation """
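An optional follow-up, sketched here under the assumption that the session above is still open and that get_noise() is defined as before, is to persist the final adversarial noise so it can be reused later without re-running the optimization (the filename is made up for illustration):

# Save the current adversarial noise to disk (sketch only).
final_noise = get_noise()
np.save('adversarial_noise.npy', final_noise)

# It could later be restored with:
# reloaded_noise = np.load('adversarial_noise.npy')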
petermchale/yeast_bioinformatics
analysis.ipynb
mit
import sys, os sys.path.append(os.getcwd() + '/source') from extract import createEnergyMatrix energy_matrix = createEnergyMatrix('data/Gal4_affinity.in') """ Explanation: Yeast bioinformatic analysis This Notebook lives at Github. Here is a rendered version of this notebook. Research Question The eukaryotic genome is adorned by molecules called transcription factors (TFs). At any given time, some of these are regulating gene expression, e.g. by interacting with RNA polymerase, but others are not. How can we distinguish the functional TF-DNA binding events from a potentially large background of non-functional binding events? Quantifying TF-DNA binding To approach this question, we first need to quantify the strength with which TFs bind DNA. A TF binds DNA by making contact with a sequence of $L$ nucleotides \begin{equation} \vec{s} = \langle s_1, s_2, \ldots, s_L\rangle, \end{equation} where $s_i \in {A,C,G,T}$. Denote by $E(\vec{s})$ the binding energy of a given TF to a DNA sub-sequence $\vec{s}$. With binding lengths of $L = 10-20$ nucleotides, there are too many possible $\vec{s}$ to measure $E(\vec{s})$ exhaustively. Fortunately, the contribution of each nucleotide to the binding energy of the sub-sequence is approximately independent and additive: \begin{equation} E(\vec{s}) = \sum_{i=1}^L \epsilon_i(s_i), \end{equation} reducing the impractical problem of determining the large number of values of $E(\vec{s})$ to the practical problem of the determining the $L\times 4$ energy matrix, $\epsilon_i(s)$. This matrix has been determined for a TF called Gal4 using in vitro measurements of the equilibrium binding constants \begin{equation} K(\vec{s}) \propto e^{-\beta E(\vec{s})} \end{equation} for all sequences $\vec{s}$ that differ in just one nucleotide from a given sequence. I manually fetched these data from the literature [Liang et al 1996], and stored them in the file data/Gal4_affinity.in. 
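Before working with the real matrix, it may help to see the additive model in action on a toy example. The two-position matrix below is invented purely for illustration (it is not the Gal4 data), but it uses the same list-of-dictionaries layout, with positions as list entries and nucleotides as dictionary keys:

# Toy illustration of E(s) = sum_i epsilon_i(s_i).
toy_matrix = [{'A': 0.0, 'C': 1.2, 'G': 2.0, 'T': 0.8},
              {'A': 1.5, 'C': 0.0, 'G': 0.9, 'T': 1.1}]

def toy_binding_energy(seq, matrix):
    # Sum the per-position contributions epsilon_i(s_i).
    return sum(matrix[i][base] for i, base in enumerate(seq))

print(toy_binding_energy('AC', toy_matrix))  # 0.0 + 0.0 = 0.0, the optimal 2-mer here
print(toy_binding_energy('GT', toy_matrix))  # 2.0 + 1.1 = 3.1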
End of explanation """ import pandas as pd df = pd.DataFrame(energy_matrix) df """ Explanation: Here, I have stored the energy matrix as a list of dictionaries for computational reasons, but we can use pandas to visualize it: End of explanation """ print(df.idxmin(axis=1)) """ Explanation: In the above data structure: * row labels are positions within a binding site * column labels are the identities of nucleotides at those positions * matrix elements are TF-DNA binding energies So, for example, a DNA sequence that binds optimally to Gal4 can be extracted by listing the nucleotides with the lowest energy at each position: End of explanation """ from extract import getFasta with open('data/chr03.fsa') as f: header, chromosome = getFasta(f) print('\nHere is the beginning of the DNA sequence of the chromosome:\n') print(chromosome[:100]) print('\nThere are ', len(chromosome), 'nucleotides in this chromosome') """ Explanation: Extracting the yeast DNA sequence I manually downloaded, from the Saccharomyces Genome Database, the DNA sequence of the third chromosome of yeast, stored in FASTA format, and read it into a string: End of explanation """ from auxFunctions import calcEnergyListWithMatrix TFBS = calcEnergyListWithMatrix(chromosome, energy_matrix) TFBS.head() """ Explanation: Distribution of TF-DNA binding energies genome-wide With the energy matrix and chromosome sequence in hand, I next computed the energy with which Gal4 binds every possible sub-sequence of length $L = 17$ on the chromosome: End of explanation """ import numpy as np from matplotlib import pyplot as plt from auxFunctions import binList %matplotlib inline energyBins, numberSites = binList(TFBS['TF-DNA binding energy'], xMin=-5.0, xMax=50, binWidth=0.25) fontsize = 14 fontsize_tick = 12 fig = plt.figure(figsize=(7,5), facecolor='w') ax = fig.add_subplot(111) ax.plot(energyBins, numberSites, linewidth=0, marker='s', markersize=8, color='red') ax.set_xlabel('TF-DNA binding energy (kT)', fontsize=fontsize) ax.set_ylabel('number of genomic sites', fontsize=fontsize) ax.set_yscale('log') ax.set_xlim(0, 40) ax.tick_params(axis='both', which='major', labelsize=fontsize_tick) """ Explanation: Here is how those TF-DNA binding energies are distributed throughout the genome: End of explanation """ !head -20 data/saccharomyces_cerevisiae_chr03.gff """ Explanation: Putting the y-axis on a log scale reveals that the distribution is approximately parabolic, implying that on a linear scale the distribution is approximately Gaussian. This is expected from the fact that each TF-DNA binding energy is a sum of single-nucleotide energies that are, to a good approximate, independently and identically distributed (Central Limit Theorem). Notice also that, though highly-specific (low-energy) sites do indeed exist, the sheer number of less-specific (intermediate- to high-energy) sites across the genome can, in principle, soak up a significant number of TFs. 
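To get a feel for the claim that many weak sites can soak up TFs, here is a rough back-of-the-envelope sketch. The site counts and energies below are invented round numbers (in units of kT), chosen only to show that the comparison is not one-sided:

# Total Boltzmann weight of a few strong sites versus many weak ones (sketch).
strong_total = 10 * np.exp(-5.0)    # e.g. 10 high-affinity sites at about 5 kT
weak_total = 1e5 * np.exp(-15.0)    # e.g. 100,000 weaker sites at about 15 kT
print(strong_total, weak_total)     # the weak sites together carry a comparable total weight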
Extracting yeast promoters To identify where regulatory regions of genes are likely to be, I downloaded the following General Feature Format file from the Saccharomyces Genome Database: End of explanation """ def extractPromoters(): """ parse saccharomyces_cerevisiae_chr03.gff and extract promoters """ promLength = 100 with open('data/saccharomyces_cerevisiae_chr03.gff') as fin: # skip over header lines line = fin.readline() while line[0] == "#": line = fin.readline() features = [] while line: seqid, source, feature_type, start, end, score, strand, phase, attributes = line.split() if feature_type == 'CDS': attributes = attributes.split(';') initDict = [attribute.split('=') for attribute in attributes] attributes = dict(initDict) systematicGeneName = attributes['Parent'] if 'orf_classification' in attributes: classification = attributes['orf_classification'] else: classification = '.' if 'gene' in attributes: standardGeneName = attributes['gene'] else: standardGeneName = '.' # which DNA strand the gene is encoded on determines where the promoter is located if strand == '+': promStart = int(start) - promLength promEnd = int(start) elif strand == '-': promStart = int(end) promEnd = int(end) + promLength promoter = chromosome[max(promStart, 0):promEnd] features += [(standardGeneName, systematicGeneName, classification, promStart, promEnd, promoter)] line = fin.readline() return pd.DataFrame(data=features, columns=['standard gene name', 'systematic gene name', 'classification', 'promoter start position', 'promoter end position', 'promoter sequence']) promoters = extractPromoters() promoters.head() """ Explanation: This file contains all genomic features on the third chromosome of this species. I located the coding-sequence features, and used them to extract regions of DNA that lie upstream of each transcription start site. End of explanation """ import warnings warnings.filterwarnings('ignore') TFBS_high_affinity = TFBS[TFBS['TF-DNA binding energy'] < 10] TFBS_high_affinity_categorical_variable = [0]*len(TFBS_high_affinity) TFBS_high_affinity_promoter = ['.']*len(TFBS_high_affinity) count = -1 for TFBS_index, TFBS_row in TFBS_high_affinity.iterrows(): count += 1 TFBS_start = TFBS_row['binding-site start position'] TFBS_end = TFBS_row['binding-site end position'] for promoter_index, promoter_row in promoters.iterrows(): promoter_start = promoter_row['promoter start position'] promoter_end = promoter_row['promoter end position'] if (promoter_start < TFBS_start) and (TFBS_end < promoter_end): TFBS_high_affinity_categorical_variable[count] = 1 TFBS_high_affinity_promoter[count] = promoter_row['systematic gene name'] break TFBS_high_affinity['promoter categorical variable'] = TFBS_high_affinity_categorical_variable TFBS_high_affinity['promoter'] = TFBS_high_affinity_promoter TFBS_high_affinity.head() """ Explanation: I have placed a . in a field to indicate missing data. 
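As a quick sanity check on the strand logic (toy coordinates only, not real genes), the promoter window always sits upstream of the coding sequence, on whichever strand the gene is encoded:

# '+' strand: promoter is the 100 bp immediately before the CDS start.
# '-' strand: promoter is the 100 bp immediately after the CDS end.
promLength = 100
print((1000 - promLength, 1000))  # '+' strand gene starting at 1000 -> (900, 1000)
print((2000, 2000 + promLength))  # '-' strand gene ending at 2000 -> (2000, 2100)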
Determining whether potential TF binding sites lie in promoters I next classified each $L$-subsequence (potential binding site) according to whether it lies in a promoter region or not, and added that information as two new columns in the appropriate pandas data frame: End of explanation """ energies = TFBS_high_affinity['TF-DNA binding energy'] categories = TFBS_high_affinity['promoter categorical variable'] # sample data fig = plt.figure(figsize=(7,5), facecolor='w') ax = fig.add_subplot(111) ax.plot(energies, categories, linewidth=0, marker='o', markersize=8, color='red', label='sample data') ax.set_xlabel('TF-DNA binding energy (kT)', fontsize=fontsize) ax.set_ylabel('promoter categorical variable', fontsize=fontsize) ax.tick_params(axis='both', which='major', labelsize=fontsize_tick) ax.set_ylim(-0.1, 1.1) # logistic regression from sklearn.linear_model import LogisticRegression lr = LogisticRegression() energies_rs = energies.values.reshape((len(energies),1)) lr.fit(energies_rs, categories) x = np.linspace(energies.min(),energies.max()) x_rs = x.reshape((len(x), 1)) probs = lr.predict_proba(x_rs) ax.plot(x, probs[:,1], linewidth=2, marker=None, color='black', label='logistic probability of lying in promoter region') # probability that a randomly choosen site lies in a promoter number_promoters, number_cols = promoters.shape promoter_size = len(promoters['promoter sequence'][0]) promoter_size_summed_over_chromosome = number_promoters*promoter_size chromosome_size = len(chromosome) null_probability_of_random_bp_lying_in_promoter = float(promoter_size_summed_over_chromosome)/float(chromosome_size) ax.plot([x.min(), x.max()], [null_probability_of_random_bp_lying_in_promoter, null_probability_of_random_bp_lying_in_promoter], 'r--', linewidth=3, marker=None, color='black', label='null probability of lying in promoter region') # label the plot legend = ax.legend(loc='center', fontsize=fontsize) """ Explanation: Are promoters enriched for subsequences that bind TFs tightly? I used scikit-learn to determine whether subsequences with greater affinity for the TF (i.e. lower energy) tend to be located in promoter regions more often than you'd expect by chance: End of explanation """
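A natural follow-up, sketched here using only the lr object fitted above, is to read off the estimated slope: a negative coefficient on energy would indicate that tighter-binding (lower-energy) sites are more likely than weak ones to fall inside promoter regions.

# Inspect the fitted logistic-regression parameters (sketch only).
print('intercept:', lr.intercept_[0])
print('energy coefficient:', lr.coef_[0][0])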
juanshishido/experiments-guide
02-randomization.ipynb
mit
import numpy as np n, p = 10, 0.5 np.random.binomial(n, p) """ Explanation: Randomization In the previous chapter, we saw how randomization eliminates selection bias. Let's explain what we mean by randomization, describe several ways we might want to randomly assign treatments, and discuss the components other than the assignment that can be randomized. Randomization refers to using "a known, well-understood probabilistic scheme" to assign treatments to units (Oehlert, 2010). Randomization "ensures that assignment to the treatment group is statistically independent of all observed or unobserved variables" (Gerber and Green, 2012). Simple Random Assignment With simple random assignment, every unit has the same probability of being assigned to a particular treatment group. The probability can be anything greater than zero and less than one. This will approximately determine the number of units in each group. For example, assuming a single treatment group and a single control group, if the probability is 0.75, about 75% will be assigned to the treatment group. Let's imagine we have 10 units to which we assign a treatment with 0.5 probability. Will our groups be balanced? That is, will we have 5 units in the treatment group and 5 units in the control group? Let's find out. End of explanation """ np.random.seed(42) np.random.binomial(n, p) """ Explanation: This counts the number of successes&mdash;think of "success" as being assigned to the treatment group&mdash;in 10 independent trials, where success occurs 50% of the time. Each time you run the cell above, you'll get a different result&mdash;it's not always 5! This is a drawback of simple random assignment. [Y]ou could flip a coin to assign each of 10 [units] to the treatment condition, but there is only a 24.6% chance of ending up with exactly 5 [units] in treatment and 5 in control (Gerber and Green, 2012) So that others may reproduce our assignments, we can use a random seed. This is highly recommended, though, in practice, we won't use np.random.binomial(). (Note: I'll always use 42 as the seed.) End of explanation """ from math import factorial possible_combinations = factorial(10) / (factorial(5) * factorial(10 - 5)) import random from itertools import combinations # enumerate the possible ways to select m of N units enumerated = list(combinations(range(10), 5)) # randomly select one of those allocations random.seed(42) select = random.randint(0, possible_combinations-1) treatment = enumerated[select] print(list(treatment)) """ Explanation: Complete Random Assignment If, instead, we'd like to assign exactly $m$ of $N$ units to the treatment group, we can use complete random assignment. Here, as before, each unit has an identical probability of being assigned to the treatment group. Gerber and Green describe three ways to implement complete random assignment: randomly select units until there are $m$ of them in the treatment group enumerate all of the possible ways to select $m$ of $N$ units and randomly select one of those allocations randomly order the $N$ units and select the first $m$ Let's show examples for the second and third approaches. Enumerate There are $$\frac{n!}{r!(n - r)!} = \frac{10!}{5!5!} = 252$$ possible ways to select 5 of 10 units. We can enumerate these combinations using the itertools module. End of explanation """ units = list(range(10)) random.seed(42) random.shuffle(units) units[:5] """ Explanation: Here, using the seed of 42, units 1, 3, 4, 5, and 8 get assigned to the treatment group. 
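One caveat worth keeping in mind before moving on (a quick sketch, not part of the original example): enumerating every allocation only works for very small experiments, because the number of possible allocations grows explosively with the number of units.

from math import factorial

def n_choose_m(n, m):
    return factorial(n) // (factorial(m) * factorial(n - m))

print(n_choose_m(10, 5))    # 252 allocations: easy to enumerate
print(n_choose_m(100, 50))  # roughly 1e29 allocations: hopeless to enumerate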
Randomly Order End of explanation """
SKA-ScienceDataProcessor/crocodile
examples/notebooks/wstacking.ipynb
apache-2.0
%matplotlib inline import sys sys.path.append('../..') from matplotlib import pylab pylab.rcParams['figure.figsize'] = 16, 10 import functools import numpy import scipy import scipy.special import time from crocodile.clean import * from crocodile.synthesis import * from crocodile.simulate import * from util.visualize import * from arl.test_support import create_named_configuration """ Explanation: Illustration of w-stacking End of explanation """ vlas = create_named_configuration('VLAA') ha_range = numpy.arange(numpy.radians(0), numpy.radians(90), numpy.radians(90 / 36)) dec = numpy.radians(45) vobs = xyz_to_baselines(vlas.data['xyz'], ha_range, dec) # Wavelength: 5 metres wvl=5 uvw = vobs / wvl from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt ax = plt.figure().add_subplot(121, projection='3d') ax.scatter(uvw[:,0], uvw[:,1] , uvw[:,2]) max_uvw = numpy.amax(uvw) ax.set_xlabel('U [$\lambda$]'); ax.set_xlim((-max_uvw, max_uvw)) ax.set_ylabel('V [$\lambda$]'); ax.set_ylim((-max_uvw, max_uvw)) ax.set_zlabel('W [$\lambda$]'); ax.set_zlim((-max_uvw, max_uvw)) ax.view_init(20, 20) pylab.show() """ Explanation: Generate baseline coordinates for an observation with the VLA over 6 hours, with a visibility recorded every 10 minutes. The phase center is fixed at a declination of 45 degrees. We assume that the imaged sky says at that position over the course of the observation. Note how this gives rise to fairly large $w$-values. End of explanation """ import itertools vis = numpy.zeros(len(uvw), dtype=complex) for u,v in itertools.product(range(-3, 4), range(-3, 4)): vis += 1.0*simulate_point(uvw, 0.010*u, 0.010*v) plt.clf() uvdist=numpy.sqrt(uvw[:,0]**2+uvw[:,1]**2) plt.plot(uvdist, numpy.abs(vis), '.', color='r') """ Explanation: We can now generate visibilities for these baselines by simulation. We place three sources. End of explanation """ # Imaging parameterisation theta = 2*0.05 lam = 18000 wstep = 100 npixkern = 31 grid_size = int(numpy.ceil(theta*lam)) # Determine weights (globally) wt = doweight(theta, lam, uvw, numpy.ones(len(uvw))) # Depending on algorithm we are going to prefer different uvw-distributions, # so make decision about conjugation of visibilities flexible. def flip_conj(where): # Conjugate visibility. This does not change its meaning. uvw[where] = -uvw[where] vis[where] = numpy.conj(vis[where]) # Determine w-planes wplane = numpy.around(uvw[:,2] / wstep).astype(int) return uvw, vis, numpy.arange(numpy.min(wplane), numpy.max(wplane)+1), wplane """ Explanation: Using imaging, we can now reconstruct the image. 
We split the visibilities into a number of w-bins: End of explanation """ image_sum = numpy.zeros((grid_size, grid_size), dtype=complex) w_grids = {} uvw,vis,wplanes,wplane = flip_conj(uvw[:,2] < 0.0) start_time = time.time() for wp in wplanes: # Filter out w-plane puvw = uvw[wplane == wp] if len(puvw) == 0: continue pvis = vis[wplane == wp] pwt = wt[wplane == wp] midw = numpy.mean(puvw[:,2]) print("w-plane %d: %d visibilities, %.1f average w" % (wp, len(puvw), midw)) # Translate w-coordinate (not needed for simple imaging though) #puvw = numpy.array(puvw) #puvw[:,2] -= midw src = numpy.ndarray((len(pvis), 0)) # Make image cdrt = simple_imaging(theta, lam, puvw, src, pvis * pwt) l,m = theta*coordinates2(grid_size) # Multiply by Fresnel pattern in image space, add wkern = w_kernel_function(l, m, midw) w_grids[wp] = ifft(cdrt) / wkern image_sum += w_grids[wp] print("Done in %.1fs" % (time.time() - start_time)) # We only used half of the visibilities, so the image is not going to # end up real-valued. However, we can easily just remove the unused imaginary # parts and multiply by 2 to arrive at the correct result. show_image(2.0*numpy.real(image_sum), "image", theta) """ Explanation: Simple w-stacking Now we can image each w-plane separately, and divide the w-term out in the image plane. This method requires us to do a lot of FFTs: End of explanation """ start_time = time.time() uvw,vis,wplanes,wplane = flip_conj(uvw[:,1] < 0.0) grid_sum = numpy.zeros((grid_size, grid_size), dtype=complex) for wp in wplanes: # Filter out w-plane puvw = uvw[wplane == wp] if len(puvw) == 0: continue pvis = vis[wplane == wp] pwt = wt[wplane == wp] midw = numpy.mean(puvw[:,2]) # w=0 plane? Just grid directly - skip Fresnel pattern (guaranteed to be =1) + FFTs if abs(midw) < wstep / 2: grid_sum += simple_imaging(theta, lam, puvw, src, pvis * pwt) continue # Determine uv bounds, round to grid cell xy_min = numpy.floor(numpy.amin(puvw[:,:2], axis=0) * theta).astype(int) xy_max = numpy.ceil(numpy.amax(puvw[:,:2], axis=0) * theta).astype(int) # Make sure we have enough space for convolution. xy_min -= (npixkern + 1) // 2 xy_max += npixkern // 2 xy_size = numpy.max(xy_max - xy_min) print("w-plane %d: %d visibilities, %.1f average w, %dx%d cells" % (wp, len(puvw), midw, xy_size, xy_size)) # Force quadratic - TODO: unneeded, strictly speaking xy_maxq = numpy.amax([xy_max, xy_min + xy_size], axis=0) # Determine the uvw size and mid-point uvw_size = xy_size / theta uvw_mid = numpy.hstack([(xy_maxq + xy_min) // 2 / theta, midw]) # Grid pgrid = simple_imaging(theta, uvw_size, puvw - uvw_mid, src, pvis * pwt) # Generate Fresnel pattern l,m = theta*coordinates2(xy_size) wkern = w_kernel_function(l, m, midw) # Divide Fresnel pattern in image plane, then FFT right back pgrid_w = fft(ifft(pgrid) / wkern) # Add to original grid at offset mid = int(lam*theta)//2 x0, y0 = mid + xy_min x1, y1 = mid + xy_max grid_sum[y0:y1, x0:x1] += pgrid_w[0:y1-y0, 0:x1-x0] image_sum = ifft(grid_sum) print("Done in %.1fs" % (time.time() - start_time)) show_image(2.0*numpy.real(image_sum), "image", theta) """ Explanation: This was the easiest version of w-stacking. Clearly a lot of w-planes are mostly empty, which is wasteful both in terms of FFT complexity and especially in terms of memory (bandwidth). Optimised w-planes We can actually reduce the size of these w-planes: Use a grid that has just enough size to contain the visibility and the w-pattern, but FFT it back into grid space and add it into the w=0 grid with an offset. 
This means two FFTs, but if the sub-grid size is small enough this is worth it. End of explanation """ uvbin_size = 256 - npixkern # Choose it so we get a nice 2^x size below start_time = time.time() uvw,vis,wplanes,wplane = flip_conj(uvw[:,1] < 0.0) grid_sum = numpy.zeros((grid_size, grid_size), dtype=complex) ubin = numpy.floor(uvw[:,0]*theta/uvbin_size).astype(int) vbin = numpy.floor(uvw[:,1]*theta/uvbin_size).astype(int) # Generate Fresnel pattern for shifting between two w-planes # As this is the same between all w-planes, we can share it # between the whole loop. l,m = theta*coordinates2(uvbin_size + npixkern) wkern = w_kernel_function(l, m, wstep) for ub in range(numpy.min(ubin), numpy.max(ubin)+1): for vb in range(numpy.min(vbin), numpy.max(vbin)+1): # Find visibilities bin_sel = numpy.logical_and(ubin == ub, vbin == vb) if not numpy.any(bin_sel): continue # Determine bin dimensions xy_min = uvbin_size * numpy.array([ub, vb], dtype=int) xy_max = uvbin_size * numpy.array([ub+1, vb+1], dtype=int) uv_min = xy_min / theta uv_max = xy_min / theta uv_mid = (xy_max + xy_min) // 2 / theta # Make sure we have enough space for convolution. xy_min -= (npixkern + 1) // 2 xy_max += npixkern // 2 assert(numpy.all(numpy.max(xy_max - xy_min) == uvbin_size+npixkern)) uvw_size = (uvbin_size+npixkern) / theta # Make grid for uv-bin bin_image_sum = numpy.zeros((uvbin_size+npixkern, uvbin_size+npixkern), dtype=complex) nvis = 0; midws = [] last_wp = wplanes[0] for wp in wplanes: # Filter out visibilities for u/v-bin and w-plane slc = numpy.logical_and(bin_sel, wplane == wp) puvw = uvw[slc] if len(puvw) == 0: continue pvis = vis[slc] pwt = wt[slc] # Statistics nvis += len(puvw) midws.append(wp*wstep) # w=0 plane? Just grid directly, as before if wp == 0: grid_sum += simple_imaging(theta, lam, puvw, src, pvis * pwt) continue # Bring image sum into this w-plane if last_wp != wplanes[0]: bin_image_sum *= wkern**(wp-last_wp) last_wp = wp # Grid relative to mid-point uvw_mid = numpy.hstack([uv_mid, [wp*wstep]]) pgrid = simple_imaging(theta, uvw_size, puvw - uvw_mid, src, pvis * pwt) # Add to bin grid bin_image_sum += ifft(pgrid) # No visibilities? Skip if nvis == 0: continue # Transfer into w=0 plane, FFT image sum print("uv-bin %d,%d: %d visibilities, %s w-bins" % (ub, vb, nvis, numpy.array(midws, dtype=int))) bin_image_sum /= wkern**last_wp bin_grid = fft(bin_image_sum) # Add to grid, keeping bounds in mind mid = int(lam*theta)//2 x0, y0 = mid + xy_min x1, y1 = mid + xy_max x0b, y0b = numpy.amax([[x0, y0], [0,0]], axis=0) x1b, y1b = numpy.amin([[x1, y1], [grid_size,grid_size]], axis=0) grid_sum[y0b:y1b, x0b:x1b] += \ bin_grid[y0b-y0:y1b-y0, x0b-x0:x1b-x0] image_sum = ifft(grid_sum) print("Done in %.1fs" % (time.time() - start_time)) show_image(2.0 * numpy.real(image_sum), "image", theta) """ Explanation: As you might notice, this is actually slower overall, because for lower w doing two FFTs per w-plane adds quite a bit of extra work. Choosing uv-bins with w-stacking However, it should now be clear that we can choose what parts of the w-planes to generate entirely independently, so we can especially choose to generate the same uv-chunks on all w-planes. This not only allows us to share the FFT back to the w=0 grid, but also makes the FFT cheaper once we are considering large grids. 
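To put a rough number on that saving, here is a simplified sketch: it counts only the approximately N log N cost of a 2-D FFT and ignores gridding, so it is an order-of-magnitude estimate rather than a benchmark. It compares one transform over the full grid with one over the 256-pixel uv-bins used below:

def fft_cost(n):
    # Approximate cost of an n x n FFT, up to a constant factor.
    return n * n * numpy.log2(n * n)

print(fft_cost(grid_size) / fft_cost(256))  # cost ratio per transform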
End of explanation """ image_show = numpy.real(image_sum) step=int(grid_size/10) def zoom(x, y=step): pylab.matshow(image_show[y:y+2*step,x:x+2*step]) ; pylab.colorbar(shrink=.4,pad=0.025); pylab.show() from ipywidgets import interact interact(zoom, x=(0,image_show.shape[0]-2*step,step), y=(0,image_show.shape[1]-2*step,step)); """ Explanation: By zooming in we can confirm output quality: End of explanation """
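A simple numerical sanity check could complement the visual inspection (this is a sketch only; it looks at global image statistics rather than checking the individual source positions):

# The simulated unit point sources should stand out well above the background.
final_image = 2.0 * numpy.real(image_sum)
print("peak pixel value:", numpy.max(final_image))
print("median absolute pixel value:", numpy.median(numpy.abs(final_image)))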
andreyf/machine-learning-examples
numpy_and_pandas/part0_numpy.ipynb
gpl-3.0
# Python 2 and 3 compatibility from __future__ import (absolute_import, division, print_function, unicode_literals) # ะพั‚ะบะปัŽั‡ะธะผ ะฟั€ะตะดัƒะฟั€ะตะถะดะตะฝะธั Anaconda import warnings warnings.simplefilter('ignore') import numpy as np a = np.array([0, 1, 2, 3]) a """ Explanation: <center> <img src="../img/ods_stickers.jpg"> ะžั‚ะบั€ั‹ั‚ั‹ะน ะบัƒั€ั ะฟะพ ะผะฐัˆะธะฝะฝะพะผัƒ ะพะฑัƒั‡ะตะฝะธัŽ ะะฒั‚ะพั€ั‹ ะผะฐั‚ะตั€ะธะฐะปะฐ: ะฟั€ะพะณั€ะฐะผะผะธัั‚-ะธััะปะตะดะพะฒะฐั‚ะตะปัŒ Mail.ru Group, ัั‚ะฐั€ัˆะธะน ะฟั€ะตะฟะพะดะฐะฒะฐั‚ะตะปัŒ ะคะฐะบัƒะปัŒั‚ะตั‚ะฐ ะšะพะผะฟัŒัŽั‚ะตั€ะฝั‹ั… ะะฐัƒะบ ะ’ะจะญ ะฎั€ะธะน ะšะฐัˆะฝะธั†ะบะธะน ะธ Data Scientist ะฒ Segmento ะ•ะบะฐั‚ะตั€ะธะฝะฐ ะ”ะตะผะธะดะพะฒะฐ. ะœะฐั‚ะตั€ะธะฐะป ั€ะฐัะฟั€ะพัั‚ั€ะฐะฝัะตั‚ัั ะฝะฐ ัƒัะปะพะฒะธัั… ะปะธั†ะตะฝะทะธะธ Creative Commons CC BY-NC-SA 4.0. ะœะพะถะฝะพ ะธัะฟะพะปัŒะทะพะฒะฐั‚ัŒ ะฒ ะปัŽะฑั‹ั… ั†ะตะปัั… (ั€ะตะดะฐะบั‚ะธั€ะพะฒะฐั‚ัŒ, ะฟะพะฟั€ะฐะฒะปัั‚ัŒ ะธ ะฑั€ะฐั‚ัŒ ะทะฐ ะพัะฝะพะฒัƒ), ะบั€ะพะผะต ะบะพะผะผะตั€ั‡ะตัะบะธั…, ะฝะพ ั ะพะฑัะทะฐั‚ะตะปัŒะฝั‹ะผ ัƒะฟะพะผะธะฝะฐะฝะธะตะผ ะฐะฒั‚ะพั€ะฐ ะผะฐั‚ะตั€ะธะฐะปะฐ. <center>ะขะตะผะฐ 1. ะŸะตั€ะฒะธั‡ะฝั‹ะน ะฐะฝะฐะปะธะท ะดะฐะฝะฝั‹ั… ั Pandas</center> <center>ะงะฐัั‚ัŒ 0. ะ ะฐะฑะพั‚ะฐ ั ะฒะตะบั‚ะพั€ะฐะผะธ ะฒ ะฑะธะฑะปะธะพั‚ะตะบะต NumPy Numpy - ัั‚ะพ ะฑะธะฑะปะธะพั‚ะตะบะฐ Python ะดะปั ะฒั‹ั‡ะธัะปะธั‚ะตะปัŒะฝะพ ัั„ั„ะตะบั‚ะธะฒะฝั‹ั… ะพะฟะตั€ะฐั†ะธะน ั ะผะฝะพะณะพะผะตั€ะฝั‹ะผะธ ะผะฐััะธะฒะฐะผะธ, ะฟั€ะตะดะฝะฐะทะฝะฐั‡ะตะฝะฝะฐั ะฒ ะพัะฝะพะฒะฝะพะผ ะดะปั ะฝะฐัƒั‡ะฝั‹ั… ะฒั‹ั‡ะธัะปะตะฝะธะน. End of explanation """ L = range(1000) %timeit [i**2 for i in L] a = np.arange(1000) %timeit a**2 """ Explanation: ะขะฐะบะพะน ะผะฐััะธะฒ ะผะพะถะตั‚ ัะพะดะตั€ะถะฐั‚ัŒ: - ะทะฝะฐั‡ะตะฝะธั ั„ะธะทะธั‡ะตัะบะธั… ะฒะตะปะธั‡ะธะฝ ะฒ ั€ะฐะทั‹ะต ะผะพะผะตะฝั‚ั‹ ะฒั€ะตะผะตะฝะธ ะฟั€ะธ ะผะพะดะตะปะธั€ะพะฒะฐะฝะธะธ - ะทะฝะฐั‡ะตะฝะธั ัะธะณะฝะฐะปะฐ, ะธะทะผะตั€ะตะฝะฝะพะณะพ ะฟั€ะธะฑะพั€ะพะผ - ะธะฝั‚ะตะฝัะธะฒะฝะพัั‚ะธ ะฟะธะบัะตะปะพะฒ - 3D ะบะพะพั€ะดะธะฝะฐั‚ั‹ ะพะฑัŠะตะบั‚ะพะฒ, ะฟะพะปัƒั‡ะตะฝะฝั‹ั…, ะฝะฐะฟั€ะธะผะตั€, ะฟั€ะธ ะœะ ะข - ... ะ—ะฐั‡ะตะผ NumPy: ะญั„ั„ะตะบั‚ะธะฒะฝะพัั‚ัŒ ะฑะฐะทะพะฒั‹ั… ะพะฟะตั€ะฐั†ะธะน End of explanation """ ?np.array """ Explanation: ะ˜ะฝั‚ะตั€ะฐะบั‚ะธะฒะฝะฐั ัะฟั€ะฐะฒะบะฐ End of explanation """ np.lookfor('create array') np.con*? """ Explanation: ะฟะพะธัะบ ะฒ ะดะพะบัƒะผะตะฝั‚ะฐั†ะธะธ End of explanation """ import numpy as np """ Explanation: ะ‘ะธะฑะปะธะพั‚ะตะบัƒ ะฟั€ะธะฝัั‚ะพ ะธะผะฟะพั€ั‚ะธั€ะพะฒะฐั‚ัŒ ั‚ะฐะบ End of explanation """ a = np.array([0, 1, 2, 3]) a a.ndim a.shape len(a) """ Explanation: ะกะพะทะดะฐะฝะธะต ะผะฐััะธะฒะพะฒ 1-D: End of explanation """ b = np.array([[0, 1, 2], [3, 4, 5]]) # 2 x 3 array b b.ndim b.shape len(b) # returns the size of the first dimension c = np.array([[[1], [2]], [[3], [4]]]) c c.shape """ Explanation: 2-D, 3-D, ...: End of explanation """ a = np.arange(10) # 0 .. n-1 (!) 
a b = np.arange(1, 9, 2) # start, end (exclusive), step b """ Explanation: ะœะตั‚ะพะดั‹ ะดะปั ัะพะทะดะฐะฝะธั ะผะฐััะธะฒะพะฒ ะะฐ ะฟั€ะฐะบั‚ะธะบะต ะผั‹ ั€ะตะดะบะพ ะดะพะฑะฐะฒะปัะตะผ ัะปะตะผะตะฝั‚ั‹ ะฟะพ ะพะดะฝะพะผัƒ ะ ะฐะฒะฝะพะผะตั€ะฝะพ ั€ะฐัะฟั€ะตะดะตะปะตะฝะฝั‹ะต ัะปะตะผะตะฝั‚ั‹: End of explanation """ c = np.linspace(0, 1, 6) # start, end, num-points c d = np.linspace(0, 1, 5, endpoint=False) d """ Explanation: ะฟะพ ั‡ะธัะปัƒ ัะปะตะผะตะฝั‚ะพะฒ: End of explanation """ a = np.ones((3, 3)) # reminder: (3, 3) is a tuple a b = np.zeros((2, 2)) b c = np.eye(3) c d = np.diag(np.array([1, 2, 3, 4])) d """ Explanation: ะงะฐัั‚ะพ ะฒัั‚ั€ะตั‡ะฐัŽั‰ะธะตัั ะผะฐััะธะฒั‹: End of explanation """ a = np.random.rand(4) # uniform in [0, 1] a b = np.random.randn(4) # Gaussian b np.random.seed(1234) # Setting the random seed """ Explanation: np.random ะณะตะฝะตั€ะฐั†ะธั ัะปัƒั‡ะฐะนะฝั‹ั… ั‡ะธัะตะป (Mersenne Twister PRNG): End of explanation """ a = np.array([1, 2, 3]) a.dtype b = np.array([1., 2., 3.]) b.dtype """ Explanation: ะžัะฝะพะฒะฝั‹ะต ั‚ะธะฟั‹ ะดะฐะฝะฝั‹ั… NumPy ะขะพั‡ะบะฐ ะฟะพัะปะต ั‡ะธัะปะฐ ะพะทะฝะฐั‡ะฐะตั‚, ั‡ั‚ะพ ัั‚ะพ ั‚ะธะฟ ะดะฐะฝะฝั‹ั… float64 End of explanation """ c = np.array([1, 2, 3], dtype=float) c.dtype a = np.ones((3, 3)) a.dtype """ Explanation: ะœะพะถะฝะพ ะทะฐะดะฐั‚ัŒ ั‚ะธะฟ ะดะฐะฝะฝั‹ั… ัะฒะฝะพ. ะŸะพ ัƒะผะพะปั‡ะฐะฝะธัŽ - float64 End of explanation """ d = np.array([1+2j, 3+4j, 5+6*1j]) d.dtype """ Explanation: ะŸั€ะพั‡ะธะต ั‚ะธะฟั‹ ะดะฐะฝะฝั‹ั…: ะšะพะผะฟะปะตะบัะฝั‹ะต ั‡ะธัะปะฐ End of explanation """ e = np.array([True, False, False, True]) e.dtype """ Explanation: Bool End of explanation """ f = np.array(['Bonjour', 'Hello', 'Hallo',]) f.dtype # <--- strings containing max. 7 letters """ Explanation: ะกั‚ั€ะพะบะธ ะะฐ ัั‚ั€ะพะบะธ ะฟะฐะผัั‚ัŒ ะฒั‹ะดะตะปัะตั‚ัั "ะถะฐะดะฝะพ" - ะฟะพ ะผะฐะบัะธะผะฐะปัŒะฝะพะผัƒ ั‡ะธัะปัƒ ะปะธั‚ะตั€ ะฒ ัั‚ั€ะพะบะต. ะ’ ัั‚ะพะผ ะฟั€ะธะผะตั€ะต ะฝะฐ ะบะฐะถะดัƒัŽ ัั‚ั€ะพะบัƒ ะฒั‹ะดะตะปัะตั‚ัั ะฟะพ 7 ะปะธั‚ะตั€, ะธ ั‚ะธะฟ ะดะฐะฝะฝั‹ั… - 'S7' End of explanation """ %pylab inline """ Explanation: ะžัะฝะพะฒั‹ ะฒะธะทัƒะฐะปะธะทะฐั†ะธะธ $ ipython notebook --pylab=inline ะ˜ะปะธ ะธะท ั‚ะตั‚ั€ะฐะดะบะธ: End of explanation """ import matplotlib.pyplot as plt # the tidy way x = np.linspace(0, 3, 20) y = np.linspace(0, 9, 20) plt.plot(x, y) # line plot plt.show() # <-- shows the plot (not needed with pylab) """ Explanation: ะŸะฐั€ะฐะผะตั‚ั€ inline ะณะพะฒะพั€ะธั‚ ัะตั€ะฒะตั€ัƒ IPython ะพ ั‚ะพะผ, ั‡ั‚ะพ ั€ะตะทัƒะปัŒั‚ะฐั‚ั‹ ะฑัƒะดัƒั‚ ะพั‚ะพะฑั€ะฐะถะฐั‚ัŒัั ะฒ ัะฐะผะพะน ั‚ะตั‚ั€ะฐะดะบะต, ะฐ ะฝะต ะฒ ะฝะพะฒะพะผ ะพะบะฝะต. ะ˜ะผะฟะพั€ั‚ะธั€ัƒะตะผ Matplotlib End of explanation """ plot(x, y) # line plot """ Explanation: ะ˜ะปะธ ั ะธัะฟะพะปัŒะทะพะฒะฐะฝะธะตะผ pylab: End of explanation """ x = np.linspace(0, 3, 20) y = np.linspace(0, 9, 20) plt.plot(x, y) # line plot plt.plot(x, y, 'o') # dot plot """ Explanation: ะ˜ัะฟะพะปัŒะทะพะฒะฐะฝะธะต import matplotlib.pyplot as plt ั€ะตะบะพะผะตะฝะดัƒะตั‚ัั ะดะปั ัะบั€ะธะฟั‚ะพะฒ, ะฐ pylab - ะฒ ั‚ะตั‚ั€ะฐะดะบะฐั… IPython. 
ะžั‚ะพะฑั€ะฐะถะตะฝะธะต ะพะดะฝะพะผะตั€ะฝั‹ั… ะผะฐััะธะฒะพะฒ: End of explanation """ image = np.random.rand(30, 30) plt.imshow(image, cmap=plt.cm.hot) plt.colorbar() """ Explanation: ะžั‚ะพะฑั€ะฐะถะตะฝะธะต ะดะฒัƒั…ะผะตั€ะฝั‹ั… ะผะฐััะธะฒะพะฒ (ะฝะฐะฟั€ะธะผะตั€, ะธะทะพะฑั€ะฐะถะตะฝะธะน): End of explanation """ a = np.arange(10) a a[0], a[2], a[-1] """ Explanation: ะ˜ะฝะดะตะบัะธั€ะพะฒะฐะฝะธะต ะผะฐััะธะฒะพะฒ ะธ ัั€ะตะทั‹ ะ’ ั†ะตะปะพะผ ั‚ะฐะบ ะถะต, ะบะฐะบ ัะพ ะฒัั‚ั€ะพะตะฝะฝั‹ะผะธ ะฟะพัะปะตะดะพะฒะฐั‚ะตะปัŒะฝะพัั‚ัะผะธ Python (ะฝะฐะฟั€ะธะผะตั€, ะบะฐะบ ัะพ ัะฟะธัะบะฐะผะธ). End of explanation """ a[::-1] """ Explanation: ะ ะฐะฑะพั‚ะฐะตั‚ ะธ ะฟะพะฟัƒะปัั€ะฝั‹ะน ะฒ Python ัะฟะพัะพะฑ ะพั‚ั€ะฐะถะตะฝะธั ะผะฐััะธะฒะฐ: End of explanation """ a = np.diag(np.arange(3)) a a[1, 1] a[2, 1] = 10 # third line, second column a a[1] """ Explanation: ะ”ะปั ะผะฝะพะณะพะผะตั€ะฝั‹ั… ะผะฐััะธะฒะพะฒ ะธะฝะดะตะบัั‹ - ัั‚ะพ ะบะพั€ั‚ะตะถะธ ั†ะตะปั‹ั… ั‡ะธัะตะป End of explanation """ a = np.arange(10) a a[2:9:3] # [start:end:step] """ Explanation: ะกั€ะตะทั‹ End of explanation """ a[:4] """ Explanation: ะŸะพัะปะตะดะฝะธะน ะธะฝะดะตะบั ะฝะต ะฒะบะปัŽั‡ะฐะตั‚ัั End of explanation """ a[1:3] a[::2] a[3:] """ Explanation: ะŸะพ ัƒะผะพะปั‡ะฐะฝะธัŽ `start` - 0, `end` - ะธะฝะดะตะบั ะฟะพัะปะตะดะฝะตะณะพ ัะปะตะผะตะฝั‚ะฐ, `step` - 1: End of explanation """ a = np.arange(10) a[5:] = 10 a b = np.arange(5) a[5:] = b[::-1] a """ Explanation: ะœะพะถะฝะพ ัะพะฒะผะตั‰ะฐั‚ัŒ ะฟั€ะธัะฒะฐะธะฒะฐะฝะธะต ะธ ัั€ะตะท: End of explanation """ from IPython.display import Image Image(filename='../img/prime-sieve.png') """ Explanation: ะŸั€ะธะผะตั€. ะœะฐั‚ั€ะธั†ะฐ ะดะตะปะธั‚ะตะปะตะน ะžั‚ะพะฑั€ะฐะทะธั‚ัŒ ะผะฐั‚ั€ะธั†ัƒ, ะฒ ะบะพั‚ะพั€ะพะน ะฒั‹ั‡ะตั€ะบะธะฒะฐะตั‚ัั (x, y), ะตัะปะธ y ะดะตะปะธั‚ัั ะฝะฐ x. End of explanation """ is_prime = np.ones((100,), dtype=bool) """ Explanation: ะกะพะทะดะฐะดะธะผ ะผะฐััะธะฒ is_prime, ะทะฐะฟะพะปะฝะตะฝะฝะธะน ะทะฝะฐั‡ะตะฝะธัะผะธ True End of explanation """ is_prime[:2] = 0 """ Explanation: ะ’ั‹ั‡ะตั€ะบะฝะตะผ 0 ะธ 1 ะบะฐะบ ะฝะต ัะฒะปััŽั‰ะธะตัั ะฟั€ะพัั‚ั‹ะผะธ: End of explanation """ N_max = int(np.sqrt(len(is_prime))) for j in range(2, N_max): is_prime[2*j::j] = False is_prime """ Explanation: ะ”ะปั ะบะฐะถะดะพะณะพ ะฝะฐั‚ัƒั€ะฐะปัŒะฝะพะณะพ j ะฝะฐั‡ะธะฝะฐั ั 2, "ะฒั‹ั‡ะตั€ะบะฝะตะผ" ั‡ะธัะปะฐ, ะตะผัƒ ะบั€ะฐั‚ะฝั‹ะต: End of explanation """ np.random.seed(3) a = np.random.random_integers(0, 20, 15) a (a % 3 == 0) mask = (a % 3 == 0) extract_from_a = a[mask] # or, a[a%3==0] extract_from_a # extract a sub-array with the mask """ Explanation: ะ˜ะฝะดะตะบัะธั€ะพะฒะฐะฝะธะต ะผะฐัะบะฐะผะธ End of explanation """ a[a % 3 == 0] = -1 a """ Explanation: ะ˜ะฝะดะตะบัะธั€ะพะฒะฐะฝะธะต ะผะฐัะบะพะน ะผะพะถะตั‚ ะฑั‹ั‚ัŒ ะพั‡ะตะฝัŒ ะฟะพะปะตะทะฝั‹ะผ ะดะปั ะฟั€ะธัะฒะฐะธะฒะฐะฝะธั ะทะฝะฐั‡ะตะฝะธะน ั‡ะฐัั‚ะธ ัะปะตะผะตะฝั‚ะพะฒ ะผะฐััะธะฒะฐ: End of explanation """ a = np.arange(0, 100, 10) a a[[2, 3, 2, 4, 2]] # note: [2, 3, 2, 4, 2] is a Python list a[[9, 7]] = -100 a a = np.arange(10) idx = np.array([[3, 4], [9, 7]]) idx.shape a[idx] """ Explanation: ะ˜ะฝะดะตะบัะธั€ะพะฒะฐะฝะธะต ะผะฐััะธะฒะพะผ ั†ะตะปั‹ั… ั‡ะธัะตะป End of explanation """
tensorflow/docs-l10n
site/en-snapshot/agents/tutorials/5_replay_buffers_tutorial.ipynb
apache-2.0
#@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Explanation: Copyright 2021 The TF-Agents Authors. End of explanation """ !pip install tf-agents from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf import numpy as np from tf_agents import specs from tf_agents.agents.dqn import dqn_agent from tf_agents.drivers import dynamic_step_driver from tf_agents.environments import suite_gym from tf_agents.environments import tf_py_environment from tf_agents.networks import q_network from tf_agents.replay_buffers import py_uniform_replay_buffer from tf_agents.replay_buffers import tf_uniform_replay_buffer from tf_agents.specs import tensor_spec from tf_agents.trajectories import time_step """ Explanation: Replay Buffers <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/agents/tutorials/5_replay_buffers_tutorial"> <img src="https://www.tensorflow.org/images/tf_logo_32px.png" /> View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/agents/blob/master/docs/tutorials/5_replay_buffers_tutorial.ipynb"> <img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/agents/blob/master/docs/tutorials/5_replay_buffers_tutorial.ipynb"> <img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/agents/docs/tutorials/5_replay_buffers_tutorial.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> </td> </table> Introduction Reinforcement learning algorithms use replay buffers to store trajectories of experience when executing a policy in an environment. During training, replay buffers are queried for a subset of the trajectories (either a sequential subset or a sample) to "replay" the agent's experience. In this colab, we explore two types of replay buffers: python-backed and tensorflow-backed, sharing a common API. In the following sections, we describe the API, each of the buffer implementations and how to use them during data collection training. Setup Install tf-agents if you haven't already. End of explanation """ data_spec = ( tf.TensorSpec([3], tf.float32, 'action'), ( tf.TensorSpec([5], tf.float32, 'lidar'), tf.TensorSpec([3, 2], tf.float32, 'camera') ) ) batch_size = 32 max_length = 1000 replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer( data_spec, batch_size=batch_size, max_length=max_length) """ Explanation: Replay Buffer API The Replay Buffer class has the following definition and methods: ```python class ReplayBuffer(tf.Module): """Abstract base class for TF-Agents replay buffer.""" def init(self, data_spec, capacity): """Initializes the replay buffer. 
Args: data_spec: A spec or a list/tuple/nest of specs describing a single item that can be stored in this buffer capacity: number of elements that the replay buffer can hold. """ @property def data_spec(self): """Returns the spec for items in the replay buffer.""" @property def capacity(self): """Returns the capacity of the replay buffer.""" def add_batch(self, items): """Adds a batch of items to the replay buffer.""" def get_next(self, sample_batch_size=None, num_steps=None, time_stacked=True): """Returns an item or batch of items from the buffer.""" def as_dataset(self, sample_batch_size=None, num_steps=None, num_parallel_calls=None): """Creates and returns a dataset that returns entries from the buffer.""" def gather_all(self): """Returns all the items in buffer.""" return self._gather_all() def clear(self): """Resets the contents of replay buffer""" ``` Note that when the replay buffer object is initialized, it requires the data_spec of the elements that it will store. This spec corresponds to the TensorSpec of trajectory elements that will be added to the buffer. This spec is usually acquired by looking at an agent's agent.collect_data_spec which defines the shapes, types, and structures expected by the agent when training (more on that later). TFUniformReplayBuffer TFUniformReplayBuffer is the most commonly used replay buffer in TF-Agents, thus we will use it in our tutorial here. In TFUniformReplayBuffer the backing buffer storage is done by tensorflow variables and thus is part of the compute graph. The buffer stores batches of elements and has a maximum capacity max_length elements per batch segment. Thus, the total buffer capacity is batch_size x max_length elements. The elements stored in the buffer must all have a matching data spec. When the replay buffer is used for data collection, the spec is the agent's collect data spec. Creating the buffer: To create a TFUniformReplayBuffer we pass in: 1. the spec of the data elements that the buffer will store 2. the batch size corresponding to the batch size of the buffer 3. the max_length number of elements per batch segment Here is an example of creating a TFUniformReplayBuffer with sample data specs, batch_size 32 and max_length 1000. End of explanation """ action = tf.constant(1 * np.ones( data_spec[0].shape.as_list(), dtype=np.float32)) lidar = tf.constant( 2 * np.ones(data_spec[1][0].shape.as_list(), dtype=np.float32)) camera = tf.constant( 3 * np.ones(data_spec[1][1].shape.as_list(), dtype=np.float32)) values = (action, (lidar, camera)) values_batched = tf.nest.map_structure(lambda t: tf.stack([t] * batch_size), values) replay_buffer.add_batch(values_batched) """ Explanation: Writing to the buffer: To add elements to the replay buffer, we use the add_batch(items) method where items is a list/tuple/nest of tensors representing the batch of items to be added to the buffer. Each element of items must have an outer dimension equal batch_size and the remaining dimensions must adhere to the data spec of the item (same as the data specs passed to the replay buffer constructor). 
Here's an example of adding a batch of items End of explanation """ # add more items to the buffer before reading for _ in range(5): replay_buffer.add_batch(values_batched) # Get one sample from the replay buffer with batch size 10 and 1 timestep: sample = replay_buffer.get_next(sample_batch_size=10, num_steps=1) # Convert the replay buffer to a tf.data.Dataset and iterate through it dataset = replay_buffer.as_dataset( sample_batch_size=4, num_steps=2) iterator = iter(dataset) print("Iterator trajectories:") trajectories = [] for _ in range(3): t, _ = next(iterator) trajectories.append(t) print(tf.nest.map_structure(lambda t: t.shape, trajectories)) # Read all elements in the replay buffer: trajectories = replay_buffer.gather_all() print("Trajectories from gather all:") print(tf.nest.map_structure(lambda t: t.shape, trajectories)) """ Explanation: Reading from the buffer There are three ways to read data from the TFUniformReplayBuffer: get_next() - returns one sample from the buffer. The sample batch size and number of timesteps returned can be specified via arguments to this method. as_dataset() - returns the replay buffer as a tf.data.Dataset. One can then create a dataset iterator and iterate through the samples of the items in the buffer. gather_all() - returns all the items in the buffer as a Tensor with shape [batch, time, data_spec] Below are examples of how to read from the replay buffer using each of these methods: End of explanation """ replay_buffer_capacity = 1000*32 # same capacity as the TFUniformReplayBuffer py_replay_buffer = py_uniform_replay_buffer.PyUniformReplayBuffer( capacity=replay_buffer_capacity, data_spec=tensor_spec.to_nest_array_spec(data_spec)) """ Explanation: PyUniformReplayBuffer PyUniformReplayBuffer has the same functionaly as the TFUniformReplayBuffer but instead of tf variables, its data is stored in numpy arrays. This buffer can be used for out-of-graph data collection. Having the backing storage in numpy may make it easier for some applications to do data manipulation (such as indexing for updating priorities) without using Tensorflow variables. However, this implementation won't have the benefit of graph optimizations with Tensorflow. Below is an example of instantiating a PyUniformReplayBuffer from the agent's policy trajectory specs: End of explanation """ env = suite_gym.load('CartPole-v0') tf_env = tf_py_environment.TFPyEnvironment(env) q_net = q_network.QNetwork( tf_env.time_step_spec().observation, tf_env.action_spec(), fc_layer_params=(100,)) agent = dqn_agent.DqnAgent( tf_env.time_step_spec(), tf_env.action_spec(), q_network=q_net, optimizer=tf.compat.v1.train.AdamOptimizer(0.001)) replay_buffer_capacity = 1000 replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer( agent.collect_data_spec, batch_size=tf_env.batch_size, max_length=replay_buffer_capacity) # Add an observer that adds to the replay buffer: replay_observer = [replay_buffer.add_batch] collect_steps_per_iteration = 10 collect_op = dynamic_step_driver.DynamicStepDriver( tf_env, agent.collect_policy, observers=replay_observer, num_steps=collect_steps_per_iteration).run() """ Explanation: Using replay buffers during training Now that we know how to create a replay buffer, write items to it and read from it, we can use it to store trajectories during training of our agents. Data collection First, let's look at how to use the replay buffer during data collection. In TF-Agents we use a Driver (see the Driver tutorial for more details) to collect experience in an environment. 
To use a Driver, we specify an Observer that is a function for the Driver to execute when it receives a trajectory. Thus, to add trajectory elements to the replay buffer, we add an observer that calls add_batch(items) to add a batch of items to the replay buffer. Below is an example of this with TFUniformReplayBuffer. We first create an environment, a network and an agent. Then we create a TFUniformReplayBuffer. Note that the specs of the trajectory elements in the replay buffer are equal to the agent's collect data spec. We then set its add_batch method as the observer for the driver that will do the data collection during our training: End of explanation """ # Read the replay buffer as a Dataset, # read batches of 4 elements, each with 2 timesteps: dataset = replay_buffer.as_dataset( sample_batch_size=4, num_steps=2) iterator = iter(dataset) num_train_steps = 10 for _ in range(num_train_steps): trajectories, _ = next(iterator) loss = agent.train(experience=trajectories) """ Explanation: Reading data for a train step After adding trajectory elements to the replay buffer, we can read batches of trajectories from the replay buffer to use as input data for a train step. Here is an example of how to train on trajectories from the replay buffer in a training loop: End of explanation """
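The same read path can also be pipelined so that sampling overlaps with the train step. A minimal sketch, assuming the replay_buffer, agent and num_train_steps defined above; the parallelism and prefetch values are arbitrary choices, not tuned settings:

```python
dataset = replay_buffer.as_dataset(
    sample_batch_size=4,
    num_steps=2,
    num_parallel_calls=3).prefetch(3)

iterator = iter(dataset)
for _ in range(num_train_steps):
    trajectories, _ = next(iterator)
    loss = agent.train(experience=trajectories)
```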
poldrack/fmri-analysis-vm
analysis/statistics/LinearAlgebraStats.ipynb
mit
import numpy,pandas import matplotlib.pyplot as plt import seaborn as sns import scipy.stats import statsmodels.api as sm import statsmodels from statsmodels.formula.api import ols,glsar from statsmodels.tsa.arima_process import arma_generate_sample from scipy.linalg import toeplitz from IPython.display import display, HTML %matplotlib inline """ Explanation: This notebook introduces the notion of computing the general linear model using linear algebra. First we load the necessarily libraries. End of explanation """ def make_ttest_data(n_obs=[50,50],mean_obs=[10,10.1],sd_obs=[2,2]): """ function to generate independent-sample data with two conditions """ n_obs=[50,50] # number of observations in each condition n_obs_total=numpy.sum(n_obs) mean_obs=[10,11] sd_obs=[1,1] condition=numpy.zeros(n_obs_total) condition[:n_obs[0]]=0 condition[n_obs[0]:n_obs_total]=1 data=numpy.zeros(n_obs_total) data[:n_obs[0]]=mean_obs[0] data[n_obs[0]:n_obs_total]=mean_obs[1] # doublecheck our work assert numpy.sum(data==mean_obs[0])==n_obs[0] assert numpy.sum(data==mean_obs[1])==n_obs[1] noise=numpy.zeros(n_obs_total) noise[:n_obs[0]]=numpy.random.randn(n_obs[0])*sd_obs[0] noise[n_obs[0]:n_obs_total]=numpy.random.randn(n_obs[1])*sd_obs[1] df=pandas.DataFrame({'data':data+noise,'condition':condition}) return df """ Explanation: A simple example We start with a simple example of an independent samples t-test. First, let's make a function that will generate some data. We will assume that there are two conditions with specified means and standard deviations End of explanation """ data=make_ttest_data() Y=data.data.values X=data.condition.values f = plt.figure() sns.distplot(Y[X==0], hist=False, label="condition 1") sns.distplot(Y[X==1], hist=False, label="condition 2") """ Explanation: Make some data and plot the distributions for the two conditions End of explanation """ # to make an array, we give a list to numpy.array y = numpy.array([1,3,2]) print(y) print('y shape:',y.shape) # we can add a dimension with the None operator z=y[:,None] print(z) print('z shape:',z.shape) # one option to create a matrix is to give a vector and reshape to a matrix print('A') A = numpy.array([1,1,2,3,5,8,13,21,34]).reshape(3,3) print(A) # another alternative is to pass a list of lists print('B') B = numpy.array([[1,1,2],[3,5,8],[13,21,34]]) print(B) # to transpose a matrix, use the .T operator print('B.T') print(B.T) """ Explanation: Now we want to perform a t-test to ask whether the means of the two conditions are different. Let's try to compute it on our own, using linear algebra. Remember that the formula for the GLM is: $Y = X * B + e$ where Y is an N X 1 matrix containing the data that we generated, and X is an N X c "design matrix" that describes the conditions (in this case, a single vector indicating condition 1 or 2). Using the normal equations, we can estimate B using: $\hat{B} = (X'X)^{-1}X'Y$ Before we dive into these computations, we need to go over how to do linear algebra in Python. 
The following borrows liberally from https://www.ibm.com/developerworks/community/blogs/jfp/entry/Elementary_Matrix_Operations_In_Python?lang=en Making arrays/matrices in Python End of explanation """ # create a matrix full of zeros # note that the shape is passed as a tuple if you want multiple dimensions a=numpy.zeros((2,4)) print('a') print(a) #create a matrix full of ones b=numpy.ones((2,4)) print('b') print(b) # create a matrix full of any other number: c=b*12 print('c') print(c) # create a range of numbers: d=numpy.arange(10) print('d') print(d) e=numpy.arange(3,5,0.33) print('e') print(e) """ Explanation: There are some useful functions to generate specific types of matrices End of explanation """ print('a+5') print(a+5) print('c/2') print(c/2) print('a+b+c') print(a+b+c) print('a*b*c') print(a*b*c) print('b/c') print(b/c) """ Explanation: Now let's look at some basic arithmetic operations End of explanation """ x=numpy.array([[1,2],[3,4]]) y=numpy.array([[1,0],[0,2]]) print('x') print(x) print('y') print(y) print('scalar product of x and y: x*y') print(x*y) print('matrix product of x and y: x.dot(y)') print(x.dot(y)) print('or use numpy.matrix') print(numpy.matrix(x)*numpy.matrix(y)) """ Explanation: Matrix multiplication Matrix multiplication is performed on numpy arrays using the .dot() operator. End of explanation """ def variance(Y): # insert code here to estimate variance using matrix multiplication return var # use allclose rather than == to deal with numerical errors assert numpy.allclose(numpy.var(Y),variance(Y)) """ Explanation: Exercise: We know that the variance of a matrix X is computed as $mean((X-mean(X))*(X-mean(X))')$. Fill in the appropriate code in the function below so that it returns a value that equals the value obtained from the numpy.var() command. End of explanation """ def corrcoef(x,y): assert len(x)==len(y) # add code here to compute correlation return r print('My result:',corrcoef(X,Y)) print('Numpy result:',numpy.corrcoef(X,Y)) assert numpy.allclose(numpy.corrcoef(X,Y)[0,1],corrcoef(X,Y)) """ Explanation: Exercise: Write a function to compute the correlation coefficient using matrix algebra. The equation to compute the correlation using matrix algebra is: $r = \frac{X\cdot Y}{\sqrt{(X\cdot X)*(Y\cdot Y)}}$ assuming that X and Y have zero mean, so you need to remove the mean before computing this. End of explanation """ # Exercise code here """ Explanation: Matrix inversion We also need to know how to compute the inverse of a matrix, which we do using numpy.linalg.inv(). Exercise: In the cell below, create a matrix containing the following numbers: [1,0] [0,2] and print out the original matrix along with the inverted matrix. 
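One possible solution sketch for self-checking (using the matrix values given above):

```python
m = numpy.array([[1, 0], [0, 2]])
m_inv = numpy.linalg.inv(m)
print(m)
print(m_inv)          # for a diagonal matrix, the inverse is 1/diagonal
print(m.dot(m_inv))   # should give the identity matrix
```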
End of explanation """ def ols_estimate(X,Y,add_intercept=True,verbose=False, ddof=1,use_two_sided=True): """ function to estimate parameters for a general linear model """ # first we need to set up the matrices in the proper shape # Y should be N X 1 # X should be X X c if verbose: print('original Y shape:',Y.shape) Y=Y.reshape((len(Y),1)) if verbose: print('new Y shape:',Y.shape) if verbose: print('original X shape:',X.shape) if len(X.shape)==1: X=X.reshape((len(X),1)) Xnames=['X%d'%int(i+1) for i in range(X.shape[1])] if verbose: print('new X shape:',X.shape) # add an intercept to the model if specified if add_intercept: X=sm.add_constant(X) Xnames=Xnames.append('Intercept') # make sure that the design matrix is full rank assert numpy.linalg.matrix_rank(X)==X.shape[1] # estimate the parameters using the normal equations b_hat=numpy.linalg.inv(X.T.dot(X)).dot(X.T.dot(Y)) if verbose: print('b_hat=',b_hat) # compute residuals and their variance resid=Y-X.dot(b_hat) sigma2=resid.T.dot(resid)/(X.shape[0] - X.shape[1]) # variance of the residuals # now compute the t statistic and p values for for each variable in X t=numpy.zeros(X.shape[1]) p=numpy.zeros(X.shape[1]) for i in range(X.shape[1]): c=numpy.zeros(X.shape[1]) c[i]=1 t[i]=c.dot(b_hat)/numpy.sqrt(c.dot(numpy.linalg.inv(X.T.dot(X))).dot(c.T)*sigma2) if t[i]<0: p[i]=scipy.stats.distributions.t.cdf(t[i],len(Y)-1) else: p[i]=1-scipy.stats.distributions.t.cdf(t[i],len(Y)-1) if use_two_sided: p[i]=p[i]*2 if verbose: print('t=',t) df=pandas.DataFrame({'bhat':b_hat.ravel(),'t':t.ravel(),'p':p.ravel()},index=Xnames) return df """ Explanation: Now that we know how to perform the necessary matrix operations, let's do our t-test on the data generated above. We first have to fix a problem: we need both X and Y to be matrices for our computation to work, but right now they are 1-dimensional vectors rather than two-dimensional matrices. We can fix this using numpy - let's go ahead and create a function to compute the ordinary least squares estimates, that includes code to reformat the data into matrices. We also include an option to add an intercept (i.e. a column of ones) to the model if it doesn't already exist. End of explanation """ e=ols_estimate(X,Y) display(e) """ Explanation: Now let's estimate the model parameters using our function. End of explanation """ t,p=scipy.stats.ttest_ind(Y[X==1],Y[X==0]) print('t/p computed by scipy:',t,p) assert numpy.allclose(t,e.t.values[1]) """ Explanation: Let's compute the same test using a couple of other canned procedures. First, we use the t-test procedure within the scipy package. End of explanation """ X=sm.add_constant(X) ols_result=sm.OLS(Y,X).fit() print(ols_result.summary()) # make sure our result is close to the one from statsmodels for i in range(len(e.t.values)): assert numpy.allclose(e.t.values[i],ols_result.tvalues[i]) """ Explanation: We can also compute it via the general linear model, using the ordinary least squares (OLS) method from statsmodels. End of explanation """ residual=Y - X.dot(e.bhat.values) ## insert code here """ Explanation: Exercise: Confirm that the dot product between the residuals from OLS and the X variable is zero. 
End of explanation """ def mkar1noise(tslen,coef,noisesd): """ function to return AR(1) autocorrelated noise """ varcorrect = numpy.sqrt(1-coef**2) noise=numpy.random.randn(tslen)*noisesd for i in range(1,tslen): noise[i]=noise[i]*varcorrect+noise[i-1]*coef return noise def make_regression_data(nobs=100,regsmooth=[1], regsmoothcoef=0.8, beta=[0.1,0.5,10],noisesd=1., noisecorr=0): """ function to generate regression data with option to add autocorrelated noise beta reflects two conditions plus intercept """ regs=numpy.random.randn(nobs,len(beta)-1) regvarcorrect = numpy.sqrt(1-regsmoothcoef**2) for r in regsmooth: for i in range(1,nobs): regs[i,r]=regs[i,r]*regvarcorrect+regs[i-1,r]*regsmoothcoef regs=numpy.hstack((regs,numpy.ones((regs.shape[0],1)))) data=regs.dot(numpy.array(beta)) if noisecorr==0: noise=numpy.random.randn(len(data))*noisesd else: noise=mkar1noise(len(data),noisecorr,noisesd) return data+noise,regs Y,X=make_regression_data() #X=X-numpy.mean(X,0) plt.imshow(X,interpolation='nearest') plt.axis('auto') plt.ylim([0,100]) plt.figure() plt.scatter(X[:,0],Y) plt.xlabel('first X regressor - X[:,0]') plt.ylabel('Y') plt.figure() plt.scatter(X[:,1],Y) plt.xlabel('first X regressor - X[:,1]') plt.ylabel('Y') e=ols_estimate(X,Y) display(e) """ Explanation: Multiple regression Let's now look at how we can fit a more complex model using the GLM. Let's make some data based on two regressors plus noise. We will make one of the regressors smooth across observations, for reasons that will become clearer later. End of explanation """ ols_result=sm.OLS(Y,X).fit() print(ols_result.summary()) for i in range(len(e.t.values)): assert numpy.allclose(e.t.values[i],ols_result.tvalues[i]) """ Explanation: Let's run the same analysis using a canned function from the statsmodels package to compare the results. Note that statsmodels automatically adds an intercept, so we don't pass that column from the design matrix. End of explanation """ nruns=1000 pval=numpy.zeros((nruns,3)) bhat=numpy.zeros((nruns,3)) for i in range(nruns): Y,X=make_regression_data(beta=[0,0,0]) e=ols_estimate(X,Y) pval[i,:]=e.p.values bhat[i,:]=e.bhat.values df=pandas.DataFrame({'Type 1 error':[numpy.mean(pval[:,i]<0.05) for i in range(3)], 'Variance of bhat':[numpy.std(bhat[:,i]) for i in range(3)]}, index=['X1','X2','intercept']) display(df) """ Explanation: Beyond ordinary least squares In the foregoing, we used ordinary least squares estimation, which is the best linear unbiased estimator in the case of uncorrelated and homoscedastic (equal variance) errors (according to the Gauss-Markov theorem). However, there are common situations where these assumptions fail, in which case we need to use more sophisticated models. The case that is most relevant to fMRI is when there are correlated errors, which we will explore below. First, let's simulate performance using OLS when the assumptions are upheld - the Type I error rate should be about 0.05. 
End of explanation """ nruns=1000 ncvals=numpy.arange(0.0,0.9,0.1) pval=numpy.zeros((nruns,3,len(ncvals))) bhat=numpy.zeros((nruns,3,len(ncvals))) for nc in range(len(ncvals)): for i in range(nruns): Y,X=make_regression_data(beta=[0,0,0],noisecorr=ncvals[nc]) e=ols_estimate(X,Y,add_intercept=False) pval[i,:,nc]=e.p.values bhat[i,:,nc]=e.bhat.values pval_exc=pval<0.05 meanpval=numpy.mean(pval_exc,0) f=plt.figure(figsize=(8,5)) plt.subplot(1,2,1) plt.plot(ncvals,meanpval.T) plt.plot([0,1],[0.05,0.05],'--') plt.xlabel('autocorrelation') plt.ylabel('Type I error (% of significant tests)') plt.legend(['X1','X2','Intercept']) plt.ylim([0,1]) plt.subplot(1,2,2) bhvar=numpy.std(bhat,0) plt.plot(ncvals,bhvar.T) plt.xlabel('autocorrelation') plt.ylabel('std of parameter estimates') plt.legend(['X1','X2','Intercept']) plt.ylim([0,1]) """ Explanation: Now let's introduce some correlated noise, using the function created above which smooths the noise across observations using a first-order autoregressive (AR(1)) model. We do this for a range of levels of autocorrelation; because we have set the true beta values to zero, and the resulting proportion of significant results tells us the Type 1 error. We also assess the variance of the estimates. End of explanation """ print(toeplitz(range(4))) """ Explanation: Exercise: What do you see? Why do the effects of correlation in the data differ between regressors? Generalized least squares In cases where the data do not adhere to the assumptions of OLS, we can use generalized least squares to obtain BLUE estimates. This requires that we have a model of the autocorrelation structure. Let's use a Toeplitz matrix, allows us to create an AR(1) covariance matrix. The Toeplitz matrix has this form (in this case for a dataset with 4 observations): End of explanation """ rho=0.3 print(rho**toeplitz(range(4))) """ Explanation: The AR1 covariance has this form: $V = \sigma^2 \begin{bmatrix}1 & \rho & \rho^2 & \rho^3\ \rho & 1 & \rho & \rho^2\ \rho^2 & \rho & 1 & \rho \ \rho^3 & \rho^2 & \rho & 1 \\end{bmatrix}$ where $\rho$ is the first-order autocorrelation and $\sigma^2$ is the variance. Note that we still assume that the variances are homogenous across datapoints. Thus, to generate such a matrix we simply exponentiate $\rho$ by the Toeplitz matrix (which is acheived using the $**$ operator) in Python. 
End of explanation """ def gls_estimate(X,Y,add_intercept=True,verbose=False, ddof=1,use_two_sided=True): """ estimate generalized least squares using a Toeplitz matrix to generate AR(1) covariance """ # first we need to set up the matrices in the proper shape # Y should be N X 1 # X should be X X c if verbose: print('original Y shape:',Y.shape) Y=Y.reshape((len(Y),1)) if verbose: print('new Y shape:',Y.shape) if verbose: print('original X shape:',X.shape) if len(X.shape)==1: X=X.reshape((len(X),1)) Xnames=['X%d'%int(i+1) for i in range(X.shape[1])] if verbose: print('new X shape:',X.shape) # add an intercept to the model if specified if add_intercept: X=sm.add_constant(X) # make sure that the design matrix is full rank assert numpy.linalg.matrix_rank(X)==X.shape[1] # first fit OLS to get residuals for AC estimation e=ols_estimate(X,Y) resid=Y - X.dot(e.bhat.values[:,numpy.newaxis]) ar1_coef=statsmodels.tsa.stattools.acf(resid)[1] # get the first-order autocorrelation estimate # compute the inverse covariance matrix order=toeplitz(range(len(Y))) sigma=ar1_coef**order Vinv=numpy.linalg.inv(sigma) # re-estimate the model using GLS b_hat=numpy.linalg.inv(X.T.dot(Vinv).dot(X)).dot(X.T.dot(Vinv).dot(Y)) if verbose: print('b_hat=',b_hat) resid=Y-X.dot(b_hat) sigma2=resid.T.dot(resid)/(X.shape[0] - X.shape[1]) # variance of the residuals # now compute the t statistic and p values for for each variable in X t=numpy.zeros(X.shape[1]) p=numpy.zeros(X.shape[1]) for i in range(X.shape[1]): c=numpy.zeros(X.shape[1]) c[i]=1 t[i]=c.dot(b_hat)/numpy.sqrt(c.dot(numpy.linalg.inv(X.T.dot(Vinv).dot(X))).dot(c.T)*sigma2) if t[i]<0: p[i]=scipy.stats.distributions.t.cdf(t[i],len(Y)-1) else: p[i]=1-scipy.stats.distributions.t.cdf(t[i],len(Y)-1) if use_two_sided: p[i]=p[i]*2 if verbose: print('t=',t) df=pandas.DataFrame({'bhat':b_hat.ravel(),'t':t.ravel(),'p':p.ravel()},index=Xnames) return df order=toeplitz(range(len(Y))) sigma=0.5**order Y,X=make_regression_data(beta=[1,0.1,10],noisecorr=0.5) e=gls_estimate(X,Y) display(e) gls_result=sm.GLS(Y,X,sigma=sigma).fit() gls_result.summary() """ Explanation: Now let's build a version of our estimator that uses GLS rather than OLS. We do this using an interative approach. We first run OLS to estimate the model and obtain the residuals, and then we estimate the autocorrelation structure from the residuals. Then we estimate the model using GLS with the autocorrelation structure estimated above. The GLS estimator is: $\hat{B} = (X'V^{-1}X)^{-1}X'V^{-1}Y$ where $V$ is the covariance matrix (which in OLS we assumed was simply $\sigma^2I$). This is akin to "whitening" the data by removing the covariance structure. End of explanation """ nruns=1000 ncvals=numpy.arange(0.0,0.9,0.1) pval=numpy.zeros((nruns,2,len(ncvals))) bhat=numpy.zeros((nruns,2,len(ncvals))) for nc in range(len(ncvals)): for i in range(nruns): Y,X=make_regression_data(beta=[0,0,0],noisecorr=ncvals[nc]) e=gls_estimate(X,Y) pval[i,:,nc]=e.p.values[:2] bhat[i,:,nc]=e.bhat.values[:2] pval_exc=pval<0.05 meanpval=numpy.mean(pval_exc,0) f=plt.figure(figsize=(12,5)) f=plt.subplot(1,2,1) plt.plot(ncvals,meanpval.T) plt.plot([0,1],[0.05,0.05],'--') plt.xlabel('autocorrelation') plt.ylabel('% of significant tests') plt.legend(['X1','X2','Intercept']) plt.ylim([0,1]) bhvar=numpy.std(bhat,0) f=plt.subplot(1,2,2) plt.plot(ncvals,bhvar.T) plt.xlabel('autocorrelation') plt.ylabel('std of parameter estimates') plt.legend(['X1','X2','Intercept']) plt.ylim([0,1]) """ Explanation: What do you see in this comparison? 
Now let's simulate datasets under the null and estimate the model, across different levels of autocorrelation, as we did above. Because the estimation is a bit more complex this will take a couple of minutes. End of explanation """
AdityaSoni19031997/Machine-Learning
kaggle/microsoft_malware_competition/neural-network-malware-0-67.ipynb
mit
# IMPORT LIBRARIES import pandas as pd, numpy as np, os, gc # LOAD AND FREQUENCY-ENCODE FE = ['EngineVersion','AppVersion','AvSigVersion','Census_OSVersion'] # LOAD AND ONE-HOT-ENCODE OHE = [ 'RtpStateBitfield','IsSxsPassiveMode','DefaultBrowsersIdentifier', 'AVProductStatesIdentifier','AVProductsInstalled', 'AVProductsEnabled', 'CountryIdentifier', 'CityIdentifier', 'GeoNameIdentifier', 'LocaleEnglishNameIdentifier', 'Processor', 'OsBuild', 'OsSuite', 'SmartScreen','Census_MDC2FormFactor', 'Census_OEMNameIdentifier', 'Census_ProcessorCoreCount', 'Census_ProcessorModelIdentifier', 'Census_PrimaryDiskTotalCapacity', 'Census_PrimaryDiskTypeName', 'Census_HasOpticalDiskDrive', 'Census_TotalPhysicalRAM', 'Census_ChassisTypeName', 'Census_InternalPrimaryDiagonalDisplaySizeInInches', 'Census_InternalPrimaryDisplayResolutionHorizontal', 'Census_InternalPrimaryDisplayResolutionVertical', 'Census_PowerPlatformRoleName', 'Census_InternalBatteryType', 'Census_InternalBatteryNumberOfCharges', 'Census_OSEdition', 'Census_OSInstallLanguageIdentifier', 'Census_GenuineStateName','Census_ActivationChannel', 'Census_FirmwareManufacturerIdentifier', 'Census_IsTouchEnabled', 'Census_IsPenCapable', 'Census_IsAlwaysOnAlwaysConnectedCapable', 'Wdft_IsGamer', 'Wdft_RegionIdentifier'] # LOAD ALL AS CATEGORIES dtypes = {} for x in FE+OHE: dtypes[x] = 'category' dtypes['MachineIdentifier'] = 'str' dtypes['HasDetections'] = 'int8' # LOAD CSV FILE df_train = pd.read_csv('../input/train.csv', usecols=dtypes.keys(), dtype=dtypes) print ('Loaded',len(df_train),'rows of TRAIN.CSV!') # DOWNSAMPLE sm = 2000000 df_train = df_train.sample(sm) print ('Only using',sm,'rows to train and validate') x=gc.collect() """ Explanation: Neural Network - Statistical Encoding - Microsoft Malware There aren't any examples of using a neural network to model Microsoft Malware, so I thought I'd post one. Also in this kernel, I show statistical one-hot-encoding where only boolean variables that are idependently statistically significant are created. 
Load Train.csv End of explanation """ import math # CHECK FOR NAN def nan_check(x): if isinstance(x,float): if math.isnan(x): return True return False # FREQUENCY ENCODING def encode_FE(df,col,verbose=1): d = df[col].value_counts(dropna=False) n = col+"_FE" df[n] = df[col].map(d)/d.max() if verbose==1: print('FE encoded',col) return [n] # ONE-HOT-ENCODE ALL CATEGORY VALUES THAT COMPRISE MORE THAN # "FILTER" PERCENT OF TOTAL DATA AND HAS SIGNIFICANCE GREATER THAN "ZSCORE" def encode_OHE(df, col, filter, zscore, tar='HasDetections', m=0.5, verbose=1): cv = df[col].value_counts(dropna=False) cvd = cv.to_dict() vals = len(cv) th = filter * len(df) sd = zscore * 0.5/ math.sqrt(th) #print(sd) n = []; ct = 0; d = {} for x in cv.index: try: if cv[x]<th: break sd = zscore * 0.5/ math.sqrt(cv[x]) except: if cvd[x]<th: break sd = zscore * 0.5/ math.sqrt(cvd[x]) if nan_check(x): r = df[df[col].isna()][tar].mean() else: r = df[df[col]==x][tar].mean() if abs(r-m)>sd: nm = col+'_BE_'+str(x) if nan_check(x): df[nm] = (df[col].isna()).astype('int8') else: df[nm] = (df[col]==x).astype('int8') n.append(nm) d[x] = 1 ct += 1 if (ct+1)>=vals: break if verbose==1: print('OHE encoded',col,'- Created',len(d),'booleans') return [n,d] # ONE-HOT-ENCODING from dictionary def encode_OHE_test(df,col,dt): n = [] for x in dt: n += encode_BE(df,col,x) return n # BOOLEAN ENCODING def encode_BE(df,col,val): n = col+"_BE_"+str(val) if nan_check(val): df[n] = df[col].isna() else: df[n] = df[col]==val df[n] = df[n].astype('int8') return [n] cols = []; dd = [] # ENCODE NEW for x in FE: cols += encode_FE(df_train,x) for x in OHE: tmp = encode_OHE(df_train,x,0.005,5) cols += tmp[0]; dd.append(tmp[1]) print('Encoded',len(cols),'new variables') # REMOVE OLD for x in FE+OHE: del df_train[x] print('Removed original',len(FE+OHE),'variables') x = gc.collect() """ Explanation: Statistically Encode Variables All four variables in the Python variable list FE will get frequency encoded and all thirty-nine variables in list OHE will get statistically one-hot-encoded. In total, forty-three variables are imported from the training csv while thirty-nine were ignored. Among all our category variables, there are a combined 211,562 values! So we can't one-hot-encode all. (Note that this is without Census_OEMModelIdentifier's 175,366 or Census_SystemVolumeTotalCapacity's 536,849) We will use a trick from statistics. First we'll assume we have a random sample. (Which we don't actually have, but let's pretend.) Then for each value, we will test the following hypotheses $$H_0: \text{Prob(HasDetections=1 given value is present)} = 0.5 $$ $$H_A: \text{Prob(HasDetections=1 given value is present)} \ne 0.5$$ The test statistic z-score equals \( \hat{p} \), the observed HasDetections rate given value is present, minus 0.5 divided by the standard deviation of \( \hat{p} \). The Central Limit Theorem tells us $$\text{z-score} = \frac{\hat{p}-0.5}{SD(\hat{p})} = 2 (\hat{p} - 0.5)\sqrt{n} $$ where \(n\) is the number of occurences of the value. If the absolute value of \(z\) is greater than 2.0, we are 95% confident that Prob(HasDetections=1 given value is present) is not equal 0.5 and we will include a boolean for this value in our model. Actually, we'll use a \(z\) threshold of 5.0 and require \( 10^{-7}n>0.005 \). This adds 350 new boolean variables (instead of naively one-hot-encoding 211,562!). ## Example - Census_FirmwareManufacturerIdentifier In the plots below, the dotted lines use the right y-axis and solid lines/bars use the left. 
The top plot below shows 20 values of variable Census_FirmwareManufacturerIdentifier. Notice that I consider NAN a value. Each of these values contains over 0.5% of the data. And all the variables together contain 97% of the data. Value=93 has a HasDetections rate of 52.5% while value=803 has a HasDetections rate of 35.4%. Their z-scores are \(22.2 = 2\times(0.5253-0.5)\times\sqrt{192481} \text{ }\) and \(-71.3 = 2\times(0.3535-0.5)\times\sqrt{59145}\text{ }\) respectively! The probability that value=93 and value=803 have a HasDetections rate of 50% and what we are observing is due to chance is close to nothing. Additionally from the bottom plot, you see that these two values have consistently been high and low throughout all of the year 2018. We can trust that this trend will continue into the test set's October and November computers. Python Code To see the Python encoding functions, click 'see code' to the right. End of explanation """ from keras import callbacks from sklearn.metrics import roc_auc_score class printAUC(callbacks.Callback): def __init__(self, X_train, y_train): super(printAUC, self).__init__() self.bestAUC = 0 self.X_train = X_train self.y_train = y_train def on_epoch_end(self, epoch, logs={}): pred = self.model.predict(np.array(self.X_train)) auc = roc_auc_score(self.y_train, pred) print("Train AUC: " + str(auc)) pred = self.model.predict(self.validation_data[0]) auc = roc_auc_score(self.validation_data[1], pred) print ("Validation AUC: " + str(auc)) if (self.bestAUC < auc) : self.bestAUC = auc self.model.save("bestNet.h5", overwrite=True) return from sklearn.model_selection import train_test_split from keras.models import Sequential from keras.layers import Dense, Dropout, BatchNormalization, Activation from keras.callbacks import LearningRateScheduler from keras.optimizers import Adam #SPLIT TRAIN AND VALIDATION SET X_train, X_val, Y_train, Y_val = train_test_split( df_train[cols], df_train['HasDetections'], test_size = 0.5) # BUILD MODEL model = Sequential() model.add(Dense(100,input_dim=len(cols))) model.add(Dropout(0.4)) model.add(BatchNormalization()) model.add(Activation('relu')) model.add(Dense(100)) model.add(Dropout(0.4)) model.add(BatchNormalization()) model.add(Activation('relu')) model.add(Dense(1, activation='sigmoid')) model.compile(optimizer=Adam(lr=0.01), loss="binary_crossentropy", metrics=["accuracy"]) annealer = LearningRateScheduler(lambda x: 1e-2 * 0.95 ** x) # TRAIN MODEL model.fit(X_train,Y_train, batch_size=32, epochs = 20, callbacks=[annealer, printAUC(X_train, Y_train)], validation_data = (X_val,Y_val), verbose=2) """ Explanation: Example - Census_OEMModelIdentifier Below is variable Census_OEMModelIdentifier. Observe how NAN is treated like a category value and that it has consistently had the lowest HasDetections rate all of year 2018. Also notice how value=245824 has consistently been high. Finally note that value=188345 and 248045 are high and low respectively in August and September but earlier in the year their positions were reversed! What will their positions be in the test set's October and November computers?? Build and Train Network We will a build a 3 layer fully connected network with 100 neurons on each hidden layer. We will use ReLU activation, Batch Normalization, 40% Dropout, Adam Optimizer, and Decaying Learning Rate. Unfortunately we don't have an AUC loss function, so we will use Cross Entrophy instead. After each epoch, we will call a custom Keras callback to display the current AUC and continually save the best model. 
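As an aside, newer versions of TensorFlow's bundled Keras (tf.keras 2.x, not the standalone Keras used in this kernel) ship a built-in AUC metric, so the same quantity could be tracked without a custom callback. A hedged sketch of that alternative, not what is used above:

```python
import tensorflow as tf  # assumes TF 2.x-style tf.keras

tf_model = tf.keras.Sequential([
    tf.keras.layers.Dense(100, activation='relu', input_dim=len(cols)),
    tf.keras.layers.Dropout(0.4),
    tf.keras.layers.Dense(1, activation='sigmoid'),
])
tf_model.compile(optimizer='adam',
                 loss='binary_crossentropy',
                 metrics=[tf.keras.metrics.AUC(name='auc')])
```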
End of explanation """ del df_train del X_train, X_val, Y_train, Y_val x = gc.collect() # LOAD BEST SAVED NET from keras.models import load_model model = load_model('bestNet.h5') pred = np.zeros((7853253,1)) id = 1 chunksize = 2000000 for df_test in pd.read_csv('../input/test.csv', chunksize = chunksize, usecols=list(dtypes.keys())[0:-1], dtype=dtypes): print ('Loaded',len(df_test),'rows of TEST.CSV!') # ENCODE TEST cols = [] for x in FE: cols += encode_FE(df_test,x,verbose=0) for x in range(len(OHE)): cols += encode_OHE_test(df_test,OHE[x],dd[x]) # PREDICT TEST end = (id)*chunksize if end>7853253: end = 7853253 pred[(id-1)*chunksize:end] = model.predict_proba(df_test[cols]) print(' encoded and predicted part',id) id += 1 # SUBMIT TO KAGGLE df_test = pd.read_csv('../input/test.csv', usecols=['MachineIdentifier']) df_test['HasDetections'] = pred df_test.to_csv('submission.csv', index=False) """ Explanation: Predict Test and Submit to Kaggle Even after deleting the training data, our network still needs lot of our available RAM, we'll need to load in test.csv by chunks and predict by chunks. Click 'see code' button to see how this is done. End of explanation """
CLandauGWU/group_e
Model_Select.ipynb
mit
#Shift and shape vars shiftmonths = 6 shapef = 'anc' #Assign the split for holdout data. holdout_date = 2015.5 #Get data filestring = './data/'+shapef+'_out.csv' df = pd.read_csv(filestring) df = df.sort_values(['month', 'NAME'])# , 'ANC']) df = df.reset_index(drop=True) len(df.NAME.unique()) """ Explanation: To start... We import the data output of our data pipeline. We reset the index, drop index columns, and lag the data. We explicitly print shape several times, making sure that we capture the magnitude of data lost from dropping NA values. End of explanation """ df.columns print(df.shape) shiftnum= (((len(df.NAME.unique()))*(shiftmonths))) #Also generate some lagged y data in the opposite direction. df['y']= df['countBBL'].shift(-shiftnum) df['countBBL_prev_month'] = df['countBBL'].shift((len(df.NAME.unique()))) df['countBBL_prev_cycle'] = df['countBBL'].shift((shiftnum)) df = df[shiftnum:-(shiftnum+(len(df.NAME.unique())))] df = df.dropna() df.shape """ Explanation: Now we examine the columns and lag the data. End of explanation """ df = pd.get_dummies(df, columns=['NAME']) df = df.drop(['Unnamed: 0'], axis= 1) print(df.shape) df = df.astype('float') df = df.dropna() print(df.shape) """ Explanation: The next cell cleans out vestigial columns and drops/fills/expands to dummies for our NA and categorical values. End of explanation """ #Flexible adaptation of Dr. Braman's interactive gridsearch script #implementation. #TODO Clean up and streamline import sklearn from sklearn.neural_network import * from sklearn.neighbors import * from sklearn.svm import * from sklearn.gaussian_process import * from sklearn.gaussian_process.kernels import * from sklearn.tree import * from sklearn.ensemble import * from sklearn.naive_bayes import * from sklearn.discriminant_analysis import * from sklearn.linear_model import * from sklearn.model_selection import * from sklearn.preprocessing import * import random #Frame up some separate DataFrames for scalar and stuff scl_data = data = df data = data.reset_index(drop=True) X = data.drop(['y'], axis=1) y = data['y'] XH_train = data[data['month'] <= holdout_date-1] yH_train = XH_train['y'] XH_train = XH_train.drop(['y'], axis=1) XH_val = scl_data[scl_data['month'] >= holdout_date-1] XH_val = XH_val[XH_val['month'] <= holdout_date] yH_val = XH_val['y'] XH_val = XH_val.drop(['y'], axis=1) XH_test = data[data['month'] >= holdout_date] yH_test = XH_test['y'] XH_test = XH_test.drop(['y'], axis=1) ytr = sklearn.preprocessing.MinMaxScaler([0, 1] ).fit(y) y = ytr.fit_transform(y) y = pd.DataFrame(y, columns=['y']) scl_data = scl_data.reset_index(drop=True) y.y print(scl_data.month.max()) print(scl_data.shape) scl_data = scl_data.dropna() print(scl_data.shape) sXH_train = scl_data[scl_data['month'] <= holdout_date-1] syH_train = sXH_train['y'] sXH_train = sXH_train.drop(['y'], axis=1) sXH_val = scl_data[scl_data['month'] >= holdout_date-1] sXH_val = sXH_val[sXH_val['month'] <= holdout_date] syH_val = sXH_val['y'] sXH_val = sXH_val.drop(['y'], axis=1) sXH_test = scl_data[scl_data['month'] >= holdout_date] syH_test = sXH_test['y'] sXH_test = sXH_test.drop(['y'], axis=1) #Build scalers for the scl_data, other -------------------- scale_data_splits = [scl_data, sXH_train,sXH_test, syH_train, syH_test] for scl_data in scale_data_splits: scaler = sklearn.preprocessing.StandardScaler( ).fit(scl_data) minmaxer = sklearn.preprocessing.MinMaxScaler([0, 1] ).fit(scl_data) scl = scaler.transform(scl_data) scl = minmaxer.transform(scl_data) try: scl_data = pd.DataFrame(scl, 
columns=scl_data.columns) except AttributeError as e: print(e) scl_data = pd.DataFrame(scl, columns=['y']) print(scl_data.shape) scl_data = scl_data.dropna() print(scl_data.shape) assert np.all(np.isfinite(scl_data)) assert not np.any(np.isnan(scl_data)) #scl_data[scl_data.columns # ] = scaler.fit_transform(scl_data[scl_data.columns]) #---------------------------------------------------------- """ Explanation: Here we start building our grid search inputs, beginning with the splits. End of explanation """ y; print(sXH_train.shape) print(syH_train.shape) print(sXH_test.shape) print(syH_test.shape) scl_data.columns sX = scl_data.drop(['y'], axis=1) sy = scl_data['y'] assert np.all(np.isfinite(X)) assert np.all(np.isfinite(y)) assert not np.any(np.isnan(X)) assert not np.any(np.isnan(y)) assert np.all(np.isfinite(sX)) assert np.all(np.isfinite(sy)) assert not np.any(np.isnan(sX)) assert not np.any(np.isnan(sy)) scl_data.columns scl_data.describe() """ Explanation: Let's make sure our data came out of the scalers intact: End of explanation """ #Make a short list of random states to insert into randomstate params. scrambler = [] for scram in range(0, 10): scrambler.append(random.randint(0, 10000)) print(scrambler) to_scale = ['SVR'] names = ['AdaBoostRegressor', 'RandomForestRegressor', 'SVR', #'KNeighborsRegressor', #'BaggingRegressor', 'GradientBoostingRegressor', #'LinearRegression', #'MLPRegressor', #'SGDRegressor', 'LassoLars' ] regressors = [AdaBoostRegressor(), RandomForestRegressor(), SVR(), #KNeighborsRegressor(), #BaggingRegressor(), GradientBoostingRegressor(), #LinearRegression(), #MLPRegressor(), #SGDRegressor(), LassoLars() ] param_grids =[ ['AdaBoostRegressor', dict( n_estimators=[80, 60, 30], learning_rate=[1, .5, .01], loss=['linear', 'square', 'exponential'], #random_state=scrambler[3:5] )], ['RandomForestRegressor', dict( max_depth=[5, 10, 15], criterion=['mse', 'mae'], #random_state=scrambler[:2] )], ['SVR', dict( #Most params for SVR are turned off right now, too expensive C=[1, .9], epsilon=[.1, .05], #kernel=['poly'] )], ['GradientBoostingRegressor', dict( max_depth=[3, 6, 9, 12], min_samples_split=[2, 4, 8], presort=[False] )], ['LassoLars', dict( alpha=[0.1, 1, .5, .75], #random_state=[random.randint(0, 10000)] )], ] """ Explanation: This cell contains our a crude RNG, a list of regressors which benefit from scaled data, and hardcoded data used to generate our param_grid, et cetera. 
End of explanation """ outcomes = [] for name, rgsr in zip(names, regressors): for item in param_grids: if item[0]==name: print(name + ':') params= item[1] cv = sklearn.model_selection.GridSearchCV(rgsr, param_grid=params, verbose=True, n_jobs=12, cv=3, pre_dispatch="2*n_jobs") if name not in to_scale: #X_train, y_train, X_test, y_test = sklearn.model_selection.train_test_split(X, y) fitted = cv.fit(XH_train, yH_train) score = cv.score(XH_val, yH_val) print(score) best = rgsr.set_params(**cv.best_params_) bestfit= best.fit(XH_train, yH_train) bestscore = best.score(XH_test, yH_test) if name in to_scale: #TODO: fix #X_train, y_train, X_test, y_test = sklearn.model_selection.train_test_split(sX, sy) fitted = cv.fit(sXH_train, syH_train) score = cv.score(sXH_val, syH_val) print(score) best = rgsr.set_params(**cv.best_params_) bestfit= best.fit(sXH_train, syH_train) bestscore = best.score(sXH_test, syH_test) print(name + " R2 with best model, score:") print(bestscore) outcomes.append((name, score, cv.cv_results_, cv.best_estimator_, cv.best_params_, bestscore, [yH_test, ])) for nm in range(0, len(outcomes)): print() print(outcomes[nm][0]) print(outcomes[nm][1]) print() print('Best on real:') print(outcomes[nm][-1]) #Adapted from https://pythonspot.com/en/matplotlib-bar-chart/ objects = [j[0] for j in outcomes] y_pos = np.arange(len(objects)) performance = [j[-1] for j in outcomes] for jm in range(len(performance)): if performance[jm] < 0: performance[jm] = 0 performance plt.barh(y_pos, performance, align='center', alpha=0.5) plt.yticks(y_pos, objects) plt.xlabel('R2 Score') ti = "Scoring across models for "+shapef+", lagging by "+str(shiftmonths)+ " months." plt.title(ti) fl = './plots/' + shapef + "_shift" + str(shiftmonths) plt.savefig(fl) """ Explanation: Grid Search: Here we implement an iterator that executes GridSearchCV and reports the best explained variance. The best_params attribute is then extracted, and used those on the whole training set, then predict on the holdout data. Testing indicates that for some models, the fit on our full dataset modestly outperforms the CV regularly. 
End of explanation """ for jm in range(0, 5): print(outcomes[jm][0]) print(outcomes[jm][1]) print(outcomes[jm][4]) best = AdaBoostRegressor(learning_rate=1, loss='square', n_estimators=60) bestfit= best.fit(XH_train, yH_train) bestscore = best.score(XH_test, yH_test) print(outcomes[0][0]) print(bestscore) best = RandomForestRegressor(max_depth=10) bestfit= best.fit(XH_train, yH_train) bestscore = best.score(XH_test, yH_test) print(outcomes[1][0]) print(bestscore) best = SVR(max_depth=10) bestfit= best.fit(XH_train, yH_train) bestscore = best.score(XH_test, yH_test) print(outcomes[2][0]) print(bestscore) best = GradientBoostingRegressor(max_depth=10) bestfit= best.fit(XH_train, yH_train) bestscore = best.score(XH_test, yH_test) print(outcomes[3][0]) print(bestscore) Xtrain = dat_xtrain.drop(['y'], axis=1) y16 = dat_ytrain['y'] X15 = dat15.drop(['y'], axis=1) y15 = dat15['y'] fitted = outcomes[-2][3].fit(X15, y15) predicted = fitted.predict(X16) pred = pd.DataFrame(predicted, columns=['predicted']) dat16 = dat16.reset_index() pred['y'] = dat16['y'] def flagger_ranges(pred): pred['flag15'] = 0 pred['flag15'][pred['predicted'].between(pred['y']*0.85, pred['y']*1.15) ] = 1 pred['flag05'] = 0 pred['flag05'][pred['predicted'].between(pred['y']*0.85, pred['y']*1.15) ] = 1 pred['flag10'] = 0 pred['flag10'][pred['predicted'].between(pred['y']*0.85, pred['y']*1.15) ] = 1 pred['flag_others']= 0 pred['flag_others'][pred['flag05'] == 0] = 1 return pred pred = flagger_ranges(pred) pred """ Explanation: Everything below is exploratory analysis for me. End of explanation """
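One possible refinement to explore from here is replacing the single date-based holdout with rolling, time-ordered cross-validation. A minimal sketch, assuming the data frame defined above; the number of splits and the choice of regressor are arbitrary:

```python
from sklearn.model_selection import TimeSeriesSplit, cross_val_score

tscv = TimeSeriesSplit(n_splits=5)
data_sorted = data.sort_values('month')
scores = cross_val_score(GradientBoostingRegressor(),
                         data_sorted.drop(['y'], axis=1),
                         data_sorted['y'],
                         cv=tscv, scoring='r2')
print(scores)   # one R2 per forward-chained fold
```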
ARM-software/lisa
ipynb/deprecated/examples/trappy/custom_events_example.ipynb
apache-2.0
import logging from conf import LisaLogging LisaLogging.setup() # Generate plots inline %matplotlib inline import copy import json import os import time import math import logging # Support to access the remote target import devlib from env import TestEnv # Support to configure and run RTApp based workloads from wlgen import RTA # Support for performance analysis of RTApp workloads from perf_analysis import PerfAnalysis # Support for trace events analysis from trace import Trace # Suport for FTrace events parsing and visualization import trappy """ Explanation: TRAPpy custom events Detailed information on Trappy can be found at examples/trappy/trappy_example.ipynb. End of explanation """ # Setup a target configuration my_target_conf = { # Define the kind of target platform to use for the experiments "platform" : 'linux', # Linux system, valid other options are: # android - access via ADB # linux - access via SSH # host - direct access # Preload settings for a specific target "board" : 'juno', # juno - JUNO board with mainline hwmon # Define devlib module to load "modules" : [ 'bl', # enable big.LITTLE support 'cpufreq' # enable CPUFreq support ], # Account to access the remote target "host" : '192.168.0.1', "username" : 'root', "password" : 'juno', # Comment the following line to force rt-app calibration on your target "rtapp-calib" : { '0': 361, '1': 138, '2': 138, '3': 352, '4': 360, '5': 353 } } # Setup the required Test Environment supports my_tests_conf = { # Binary tools required to run this experiment # These tools must be present in the tools/ folder for the architecture "tools" : ['trace-cmd'], # FTrace events buffer configuration # events listed here MUST be "ftrace" : { ############################################################################## # EVENTS SPECIFICATIPON ############################################################################## # Here is where we specify the list of events we are interested into: # Events are of two types: # 1. FTrace tracepoints that _must_ be supported by the target's kernel in use. # These events will be enabled at ftrace start time, thus if the kernel does # not support one of them, ftrace starting will fails. "events" : [ "sched_switch", "cpu_frequency", ], # 2. FTrace events generated via trace_printk, from either kernel or user # space. These events are different from the previous because they do not # need to be explicitely enabled at ftrace start time. # It's up to the user to ensure that the generated events satisfies these # formatting requirements: # a) the name must be a unique word into the trace # b) values must be reported as a sequence of key=value paires # For example, a valid custom event string is: # my_math_event: kay1=val1 key2=val2 key3=val3 "custom" : [ "my_math_event", ], # For each of these events, TRAPpy will generate a Pandas dataframe accessible # via a TRAPpy::FTrace object, whith the same name of the event. # Thus for example, ftrace.my_math_event will be the object exposing the # dataframe with all the event matching the "my_math_event" unique word. 
############################################################################## "buffsize" : 10240, }, } # Initialize a test environment using: # - the provided target configuration (my_target_conf) # - the provided test configuration (my_test_conf) te = TestEnv(target_conf=my_target_conf, test_conf=my_tests_conf) target = te.target logging.info("Target ABI: %s, CPus: %s", target.abi, target.cpuinfo.cpu_names) """ Explanation: Test environment setup For more details on this please check out examples/utils/testenv_example.ipynb. End of explanation """ # Define the format string for the custom events we will inject from user-space my_math_event_fmt = "my_math_event: sin={} cos={}" # Start FTrace te.ftrace.start() # Let's generate some interesting "custom" events from userspace logging.info('Generating events from user-space (will take ~140[s])...') for angle in range(360): v_sin = int(1e6 * math.sin(math.radians(angle))) v_cos = int(1e6 * math.cos(math.radians(angle))) my_math_event = my_math_event_fmt.format(v_sin, v_cos) # custom events can be generated either from userspace, like in this # example, or also from kernelspace (using a trace_printk call) target.execute('echo {} > /sys/kernel/debug/tracing/trace_marker'\ .format(my_math_event)) # Stop FTrace te.ftrace.stop() # Collect the generate trace trace_file = '/tmp/trace.dat' te.ftrace.get_trace(trace_file) # Parse trace events_to_parse = my_tests_conf['ftrace']['events'] + my_tests_conf['ftrace']['custom'] trace = Trace('/tmp', events_to_parse, te.platform) """ Explanation: Example of custom event definition End of explanation """ # Get the TRAPpy FTrace object which has been generated from the trace parsing ftrace = trace.ftrace # The FTrace object allows to verify which (of the registered) events have been # identified into the trace logging.info("List of events identified in the trace:\n%s", ftrace.class_definitions.keys()) # Each event identified in the trace is appended to a table (i.e. data_frame) # which has the same name of the event logging.info("First 10 events of our 'my_math_event' custom event:") ftrace.my_math_event.data_frame.head(10) logging.info("First 10 events of our 'cpu_frequency' tracepoint:") ftrace.cpu_frequency.data_frame.head(10) """ Explanation: Inspection of the generated TRAPpy FTrace object End of explanation """ # It is possible to mix in the same plot tracepoints and custom events # The LinePlot module requires to specify a list of signals to plot. # Each signal is defined as: # <event>:<column> # where: # <event> is one of the events collected from the trace by the FTrace object # <column> is one of the column of the previously defined event my_signals = [ 'cpu_frequency:frequency', 'my_math_event:sin', 'my_math_event:cos' ] # These two paramatere are passed to the LinePlot call as long with the # TRAPpy FTrace object trappy.LinePlot( ftrace, # FTrace object signals=my_signals, # Signals to be plotted drawstyle='steps-post', # Plot style options marker = '+' ).view() """ Explanation: Plotting tracepoint and/or custom events End of explanation """
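Since each parsed event table is a plain pandas DataFrame, derived signals can be added with ordinary pandas operations before plotting. A minimal, self-contained sketch that mimics the sin/cos columns of ftrace.my_math_event.data_frame with a toy frame; on a real trace you would operate on that DataFrame directly:

```python
import numpy as np
import pandas as pd

# toy stand-in for ftrace.my_math_event.data_frame
angles = np.arange(360)
toy = pd.DataFrame({'sin': (1e6 * np.sin(np.radians(angles))).astype(int),
                    'cos': (1e6 * np.cos(np.radians(angles))).astype(int)})
toy['magnitude'] = np.sqrt(toy.sin**2 + toy.cos**2) / 1e6   # should stay close to 1.0
print(toy.head())
```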
mne-tools/mne-tools.github.io
0.18/_downloads/9794ea6d3b7fc21947e9529fb55249c9/plot_read_proj.ipynb
bsd-3-clause
# Author: Joan Massich <[email protected]> # # License: BSD (3-clause) import matplotlib.pyplot as plt import mne from mne import read_proj from mne.io import read_raw_fif from mne.datasets import sample print(__doc__) data_path = sample.data_path() subjects_dir = data_path + '/subjects' fname = data_path + '/MEG/sample/sample_audvis_raw.fif' ecg_fname = data_path + '/MEG/sample/sample_audvis_ecg-proj.fif' """ Explanation: ============================================== Read and visualize projections (SSP and other) ============================================== This example shows how to read and visualize Signal Subspace Projectors (SSP) vector. Such projections are sometimes referred to as PCA projections. End of explanation """ raw = read_raw_fif(fname) empty_room_proj = raw.info['projs'] # Display the projections stored in `info['projs']` from the raw object raw.plot_projs_topomap() """ Explanation: Load the FIF file and display the projections present in the file. Here the projections are added to the file during the acquisition and are obtained from empty room recordings. End of explanation """ fig, axes = plt.subplots(1, len(empty_room_proj)) for proj, ax in zip(empty_room_proj, axes): proj.plot_topomap(axes=ax) """ Explanation: Display the projections one by one End of explanation """ assert isinstance(empty_room_proj, list) mne.viz.plot_projs_topomap(empty_room_proj) """ Explanation: Use the function in mne.viz to display a list of projections End of explanation """ # read the projections ecg_projs = read_proj(ecg_fname) # add them to raw and plot everything raw.add_proj(ecg_projs) raw.plot_projs_topomap() """ Explanation: As shown in the tutorial on how to tut-viz-raw the ECG projections can be loaded from a file and added to the raw object End of explanation """ fig, axes = plt.subplots(1, len(ecg_projs)) for proj, ax in zip(ecg_projs, axes): if proj['desc'].startswith('ECG-eeg'): proj.plot_topomap(axes=ax, info=raw.info) else: proj.plot_topomap(axes=ax) """ Explanation: Displaying the projections from a raw object requires no extra information since all the layout information is present in raw.info. MNE is able to automatically determine the layout for some magnetometer and gradiometer configurations but not the layout of EEG electrodes. Here we display the ecg_projs individually and we provide extra parameters for EEG. (Notice that planar projection refers to the gradiometers and axial refers to magnetometers.) Notice that the conditional is just for illustration purposes. We could raw.info in all cases to avoid the guesswork in plot_topomap and ensure that the right layout is always found End of explanation """ possible_layouts = [mne.find_layout(raw.info, ch_type=ch_type) for ch_type in ('grad', 'mag', 'eeg')] mne.viz.plot_projs_topomap(ecg_projs, layout=possible_layouts) """ Explanation: The correct layout or a list of layouts from where to choose can also be provided. Just for illustration purposes, here we generate the possible_layouts from the raw object itself, but it can come from somewhere else. End of explanation """
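Projections stored in raw.info remain inactive until they are applied. A minimal sketch of applying them to a copy of the data, assuming the raw object and projections loaded above (apply_proj modifies the data, which is why we work on a copy):

```python
raw_applied = raw.copy().load_data().apply_proj()
print(raw_applied.info['projs'])   # the projection items are now marked as active
```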
Kappa-Dev/ReGraph
examples/Tutorial_NetworkX_backend/Part2_hierarchies.ipynb
mit
from regraph import NXGraph, NXHierarchy, Rule from regraph import plot_graph, plot_instance, plot_rule %matplotlib inline """ Explanation: ReGraph tutorial (NetworkX backend) Part 2: Rewriting hierarchies of graph ReGraph allows to create a hierarchies of graphs related by means of homomorphisms (or typing). In the context of a hierarchy, if there exists a homomorphism $G \rightarrow T$, we say that the graph $G$ is typed by the graph $T$. Graph hierarchy is a DAG, where nodes are graphs and edges are homomorphisms. A homomorphism maps every node of $G$ to some node in $T$ (a type) in such a way that: - edges are preserved - attributes of both nodes and edges are preserved End of explanation """ # Define graph G g = NXGraph() g.add_nodes_from(["protein", "binding", "region", "compound"]) g.add_edges_from([("region", "protein"), ("protein", "binding"), ("region", "binding"), ("compound", "binding")]) # Define graph T t = NXGraph() t.add_nodes_from(["action", "agent"]) t.add_edges_from([("agent", "agent"), ("agent", "action")]) # Create a hierarchy simple_hierarchy = NXHierarchy() simple_hierarchy.add_graph("G", g, {"name": "Simple protein interaction"}) simple_hierarchy.add_graph("T", t, {"name": "Agent interaction"}) simple_hierarchy.add_typing( "G", "T", {"protein": "agent", "region": "agent", "compound": "agent", "binding": "action", } ) print(simple_hierarchy) """ Explanation: 1. Creating and modifying a hierarchy object Consider the following example of a simple graph hierarchy. The two graphs $G$ and $T$ are being created and added to the heirarchy. Afterwards a typing homomorphism between $G$ and $T$ is added, so that every node of $G$ is typed by some node in $T$. End of explanation """ type(simple_hierarchy.get_graph("T")) """ Explanation: The method get_graph returns the graph object corresponding to the provided graph id. End of explanation """ simple_hierarchy.get_typing("G", "T") t_node_positions = plot_graph(simple_hierarchy.get_graph("T")) g_node_positions = plot_graph(simple_hierarchy.get_graph("G")) """ Explanation: The method get_typing returns the dictionary object corresponding to the provided hierarchy edge and representing the associated graph homomorphism. End of explanation """ lhs = NXGraph() lhs.add_nodes_from([1, 2]) lhs.add_edges_from([(1, 2)]) p = NXGraph() p.add_nodes_from([1, 2]) p.add_edges_from([]) rhs = NXGraph() rhs.add_nodes_from([1, 2, 3]) rhs.add_edges_from([(3, 1), (3, 2)]) # By default if `p_lhs` and `p_rhs` are not provided # to a rule, it tries to construct this homomorphisms # automatically by matching the names. In this case we # have defined lhs, p and rhs in such a way that that # the names of the matching nodes correspond rule = Rule(p, lhs, rhs) plot_rule(rule) """ Explanation: 2. Rewriting of objects in a hierarchy ReGraph implements the rewriting technique called sesqui-pushout rewriting that allows to transform graphs by applying rules through their instances (matchings). Rewriting an individual graphs in a hierarchy may require an update of other graphs and typings in this hierarchy, such updates are called propagation and are distinguished into two types: backward and forward propagation. Backward propagation briefly: - If some graph elements (nodes/edges or attributes) are removed from a graph in the hierarchy, then all the respective elements that are typed by them in the ancestor graphs should be removed. 
- If a graph node is cloned, then for every instance of this node (every node that is typed by the clonned node) in the ancestor graphs we either: (a) specify to which clone it corresponds or (b) clone it. Forward propagation briefly: - If some graph nodes are merged and these nodes are typed by different nodes in a descendant graph, the corresponding nodes in the descendant graph should be merged. - If a new graph element (node/edge or attribute) is added, then for all the descendent graphs in the hierarchy we either (a) select an existing element to type the added element or (b) add a new element to type the added element. For more details, please see here. ReGraph allows to rewrite individual graphs situated in the hierarchy using the method rewrite of NXHierarchy. The rewriting can be done in two modes: Strict rewriting rewriting that does not allow propagation. Not strict rewriting that allows propagation. The rewrite takes as the input the following parameters: graph_id, ID of the graph in the hierarchy to rewrite, rule, a rule object to apply, instance, a dictionary containing an instance of the lhs of the rule in the graph subject to rewriting, by default, tries to construct identity morphism of the nodes of the pattern, p_typing, a dictionary containing typing of graphs in the hierarchy by the interface of the rule, keys are IDs of hierarchy graphs, values are dictionaries containing the mapping of nodes from the hierarchy graphs to the inteface nodes (note that a node from a graph can be typed by a set of nodes in the interface of the rule, e.g. if we want to perform cloning of some types, etc). rhs_typing, a dictionary containing typing of the rhs by graphs of the hierarchy, keys are ids of hierarchy graphs, values are dictionaries containing the mapping of nodes from the lhs to the nodes of the typing graph given by the respective key of the value (note that a node from the rhs can be typed by a set of nodes of some graph, e.g. if we want to perform merging of some types, etc), strict, flag indicating if rewriting is strict, then any propagation is not allowed. 2.1. Strict rewriting Let us create a Rule object containing a rule we would like to apply. End of explanation """ instances = simple_hierarchy.find_matching("G", rule.lhs) print("Instances: ", instances) for instance in instances: plot_instance( simple_hierarchy.get_graph("G"), rule.lhs, instance, parent_pos=g_node_positions) #filename=("instance_example_%d.png" % i)) """ Explanation: The created rule removes the edge 1-&gt;2, adds the new node 3 and two edges 3-&gt;1 and 3-&gt;2. Let us find instances of the created rule in the graph G. End of explanation """ instance = { 1: "protein", 2: "binding" } """ Explanation: Let us fix the desired instance: we would like to remove the edge from protein to binding and add some new node connecting them. End of explanation """ try: rhs_instance = simple_hierarchy.rewrite("G", rule, instance, strict=True) except Exception as e: print("Error message: ", e) print("Type: ", type(e)) """ Explanation: Let us try to apply the rule to the selected instance as is in the strict rewriting mode. 
End of explanation """ rhs_typing = { "T": {3: "agent"} } rhs_instance = simple_hierarchy.rewrite( "G", rule, instance, rhs_typing=rhs_typing, strict=True) print("Instance of the RHS in G", rhs_instance) plot_instance( simple_hierarchy.get_graph("G"), rule.rhs, rhs_instance, parent_pos=g_node_positions) """ Explanation: We have failed to rewrite G, because we have not specified typing for the newly added node 3. Let us try again, but this time we will prove such typing. End of explanation """ lhs = NXGraph() lhs.add_nodes_from(["agent"]) rule = Rule.from_transform(lhs) _, rhs_clone = rule.inject_clone_node("agent") plot_rule(rule) instance = { "agent": "agent" } """ Explanation: We will now create a rule that applied to T and that clones the node agent into two nodes. End of explanation """ try: rhs_instance = simple_hierarchy.rewrite("T", rule, instance, strict=True) except Exception as e: print("Error message: ", e) print("Type: ", type(e)) """ Explanation: We try to apply the created rule to the graph T in the strict mode. End of explanation """ p_typing = { "G": { 'protein': 'agent', 'region': 'agent', 'compound': rhs_clone, 3: 'agent' } } rhs_instance = simple_hierarchy.rewrite("T", rule, instance, p_typing=p_typing, strict=True) print("Instance of the RHS in G", rhs_instance) plot_instance( simple_hierarchy.get_graph("T"), rule.rhs, rhs_instance, parent_pos=t_node_positions) """ Explanation: We have failed to rewrite T, because we have not specified typing for instances of agent in $p$. Let us try again, but this time we will prove such typing. End of explanation """ simple_hierarchy.relabel_graph_node('T', rhs_instance['agent'], 'organic_agent') simple_hierarchy.relabel_graph_node('T', rhs_instance[rhs_clone], 'non_organic_agent') plot_graph(simple_hierarchy.get_graph('T')) print(simple_hierarchy.get_typing("G", "T")) """ Explanation: Let us relabel nodes in T. 
End of explanation """ hierarchy = NXHierarchy() colors = NXGraph() colors.add_nodes_from([ "green", "red" ]) colors.add_edges_from([ ("red", "green"), ("red", "red"), ("green", "green") ]) hierarchy.add_graph("colors", colors) shapes = NXGraph() shapes.add_nodes_from(["circle", "square"]) shapes.add_edges_from([ ("circle", "square"), ("square", "circle"), ("circle", "circle") ]) hierarchy.add_graph("shapes", shapes) quality = NXGraph() quality.add_nodes_from(["good", "bad"]) quality.add_edges_from([ ("bad", "bad"), ("bad", "good"), ("good", "good") ]) hierarchy.add_graph("quality", quality) g1 = NXGraph() g1.add_nodes_from([ "red_circle", "red_square", ]) g1.add_edges_from([ ("red_circle", "red_square"), ("red_circle", "red_circle"), ("red_square", "red_circle") ]) g1_colors = { "red_circle": "red", "red_square": "red", } g1_shapes = { "red_circle": "circle", "red_square": "square", } hierarchy.add_graph("g1", g1) hierarchy.add_typing("g1", "colors", g1_colors) hierarchy.add_typing("g1", "shapes", g1_shapes) g2 = NXGraph() g2.add_nodes_from([ "good_circle", "good_square", "bad_circle", ]) g2.add_edges_from([ ("good_circle", "good_square"), ("good_square", "good_circle"), ("bad_circle", "good_circle"), ("bad_circle", "bad_circle"), ]) g2_shapes = { "good_circle": "circle", "good_square": "square", "bad_circle": "circle" } g2_quality = { "good_circle": "good", "good_square": "good", "bad_circle": "bad", } hierarchy.add_graph("g2", g2) hierarchy.add_typing("g2", "shapes", g2_shapes) hierarchy.add_typing("g2", "quality", g2_quality) g3 = NXGraph() g3.add_nodes_from([ "good_red_circle", "bad_red_circle", "good_red_square", ]) g3.add_edges_from([ ("bad_red_circle", "good_red_circle"), ("good_red_square", "good_red_circle"), ("good_red_circle", "good_red_square") ]) g3_g1 = { "good_red_circle": "red_circle", "bad_red_circle": "red_circle", "good_red_square": "red_square" } g3_g2 = { "good_red_circle": "good_circle", "bad_red_circle": "bad_circle", "good_red_square": "good_square", } hierarchy.add_graph("g3", g3) hierarchy.add_typing("g3", "g1", g3_g1) hierarchy.add_typing("g3", "g2", g3_g2) for graph in hierarchy.graphs(): print("Graph ", graph) plot_graph(hierarchy.get_graph(graph)) print(hierarchy) """ Explanation: 2.2. Rewriting and propagation To illustrate rewriting with propagation, let us consider a slighlty more sophisticated hierarchy. 
End of explanation """ print("Node types in G3:\n") for node in hierarchy.get_graph("g3").nodes(): print(node, hierarchy.node_type("g3", node)) """ Explanation: Some of the graphs in the hierarchy are now typed by multiple graphs, which is reflected in the types of nodes, as in the example below: End of explanation """ lhs = NXGraph() lhs.add_nodes_from([1, 2]) lhs.add_edges_from([(1, 2)]) p = NXGraph() p.add_nodes_from([1, 11, 2]) p.add_edges_from([(1, 2)]) rhs = NXGraph.copy(p) rhs.add_nodes_from([3]) p_lhs = {1: 1, 11: 1, 2: 2} p_rhs = {1: 1, 11: 11, 2: 2} r1 = Rule(p, lhs, rhs, p_lhs, p_rhs) hierarchy.add_rule("r1", r1, {"desc": "Rule 1: typed by two graphs"}) lhs_typing1 = {1: "red_circle", 2: "red_square"} rhs_typing1 = {3: "red_circle"} lhs_typing2 = {1: "good_circle", 2: "good_square"} rhs_typing2 = {3: "bad_circle"} hierarchy.add_rule_typing("r1", "g1", lhs_typing1, rhs_typing1) hierarchy.add_rule_typing("r1", "g2", lhs_typing2, rhs_typing2) plot_rule(hierarchy.get_rule('r1')) g1_lhs_typing, g1_rhs_typing = hierarchy.get_typing('r1', 'g1') g2_lhs_typing, g2_rhs_typing = hierarchy.get_typing('r1', 'g2') print("Typing of R1 by G1: ") print("\tLHS", g1_lhs_typing) print("\tP (is implicit)") print("\tRHS", g1_rhs_typing) print("Typing of R1 by G2: ") print("\tLHS", g2_lhs_typing) print("\tP (is implicit)") print("\tRHS", g2_rhs_typing) """ Explanation: NB: Rules as nodes of a hierarchy Having constructed a sophisticated rewriting rule typed by some nodes in the hierarchy one may want to store this rule and to be able to propagate any changes that happen in the hierarchy to the rule as well. ReGraph's NXHierarchy allows to add graph rewriting rules as nodes in the hierarchy. Rules in the hierarchy can be typed by graphs, but rule nodes are not allowed to have incoming edges, i.e. nothing can be typed by a rule. In the example below, a rule is added to the previously constructed hierarchy and typed by graphs g1 and g2: End of explanation """ lhs = NXGraph() lhs.add_nodes_from(["a", "b"]) lhs.add_edges_from([ ("a", "b"), ("b", "a") ]) p = NXGraph() p.add_nodes_from(["a", "a1", "b"]) p.add_edges_from([ ("a", "b"), ("a1", "b") ]) rhs = NXGraph.copy(p) rule = Rule( p, lhs, rhs, {"a": "a", "a1": "a", "b": "b"}, {"a": "a", "a1": "a1", "b": "b"}, ) plot_rule(rule) """ Explanation: 2.3. Rewriting and propagation We now show how graph rewriting can be performed in such an hierarchy. In the previous example we perfromed strict rewriting in a hierarchy, where no propagation was performed. The following example illustrates how the ReGraph propagates the changes made by rewriting on any level to all the graphs (as well as the rules) typed by the one target of rewriting. End of explanation """ instances = hierarchy.find_matching("shapes", lhs) print("Instances:") for instance in instances: print(instance) plot_instance(hierarchy.get_graph("shapes"), rule.lhs, instance) """ Explanation: We have created a rule that clones the node a and reconnects the edges between a and b. End of explanation """ rhs_instances = hierarchy.rewrite("shapes", rule, {"a": "circle", "b": "square"}) """ Explanation: We rewrite the graph shapes with the fixed instances (so, the node circle is cloned). 
End of explanation """ for graph in hierarchy.graphs(): print("Graph ", graph) plot_graph(hierarchy.get_graph(graph)) """ Explanation: Observe the following plots, the cloning of circle was propagated to all the ancestors of shapes, because we didn't specify how to retype intances of circle for these ancestors using the p_typing parameter. This is an example of previously mentioned backward propagation. End of explanation """ plot_rule(hierarchy.get_rule('r1')) """ Explanation: Even the rule r1 was affected as the result of propagation, all its circle nodes were cloned. End of explanation """ pattern = NXGraph() pattern.add_nodes_from(["a", "b"]) rule = Rule.from_transform(pattern) rhs_node = rule.inject_merge_nodes(["a", "b"]) rule.inject_add_node("c") rule.inject_add_edge("c", rhs_node) instance = { "a": "good_circle", "b": "bad_circle", } old_position = plot_instance(hierarchy.get_graph("g2"), rule.lhs, instance) plot_rule(rule) """ Explanation: Let us now consider a small example of forward propagation. We will create a rule that performs some additions and merges of nodes. End of explanation """ rhs_typing = { "shapes": { "c": "circle" } } rhs_instance = hierarchy.rewrite("g2", rule, instance, rhs_typing=rhs_typing) """ Explanation: Application of this rule will merge nodes bad_circle and good_circle in the graph g2. It with then add a new node and connect it with an edge to the merged node. Let us specify some typings of the new node in the RHS: we will set the new node to be typed as circle in the graph shapes. End of explanation """ for graph in hierarchy.graphs(): print("Graph ", graph) plot_graph(hierarchy.get_graph(graph)) """ Explanation: Observe the following graphs, as the resule of forward propagation nodes good and bad were merged in the graph qualities. In addition, a new node typing the node c in the rule was added to the graph qualities. End of explanation """ hierarchy_json = hierarchy.to_json() import json print(json.dumps(hierarchy_json, indent=" ")) new_hierarchy = NXHierarchy.from_json(hierarchy_json) new_hierarchy == hierarchy """ Explanation: 3. Serializing hierarchy object Because NetworkX graphs are in-memory objects, they are destroyed as soon as the Python application is no longer running. ReGraph provides some utils for serialization of NXHierarchy objects and implements the following methods for loading and exporting your hierarchy in JSON-format: NXHierarchy.to_json creates a json representations of the hierarchy; NXHierarchy.from_json loads an hierarchy from json representation (returns new Hierarchy object); NXHierarchy.export exports the hierarchy to a file (json format); NXHierarchy.load loads an hierarchy from a .json file (returns new object as well). End of explanation """
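A small editor-added round-trip sketch using the file-based helpers listed above. The file name is arbitrary, and the exact signatures of `export` and `load` are assumed from their one-line descriptions; check the ReGraph documentation if they differ.

# Write the hierarchy to disk and load it back (file name chosen for illustration)
hierarchy.export("hierarchy_backup.json")
restored_hierarchy = NXHierarchy.load("hierarchy_backup.json")

# The restored object should compare equal, as in the to_json/from_json round trip above
print(restored_hierarchy == hierarchy)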
msampathkumar/data_science_sessions
QuickBasics/Introduction_to_LinkedList.ipynb
mit
class Node: def __init__(self, value: int): # print('push elem', value) self.value = value self.next = None def __repr__(self): is_next = False if self.next: is_next = True # return '<Node val(%s) know(%s)>' % (self.value, id(self.next)) return '<Node val(%s) know(%s)>' % (self.value, self.next.value) return '<Node val(%s)>' % (self.value) def copy(self): return Node(self.value) class LinkedList: def __init__(self): self.head = None def push(self, value: int): "Put new value in the first" tmp = Node(value) if not self.head: self.head = tmp else: tmp.next = self.head self.head = tmp def pop(self): "Give the first value out" if self.head: tmp = self.head.value self.head = self.head.next return tmp def append(self, value): "Put the new value in the last" tmp = Node(value) if not self.head: self.head = tmp else: last = self.head while last.next: last = last.next last.next = tmp def first(self): "give the first value in the list" pass def last(self): "give the first value in the list" pass def print(self): tmp = self.head ct = 0 print('=' * 8) while tmp: print(' ', tmp.value) ct += 1 tmp = tmp.next print('=' * 8, 'tot elems', ct) def delete(self, value): first = self.head if first and first.value == value: self.head = self.head.next print('deleted !', value) return while first.next: if first.next.value == value: first.next = first.next.next print('deleted !', value) return first = first.next def position(self, pos, d=False): tmp = self.head ct = 0 while tmp: if ct == pos: return tmp.value ct += 1 tmp = tmp.next def del_position(self, pos): tmp = self.head ct = 0 if pos == 0: self.head = self.head.next print('\t') while tmp.next: ct += 1 if ct == pos: print('\t->', tmp.next.value) tmp.next = tmp.next.next return ll = LinkedList() ll.push(12) ll.push(11) ll.push(10) ll.print() assert ll.position(1) == 11 assert ll.position(0) == 10 assert ll.position(2) == 12 assert ll.position(3) == None ll.append(13) assert ll.position(3) == 13 ll.position(3) ll = LinkedList() ll.append(12) ll.append(13) ll.append(14) ll.append(15) ll.append(16) ll.append(17) ll.print() ll.del_position(4) ll.print() assert ll.position(1) == 14 ll.delete(12) ll.print() ll.delete(14) assert ll.position(0) == 15 ll.print() ll.delete(17) ll.print() """ Explanation: linked list Implement a simple linked list with following basic operations print push pop append End of explanation """ class NewLinkedList(LinkedList): def custom_swap_values(self): first = self.head second = self.head.next while first and second: first.value, second.value = second.value, first.value try: first = first.next.next second = second.next.next except AttributeError: break def custom_swap_nodes(self): if not (self.head and self.head.next): return # adding a temp node. 
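        # (Editor's note) The temporary "dummy" head lets the loop below swap the
        # very first pair with the same code as any other pair; it is removed
        # again at the end via `self.head = self.head.next`.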
temp_node = Node(-1) temp_node.next = self.head self.head = temp_node # zero -> first -> second -> third zero_node = temp_node # just created ct = 0 while zero_node.next and zero_node.next.next: if ct < 5: ct += 1 else: break # zero -> first -> second -> third first_node = zero_node.next # required second_node = zero_node.next.next # required third_node = zero_node.next.next.next # can be null # print(zero_node, first_node, second_node, third_node) # zero -> second zero_node.next = second_node second_node.next = first_node first_node.next = third_node # next iteration zero_node = zero_node.next.next self.head = self.head.next ll = NewLinkedList() for i in range(1, 9): ll.append(i) ll.print() ll.custom_swap_nodes() # print('--' * 5) ll.print() """ Explanation: Linked List Swap adjuscent elements with 1 unit gaps. like swap 1,2 and 3,4 and 5, 6 and so,.. * Inputs: 1, 2, 3, 4, 5, 6, ... * Exp. Output: 2, 1, 4, 3, 6, 5, ... End of explanation """ ll = LinkedList() ll.append(5) ll.append(4) ll.append(3) ll.append(2) ll.append(1) # ll.append(11) # ll.append(12) # ll.append(14) # ll.append(15) ll.print() def ll_merge_sort(head_node): print('---' * 5) ct = 0 node1 = head_node # check length while node1: ct += 1 node1 = node1.next # single node ll is always sorted by itself if ct < 2: return head_node # splitting nodes if more nodes are there node1 = head_node tmp = head_node for i in range(ct -1): tmp = tmp.next node2 = tmp.next tmp.next = None # sorting new_nodes = None while node1 or node2: if node1.value > node2.value: if not new_nodes: new_nodes = node1 tmp = new_nodes else: tmp.next = node1 node1 = node1.next else: if not new_nodes: new_nodes = node2 tmp = new_nodes else: tmp.next = node2 node2 = node2.next if node1 == None: node1 = node2 tmp.next = node1 return new_nodes ll.print() ll.print() """ Explanation: MergeSort Linked List End of explanation """ def merge_sort(mylist): l = len(mylist) if l <= 1: return mylist l1 = merge_sort(mylist[:int(l/2)]) l2 = merge_sort(mylist[int(l/2):]) new_list = [] while l1 and l2: if l1[0] > l2[0]: new_list.append(l1[0]) del l1[0] else: new_list.append(l2[0]) del l2[0] if len(l1) == 0: l1 = l2 del l2 while l1: new_list.append(l1[0]) del l1[0] return new_list mylist = [4, 1, 2, 3] print(mylist, "\t\t\t\t-->", merge_sort(mylist)) mylist = [4, 6, 7, 2, 9, 1, 2, 3] print(mylist, "\t\t-->", merge_sort(mylist)) del mylist ll = LinkedList() ll.append(2) ll.append(5) ll.append(3) ll.append(4) ll.append(1) ll.print() """ Explanation: tips learn to put things to paper End of explanation """
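The ll_merge_sort sketch above appears unfinished: its split leaves the second half empty and it never recurses, so it cannot sort the whole chain. Below is one possible editor-added sketch of a complete merge sort over the Node chain, not the author's solution, sorting largest-first to match the list-based merge_sort above.

def ll_merge_sort_desc(head):
    # Recursively merge-sort a chain of Node objects, largest value first.
    # Base case: an empty or single-node chain is already sorted.
    if head is None or head.next is None:
        return head

    # Split the chain into two halves with the slow/fast pointer trick.
    slow, fast = head, head.next
    while fast and fast.next:
        slow = slow.next
        fast = fast.next.next
    second = slow.next
    slow.next = None

    left = ll_merge_sort_desc(head)
    right = ll_merge_sort_desc(second)

    # Merge the two sorted halves, picking the larger value first.
    dummy = Node(-1)          # same dummy-head trick as in custom_swap_nodes
    tail = dummy
    while left and right:
        if left.value >= right.value:
            tail.next, left = left, left.next
        else:
            tail.next, right = right, right.next
        tail = tail.next
    tail.next = left if left else right
    return dummy.next

ll = LinkedList()
for v in (2, 5, 3, 4, 1):
    ll.append(v)
ll.head = ll_merge_sort_desc(ll.head)
ll.print()   # expected order: 5, 4, 3, 2, 1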
evanmiltenburg/python-for-text-analysis
Chapters-colab/Chapter_04_Boolean_Expressions_and_Conditions.ipynb
apache-2.0
%%capture !wget https://github.com/cltl/python-for-text-analysis/raw/master/zips/Data.zip !wget https://github.com/cltl/python-for-text-analysis/raw/master/zips/images.zip !wget https://github.com/cltl/python-for-text-analysis/raw/master/zips/Extra_Material.zip !unzip Data.zip -d ../ !unzip images.zip -d ./ !unzip Extra_Material.zip -d ../ !rm Data.zip !rm Extra_Materil.zip !rm images.zip """ Explanation: <a href="https://colab.research.google.com/github/cltl/python-for-text-analysis/blob/colab/Chapters-colab/Chapter_04_Boolean_Expressions_and_Conditions.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> End of explanation """ print(type('this is a string')) print(type(101)) print(type(0.8)) """ Explanation: Chapter 4 - Boolean Expressions and Conditions This notebook uses code snippets and explanations from this course. So far, we have learned how to use Python as a basic calculator and how to store information in variables. Now we will set the first steps to an actual useful program. A lot of programming has to do with executing code if a particular condition holds. This enables a program to act upon its inputs. For example: an app on your phone might give a warning if the battery level is lower than 5%. This means that the program needs to check if the variable battery_level is lower than the value of 5. We can do these checks using so called Boolean expressions. These Boolean expressions are the main element in probably one of the most used things in Python: if statements. At the end of this topic, you will be able to: work with and understand boolean expressions work with and understand if statements understand what indentation is understand what nesting is If you want to learn more about these topics, you might find the following links useful: Documentation: Built-in Types (boolean expressions) Video: Python Booleans Video: Conditionals Explanation: if elif else If you have questions about this chapter, please contact us([email protected])**. 1. Boolean expressions A Boolean expression (or simply: boolean) is an expression that results in the type bool in Python. Possible values are either True or False. Boolean expressions are the building blocks of programming. Any expression that results in True or False can be considered a Boolean expression. So far you've mainly seen: End of explanation """ print(type(False)) print(type(True)) """ Explanation: Now we're introducing: End of explanation """ print(2 < 5) print(2 <= 5) print(3 > 7) print(3 >= 7) print(3 == 3) print("school" == "school") print("Python" != "SPSS") """ Explanation: 1.1 Comparison operators Here is a list of comparison operators used in Boolean expressions: (You have already used these operators in the previous chapters, but we are treating them in more detail here.) | Operator | Meaning | True | False | |-----------|--------|--------|--------| | == | equal | 2 == 2 | 2 == 3 | | != | not equal | 3 != 2 | 2 != 2 | | &lt; | less than | 2 &lt; 13 | 2 &lt; 2 | | &lt;= | less than or equal to | 2 &lt;= 2 | 3 &lt;= 2 | | &gt; | greater than | 13 &gt; 2 | 2 &gt; 13 | | &gt;= | greather than or equal to | 3 &gt;= 3 | 2 &gt;= 3 | Remember that the single = is reserved for assignment! Boolean expressions look at variables but never change them. End of explanation """ greater = 5 > 2 print(greater, type(greater)) greater = 5 < 2 print(greater, type(greater)) """ Explanation: The relevant 'logical operators' that we used here are: <, <=, >,>=,==,!=. 
In Python-speak, we say that such a logical expression gets 'evaluated' when you run the code. The outcome of such an evaluation is a 'binary value' or a so-called 'boolean' that can take only two possible values: True or False. You can assign such a boolean to a variable: End of explanation """ print(5 == 5) print(5 == 4) print(10 < 20) print(10 < 8) print(10 < 10) print(10 <= 10) print(20 >= 21) print(20 == 20) print(1 == '1') print(1 != 2) boolean_expression = 5 == 4 print(boolean_expression) """ Explanation: Let's look at some examples. Try to guess the output based on the information about the operators in the table above. Hence, will the expression result in True or False in the following examples? End of explanation """ print("fun" in "function") print("pie" in "python") """ Explanation: 1.2 Membership operators Python also has so-called membership operators: | Operator | function | True | False | |-----------|--------|--------|--------| | in | left object is a member of right object | "c" in "cat" | "f" in "cat" | | not in |left object is NOT a member of right object | "f" not in "cat" | "c" not in "cat" | We have already seen the operator in being used for checking whether a string (single or multiple characters) is a substring of another one: End of explanation """ print(5 in 10) """ Explanation: We can only use membership operators with iterables (i.e. python objects that can be split up into smaller components - e.g. characters of a string). The following will therefore not work, because an integer is not iterable: End of explanation """ letters = ['a','b','c','d'] numbers = [1,2,3,4,5] mixed = [1,2,3,'a','b','c'] print('a' in letters) print('g' not in letters) print('d' in mixed) print(1 in numbers) print(3 not in mixed) print('a' not in 'hello world') """ Explanation: However, we can use membership operators with other types of 'containers', such as lists. We will discuss lists in much more detail later on, but they represent ordered sequences of objects like strings, integers or a combination. We can use in and not in to check whether an object is a member of a list: End of explanation """ letters = ['a','b','c','d'] numbers = [1,2,3,4,5] print('a' in letters and 2 in numbers) print("z" in letters and 3 in numbers) print("f" in letters and 0 in numbers) """ Explanation: 1.3 And, or, and not Finally, boolean operations are often performed using the boolean operators and, or and not. 
Given two boolean expressions, bool1 and bool2, this is how they work: | operation | function | True | False | |-----------|--------|----------|---------| | bool1 and bool2 | True if both bool1 and bool2 are True, otherwise False | (5 == 5 and 3 &lt; 5) | (5 == 5 and 3 &gt; 5) | | bool1 or bool2 | True when at least one of the boolean expressions is True, otherwise False | (5 == 5 or 3 &gt; 5) | (5 != 5 or 3 &gt; 5) | | not bool1 | True if bool1 is False, otherwise False | (not 5 != 5) | (not 5 == 5) | Here are some examples of and: End of explanation """ letters = ['a','b','c','d'] numbers = [1,2,3,4,5] print('f' in letters or 2 in numbers) print('a' in letters or 2 in numbers) print('f' in letters or 10 in numbers) """ Explanation: Here are some examples of or: End of explanation """ a_string = "hello" letters = ['a','b','c','d'] numbers = [1,2,3,4,5] print(not a_string.endswith("o")) print(not a_string.startswith("o")) print(not 'x' in letters) print(not 4 == 4) print(not (4 == 4 and "4" == 4)) """ Explanation: Here are some example of not: End of explanation """ print(not 'x' in letters) print('x' not in letters) print(not 4 == 4) print(4 != 4) """ Explanation: Note that for some of these, there are alternative ways of writing them. For example, 'x not in y' and 'not x in y' are identical, and so are 'not x == y' and 'x != y'. For now, it does not really matter which one you use. If you want to read more about it, have a look here and here. End of explanation """ print("test" != "testing" and 1 == 1 and 2 == 2 or 20 in [1, 20, 3, 4,5]) """ Explanation: 1.4 EXTRA: all() and any() Take a look at the following example. Do you think it is clear? End of explanation """ letters = ['a','b','c','d'] numbers = [1,2,3,4,5] list_bools1 = ['a' in letters, 2 in numbers] print(list_bools1) boolean_expression1 = all(list_bools1) print(boolean_expression1) list_bools2 = ['a' in letters, 20 in numbers] print(list_bools2) boolean_expression2 = all(list_bools2) print(boolean_expression2) """ Explanation: Not really, right? Luckily, Python has another trick to deal with this type of examples: [all and any]((https://docs.python.org/3/library/functions.html#all). Given a list of boolean expressions, this is how they work: | operation | function | |-----------|--------| | all | True if all boolean expressions are True, otherwise False | | any | True if at least one boolean expression is True, otherwise False | If you don't completely understand all() and any(), don't worry, you will not necessarily need them right now. They are just a nice alternative to make your code more readable and you may appreciate that in the future. Here are some examples of all(): End of explanation """ list_bools3 = ['f' in letters, 200 in numbers] print(list_bools3) boolean_expression3 = any(list_bools3) print(boolean_expression3) list_bools4 = ['a' in letters, 20 in numbers, 2 in numbers] print(list_bools4) boolean_expression4 = any(list_bools4) print(boolean_expression4) """ Explanation: Here are some examples of any(): End of explanation """ number = 2 # try changing this value to 6 if number <= 5: print(number) """ Explanation: 2. Conditions: if statements You might wonder why we took quite some time explaining boolean expresisons. One of the reasons is that they are the main element in probably one of the most used things in Python: if statements. The following picture explains what happens in an if statement in Python. 
Let's look at an example (modify the value of number to understand what is happening here): End of explanation """ number = 5 if number == 5: print("number equals 5") if number > 4: print("number is greater than 4") if number >= 5: print("number is greater than or equals 5") if number < 6: print("number is less than 6") if number <= 5: print("number is less than or equals 5") if number != 6 : print("number does not equal 6") """ Explanation: You can use as many if statements as you like: End of explanation """ number = 10 # try changing this value to 2 if number <= 5: print(number) else: print('number is higher than 5') """ Explanation: 2.1 Two-way decisions But what if we want to have options for two different scenarios? We could just use a bunch of if statements. However, Python has a more efficient way. Apart from if we also have the else statement for two-way decisions (modify the value of number to understand what is happening here): End of explanation """ age = 21 if age < 12: print("You're still a child!") elif age < 18: print("You are a teenager!") elif age < 30: print("You're pretty young!") else: print("Wow, you're old!") """ Explanation: Now Python always runs one of the two pieces of code. It's like arriving at a fork in the road and choosing one path to follow. 2.2 Multi-way decisions But of course we don't have to stop there. If you have multiple options, you can use the elif statement. For every if block, you can have one if statement, multiple elif statements and one else statement. So now we know the entire if-elif-else construct: End of explanation """ age = 21 if age < 12: print("You're still a child!") if age < 18: print("You are a teenager!") if age < 30: print("You're pretty young!") else: print("Wow, you're old!") """ Explanation: First the if statement will be evaluated. Only if that statement turns out to be False the computer will proceed to evaluate the elif statement. If the elif statements in turn would prove to be False, the machine will proceed and execute the lines of code associated with the else statement. You can think of this coding structure as a decision tree! Remember: if somewhere along the tree, your machine comes across a logical expression which is True, it won't bother anymore to evaluate the remaining options! Note that the statements are evaluated in order of occurence. Can you identify the difference between the code above and the code below? (Try changing age) End of explanation """ person = "John" print("hello!") if person == "Alice": print("how are you today?") #this is indented print("do you want to join me for lunch?") #this is indented elif person == "Lisa": print("let's talk some other time!") #this is indented print("goodbye!") """ Explanation: Remember: - if-if: your code wil check all the if statements - if-elif: if one condition results to True, it will not check the other conditions Unless you need to check all conditions, using if-elif is usually preferred because it's more efficient. 3. Indentation Let's take another look at the example from above (we've added line numbers): python 1. if number &lt;= 5: 2. print(number) 3. else: 4. print('number is higher than 5') You might have noticed that line 2 starts with 4 spaces. This is on purpose! The indentation lets Python know when it needs to execute the piece of code. When the boolean expression in line 1 is True, Python executes the code from the next line that starts four spaces or one tab (an indent) to the right. This is called indentation. 
All statements with the same distance to the right belong to the same 'block' of code. Unlike other languages, Python does not make use of curly braces to mark the start and end of pieces of code, like if statements. The only delimiter is a colon (:) and the indentation of the code. Both four spaces and tabs can be used for indentation. This indentation must be used consistently throughout your code. The most popular way to indent is four spaces (see stackoverflow). For now, you do not have to worry about this, since a tab is automatically converted to four spaces in notebooks. Take a look at the code below. We see that the indented block is not executed, but the unindented lines of code are. Now go ahead and change the value of the person variable. The conversation should be a bit longer now! End of explanation """ x = float(input("Enter a number: ")) if x >= 0: if x == 0: print("Zero") else: print("Positive number") else: print("Negative number") """ Explanation: 3.1 Nesting We have seen that all statements with the same distance to the right belong to the same block of code, i.e. the statements within a block line up vertically. The block ends at a line less indented or the end of the file. Blocks can contain blocks as welll; this way, we get a nested block structure. The block that has to be more deeply nested is simply indented further to the right: There may be a situation when you want to check for another condition after a condition resolves to True. In such a situation, you can use the nested if construct. As you can see if you run the code below, the second if statement is only executed if the first if statement returns True. Try changing the value of x to see what the code does. End of explanation """ print(True and True) print(False and True) print(1 == 1 and 2 == 1) print("test" == "test") print(1 == 1 or 2 != 1) print(True and 1 == 1) print(False and 0 != 0) print(True or 1 == 1) print("test" == "testing") print(1 != 0 and 2 == 1) print("test" != "testing") print("test" == 1) print(not (True and False)) print(not (1 == 1 and 0 != 1)) print(not (10 == 1 or 1000 == 1000)) print(not (1 != 10 or 3 == 4)) print(not ("testing" == "testing" and "Zed" == "Cool Guy")) print(1 == 1 and (not ("testing" == 1 or 1 == 0))) print("chunky" == "bacon" and (not (3 == 4 or 3 == 3))) print(3 == 3 and (not ("testing" == "testing" or "Python" == "Fun"))) print("test" != "testing" and 1 == 1 and 2 == 2 and 20 in [1, 20, 3, 4,5]) """ Explanation: Exercises Exercise 1: It's important to practice a lot with boolean expressions. Here is a list of them, which orginate from learnpythonthehardway. Try to guess the output. End of explanation """ # insert your code here """ Explanation: Exercise 2: Write a small program that defines a variable weight. If the weight is > 50 pounds, print "There is a $25 charge for luggage that heavy." If it is not, print: "Thank you for your business." If the weight is exactly 50, print: "Pfiew! The weight is just right!". Change the value of weight a couple of times to check whether your code works. Make use of the logical operators and the if-elif-else construct! End of explanation """ my_string = "hello" if my_string == "hello": print("world") """ Explanation: Exercise 3: What's wrong in the following code? Correct the mistake. End of explanation """ my_string = "hello" if my_string == "hello": print("world") """ Explanation: Why is the last line in the following code red? Correct the mistake. 
End of explanation """ my_string = "hello" if my_string == "hello" print("world") """ Explanation: What's wrong in the following code? Correct the mistake. End of explanation """ my_string = "hello" if my_string = "hello": print("world") """ Explanation: What's wrong in the following code? Correct the mistake. End of explanation """ x = float(input("Enter a number: ")) if x >= 0: if x == 0: print("Zero") else: print("Positive number") else: print("Negative number") """ Explanation: Exercise 4: Can you rewrite the code below without nesting? Hint: use the if-elif-else construct. End of explanation """ orange_quality = "fresh" orange_price = 1.75 # your code here """ Explanation: Exercise 5: A friend wants your advice on how much oranges he should buy. Write a program that will give the advice to buy 24 oranges if the price is lower than 1.50 EUR per kg, 12 oranges if the price is between 1.50 EUR and 3 EUR, and only 1 orange if the price is higher than 3 EUR. But also tell him that he should only buy them if the oranges are fresh; otherwise, he should not get any. Use nesting and the if-elif-else construct. End of explanation """
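A closing recap example (added by the editor; it is not one of the chapter's exercises and does not give away their solutions): comparison operators, membership operators and all() combined in a single if-elif-else construct.

# Recap: comparisons, membership and all() inside one if-elif-else
basket = ['apple', 'banana', 'orange']
budget = 4.50

if 'orange' in basket and budget >= 3:
    print("You can afford the oranges in your basket!")
elif all([len(basket) > 0, budget > 0]):
    print("You still have some fruit and some money left.")
else:
    print("Time to go home.")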
julianogalgaro/udacity
nd101/c2l8-sentiment-analysis/sentiment_network/Sentiment Classification - Mini Project 2.ipynb
mit
def pretty_print_review_and_label(i): print(labels[i] + "\t:\t" + reviews[i][:80] + "...") g = open('reviews.txt','r') # What we know! reviews = list(map(lambda x:x[:-1],g.readlines())) g.close() g = open('labels.txt','r') # What we WANT to know! labels = list(map(lambda x:x[:-1].upper(),g.readlines())) g.close() len(reviews) reviews[0] labels[0] """ Explanation: Sentiment Classification & How To "Frame Problems" for a Neural Network by Andrew Trask Twitter: @iamtrask Blog: http://iamtrask.github.io What You Should Already Know neural networks, forward and back-propagation stochastic gradient descent mean squared error and train/test splits Where to Get Help if You Need it Re-watch previous Udacity Lectures Leverage the recommended Course Reading Material - Grokking Deep Learning (40% Off: traskud17) Shoot me a tweet @iamtrask Tutorial Outline: Intro: The Importance of "Framing a Problem" Curate a Dataset Developing a "Predictive Theory" PROJECT 1: Quick Theory Validation Transforming Text to Numbers PROJECT 2: Creating the Input/Output Data Putting it all together in a Neural Network PROJECT 3: Building our Neural Network Understanding Neural Noise PROJECT 4: Making Learning Faster by Reducing Noise Analyzing Inefficiencies in our Network PROJECT 5: Making our Network Train and Run Faster Further Noise Reduction PROJECT 6: Reducing Noise by Strategically Reducing the Vocabulary Analysis: What's going on in the weights? Lesson: Curate a Dataset End of explanation """ print("labels.txt \t : \t reviews.txt\n") pretty_print_review_and_label(2137) pretty_print_review_and_label(12816) pretty_print_review_and_label(6267) pretty_print_review_and_label(21934) pretty_print_review_and_label(5297) pretty_print_review_and_label(4998) """ Explanation: Lesson: Develop a Predictive Theory End of explanation """ from collections import Counter import numpy as np positive_counts = Counter() negative_counts = Counter() total_counts = Counter() for i in range(len(reviews)): if(labels[i] == 'POSITIVE'): for word in reviews[i].split(" "): positive_counts[word] += 1 total_counts[word] += 1 else: for word in reviews[i].split(" "): negative_counts[word] += 1 total_counts[word] += 1 positive_counts.most_common() pos_neg_ratios = Counter() for term,cnt in list(total_counts.most_common()): if(cnt > 100): pos_neg_ratio = positive_counts[term] / float(negative_counts[term]+1) pos_neg_ratios[term] = pos_neg_ratio for word,ratio in pos_neg_ratios.most_common(): if(ratio > 1): pos_neg_ratios[word] = np.log(ratio) else: pos_neg_ratios[word] = -np.log((1 / (ratio+0.01))) # words most frequently seen in a review with a "POSITIVE" label pos_neg_ratios.most_common() # words most frequently seen in a review with a "NEGATIVE" label list(reversed(pos_neg_ratios.most_common()))[0:30] """ Explanation: Project 1: Quick Theory Validation End of explanation """ from IPython.display import Image review = "This was a horrible, terrible movie." Image(filename='sentiment_network.png') review = "The movie was excellent" Image(filename='sentiment_network_pos.png') import numpy as np vocab = set(total_counts.keys()) vocab_size = len(vocab) layer_0 = np.zeros((1,vocab_size)) word2pos={} for i,w in enumerate(vocab): word2pos[w]=i def update_input_layer(review): """ Modifica a global layer_0 para representar a forma em vetor da resenha. Um determinado elemento de layer_0 deve representar \ quantas vezes determinada palavra ocorre na resenha. 
Args: review(string) - the string containing the review Returns: None """ global layer_0 # clear the previous state, reset the layer to all zeros layer_0*=0 ## Your code goes here for w in review.split(): layer_0[0][word2pos[w]] += 1 def get_target_for_label(label): """Converts a label to `0` or `1`. Args: label(string) - "POSITIVE" or "NEGATIVE". Returns: `0` or `1`. """ if label == "POSITIVE": return 1 else: return 0 update_input_layer(reviews[0]) print(layer_0) print(get_target_for_label(labels[0])) """ Explanation: Transforming Text into Numbers End of explanation """
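A short editor-added sanity check of the two helpers above: it encodes the first few reviews and pairs each bag-of-words vector with its 0/1 target. All names come from the cells above.

# Encode a few reviews and show the corresponding training target
for i in range(3):
    update_input_layer(reviews[i])
    target = get_target_for_label(labels[i])
    print(labels[i], '-> target', target,
          '| total words counted:', int(layer_0.sum()),
          '| distinct words:', int((layer_0 > 0).sum()))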
metpy/MetPy
v0.10/_downloads/d02fda82caa4290e31f980126221b2a4/Wind_SLP_Interpolation.ipynb
bsd-3-clause
import cartopy.crs as ccrs import cartopy.feature as cfeature from matplotlib.colors import BoundaryNorm import matplotlib.pyplot as plt import numpy as np import pandas as pd from metpy.calc import wind_components from metpy.cbook import get_test_data from metpy.interpolate import interpolate_to_grid, remove_nan_observations from metpy.plots import add_metpy_logo from metpy.units import units to_proj = ccrs.AlbersEqualArea(central_longitude=-97., central_latitude=38.) """ Explanation: Wind and Sea Level Pressure Interpolation Interpolate sea level pressure, as well as wind component data, to make a consistent looking analysis, featuring contours of pressure and wind barbs. End of explanation """ with get_test_data('station_data.txt') as f: data = pd.read_csv(f, header=0, usecols=(2, 3, 4, 5, 18, 19), names=['latitude', 'longitude', 'slp', 'temperature', 'wind_dir', 'wind_speed'], na_values=-99999) """ Explanation: Read in data End of explanation """ lon = data['longitude'].values lat = data['latitude'].values xp, yp, _ = to_proj.transform_points(ccrs.Geodetic(), lon, lat).T """ Explanation: Project the lon/lat locations to our final projection End of explanation """ x_masked, y_masked, pres = remove_nan_observations(xp, yp, data['slp'].values) """ Explanation: Remove all missing data from pressure End of explanation """ slpgridx, slpgridy, slp = interpolate_to_grid(x_masked, y_masked, pres, interp_type='cressman', minimum_neighbors=1, search_radius=400000, hres=100000) """ Explanation: Interpolate pressure using Cressman interpolation End of explanation """ wind_speed = (data['wind_speed'].values * units('m/s')).to('knots') wind_dir = data['wind_dir'].values * units.degree good_indices = np.where((~np.isnan(wind_dir)) & (~np.isnan(wind_speed))) x_masked = xp[good_indices] y_masked = yp[good_indices] wind_speed = wind_speed[good_indices] wind_dir = wind_dir[good_indices] """ Explanation: Get wind information and mask where either speed or direction is unavailable End of explanation """ u, v = wind_components(wind_speed, wind_dir) windgridx, windgridy, uwind = interpolate_to_grid(x_masked, y_masked, np.array(u), interp_type='cressman', search_radius=400000, hres=100000) _, _, vwind = interpolate_to_grid(x_masked, y_masked, np.array(v), interp_type='cressman', search_radius=400000, hres=100000) """ Explanation: Calculate u and v components of wind and then interpolate both. Both will have the same underlying grid so throw away grid returned from v interpolation. 
End of explanation """ x_masked, y_masked, t = remove_nan_observations(xp, yp, data['temperature'].values) tempx, tempy, temp = interpolate_to_grid(x_masked, y_masked, t, interp_type='cressman', minimum_neighbors=3, search_radius=400000, hres=35000) temp = np.ma.masked_where(np.isnan(temp), temp) """ Explanation: Get temperature information End of explanation """ levels = list(range(-20, 20, 1)) cmap = plt.get_cmap('viridis') norm = BoundaryNorm(levels, ncolors=cmap.N, clip=True) fig = plt.figure(figsize=(20, 10)) add_metpy_logo(fig, 360, 120, size='large') view = fig.add_subplot(1, 1, 1, projection=to_proj) view.set_extent([-120, -70, 20, 50]) view.add_feature(cfeature.STATES.with_scale('50m')) view.add_feature(cfeature.OCEAN) view.add_feature(cfeature.COASTLINE.with_scale('50m')) view.add_feature(cfeature.BORDERS, linestyle=':') cs = view.contour(slpgridx, slpgridy, slp, colors='k', levels=list(range(990, 1034, 4))) view.clabel(cs, inline=1, fontsize=12, fmt='%i') mmb = view.pcolormesh(tempx, tempy, temp, cmap=cmap, norm=norm) fig.colorbar(mmb, shrink=.4, pad=0.02, boundaries=levels) view.barbs(windgridx, windgridy, uwind, vwind, alpha=.4, length=5) view.set_title('Surface Temperature (shaded), SLP, and Wind.') plt.show() """ Explanation: Set up the map and plot the interpolated grids appropriately. End of explanation """
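An optional editor-added follow-up, assuming the interpolated grids from above: the u and v components can be combined into a gridded wind-speed field with plain NumPy (units are knots, like the components).

# Derive wind speed magnitude on the analysis grid
wind_speed_grid = np.hypot(uwind, vwind)
print('Gridded wind speed: min %.1f kt, max %.1f kt'
      % (np.nanmin(wind_speed_grid), np.nanmax(wind_speed_grid)))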
GoogleCloudPlatform/training-data-analyst
courses/machine_learning/deepdive2/production_ml/labs/multi_worker_with_keras.ipynb
apache-2.0
import json import os import sys """ Explanation: Multi-worker training with Keras Learning Objectives Multi-worker Configuration Choose the right strategy Train the model Multi worker training in depth Introduction This notebook demonstrates multi-worker distributed training with Keras model using tf.distribute.Strategy API, specifically tf.distribute.MultiWorkerMirroredStrategy. With the help of this strategy, a Keras model that was designed to run on single-worker can seamlessly work on multiple workers with minimal code change. Each learning objective will correspond to a #TODO in this student lab notebook -- try to complete this notebook first and then review the solution notebook. Setup First, some necessary imports. End of explanation """ os.environ["CUDA_VISIBLE_DEVICES"] = "-1" """ Explanation: Before importing TensorFlow, make a few changes to the environment. Disable all GPUs. This prevents errors caused by the workers all trying to use the same GPU. For a real application each worker would be on a different machine. End of explanation """ os.environ.pop('TF_CONFIG', None) """ Explanation: Reset the TF_CONFIG environment variable, you'll see more about this later. End of explanation """ if '.' not in sys.path: sys.path.insert(0, '.') """ Explanation: Be sure that the current directory is on python's path. This allows the notebook to import the files written by %%writefile later. End of explanation """ import tensorflow as tf print(tf.__version__) """ Explanation: Now import TensorFlow. End of explanation """ %%writefile mnist.py import os import tensorflow as tf import numpy as np def mnist_dataset(batch_size): (x_train, y_train), _ = tf.keras.datasets.mnist.load_data() # The `x` arrays are in uint8 and have values in the range [0, 255]. # You need to convert them to float32 with values in the range [0, 1] x_train = x_train / np.float32(255) y_train = y_train.astype(np.int64) train_dataset = tf.data.Dataset.from_tensor_slices( (x_train, y_train)).shuffle(60000).repeat().batch(batch_size) return train_dataset def build_and_compile_cnn_model(): model = tf.keras.Sequential([ tf.keras.Input(shape=(28, 28)), tf.keras.layers.Reshape(target_shape=(28, 28, 1)), tf.keras.layers.Conv2D(32, 3, activation='relu'), tf.keras.layers.Flatten(), tf.keras.layers.Dense(128, activation='relu'), tf.keras.layers.Dense(10) ]) model.compile( loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), optimizer=tf.keras.optimizers.SGD(learning_rate=0.001), metrics=['accuracy']) return model """ Explanation: Dataset and model definition Next create an mnist.py file with a simple model and dataset setup. This python file will be used by the worker-processes in this tutorial: End of explanation """ import mnist batch_size = 64 single_worker_dataset = mnist.mnist_dataset(batch_size) single_worker_model = mnist.build_and_compile_cnn_model() single_worker_model.fit(single_worker_dataset, epochs=3, steps_per_epoch=70) """ Explanation: Try training the model for a small number of epochs and observe the results of a single worker to make sure everything works correctly. As training progresses, the loss should drop and the accuracy should increase. End of explanation """ tf_config = { 'cluster': { 'worker': ['localhost:12345', 'localhost:23456'] }, 'task': {'type': 'worker', 'index': 0} } """ Explanation: Multi-worker Configuration Now let's enter the world of multi-worker training. 
In TensorFlow, the TF_CONFIG environment variable is required for training on multiple machines, each of which possibly has a different role. TF_CONFIG is a JSON string used to specify the cluster configuration on each worker that is part of the cluster. Here is an example configuration: End of explanation """ # converts a Python object into a json string. # TODO: Your code goes here """ Explanation: Here is the same TF_CONFIG serialized as a JSON string: End of explanation """ os.environ['GREETINGS'] = 'Hello TensorFlow!' """ Explanation: There are two components of TF_CONFIG: cluster and task. cluster is the same for all workers and provides information about the training cluster, which is a dict consisting of different types of jobs such as worker. In multi-worker training with MultiWorkerMirroredStrategy, there is usually one worker that takes on a little more responsibility like saving checkpoint and writing summary file for TensorBoard in addition to what a regular worker does. Such a worker is referred to as the chief worker, and it is customary that the worker with index 0 is appointed as the chief worker (in fact this is how tf.distribute.Strategy is implemented). task provides information of the current task and is different on each worker. It specifies the type and index of that worker. In this example, you set the task type to "worker" and the task index to 0. This machine is the first worker and will be appointed as the chief worker and do more work than the others. Note that other machines will need to have the TF_CONFIG environment variable set as well, and it should have the same cluster dict, but different task type or task index depending on what the roles of those machines are. For illustration purposes, this tutorial shows how one may set a TF_CONFIG with 2 workers on localhost. In practice, users would create multiple workers on external IP addresses/ports, and set TF_CONFIG on each worker appropriately. In this example you will use 2 workers, the first worker's TF_CONFIG is shown above. For the second worker you would set tf_config['task']['index']=1 Above, tf_config is just a local variable in python. To actually use it to configure training, this dictionary needs to be serialized as JSON, and placed in the TF_CONFIG environment variable. Environment variables and subprocesses in notebooks Subprocesses inherit environment variables from their parent. So if you set an environment variable in this jupyter notebook process: End of explanation """ %%bash echo ${GREETINGS} """ Explanation: You can access the environment variable from a subprocesses: End of explanation """ # A distribution strategy for synchronous training on multiple workers. strategy = # TODO: Your code goes here """ Explanation: In the next section, you'll use this to pass the TF_CONFIG to the worker subprocesses. You would never really launch your jobs this way, but it's sufficient for the purposes of this tutorial: To demonstrate a minimal multi-worker example. Choose the right strategy In TensorFlow there are two main forms of distributed training: Synchronous training, where the steps of training are synced across the workers and replicas, and Asynchronous training, where the training steps are not strictly synced. MultiWorkerMirroredStrategy, which is the recommended strategy for synchronous multi-worker training, will be demonstrated in this guide. To train the model, use an instance of tf.distribute.MultiWorkerMirroredStrategy. 
MultiWorkerMirroredStrategy creates copies of all variables in the model's layers on each device across all workers. It uses CollectiveOps, a TensorFlow op for collective communication, to aggregate gradients and keep the variables in sync. The tf.distribute.Strategy guide has more details about this strategy. End of explanation """ with strategy.scope(): # Model building/compiling need to be within `strategy.scope()`. multi_worker_model = # TODO: Your code goes here """ Explanation: Note: TF_CONFIG is parsed and TensorFlow's GRPC servers are started at the time MultiWorkerMirroredStrategy() is called, so the TF_CONFIG environment variable must be set before a tf.distribute.Strategy instance is created. Since TF_CONFIG is not set yet the above strategy is effectively single-worker training. MultiWorkerMirroredStrategy provides multiple implementations via the CommunicationOptions parameter. RING implements ring-based collectives using gRPC as the cross-host communication layer. NCCL uses Nvidia's NCCL to implement collectives. AUTO defers the choice to the runtime. The best choice of collective implementation depends upon the number and kind of GPUs, and the network interconnect in the cluster. Train the model With the integration of tf.distribute.Strategy API into tf.keras, the only change you will make to distribute the training to multiple-workers is enclosing the model building and model.compile() call inside strategy.scope(). The distribution strategy's scope dictates how and where the variables are created, and in the case of MultiWorkerMirroredStrategy, the variables created are MirroredVariables, and they are replicated on each of the workers. End of explanation """ %%writefile main.py import os import json import tensorflow as tf import mnist per_worker_batch_size = 64 tf_config = json.loads(os.environ['TF_CONFIG']) num_workers = len(tf_config['cluster']['worker']) strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy() global_batch_size = per_worker_batch_size * num_workers multi_worker_dataset = mnist.mnist_dataset(global_batch_size) with strategy.scope(): # Model building/compiling need to be within `strategy.scope()`. multi_worker_model = mnist.build_and_compile_cnn_model() multi_worker_model.fit(multi_worker_dataset, epochs=3, steps_per_epoch=70) """ Explanation: Note: Currently there is a limitation in MultiWorkerMirroredStrategy where TensorFlow ops need to be created after the instance of strategy is created. If you see RuntimeError: Collective ops must be configured at program startup, try creating the instance of MultiWorkerMirroredStrategy at the beginning of the program and put the code that may create ops after the strategy is instantiated. To actually run with MultiWorkerMirroredStrategy you'll need to run worker processes and pass a TF_CONFIG to them. Like the mnist.py file written earlier, here is the main.py that each of the workers will run: End of explanation """ %%bash ls *.py """ Explanation: In the code snippet above note that the global_batch_size, which gets passed to Dataset.batch, is set to per_worker_batch_size * num_workers. This ensures that each worker processes batches of per_worker_batch_size examples regardless of the number of workers. 
The current directory now contains both Python files: End of explanation """ os.environ['TF_CONFIG'] = json.dumps(tf_config) """ Explanation: So json-serialize the TF_CONFIG and add it to the environment variables: End of explanation """ # first kill any previous runs %killbgscripts %%bash --bg python main.py &> job_0.log """ Explanation: Now, you can launch a worker process that will run the main.py and use the TF_CONFIG: End of explanation """ import time time.sleep(10) """ Explanation: There are a few things to note about the above command: It uses the %%bash which is a notebook "magic" to run some bash commands. It uses the --bg flag to run the bash process in the background, because this worker will not terminate. It waits for all the workers before it starts. The backgrounded worker process won't print output to this notebook, so the &amp;&gt; redirects its output to a file, so you can see what happened. So, wait a few seconds for the process to start up: End of explanation """ %%bash cat job_0.log """ Explanation: Now look what's been output to the worker's logfile so far: End of explanation """ tf_config['task']['index'] = 1 os.environ['TF_CONFIG'] = json.dumps(tf_config) """ Explanation: The last line of the log file should say: Started server with target: grpc://localhost:12345. The first worker is now ready, and is waiting for all the other worker(s) to be ready to proceed. So update the tf_config for the second worker's process to pick up: End of explanation """ %%bash python main.py """ Explanation: Now launch the second worker. This will start the training since all the workers are active (so there's no need to background this process): End of explanation """ %%bash cat job_0.log """ Explanation: Now if you recheck the logs written by the first worker you'll see that it participated in training that model: End of explanation """ # Delete the `TF_CONFIG`, and kill any background tasks so they don't affect the next section. os.environ.pop('TF_CONFIG', None) %killbgscripts """ Explanation: Unsurprisingly this ran slower than the the test run at the beginning of this tutorial. Running multiple workers on a single machine only adds overhead. The goal here was not to improve the training time, but only to give an example of multi-worker training. End of explanation """ options = tf.data.Options() options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF global_batch_size = 64 multi_worker_dataset = mnist.mnist_dataset(batch_size=64) dataset_no_auto_shard = multi_worker_dataset.with_options(options) """ Explanation: Multi worker training in depth So far this tutorial has demonstrated a basic multi-worker setup. The rest of this document looks in detail other factors which may be useful or important for real use cases. Dataset sharding In multi-worker training, dataset sharding is needed to ensure convergence and performance. The example in the previous section relies on the default autosharding provided by the tf.distribute.Strategy API. You can control the sharding by setting the tf.data.experimental.AutoShardPolicy of the tf.data.experimental.DistributeOptions. To learn more about auto-sharding see the Distributed input guide. Here is a quick example of how to turn OFF the auto sharding, so each replica processes every example (not recommended): End of explanation """ model_path = '/tmp/keras-model' def _is_chief(task_type, task_id): # Note: there are two possible `TF_CONFIG` configuration. 
# 1) In addition to `worker` tasks, a `chief` task type is use; # in this case, this function should be modified to # `return task_type == 'chief'`. # 2) Only `worker` task type is used; in this case, worker 0 is # regarded as the chief. The implementation demonstrated here # is for this case. # For the purpose of this colab section, we also add `task_type is None` # case because it is effectively run with only single worker. return (task_type == 'worker' and task_id == 0) or task_type is None def _get_temp_dir(dirpath, task_id): base_dirpath = 'workertemp_' + str(task_id) temp_dir = os.path.join(dirpath, base_dirpath) tf.io.gfile.makedirs(temp_dir) return temp_dir def write_filepath(filepath, task_type, task_id): dirpath = os.path.dirname(filepath) base = os.path.basename(filepath) if not _is_chief(task_type, task_id): dirpath = _get_temp_dir(dirpath, task_id) return os.path.join(dirpath, base) task_type, task_id = (strategy.cluster_resolver.task_type, strategy.cluster_resolver.task_id) write_model_path = write_filepath(model_path, task_type, task_id) """ Explanation: Evaluation If you pass validation_data into model.fit, it will alternate between training and evaluation for each epoch. The evaluation taking validation_data is distributed across the same set of workers and the evaluation results are aggregated and available for all workers. Similar to training, the validation dataset is automatically sharded at the file level. You need to set a global batch size in the validation dataset and set validation_steps. A repeated dataset is also recommended for evaluation. Alternatively, you can also create another task that periodically reads checkpoints and runs the evaluation. This is what Estimator does. But this is not a recommended way to perform evaluation and thus its details are omitted. Performance You now have a Keras model that is all set up to run in multiple workers with MultiWorkerMirroredStrategy. You can try the following techniques to tweak performance of multi-worker training with MultiWorkerMirroredStrategy. MultiWorkerMirroredStrategy provides multiple collective communication implementations. RING implements ring-based collectives using gRPC as the cross-host communication layer. NCCL uses Nvidia's NCCL to implement collectives. AUTO defers the choice to the runtime. The best choice of collective implementation depends upon the number and kind of GPUs, and the network interconnect in the cluster. To override the automatic choice, specify communication_options parameter of MultiWorkerMirroredStrategy's constructor, e.g. communication_options=tf.distribute.experimental.CommunicationOptions(implementation=tf.distribute.experimental.CollectiveCommunication.NCCL). Cast the variables to tf.float if possible. The official ResNet model includes an example of how this can be done. Fault tolerance In synchronous training, the cluster would fail if one of the workers fails and no failure-recovery mechanism exists. Using Keras with tf.distribute.Strategy comes with the advantage of fault tolerance in cases where workers die or are otherwise unstable. You do this by preserving training state in the distributed file system of your choice, such that upon restart of the instance that previously failed or preempted, the training state is recovered. When a worker becomes unavailable, other workers will fail (possibly after a timeout). In such cases, the unavailable worker needs to be restarted, as well as other workers that have failed. 
Note: Previously, the ModelCheckpoint callback provided a mechanism to restore training state upon restart from job failure for multi-worker training. The TensorFlow team are introducing a new BackupAndRestore callback, to also add the support to single worker training for a consistent experience, and removed fault tolerance functionality from existing ModelCheckpoint callback. From now on, applications that rely on this behavior should migrate to the new callback. ModelCheckpoint callback ModelCheckpoint callback no longer provides fault tolerance functionality, please use BackupAndRestore callback instead. The ModelCheckpoint callback can still be used to save checkpoints. But with this, if training was interrupted or successfully finished, in order to continue training from the checkpoint, the user is responsible to load the model manually. Optionally the user can choose to save and restore model/weights outside ModelCheckpoint callback. Model saving and loading To save your model using model.save or tf.saved_model.save, the destination for saving needs to be different for each worker. On the non-chief workers, you will need to save the model to a temporary directory, and on the chief, you will need to save to the provided model directory. The temporary directories on the worker need to be unique to prevent errors resulting from multiple workers trying to write to the same location. The model saved in all the directories are identical and typically only the model saved by the chief should be referenced for restoring or serving. You should have some cleanup logic that deletes the temporary directories created by the workers once your training has completed. The reason you need to save on the chief and workers at the same time is because you might be aggregating variables during checkpointing which requires both the chief and workers to participate in the allreduce communication protocol. On the other hand, letting chief and workers save to the same model directory will result in errors due to contention. With MultiWorkerMirroredStrategy, the program is run on every worker, and in order to know whether the current worker is chief, it takes advantage of the cluster resolver object that has attributes task_type and task_id. task_type tells you what the current job is (e.g. 'worker'), and task_id tells you the identifier of the worker. The worker with id 0 is designated as the chief worker. In the code snippet below, write_filepath provides the file path to write, which depends on the worker id. In the case of chief (worker with id 0), it writes to the original file path; for others, it creates a temporary directory (with id in the directory path) to write in: End of explanation """ multi_worker_model.save(write_model_path) """ Explanation: With that, you're now ready to save: End of explanation """ if not _is_chief(task_type, task_id): tf.io.gfile.rmtree(os.path.dirname(write_model_path)) """ Explanation: As described above, later on the model should only be loaded from the path chief saved to, so let's remove the temporary ones the non-chief workers saved: End of explanation """ # load a model saved via model.save() loaded_model = # TODO: Your code goes here # Now that the model is restored, and can continue with the training. loaded_model.fit(single_worker_dataset, epochs=2, steps_per_epoch=20) """ Explanation: Now, when it's time to load, let's use convenient tf.keras.models.load_model API, and continue with further work. 
Here, assume only using single worker to load and continue training, in which case you do not call tf.keras.models.load_model within another strategy.scope(). End of explanation """ checkpoint_dir = '/tmp/ckpt' checkpoint = tf.train.Checkpoint(model=multi_worker_model) write_checkpoint_dir = write_filepath(checkpoint_dir, task_type, task_id) checkpoint_manager = tf.train.CheckpointManager( checkpoint, directory=write_checkpoint_dir, max_to_keep=1) """ Explanation: Checkpoint saving and restoring On the other hand, checkpointing allows you to save model's weights and restore them without having to save the whole model. Here, you'll create one tf.train.Checkpoint that tracks the model, which is managed by a tf.train.CheckpointManager so that only the latest checkpoint is preserved. End of explanation """ checkpoint_manager.save() if not _is_chief(task_type, task_id): tf.io.gfile.rmtree(write_checkpoint_dir) """ Explanation: Once the CheckpointManager is set up, you're now ready to save, and remove the checkpoints non-chief workers saved. End of explanation """ latest_checkpoint = tf.train.latest_checkpoint(checkpoint_dir) checkpoint.restore(latest_checkpoint) multi_worker_model.fit(multi_worker_dataset, epochs=2, steps_per_epoch=20) """ Explanation: Now, when you need to restore, you can find the latest checkpoint saved using the convenient tf.train.latest_checkpoint function. After restoring the checkpoint, you can continue with training. End of explanation """ # Multi-worker training with MultiWorkerMirroredStrategy. callbacks = [tf.keras.callbacks.experimental.BackupAndRestore(backup_dir='/tmp/backup')] with strategy.scope(): multi_worker_model = mnist.build_and_compile_cnn_model() multi_worker_model.fit(multi_worker_dataset, epochs=3, steps_per_epoch=70, callbacks=callbacks) """ Explanation: BackupAndRestore callback BackupAndRestore callback provides fault tolerance functionality, by backing up the model and current epoch number in a temporary checkpoint file under backup_dir argument to BackupAndRestore. This is done at the end of each epoch. Once jobs get interrupted and restart, the callback restores the last checkpoint, and training continues from the beginning of the interrupted epoch. Any partial training already done in the unfinished epoch before interruption will be thrown away, so that it doesn't affect the final model state. To use it, provide an instance of tf.keras.callbacks.experimental.BackupAndRestore at the tf.keras.Model.fit() call. With MultiWorkerMirroredStrategy, if a worker gets interrupted, the whole cluster pauses until the interrupted worker is restarted. Other workers will also restart, and the interrupted worker rejoins the cluster. Then, every worker reads the checkpoint file that was previously saved and picks up its former state, thereby allowing the cluster to get back in sync. Then the training continues. BackupAndRestore callback uses CheckpointManager to save and restore the training state, which generates a file called checkpoint that tracks existing checkpoints together with the latest one. For this reason, backup_dir should not be re-used to store other checkpoints in order to avoid name collision. Currently, BackupAndRestore callback supports single worker with no strategy, MirroredStrategy, and multi-worker with MultiWorkerMirroredStrategy. Below are two examples for both multi-worker training and single worker training. End of explanation """
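As a closing recap of the cluster layout used throughout this tutorial, the sketch below shows what the two workers' TF_CONFIG dictionaries could look like for the localhost setup described above. The port numbers are placeholders rather than values taken from this notebook; only the structure matters - a cluster dict shared by all workers plus a per-worker task entry.
# Hypothetical two-worker cluster on one machine; ports are arbitrary placeholders.
cluster = {'worker': ['localhost:12345', 'localhost:23456']}

tf_config_worker_0 = {'cluster': cluster, 'task': {'type': 'worker', 'index': 0}}  # chief
tf_config_worker_1 = {'cluster': cluster, 'task': {'type': 'worker', 'index': 1}}

# Each process would export its own copy before creating the strategy, e.g.:
# os.environ['TF_CONFIG'] = json.dumps(tf_config_worker_0)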
grfiv/titanic
May2015/vowpal_wabbit.ipynb
mit
i = 0 def clean(s): return " ".join(re.findall(r'\w+', s,flags = re.UNICODE | re.LOCALE)).lower() with open("train_titanic.csv", "r") as infile: reader = csv.reader(infile) for line in reader: print line i += 1 if (i == 2): break i = 0 def clean(s): return " ".join(re.findall(r'\w+', s,flags = re.UNICODE | re.LOCALE)).lower() with open("test_titanic.csv", "r") as infile: reader = csv.reader(infile) for line in reader: print line i += 1 if (i == 2): break """ Explanation: http://mlwave.com/tutorial-titanic-machine-learning-from-distaster/ End of explanation """ import csv import re i = 0 def clean(s): return " ".join(re.findall(r'\w+', s,flags = re.UNICODE | re.LOCALE)).lower() with open("train_titanic.csv", "r") as infile, open("train_titanic.vw", "wb") as outfile: reader = csv.reader(infile) for line in reader: i+= 1 if i > 1: vw_line = "" if str(line[1]) == "1": vw_line += "1 '" else: vw_line += "-1 '" vw_line += str(line[0]) + " |f " vw_line += "passenger_class_" + str(line[2]) + " " vw_line += "sex_" + str(line[3]) + " " vw_line += "age:" + str(line[4]) + " " vw_line += "sibsp:" + str(line[5]) + " " vw_line += "parch:" + str(line[6]) + " " vw_line += "fare:" + str(line[7]) + " " vw_line += "embarked_" + str(line[8]) + " " vw_line += "last_name_" + str(line[9]) + " " vw_line += "title_" + str(line[10]) + " " vw_line += "fare2_" + str(line[11]) + " " vw_line += "familysize:" + str(line[12]) + " " vw_line += "farepp:" + str(line[13]) + " " vw_line += "deck_" + str(line[14]) + " " vw_line += "side_" + str(line[15]) + " " outfile.write(vw_line[:-1] + "\n") i = 0 with open("test_titanic.csv", "r") as infile, open("test_titanic.vw", "wb") as outfile: reader = csv.reader(infile) for line in reader: i+= 1 if i > 1: vw_line = "" vw_line += "1 '" vw_line += str(line[0]) + " |f " vw_line += "passenger_class_" + str(line[1]) + " " vw_line += "sex_" + str(line[2]) + " " vw_line += "age:" + str(line[3]) + " " vw_line += "sibsp:" + str(line[4]) + " " vw_line += "parch:" + str(line[5]) + " " vw_line += "fare:" + str(line[6]) + " " vw_line += "embarked_" + str(line[7]) + " " vw_line += "last_name_" + str(line[8]) + " " vw_line += "title_" + str(line[9]) + " " vw_line += "fare2_" + str(line[10]) + " " vw_line += "familysize:" + str(line[11]) + " " vw_line += "farepp:" + str(line[12]) + " " vw_line += "deck_" + str(line[13]) + " " vw_line += "side_" + str(line[14]) + " " outfile.write(vw_line[:-1] + "\n") """ Explanation: Convert to VOWPAL WABBIT format End of explanation """ import csv with open("preds_titanic.txt", "r") as infile, open("submission_vw.csv", "wb") as outfile: outfile.write("PassengerId,Survived\n") for line in infile.readlines(): kaggle_line = str(line.split(" ")[1]).replace("\n","") if str(int(float(line.split(" ")[0]))) == "1": kaggle_line += ",1\n" else: kaggle_line += ",0\n" outfile.write(kaggle_line) i=0 with open("preds_titanic.txt", "r") as infile: for line in infile.readlines(): print str(line.split(" ")[1]).replace("\n","") print str(float(line.split(" ")[0])) print str(int(float(line.split(" ")[0]))) print i+=1 if i>5: break """ Explanation: Create a model vw train_titanic.vw -f model.vw --binary --passes 20 -c -q ff --adaptive --normalized --l1 0.00000001 --l2 0.0000001 -b 24 Predict vw -d test_titanic.vw -t -i model.vw -p preds_titanic.txt Create kaggle submission file End of explanation """
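One small addition that is not part of the original tutorial: before uploading, it can be worth sanity-checking the submission file written above, for example by confirming that the predicted survival rate lands in a plausible range (on this problem, somewhere around a third of passengers survived).
import csv

# Quick sanity check on submission_vw.csv produced above.
with open("submission_vw.csv") as f:
    rows = list(csv.DictReader(f))

n_survived = sum(int(row["Survived"]) for row in rows)
print("%d of %d passengers predicted to survive (%.1f%%)" %
      (n_survived, len(rows), 100.0 * n_survived / len(rows)))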
mayankjohri/LetsExplorePython
Section 1 - Core Python/Chapter 09 - Classes & OOPS/08_MetaProgramming.ipynb
gpl-3.0
class Foo(object): pass
print(type(Foo))

class Foo: pass

Foo.field = 42
x = Foo()
x.field

Foo.field2 = 99
x.field2

Foo.method = lambda self: "Hi!"
x.method()
""" Explanation: Metaprogramming
Objects are created by other objects: special objects called "classes" that we can set up to spit out objects that are configured to our liking. Classes are just objects, and they can be modified the same way:
End of explanation """
class C: pass
""" Explanation: To modify a class, you perform operations on it like any other object. You can add and subtract fields and methods, for example. The difference is that any change you make to a class affects all the objects of that class, even the ones that have already been instantiated.
What creates these special "class" objects? Other special objects, called metaclasses. The default metaclass is called type and in the vast majority of cases it does the right thing. In some situations, however, you can gain leverage by modifying the way that classes are produced - typically by performing extra actions or injecting code. When this is the case, you can use metaclass programming to modify the way that some of your class objects are created.
It's worth re-emphasizing that in the vast majority of cases, you don't need metaclasses, because it's a fascinating toy and the temptation to use it everywhere can be overwhelming. Some of the examples in this chapter will show both metaclass and non-metaclass solutions to a problem, so you can see that there's usually another (often simpler) approach. Some of the functionality that was previously only available with metaclasses is now available in a simpler form using class decorators. It is still useful, however, to understand metaclasses, and certain results can still be achieved only through metaclass programming.
Basic Metaprogramming
So metaclasses create classes, and classes create instances. Normally when we write a class, the default metaclass type is automatically invoked to create that class, and we aren't even aware that it's happening. It's possible to explicitly code the metaclass' creation of a class. type called with one argument produces the type information of an existing class; type called with three arguments creates a new class object. The arguments when invoking type are the name of the class, a list of base classes, and a dictionary giving the namespace for the class (all the fields and methods). So the equivalent of:
End of explanation """
C = type('C', (), {})

class Cpp(object):
    """This is a doc"""
    def me(self):
        self.meme = "Me"
    def you(self):
        self.youyou = "You"

print(type(Cpp))
print(Cpp.__dict__)
cpp = Cpp()
cpp.me()
print(cpp.meme)

def me(self):
    self.meme = "Me"

def you(self):
    self.youyou = "You"

Cpp = type('Cpp', (), dict(me=me, you=you))
print(type(Cpp))
print(Cpp.__dict__)
cpp = Cpp()
cpp.me()
print(cpp.meme)
""" Explanation: is
End of explanation """
def howdy(self, you):
    print("Howdy, " + you)

MyList = type('MyList', (list,), dict(x=42, howdy=howdy))
ml = MyList()
ml.append("Camembert")
print(ml)
print(ml.x)
ml.howdy("John")
""" Explanation: Classes are often referred to as "types," so this reads fairly sensibly: you're calling a function that creates a new type based on its arguments. We can also add base classes, fields and methods:
End of explanation """
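The notebook stops at building classes by calling type directly; for completeness, here is a minimal sketch (not part of the original chapter) of the other direction the text mentions - hooking class creation itself with a custom metaclass that performs an extra action every time a class is created.
class RegisteredMeta(type):
    """A metaclass that records every class it creates."""
    registry = []

    def __new__(mcs, name, bases, namespace):
        cls = super().__new__(mcs, name, bases, namespace)
        RegisteredMeta.registry.append(cls)  # the injected extra action
        return cls

class Plugin(metaclass=RegisteredMeta):
    pass

class CsvPlugin(Plugin):
    pass

print(RegisteredMeta.registry)  # both Plugin and CsvPlugin appear here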
Weenkus/Machine-Learning-University-of-Washington
Regression/assignments/.ipynb_checkpoints/Ridge Regression Programming Assignment 1-checkpoint.ipynb
mit
import pandas as pd import matplotlib.pyplot as plt from sklearn import linear_model import numpy as np from math import ceil """ Explanation: Initialise the libs End of explanation """ dtype_dict = {'bathrooms':float, 'waterfront':int, 'sqft_above':int, 'sqft_living15':float, 'grade':int, 'yr_renovated':int, 'price':float, 'bedrooms':float, 'zipcode':str, 'long':float, 'sqft_lot15':float, 'sqft_living':float, 'floors':float, 'condition':int, 'lat':float, 'date':str, 'sqft_basement':int, 'yr_built':int, 'id':str, 'sqft_lot':int, 'view':int} regressionDir = '/home/weenkus/workspace/Machine Learning - University of Washington/Regression/datasets/' sales = pd.read_csv(regressionDir + 'kc_house_data.csv', dtype = dtype_dict) sales = sales.sort(['sqft_living','price']) # dtype_dict same as above set_1 = pd.read_csv(regressionDir + 'wk3_kc_house_set_1_data.csv', dtype=dtype_dict) set_2 = pd.read_csv(regressionDir + 'wk3_kc_house_set_2_data.csv', dtype=dtype_dict) set_3 = pd.read_csv(regressionDir + 'wk3_kc_house_set_3_data.csv', dtype=dtype_dict) set_4 = pd.read_csv(regressionDir + 'wk3_kc_house_set_4_data.csv', dtype=dtype_dict) train_valid_shuffled = pd.read_csv(regressionDir + 'wk3_kc_house_train_valid_shuffled.csv', dtype=dtype_dict) test = pd.read_csv(regressionDir + 'wk3_kc_house_test_data.csv', dtype=dtype_dict) training = pd.read_csv(regressionDir + 'wk3_kc_house_train_data.csv', dtype=dtype_dict) """ Explanation: Load the data End of explanation """ # Show plots in jupyter %matplotlib inline sales.head() sales['price'].head() """ Explanation: Data exploration End of explanation """ def polynomial_dataframe(feature, degree): # feature is pandas.Series type # assume that degree >= 1 # initialize the dataframe: poly_dataframe = pd.DataFrame() # and set poly_dataframe['power_1'] equal to the passed feature poly_dataframe['power_1'] = feature # first check if degree > 1 if degree > 1: # then loop over the remaining degrees: for power in range(2, degree+1): # first we'll give the column a name: name = 'power_' + str(power) # assign poly_dataframe[name] to be feature^power; use apply(*) poly_dataframe[name] = feature; poly_dataframe[name] = poly_dataframe[name].apply(lambda x: x**power) return poly_dataframe """ Explanation: Helper functions End of explanation """ poly15_data = polynomial_dataframe(sales['sqft_living'], 15) # use equivalent of `polynomial_sframe` print(poly15_data) l2_small_penalty = 1.5e-5 model = linear_model.Ridge(alpha=l2_small_penalty, normalize=True) model.fit(poly15_data, sales['price']) model.coef_ plt.plot(poly15_data, model.predict(poly15_data), poly15_data, sales['price']) plt.show() """ Explanation: Ridge regression model fitting End of explanation """ l2_small_penalty=1e-9 poly15_data_set1 = polynomial_dataframe(set_1['sqft_living'], 15) # use equivalent of `polynomial_sframe` model1 = linear_model.Ridge(alpha=l2_small_penalty, normalize=True) model1.fit(poly15_data_set1, set_1['price']) poly15_data_set2 = polynomial_dataframe(set_2['sqft_living'], 15) # use equivalent of `polynomial_sframe` model2 = linear_model.Ridge(alpha=l2_small_penalty, normalize=True) model2.fit(poly15_data_set2, set_2['price']) poly15_data_set3 = polynomial_dataframe(set_3['sqft_living'], 15) # use equivalent of `polynomial_sframe` model3 = linear_model.Ridge(alpha=l2_small_penalty, normalize=True) model3.fit(poly15_data_set3, set_3['price']) poly15_data_set4 = polynomial_dataframe(set_4['sqft_living'], 15) # use equivalent of `polynomial_sframe` model4 = 
linear_model.Ridge(alpha=l2_small_penalty, normalize=True) model4.fit(poly15_data_set4, set_4['price']) plt.plot(poly15_data_set1, model1.predict(poly15_data_set1), poly15_data_set1, set_1['price']) plt.show() plt.plot(poly15_data_set2, model2.predict(poly15_data_set2), poly15_data_set2, set_2['price']) plt.show() plt.plot(poly15_data_set3, model3.predict(poly15_data_set3), poly15_data_set3, set_3['price']) plt.show() plt.plot(poly15_data_set4, model4.predict(poly15_data_set4), poly15_data_set4, set_4['price']) plt.show() print('Model 1 coefficients: ', model1.coef_) print('Model 2 coefficients: ', model2.coef_) print('Model 3 coefficients: ', model3.coef_) print('Model 4 coefficients: ', model4.coef_) """ Explanation: Ridge regression on subsets Using ridge regression with small l2 End of explanation """ l2_large_penalty=1.23e2 poly15_data_set1 = polynomial_dataframe(set_1['sqft_living'], 15) # use equivalent of `polynomial_sframe` model1 = linear_model.Ridge(alpha=l2_large_penalty, normalize=True) model1.fit(poly15_data_set1, set_1['price']) poly15_data_set2 = polynomial_dataframe(set_2['sqft_living'], 15) # use equivalent of `polynomial_sframe` model2 = linear_model.Ridge(alpha=l2_large_penalty, normalize=True) model2.fit(poly15_data_set2, set_2['price']) poly15_data_set3 = polynomial_dataframe(set_3['sqft_living'], 15) # use equivalent of `polynomial_sframe` model3 = linear_model.Ridge(alpha=l2_large_penalty, normalize=True) model3.fit(poly15_data_set3, set_3['price']) poly15_data_set4 = polynomial_dataframe(set_4['sqft_living'], 15) # use equivalent of `polynomial_sframe` model4 = linear_model.Ridge(alpha=l2_large_penalty, normalize=True) model4.fit(poly15_data_set4, set_4['price']) plt.plot(poly15_data_set1, model1.predict(poly15_data_set1), poly15_data_set1, set_1['price']) plt.show() plt.plot(poly15_data_set2, model2.predict(poly15_data_set2), poly15_data_set2, set_2['price']) plt.show() plt.plot(poly15_data_set3, model3.predict(poly15_data_set3), poly15_data_set3, set_3['price']) plt.show() plt.plot(poly15_data_set4, model4.predict(poly15_data_set4), poly15_data_set4, set_4['price']) plt.show() print('Model 1 coefficients: ', model1.coef_) print('Model 2 coefficients: ', model2.coef_) print('Model 3 coefficients: ', model3.coef_) print('Model 4 coefficients: ', model4.coef_) """ Explanation: Applying a higher L2 value End of explanation """ def k_fold_cross_validation(k, l2_penalty, data, output): n = len(data) sumRSS = 0 for i in range(k): # Get the validation/training interval start = (n*i)/k end = (n*(i+1))/k-1 #print (i, (ceil(start), ceil(end))) train_valid_shuffled[0:ceil(start)].append(train_valid_shuffled[ceil(end)+1:n]) # Train the model model = linear_model.Ridge(alpha=l2_penalty, normalize=True) model.fit(data, output) # Calculate RSS RSS = (abs(output - model.predict(data)) ** 2).sum() # Add the RSS to the sum for computing the average sumRSS += RSS return (sumRSS / k) print (k_fold_cross_validation(10, 1e-9, poly15_data_set2, set_2['price'])) """ Explanation: Selecting an L2 penalty via cross-validation Just like the polynomial degree, the L2 penalty is a "magic" parameter we need to select. We could use the validation set approach as we did in the last module, but that approach has a major disadvantage: it leaves fewer observations available for training. Cross-validation seeks to overcome this issue by using all of the training set in a smart way. We will implement a kind of cross-validation called k-fold cross-validation. 
The method gets its name because it involves dividing the training set into k segments of roughly equal size. Similar to the validation set method, we measure the validation error with one of the segments designated as the validation set. The major difference is that we repeat the process k times as follows:
Set aside segment 0 as the validation set, fit a model on the rest of the data, and evaluate it on this validation set
Set aside segment 1 as the validation set, fit a model on the rest of the data, and evaluate it on this validation set
...
Set aside segment k-1 as the validation set, fit a model on the rest of the data, and evaluate it on this validation set
After this process, we compute the average of the k validation errors, and use it as an estimate of the generalization error. Notice that all observations are used for both training and validation, as we iterate over segments of data.
End of explanation """
import sys

l2s = np.logspace(3, 9, num=13)
train_valid_shuffled_poly15 = polynomial_dataframe(train_valid_shuffled['sqft_living'], 15)
k = 10

minError = sys.maxsize
for l2 in l2s:
    avgError = k_fold_cross_validation(k, l2, train_valid_shuffled_poly15, train_valid_shuffled['price'])
    print ('For l2:', l2, ' the CV is ', avgError)
    if avgError < minError:
        minError = avgError
        bestl2 = l2

print (minError)
print (bestl2)
""" Explanation: Pick the l2 penalty that minimizes the cross-validation error
End of explanation """
model = linear_model.Ridge(alpha=1000, normalize=True)
model.fit(training[['sqft_living']], training['price'])

print("Residual sum of squares: %.2f"
      % ((model.predict(test[['sqft_living']]) - test['price']) ** 2).sum())
""" Explanation: Use the best l2 to train the model on all the data
End of explanation """
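One caveat worth flagging about the k_fold_cross_validation helper defined earlier: it computes the segment boundaries for each fold but then fits and scores the model on the full data set, so every fold returns the same training error. Below is a sketch of the intended per-fold logic, keeping the same signature and the libraries already imported in this notebook; it is an illustration, not the official assignment solution (note that recent scikit-learn versions drop the normalize argument kept here for consistency).
def k_fold_cross_validation_v2(k, l2_penalty, data, output):
    n = len(data)
    total_rss = 0.0
    for i in range(k):
        start = (n * i) // k
        end = (n * (i + 1)) // k
        # rows [start, end) form the validation fold; the rest is the training fold
        valid_X, valid_y = data[start:end], output[start:end]
        train_X = pd.concat([data[:start], data[end:]])
        train_y = pd.concat([output[:start], output[end:]])
        model = linear_model.Ridge(alpha=l2_penalty, normalize=True)
        model.fit(train_X, train_y)
        total_rss += ((valid_y - model.predict(valid_X)) ** 2).sum()
    return total_rss / k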
feststelltaste/software-analytics
demos/Big Data Meetup Exercises.ipynb
gpl-3.0
import pandas as pd df = pd.read_csv( r'C:\Users\Markus\Downloads\Fire_Department_Calls_for_Service.csv') df.head() df.columns len(df) """ Explanation: San Francisco Fire Incidents Fileset (1,5 GB): https://data.sfgov.org/api/views/nuek-vuh3/rows.csv?accessType=DOWNLOAD End of explanation """ len(df['Call Type'].unique()) """ Explanation: There are over 4 million rows in the DataFrame and it takes ~15-20 seconds to do a full read of it, maybe longer if your request gets blocked on AWS. Now run the count again and see how long it takes on the cached data - it should take less than a second. End of explanation """ %matplotlib inline df['Call Type'].value_counts().head(10).plot(kind='pie', figsize=(10,10)) df['Call Type'].value_counts().head(10).plot(kind='bar', figsize=(10,5)) df.groupby('Call Type')["Unit ID"].count().sort_values(ascending=False) df.head() FORMAT = "%m/%d/%Y" df["Call Date TS"] = pd.to_datetime(df['Call Date'], format=FORMAT) df.head() df['Call Date TS'].dt.year.value_counts() from datetime import datetime july4 = datetime(2016, 7, 4, 0, 0, 0) june27 = datetime(2016, 6, 27, 0, 0, 0) subset = df[(df['Call Date TS'] >= june27) & (df['Call Date TS'] <= july4)] subset.head() subset.groupby(pd.Grouper(key='Call Date TS', freq="D")).count() subset.groupby(subset['Call Date TS'].dt.weekday_name).count() """ Explanation: Visualized the TOP 10 Call Types End of explanation """
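One presentational detail worth noting about the last groupby: grouping on the weekday name returns the days in alphabetical order. A small sketch, not part of the original exercise, that puts the daily call counts back into calendar order before plotting:
weekday_order = ['Monday', 'Tuesday', 'Wednesday', 'Thursday',
                 'Friday', 'Saturday', 'Sunday']

# .size() counts rows per group without picking a particular column;
# newer pandas versions spell the accessor .dt.day_name() instead of .dt.weekday_name.
calls_per_weekday = (subset
                     .groupby(subset['Call Date TS'].dt.weekday_name)
                     .size()
                     .reindex(weekday_order))
calls_per_weekday.plot(kind='bar')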
olinguyen/shogun
doc/ipython-notebooks/clustering/GMM.ipynb
gpl-3.0
%pylab inline %matplotlib inline import os SHOGUN_DATA_DIR=os.getenv('SHOGUN_DATA_DIR', '../../../data') # import all Shogun classes from shogun import * from matplotlib.patches import Ellipse # a tool for visualisation def get_gaussian_ellipse_artist(mean, cov, nstd=1.96, color="red", linewidth=3): """ Returns an ellipse artist for nstd times the standard deviation of this Gaussian, specified by mean and covariance """ # compute eigenvalues (ordered) vals, vecs = eigh(cov) order = vals.argsort()[::-1] vals, vecs = vals[order], vecs[:, order] theta = numpy.degrees(arctan2(*vecs[:, 0][::-1])) # width and height are "full" widths, not radius width, height = 2 * nstd * sqrt(vals) e = Ellipse(xy=mean, width=width, height=height, angle=theta, \ edgecolor=color, fill=False, linewidth=linewidth) return e """ Explanation: Gaussian Mixture Models and Expectation Maximisation in Shogun By Heiko Strathmann - [email protected] - http://github.com/karlnapf - http://herrstrathmann.de. Based on the GMM framework of the Google summer of code 2011 project of Alesis Novik - https://github.com/alesis This notebook is about learning and using Gaussian <a href="https://en.wikipedia.org/wiki/Mixture_model">Mixture Models</a> (GMM) in Shogun. Below, we demonstrate how to use them for sampling, for density estimation via <a href="https://en.wikipedia.org/wiki/Expectation-maximization_algorithm">Expectation Maximisation (EM)</a>, and for <a href="https://en.wikipedia.org/wiki/Data_clustering">clustering</a>. Note that Shogun's interfaces for mixture models are deprecated and are soon to be replace by more intuitive and efficient ones. This notebook contains some python magic at some places to compensate for this. However, all computations are done within Shogun itself. Finite Mixture Models (skip if you just want code examples) We begin by giving some intuition about mixture models. Consider an unobserved (or latent) discrete random variable taking $k$ states $s$ with probabilities $\text{Pr}(s=i)=\pi_i$ for $1\leq i \leq k$, and $k$ random variables $x_i|s_i$ with arbritary densities or distributions, which are conditionally independent of each other given the state of $s$. In the finite mixture model, we model the probability or density for a single point $x$ begin generated by the weighted mixture of the $x_i|s_i$ $$ p(x)=\sum_{i=1}^k\text{Pr}(s=i)p(x)=\sum_{i=1}^k \pi_i p(x|s) $$ which is simply the marginalisation over the latent variable $s$. Note that $\sum_{i=1}^k\pi_i=1$. For example, for the Gaussian mixture model (GMM), we get (adding a collection of parameters $\theta:={\boldsymbol{\mu}i, \Sigma_i}{i=1}^k$ that contains $k$ mean and covariance parameters of single Gaussian distributions) $$ p(x|\theta)=\sum_{i=1}^k \pi_i \mathcal{N}(\boldsymbol{\mu}_i,\Sigma_i) $$ Note that any set of probability distributions on the same domain can be combined to such a mixture model. Note again that $s$ is an unobserved discrete random variable, i.e. we model data being generated from some weighted combination of baseline distributions. Interesting problems now are Learning the weights $\text{Pr}(s=i)=\pi_i$ from data Learning the parameters $\theta$ from data for a fixed family of $x_i|s_i$, for example for the GMM Using the learned model (which is a density estimate) for clustering or classification All of these problems are in the context of unsupervised learning since the algorithm only sees the plain data and no information on its structure. 
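One quantity that ties these problems together, and which underlies both the E-step of EM below and the hard cluster assignments made at the end of the notebook, is the posterior probability of the latent state given an observation, obtained from Bayes' rule: $$ \text{Pr}(s=i|x)=\frac{\pi_i\, p(x|s=i)}{\sum_{j=1}^k \pi_j\, p(x|s=j)} $$ For the GMM this is simply the weighted Gaussian density of component $i$ at $x$, normalised over all components.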
Expectation Maximisation <a href="https://en.wikipedia.org/wiki/Expectation-maximization_algorithm">Expectation Maximisation (EM)</a> is a powerful method to learn any form of latent models and can be applied to the Gaussian mixture model case. Standard methods such as Maximum Likelihood are not straightforward for latent models in general, while EM can almost always be applied. However, it might converge to local optima and does not guarantee globally optimal solutions (this can be dealt with with some tricks as we will see later). While the general idea in EM stays the same for all models it can be used on, the individual steps depend on the particular model that is being used. The basic idea in EM is to maximise a lower bound, typically called the free energy, on the log-likelihood of the model. It does so by repeatedly performing two steps The E-step optimises the free energy with respect to the latent variables $s_i$, holding the parameters $\theta$ fixed. This is done via setting the distribution over $s$ to the posterior given the used observations. The M-step optimises the free energy with respect to the paramters $\theta$, holding the distribution over the $s_i$ fixed. This is done via maximum likelihood. It can be shown that this procedure never decreases the likelihood and that stationary points (i.e. neither E-step nor M-step produce changes) of it corresponds to local maxima in the model's likelihood. See references for more details on the procedure, and how to obtain a lower bound on the log-likelihood. There exist many different flavours of EM, including variants where only subsets of the model are iterated over at a time. There is no learning rate such as step size or similar, which is good and bad since convergence can be slow. Mixtures of Gaussians in Shogun The main class for GMM in Shogun is <a href="http://www.shogun-toolbox.org/doc/en/latest/classshogun_1_1CGMM.html">CGMM</a>, which contains an interface for setting up a model and sampling from it, but also to learn the model (the $\pi_i$ and parameters $\theta$) via EM. It inherits from the base class for distributions in Shogun, <a href="http://www.shogun-toolbox.org/doc/en/latest/classshogun_1_1CDistribution.html">CDistribution</a>, and combines multiple single distribution instances to a mixture. We start by creating a GMM instance, sampling from it, and computing the log-likelihood of the model for some points, and the log-likelihood of each individual component for some points. All these things are done in two dimensions to be able to plot them, but they generalise to higher (or lower) dimensions easily. Let's sample, and illustrate the difference of knowing the latent variable indicating the component or not. End of explanation """ # create mixture of three Gaussians num_components=3 num_max_samples=100 gmm=GMM(num_components) dimension=2 # set means (TODO interface should be to construct mixture from individuals with set parameters) means=zeros((num_components, dimension)) means[0]=[-5.0, -4.0] means[1]=[7.0, 3.0] means[2]=[0, 0.] 
[gmm.set_nth_mean(means[i], i) for i in range(num_components)] # set covariances covs=zeros((num_components, dimension, dimension)) covs[0]=array([[2, 1.3],[.6, 3]]) covs[1]=array([[1.3, -0.8],[-0.8, 1.3]]) covs[2]=array([[2.5, .8],[0.8, 2.5]]) [gmm.set_nth_cov(covs[i],i) for i in range(num_components)] # set mixture coefficients, these have to sum to one (TODO these should be initialised automatically) weights=array([0.5, 0.3, 0.2]) gmm.set_coef(weights) """ Explanation: Set up the model in Shogun End of explanation """ # now sample from each component seperately first, the from the joint model hold(True) colors=["red", "green", "blue"] for i in range(num_components): # draw a number of samples from current component and plot num_samples=int(rand()*num_max_samples)+1 # emulate sampling from one component (TODO fix interface of GMM to handle this) w=zeros(num_components) w[i]=1. gmm.set_coef(w) # sample and plot (TODO fix interface to have loop within) X=array([gmm.sample() for _ in range(num_samples)]) plot(X[:,0], X[:,1], "o", color=colors[i]) # draw 95% elipsoid for current component gca().add_artist(get_gaussian_ellipse_artist(means[i], covs[i], color=colors[i])) hold(False) _=title("%dD Gaussian Mixture Model with %d components" % (dimension, num_components)) # since we used a hack to sample from each component gmm.set_coef(weights) """ Explanation: Sampling from mixture models Sampling is extremely easy since every instance of the <a href="http://www.shogun-toolbox.org/doc/en/latest/classshogun_1_1CDistribution.html">CDistribution</a> class in Shogun allows to sample from it (if implemented) End of explanation """ # generate a grid over the full space and evaluate components PDF resolution=100 Xs=linspace(-10,10, resolution) Ys=linspace(-8,6, resolution) pairs=asarray([(x,y) for x in Xs for y in Ys]) D=asarray([gmm.cluster(pairs[i])[3] for i in range(len(pairs))]).reshape(resolution,resolution) figure(figsize=(18,5)) subplot(1,2,1) pcolor(Xs,Ys,D) xlim([-10,10]) ylim([-8,6]) title("Log-Likelihood of GMM") subplot(1,2,2) pcolor(Xs,Ys,exp(D)) xlim([-10,10]) ylim([-8,6]) _=title("Likelihood of GMM") """ Explanation: Evaluating densities in mixture Models Next, let us visualise the density of the joint model (which is a convex sum of the densities of the individual distributions). Note the similarity between the calls since all distributions implement the <a href="http://www.shogun-toolbox.org/doc/en/latest/classshogun_1_1CDistribution.html">CDistribution</a> interface, including the mixture. End of explanation """ # sample and plot (TODO fix interface to have loop within) X=array([gmm.sample() for _ in range(num_max_samples)]) plot(X[:,0], X[:,1], "o") _=title("Samples from GMM") """ Explanation: Density estimating with mixture models Now let us draw samples from the mixture model itself rather than from individual components. This is the situation that usually occurs in practice: Someone gives you a bunch of data with no labels attached to it all all. Our job is now to find structure in the data, which we will do with a GMM. End of explanation """ def estimate_gmm(X, num_components): # bring data into shogun representation (note that Shogun data is in column vector form, so transpose) features=RealFeatures(X.T) gmm_est=GMM(num_components) gmm_est.set_features(features) # learn GMM gmm_est.train_em() return gmm_est """ Explanation: Imagine you did not know the true generating process of this data. What would you think just looking at it? 
There are clearly at least two components (or clusters) that might have generated this data, but three also looks reasonable. So let us try to learn a Gaussian mixture model on those. End of explanation """ component_numbers=[2,3] # plot true likelihood D_true=asarray([gmm.cluster(pairs[i])[num_components] for i in range(len(pairs))]).reshape(resolution,resolution) figure(figsize=(18,5)) subplot(1,len(component_numbers)+1,1) pcolor(Xs,Ys,exp(D_true)) xlim([-10,10]) ylim([-8,6]) title("True likelihood") for n in range(len(component_numbers)): # TODO get rid of these hacks and offer nice interface from Shogun # learn GMM with EM gmm_est=estimate_gmm(X, component_numbers[n]) # evaluate at a grid of points D_est=asarray([gmm_est.cluster(pairs[i])[component_numbers[n]] for i in range(len(pairs))]).reshape(resolution,resolution) # visualise densities subplot(1,len(component_numbers)+1,n+2) pcolor(Xs,Ys,exp(D_est)) xlim([-10,10]) ylim([-8,6]) _=title("Estimated likelihood for EM with %d components"%component_numbers[n]) """ Explanation: So far so good, now lets plot the density of this GMM using the code from above End of explanation """ # function to draw ellipses for all components of a GMM def visualise_gmm(gmm, color="blue"): for i in range(gmm.get_num_components()): component=Gaussian.obtain_from_generic(gmm.get_component(i)) gca().add_artist(get_gaussian_ellipse_artist(component.get_mean(), component.get_cov(), color=color)) # multiple runs to illustrate random initialisation matters for _ in range(3): figure(figsize=(18,5)) subplot(1, len(component_numbers)+1, 1) plot(X[:,0],X[:,1], 'o') visualise_gmm(gmm_est, color="blue") title("True components") for i in range(len(component_numbers)): gmm_est=estimate_gmm(X, component_numbers[i]) subplot(1, len(component_numbers)+1, i+2) plot(X[:,0],X[:,1], 'o') visualise_gmm(gmm_est, color=colors[i]) # TODO add a method to get likelihood of full model, retraining is inefficient likelihood=gmm_est.train_em() _=title("Estimated likelihood: %.2f (%d components)"%(likelihood,component_numbers[i])) """ Explanation: It is also possible to access the individual components of the mixture distribution. In our case, we can for example draw 95% ellipses for each of the Gaussians using the method from above. We will do this (and more) below. On local minima of EM It seems that three comonents give a density that is closest to the original one. While two components also do a reasonable job here, it might sometimes happen (<a href="http://www.shogun-toolbox.org/doc/en/latest/classshogun_1_1CKMeans.html">KMeans</a> is used to initialise the cluster centres if not done by hand, using a random cluster initialisation) that the upper two Gaussians are grouped, re-run for a couple of times to see this. This illustrates how EM might get stuck in a local minimum. We will do this below, where it might well happen that all runs produce the same or different results - no guarantees. Note that it is easily possible to initialise EM via specifying the parameters of the mixture components as did to create the original model above. One way to decide which of multiple convergenced EM instances to use is to simply compute many of them (with different initialisations) and then choose the one with the largest likelihood. WARNING Do not select the number of components like this as the model will overfit. 
End of explanation """ def cluster_and_visualise(gmm_est): # obtain cluster index for each point of the training data # TODO another hack here: Shogun should allow to pass multiple points and only return the index # as the likelihood can be done via the individual components # In addition, argmax should be computed for us, although log-pdf for all components should also be possible clusters=asarray([argmax(gmm_est.cluster(x)[:gmm.get_num_components()]) for x in X]) # visualise points by cluster hold(True) for i in range(gmm.get_num_components()): indices=clusters==i plot(X[indices,0],X[indices,1], 'o', color=colors[i]) hold(False) # learn gmm again gmm_est=estimate_gmm(X, num_components) figure(figsize=(18,5)) subplot(121) cluster_and_visualise(gmm) title("Clustering under true GMM") subplot(122) cluster_and_visualise(gmm_est) _=title("Clustering under estimated GMM") """ Explanation: Clustering with mixture models Recall that our initial goal was not to visualise mixture models (although that is already pretty cool) but to find clusters in a given set of points. All we need to do for this is to evaluate the log-likelihood of every point under every learned component and then pick the largest one. Shogun can do both. Below, we will illustrate both cases, obtaining a cluster index, and evaluating the log-likelihood for every point under each component. End of explanation """ figure(figsize=(18,5)) for comp_idx in range(num_components): subplot(1,num_components,comp_idx+1) # evaluated likelihood under current component # TODO Shogun should do the loop and allow to specify component indices to evaluate pdf for # TODO distribution interface should be the same everywhere component=Gaussian.obtain_from_generic(gmm.get_component(comp_idx)) cluster_likelihoods=asarray([component.compute_PDF(X[i]) for i in range(len(X))]) # normalise cluster_likelihoods-=cluster_likelihoods.min() cluster_likelihoods/=cluster_likelihoods.max() # plot, coloured by likelihood value cm=get_cmap("jet") hold(True) for j in range(len(X)): color = cm(cluster_likelihoods[j]) plot(X[j,0], X[j,1] ,"o", color=color) hold(False) title("Data coloured by likelihood for component %d" % comp_idx) """ Explanation: These are clusterings obtained via the true mixture model and the one learned via EM. There is a slight subtlety here: even the model under which the data was generated will not cluster the data correctly if the data is overlapping. This is due to the fact that the cluster with the largest probability is chosen. This doesn't allow for any ambiguity. If you are interested in cases where data overlaps, you should always look at the log-likelihood of the point for each cluster and consider taking into acount "draws" in the decision, i.e. probabilities for two different clusters are equally large. Below we plot all points, coloured by their likelihood under each component. End of explanation """ # compute cluster index for every point in space D_est=asarray([gmm_est.cluster(pairs[i])[:num_components].argmax() for i in range(len(pairs))]).reshape(resolution,resolution) # visualise clustering cluster_and_visualise(gmm_est) # visualise space partitioning hold(True) pcolor(Xs,Ys,D_est) hold(False) """ Explanation: Note how the lower left and middle cluster are overlapping in the sense that points at their intersection have similar likelihoods. If you do not care at all about this and are just interested in a partitioning of the space, simply choose the maximum. Below we plot the space partitioning for a hard clustering. 
End of explanation """
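Picking up the earlier warning about overfitting when choosing the number of components: a common remedy, not covered in this notebook, is to compare candidate component counts by their average log-likelihood on held-out data rather than on the data used for fitting. A rough sketch that reuses the estimate_gmm helper and the sample matrix X from above (the 70/30 split and the candidate counts are arbitrary choices):
# Hold out part of the samples and score each candidate model on them.
n_train = int(0.7 * len(X))
X_train, X_test = X[:n_train], X[n_train:]

for k in [2, 3, 4, 5]:
    gmm_k = estimate_gmm(X_train, k)
    # index k of cluster() holds the log-likelihood of the full k-component model,
    # matching how the notebook evaluated the density on the grid earlier
    held_out = mean([gmm_k.cluster(x)[k] for x in X_test])
    print("%d components: held-out mean log-likelihood %.3f" % (k, held_out))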
ultiyuan/test0
lessons/.ipynb_checkpoints/HydroAssignment-checkpoint.ipynb
gpl-2.0
#Import the required functions from VortexPanel.py and BoundaryLayer.py from VortexPanel import Panel, solve_gamma, plot_flow, make_circle #Create function to calculate the pressure coefficient def cp(gamma): return gamma**2-1 def calc_Cp(u_e, U_inf=1): #Find analytical result for Cp. Cp = [] Cp = [(1-((i**2)/(U_inf**2))) for i in u_e] return Cp N = [32, 64, 128] #Create list of resolutions for n in N: s = numpy.linspace(0, numpy.pi, n) #Points about the circumference of the circle circle = make_circle(n) #Create geometry solve_gamma(circle) c_p = numpy.zeros(n) for i,p_i in enumerate(circle): c_p[i] = cp(p_i.gamma) pyplot.plot(s, c_p, label = "N = %.f" % n) pyplot.legend(loc='best') pyplot.xlabel('Location on Cylinder Surface') pyplot.ylabel('Numerical $C_P$') s = numpy.array(numpy.linspace(0, numpy.pi, N[2])) u_e = 2.*numpy.sin(s) pyplot.figure() pyplot.plot(s, calc_Cp(u_e)) pyplot.xlabel('Location on Cylinder Surface') pyplot.ylabel('Analytical $C_P$') """ Explanation: Numerical Hydrodynamics Assignment Introduction This report will investigate the hydrodynamic characteristics of several different geometries. This will be done using numerical and analytical methods to predict pressure and force characteristics about the following geometeries: A circular cyclinder; A joukowski foil; A cambered joukowski foil. The analysis performed on each geometry will be described in more detail in the respective sections. A Circular Cylinder A circular cylinder is one of the simpler 2D geometries for which numerical and analytical analysis may be performed. As a result it allows numerical results to be easily compared with experimental values to enable validation of methods. The analysis required for this geometry is as follows: 1. To compute the pressure coefficient $c_p$ on the surface for three resolutions $N=32,64,128$, and compare the results to the exact potential flow solution. 1. To compute the friction drag coefficient $C_F$, and an estimate for the pressure drag coefficient $C_D$ for the same three resolutions at $Re_D=10^5$. Method Computing the Pressure Coefficient End of explanation """ #Import and create some useful functions. #These functions are fully explained in the comprehensive series of hydrodynamic ipython notebooks by Dr Wemouth, UoS. from BoundaryLayer import heun, g_pohl, Pohlhausen, march def pohlF(eta): return 2*eta-2*eta**3+eta**4 def pohlG(eta): return eta/6*(1-eta)**3 def disp_ratio(lam): return 3./10.-lam/120. def mom_ratio(lam): return 37./315.-lam/945.-lam**2/9072. def df_0(lam): return 2+lam/6. 
def g_1(lam): return df_0(lam)-lam*(disp_ratio(lam)+2*mom_ratio(lam)) from scipy.optimize import bisect lam0 = bisect(g_1,-12,12) #Useful information nu= 1e-5 N = 32 s = numpy.linspace(0, numpy.pi, N) u_e = 2.*numpy.sin(s) du_e = 2.*numpy.cos(s) delta, lam, iSep = march(s, u_e, du_e, nu) from numpy import trapz def T_w(N,nu): s = numpy.linspace(0,numpy.pi,N) u_e= 2.*numpy.sin(s) rho=1 du_e=2.*numpy.cos(s) delta,lam,iSep = march(s,u_e,du_e,nu) Re_d = delta*u_e/nu half_cf=[] for i in range(N): if Re_d[i]==0: half_cf.append(0.0) else: half_cf.append(df_0(lam[i])/Re_d[i]) return half_cf*rho*(u_e**2) s_x=numpy.sin(s) nu=1e-5 def F_F(lam,nu,delta,u_e,s_x): # s_x=numpy.sin(s) T_w2=T_w(N,nu) return (trapz(T_w2[0:iSep+1]*s_x[0:iSep+1],s[0:iSep+1])) def C_F(N,nu,rho=1,S=1,U_inf=1): return(2*F_F(N,nu))/(rho*U_inf*S) C_F_flat= 1.33*numpy.sqrt(nu/s[-1]) print('Flat Plate: '+'%.2e' %C_F_flat) C_F_circle= 2*F_F(lam,nu,delta,u_e,s_x)/s[-1] print('Circle: '+'%.2e' %C_F_circle) tau = T_w(N,nu) pyplot.plot(s,tau*s_x) pyplot.scatter(s[iSep], tau[iSep]*s_x[iSep], label='Flow separation point') pyplot.xlabel('$s$',size=20) pyplot.ylabel(r'$\tau_w s_x$', size=20) pyplot.gca().set_xlim([0, 3.5]) pyplot.gca().set_ylim([-0.002, 0.01]) pyplot.legend(loc='best') pyplot.show() """ Explanation: Computing the Friction Drag Coefficient End of explanation """ def solve_C_d(u_e,iSep,s): Sy = [-1*numpy.sin(i) for i in s] k=0 C_p = calc_Cp(u_e) for j in range(0,len(C_p)): if j >iSep: C_p[j] = C_p[iSep] else: C_p[j]=C_p[j]*Sy[j] pyplot.plot(s,C_p) pyplot.xlabel('$s$',size=20) pyplot.ylabel(r'$C_D$', size=20) pyplot.gca().set_xlim([0, 3.5]) pyplot.gca().set_ylim([-0.5, 3.5]) pyplot.scatter(s[iSep], C_p[iSep], label='Flow separation point') pyplot.legend(loc='lower right') return numpy.trapz(C_p,s) nu = 1e-5 N=32 s=numpy.linspace(0,numpy.pi,N) u_e= 2.*numpy.sin(s) du_e = 2.*numpy.cos(s) delta,lam,iSep= march(s,u_e,du_e,nu) print (solve_C_d(u_e,iSep,s)) """ Explanation: Computing the Friction Drag Coefficient End of explanation """ from VortexPanel import make_jukowski, solve_gamma_kutta #Analytical solution for this jukowski geometry def jukowski_CL(alpha,t_c=0.15+0.019): return 2.*numpy.pi*(1+4/3/numpy.sqrt(3)*t_c)*numpy.sin(alpha) alpha = numpy.linspace(0, 10./180*numpy.pi, 11) #Compute range of angles, this is done for every integer. It is possible for a greater level of detail through increasing the third variable in the function. N = [32, 64, 128] #List of panel numbers or resolutions. analytic = jukowski_CL(alpha) degrees = numpy.linspace(0, 10, 11) #For the graph axis def lift(panels): #A function to calculate the lift generated from a group of panels. c = panels[0].x[0]-panels[len(panels)/2].x[0] return -4./c*numpy.sum([p.gamma*p.S for p in panels]) """ Explanation: Analysis Joukowski Foil The geometry for the joukowski foil is easily generated using the ''Joukowski Transform" which takes a circle in the xy plane and transforms it to the $\zeta$ plane. The equation is given as follows. $$ \zeta = z + \frac 1z$$ A function for this geometry has already been programmed and will be imported into this notebook. The analysis required for this foil is detailed as follows: - To compute the lift coefficient $C_L$ as a function of the angle of attack $\alpha\le 10^o$ for three resolutions $N=32,64,128$, and and to compare the results to the exact potential flow solution. - To compute the separation point location on either side of the foil for the same angle of attack range for $N=128$. 
Method Calculating the Lift Coefficient End of explanation """ def calc_CL(alpha, N): foil = make_jukowski(N) solve_gamma_kutta(foil, alpha) return lift(foil) C_L = [] #Create empty list. This is a lazy method, a computationally faster method would be to use arrays. However this is quick to program. for i in N: empty = [] for j in alpha: empty.append(calc_CL(j, i)) C_L.append(empty) """ Explanation: Now to create a function to calculate the lift coefficent from a joukowski foil given an angle of attack, $\alpha$, and number of panels, N. End of explanation """ pyplot.figure(figsize=(10,7)) pyplot.plot(degrees, analytic, label=r'Analytical Solution' ) pyplot.plot(degrees, C_L[0], label=r'Solution, N = 32') pyplot.plot(degrees, C_L[1], label=r'Solution, N = 64') pyplot.plot(degrees, C_L[2], label=r'Solution, N = 128') pyplot.legend(loc='lower right') pyplot.xlabel(r'Angle of Attack $^o$', fontsize=16) pyplot.ylabel(r'$C_L$', fontsize=16) axes = pyplot.gca() axes.set_xlim([0,11]) axes.set_ylim([0,1.5]) pyplot.show() """ Explanation: These results are understood best graphically. End of explanation """ from BoundaryLayer import Pohlhausen, march def solve_plot_boundary_layers_1(panels,alpha=0,nu=1e-5): #Function from boundarylayer module, with the plot lines commented out. from VortexPanel import plot_flow from matplotlib import pyplot # Set up and solve the top boundary layer top_panels = [p for p in panels if p.gamma<=0] # negative gamma on the top top = Pohlhausen(top_panels,nu) top.march() # Set up and solve the bottom boundary layer bottom_panels = [p for p in panels if p.gamma>=0] # positive gamma on the bottom bottom_panels = bottom_panels[::-1] # reverse array so 0 is stagnation bottom = Pohlhausen(bottom_panels,nu) bottom.march() # plot flow with stagnation points #plot_flow(panels,alpha) #pyplot.scatter(top.xSep, top.ySep, s=100, c='r') #pyplot.scatter(bottom.xSep, bottom.ySep, s=100, c='g') return top,bottom def predict_jukowski_separation(t_c,alpha=0,N=128): #Function from separation prediction and altered to return a value. # set dx to gets the correct t/c foil = make_jukowski(N,dx=t_c-0.019) #t_c-0.019 is the shift from t/c to dx # find and print t/c x0 = foil[N/2].xc c = foil[0].xc-x0 t = 2.*numpy.max([p.yc for p in foil]) #print "t/c = "+"%.3f"%(t/c) # solve potential flow and boundary layer evolution solve_gamma_kutta(foil,alpha) top,bottom = solve_plot_boundary_layers_1(foil,alpha) #Return point of seperation return (top.x_sep-x0)/c, (bottom.x_sep-x0)/c sep_point = [] #Create empty list as it's easy. for a in alpha: sep_point.append(predict_jukowski_separation(0.15, a)) sep_point = numpy.array(sep_point) # Turn sep_point from list into an array """ Explanation: Calculating the Flow Separation Points End of explanation """ pyplot.figure(figsize=(10,7)) pyplot.ylabel(r'$\frac{x}{c}$', fontsize=24) pyplot.xlabel(r'Angle of Attack $^o$', fontsize=16) pyplot.plot(degrees, sep_point[:,0], label=r'Top Separation Point') pyplot.plot(degrees, sep_point[:,1], label=r'Bottom Separation Point') pyplot.legend(loc='right') pyplot.show() """ Explanation: Now to plot the graph of these results. End of explanation """ def camber(t_c, alpha, dy, N=128): #Function to find empirical relationship between dy and camber. 
dx=t_c-0.019 foil = make_jukowski(N, dx, dy) #plot_flow(foil, alpha) #Can be commented in to inspect the shape of the foil a = int(N/4) b = int((3*N)/4) y_bar = 0.5*(foil[a].yc + foil[b].yc) return y_bar trial_dy = numpy.linspace(0, -1, 30) #Range of experimental values for dy y_bar = [] #Create empty list. for i in trial_dy: y_bar.append(camber(0.15, 0, i, 128)) """ Explanation: Analysis Cambered Joukowski Foil The geometry is the 15% Jukowski foil with camber, quantified by the height of the mean line at $x=0$, ie $\bar y = \frac 12 (y[N/4]+y[3N/4])$. The the lift force coefficient $C_L$ and boundary layer separation point locations for $\bar y/c = 0.02,0.04,0.08$ at $\alpha=0$ using $N=128$ panels are required, then a comparision to the symetric foil solution needs to be completed. Method Generating the Foil Geometry Altering the dy input to the make_jukowski function alters the camber of the foil, however it is unknown what the exact linear relationship is. From the Joukowski transform equation in the first section of this report, the following equation is true. $$ dy \propto \bar y $$ As a result this relationship must be investigated, it is likely that for a particular foil geometry there will be an empirical factor that works for that case in particular. First a function is required to give the distance $\bar y$ when given dy. End of explanation """ #Investigating the relationship between dy and y_bar def camber_emp(trial_dy, y_bar): from sklearn.linear_model import LinearRegression #This is a module developed for machine learning. It is useful for this sort of analysis. X = trial_dy[:, numpy.newaxis] regr = LinearRegression() regr.fit(X, y_bar) fig = pyplot.figure(figsize=(8,6)) ax = fig.add_subplot(1,1,1) pyplot.ylabel(r"$\bar y$ ",fontsize=16) pyplot.xlabel("dy ",fontsize=16) pyplot.plot(trial_dy, y_bar) pyplot.gca().set_xlim(left=-1) pyplot.gca().set_ylim(bottom=0) print("Multiply the dy value by %.5f to obtain the ybar/c value for the foil geometry chosen." % regr.coef_) pyplot.show() return regr.coef_ coef = camber_emp(trial_dy, y_bar) """ Explanation: Create a function to give the: 1. Graphical relationship between dy and $\bar y$ to confirm its linearity. 1. Give the gradient of the relationship. End of explanation """ def predict_jukowski_separation_camber(t_c, y_bar_c, alpha=0,N=128): #Function from separation prediction and altered to account for camber. # set dx to gets the correct t/c dx = t_c -0.019 dy = y_bar_c/(-0.84675) foil = make_jukowski(N, dx, dy) #t_c-0.019 is the shift from t/c to dx x0 = foil[N/2].xc c = foil[0].xc-x0 t = 2.*numpy.max([p.yc for p in foil]) # solve potential flow and boundary layer evolution solve_gamma_kutta(foil,alpha) top,bottom = solve_plot_boundary_layers_1(foil,alpha) #Return point of separation return (top.x_sep-x0)/c, (bottom.x_sep-x0)/c def calc_CL_camb(alpha, N, t_c, y_bar_c): #Function from earlier in this report altered to account for camber. dx = t_c - 0.019 dy = y_bar_c/(-0.84675) foil = make_jukowski(N, dx, dy) solve_gamma_kutta(foil, alpha) return lift(foil) fill_val = jukowski_CL(0) #The lift coefficient for the symmetric foil. analytic = numpy.full_like(y_bar_c, fill_val) #Python will get sad unless it gets an array of analytical C_L to plot. analytic """ Explanation: This is clearly a linear relationship so it is certainly appropriate to use this value generated. The following relationship between these factors can be derived. 
$$\frac{\bar y/c}{-0.84675} = dy$$ Computing the Lift Coefficent and Separation Points for Different Cambers Take functions used earlier in this report and alter to account for camber. End of explanation """ y_bar_c = [0.02, 0.04, 0.08] #List of cambers. c_l = [] for i in y_bar_c: c_l.append(calc_CL_camb(0, 128, 0.15, i)) sep_points_camber = [] for i in y_bar_c: sep_points_camber.append(predict_jukowski_separation_camber(0.15, i, 0, 128)) sep_points_camber = numpy.array(sep_points_camber) """ Explanation: Find the $C_L$ values and separation points for the various cambers. End of explanation """ fig, ax1 = pyplot.subplots(figsize=(10,7)) ax2 = ax1.twinx() ax1.set_ylabel(r'$C_L$', fontsize=16) ax1.set_xlabel(r'$\frac{\bar{y}}{c}$', fontsize=28) ax1.scatter(y_bar_c, c_l, label=r'Numerical $C_L$') ax1.plot(y_bar_c, analytic, label=r'Symmetric Foil $C_L$', color='black') ax2.scatter(y_bar_c, sep_points_camber[:,1], label=r'Bottom Separation Point',color='red') ax2.scatter(y_bar_c, sep_points_camber[:,0], label=r'Top Separation Point',color='green') ax2.set_ylabel(r'$\frac{x}{c}$', fontsize=28) ax1.legend(loc='best') ax2.legend(loc='right') #Plot the separation points. pyplot.figure(figsize=(8, 8)) pyplot.scatter(y_bar_c, sep_points_camber[:,1], label=r'Bottom Separation Point',color='red') pyplot.scatter(y_bar_c, sep_points_camber[:,0], label=r'Top Separation Point',color='green') pyplot.ylabel(r'$\frac{x}{c}$', fontsize=28) pyplot.xlabel(r'$\frac{\bar{y}}{c}$', fontsize=28) pyplot.legend(loc='best') pyplot.show() #Plot the lift coefficents. pyplot.figure(figsize=(8, 8)) pyplot.ylabel(r'$C_L$', fontsize=16) pyplot.xlabel(r'$\frac{\bar{y}}{c}$', fontsize=28) pyplot.scatter(y_bar_c, c_l, label=r'Numerical $C_L$') pyplot.plot(y_bar_c, analytic, label=r'Symmetric Foil $C_L$', color='black') pyplot.legend(loc='best') pyplot.show() """ Explanation: Now to visually inspect these results. End of explanation """
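As a rough cross-check on the cambered results (an approximation added for comparison, not part of the original analysis), thin-aerofoil theory for a parabolic mean line of height $\bar y$ at mid-chord gives a zero-lift angle of about $-2\bar y/c$, i.e. $C_L \approx 4\pi \bar y/c$ at $\alpha = 0$. The Joukowski mean line is only approximately parabolic, so the agreement is expected to be loose:

```python
thin_foil_estimate = [4.*numpy.pi*yc for yc in y_bar_c]
for yc, cl_num, cl_thin in zip(y_bar_c, c_l, thin_foil_estimate):
    print('y_bar/c = %.2f : panel-method C_L = %.3f, thin-aerofoil estimate = %.3f'
          % (yc, cl_num, cl_thin))
```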
muxiaobai/CourseExercises
python/kaggle/data-visual/Multivariate.ipynb
gpl-2.0
sns.lmplot(x='Attack', y='Defense', hue='Legendary', fit_reg=False, markers=['x', 'o'], data=pokemon)
plt.show()
sns.heatmap(
    pokemon.loc[:, ['HP', 'Attack', 'Sp. Atk', 'Defense', 'Sp. Def', 'Speed']].corr(),
    annot=True
)
plt.show()
import pandas as pd
from pandas.plotting import parallel_coordinates
p = (pokemon[(pokemon['Type 1'].isin(["Psychic", "Fighting"]))]
         .loc[:, ['Type 1', 'Attack', 'Sp. Atk', 'Defense', 'Sp. Def']]
    )
parallel_coordinates(p, 'Type 1')
plt.show()

"""
Explanation: Multivariate plotting (multiple variables)
df.plot.scatter()
df.plot.box()
sns.heatmap
pd.plotting.parallel_coordinates
References
- https://zhuanlan.zhihu.com/p/27683042
- http://seaborn.pydata.org/tutorial/categorical.html#categorical-scatterplots
End of explanation
"""
sns.swarmplot(x='Generation', y='Defense', hue='Legendary', data=pokemon)
plt.show()
sns.stripplot(x='Generation', y='Defense', hue='Legendary', data=pokemon)
plt.show()
sns.boxplot(x="Generation", y="Total", hue='Legendary', data=pokemon)
plt.show()
sns.violinplot(x='Generation', y='Defense', hue='Legendary', data=pokemon, split=True, inner="stick", palette="Set3");
plt.show()
sns.barplot(x="Generation", y="Defense", hue="Legendary", data=pokemon)
plt.show()
sns.countplot(x="Generation", hue="Legendary", data=pokemon)
plt.show()
sns.pointplot(x="Generation", y="Defense", hue="Legendary", data=pokemon)
plt.show()
# sns.swarmplot(x='Generation',y='Defense',hue='Legendary',data = pokemon)
sns.factorplot(x="Generation", y="Defense", hue="Legendary", data=pokemon, kind="swarm");
sns.factorplot(x="Generation", y="Defense", hue="Legendary", data=pokemon, kind="box")
sns.factorplot(x="Generation", y="Defense", hue="Legendary", data=pokemon, kind="bar")
sns.factorplot(x="Generation", y="Defense", col="Legendary", data=pokemon, kind="bar")
sns.factorplot(x="Generation", y="Defense", row="Legendary", data=pokemon, kind="bar")
# hue, row and col give three different ways of splitting the data
#sns.factorplot(x="Generation", y="Defense", hue="Legendary", col="time", data=pokemon, kind="bar")
plt.show()

"""
Explanation: A useful way to think about Seaborn's categorical plots is to split them into three families: those that show every observation at each level of the categorical variable, those that show an abstract representation of each distribution of observations, and those that apply a statistical estimate to show a measure of central tendency and a confidence interval:
The first family includes the functions swarmplot() and stripplot()
The second includes boxplot() and violinplot()
The third includes barplot() and pointplot()
End of explanation
"""
g = sns.FacetGrid(pokemon, col="Generation", row="Legendary")
g.map(sns.kdeplot, "Attack")
plt.show()
sns.pairplot(pokemon[['HP', 'Attack', 'Defense']])
plt.show()
g = sns.PairGrid(pokemon,
                 x_vars=["Generation", "Legendary"],
                 y_vars=["Attack", "Defense", "Sp. Atk", "Sp. Def"],
                 aspect=.85, size=6)
g.map(sns.violinplot, palette="pastel")
plt.show()

"""
Explanation: 1. Facet Grid
2. Pair Plot
End of explanation
"""
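The reference list at the top of this notebook also mentions the pandas shortcuts df.plot.scatter() and df.plot.box(), which are not demonstrated above. A minimal sketch with the same pokemon DataFrame (column names reused from the calls above) could look like this:

```python
pokemon.plot.scatter(x='Attack', y='Defense')
plt.show()
pokemon.loc[:, ['HP', 'Attack', 'Defense', 'Speed']].plot.box()
plt.show()
```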
eshlykov/mipt-day-after-day
statistics/hw-09/09.2.ipynb
unlicense
import numpy

"""
Explanation: 9. Linear regression
2. In a quadrilateral $ABCD$, independent, equally precise measurements of the angles $ABD$, $DBC$, $ABC$, $BCD$, $CDB$, $BDA$, $CDA$, $DAB$ (in degrees) gave the results $50.78$, $30.25$, $78.29$, $99.57$, $50.42$, $40.59$, $88.87$, $89.86$ respectively. Assuming that the measurement errors are normally distributed according to $N(0, \sigma^2)$, find the optimal estimates of the angles $\beta_1 = ABD$, $\beta_2 = DBC$, $\beta_3 = CDB$, $\beta_4 = BDA$ and of the unknown variance $\sigma^2$. The numerical computation can be done in Python.
End of explanation
"""
X = numpy.array([
    [1, 0, 0, 0],
    [0, 1, 0, 0],
    [1, 1, 0, 0],
    [0, -1, -1, 0],
    [0, 0, 1, 0],
    [0, 0, 0, 1],
    [0, 0, 1, 1],
    [-1, 0, 0, -1]
])
Y = numpy.array([50.78, 30.25, 78.29, 99.57 - 180, 50.42, 40.59, 88.87, 89.86 - 180]).T
Beta = numpy.linalg.inv(X.T @ X) @ X.T @ Y
print('Beta =', Beta)
n, k = X.shape
RSS = (Y - X @ Beta).T @ (Y - X @ Beta)
sigma2 = RSS / (n - k)
print('sigma^2 =', sigma2)

"""
Explanation: Note that $ABD = \beta_1$, $DBC = \beta_2$, $ABC = \beta_1 + \beta_2$, $BCD = 180 - \beta_2 - \beta_3$, $CDB = \beta_3$, $BDA = \beta_4$, $CDA = \beta_3 + \beta_4$, $DAB = 180 - \beta_1 - \beta_4$. Then:
End of explanation
"""
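A natural follow-up, not required by the problem statement, is to attach standard errors to the estimates: under this normal linear model the covariance of $\hat\beta$ is $\sigma^2 (X^T X)^{-1}$, which can be computed directly from the quantities above:

```python
cov_Beta = sigma2 * numpy.linalg.inv(X.T @ X)   # covariance of the estimated angles
std_errors = numpy.sqrt(numpy.diag(cov_Beta))
for i, (b, se) in enumerate(zip(Beta, std_errors), start=1):
    print('beta_%d = %.2f +/- %.2f' % (i, b, se))
```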
fonnesbeck/scientific-python-workshop
notebooks/Model Building with PyMC.ipynb
cc0-1.0
import pymc as pm import numpy as np from pymc.examples import disaster_model switchpoint = pm.DiscreteUniform('switchpoint', lower=0, upper=110) """ Explanation: Building Models in PyMC Bayesian inference begins with specification of a probability model relating unknown variables to data. PyMC provides three basic building blocks for Bayesian probability models: Stochastic, Deterministic and Potential. A Stochastic object represents a variable whose value is not completely determined by its parents, and a Deterministic object represents a variable that is entirely determined by its parents. In object-oriented programming parlance, Stochastic and Deterministic are subclasses of the Variable class, which only serves as a template for other classes and is never actually implemented in models. The third basic class, Potential, represents 'factor potentials', which are not variables but simply log-likelihood terms and/or constraints that are multiplied into joint distributions to modify them. Potential and Variable are subclasses of Node. The Stochastic class A stochastic variable has the following primary attributes: value : The variable's current value. logp : The log-probability of the variable's current value given the values of its parents. A stochastic variable can optionally be endowed with a method called random, which draws a value for the variable given the values of its parents. Creation of stochastic variables There are three main ways to create stochastic variables, called the automatic, decorator, and direct interfaces. Automatic Stochastic variables with standard distributions provided by PyMC can be created in a single line using special subclasses of Stochastic. For example, the uniformly-distributed discrete variable $switchpoint$ in the coal mining disasters model is created using the automatic interface as follows: End of explanation """ early_mean = pm.Exponential('early_mean', beta=1., value=1) late_mean = pm.Exponential('late_mean', beta=1., value=1) """ Explanation: Similarly, the rate parameters can automatically be given exponential priors: End of explanation """ @pm.stochastic def switchpoint(value=1900, t_l=1851, t_h=1962): """The switchpoint for the rate of disaster occurrence.""" if value > t_h or value < t_l: # Invalid values return -np.inf else: # Uniform log-likelihood return -np.log(t_h - t_l + 1) """ Explanation: Decorator Uniformly-distributed discrete stochastic variable $switchpoint$ in the disasters model could alternatively be created from a function that computes its log-probability as follows: End of explanation """ def switchpoint_logp(value, t_l, t_h): if value > t_h or value < t_l: return -np.inf else: return -np.log(t_h - t_l + 1) def switchpoint_rand(t_l, t_h): return np.round( (t_l - t_h) * np.random.random() ) + t_l switchpoint = pm.Stochastic( logp = switchpoint_logp, doc = 'The switchpoint for the rate of disaster occurrence.', name = 'switchpoint', parents = {'t_l': 1851, 't_h': 1962}, random = switchpoint_rand, trace = True, value = 1900, dtype=int, rseed = 1., observed = False, cache_depth = 2, plot=True, verbose = 0) """ Explanation: Note that this is a simple Python function preceded by a Python expression called a decorator, here called @stochastic. Generally, decorators enhance functions with additional properties or functionality. The Stochastic object produced by the @stochastic decorator will evaluate its log-probability using the function switchpoint. The value argument, which is required, provides an initial value for the variable. 
The remaining arguments will be assigned as parents of switchpoint (i.e. they will populate the parents dictionary). To emphasize, the Python function decorated by @stochastic should compute the log-density or log-probability of the variable. That is why the return value in the example above is $-\log(t_h-t_l+1)$ rather than $1/(t_h-t_l+1)$. Direct Its also possible to instantiate Stochastic directly: End of explanation """ from scipy.stats.distributions import poisson @pm.observed def likelihood(value=[1, 2, 1, 5], parameter=3): return poisson.logpmf(value, parameter).sum() """ Explanation: Notice that the log-probability and random variate functions are specified externally and passed to Stochastic as arguments. This is a rather awkward way to instantiate a stochastic variable; consequently, such implementations should be rare. Data Stochastics Data are represented by Stochastic objects whose observed attribute is set to True. If a stochastic variable's observed flag is True, its value cannot be changed, and it won't be sampled by the fitting method. In each interface, an optional keyword argument observed can be set to True. In the decorator interface, the @observed decorator is used instead of @stochastic: End of explanation """ disasters = pm.Poisson('disasters', mu=2, value=disaster_model.disasters_array, observed=True) """ Explanation: In the other interfaces, the observed=True argument is added to the instantiation of the Stochastic, or its subclass: End of explanation """ @pm.deterministic def rate(s=switchpoint, e=early_mean, l=late_mean): ''' Concatenate Poisson means ''' out = np.empty(len(disaster_model.disasters_array)) out[:s] = e out[s:] = l return out """ Explanation: The Deterministic class The Deterministic class represents variables whose values are completely determined by the values of their parents. For example, in our disasters model, $rate$ is a deterministic variable. End of explanation """ x = pm.MvNormal('x', np.ones(3), np.eye(3)) y = pm.MvNormal('y', np.ones(3), np.eye(3)) x+y print(x[0]) print(x[0]+y[2]) """ Explanation: so rate's value can be computed exactly from the values of its parents early_mean, late_mean and switchpoint. A Deterministic variable's most important attribute is value, which gives the current value of the variable given the values of its parents. Like Stochastic's logp attribute, this attribute is computed on-demand and cached for efficiency. A Deterministic variable has the following additional attributes: parents : A dictionary containing the variable's parents. The keys of the dictionary correspond to the names assigned to the variable's parents by the variable, and the values correspond to the actual parents. children : A set containing the variable's children, which must be nodes. Deterministic variables have no methods. Creation of deterministic variables Deterministic variables are less complicated than stochastic variables, and have similar automatic, decorator, and direct interfaces: Automatic A handful of common functions have been wrapped in Deterministic objects. These are brief enough to list: LinearCombination : Has two parents $x$ and $y$, both of which must be iterable (i.e. vector-valued). This function returns: \[\sum_i x_i^T y_i\] Index : Has two parents $x$ and index. $x$ must be iterable, index must be valued as an integer. \[x[\text{index}]\] Index is useful for implementing dynamic models, in which the parent-child connections change. 
Lambda : Converts an anonymous function (in Python, called lambda functions) to a Deterministic instance on a single line. CompletedDirichlet : PyMC represents Dirichlet variables of length $k$ by the first $k-1$ elements; since they must sum to 1, the $k^{th}$ element is determined by the others. CompletedDirichlet appends the $k^{th}$ element to the value of its parent $D$. Logit, InvLogit, StukelLogit, StukelInvLogit : Common link functions for generalized linear models, and their inverses. Its a good idea to use these classes when feasible in order to give hints to step methods. Certain elementary operations on variables create deterministic variables. For example: End of explanation """ def rate_eval(switchpoint=switchpoint, early_mean=early_mean, late_mean=late_mean): value = np.zeros(111) value[:switchpoint] = early_mean value[switchpoint:] = late_mean return value rate = pm.Deterministic(eval = rate_eval, name = 'rate', parents = {'switchpoint': switchpoint, 'early_mean': early_mean, 'late_mean': late_mean}, doc = 'The rate of disaster occurrence.', trace = True, verbose = 0, dtype=float, plot=False, cache_depth = 2) """ Explanation: All the objects thus created have trace=False and plot=False by default. Decorator We have seen in the disasters example how the decorator interface is used to create a deterministic variable. Notice that rather than returning the log-probability, as is the case for Stochastic objects, the function returns the value of the deterministic object, given its parents. Also notice that, unlike for Stochastic objects, there is no value argument passed, since the value is calculated deterministically by the function itself. Direct Deterministic objects can also be instantiated directly: End of explanation """ N = 10 x_0 = pm.Normal('x_0', mu=0, tau=1) x = np.empty(N, dtype=object) x[0] = x_0 for i in range(1, N): x[i] = pm.Normal('x_%i' % i, mu=x[i-1], tau=1) @pm.observed def y(value=1, mu=x, tau=100): return pm.normal_like(value, (mu**2).sum(), tau) """ Explanation: Containers In some situations it would be inconvenient to assign a unique label to each parent of some variable. Consider $y$ in the following model: $$\begin{align} x_0 &\sim N (0,\tau_x)\ x_{i+1}|x_i &\sim \text{N}(x_i, \tau_x)\ &i=0,\ldots, N-2\ y|x &\sim N \left(\sum_{i=0}^{N-1}x_i^2,\tau_y\right) \end{align}$$ Here, $y$ depends on every element of the Markov chain $x$, but we wouldn't want to manually enter $N$ parent labels x_0, x_1, etc. This situation can be handled naturally in PyMC: End of explanation """ @pm.potential def rate_constraint(l1=early_mean, l2=late_mean): if np.abs(l2 - l1) > 1: return -np.inf return 0 """ Explanation: PyMC automatically wraps array $x$ in an appropriate Container class. The expression 'x_%i' % i labels each Normal object in the container with the appropriate index $i$. For example, if i=1, the name of the corresponding element becomes x_1. Containers, like variables, have an attribute called value. This attribute returns a copy of the (possibly nested) iterable that was passed into the container function, but with each variable inside replaced with its corresponding value. The Potential class For some applications, we want to be able to modify the joint density by incorporating terms that don't correspond to probabilities of variables conditional on parents, for example: $$\begin{eqnarray} p(x_0, x_2, \ldots x_{N-1}) \propto \prod_{i=0}^{N-2} \psi_i(x_i, x_{i+1}). \end{eqnarray}$$ In other cases we may want to add probability terms to existing models. 
For example, suppose we want to constrain the difference between the early and late means in the disaster model to be less than 1, so that the joint density becomes: $$p(y,\tau,\lambda_1,\lambda_2) \propto p(y|\tau,\lambda_1,\lambda_2) p(\tau) p(\lambda_1) p(\lambda_2) I(|\lambda_2-\lambda_1| \lt 1)$$ Arbitrary factors are implemented by objects of class Potential. Bayesian hierarchical notation doesn't accomodate these potentials. Potentials have one important attribute, logp, the log of their current probability or probability density value given the values of their parents. The only other additional attribute of interest is parents, a dictionary containing the potential's parents. Potentials have no methods. They have no trace attribute, because they are not variables. They cannot serve as parents of variables (for the same reason), so they have no children attribute. Creation of Potentials There are two ways to create potentials: Decorator A potential can be created via a decorator in a way very similar to Deterministic's decorator interface: End of explanation """ def rate_constraint_logp(l1=early_mean, l2=late_mean): if np.abs(l2 - l1) > 1: return -np.inf return 0 rate_constraint = pm.Potential(logp = rate_constraint_logp, name = 'rate_constraint', parents = {'l1': early_mean, 'l2': late_mean}, doc = 'Constraint on rate differences', verbose = 0, cache_depth = 2) """ Explanation: The function supplied should return the potential's current log-probability or log-density as a Numpy float. The potential decorator can take verbose and cache_depth arguments like the stochastic decorator. Direct The same potential could be created directly as follows: End of explanation """ # Log dose in each group log_dose = [-.86, -.3, -.05, .73] # Sample size in each group n = 5 # Outcomes deaths = [0, 1, 3, 5] ## Write your answer here """ Explanation: Example: Bioassay model Recall from a previous lecture the bioassay example, where the number of deaths in a toxicity experiment was modeled as a binomial response, with the probability of death being a linear function of dose: $$\begin{aligned} y_i &\sim \text{Bin}(n_i, p_i) \ \text{logit}(p_i) &= a + b x_i \end{aligned}$$ Implement this model in PyMC (we will show you how to fit the model later!) End of explanation """ from pymc.examples import gelman_bioassay M = pm.MAP(gelman_bioassay) M.fit(method='fmin_powell') """ Explanation: Fitting Models PyMC provides three objects that fit models: MCMC, which coordinates Markov chain Monte Carlo algorithms. The actual work of updating stochastic variables conditional on the rest of the model is done by StepMethod objects. MAP, which computes maximum a posteriori estimates. NormApprox, the joint distribution of all stochastic variables in a model is approximated as normal using local information at the maximum a posteriori estimate. All three objects are subclasses of Model, which is PyMC's base class for fitting methods. MCMC and NormApprox, both of which can produce samples from the posterior, are subclasses of Sampler, which is PyMC's base class for Monte Carlo fitting methods. Sampler provides a generic sampling loop method and database support for storing large sets of joint samples. These base classes implement some basic methods that are inherited by the three implemented fitting methods, so they are documented at the end of this section. 
Maximum a posteriori estimates The MAP class sets all stochastic variables to their maximum a posteriori values using functions in SciPy's optimize package; hence, SciPy must be installed to use it. MAP can only handle variables whose dtype is float, so it will not work, for example, on the disaster model example. We can fit the bioassay example using MAP: End of explanation """ M.alpha.value M.beta.value """ Explanation: This call will cause $M$ to fit the model using Powell's method, which does not require derivatives. The variables in DisasterModel have now been set to their maximum a posteriori values: End of explanation """ M.AIC M.BIC """ Explanation: We can also calculate model selection statistics, AIC and BIC: End of explanation """ N = pm.NormApprox(gelman_bioassay) N.fit() """ Explanation: MAP has two useful methods: fit(method ='fmin', iterlim=1000, tol=.0001) : The optimization method may be fmin, fmin_l_bfgs_b, fmin_ncg, fmin_cg, or fmin_powell. See the documentation of SciPy's optimize package for the details of these methods. The tol and iterlim parameters are passed to the optimization function under the appropriate names. revert_to_max() : If the values of the constituent stochastic variables change after fitting, this function will reset them to their maximum a posteriori values. The useful attributes of MAP are: logp : The joint log-probability of the model. logp_at_max : The maximum joint log-probability of the model. AIC : Akaike's information criterion for this model. BIC : The Bayesian information criterion for this model. One use of the MAP class is finding reasonable initial states for MCMC chains. Note that multiple Model subclasses can handle the same collection of nodes. Normal approximations The NormApprox class extends the MAP class by approximating the posterior covariance of the model using the Fisher information matrix, or the Hessian of the joint log probability at the maximum. End of explanation """ N.mu[N.alpha] N.C[N.alpha, N.beta] """ Explanation: The approximate joint posterior mean and covariance of the variables are available via the attributes mu and C, which the the approximate posterior mean and variance/covariance, respectively: End of explanation """ N.sample(100) N.trace('alpha')[:10] """ Explanation: As with MAP, the variables have been set to their maximum a posteriori values (which are also in the mu attribute) and the AIC and BIC of the model are available. We can also generate samples from the posterior: End of explanation """ M = pm.MCMC(gelman_bioassay, db='sqlite') """ Explanation: In addition to the methods and attributes of MAP, NormApprox provides the following methods: sample(iter) : Samples from the approximate posterior distribution are drawn and stored. isample(iter) : An 'interactive' version of sample(): sampling can be paused, returning control to the user. draw : Sets all variables to random values drawn from the approximate posterior. MCMC The MCMC class implements PyMC's core business: producing Markov chain Monte Carlo samples for a model's variables. Its primary job is to create and coordinate a collection of 'step methods', each of which is responsible for updating one or more variables. MCMC provides the following useful methods: sample(iter, burn, thin, tune_interval, tune_throughout, save_interval, ...) : Runs the MCMC algorithm and produces the traces. The iter argument controls the total number of MCMC iterations. No tallying will be done during the first burn iterations; these samples will be forgotten. 
After this burn-in period, tallying will be done each thin iterations. Tuning will be done each tune_interval iterations. If tune_throughout=False, no more tuning will be done after the burnin period. The model state will be saved every save_interval iterations, if given. isample(iter, burn, thin, tune_interval, tune_throughout, save_interval, ...) : An interactive version of sample. The sampling loop may be paused at any time, returning control to the user. use_step_method(method, *args, **kwargs): : Creates an instance of step method class method to handle some stochastic variables. The extra arguments are passed to the init method of method. Assigning a step method to a variable manually will prevent the MCMC instance from automatically assigning one. However, you may handle a variable with multiple step methods. stats(): : Generate summary statistics for all nodes in the model. The sampler's MCMC algorithms can be accessed via the step_method_dict attribute. M.step_method_dict[x] returns a list of the step methods M will use to handle the stochastic variable x. After sampling, the information tallied by M can be queried via M.db.trace_names. In addition to the values of variables, tuning information for adaptive step methods is generally tallied. These โ€˜tracesโ€™ can be plotted to verify that tuning has in fact terminated. After sampling ends you can retrieve the trace as M.trace[โ€™var_nameโ€™]. We can instantiate a MCMC sampler for the bioassay example as follows: End of explanation """ M.use_step_method(pm.Metropolis, M.alpha, proposal_sd=1., proposal_distribution='Normal') """ Explanation: Step methods Step method objects handle individual stochastic variables, or sometimes groups of them. They are responsible for making the variables they handle take single MCMC steps conditional on the rest of the model. Each subclass of StepMethod implements a method called step(), which is called by MCMC. Step methods with adaptive tuning parameters can optionally implement a method called tune(), which causes them to assess performance (based on the acceptance rates of proposed values for the variable) so far and adjust. The major subclasses of StepMethod are Metropolis and AdaptiveMetropolis. PyMC provides several flavors of the basic Metropolis steps. Metropolis Metropolis and subclasses implement Metropolis-Hastings steps. To tell an MCMC object :math:M to handle a variable :math:x with a Metropolis step method, you might do the following: End of explanation """ from pymc.examples import disaster_model_linear M = pm.MCMC(disaster_model_linear) M.use_step_method(pm.AdaptiveMetropolis, M.params_of_mean) """ Explanation: Metropolis itself handles float-valued variables, and subclasses DiscreteMetropolis and BinaryMetropolis handle integer- and boolean-valued variables, respectively. Metropolis' __init__ method takes the following arguments: stochastic : The variable to handle. proposal_sd : A float or array of floats. This sets the proposal standard deviation if the proposal distribution is normal. scale : A float, defaulting to 1. If the scale argument is provided but not proposal_sd, proposal_sd is computed as follows: python if all(self.stochastic.value != 0.): self.proposal_sd = (ones(shape(self.stochastic.value)) * abs(self.stochastic.value) * scale) else: self.proposal_sd = ones(shape(self.stochastic.value)) * scale proposal_distribution : A string indicating which distribution should be used for proposals. Current options are 'Normal' and 'Prior'. 
If proposal_distribution=None, the proposal distribution is chosen automatically. It is set to 'Prior' if the variable has no children and has a random method, and to 'Normal' otherwise. Alhough the proposal_sd attribute is fixed at creation, Metropolis step methods adjust their initial proposal standard deviations using an attribute called adaptive_scale_factor. During tuning, the acceptance ratio of the step method is examined, and this scale factor is updated accordingly. If the proposal distribution is normal, proposals will have standard deviation self.proposal_sd * self.adaptive_scale_factor. By default, tuning will continue throughout the sampling loop, even after the burnin period is over. This can be changed via the tune_throughout argument to MCMC.sample. If an adaptive step method's tally flag is set (the default for Metropolis), a trace of its tuning parameters will be kept. If you allow tuning to continue throughout the sampling loop, it is important to verify that the 'Diminishing Tuning' condition of Roberts and Rosenthal (2007) is satisfied: the amount of tuning should decrease to zero, or tuning should become very infrequent. If a Metropolis step method handles an array-valued variable, it proposes all elements independently but simultaneously. That is, it decides whether to accept or reject all elements together but it does not attempt to take the posterior correlation between elements into account. The AdaptiveMetropolis class (see below), on the other hand, does make correlated proposals. AdaptiveMetropolis The AdaptativeMetropolis (AM) step method works like a regular Metropolis step method, with the exception that its variables are block-updated using a multivariate jump distribution whose covariance is tuned during sampling. Although the chain is non-Markovian, it has correct ergodic properties (Haario et al., 2001). AdaptiveMetropolis works on vector-valued, continuous stochastics: End of explanation """ M = pm.MCMC(gelman_bioassay) M.sample(10000, burn=5000) %matplotlib inline pm.Matplot.plot(M.LD50) """ Explanation: AdaptativeMetropolis's init method takes the following arguments: stochastics : The stochastic variables to handle. These will be updated jointly. cov (optional) : An initial covariance matrix. Defaults to the identity matrix, adjusted according to the scales argument. delay (optional) : The number of iterations to delay before computing the empirical covariance matrix. scales (optional): : The initial covariance matrix will be diagonal, and its diagonal elements will be set to scales times the stochastics' values, squared. interval (optional): : The number of iterations between updates of the covariance matrix. Defaults to 1000. greedy (optional): : If True, only accepted jumps will be counted toward the delay before the covariance is first computed. Defaults to True. shrink_if_necessary (optional): : Whether the proposal covariance should be shrunk if the acceptance rate becomes extremely small. In this algorithm, jumps are proposed from a multivariate normal distribution with covariance matrix $\Sigma$. The algorithm first iterates until delay samples have been drawn (if greedy is true, until delay jumps have been accepted). At this point, $\Sigma$ is given the value of the empirical covariance of the trace so far and sampling resumes. The covariance is then updated each interval iterations throughout the entire sampling run. It is this constant adaptation of the proposal distribution that makes the chain non-Markovian. 
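For concreteness, a hypothetical call that sets a few of these arguments explicitly (the values below are illustrative, not recommendations) would look like:

```python
M.use_step_method(pm.AdaptiveMetropolis, M.params_of_mean,
                  delay=1000,    # wait 1000 iterations before the first empirical covariance
                  interval=500,  # then re-estimate the proposal covariance every 500 iterations
                  greedy=True)   # count only accepted jumps toward the delay
```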
DiscreteMetropolis This class is just like Metropolis, but specialized to handle Stochastic instances with dtype int. The jump proposal distribution can either be 'Normal', 'Prior' or 'Poisson' (the default). In the normal case, the proposed value is drawn from a normal distribution centered at the current value and then rounded to the nearest integer. BinaryMetropolis This class is specialized to handle Stochastic instances with dtype bool. For array-valued variables, BinaryMetropolis can be set to propose from the prior by passing in dist="Prior". Otherwise, the argument p_jump of the init method specifies how probable a change is. Like Metropolis' attribute proposal_sd, p_jump is tuned throughout the sampling loop via adaptive_scale_factor. Automatic assignment of step methods Every step method subclass (including user-defined ones) that does not require any __init__ arguments other than the stochastic variable to be handled adds itself to a list called StepMethodRegistry in the PyMC namespace. If a stochastic variable in an MCMC object has not been explicitly assigned a step method, each class in StepMethodRegistry is allowed to examine the variable. To do so, each step method implements a class method called competence(stochastic), whose only argument is a single stochastic variable. These methods return values from 0 to 3; 0 meaning the step method cannot safely handle the variable and 3 meaning it will most likely perform well for variables like this. The MCMC object assigns the step method that returns the highest competence value to each of its stochastic variables. Running MCMC Samplers We can carry out Markov chain Monte Carlo sampling by calling the sample method (or in the terminal, isample) with the appropriate arguments. End of explanation """
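As a brief follow-up sketch (it assumes the LD50 node keeps its default name in the trace database), the tallied samples can be summarized directly:

```python
ld50_trace = M.trace('LD50')[:]
print('LD50 posterior mean: %.3f' % ld50_trace.mean())
print('LD50 95%% interval: (%.3f, %.3f)' % tuple(np.percentile(ld50_trace, [2.5, 97.5])))
# M.stats() produces a similar summary for every node in the model.
```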
wryoung412/CS294_Deep_RL
hw2/HW2.ipynb
mit
from frozen_lake import FrozenLakeEnv env = FrozenLakeEnv() print(env.__doc__) """ Explanation: Assignment 2: Markov Decision Processes Homework Instructions All your answers should be written in this notebook. You shouldn't need to write or modify any other files. Look for four instances of "YOUR CODE HERE"--those are the only parts of the code you need to write. To grade your homework, we will check whether the printouts immediately following your code match up with the results we got. The portions used for grading are highlighted in yellow. (However, note that the yellow highlighting does not show up when github renders this file.) To submit your homework, send an email to &#98;&#101;&#114;&#107;&#101;&#108;&#101;&#121;&#100;&#101;&#101;&#112;&#114;&#108;&#99;&#111;&#117;&#114;&#115;&#101;&#64;&#103;&#109;&#97;&#105;&#108;&#46;&#99;&#111;&#109; with the subject line "Deep RL Assignment 2" and two attachments: 1. This ipynb file 2. A pdf version of this file (To make the pdf, do File - Print Preview) The homework is due Febrary 22nd, 11:59 pm. Introduction This assignment will review the two classic methods for solving Markov Decision Processes (MDPs) with finite state and action spaces. We will implement value iteration (VI) and policy iteration (PI) for a finite MDP, both of which find the optimal policy in a finite number of iterations. The experiments here will use the Frozen Lake environment, a simple gridworld MDP that is taken from gym and slightly modified for this assignment. In this MDP, the agent must navigate from the start state to the goal state on a 4x4 grid, with stochastic transitions. End of explanation """ # Some basic imports and setup import numpy as np, numpy.random as nr, gym np.set_printoptions(precision=3) def begin_grading(): print("\x1b[43m") def end_grading(): print("\x1b[0m") # Seed RNGs so you get the same printouts as me env.seed(0); from gym.spaces import prng; prng.seed(10) # Generate the episode env.reset() for t in range(100): env.render() a = env.action_space.sample() ob, rew, done, _ = env.step(a) if done: break assert done env.render(); """ Explanation: Let's look at what a random episode looks like. End of explanation """ class MDP(object): def __init__(self, P, nS, nA, desc=None): self.P = P # state transition and reward probabilities, explained below self.nS = nS # number of states self.nA = nA # number of actions self.desc = desc # 2D array specifying what each grid cell means (used for plotting) mdp = MDP( {s : {a : [tup[:3] for tup in tups] for (a, tups) in a2d.items()} for (s, a2d) in env.P.items()}, env.nS, env.nA, env.desc) print("mdp.P is a two-level dict where the first key is the state and the second key is the action.") print("The 2D grid cells are associated with indices [0, 1, 2, ..., 15] from left to right and top to down, as in") print(np.arange(16).reshape(4,4)) print("mdp.P[state][action] is a list of tuples (probability, nextstate, reward).\n") print("For example, state 0 is the initial state, and the transition information for s=0, a=0 is \nP[0][0] =", mdp.P[0][0], "\n") print("As another example, state 5 corresponds to a hole in the ice, which transitions to itself with probability 1 and reward 0.") print("P[5][0] =", mdp.P[5][0], '\n') """ Explanation: In the episodeย above, the agent falls into a hole after two timesteps. Also note the stochasticity--on the first step, the DOWN action is selected, but the agent moves to the right. We extract the relevant information from the gym Env into the MDP class below. 
The env object won't be used any further, we'll just use the mdp object. End of explanation """ import random import datetime def sarsa_lambda(env, gamma, delta, rate, epsilon, nIt, render): """Salsa(lambda) algorithm Args: env: environment gamma: decay of reward delta: the lambda parameter for Salsa(lambda) algorithm rate: learning rate nIt: number of iterations render: boolean which determines if render the state or not """ random.seed(datetime.datetime.now().timestamp()) q = np.array([0] * env.nS * env.nA, dtype = float).reshape(env.nS, env.nA) for i in range(nIt): trace = np.zeros_like(q) obs_prev = None act_prev = None obs = None done = False totalr = 0. # Need to reorganize the code a little bit as Sarsa(lambda) needs an extra action sampling while not done: if render: env.render() if obs is None: obs = env.reset() else: assert act is not None obs, r, done, _ = env.step(act) totalr += r p = np.random.uniform(0., 1.) if p > epsilon: act = np.argmax(q[obs]) else: act = np.random.randint(env.nA) # Sarsa(delta) # R and S are ready. Waiting for A. if obs_prev is not None: trace *= delta * gamma trace[obs_prev][act_prev] += 1 q += rate * trace * (r + gamma * q[obs][act] - q[obs_prev][act_prev]) obs_prev = obs act_prev = act if render: env.render() return q gamma = 0.9 # decay of reward delta = 0.5 # decay of eligibility trace rate = 0.1 # the learning rate, or alpha in the book nIt = 1000 epsilon = 0.5 # epsilon greedy q = sarsa_lambda(env, gamma, delta, rate, epsilon, nIt, False) print("Q function:\n") print(q) print() print("Greedy algorithm:") import matplotlib.pyplot as plt %matplotlib inline def policy_matrix(q): indices = np.argmax(q, axis = 1) indices[np.max(q, axis = 1) == 0] = 4 to_direction = np.vectorize(lambda x: ['L', 'D', 'R', 'U', ''][x]) return to_direction(indices.reshape(4, 4)) plt.figure(figsize=(3,3)) # imshow makes top left the origin plt.imshow(np.array([0] * 16).reshape(4,4), cmap='gray', interpolation='none', clim=(0,1)) ax = plt.gca() ax.set_xticks(np.arange(4)-.5) ax.set_yticks(np.arange(4)-.5) directions = policy_matrix(q) for y in range(4): for x in range(4): plt.text(x, y, str(env.desc[y,x].item().decode()) + ',' + directions[y, x], color='g', size=12, verticalalignment='center', horizontalalignment='center', fontweight='bold') plt.grid(color='b', lw=2, ls='-') """ Explanation: Part 1: Value Iteration Problem 1: implement value iteration In this problem, you'll implement value iteration, which has the following pseudocode: Initialize $V^{(0)}(s)=0$, for all $s$ For $i=0, 1, 2, \dots$ - $V^{(i+1)}(s) = \max_a \sum_{s'} P(s,a,s') [ R(s,a,s') + \gamma V^{(i)}(s')]$, for all $s$ We additionally define the sequence of greedy policies $\pi^{(0)}, \pi^{(1)}, \dots, \pi^{(n-1)}$, where $$\pi^{(i)}(s) = \arg \max_a \sum_{s'} P(s,a,s') [ R(s,a,s') + \gamma V^{(i)}(s')]$$ Your code will return two lists: $[V^{(0)}, V^{(1)}, \dots, V^{(n)}]$ and $[\pi^{(0)}, \pi^{(1)}, \dots, \pi^{(n-1)}]$ To ensure that you get the same policies as the reference solution, choose the lower-index action to break ties in $\arg \max_a$. This is done automatically by np.argmax. This will only affect the "# chg actions" printout below--it won't affect the values computed. <div class="alert alert-warning"> Warning: make a copy of your value function each iteration and use that copy for the update--don't update your value function in place. 
Updating in-place is also a valid algorithm, sometimes called Gauss-Seidel value iteration or asynchronous value iteration, but it will cause you to get different results than me. </div> End of explanation """
ceos-seo/data_cube_notebooks
notebooks/training/ardc_training/Training_TaskE_Transect.ipynb
apache-2.0
import xarray as xr import numpy as np import datacube import utils.data_cube_utilities.data_access_api as dc_api from datacube.utils.aws import configure_s3_access configure_s3_access(requester_pays=True) api = dc_api.DataAccessApi() dc = api.dc """ Explanation: ARDC Training: Python Notebooks Task-E: This notebook will demonstrate 2D transect analyses and 3D Hovmoller plots. We will run these for NDVI (land) and TSM (water quality) to show the spatial and temporal variation of data along a line (transect) for a given time slice and for the entire time series. Import the Datacube Configuration End of explanation """ list_of_products = dc.list_products() netCDF_products = list_of_products[list_of_products['format'] == 'NetCDF'] netCDF_products """ Explanation: Browse the available Data Cubes End of explanation """ # Change the data platform and data cube here platform = 'LANDSAT_7' product = 'ls7_usgs_sr_scene' collection = 'c1' level = 'l2' """ Explanation: Pick a product Use the platform and product names from the previous block to select a Data Cube. End of explanation """ from utils.data_cube_utilities.dc_time import _n64_to_datetime, dt_to_str extents = api.get_full_dataset_extent(platform = platform, product = product, measurements=[]) latitude_extents = (min(extents['latitude'].values),max(extents['latitude'].values)) longitude_extents = (min(extents['longitude'].values),max(extents['longitude'].values)) time_extents = (min(extents['time'].values),max(extents['time'].values)) print("Latitude Extents:", latitude_extents) print("Longitude Extents:", longitude_extents) print("Time Extents:", list(map(dt_to_str, map(_n64_to_datetime, time_extents)))) """ Explanation: Display Latitude-Longitude and Time Bounds of the Data Cube End of explanation """ ## The code below renders a map that can be used to orient yourself with the region. from utils.data_cube_utilities.dc_display_map import display_map display_map(latitude = latitude_extents, longitude = longitude_extents) """ Explanation: Visualize Data Cube Region End of explanation """ ## Vietnam - Central Lam Dong Province ## # longitude_extents = (107.0, 107.2) # latitude_extents = (11.7, 12.0) ## Vietnam Ho Tri An Lake # longitude_extents = (107.0, 107.2) # latitude_extents = (11.1, 11.3) ## Sierra Leone - Delta du Saloum latitude_extents = (13.55, 14.12) longitude_extents = (-16.80, -16.38) time_extents = ('2005-01-01', '2005-12-31') display_map(latitude = latitude_extents, longitude = longitude_extents) """ Explanation: Pick a smaller analysis region and display that region Try to keep your region to less than 0.2-deg x 0.2-deg for rapid processing. You can click on the map above to find the Lat-Lon coordinates of any location. You will want to identify a region with an inland water body and some vegetation. Pick a time window of several years. End of explanation """ landsat_dataset = dc.load(latitude = latitude_extents, longitude = longitude_extents, platform = platform, time = time_extents, product = product, measurements = ['red', 'green', 'blue', 'nir', 'swir1', 'swir2', 'pixel_qa']) landsat_dataset #view the dimensions and sample content from the cube """ Explanation: Load the dataset and the required spectral bands or other parameters After loading, you will view the Xarray dataset. Notice the dimensions represent the number of pixels in your latitude and longitude dimension as well as the number of time slices (time) in your time series. 
End of explanation """ from utils.data_cube_utilities.clean_mask import landsat_qa_clean_mask plt_col_lvl_params = dict(platform=platform, collection=collection, level=level) clear_xarray = landsat_qa_clean_mask(landsat_dataset, cover_types=['clear'], **plt_col_lvl_params) water_xarray = landsat_qa_clean_mask(landsat_dataset, cover_types=['water'], **plt_col_lvl_params) shadow_xarray = landsat_qa_clean_mask(landsat_dataset, cover_types=['cld_shd'], **plt_col_lvl_params) cloud_xarray = landsat_qa_clean_mask(landsat_dataset, cover_types=['cloud'], **plt_col_lvl_params) clean_xarray = (clear_xarray | water_xarray).rename("clean_mask") def NDVI(dataset): return ((dataset.nir - dataset.red)/(dataset.nir + dataset.red)).rename("NDVI") ndvi_xarray = NDVI(landsat_dataset) # Vegetation Index from utils.data_cube_utilities.dc_water_quality import tsm tsm_xarray = tsm(landsat_dataset, clean_mask = water_xarray.values.astype(bool) ).tsm """ Explanation: Preparing the data We will filter out the clouds and the water using the Landsat pixel_qa information. Next, we will calculate the values of NDVI (vegetation index) and TSM (water quality). End of explanation """ combined_dataset = xr.merge([landsat_dataset, clean_xarray, clear_xarray, water_xarray, shadow_xarray, cloud_xarray, ndvi_xarray, tsm_xarray]) # Copy original crs to merged dataset combined_dataset = combined_dataset.assign_attrs(landsat_dataset.attrs) """ Explanation: Combine everything into one XARRAY for further analysis End of explanation """ # Water and Land Mixed Examples mid_lon = np.mean(longitude_extents) mid_lat = np.mean(latitude_extents) # North-South Path start = (latitude_extents[0], mid_lon) end = (latitude_extents[1], mid_lon) # East-West Path # start = (mid_lat, longitude_extents[0]) # end = (mid_lat, longitude_extents[1]) # East-West Path for Lake Ho Tri An # start = ( 11.25, 107.02 ) # end = ( 11.25, 107.18 ) """ Explanation: Define a path for a transect A transect is just a line that will run across our region of interest. Use the display map above to find the end points of your desired line. If you click on the map it will give you precise Lat-Lon positions for a point. 
Start with a line across a mix of water and land End of explanation """ import folium import numpy as np from folium.features import CustomIcon def plot_a_path(points , zoom = 15): xs,ys = zip(*points) map_center_point = (np.mean(xs), np.mean(ys)) the_map = folium.Map(location=[map_center_point[0], map_center_point[1]], zoom_start = zoom, tiles='http://mt1.google.com/vt/lyrs=y&z={z}&x={x}&y={y}', attr = "Google Attribution") path = folium.PolyLine(locations=points, weight=5, color = 'orange') the_map.add_child(path) start = ( xs[0] ,ys[0] ) end = ( xs[-1],ys[-1]) return the_map plot_a_path([start,end]) """ Explanation: Plot the transect line End of explanation """ from utils.data_cube_utilities.transect import line_scan import numpy as np def get_index_at(coords, ds): '''Returns an integer index pair.''' lat = coords[0] lon = coords[1] nearest_lat = ds.sel(latitude = lat, method = 'nearest').latitude.values nearest_lon = ds.sel(longitude = lon, method = 'nearest').longitude.values lat_index = np.where(ds.latitude.values == nearest_lat)[0] lon_index = np.where(ds.longitude.values == nearest_lon)[0] return (int(lat_index), int(lon_index)) def create_pixel_trail(start, end, ds): a = get_index_at(start, ds) b = get_index_at(end, ds) indices = line_scan.line_scan(a, b) pixels = [ ds.isel(latitude = x, longitude = y) for x, y in indices] return pixels list_of_pixels_along_segment = create_pixel_trail(start, end, landsat_dataset) """ Explanation: Find the nearest pixels along the transect path End of explanation """ import xarray import matplotlib.pyplot as plt from matplotlib.ticker import FuncFormatter from datetime import datetime import time def plot_list_of_pixels(list_of_pixels, band_name, y = None): start = ( "{0:.2f}".format(float(list_of_pixels[0].latitude.values )), "{0:.2f}".format(float(list_of_pixels[0].longitude.values)) ) end = ( "{0:.2f}".format(float(list_of_pixels[-1].latitude.values)), "{0:.2f}".format(float(list_of_pixels[-1].longitude.values)) ) def reformat_n64(t): return time.strftime("%Y.%m.%d", time.gmtime(t.astype(int)/1000000000)) def pixel_to_array(pixel): return(pixel.values) def figure_ratio(x,y, fixed_width = 10): width = fixed_width height = y * (fixed_width / x) return (width, height) pixel_array = np.transpose([pixel_to_array(pix) for pix in list_of_pixels]) #If the data has one acquisition, then plot transect (2-D), else Hovmรถller (3-D) if y.size == 1: plt.figure(figsize = (15,5)) plt.scatter(np.arange(pixel_array.size), pixel_array) plt.title("Transect (2-D) \n Acquisition date: {}".format(reformat_n64(y))) plt.xlabel("Pixels along the transect \n {} - {} \n ".format(start,end)) plt.ylabel(band_name) else: m = FuncFormatter(lambda x :x ) figure = plt.figure(figsize = figure_ratio(len(list_of_pixels), len(list_of_pixels[0].values), fixed_width = 15)) number_of_y_ticks = 5 ax = plt.gca() cax = ax.imshow(pixel_array, interpolation='none') figure.colorbar(cax,fraction=0.110, pad=0.04) ax.set_title("Hovmรถller (3-D) \n Acquisition range: {} - {} \n ".format(reformat_n64(y[0]),reformat_n64(y[-1]))) plt.xlabel("Pixels along the transect \n {} - {} \n ".format(start,end)) ax.get_yaxis().set_major_formatter( FuncFormatter(lambda x, p: reformat_n64(list_of_pixels[0].time.values[int(x)]) if int(x) < len(list_of_pixels[0].time) else "")) plt.ylabel("Time") plt.show() def transect_plot(start, end, da): if type(da) is not xarray.DataArray and (type(da) is xarray.Dataset) : raise Exception('You should be passing in a data-array, not a Dataset') pixels = 
create_pixel_trail(start, end,da) dates = da.time.values lats = [x.latitude.values for x in pixels] lons = [x.longitude.values for x in pixels] plot_list_of_pixels(pixels, da.name, y = dates) pixels = create_pixel_trail(start, end, landsat_dataset) t = 2 subset = list( map(lambda x: x.isel(time = t), pixels)) """ Explanation: Groundwork for Transect (2-D) and Hovmรถller (3-D) Plots End of explanation """ from utils.data_cube_utilities.clean_mask import landsat_qa_clean_mask clean_mask = landsat_qa_clean_mask(landsat_dataset, platform=platform, collection=collection, level=level) cloudless_dataset = landsat_dataset.where(clean_mask) """ Explanation: Mask Clouds End of explanation """ # select an acquisition number from the start (t=0) to "time" using the array limits above acquisition_number = 10 #If plotted will create the 2-D transect cloudless_dataset_for_acq_no = cloudless_dataset.isel(time = acquisition_number) #If Plotted will create the 3-D Hovmoller plot for a portion of the time series (min to max) min_acq = 1 max_acq = 4 cloudless_dataset_from_1_to_acq_no = cloudless_dataset.isel(time = slice(min_acq, max_acq)) """ Explanation: Select an acquisition date and then plot a 2D transect without clouds End of explanation """ band = 'green' """ Explanation: Select one of the XARRAY parameters for analysis End of explanation """ transect_plot(start, end, cloudless_dataset_for_acq_no[band]) """ Explanation: Create a 2D Transect plot of the "band" for one date End of explanation """ transect_plot(start, end, NDVI(cloudless_dataset_for_acq_no)) """ Explanation: Create a 2D Transect plot of NDVI for one date End of explanation """ transect_plot(start, end, NDVI(cloudless_dataset)) """ Explanation: Create a 3D Hovmoller plot of NDVI for the entire time series End of explanation """ transect_plot(start, end, water_xarray.isel(time = acquisition_number)) """ Explanation: Create a 2D Transect plot of water existence for one date End of explanation """ transect_plot(start, end, water_xarray) """ Explanation: Create a 3D Hovmoller plot of water extent for the entire time series End of explanation """ transect_plot(start, end, tsm_xarray.isel(time = acquisition_number)) """ Explanation: Create a 2D Transect plot of water quality (TSM) for one date End of explanation """ transect_plot(start, end, tsm_xarray) """ Explanation: Create a 3D Hovmoller plot of water quality (TSM) for one date End of explanation """
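As a short follow-up sketch (not part of the original exercise), the Hovmoller view can be collapsed into a single time-averaged TSM profile along the same transect, reusing the helpers defined above:

```python
pixels = create_pixel_trail(start, end, tsm_xarray)
mean_profile = [float(p.mean(dim='time', skipna=True).values) for p in pixels]
plt.figure(figsize=(15, 5))
plt.plot(range(len(mean_profile)), mean_profile)
plt.xlabel("Pixels along the transect")
plt.ylabel("Mean TSM over the time series")
plt.show()
```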
wei-Z/Python-Machine-Learning
code/ch08/ch08.ipynb
mit
%load_ext watermark %watermark -a 'Sebastian Raschka' -u -d -v -p numpy,pandas,matplotlib,scikit-learn,nltk # to install watermark just uncomment the following line: #%install_ext https://raw.githubusercontent.com/rasbt/watermark/master/watermark.py """ Explanation: Sebastian Raschka, 2015 https://github.com/rasbt/python-machine-learning-book Python Machine Learning - Code Examples Chapter 8 - Applying Machine Learning To Sentiment Analysis Note that the optional watermark extension is a small IPython notebook plugin that I developed to make the code reproducible. You can just skip the following line(s). End of explanation """ import pyprind import pandas as pd import os # change the `basepath` to the directory of the # unzipped movie dataset #basepath = '/Users/Sebastian/Desktop/aclImdb/' basepath = './aclImdb' labels = {'pos': 1, 'neg': 0} pbar = pyprind.ProgBar(50000) df = pd.DataFrame() for s in ('test', 'train'): for l in ('pos', 'neg'): path = os.path.join(basepath, s, l) for file in os.listdir(path): with open(os.path.join(path, file), 'r', encoding='utf-8') as infile: txt = infile.read() df = df.append([[txt, labels[l]]], ignore_index=True) pbar.update() df.columns = ['review', 'sentiment'] """ Explanation: <br> <br> Overview Obtaining the IMDb movie review dataset Introducing the bag-of-words model Transforming words into feature vectors Assessing word relevancy via term frequency-inverse document frequency Cleaning text data Processing documents into tokens Training a logistic regression model for document classification Working with bigger data โ€“ online algorithms and out-of-core learning Summary <br> <br> Obtaining the IMDb movie review dataset The IMDB movie review set can be downloaded from http://ai.stanford.edu/~amaas/data/sentiment/. After downloading the dataset, decompress the files. A) If you are working with Linux or MacOS X, open a new terminal windowm cd into the download directory and execute tar -zxf aclImdb_v1.tar.gz B) If you are working with Windows, download an archiver such as 7Zip to extract the files from the download archive. Compatibility Note: I received an email from a reader who was having troubles with reading the movie review texts due to encoding issues. Typically, Python's default encoding is set to 'utf-8', which shouldn't cause troubles when running this IPython notebook. You can simply check the encoding on your machine by firing up a new Python interpreter from the command line terminal and execute &gt;&gt;&gt; import sys &gt;&gt;&gt; sys.getdefaultencoding() If the returned result is not 'utf-8', you probably need to change your Python's encoding to 'utf-8', for example by typing export PYTHONIOENCODING=utf8 in your terminal shell prior to running this IPython notebook. (Note that this is a temporary change, and it needs to be executed in the same shell that you'll use to launch ipython notebook. Alternatively, you can replace the lines with open(os.path.join(path, file), 'r') as infile: ... pd.read_csv('./movie_data.csv') ... df.to_csv('./movie_data.csv', index=False) by with open(os.path.join(path, file), 'r', encoding='utf-8') as infile: ... pd.read_csv('./movie_data.csv', encoding='utf-8') ... df.to_csv('./movie_data.csv', index=False, encoding='utf-8') in the following cells to achieve the desired effect. 
End of explanation """ import numpy as np np.random.seed(0) df = df.reindex(np.random.permutation(df.index)) """ Explanation: Shuffling the DataFrame: End of explanation """ df.to_csv('./movie_data.csv', index=False) import pandas as pd df = pd.read_csv('./movie_data.csv') df.head(3) """ Explanation: Optional: Saving the assembled data as CSV file: End of explanation """ import numpy as np from sklearn.feature_extraction.text import CountVectorizer count = CountVectorizer() docs = np.array([ 'The sun is shining', 'The weather is sweet', 'The sun is shining and the weather is sweet']) bag = count.fit_transform(docs) print(count.vocabulary_) print(bag.toarray()) """ Explanation: <hr> Note If you have problems with creating the movie_data.csv file in the previous chapter, you can find a download a zip archive at https://github.com/rasbt/python-machine-learning-book/tree/master/code/datasets/movie <hr> <br> <br> Introducing the bag-of-words model ... Transforming documents into feature vectors End of explanation """ np.set_printoptions(precision=2) from sklearn.feature_extraction.text import TfidfTransformer tfidf = TfidfTransformer(use_idf=True, norm='l2', smooth_idf=True) print(tfidf.fit_transform(count.fit_transform(docs)).toarray()) tf_is = 2 n_docs = 3 idf_is = np.log((n_docs+1) / (3+1)) tfidf_is = tf_is * (idf_is + 1) print('tf-idf of term "is" = %.2f' % tfidf_is) tfidf = TfidfTransformer(use_idf=True, norm=None, smooth_idf=True) raw_tfidf = tfidf.fit_transform(count.fit_transform(docs)).toarray()[-1] raw_tfidf l2_tfidf = raw_tfidf / np.sqrt(np.sum(raw_tfidf**2)) l2_tfidf """ Explanation: <br> Assessing word relevancy via term frequency-inverse document frequency End of explanation """ df.loc[0, 'review'][-50:] import re def preprocessor(text): text = re.sub('<[^>]*>', '', text) emoticons = re.findall('(?::|;|=)(?:-)?(?:\)|\(|D|P)', text) text = re.sub('[\W]+', ' ', text.lower()) +\ ' '.join(emoticons).replace('-', '') return text preprocessor(df.loc[0, 'review'][-50:]) preprocessor("</a>This :) is :( a test :-)!") df['review'] = df['review'].apply(preprocessor) """ Explanation: <br> Cleaning text data End of explanation """ from nltk.stem.porter import PorterStemmer porter = PorterStemmer() def tokenizer(text): return text.split() def tokenizer_porter(text): return [porter.stem(word) for word in text.split()] tokenizer('runners like running and thus they run') tokenizer_porter('runners like running and thus they run') import nltk nltk.download('stopwords') from nltk.corpus import stopwords stop = stopwords.words('english') [w for w in tokenizer_porter('a runner likes running and runs a lot')[-10:] if w not in stop] """ Explanation: <br> Processing documents into tokens End of explanation """ X_train = df.loc[:25000, 'review'].values y_train = df.loc[:25000, 'sentiment'].values X_test = df.loc[25000:, 'review'].values y_test = df.loc[25000:, 'sentiment'].values from sklearn.grid_search import GridSearchCV from sklearn.pipeline import Pipeline from sklearn.linear_model import LogisticRegression from sklearn.feature_extraction.text import TfidfVectorizer tfidf = TfidfVectorizer(strip_accents=None, lowercase=False, preprocessor=None) param_grid = [{'vect__ngram_range': [(1, 1)], 'vect__stop_words': [stop, None], 'vect__tokenizer': [tokenizer, tokenizer_porter], 'clf__penalty': ['l1', 'l2'], 'clf__C': [1.0, 10.0, 100.0]}, {'vect__ngram_range': [(1, 1)], 'vect__stop_words': [stop, None], 'vect__tokenizer': [tokenizer, tokenizer_porter], 'vect__use_idf':[False], 'vect__norm':[None], 
'clf__penalty': ['l1', 'l2'], 'clf__C': [1.0, 10.0, 100.0]}, ] lr_tfidf = Pipeline([('vect', tfidf), ('clf', LogisticRegression(random_state=0))]) gs_lr_tfidf = GridSearchCV(lr_tfidf, param_grid, scoring='accuracy', cv=5, verbose=1, n_jobs=-1) gs_lr_tfidf.fit(X_train, y_train) print('Best parameter set: %s ' % gs_lr_tfidf.best_params_) print('CV Accuracy: %.3f' % gs_lr_tfidf.best_score_) clf = gs_lr_tfidf.best_estimator_ print('Test Accuracy: %.3f' % clf.score(X_test, y_test)) """ Explanation: <br> <br> Training a logistic regression model for document classification Strip HTML and punctuation to speed up the GridSearch later: End of explanation """ from sklearn.cross_validation import StratifiedKFold, cross_val_score from sklearn.linear_model import LogisticRegression import numpy as np np.random.seed(0) np.set_printoptions(precision=6) y = [np.random.randint(3) for i in range(25)] X = (y + np.random.randn(25)).reshape(-1, 1) cv5_idx = list(StratifiedKFold(y, n_folds=5, shuffle=False, random_state=0)) cross_val_score(LogisticRegression(random_state=123), X, y, cv=cv5_idx) """ Explanation: <hr> <hr> Start comment: Please note that gs_lr_tfidf.best_score_ is the average k-fold cross-validation score. I.e., if we have a GridSearchCV object with 5-fold cross-validation (like the one above), the best_score_ attribute returns the average score over the 5-folds of the best model. To illustrate this with an example: End of explanation """ from sklearn.grid_search import GridSearchCV gs = GridSearchCV(LogisticRegression(), {}, cv=cv5_idx, verbose=3).fit(X, y) """ Explanation: By executing the code above, we created a simple data set of random integers that shall represent our class labels. Next, we fed the indices of 5 cross-validation folds (cv3_idx) to the cross_val_score scorer, which returned 5 accuracy scores -- these are the 5 accuracy values for the 5 test folds. Next, let us use the GridSearchCV object and feed it the same 5 cross-validation sets (via the pre-generated cv3_idx indices): End of explanation """ gs.best_score_ """ Explanation: As we can see, the scores for the 5 folds are exactly the same as the ones from cross_val_score earlier. Now, the best_score_ attribute of the GridSearchCV object, which becomes available after fitting, returns the average accuracy score of the best model: End of explanation """ cross_val_score(LogisticRegression(), X, y, cv=cv5_idx).mean() """ Explanation: As we can see, the result above is consistent with the average score computed the cross_val_score. 
End of explanation """ import numpy as np import re from nltk.corpus import stopwords def tokenizer(text): text = re.sub('<[^>]*>', '', text) emoticons = re.findall('(?::|;|=)(?:-)?(?:\)|\(|D|P)', text.lower()) text = re.sub('[\W]+', ' ', text.lower()) +\ ' '.join(emoticons).replace('-', '') tokenized = [w for w in text.split() if w not in stop] return tokenized def stream_docs(path): with open(path, 'r', encoding='utf-8') as csv: next(csv) # skip header for line in csv: text, label = line[:-3], int(line[-2]) yield text, label next(stream_docs(path='./movie_data.csv')) def get_minibatch(doc_stream, size): docs, y = [], [] try: for _ in range(size): text, label = next(doc_stream) docs.append(text) y.append(label) except StopIteration: return None, None return docs, y from sklearn.feature_extraction.text import HashingVectorizer from sklearn.linear_model import SGDClassifier vect = HashingVectorizer(decode_error='ignore', n_features=2**21, preprocessor=None, tokenizer=tokenizer) clf = SGDClassifier(loss='log', random_state=1, n_iter=1) doc_stream = stream_docs(path='./movie_data.csv') import pyprind pbar = pyprind.ProgBar(45) classes = np.array([0, 1]) for _ in range(45): X_train, y_train = get_minibatch(doc_stream, size=1000) if not X_train: break X_train = vect.transform(X_train) clf.partial_fit(X_train, y_train, classes=classes) pbar.update() X_test, y_test = get_minibatch(doc_stream, size=5000) X_test = vect.transform(X_test) print('Accuracy: %.3f' % clf.score(X_test, y_test)) clf = clf.partial_fit(X_test, y_test) """ Explanation: End comment. <hr> <hr> <br> <br> Working with bigger data - online algorithms and out-of-core learning End of explanation """
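""" Explanation: As a small usage sketch that is not part of the chapter text above, the out-of-core classifier trained in the last cell can score new reviews directly, because HashingVectorizer is stateless and needs no refitting. The example review string below is made up, and predict_proba is only available here because the SGDClassifier was created with loss='log'. End of explanation """
import numpy as np

label = {0: 'negative', 1: 'positive'}
example = ["I loved this movie. The story was gripping and the acting was excellent."]

# Reuse the HashingVectorizer and SGDClassifier fitted in the previous cell
X_example = vect.transform(example)
print('Prediction: %s' % label[clf.predict(X_example)[0]])
print('Probability: %.2f%%' % (np.max(clf.predict_proba(X_example)) * 100))
""" Explanation: If the classifier were to be reused outside this notebook, the fitted clf object (and the stop-word list) could be serialized, for example with pickle, as is commonly done with scikit-learn estimators; the hashing vectorizer itself has no fitted state to save. End of explanation """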
tensorflow/docs
site/en/tutorials/load_data/images.ipynb
apache-2.0
#@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Explanation: Copyright 2020 The TensorFlow Authors. End of explanation """ import numpy as np import os import PIL import PIL.Image import tensorflow as tf import tensorflow_datasets as tfds print(tf.__version__) """ Explanation: Load and preprocess images <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/tutorials/load_data/images"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/load_data/images.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/tutorials/load_data/images.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/tutorials/load_data/images.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> </td> </table> This tutorial shows how to load and preprocess an image dataset in three ways: First, you will use high-level Keras preprocessing utilities (such as tf.keras.utils.image_dataset_from_directory) and layers (such as tf.keras.layers.Rescaling) to read a directory of images on disk. Next, you will write your own input pipeline from scratch using tf.data. Finally, you will download a dataset from the large catalog available in TensorFlow Datasets. Setup End of explanation """ import pathlib dataset_url = "https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz" data_dir = tf.keras.utils.get_file(origin=dataset_url, fname='flower_photos', untar=True) data_dir = pathlib.Path(data_dir) """ Explanation: Download the flowers dataset This tutorial uses a dataset of several thousand photos of flowers. The flowers dataset contains five sub-directories, one per class: flowers_photos/ daisy/ dandelion/ roses/ sunflowers/ tulips/ Note: all images are licensed CC-BY, creators are listed in the LICENSE.txt file. End of explanation """ image_count = len(list(data_dir.glob('*/*.jpg'))) print(image_count) """ Explanation: After downloading (218MB), you should now have a copy of the flower photos available. There are 3,670 total images: End of explanation """ roses = list(data_dir.glob('roses/*')) PIL.Image.open(str(roses[0])) roses = list(data_dir.glob('roses/*')) PIL.Image.open(str(roses[1])) """ Explanation: Each directory contains images of that type of flower. Here are some roses: End of explanation """ batch_size = 32 img_height = 180 img_width = 180 """ Explanation: Load data using a Keras utility Let's load these images off disk using the helpful tf.keras.utils.image_dataset_from_directory utility. 
Create a dataset Define some parameters for the loader: End of explanation """ train_ds = tf.keras.utils.image_dataset_from_directory( data_dir, validation_split=0.2, subset="training", seed=123, image_size=(img_height, img_width), batch_size=batch_size) val_ds = tf.keras.utils.image_dataset_from_directory( data_dir, validation_split=0.2, subset="validation", seed=123, image_size=(img_height, img_width), batch_size=batch_size) """ Explanation: It's good practice to use a validation split when developing your model. You will use 80% of the images for training and 20% for validation. End of explanation """ class_names = train_ds.class_names print(class_names) """ Explanation: You can find the class names in the class_names attribute on these datasets. End of explanation """ import matplotlib.pyplot as plt plt.figure(figsize=(10, 10)) for images, labels in train_ds.take(1): for i in range(9): ax = plt.subplot(3, 3, i + 1) plt.imshow(images[i].numpy().astype("uint8")) plt.title(class_names[labels[i]]) plt.axis("off") """ Explanation: Visualize the data Here are the first nine images from the training dataset. End of explanation """ for image_batch, labels_batch in train_ds: print(image_batch.shape) print(labels_batch.shape) break """ Explanation: You can train a model using these datasets by passing them to model.fit (shown later in this tutorial). If you like, you can also manually iterate over the dataset and retrieve batches of images: End of explanation """ normalization_layer = tf.keras.layers.Rescaling(1./255) """ Explanation: The image_batch is a tensor of the shape (32, 180, 180, 3). This is a batch of 32 images of shape 180x180x3 (the last dimension refers to color channels RGB). The label_batch is a tensor of the shape (32,), these are corresponding labels to the 32 images. You can call .numpy() on either of these tensors to convert them to a numpy.ndarray. Standardize the data The RGB channel values are in the [0, 255] range. This is not ideal for a neural network; in general you should seek to make your input values small. Here, you will standardize values to be in the [0, 1] range by using tf.keras.layers.Rescaling: End of explanation """ normalized_ds = train_ds.map(lambda x, y: (normalization_layer(x), y)) image_batch, labels_batch = next(iter(normalized_ds)) first_image = image_batch[0] # Notice the pixel values are now in `[0,1]`. print(np.min(first_image), np.max(first_image)) """ Explanation: There are two ways to use this layer. You can apply it to the dataset by calling Dataset.map: End of explanation """ AUTOTUNE = tf.data.AUTOTUNE train_ds = train_ds.cache().prefetch(buffer_size=AUTOTUNE) val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE) """ Explanation: Or, you can include the layer inside your model definition to simplify deployment. You will use the second approach here. Note: If you would like to scale pixel values to [-1,1] you can instead write tf.keras.layers.Rescaling(1./127.5, offset=-1) Note: You previously resized images using the image_size argument of tf.keras.utils.image_dataset_from_directory. If you want to include the resizing logic in your model as well, you can use the tf.keras.layers.Resizing layer. Configure the dataset for performance Let's make sure to use buffered prefetching so you can yield data from disk without having I/O become blocking. These are two important methods you should use when loading data: Dataset.cache keeps the images in memory after they're loaded off disk during the first epoch. 
This will ensure the dataset does not become a bottleneck while training your model. If your dataset is too large to fit into memory, you can also use this method to create a performant on-disk cache. Dataset.prefetch overlaps data preprocessing and model execution while training. Interested readers can learn more about both methods, as well as how to cache data to disk in the Prefetching section of the Better performance with the tf.data API guide. End of explanation """ num_classes = 5 model = tf.keras.Sequential([ tf.keras.layers.Rescaling(1./255), tf.keras.layers.Conv2D(32, 3, activation='relu'), tf.keras.layers.MaxPooling2D(), tf.keras.layers.Conv2D(32, 3, activation='relu'), tf.keras.layers.MaxPooling2D(), tf.keras.layers.Conv2D(32, 3, activation='relu'), tf.keras.layers.MaxPooling2D(), tf.keras.layers.Flatten(), tf.keras.layers.Dense(128, activation='relu'), tf.keras.layers.Dense(num_classes) ]) """ Explanation: Train a model For completeness, you will show how to train a simple model using the datasets you have just prepared. The Sequential model consists of three convolution blocks (tf.keras.layers.Conv2D) with a max pooling layer (tf.keras.layers.MaxPooling2D) in each of them. There's a fully-connected layer (tf.keras.layers.Dense) with 128 units on top of it that is activated by a ReLU activation function ('relu'). This model has not been tuned in any wayโ€”the goal is to show you the mechanics using the datasets you just created. To learn more about image classification, visit the Image classification tutorial. End of explanation """ model.compile( optimizer='adam', loss=tf.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=['accuracy']) """ Explanation: Choose the tf.keras.optimizers.Adam optimizer and tf.keras.losses.SparseCategoricalCrossentropy loss function. To view training and validation accuracy for each training epoch, pass the metrics argument to Model.compile. End of explanation """ model.fit( train_ds, validation_data=val_ds, epochs=3 ) """ Explanation: Note: You will only train for a few epochs so this tutorial runs quickly. End of explanation """ list_ds = tf.data.Dataset.list_files(str(data_dir/'*/*'), shuffle=False) list_ds = list_ds.shuffle(image_count, reshuffle_each_iteration=False) for f in list_ds.take(5): print(f.numpy()) """ Explanation: Note: You can also write a custom training loop instead of using Model.fit. To learn more, visit the Writing a training loop from scratch tutorial. You may notice the validation accuracy is low compared to the training accuracy, indicating your model is overfitting. You can learn more about overfitting and how to reduce it in this tutorial. Using tf.data for finer control The above Keras preprocessing utilityโ€”tf.keras.utils.image_dataset_from_directoryโ€”is a convenient way to create a tf.data.Dataset from a directory of images. For finer grain control, you can write your own input pipeline using tf.data. This section shows how to do just that, beginning with the file paths from the TGZ file you downloaded earlier. End of explanation """ class_names = np.array(sorted([item.name for item in data_dir.glob('*') if item.name != "LICENSE.txt"])) print(class_names) """ Explanation: The tree structure of the files can be used to compile a class_names list. 
End of explanation """ val_size = int(image_count * 0.2) train_ds = list_ds.skip(val_size) val_ds = list_ds.take(val_size) """ Explanation: Split the dataset into training and validation sets: End of explanation """ print(tf.data.experimental.cardinality(train_ds).numpy()) print(tf.data.experimental.cardinality(val_ds).numpy()) """ Explanation: You can print the length of each dataset as follows: End of explanation """ def get_label(file_path): # Convert the path to a list of path components parts = tf.strings.split(file_path, os.path.sep) # The second to last is the class-directory one_hot = parts[-2] == class_names # Integer encode the label return tf.argmax(one_hot) def decode_img(img): # Convert the compressed string to a 3D uint8 tensor img = tf.io.decode_jpeg(img, channels=3) # Resize the image to the desired size return tf.image.resize(img, [img_height, img_width]) def process_path(file_path): label = get_label(file_path) # Load the raw data from the file as a string img = tf.io.read_file(file_path) img = decode_img(img) return img, label """ Explanation: Write a short function that converts a file path to an (img, label) pair: End of explanation """ # Set `num_parallel_calls` so multiple images are loaded/processed in parallel. train_ds = train_ds.map(process_path, num_parallel_calls=AUTOTUNE) val_ds = val_ds.map(process_path, num_parallel_calls=AUTOTUNE) for image, label in train_ds.take(1): print("Image shape: ", image.numpy().shape) print("Label: ", label.numpy()) """ Explanation: Use Dataset.map to create a dataset of image, label pairs: End of explanation """ def configure_for_performance(ds): ds = ds.cache() ds = ds.shuffle(buffer_size=1000) ds = ds.batch(batch_size) ds = ds.prefetch(buffer_size=AUTOTUNE) return ds train_ds = configure_for_performance(train_ds) val_ds = configure_for_performance(val_ds) """ Explanation: Configure dataset for performance To train a model with this dataset you will want the data: To be well shuffled. To be batched. Batches to be available as soon as possible. These features can be added using the tf.data API. For more details, visit the Input Pipeline Performance guide. End of explanation """ image_batch, label_batch = next(iter(train_ds)) plt.figure(figsize=(10, 10)) for i in range(9): ax = plt.subplot(3, 3, i + 1) plt.imshow(image_batch[i].numpy().astype("uint8")) label = label_batch[i] plt.title(class_names[label]) plt.axis("off") """ Explanation: Visualize the data You can visualize this dataset similarly to the one you created previously: End of explanation """ model.fit( train_ds, validation_data=val_ds, epochs=3 ) """ Explanation: Continue training the model You have now manually built a similar tf.data.Dataset to the one created by tf.keras.utils.image_dataset_from_directory above. You can continue training the model with it. As before, you will train for just a few epochs to keep the running time short. End of explanation """ (train_ds, val_ds, test_ds), metadata = tfds.load( 'tf_flowers', split=['train[:80%]', 'train[80%:90%]', 'train[90%:]'], with_info=True, as_supervised=True, ) """ Explanation: Using TensorFlow Datasets So far, this tutorial has focused on loading data off disk. You can also find a dataset to use by exploring the large catalog of easy-to-download datasets at TensorFlow Datasets. As you have previously loaded the Flowers dataset off disk, let's now import it with TensorFlow Datasets. 
Download the Flowers dataset using TensorFlow Datasets: End of explanation """ num_classes = metadata.features['label'].num_classes print(num_classes) """ Explanation: The flowers dataset has five classes: End of explanation """ get_label_name = metadata.features['label'].int2str image, label = next(iter(train_ds)) _ = plt.imshow(image) _ = plt.title(get_label_name(label)) """ Explanation: Retrieve an image from the dataset: End of explanation """ train_ds = configure_for_performance(train_ds) val_ds = configure_for_performance(val_ds) test_ds = configure_for_performance(test_ds) """ Explanation: As before, remember to batch, shuffle, and configure the training, validation, and test sets for performance: End of explanation """
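""" Explanation: One optional extension that is not part of the tutorial text above: the raw tf_flowers images from TensorFlow Datasets come in varying sizes, so they need to be resized before batching if you want to feed them to the model defined earlier. The sketch below reloads the splits, maps a resize step, reuses configure_for_performance, and trains for a single epoch; reloading the splits and the one-epoch setting are assumptions made only to keep the example short. End of explanation """
(train_ds, val_ds, test_ds), metadata = tfds.load(
    'tf_flowers',
    split=['train[:80%]', 'train[80%:90%]', 'train[90%:]'],
    with_info=True,
    as_supervised=True,
)

def resize_image(image, label):
  # Match the 180x180 input size used for the directory-based dataset
  return tf.image.resize(image, [img_height, img_width]), label

train_ds = configure_for_performance(train_ds.map(resize_image, num_parallel_calls=AUTOTUNE))
val_ds = configure_for_performance(val_ds.map(resize_image, num_parallel_calls=AUTOTUNE))
test_ds = configure_for_performance(test_ds.map(resize_image, num_parallel_calls=AUTOTUNE))

model.fit(train_ds, validation_data=val_ds, epochs=1)
loss, accuracy = model.evaluate(test_ds)
print("Test accuracy:", accuracy)
""" Explanation: Because the model was already trained on the directory-based dataset above, this run simply continues training the same weights; re-instantiate the Sequential model first if an independent comparison is wanted. End of explanation """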
esa-as/2016-ml-contest
MSS_Xmas_Trees/ml_seg_try1.ipynb
apache-2.0
from numpy.fft import rfft from scipy import signal import numpy as np import matplotlib.pyplot as plt import plotly.plotly as py import pandas as pd import timeit from sqlalchemy.sql import text from sklearn import tree from sklearn import cross_validation from sklearn.cross_validation import train_test_split from sklearn import metrics from sklearn.cross_validation import cross_val_score from sklearn.tree import export_graphviz from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegression #import sherlock.filesystem as sfs #import sherlock.database as sdb from sklearn import preprocessing from sklearn.cross_validation import train_test_split """ Explanation: Contest entry by Wouter Kimman Strategy: Trying some pre-processing with a simple random forest... hopefully the preprocessing is as important as the type of classifier. The problem has a smell of decision trees to me, since most predictions of the neighboring facies are very accurate. End of explanation """ filename = 'training_data.csv' training_data0 = pd.read_csv(filename) training_data0.head() """ Explanation: First steps, reading in and exploring the data are the same as Brendon's steps: End of explanation """ correct_facies_labels = training_data0['Facies'].values feature_vectors = training_data0.drop(['Formation', 'Well Name', 'Depth','Facies','FaciesLabels'], axis=1) feature_vectors.describe() """ Explanation: 1) Prediction from training set using all wells Let's do a first shot with random forests. First we cheat and see how well we would do if the test data were not from an independent well. End of explanation """ scaler = preprocessing.StandardScaler().fit(feature_vectors) scaled_features = scaler.transform(feature_vectors) X_train, X_test, y_train, y_test = train_test_split(scaled_features, correct_facies_labels, test_size=0.2, random_state=0) rf = RandomForestClassifier(max_depth = 15,n_estimators=200,max_features=None) #rf = RandomForestClassifier() rf.fit(X_train, y_train) predicted_random_forest = rf.predict(X_test) print "prediction from random forest:" print metrics.accuracy_score(list(y_test), predicted_random_forest) print "f1 score:" print metrics.f1_score(list(y_test), predicted_random_forest,average = 'weighted') training_data=training_data0.copy() """ Explanation: 2)
Prediction of Blind well End of explanation """ from sklearn.metrics import confusion_matrix from classification_utilities import display_cm, display_adj_cm #conf = confusion_matrix(correct_facies_labels, predicted_gradboost) conf = confusion_matrix(correct_facies_labels, predicted_random_forest) display_cm(conf, facies_labels, hide_zeros=True) """ Explanation: The prediction performs much much beter if the all data is included in the training, compared to blind wells. Shouldn't be that much a surprise but doesn't this suggest some wells are not representative of the others End of explanation """ temp_1=training_data.groupby('Formation').mean() temp_2=training_data.groupby('Facies').mean() #temp_3=training_data.groupby('Facies').count() temp_2 """ Explanation: This is the benchmark to beat : 0.44 using rf, (slightly higher for gradient boost) 3) Data exploration Basic statistics by facies: End of explanation """ temp_4=training_data.groupby('Well Name') #temp_4.describe() #temp_5=training_data.groupby('Well Name').count() #temp_5=training_data.groupby('Well Name').max() temp_5=training_data.groupby('Well Name').mean() temp_5 """ Explanation: Basic statistics by well: End of explanation """ xx0 = list(training_data0.Facies) #xx1 = list(training_data0.DeltaPHI) xx1 = list(training_data0.GR) x_min1=np.roll(xx1, 1) x_min2=np.roll(xx1, 2) x_min3=np.roll(xx1, 3) scale=0.5 #b, a = signal.butter(2, 0.125, analog=False) b, a = signal.butter(2, 0.09, btype='low', analog=False) b, a = signal.butter(2, 0.2, btype='high', analog=False) xx1=xx1-np.mean(xx1) xx_fil = signal.filtfilt(b, a, xx1) xx_mf= signal.medfilt(xx1,15) xx_grad=np.gradient(xx1) fig, ax = plt.subplots(figsize=(30, 20)) plt.plot(scale*xx1, color='black', label='Original Delta PHI') #plt.plot(scale*xx_grad, color='blue', label='derivative') #plt.plot(scale*xx_fil, color='red', label='low pass filter') #plt.plot(scale*xx_fil, color='red', label='high pass filter') plt.plot(scale*xx_mf, color='blue', label='median filter') #plt.plot(x_min1, color='yellow', label='1 sample shift') #xlim([500 800]) plt.plot(xx0, color='green', label='Facies') ax.set_xlim(400,700) #plt.plot(sig_lf, color='#cc0000', label='lfilter') plt.legend(loc="best") plt.show() def magic(df): df1=df.copy() b, a = signal.butter(2, 0.2, btype='high', analog=False) feats00=['GR','ILD_log10','DeltaPHI','PHIND','PE','NM_M','RELPOS'] feats01=['GR','DeltaPHI','PHIND'] for ii in feats0: df1[ii]=df[ii] name1=ii + '_1' name2=ii + '_2' name3=ii + '_3' name4=ii + '_4' xx1 = list(df[ii]) xx_mf= signal.medfilt(xx1,9) x_min3=np.roll(xx_mf, 3) xx1a=xx1-np.mean(xx1) xx_fil = signal.filtfilt(b, a, xx1) xx_grad=np.gradient(xx1a) if ii in feats01: df1[name1]=x_min3 df1[name2]=xx_fil df1[name3]=xx_grad df1[name4]=xx_mf return df1 #del training_data1 df=training_data0.copy() training_data1=magic(df) x=rf.feature_importances_ kolummen = feature_vectors.columns.tolist() mask=x>0.025 mask=x>0.035 #mask=x>0.025 x1=x[mask] #kols=kolummen[mask] kols=[] kols_out=[] count=0 for name in kolummen: if mask[count]==True: kols.append(name) else: kols_out.append(name) count+=1 fig, ax = plt.subplots(figsize=(30, 20)) ## the data N = len(kols) #N = len(kolummen)-18 #X=gradboost.feature_importances_ #X=rf.feature_importances_ X=x1 ## necessary variables ind = np.arange(N) # the x locations for the groups width = 0.30 # the width of the bars fsize=16 ## the bars rects1 = ax.bar(ind, X, width, color='black') # axes and labels ax.set_xlim(-width,len(ind)+width) #ax.set_ylim(0,45) ax.set_xlabel('feature', 
fontsize=fsize) ax.set_ylabel('importance', fontsize=fsize) ax.set_title('feature importance', fontsize=fsize) #xTickMarks = ['Group'+str(i) for i in range(1,6)] xTickMarks = kols ax.set_xticks(ind+width) xtickNames = ax.set_xticklabels(xTickMarks, fontsize=fsize) plt.setp(xtickNames, rotation=45, fontsize=fsize) ## add a legend #ax.legend( (rects1[0], rects2[0]), ('Men', 'Women') ) print count print N plt.show() training_data1a = training_data1.drop(kols_out, axis=1) training_data1a.head() def run_test(remove_well, df_train): #df_test=training_data0 df_test=training_data1 #--------------------------------- #df_train=training_data1a #df_train=training_data2 #df_test=df_test.drop(kols_out, axis=1) #--------------------------------- #df_train=training_data0 #df_train=training_data1 #df_train=df_train.drop(kols_out, axis=1) #training_data1a = training_data1.drop(kols_out, axis=1) blind = df_test[df_test['Well Name'] == remove_well] training_data = df_train[df_train['Well Name'] != remove_well] correct_facies_labels_train = training_data['Facies'].values feature_vectors = training_data.drop(['Formation', 'Well Name', 'Depth','Facies','FaciesLabels'], axis=1) scaler = preprocessing.StandardScaler().fit(feature_vectors) #scaled_features_train = scaler.transform(feature_vectors) scaled_features_train = feature_vectors rf = RandomForestClassifier(max_depth = 15, n_estimators=600) #rf = RandomForestClassifier() rf.fit(scaled_features_train, correct_facies_labels_train) # get the blind well correct_facies_labels = blind['Facies'].values feature_vectors = blind.drop(['Formation', 'Well Name', 'Depth','Facies','FaciesLabels'], axis=1) scaler = preprocessing.StandardScaler().fit(feature_vectors) #scaled_features = scaler.transform(feature_vectors) scaled_features =feature_vectors predicted_random_forest = rf.predict(scaled_features) #print "All training data different from test well" #print "prediction from random forest:" #print metrics.accuracy_score(correct_facies_labels, predicted_random_forest) #printnt "f1 score:" #print metrics.f1_score(correct_facies_labels, predicted_random_forest,average = None) #print "average" out_f1=metrics.f1_score(correct_facies_labels, predicted_random_forest,average = 'micro') return out_f1 #print # 5-Fold Cross validation #print "3-Fold Cross validation" #cv_scores = cross_val_score(rf, scaled_features, correct_facies_labels, cv=4, scoring='f1_macro') #avg_cv_score = np.mean(cv_scores) #print cv_scores #avg_cv_score #df_train=training_data1a df_train=training_data1 wells=['CHURCHMAN BIBLE','SHANKLE','NOLAN','NEWBY','Recruit F9' ,'CROSS H CATTLE','LUKE G U','SHRIMPLIN'] av_all=[] for remove_well in wells: all=[] print("well : %s, f1 for different runs:" % (remove_well)) for ii in range(3): out_f1=run_test(remove_well,df_train) all.append(out_f1) av1=np.mean(all) av_all.append(av1) print("average f1 is %f, 2*std is %f" % (av1, 2*np.std(all)) ) print("overall average f1 is %f" % (np.mean(av_all))) #rf = RandomForestClassifier(max_depth = 1, max_features= 'sqrt', n_estimators=50, oob_score = True) rfc = RandomForestClassifier(max_depth = 9, max_features= 'sqrt', n_estimators=250) #rf = RandomForestClassifier() #rf.fit(scaled_features_train, correct_facies_labels_train) param_grid = { 'max_depth' : [5,6,7,8,9], 'n_estimators': [150, 250, 350, 600] } # 'max_features': ['auto', 'sqrt', 'log2'] #} CV_rfc = GridSearchCV(estimator=rfc, param_grid=param_grid, cv= 5) #CV_rfc.fit(X, y) CV_rfc.fit(scaled_features_train, correct_facies_labels_train) print CV_rfc.best_params_ """ 
Explanation: 4 ) Select a feature from 1 well and play with this End of explanation """ filename = 'training_data.csv' training_data = pd.read_csv(filename) filename = 'validation_data_nofacies.csv' test_data = pd.read_csv(filename) test_data.head() training_data['Well Name'] = training_data['Well Name'].astype('category') training_data['Formation'] = training_data['Formation'].astype('category') training_data['Well Name'].unique() facies_labels = ['SS', 'CSiS', 'FSiS', 'SiSh', 'MS', 'WS', 'D','PS', 'BS'] training_data.loc[:,'FaciesLabels'] = training_data.apply(lambda row: label_facies(row, facies_labels), axis=1) #preprocessing test_data1=magic(test_data) training_data1=magic(training_data) def predict_final(test_well, training_data,test_data): blind = test_data[test_data['Well Name'] == test_well] correct_facies_labels_train = training_data['Facies'].values feature_vectors_train = training_data.drop(['Formation', 'Well Name', 'Depth','Facies','FaciesLabels'], axis=1) rf = RandomForestClassifier(max_depth = 15, n_estimators=600) rf.fit(feature_vectors_train, correct_facies_labels_train) # the blind well feature_vectors_blind = blind.drop(['Formation', 'Well Name', 'Depth'], axis=1) predicted_random_forest = rf.predict(feature_vectors_blind) #out_f1=metrics.f1_score(correct_facies_labels, predicted_random_forest,average = 'micro') return predicted_random_forest test_well='STUART' predicted_stu=predict_final(test_well, training_data1, test_data1) test_well='CRAWFORD' predicted_craw=predict_final(test_well, training_data1, test_data1) predicted_stu predicted_craw """ Explanation: Train for the test data End of explanation """
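""" Explanation: As a final housekeeping sketch that is not in the notebook itself, the two prediction arrays can be written back onto the validation rows and saved with pandas. The column name 'Facies' and the output filename are assumptions made for illustration; predicted_stu and predicted_craw line up with the STUART and CRAWFORD rows because predict_final filtered the blind wells in the same row order. End of explanation """
submission = test_data.copy()
submission['Facies'] = 0  # placeholder column; filled per well below
submission.loc[submission['Well Name'] == 'STUART', 'Facies'] = predicted_stu
submission.loc[submission['Well Name'] == 'CRAWFORD', 'Facies'] = predicted_craw
submission.to_csv('ml_seg_try1_predicted_facies.csv', index=False)
submission.head()
""" Explanation: Any downstream scoring would then compare this 'Facies' column against the labels for the two blind wells once they are released. End of explanation """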
alee156/NeuroCV
2 Dimensional Array Manipulations and Equalization.ipynb
apache-2.0
import matplotlib.pyplot as plt import scipy.ndimage import csv,gc import matplotlib import numpy as np import nibabel as nb %matplotlib inline BINS = 32 import csv,gc import matplotlib import numpy as np import nibabel as nb %matplotlib inline BINS = 32 ### Run below if necessary ##import sys ##sys.path.append('/usr/local/lib/python2.7/site-packages') import math import csv,gc import matplotlib import numpy as np import cv2 #%matplotlib BINS = 32 import matplotlib.pyplot as plt %matplotlib inline from skimage import data, img_as_float from skimage import exposure z = np.random.randint(0.0,10.0,(2,2)) print z print z[0] print z[1] zz = z.reshape(-1) print zz print zz.sum() plt.hist(zz, bins='auto') plt.show() ## We expect 1 zero, 2 threes, 1 six plt.hist(zz, bins = 10) plt.show() ## Histogram Normalization '''zhisteq = zz zz.astype(float) i=0 while i<7: zhisteq[i] = (zz[i]/histsum) i+=1 zhisteq.astype(float) print zhisteq ''' zhistnorm = zz*1.0/sum(zz) print zhistnorm plt.hist(zhistnorm, bins = 7) plt.show """ Explanation: 2 Dimensional Array Manipulations and Equalization I have absolutely no idea why the below errors occur, I searched through stack exchange and apparently there is some kind of circular logic occuring with the matplotlib import, but my error was not fixed through python kernel editing unfortunately. End of explanation """ import math ## Reminder of Z's values print z # Number of pixels pixels = len(z) * len(z[0]) print "The number of pixels is {}".format(pixels) ## The output should be 4 since there are 4 numbers in this array ##Flatten method doesn't work because of tuple conversion later on '''# Now we flatten Z zz = z.reshape(-1)''' # Initialize histogram and CDF hist = {} cdf = {} norm_cdf = {} ## The range value should be be adjusted to the bin number ### In our case the range and bin number is obviously 10 ### since we generated numbers 1-10 BINS = 10 for i in range(BINS): hist[i] = 0 cdf[i] = 0 norm_cdf[i] = 0 # Create histogram for row in z: for val in row: hist[val] += 1 ''' for val in zz: hist[val] += 1 ''' # Create cdf for i in range(BINS): for j in range(i+1): cdf[i] += hist[j] norm_cdf[i] = int(math.floor(float(cdf[i]-1)/63*BINS)) print "The histogram values are {}".format(hist) print "The cdf values are {}".format(cdf) print "The normalized cdf values are {}".format(norm_cdf) """ Explanation: Let's pretend Z is a very simple image End of explanation """ newimg = np.empty(z.shape) ## This should be the same exact dimensions as the original array print newimg print z.shape ## set x_length to the first number, y_length to the second x_length = z.shape[0] y_length = z.shape[1] print x_length, y_length for i in range(x_length): for j in range(y_length): newimg[i][j] = norm_cdf[ z[i][j] ] print newimg """ Explanation: This matches our constructed histogram so we can proceed to create a new histogram and construct the equalized one End of explanation """ img = [ [52, 55, 61, 66, 70, 61, 64, 73], [63, 59, 55, 90, 109, 85, 69, 72], [62, 59, 68, 113, 144, 104, 66, 73], [63, 58, 71, 122, 154, 106, 70, 69], [67, 61, 68, 104, 126, 88, 68, 70], [79, 65, 60, 70, 77, 68, 58, 75], [85, 71, 64, 59, 55, 61, 65, 83], [87, 79, 69, 68, 65, 76, 78, 94] ] img = np.asarray(img) print img print " " print img[0] print img[1] print " " imgflat = img.reshape(-1) print imgflat print imgflat.sum() print " " fig = plt.hist(imgflat, bins='auto') plt.title('Histogram') plt.show() print " " imgnorm = imgflat*1.0/sum(imgflat) print imgnorm fig = plt.hist(imgnorm, bins = 'auto') 
plt.title('Normalized Histogram') plt.show import math ## Reminder of Z's values print img # Number of pixels pixels = len(img) * len(img[0]) print "The number of pixels is {}".format(pixels) ## The output should be 4 since there are 4 numbers in this array ##Flatten method doesn't work because of tuple conversion later on '''# Now we flatten Z zz = z.reshape(-1)''' # Initialize histogram and CDF hist = {} cdf = {} norm_cdf = {} ## The range value should be be adjusted to the bin number BINS = 255 for i in range(BINS): hist[i] = 0 cdf[i] = 0 norm_cdf[i] = 0 # Create histogram for row in img: for val in row: hist[val] += 1 ''' for val in zz: hist[val] += 1 ''' # Create cdf for i in range(BINS): for j in range(i+1): cdf[i] += hist[j] norm_cdf[i] = int(math.floor(float(cdf[i]-1)/63*BINS)) print "The histogram values are {}".format(hist) print "The cdf values are {}".format(cdf) print "The normalized cdf values are {}".format(norm_cdf) newimg = np.empty(img.shape) ## This should be the same exact dimensions as the original array print newimg print img.shape ## set x_length to the first number, y_length to the second x_length = img.shape[0] y_length = img.shape[1] print x_length, y_length for i in range(x_length): for j in range(y_length): newimg[i][j] = norm_cdf[ img[i][j] ] print newimg fig = plt.hist(newimg, bins = 'auto') plt.title('Equalized Histogram') plt.show ## This is wrong for i in range(8): for j in range(8): newimg[i][j] = norm_cdf[ img[i][j] ] print '+-------+-----------+-----+----------------+' print '| %5s | %9s | %3s | %14s |' % ('Value', 'Histogram', 'cdf', 'Normalized cdf') print '+-------+-----------+-----+----------------+' for i in range(255): if hist[i] == 0: continue print '| %5s | %9s | %3s | %14s |' % (i, hist[i], cdf[i], norm_cdf[i]) print '+-------+-----------+-----+----------------+' print '' print 'Original subimage:' print '' for i in range(8): print ('%4d'*8) % tuple(img[i]) print '' print '' print 'Equalized subimage:' print '' for i in range(8): print ('%4d'*8) % tuple(newimg[i]) for i in range(x_length): for j in range(y_length): newimg[i][j] = norm_cdf[ img[i][j] ] print newimg fig = plt.hist(newimg, bins = 'auto') plt.title('Equalized Histogram') plt.show histeqimg = np.empty(img.shape) for i in range(8): histeqimg[i] = ('%4d'*8) % tuple(newimg[i]) print histeqimg fig = plt.hist(histeqimg, bins = 'auto') plt.title('Equalized Histogram 2') plt.show print(repr(histeqimg)) histeqimg.append(line.strip('\n').strip('\t').split(' ').pop(7)) ### Errors caused by massive number of zeros? 
print '' print 'Original subimage:' print '' for i in range(8): print ('%4d'*8) % tuple(img[i]) print '' imgflat = img.reshape(-1) print img print " " fig = plt.hist(imgflat, bins='auto') plt.title('Original Histogram') plt.show() print '' print '' print 'Equalized subimage:' print '' for i in range(8): print ('%4d'*8) % tuple(newimg[i]) for i in range(x_length): for j in range(y_length): newimg[i][j] = norm_cdf[ img[i][j] ] print '' print newimg fig = plt.hist(newimg, bins = 'auto') plt.title('Equalized Histogram') plt.show fig = plt.hist(imgflat, bins=255) plt.title('Original Histogram') plt.show() fig = plt.hist(newimg, bins = 255) plt.title('Equalized Histogram') plt.show print img print '' print newimg print '' flatimg = img.reshape(-1) flattenedimg, bin_edges1 = np.histogram(flatimg) print flatimg print flattenedimg print '' flatnewimg = newimg.reshape(-1) flattenednewimg, bin_edges2 = np.histogram(flatnewimg) print flatnewimg print flattenednewimg print '' fig = plt.hist(flattenedimg, bins = 255) plt.title('Original Histogram (Flat version)') plt.show fig = plt.hist(flattenednewimg, bins = 255) plt.title('Equalized Histogram (Flat version)') plt.show """ Explanation: This probably didn't work because of the tiny size of the array, let's try something more established End of explanation """
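""" Explanation: A sketch of the "more established" route hinted at above, using the cv2 and skimage.exposure imports already present near the top of the notebook. The uint8 cast and the bin count are choices made for this illustration; cv2.equalizeHist expects an 8-bit single-channel image and exposure.equalize_hist returns floats in [0, 1]. End of explanation """
img_uint8 = img.astype(np.uint8)

# Library-based equalization of the same 8x8 sub-image
cv2_eq = cv2.equalizeHist(img_uint8)        # OpenCV: returns uint8 values spread over 0-255
ski_eq = exposure.equalize_hist(img_uint8)  # scikit-image: returns floats in [0, 1]

fig, axes = plt.subplots(1, 3, figsize=(12, 4))
axes[0].hist(img_uint8.ravel(), bins=32)
axes[0].set_title('Original')
axes[1].hist(cv2_eq.ravel(), bins=32)
axes[1].set_title('cv2.equalizeHist')
axes[2].hist(ski_eq.ravel(), bins=32)
axes[2].set_title('exposure.equalize_hist')
plt.show()
""" Explanation: Comparing these against the manual cdf-based result above makes it easier to see whether the hand-rolled normalization behaves as intended on such a tiny array. End of explanation """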
djgagne/hagelslag-unidata
demos/unidata_users_workshop_2015.ipynb
mit
%matplotlib inline import numpy as np import pandas as pd import matplotlib.pyplot as plt from datetime import datetime, timedelta from mpl_toolkits.basemap import Basemap from IPython.display import display from IPython.html import widgets from scipy.ndimage import gaussian_filter, find_objects from copy import deepcopy from glob import glob """ Explanation: Severe Weather Forecasting with Python and Data Science Tools: Interactive Demo David John Gagne, University of Oklahoma and NCAR Introduction Severe weather forecasting has entered an age of unprecedented access to large model and observational datasets with even greater hordes of data in the pipeline. With multiple ensembles of convection-allowing models available and an increasing variety of observations derived from radar, satellite, surface, upper air, and crowd-sourcing, forecasters can easily be overwhelmed with guidance. Without ways to organize, synthesize, and visualize the data in a useful manner for forecasters, the pile of new models and observations will languish unused and will not fulfill their full potential. An even worse outcome would be to take the human forecasters completely out of the loop and trust the models, which is a way fraught with peril. Data science tools offer ways to synthesize essential information from many disparate data sources while also quantifying uncertainty. When forecasters use the tools properly, they can identify potential hazards and the associated spatial and time uncertainties more quickly by using the output of the tools to help target their domain knowledge. This module demonstrates how data science tools from the image processing and machine learning families can be used to create a forecast of severe hail. It aims to teach the advantages, challenges, and limitations of these tools through hands-on interaction. End of explanation """ from hagelslag.processing.EnhancedWatershedSegmenter import EnhancedWatershed from hagelslag.data.ModelOutput import ModelOutput from hagelslag.processing.ObjectMatcher import ObjectMatcher, shifted_centroid_distance, centroid_distance from hagelslag.processing.STObject import STObject """ Explanation: Part 1: Storm Track Identification We will be using the hagelslag library to perform object-based data processing of convection-allowing model output. End of explanation """ model_path = "../testdata/spring2015_unidata/" ensemble_name = "SSEF" member ="wrf-s3cn_arw" run_date = datetime(2015, 6, 4) # We will be using the uh_max (hourly max 2-5 km Updraft Helicity) variable for this exercise # cmpref (simulated composite radar reflectivity) is also available. variable = "uh_max" start_date = run_date + timedelta(hours=12) end_date = run_date + timedelta(hours=29) model_grid = ModelOutput(ensemble_name, member, run_date, variable, start_date, end_date, model_path, single_step=False) model_grid.load_data() model_grid.load_map_info("../mapfiles/ssef2015.map") """ Explanation: We will be using model output from the control run of the Center for Analysis and Prediction of Storms 2015 Storm-Scale Ensemble Forecast system. The model output for this exercise is included with the hagelslag package, but additional variables can be downloaded from the Unidata RAMADDA server or from my personal page. Please untar the data in a local directory and modify the model_path variable below to point to the spring2015_unidata directory. 
End of explanation """ lon_range = (-108, -90) lat_range = (35, 45) basemap = Basemap(projection="cyl", resolution="l", llcrnrlon=lon_range[0], urcrnrlon=lon_range[1], llcrnrlat=lat_range[0], urcrnrlat=lat_range[1]) plt.figure(figsize=(12,8)) basemap.drawstates() plt.contourf(model_grid.lon, model_grid.lat, model_grid.data.max(axis=0), np.arange(25,225,25), extend="max", cmap="YlOrRd") plt.colorbar(shrink=0.6, fraction=0.05, pad=0.02 ) title_info = plt.title("Max Updraft Helicity {0}-{1}".format(start_date.strftime("%d %B %y %H:%M"), end_date.strftime("%d %B %y %H:%M"))) """ Explanation: The max updraft helicity map over the full period shows multiple long and intense tracks in the central Plains. End of explanation """ zoomable_bmap = Basemap(projection="cyl", resolution="l", llcrnrlon=model_grid.lon.min(), llcrnrlat=model_grid.lat.min(), urcrnrlon=model_grid.lon.max(), urcrnrlat=model_grid.lat.max(), fix_aspect=False) def model_time_viewer(lon_range, lat_range, hour): #lon_range = (-108, -90) #lat_range = (35, 45) #basemap = Basemap(projection="cyl", # resolution="l", # llcrnrlon=lon_range[0], # urcrnrlon=lon_range[1], # llcrnrlat=lat_range[0], # urcrnrlat=lat_range[1]) plt.figure(figsize=(12,8)) zoomable_bmap.drawstates() zoomable_bmap.drawcoastlines() zoomable_bmap.drawcountries() plt.contourf(model_grid.lon, model_grid.lat, model_grid.data[hour - model_grid.start_hour], np.arange(25,225,25), extend="max", cmap="YlOrRd") plt.colorbar(shrink=0.6, fraction=0.05, pad=0.02) title_info = plt.title("Max Updraft Helicity Valid {0}".format((run_date + timedelta(hours=hour)).strftime( "%d %B %Y %H UTC"))) plt.xlim(*lon_range) plt.ylim(*lat_range) lon_slider = widgets.IntRangeSlider(min=int(model_grid.lon.min()), max=int(model_grid.lon.max()), step=1, value=(-108, -90)) lat_slider = widgets.IntRangeSlider(min=int(model_grid.lat.min()), max=int(model_grid.lat.max()), value=(35,45), step=1) hour_slider = widgets.IntSlider(min=model_grid.start_hour, max=model_grid.end_hour, step=1, value=0) w = widgets.interactive(model_time_viewer, lon_range=lon_slider, lat_range=lat_slider, hour=hour_slider) display(w) """ Explanation: To investigate the timing of the tracks, we can use this interactive widget to explore the tracks through time and to zoom in on areas of interest.
End of explanation """ def ew_demo(min_max, step_val, size_val=50, delta_val=5, time=12): ew = EnhancedWatershed(min_max[0],step_val,min_max[1],size_val,delta_val) plt.figure(figsize=(12,8)) basemap.drawstates() labels = ew.label(gaussian_filter(model_grid.data[time - model_grid.start_hour], 1)) plt.contourf(model_grid.lon, model_grid.lat, labels, np.arange(1,labels.max()), extend="max", cmap="Set1") plt.xlim(*lon_range) plt.ylim(*lat_range) plt.grid() plt.title("Enhanced Watershed ({0:d},{1:d},{2:d},{3:d},{4:d}) Time: {5:d}".format(min_max[0], step_val, min_max[1], size_val, delta_val, time)) minmax_slider = widgets.IntRangeSlider(min=10, max=300, step=10, value=(25,200)) step_slider = widgets.IntSlider(min=1, max=10, step=1, value=1) size_slider = widgets.IntSlider(min=5, max=200, step=5, value=50) delta_slider = widgets.IntSlider(min=10, max=200, step=10, value=20) time_slider = widgets.IntSlider(min=model_grid.start_hour, max=model_grid.end_hour, step=1, value=24) w = widgets.interactive(ew_demo, min_max=minmax_slider, step_val=step_slider, size_val=size_slider, delta_val=delta_slider, time=time_slider) display(w) """ Explanation: Storm Track Identification with the Enhanced Watershed Our first data science tool is the enhanced watershed (Lakshmanan 2009), which is used for identifying features in gridded data. The original watershed transform identifies regions from an image or grid by finding local maxima and then growing objects from those maxima in discrete steps by looking for adjacent pixels with at least a certain intensity in an iterative fashion. The traditional watershed uses an intensity threshold as the stopping criterion for growth, which can produce unrealistic looking objects. The enhanced watershed first discretizes the data and then uses size and relative intensity thresholds to identify discrete objects. Buffer regions are also created around each object. The enhanced watershed has the following tunable parameters: * min, step, max: parameters to quantize the grid into a discrete number of levels * size: growth of an object is stopped after it reaches the specified number of grid points in area * delta: the maximum range of values contained within an object Exercise: Manual Tuning Pick a model time step and tune the enhanced watershed parameters until the objects look reasonable. Note how changing parameter values affects the shape of the objects. See how your chosen set of parameters handles other time steps. Finally, see what parameter settings produce particularly poor objects. If you find either a particularly good representation or a hilariously bad one, right-click the image, save it, and email the image to me at [email protected]. 
End of explanation """ def get_forecast_objects(model_grid, ew_params, min_size, gaussian_window): ew = EnhancedWatershed(*ew_params) model_objects = [] print "Find model objects Hour:", for h, hour in enumerate(np.arange(model_grid.start_hour, model_grid.end_hour + 1)): print "{0:02d}".format(hour), hour_labels = ew.size_filter(ew.label(gaussian_filter(model_grid.data[h], gaussian_window)), min_size) obj_slices = find_objects(hour_labels) num_slices = len(obj_slices) model_objects.append([]) if num_slices > 0: for sl in obj_slices: model_objects[-1].append(STObject(model_grid.data[h][sl], np.where(hour_labels[sl] > 0, 1, 0), model_grid.x[sl], model_grid.y[sl], model_grid.i[sl], model_grid.j[sl], hour, hour, dx=3000)) if h > 0: dims = model_objects[-1][-1].timesteps[0].shape model_objects[-1][-1].estimate_motion(hour, model_grid.data[h-1], dims[1], dims[0]) return model_objects min_thresh = 20 max_thresh = 200 step = 1 max_size = 90 min_size = 20 delta = 100 gaussian_filter_size = 2 model_objects = get_forecast_objects(model_grid, (min_thresh, step, max_thresh, max_size, delta), min_size, gaussian_filter_size) """ Explanation: Once you find a desirable set of enhanced watershed parameters, input them below and generate storm objects for all time steps. End of explanation """ def track_forecast_objects(input_model_objects, model_grid, object_matcher): model_objects = deepcopy(input_model_objects) hours = np.arange(int(model_grid.start_hour), int(model_grid.end_hour) + 1) tracked_model_objects = [] for h, hour in enumerate(hours): past_time_objs = [] for obj in tracked_model_objects: # Potential trackable objects are identified if obj.end_time == hour - 1: past_time_objs.append(obj) # If no objects existed in the last time step, then consider objects in current time step all new if len(past_time_objs) == 0: tracked_model_objects.extend(model_objects[h]) # Match from previous time step with current time step elif len(past_time_objs) > 0 and len(model_objects[h]) > 0: assignments = object_matcher.match_objects(past_time_objs, model_objects[h], hour - 1, hour) unpaired = range(len(model_objects[h])) for pair in assignments: past_time_objs[pair[0]].extend(model_objects[h][pair[1]]) unpaired.remove(pair[1]) if len(unpaired) > 0: for up in unpaired: tracked_model_objects.append(model_objects[h][up]) #print("Tracked Model Objects: {0:03d} Hour: {1:02d}".format(len(tracked_model_objects), hour)) return tracked_model_objects def make_tracks(dist_weight, max_distance): global tracked_model_objects object_matcher = ObjectMatcher([shifted_centroid_distance, centroid_distance], np.array([dist_weight, 1-dist_weight]), np.array([max_distance * 1000] * 2)) tracked_model_objects = track_forecast_objects(model_objects, model_grid, object_matcher) color_list = ["red", "orange", "blue", "green", "purple", "darkgreen", "teal", "brown"] color_arr = np.tile(color_list, len(tracked_model_objects) / len(color_list) + 1) plt.figure(figsize=(12, 8)) basemap.drawstates() for t, tracked_model_object in enumerate(tracked_model_objects): traj = tracked_model_object.trajectory() t_lon, t_lat = model_grid.proj(traj[0], traj[1], inverse=True) plt.plot(t_lon, t_lat, marker='o', markersize=4, color=color_arr[t]) #plt.barbs(t_lon, t_lat, tracked_model_object.u /3000, # tracked_model_object.v / 3000.0, length=6, # barbcolor=color_arr[t]) plt.title("Forecast Tracks Shifted Centroid: {0:0.1f}, Centroid: {1:0.1f}, Max Distance: {2:3d} km".format( dist_weight, 1-dist_weight, max_distance)) tracked_model_objects = None weight_slider 
= widgets.FloatSlider(min=0, max=1, step=1, value=1) dist_slider = widgets.IntSlider(min=10, max=1000, step=10, value=50) track_w = widgets.interactive(make_tracks, dist_weight=weight_slider, max_distance=dist_slider) display(track_w) """ Explanation: Object Tracking Tracking storms over time provides useful information about their evolution and threat potential. However, storm tracking is a challenging problem due to storms forming, splitting, merging, and dying. Basic object-based storm tracking methods compare the locations of storms at one time step with the locations at the previous time steps and then find an optimal way to match storms from the two sets appropriately. End of explanation """ from sklearn.linear_model import LinearRegression, Ridge from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor from sklearn.neighbors import KernelDensity from scipy.stats import norm """ Explanation: Part 2: Hail Size Prediction with Machine Learning Once storm tracks have been identified, data can be extracted from within each step of the storm track from the other model fields. The forecast tracks are also associated with the observed tracks using a process similar to storm tracking. Storm track data and associated hail sizes have been extracted for a set of model runs from May 20 through 3 June 2015. We will use this data to find a relationship between the model output and hail size and try to account for the uncertainty in that relationship. First we will import some statistical and machine learning models from the scikit-learn package. The library supports a wide variety of machine learning models, which are described in great detail in the official documentation. End of explanation """ train_data_dir = "../testdata/track_data_csv_unidata_train/" forecast_data_dir = "../testdata/track_data_csv_unidata_forecast/" train_step_files = sorted(glob(train_data_dir + "track_step_SSEF*.csv")) train_total_files = sorted(glob(train_data_dir + "track_total_SSEF*.csv")) track_step_data = pd.concat(map(pd.read_csv, train_step_files), ignore_index=True) track_total_data = pd.concat(map(pd.read_csv, train_total_files), ignore_index=True) track_forecast_data = pd.read_csv(forecast_data_dir + "track_step_SSEF_wrf-s3cn_arw_20150604.csv") pd.set_option('display.max_columns', track_step_data.shape[1]) print track_step_data.shape track_step_data.describe() """ Explanation: First, we will upload the data using the pandas library. The data are stored in DataFrames, a 2-dimensional data structure in which each row is a record, and each column contains data of the same type. DataFrames allow arbitrary indexing of the rows and columns. They are based on the R data frame. The individual steps of each track are stored in track_step_data, and information about the full tracks are stored in track_total_data. The dataset contains 117 columns of data. End of explanation """ track_step_data["Hail_Size"].hist(bins=np.arange(0,105,5)) plt.xlabel("Hail Size (mm)") plt.ylabel("Frequency") """ Explanation: The extracted hail sizes show a skewed distribution with a long tail. Storms with a hail size of 0 were not matched with an observed track and should be excluded when building a regression model. 
End of explanation """ # We are filtering the unmatched storm tracks and storm tracks matched with unrealistically high values filter_idx = (track_step_data['Hail_Size'] > 0) & (track_step_data['Hail_Size'] < 100) x_var = "uh_max_max" print "Standard deviation", track_step_data["Hail_Size"][filter_idx].std() print "Correlation coefficient", np.corrcoef(track_step_data[x_var][filter_idx], track_step_data['Hail_Size'][filter_idx])[0,1] lr = LinearRegression() log_lr = LinearRegression() log_lr.fit(np.log(track_step_data.ix[filter_idx,[x_var]]), np.log(track_step_data['Hail_Size'][filter_idx])) lr.fit(track_step_data.ix[filter_idx,[x_var]], track_step_data['Hail_Size'][filter_idx]) print "Linear model:", "a", lr.coef_[0], "b",lr.intercept_ print "Power law model:","a",log_lr.coef_[0], "b",log_lr.intercept_ plt.scatter(track_step_data.ix[filter_idx, x_var], track_step_data.ix[filter_idx, 'Hail_Size'], 10, 'r') uh_test_vals = np.arange(1 , track_step_data.ix[filter_idx, x_var].max()) power_hail_vals = np.exp(log_lr.intercept_) * uh_test_vals ** log_lr.coef_[0] hail_vals = lr.intercept_ + lr.coef_[0] * uh_test_vals plt.plot(uh_test_vals, power_hail_vals) plt.plot(uh_test_vals, hail_vals) plt.xlabel(x_var) plt.ylabel("Hail Size (mm)") """ Explanation: The simplest and generally first choice for a statistical model is an ordinary linear regression. The model minimizes the mean squared error over the full training set. Since we used updraft helicity to identify the storm tracks, we start with building two linear models using the maximum updraft helicity and observed hail size. We first fit a linear model of the form $y=ax+b$. Then we fit a power-law model (think Z-R relationship) of the form $\ln(y)=a\ln(x) + b$. The training data and the two linear models are plotted below. End of explanation """ rf = RandomForestRegressor(n_estimators=500, min_samples_split=20, max_features="sqrt") rf.fit(track_step_data.ix[filter_idx, 3:-1], track_step_data['Hail_Size'][filter_idx]) """ Explanation: While the linear regression fit does show a slight positive relationship with hail size, it also shows a large amount of variance. We could try to train a multiple linear regression from a subset of all the variables, but finding that subset is time consuming, and the resulting model still relies on the assumption of constant variance and normality. Alternatively, we could use a decision tree, which is a popular model from the machine learning community that performs variable selection, is robust against outliers and missing data, and does not rely on any parametric data model assumptions. While individual decision trees do not provide very high accuracy, randomized ensembles of decision trees are consistently among the best performing models in many applications. We will experiment with the Random Forest, a popular decision tree ensemble due to its ease of use and generally high accuracy. 
End of explanation """ def plot_importances(num_features): feature_names = np.array(["{0} ({1:d})".format(f, x) for x, f in enumerate(track_step_data.columns[3:-1].values)]) feature_ranks = np.argsort(rf.feature_importances_) plt.figure(figsize=(5,8)) plt.barh(np.arange(feature_ranks.size)[-num_features:], rf.feature_importances_[feature_ranks][-num_features:], height=1) plt.yticks(np.arange(feature_ranks.size)[-num_features:] + 0.5, feature_names[feature_ranks][-num_features:]) plt.ylim(feature_names.size-num_features, feature_names.size) plt.xlabel("Normalized Importance") plt.title("Random Forest Variable Importance") feature_slider = widgets.IntSlider(min=1, max=100, value=10) feature_w = widgets.interactive(plot_importances, num_features=feature_slider) display(feature_w) """ Explanation: Random forests provide a measure of how much each input variable affects the performance of the model called variable importance. It is a normalized measure of the decrease in error produced by each variable. End of explanation """ ver_idx = (track_forecast_data["Hail_Size"].values > 0) & (track_forecast_data["Hail_Size"].values < 100) rf_preds = rf.predict(track_forecast_data.ix[ver_idx, 3:-1]) lr_preds = lr.predict(track_forecast_data.ix[ver_idx,["uh_max_max"]]) rf_rmse = np.sqrt(np.mean(np.power(rf_preds - track_forecast_data.ix[ver_idx, "Hail_Size"], 2))) lr_rmse = np.sqrt(np.mean(np.power(lr_preds - track_forecast_data.ix[ver_idx, "Hail_Size"], 2))) plt.figure(figsize=(12, 6)) plt.subplot(1,2,1) plt.scatter(rf_preds, track_forecast_data.ix[ver_idx, "Hail_Size"]) plt.plot(np.arange(0, 65, 5), np.arange(0, 65, 5), "k--") plt.xlabel("Random Forest Hail Size (mm)") plt.ylabel("Observed Hail Size (mm)") plt.title("Random Forest Predictions RMSE: {0:0.3f}".format(rf_rmse)) plt.xlim(20,60) plt.ylim(20,60) plt.subplot(1,2,2) plt.scatter(lr_preds, track_forecast_data.ix[ver_idx, "Hail_Size"]) plt.plot(np.arange(0, 65, 5), np.arange(0, 65, 5), "k--") plt.xlabel("Linear Regression Hail Size (mm)") plt.ylabel("Observed Hail Size (mm)") plt.title("Linear Regression Predictions RMSE: {0:0.3f}".format(lr_rmse)) plt.xlim(20,60) plt.ylim(20,60) """ Explanation: We can validate the accuracy of the two models by comparing the hail size predictions for 4 June 2015 from each model. The root mean squared errors from each model are similar, but the random forest appears to be better at spreading the predictions over a larger range of hail sizes. 
End of explanation """ kde = KernelDensity(bandwidth=4) bins = np.arange(0, 100) bins = bins.reshape((bins.size, 1)) rf_tree_preds = np.array([t.predict(track_forecast_data.ix[ver_idx,3:-1]) for t in rf.estimators_]) mean_preds = rf_tree_preds.mean(axis=0) sd_preds = rf_tree_preds.std(axis=0) rf_pdfs = [] for r in range(rf_tree_preds.shape[1]): kde.fit(rf_tree_preds[:, r:r+1]) rf_pdfs.append(np.exp(kde.score_samples(bins))) rf_pdfs = np.array(rf_pdfs) rf_cdfs = rf_pdfs.cumsum(axis=1) pred_sorted = np.argsort(mean_preds) def plot_pdfs(min_max): plt.figure(figsize=(15,5)) plt.subplot(1,2,1) plt.title("Random Forest KDE Prediction PDFs") for r in rf_pdfs[pred_sorted][min_max[0]:min_max[1]+1]: plt.plot(bins, r) plt.ylim(0, 0.1) plt.xlabel("Forecast Hail Size (mm)") plt.ylabel("Probability Density") plt.xticks(np.arange(0, 105, 5)) plt.grid() plt.subplot(1,2,2) plt.title("Random Forest Gaussian Prediction PDFs") for r2, mean_pred in enumerate(mean_preds[pred_sorted][min_max[0]:min_max[1]+1]): plt.plot(bins, norm.pdf(bins, loc=mean_pred, scale=sd_preds[pred_sorted][r2])) plt.ylim(0, 0.1) plt.xlabel("Forecast Hail Size (mm)") plt.ylabel("Probability Density") plt.xticks(np.arange(0, 105, 5)) plt.grid() mm_slider = widgets.IntRangeSlider(min=0, max=rf_pdfs.shape[0]) display(widgets.interactive(plot_pdfs, min_max=mm_slider)) """ Explanation: Since each tree in the random forest produces an independent output, a probability density function can be generated from them for each prediction. To translate the predictions into probabilities, two methods can be used. A kernel density estimate (KDE) uses a moving window to determine probability based on the concentration of events at particular values. The alternative approach is to assume a parametric distribution, such as a Gaussian, and fit the distribution parameters to your predictions. As the KDE is non-parametric, it is much better at identifying the longer tails and secondary peaks that may hint at the chance for extreme hail. The example below shows how the KDE and Gaussian distributions compare for all of the June 4 predictions. End of explanation """
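# One possible way to turn the per-storm PDFs above into exceedance probabilities, for example the
# chance of hail of at least 25 mm (the threshold and the discrete normalization of the KDE mass are
# illustrative choices, not part of the original analysis):
severe_threshold = 25
# Empirical estimate: fraction of trees predicting at least the threshold for each storm step
p_severe_trees = (rf_tree_preds >= severe_threshold).mean(axis=0)
# KDE-based estimate: density mass above the threshold divided by the total mass (bins are 1 mm wide)
p_severe_kde = rf_pdfs[:, severe_threshold:].sum(axis=1) / rf_pdfs.sum(axis=1)
print(p_severe_trees[:5])
print(p_severe_kde[:5])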
ddtm/dl-course
Seminar2/Seminar2.ipynb
mit
%matplotlib inline import matplotlib.pyplot as plt import numpy as np import random from IPython import display from sklearn import datasets, preprocessing (X, y) = datasets.make_circles(n_samples=1024, shuffle=True, noise=0.2, factor=0.4) ind = np.logical_or(y==1, X[:,1] > X[:,0] - 0.5) X = X[ind,:] m = np.array([[1, 1], [-2, 1]]) X = preprocessing.scale(X) y = y[ind] y = 2*y - 1 plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired) plt.show() h = 0.01 x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1 y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1 xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) def visualize(X, y, w, loss, n_iter): plt.clf() Z = classify(np.c_[xx.ravel(), yy.ravel()], w) Z = Z.reshape(xx.shape) plt.subplot(1,2,1) plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8) plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired) plt.xlim(xx.min(), xx.max()) plt.ylim(yy.min(), yy.max()) plt.subplot(1,2,2) plt.plot(loss) ymin, ymax = plt.ylim() plt.ylim(0, ymax) display.clear_output(wait=True) display.display(plt.gcf()) """ Explanation: Seminar 0 (Linear models, Optimization) In this seminar you will implement a simple linear classifier using numpy and your brain. Two-dimensional classification End of explanation """ def expand(X): X_ = np.zeros((X.shape[0], 6)) X_[:,0:2] = X X_[:,2:4] = X**2 X_[:,4] = X[:,0] * X[:,1] X_[:,5] = 1 return X_ def classify(X, w): """ Given feature matrix X [n_samples,2] and weight vector w [6], return an array of +1 or -1 predictions""" <your code here> """ Explanation: Your task starts here First, let's write function that predicts class given X. Since the problem above isn't linearly separable, we add quadratic features to the classifier. This transformation is implemented in the expand function. don't forget to expand X inside classify and other functions Classifying sample should not be much harder that computing sign of dot product. End of explanation """ def compute_loss(X, y, w): """ Given feature matrix X [n_samples,2], target vector [n_samples] of +1/-1, and weight vector w [6], compute scalar loss function using formula above. """ <your code here> def compute_grad(X, y, w): """ Given feature matrix X [n_samples,2], target vector [n_samples] of +1/-1, and weight vector w [6], compute vector [6] of derivatives of L over each weights. """ <your code here> """ Explanation: The loss you should try to minimize is the Hinge Loss. $$ L = {1 \over N} \sum_i max(0,1-y_i \cdot \vec w \vec x_i) $$ End of explanation """ w = np.array([1,0,0,0,0,0]) alpha = 0.0 # learning rate n_iter = 50 batch_size = 4 loss = np.zeros(n_iter) plt.figure(figsize=(12,5)) for i in range(n_iter): ind = random.sample(range(X.shape[0]), batch_size) loss[i] = compute_loss(X, y, w) visualize(X[ind,:], y[ind], w, loss, n_iter) w = w - alpha * compute_grad(X[ind,:], y[ind], w) visualize(X, y, w, loss, n_iter) plt.clf() """ Explanation: Training Find an optimal learning rate for gradient descent for given batch size. You can see the example of correct output below this cell before you run it. Don't change the batch size! 
End of explanation """ w = np.array([1,0,0,0,0,0]) alpha = 0.0 # learning rate mu = 0.0 # momentum n_iter = 50 batch_size = 4 loss = np.zeros(n_iter) plt.figure(figsize=(12,5)) for i in range(n_iter): ind = random.sample(range(X.shape[0]), batch_size) loss[i] = compute_loss(X, y, w) visualize(X[ind,:], y[ind], w, loss, n_iter) <update w and anything else here> visualize(X, y, w, loss, n_iter) plt.clf() """ Explanation: Implement gradient descent with momentum and test it's performance for different learning rate and momentum values. End of explanation """ w = np.array([1,0,0,0,0,0]) alpha = 0.0 # learning rate mean_squared_norm = 0.0 #moving average of gradient norm squared n_iter = 50 batch_size = 4 loss = np.zeros(n_iter) plt.figure(figsize=(12,5)) for i in range(n_iter): ind = random.sample(range(X.shape[0]), batch_size) loss[i] = compute_loss(X, y, w) visualize(X[ind,:], y[ind], w, loss, n_iter) <update w and anything else here> visualize(X, y, w, loss, n_iter) plt.clf() """ Explanation: Implement RMSPROP algorithm End of explanation """
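# One possible way to fill in the exercises above (a sketch, not the only valid solution; the 0.9/0.1
# decay constants and the 1e-8 stabilizer in the RMSProp-style step are illustrative choices):
def compute_loss(X, y, w):
    # mean hinge loss over the expanded batch
    margins = 1 - y * np.dot(expand(X), w)
    return np.mean(np.maximum(0, margins))

def compute_grad(X, y, w):
    # subgradient of the mean hinge loss with respect to w
    X_ = expand(X)
    mask = (1 - y * np.dot(X_, w)) > 0
    return -np.dot(X_.T, y * mask) / X.shape[0]

# Inside the training loops, the "<update w and anything else here>" placeholder could become,
# for the RMSProp variant:
#     grad = compute_grad(X[ind,:], y[ind], w)
#     mean_squared_norm = 0.9 * mean_squared_norm + 0.1 * np.sum(grad ** 2)
#     w = w - alpha * grad / np.sqrt(mean_squared_norm + 1e-8)
# and for the momentum variant (with velocity initialized to np.zeros_like(w) before the loop):
#     velocity = mu * velocity + alpha * compute_grad(X[ind,:], y[ind], w)
#     w = w - velocity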
mas-dse-greina/neon
Display CIFAR-10 Images.ipynb
apache-2.0
from neon.data import CIFAR10 # Neon's helper function to download the CIFAR-10 data from PIL import Image # The Python Image Library (PIL) import numpy as np # Our old friend numpy """ Explanation: Displaying the CIFAR-10 Images In Neon Tony Reina<br> 27 JUN 2017 Neon has convolutional neural networks to predict the 10 classes from the CIFAR-10 dataset. However, I found it difficult to find code that would allow me to just display the images. So I wrote this tutorial. Let's load the CIFAR-10 dataset using Neon's ModelZoo helper function CIFAR10. CIFAR-10 is a labeled dataset of 60,0000 32x32 images collected by Alex Krizhevsky, Vinod Nair, and Geoffrey Hinton. The Neon data randomly selects 50,000 of the images for training and 10,000 for testing/validation. There are 10 classes: | Class # | Class Name | |: ------- :| ----------- | | 0 | airplane | | 1 | automobile | | 2 | bird | | 3 | cat | | 4 | deer | | 5 | dog | | 6 | frog | | 7 | horse | | 8 | ship | | 9 | truck | The classes are mutually exclusive. For example, the images of trucks are large vehicles (fire trucks, dump trucks) rather than pickup trucks or SUVs (which could be considered automobiles). Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. End of explanation """ dataset = dict() # We'll create a dictionary for the training and testing/validation set images pathToSaveData = './path' # The path where neon will download and extract the data files cifar10 = CIFAR10(path=pathToSaveData, normalize=False, whiten=False, contrast_normalize=False) dataset['train'], dataset['validation'], numClasses = cifar10.load_data() """ Explanation: Load the Neon ModelZoo data for CIFAR-10 When you call CIFAR10's load_data() helper function, it will look for the CIFAR-10 data in the file path you've specified and download the file from the University of Toronto website if needed (the file is 163 MB). According to the website, the archive contains the files data_batch_1, data_batch_2, ..., data_batch_5, as well as test_batch. Each of these files is a Python "pickled" object produced with cPickle. These pickled batch files contain dictionaries with the images: data is a Nx3072 numpy array of unsigned 8-bit integers (uint8) (where N is the number of images; 50,000 for train and 10,000 for validation). Each row of the array stores a 32x32 color image. The first 1024 entries contain the red channel values, the next 1024 the green, and the final 1024 the blue. The image is stored in row-major order, so that the first 32 entries of the array are the red channel values of the first row of the image. labels is a list of N numbers (where N is the number of images; 50,000 for train and 10,000 for validation) in the range 0-9. The number at index i indicates the label of the ith image in the array data. End of explanation """ import matplotlib.pyplot as plt %matplotlib inline def getImage(dataset=dataset, setName='train', index=0): # The images are index 0 of the dictionary # They are stored as a 3072 element vector so we need to reshape this into a tensor. 
# The first dimension is the red/green/blue channel, the second is the pixel row, the third is the pixel column im = dataset[setName][0][index].reshape(3,32,32) # PIL and matplotlib want the red/green/blue channels last in the matrix. So we just need to rearrange # the tensor to put that dimension last. im = np.transpose(im, axes=[1, 2, 0]) # Put the 0-th dimension at the end # Image are supposed to be unsigned 8-bit integers. If we keep the raw images, then # this line is not needed. However, if we normalize or whiten the image, then the values become # floats. So we need to convert them back to uint8s. im = np.uint8(im) classIndex = dataset[setName][1][index][0] # This is the class label (0-9) classNames = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'] # Now use PIL's Image helper to turn our array into something that matplotlib will understand as an image. return [classIndex, classNames[classIndex], Image.fromarray(im)] idx, name, im = getImage(dataset, 'train', 1022) # Get image 1022 of the training data. plt.imshow(im); plt.title(name); plt.axis('off'); idx, name, im = getImage(dataset, 'train', 8888) # Get image 8888 of the training data plt.imshow(im); plt.title(name); plt.axis('off'); idx, name, im = getImage(dataset, 'train', 5002) # Get image 5002 of the training data plt.imshow(im); plt.title(name); plt.axis('off'); idx, name, im = getImage(dataset, 'train', 10022) # Get image 10022 of the training data plt.imshow(im); plt.title(name); plt.axis('off'); idx, name, im = getImage(dataset, 'train', 7022) # Get image 7022 of the training data plt.imshow(im); plt.title(name); plt.axis('off'); idx, name, im = getImage(dataset, 'validation', 1022) # Get image 1022 of the validation data plt.imshow(im); plt.title(name); plt.axis('off'); idx, name, im = getImage(dataset, 'validation', 1031) # Get image 1031 of the validation data plt.imshow(im); plt.title(name); plt.axis('off'); idx, name, im = getImage(dataset, 'validation', 9135) # Get image 9135 of the validation data plt.imshow(im); plt.title(name); plt.axis('off'); """ Explanation: Data format The images are loaded into the 'train' and 'validation' dictionary keys. There are 50,000 train images and 10,000 validation images. The first element [0] is the image data. The second element [1] is the image label (class 0-9). To display one of the images, we just need to reshape the data. We're given the 3x32x32 images as an unraveled 3,072 element vector. To display them properly, we need to reshape them to 3x32x32 and then move the 3 red-blue-green channels to the first dimension of the tensor. End of explanation """
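# As a quick usage example, the getImage helper above can preview one image from each of the ten
# classes in the training set; scanning indices until every class has been seen is just one simple
# way to do this:
found = {}
index = 0
while len(found) < 10:
    idx, name, im = getImage(dataset, 'train', index)
    if idx not in found:
        found[idx] = (name, im)
    index += 1

plt.figure(figsize=(12, 5))
for i, (name, im) in sorted(found.items()):
    plt.subplot(2, 5, i + 1)
    plt.imshow(im)
    plt.title(name)
    plt.axis('off')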
EmuKit/emukit
notebooks/Emukit-tutorial-multi-fidelity-bayesian-optimization.ipynb
apache-2.0
# Load function import emukit.test_functions.forrester # The multi-fidelity Forrester function is already wrapped as an Emukit UserFunction object in # the test_functions package forrester_fcn, _ = emukit.test_functions.forrester.multi_fidelity_forrester_function() forrester_fcn_low = forrester_fcn.f[0] forrester_fcn_high = forrester_fcn.f[1] # Assign costs low_fidelity_cost = 1 high_fidelity_cost = 5 """ Explanation: Multi-Fidelity Bayesian Optimization This notebook contains an example of using Emukit to perform Bayesian optimization on a function where low-fidelity approximations are availalbe. The high fidelity function to be optimized is the forreseter function given by: $$ f_{high}(x) = (6x - 2)^2 \sin(12x - 4) $$ The low fidelity approximation of this function is given by: $$ f_{low}(x) = 0.5 f_{high}(x) + 10 (x - 0.5) + 5 $$ Both are defined in the interval [0, 1]. Each evaluation of the high fidelity function costs $5$ units, whereas evaluating the low fidelity function costs $1$. We wish to find the location of the maximum of $f_{high}$ while taking advantage of the cheaper to evaluate $f_{low}$. End of explanation """ import matplotlib.pyplot as plt import matplotlib matplotlib.rcParams.update({'font.size': 16}) FIG_SIZE = (12, 8) import numpy as np np.random.seed(12345) from emukit.multi_fidelity.convert_lists_to_array import convert_x_list_to_array n_plot_points = 100 x_plot = np.linspace(0, 1, 500)[:, None] y_plot_low = forrester_fcn_low(x_plot) y_plot_high = forrester_fcn_high(x_plot) plt.figure(figsize=FIG_SIZE) plt.plot(x_plot, y_plot_low, 'b') plt.plot(x_plot, y_plot_high, 'r') plt.legend(['Low fidelity', 'High fidelity']) plt.xlim(0, 1) plt.title('High and low fidelity Forrester functions') plt.xlabel('x') plt.ylabel('y'); plt.show() """ Explanation: Plot Functions End of explanation """ from emukit.core import ParameterSpace, ContinuousParameter, InformationSourceParameter n_fidelities = 2 parameter_space = ParameterSpace([ContinuousParameter('x', 0, 1), InformationSourceParameter(n_fidelities)]) """ Explanation: Bayesian optimization Define Parameter Space The parameter space now contains two parameters: the first is a ContinuousParameter that is the $x$ input to the Forrester function. The second is an InformaionSourceParameter that tells Emukit whether a given fucntion evaluation is to be performed by the high or low fidelity function. End of explanation """ x_low = np.random.rand(12)[:, None] x_high = x_low[:6, :] y_low = forrester_fcn_low(x_low) y_high = forrester_fcn_high(x_high) """ Explanation: Generate Initial Data We shall randomly choose 12 low fidelity and then choose 6 of these points at which to evaluate the high fidelity function. 
End of explanation """ from emukit.multi_fidelity.models.linear_model import GPyLinearMultiFidelityModel import GPy from emukit.multi_fidelity.kernels.linear_multi_fidelity_kernel import LinearMultiFidelityKernel from emukit.multi_fidelity.convert_lists_to_array import convert_xy_lists_to_arrays from emukit.model_wrappers import GPyMultiOutputWrapper from GPy.models.gp_regression import GPRegression x_array, y_array = convert_xy_lists_to_arrays([x_low, x_high], [y_low, y_high]) kern_low = GPy.kern.RBF(1) kern_low.lengthscale.constrain_bounded(0.01, 0.5) kern_err = GPy.kern.RBF(1) kern_err.lengthscale.constrain_bounded(0.01, 0.5) multi_fidelity_kernel = LinearMultiFidelityKernel([kern_low, kern_err]) gpy_model = GPyLinearMultiFidelityModel(x_array, y_array, multi_fidelity_kernel, n_fidelities) gpy_model.likelihood.Gaussian_noise.fix(0.1) gpy_model.likelihood.Gaussian_noise_1.fix(0.1) model = GPyMultiOutputWrapper(gpy_model, 2, 5, verbose_optimization=False) model.optimize() x_plot_low = np.concatenate([np.atleast_2d(x_plot), np.zeros((x_plot.shape[0], 1))], axis=1) x_plot_high = np.concatenate([np.atleast_2d(x_plot), np.ones((x_plot.shape[0], 1))], axis=1) def plot_model(x_low, y_low, x_high, y_high): mean_low, var_low = model.predict(x_plot_low) mean_high, var_high = model.predict(x_plot_high) plt.figure(figsize=FIG_SIZE) def plot_with_error_bars(x, mean, var, color): plt.plot(x, mean, color=color) plt.fill_between(x.flatten(), mean.flatten() - 1.96*var.flatten(), mean.flatten() + 1.96*var.flatten(), alpha=0.2, color=color) plot_with_error_bars(x_plot_high[:, 0], mean_low, var_low, 'b') plot_with_error_bars(x_plot_high[:, 0], mean_high, var_high, 'r') plt.plot(x_plot, forrester_fcn_high(x_plot), 'k--') plt.scatter(x_low, y_low, color='b') plt.scatter(x_high, y_high, color='r') plt.legend(['Low fidelity model', 'High fidelity model', 'True high fidelity']) plt.title('Low and High Fidelity Models') plt.xlim(0, 1) plt.xlabel('x') plt.ylabel('y'); plt.show() plot_model(x_low, y_low, x_high, y_high) """ Explanation: Define Model We will use the linear multi-fidelity model defined in Emukit. In this model, the high-fidelity function is modelled as a scaled sum of the low-fidelity function plus an error term: $$ f_{high}(x) = f_{err}(x) + \rho \,f_{low}(x) $$ End of explanation """ from emukit.bayesian_optimization.acquisitions.entropy_search import MultiInformationSourceEntropySearch from emukit.core.acquisition import Acquisition # Define cost of different fidelities as acquisition function class Cost(Acquisition): def __init__(self, costs): self.costs = costs def evaluate(self, x): fidelity_index = x[:, -1].astype(int) x_cost = np.array([self.costs[i] for i in fidelity_index]) return x_cost[:, None] @property def has_gradients(self): return True def evaluate_with_gradients(self, x): return self.evalute(x), np.zeros(x.shape) cost_acquisition = Cost([low_fidelity_cost, high_fidelity_cost]) acquisition = MultiInformationSourceEntropySearch(model, parameter_space) / cost_acquisition """ Explanation: Define Acquisition Function As in [1] & [2] we shall use the entropy search acquisition function, scaled by the cost of evaluating either the high or low fidelity function. 
End of explanation """ from emukit.core.loop import FixedIntervalUpdater, OuterLoop, SequentialPointCalculator from emukit.core.loop.loop_state import create_loop_state from emukit.core.optimization.multi_source_acquisition_optimizer import MultiSourceAcquisitionOptimizer from emukit.core.optimization import GradientAcquisitionOptimizer initial_loop_state = create_loop_state(x_array, y_array) acquisition_optimizer = MultiSourceAcquisitionOptimizer(GradientAcquisitionOptimizer(parameter_space), parameter_space) candidate_point_calculator = SequentialPointCalculator(acquisition, acquisition_optimizer) model_updater = FixedIntervalUpdater(model) loop = OuterLoop(candidate_point_calculator, model_updater, initial_loop_state) """ Explanation: Create OuterLoop End of explanation """ def plot_acquisition(loop, loop_state): colours = ['b', 'r'] plt.plot(x_plot_low[:, 0], loop.candidate_point_calculator.acquisition.evaluate(x_plot_low), 'b') plt.plot(x_plot_high[:, 0], loop.candidate_point_calculator.acquisition.evaluate(x_plot_high), 'r') previous_x_collected = loop_state.X[[-1], :] fidelity_idx = int(previous_x_collected[0, -1]) plt.scatter(previous_x_collected[0, 0], loop.candidate_point_calculator.acquisition.evaluate(previous_x_collected), color=colours[fidelity_idx]) plt.legend(['Low fidelity', 'High fidelity'], fontsize=12) plt.title('Acquisition Function at Iteration ' + str(loop_state.iteration)) plt.xlabel('x') plt.xlim(0, 1) plt.ylabel('Acquisition Value') plt.tight_layout() plt.show() loop.iteration_end_event.append(plot_acquisition) """ Explanation: Add Plotting of Acquisition Function To see how the acquisition function evolves. This is done by using the iteration_end_event on the OuterLoop. This is a list of functions where each function should have the signature: function(loop, loop_state). All functions in the list are called after each iteration of the optimization loop. End of explanation """ x_search = np.stack([np.linspace(0, 1, 1000), np.ones(1000)], axis=1) model_min_mean = [] model_min_loc = [] def calculate_metrics(loop, loop_state): mean, var = loop.model_updaters[0].model.predict(x_search) model_min_mean.append(np.min(mean)) model_min_loc.append(x_search[np.argmin(mean), 0]) # subscribe to event loop.iteration_end_event.append(calculate_metrics) """ Explanation: Find Esimated Minimum at Every Iteration On each iteration of the optimization loop, find the minimum value of the high fidelity model. 
End of explanation """ loop.run_loop(forrester_fcn, 10) """ Explanation: Run Optimization End of explanation """ is_high_fidelity = loop.loop_state.X[:, -1] == 1 plot_model(x_low=loop.loop_state.X[~is_high_fidelity, 0], y_low=loop.loop_state.Y[~is_high_fidelity], x_high=loop.loop_state.X[is_high_fidelity, 0], y_high=loop.loop_state.Y[is_high_fidelity]) """ Explanation: Plot Final Model End of explanation """ from emukit.bayesian_optimization.loops import BayesianOptimizationLoop from emukit.bayesian_optimization.acquisitions.entropy_search import EntropySearch from emukit.model_wrappers import GPyModelWrapper import GPy # Make model gpy_model = GPy.models.GPRegression(x_high, y_high) gpy_model.Gaussian_noise.variance.fix(0.1) hf_only_model = GPyModelWrapper(gpy_model) # Create loop hf_only_space = ParameterSpace([ContinuousParameter('x', 0, 1)]) hf_only_acquisition = EntropySearch(hf_only_model, hf_only_space) hf_only_loop = BayesianOptimizationLoop(hf_only_space, hf_only_model, hf_only_acquisition) # Calculate best guess at minimum at each iteration of loop hf_only_model_min_mean = [] x_search = np.linspace(0, 1, 1000)[:, None] hf_only_model_min_loc = [] def calculate_metrics(loop, loop_state): mean, var = loop.model_updaters[0].model.predict(x_search) hf_only_model_min_mean.append(np.min(mean)) hf_only_model_min_loc.append(x_search[np.argmin(mean)]) # subscribe to event hf_only_loop.iteration_end_event.append(calculate_metrics) # Run optimization hf_only_loop.run_loop(forrester_fcn_high, 10) """ Explanation: Comparison to High Fidelity Only Bayesian Optimization This section compares the multi-fidelity optimization to Bayesian optimization using high fidelity observations only. End of explanation """ # Plot comparison plt.figure(figsize=FIG_SIZE) x = np.array(range(len(model_min_mean))) + 1 # Calculate cumulative cost of evaluating high fidelity only observations n_hf_points = hf_only_loop.loop_state.X.shape[0] cumulative_cost_hf = high_fidelity_cost * (np.array(range(n_hf_points)) + 1) cumulative_cost_hf = cumulative_cost_hf[x_high.shape[0]:] # Calculate cumulative cost of evaluating multi-fidelity observations cost_mf = cost_acquisition.evaluate(loop.loop_state.X) cumulative_cost_mf = np.cumsum(cost_mf) cumulative_cost_mf = cumulative_cost_mf[x_array.shape[0]:] x_min = np.min([cumulative_cost_hf, cumulative_cost_mf]) x_max = np.max([cumulative_cost_hf, cumulative_cost_mf]) plt.plot(cumulative_cost_hf, hf_only_model_min_loc, 'm', marker='x', markersize=16) plt.plot(cumulative_cost_mf, model_min_loc, 'c', marker='.', markersize=16) plt.hlines(x_search[np.argmin(forrester_fcn_high(x_search))], x_min, x_max, color='k', linestyle='--') plt.legend(['High fidelity only optimization', 'Multi-fidelity only optimization', 'True minimum']) plt.title('Comparison of Multi-Fidelity and High Fidelity Only Optimizations') plt.ylabel('Estimated Location of Minimum') plt.xlabel('Cumulative Cost of Evaluting Objective'); plt.show() """ Explanation: Plot Estimated Minimum Location End of explanation """
wangzexian/summrerschool2015
theano_mlp/theano_mlp.ipynb
bsd-3-clause
import numpy import theano from theano import tensor # Set lower precision float, otherwise the notebook will take too long to run theano.config.floatX = 'float32' class HiddenLayer(object): def __init__(self, rng, input, n_in, n_out, W=None, b=None, activation=tensor.tanh): """ Typical hidden layer of a MLP: units are fully-connected and have sigmoidal activation function. Weight matrix W is of shape (n_in,n_out) and the bias vector b is of shape (n_out,). NOTE : The nonlinearity used here is tanh Hidden unit activation is given by: tanh(dot(input,W) + b) :type rng: numpy.random.RandomState :param rng: a random number generator used to initialize weights :type input: theano.tensor.dmatrix :param input: a symbolic tensor of shape (n_examples, n_in) :type n_in: int :param n_in: dimensionality of input :type n_out: int :param n_out: number of hidden units :type activation: theano.Op or function :param activation: Non linearity to be applied in the hidden layer """ self.input = input # `W` is initialized with `W_values` which is uniformely sampled # from sqrt(-6./(n_in+n_hidden)) and sqrt(6./(n_in+n_hidden)) # for tanh activation function # the output of uniform if converted using asarray to dtype # theano.config.floatX so that the code is runable on GPU # Note : optimal initialization of weights is dependent on the # activation function used (among other things). # For example, results presented in Glorot & Bengio (2010) # suggest that you should use 4 times larger initial weights # for sigmoid compared to tanh if W is None: W_values = numpy.asarray( rng.uniform( low=-numpy.sqrt(6. / (n_in + n_out)), high=numpy.sqrt(6. / (n_in + n_out)), size=(n_in, n_out) ), dtype=theano.config.floatX ) if activation == tensor.nnet.sigmoid: W_values *= 4 W = theano.shared(value=W_values, name='W', borrow=True) if b is None: b_values = numpy.zeros((n_out,), dtype=theano.config.floatX) b = theano.shared(value=b_values, name='b', borrow=True) self.W = W self.b = b lin_output = tensor.dot(input, self.W) + self.b self.output = ( lin_output if activation is None else activation(lin_output) ) # parameters of the model self.params = [self.W, self.b] """ Explanation: Multilayer Perceptron in Theano This notebook describes how to implement the building blocks for a multilayer perceptron in Theano, in particular how to define and combine layers. We will continue using the MNIST digits classification dataset, still using Fuel. The Model We will focus on fully-connected layers, with an elementwise non-linearity on each hidden layer, and a softmax layer (similar to the logistic regression model) for classification on the top layer. A class for hidden layers This class does all its work in its constructor: - Create and initialize shared variables for its parameters (W and b), unless there are explicitly provided. Note that the initialization scheme for W is the one described in Glorot & Bengio (2010). - Build the Theano expression for the value of the output units, given a variable for the input. - Store the input, output, and shared parameters as members. End of explanation """ class LogisticRegression(object): """Multi-class Logistic Regression Class The logistic regression is fully described by a weight matrix :math:`W` and bias vector :math:`b`. Classification is done by projecting data points onto a set of hyperplanes, the distance to which is used to determine a class membership probability. 
""" def __init__(self, input, target, n_in, n_out): """ Initialize the parameters of the logistic regression :type input: theano.tensor.TensorType :param input: symbolic variable that describes the input of the architecture (one minibatch) :type target: theano.tensor.TensorType :type target: column tensor that describes the target for training :type n_in: int :param n_in: number of input units, the dimension of the space in which the datapoints lie :type n_out: int :param n_out: number of output units, the dimension of the space in which the labels lie """ # keep track of model input and target. # We store a flattened (vector) version of target as y, which is easier to handle self.input = input self.target = target self.y = target.flatten() self.W = theano.shared(value=numpy.zeros((n_in, n_out), dtype=theano.config.floatX), name='W', borrow=True) self.b = theano.shared(value=numpy.zeros((n_out,), dtype=theano.config.floatX), name='b', borrow=True) # class-membership probabilities self.p_y_given_x = tensor.nnet.softmax(tensor.dot(input, self.W) + self.b) # class whose probability is maximal self.y_pred = tensor.argmax(self.p_y_given_x, axis=1) # parameters of the model self.params = [self.W, self.b] def negative_log_likelihood(self): """Return the mean of the negative log-likelihood of the prediction of this model under a given target distribution. Note: we use the mean instead of the sum so that the learning rate is less dependent on the batch size """ log_prob = tensor.log(self.p_y_given_x) log_likelihood = log_prob[tensor.arange(self.y.shape[0]), self.y] loss = - log_likelihood.mean() return loss def errors(self): """Return a float representing the number of errors in the minibatch over the total number of examples of the minibatch """ misclass_nb = tensor.neq(self.y_pred, self.y) misclass_rate = misclass_nb.mean() return misclass_rate """ Explanation: A softmax class for the output This class performs computations similar to what was performed in the logistic regression tutorial. Here as well, the expression for the output is built in the class constructor, which takes the input as argument. We also add the target, y, and store it as an argument. End of explanation """ class MLP(object): """Multi-Layer Perceptron Class A multilayer perceptron is a feedforward artificial neural network model that has one layer or more of hidden units and nonlinear activations. Intermediate layers usually have as activation function tanh or the sigmoid function (defined here by a ``HiddenLayer`` class) while the top layer is a softmax layer (defined here by a ``LogisticRegression`` class). 
""" def __init__(self, rng, input, target, n_in, n_hidden, n_out, activation=tensor.tanh): """Initialize the parameters for the multilayer perceptron :type rng: numpy.random.RandomState :param rng: a random number generator used to initialize weights :type input: theano.tensor.TensorType :param input: symbolic variable that describes the input of the architecture (one minibatch) :type target: theano.tensor.TensorType :type target: column tensor that describes the target for training :type n_in: int :param n_in: number of input units, the dimension of the space in which the datapoints lie :type n_hidden: list of int :param n_hidden: number of hidden units in each hidden layer :type n_out: int :param n_out: number of output units, the dimension of the space in which the labels lie :type activation: theano.Op or function :param activation: Non linearity to be applied in all hidden layers """ # keep track of model input and target. # We store a flattened (vector) version of target as y, which is easier to handle self.input = input self.target = target self.y = target.flatten() # Build all necessary hidden layers and chain them self.hidden_layers = [] layer_input = input layer_n_in = n_in for nh in n_hidden: hidden_layer = HiddenLayer( rng=rng, input=layer_input, n_in=layer_n_in, n_out=nh, activation=activation) self.hidden_layers.append(hidden_layer) # prepare variables for next layer layer_input = hidden_layer.output layer_n_in = nh # The logistic regression layer gets as input the hidden units of the hidden layer, # and the target self.log_reg_layer = LogisticRegression( input=layer_input, target=target, n_in=layer_n_in, n_out=n_out) # self.params has all the parameters of the model, # self.weights contains only the `W` variables. # We also give unique name to the parameters, this will be useful to save them. self.params = [] self.weights = [] layer_idx = 0 for hl in self.hidden_layers: self.params.extend(hl.params) self.weights.append(hl.W) for hlp in hl.params: prev_name = hlp.name hlp.name = 'layer' + str(layer_idx) + '.' + prev_name layer_idx += 1 self.params.extend(self.log_reg_layer.params) self.weights.append(self.log_reg_layer.W) for lrp in self.log_reg_layer.params: prev_name = lrp.name lrp.name = 'layer' + str(layer_idx) + '.' + prev_name # L1 norm ; one regularization option is to enforce L1 norm to be small self.L1 = sum(abs(W).sum() for W in self.weights) # square of L2 norm ; one regularization option is to enforce square of L2 norm to be small self.L2_sqr = sum((W ** 2).sum() for W in self.weights) def negative_log_likelihood(self): # negative log likelihood of the MLP is given by the negative # log likelihood of the output of the model, computed in the # logistic regression layer return self.log_reg_layer.negative_log_likelihood() def errors(self): # same holds for the function computing the number of errors return self.log_reg_layer.errors() """ Explanation: The MLP class That class brings together the different parts of the model. It also adds additional controls on the training of the full network, for instance an expression for L1 or L2 regularization (weight decay). We can specify an arbitrary number of hidden layers, providing an empty one will reproduce the logistic regression model. 
End of explanation """ def nll_grad(mlp_model): loss = mlp_model.negative_log_likelihood() params = mlp_model.params grads = theano.grad(loss, wrt=params) # Return (param, grad) pairs return zip(params, grads) def sgd_updates(params_and_grads, learning_rate): return [(param, param - learning_rate * grad) for param, grad in params_and_grads] def get_simple_training_fn(mlp_model, learning_rate): inputs = [mlp_model.input, mlp_model.target] params_and_grads = nll_grad(mlp_model) updates = sgd_updates(params_and_grads, learning_rate=lr) return theano.function(inputs=inputs, outputs=[], updates=updates) def regularized_cost_grad(mlp_model, L1_reg, L2_reg): loss = (mlp_model.negative_log_likelihood() + L1_reg * mlp_model.L1 + L2_reg * mlp_model.L2_sqr) params = mlp_model.params grads = theano.grad(loss, wrt=params) # Return (param, grad) pairs return zip(params, grads) def get_regularized_training_fn(mlp_model, L1_reg, L2_reg, learning_rate): inputs = [mlp_model.input, mlp_model.target] params_and_grads = regularized_cost_grad(mlp_model, L1_reg, L2_reg) updates = sgd_updates(params_and_grads, learning_rate=lr) return theano.function(inputs, updates=updates) """ Explanation: Training Procedure We will re-use the same training algorithm: stochastic gradient descent with mini-batches, and the same early-stopping criterion. Here, the number of parameters to train is variable, and we have to wait until the MLP model is actually instantiated to have an expression for the cost and the updates. Gradient and Updates Let us define helper functions for getting expressions for the gradient of the cost wrt the parameters, and the parameter updates. The following ones are simple, but many variations can exist, for instance: - regularized costs, including L1 or L2 regularization - more complex learning rules, such as momentum, RMSProp, ADAM, ... End of explanation """ def get_test_fn(mlp_model): return theano.function([mlp_model.input, mlp_model.target], mlp_model.errors()) """ Explanation: Testing function End of explanation """ import timeit from fuel.streams import DataStream from fuel.schemes import SequentialScheme from fuel.transformers import Flatten ## early-stopping parameters tuned for 1-2 min runtime def sgd_training(train_model, test_model, train_set, valid_set, test_set, model_name='mlp_model', # maximum number of epochs n_epochs=20, # look at this many examples regardless patience=5000, # wait this much longer when a new best is found patience_increase=2, # a relative improvement of this much is considered significant improvement_threshold=0.995, batch_size=20): n_train_batches = train_set.num_examples // batch_size # Create data streams to iterate through the data. train_stream = Flatten(DataStream.default_stream( train_set, iteration_scheme=SequentialScheme(train_set.num_examples, batch_size))) valid_stream = Flatten(DataStream.default_stream( valid_set, iteration_scheme=SequentialScheme(valid_set.num_examples, batch_size))) test_stream = Flatten(DataStream.default_stream( test_set, iteration_scheme=SequentialScheme(test_set.num_examples, batch_size))) # go through this many minibatches before checking the network on the validation set; # in this case we check every epoch validation_frequency = min(n_train_batches, patience / 2) best_validation_loss = numpy.inf test_score = 0. 
start_time = timeit.default_timer() done_looping = False epoch = 0 while (epoch < n_epochs) and (not done_looping): epoch = epoch + 1 minibatch_index = 0 for minibatch_x, minibatch_y in train_stream.get_epoch_iterator(): train_model(minibatch_x, minibatch_y) # iteration number iter = (epoch - 1) * n_train_batches + minibatch_index if (iter + 1) % validation_frequency == 0: # compute zero-one loss on validation set validation_losses = [] for valid_xi, valid_yi in valid_stream.get_epoch_iterator(): validation_losses.append(test_model(valid_xi, valid_yi)) this_validation_loss = numpy.mean(validation_losses) print('epoch %i, minibatch %i/%i, validation error %f %%' % (epoch, minibatch_index + 1, n_train_batches, this_validation_loss * 100.)) # if we got the best validation score until now if this_validation_loss < best_validation_loss: # improve patience if loss improvement is good enough if this_validation_loss < best_validation_loss * improvement_threshold: patience = max(patience, iter * patience_increase) best_validation_loss = this_validation_loss # test it on the test set test_losses = [] for test_xi, test_yi in test_stream.get_epoch_iterator(): test_losses.append(test_model(test_xi, test_yi)) test_score = numpy.mean(test_losses) print(' epoch %i, minibatch %i/%i, test error of best model %f %%' % (epoch, minibatch_index + 1, n_train_batches, test_score * 100.)) # save the best parameters # build a name -> value dictionary best = {param.name: param.get_value() for param in mlp_model.params} numpy.savez('best_{}.npz'.format(model_name), **best) minibatch_index += 1 if patience <= iter: done_looping = True break end_time = timeit.default_timer() print('Optimization complete with best validation score of %f %%, ' 'with test performance %f %%' % (best_validation_loss * 100., test_score * 100.)) print('The code ran for %d epochs, with %f epochs/sec (%.2fm total time)' % (epoch, 1. * epoch / (end_time - start_time), (end_time - start_time) / 60.)) """ Explanation: Training the Model Training procedure We first need to define a few parameters for the training loop and the early stopping procedure. End of explanation """ from fuel.datasets import MNIST # the full set is usually (0, 50000) for train, (50000, 60000) for valid and no slice for test. # We only selected a subset to go faster. train_set = MNIST(which_sets=('train',), sources=('features', 'targets'), subset=slice(0, 20000)) valid_set = MNIST(which_sets=('train',), sources=('features', 'targets'), subset=slice(20000, 24000)) test_set = MNIST(which_sets=('test',), sources=('features', 'targets')) """ Explanation: We then load our data set. End of explanation """ rng = numpy.random.RandomState(1234) x = tensor.matrix('x') # The labels coming from Fuel are in a "column" format y = tensor.icol('y') n_in = 28 * 28 n_out = 10 mlp_model = MLP( rng=rng, input=x, target=y, n_in=n_in, n_hidden=[500], n_out=n_out, activation=tensor.tanh) lr = numpy.float32(0.1) L1_reg = numpy.float32(0) L2_reg = numpy.float32(0.0001) train_model = get_regularized_training_fn(mlp_model, L1_reg, L2_reg, lr) test_model = get_test_fn(mlp_model) """ Explanation: Build the Model Now is the time to specify and build a particular instance of the MLP. Let's start with one with a single hidden layer of 500 hidden units, and a tanh non-linearity. 
End of explanation """ sgd_training(train_model, test_model, train_set, valid_set, test_set) """ Explanation: Launch the training phase End of explanation """ def relu(x): return x * (x > 0) rng = numpy.random.RandomState(1234) mlp_relu = MLP( rng=rng, input=x, target=y, n_in=n_in, n_hidden=[500], n_out=n_out, activation=relu) lr = numpy.float32(0.1) L1_reg = numpy.float32(0) L2_reg = numpy.float32(0.0001) train_relu = get_regularized_training_fn(mlp_relu, L1_reg, L2_reg, lr) test_relu = get_test_fn(mlp_relu) sgd_training(train_relu, test_relu, train_set, valid_set, test_set, model_name='mlp_relu') """ Explanation: How can we make it better? Max-column normalization Dropout ReLU activation End of explanation """ # This implements simple momentum def get_momentum_updates(params_and_grads, lr, rho): res = [] # numpy will promote (1 - rho) to float64 otherwise one = numpy.float32(1.) for p, g in params_and_grads: up = theano.shared(p.get_value() * 0) res.append((p, p - lr * up)) res.append((up, rho * up + (one - rho) * g)) return res # This implements the parameter updates for Adadelta def get_adadelta_updates(params_and_grads, rho): up2 = [theano.shared(p.get_value() * 0, name="up2 for " + p.name) for p, g in params_and_grads] grads2 = [theano.shared(p.get_value() * 0, name="grads2 for " + p.name) for p, g in params_and_grads] # This is dumb but numpy will promote (1 - rho) to float64 otherwise one = numpy.float32(1.) rg2up = [(rg2, rho * rg2 + (one - rho) * (g ** 2)) for rg2, (p, g) in zip(grads2, params_and_grads)] updir = [-(tensor.sqrt(ru2 + 1e-6) / tensor.sqrt(rg2 + 1e-6)) * g for (p, g), ru2, rg2 in zip(params_and_grads, up2, grads2)] ru2up = [(ru2, rho * ru2 + (one - rho) * (ud ** 2)) for ru2, ud in zip(up2, updir)] param_up = [(p, p + ud) for (p, g), ud in zip(params_and_grads, updir)] return rg2up + ru2up + param_up # You can try to write an RMSProp function and train the model with it. def get_momentum_training_fn(mlp_model, L1_reg, L2_reg, lr, rho): inputs = [mlp_model.input, mlp_model.target] params_and_grads = regularized_cost_grad(mlp_model, L1_reg, L2_reg) updates = get_momentum_updates(params_and_grads, lr=lr, rho=rho) return theano.function(inputs, updates=updates) rng = numpy.random.RandomState(1234) x = tensor.matrix('x') # The labels coming from Fuel are in a "column" format y = tensor.icol('y') n_in = 28 * 28 n_out = 10 mlp_model = MLP( rng=rng, input=x, target=y, n_in=n_in, n_hidden=[500], n_out=n_out, activation=tensor.tanh) lr = numpy.float32(0.1) L1_reg = numpy.float32(0) L2_reg = numpy.float32(0.0001) rho = numpy.float32(0.95) momentum_train = get_momentum_training_fn(mlp_model, L1_reg, L2_reg, lr=lr, rho=rho) test_fn = get_test_fn(mlp_model) sgd_training(momentum_train, test_fn, train_set, valid_set, test_set, n_epochs=20, model_name='mlp_momentum') """ Explanation: Momentum training (Adadelta, RMSProp, ...) End of explanation """
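# One possible RMSProp implementation, written in the same style as the momentum and Adadelta update
# functions above (the decay rate rho, the epsilon and the learning rate are illustrative, untuned choices):
def get_rmsprop_updates(params_and_grads, lr, rho):
    one = numpy.float32(1.)
    eps = numpy.float32(1e-6)
    grads2 = [theano.shared(p.get_value() * 0, name="grads2 for " + p.name)
              for p, g in params_and_grads]
    # running average of the squared gradient for each parameter
    rg2up = [(rg2, rho * rg2 + (one - rho) * (g ** 2))
             for rg2, (p, g) in zip(grads2, params_and_grads)]
    # scale each step by the root of that running average
    param_up = [(p, p - lr * g / tensor.sqrt(rg2 + eps))
                for (p, g), rg2 in zip(params_and_grads, grads2)]
    return rg2up + param_up

def get_rmsprop_training_fn(mlp_model, L1_reg, L2_reg, lr, rho):
    inputs = [mlp_model.input, mlp_model.target]
    params_and_grads = regularized_cost_grad(mlp_model, L1_reg, L2_reg)
    updates = get_rmsprop_updates(params_and_grads, lr=lr, rho=rho)
    return theano.function(inputs, updates=updates)

# It could then be used exactly like the momentum version, for example:
# rmsprop_train = get_rmsprop_training_fn(mlp_model, L1_reg, L2_reg, lr=lr, rho=numpy.float32(0.9))
# sgd_training(rmsprop_train, test_fn, train_set, valid_set, test_set, n_epochs=20, model_name='mlp_rmsprop')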
Cyb3rWard0g/ThreatHunter-Playbook
docs/notebooks/windows/08_lateral_movement/WIN-201012004336.ipynb
gpl-3.0
from openhunt.mordorutils import * spark = get_spark() """ Explanation: SMB Create Remote File Metadata | | | |:------------------|:---| | collaborators | ['@Cyb3rWard0g', '@Cyb3rPandaH'] | | creation date | 2020/10/12 | | modification date | 2020/10/12 | | playbook related | [] | Hypothesis Adversaries might be creating a file remotely via the Server Message Block (SMB) Protocol. Technical Context Client systems use the Common Internet File System (CIFS) Protocol to request file and print services from server systems over a network. CIFS is a stateful protocol, in which clients establish a session with a server and use that session to make a variety of requests to access files, printers, and inter-process communication (IPC) mechanisms, such as named pipes. The extended CIFS Protocol is known as the Server Message Block (SMB). The SMB2 CREATE Request packet is sent by a client to request either creation of or access to a file. In case of a named pipe or printer, the server MUST create a new file. Offensive Tradecraft Adversaries leverage SMB to copy files over the network to either execute code remotely or exfiltrate data. Mordor Test Data | | | |:----------|:----------| | metadata | https://mordordatasets.com/notebooks/small/windows/08_lateral_movement/SDWIN-200806015757.html | | link | https://raw.githubusercontent.com/OTRF/mordor/master/datasets/small/windows/lateral_movement/host/covenant_copy_smb_CreateRequest.zip | Analytics Initialize Analytics Engine End of explanation """ mordor_file = "https://raw.githubusercontent.com/OTRF/mordor/master/datasets/small/windows/lateral_movement/host/covenant_copy_smb_CreateRequest.zip" registerMordorSQLTable(spark, mordor_file, "mordorTable") """ Explanation: Download & Process Mordor Dataset End of explanation """ df = spark.sql( ''' SELECT `@timestamp`, Hostname, ShareName, SubjectUserName, SubjectLogonId, AccessMask FROM mordorTable WHERE LOWER(Channel) = 'security' AND (EventID = 5140) AND NOT ShareName LIKE '%IPC$' AND NOT SubjectUserName LIKE '%$' ''' ) df.show(10,False) """ Explanation: Analytic I Look for non-system accounts SMB connecting (Tree Connect) to a file share that is not IPC$. | Data source | Event Provider | Relationship | Event | |:------------|:---------------|--------------|-------| | File | Microsoft-Windows-Security-Auditing | User accessed file share | 5140 | End of explanation """ df = spark.sql( ''' SELECT `@timestamp`, Hostname, ShareName, SubjectUserName, b.SubjectLogonId, IpAddress, IpPort FROM mordorTable b INNER JOIN ( SELECT SubjectLogonId FROM mordorTable WHERE LOWER(Channel) = "security" AND EventID = 5140 AND ShareName LIKE '%IPC$' AND NOT SubjectUserName LIKE '%$' ) a ON b.SubjectLogonId = a.SubjectLogonId WHERE LOWER(b.Channel) = 'security' AND b.EventID = 5140 AND b.ShareName LIKE '%C$' AND NOT SubjectUserName LIKE '%$' ''' ) df.show(10,False) """ Explanation: Analytic II Look for non-system accounts SMB connecting (Tree Connect) to an IPC$ Share and administrative shares (i.e C$) with the same logon session ID. 
| Data source | Event Provider | Relationship | Event | |:------------|:---------------|--------------|-------| | File | Microsoft-Windows-Security-Auditing | User accessed file share | 5140 | End of explanation """ df = spark.sql( ''' SELECT `@timestamp`, Hostname, ShareName, SubjectUserName, SubjectLogonId, IpAddress, IpPort, RelativeTargetName FROM mordorTable WHERE LOWER(Channel) = "security" AND EventID = 5145 AND ShareName LIKE '%C$' AND NOT SubjectUserName LIKE '%$' AND AccessMask = '0x2' ''' ) df.show(10,False) """ Explanation: Analytic III Look for non-system accounts SMB accessing a file with write (0x2) access mask via administrative share (i.e C$). | Data source | Event Provider | Relationship | Event | |:------------|:---------------|--------------|-------| | File | Microsoft-Windows-Security-Auditing | User accessed File | 5145 | End of explanation """ df = spark.sql( ''' SELECT `@timestamp`, Hostname, ShareName, SubjectUserName, d.SubjectLogonId, IpAddress, IpPort, RelativeTargetName FROM mordorTable d INNER JOIN ( SELECT b.SubjectLogonId FROM mordorTable b INNER JOIN ( SELECT SubjectLogonId FROM mordorTable WHERE LOWER(Channel) = "security" AND EventID = 5140 AND ShareName LIKE '%IPC$' AND NOT SubjectUserName LIKE '%$' ) a ON b.SubjectLogonId = a.SubjectLogonId WHERE LOWER(b.Channel) = 'security' AND b.EventID = 5140 AND b.ShareName LIKE '%C$' ) c ON d.SubjectLogonId = c.SubjectLogonId WHERE LOWER(d.Channel) = 'security' AND d.EventID = 5145 AND d.ShareName LIKE '%C$' AND d.AccessMask = '0x2' ''' ) df.show(10,False) """ Explanation: Analytic IV Look for non-system accounts SMB connecting (Tree Connect) to an IPC$ Share and administrative shares (i.e C$) and accessing/creating a file with write (0x2) access mask with the same logon session ID. | Data source | Event Provider | Relationship | Event | |:------------|:---------------|--------------|-------| | File | Microsoft-Windows-Security-Auditing | User accessed file share | 5140 | | File | Microsoft-Windows-Security-Auditing | User accessed File | 5145 | End of explanation """ df = spark.sql( ''' SELECT `@timestamp`, Hostname, ShareName, SubjectUserName, SubjectLogonId, IpAddress, IpPort, RelativeTargetName FROM mordorTable b INNER JOIN ( SELECT LOWER(REVERSE(SPLIT(TargetFilename, '\'))[0]) as TargetFilename FROM mordorTable WHERE Channel = 'Microsoft-Windows-Sysmon/Operational' AND Image = 'System' AND EventID = 11 ) a ON LOWER(REVERSE(SPLIT(RelativeTargetName, '\'))[0]) = a.TargetFilename WHERE LOWER(b.Channel) = 'security' AND b.EventID = 5145 AND b.AccessMask = '0x2' ''' ) df.show(10,False) """ Explanation: Analytic V Look for files that were accessed over the network with write (0x2) access mask via administrative shares (i.e C$) and that were created by the System process on the target system. | Data source | Event Provider | Relationship | Event | |:------------|:---------------|--------------|-------| | File | Microsoft-Windows-Security-Auditing | User accessed File | 5145 | | File | Microsoft-Windows-Sysmon/Operational | Process created File | 11 | End of explanation """
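# A possible follow-up triage query (a sketch, not one of the analytics defined above): summarize how
# many distinct files each non-system account wrote over administrative shares, to spot noisy accounts.
df = spark.sql(
    '''
    SELECT SubjectUserName, Hostname, COUNT(DISTINCT RelativeTargetName) as files_written
    FROM mordorTable
    WHERE LOWER(Channel) = 'security'
        AND EventID = 5145
        AND ShareName LIKE '%C$'
        AND NOT SubjectUserName LIKE '%$'
        AND AccessMask = '0x2'
    GROUP BY SubjectUserName, Hostname
    ORDER BY files_written DESC
    '''
)
df.show(10,False)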
mathcoding/programming
notebooks/Lab3_RadiceQuadrata.ipynb
mit
def Enumerate(y, x):
    # print(y)
    if y == 0:
        return -1
    if x == y*y:
        return y
    return Enumerate(y-1, x)

print(Enumerate(16, 16))
print(Enumerate(15, 15))
"""
Explanation: Computing the square root of a number
The procedures we have introduced so far are essentially mathematical functions that specify a value computed from one or more parameters. Unlike mathematical functions, procedures defined on a computer must also be efficient, that is, they must finish their execution in a short time. Let us now look at a simple example of what it means for a procedure to be efficient. Take, for example, the following mathematical definition:
$$\sqrt{x} = y \quad \mbox{ if and only if }\quad y \geq 0 \mbox{ and } y^2 = x$$
This definition is correct from a mathematical point of view, and we could use it to check whether a given number is the square root of another. However, it does not describe a procedure for computing the square root of a number. To compute the square root of a number we need an algorithm, which is implemented with one or more procedures.
Exhaustive enumeration (brute force)
If we restrict ourselves to integers, we could use the definition above to find the square root of a positive integer by exhaustive enumeration: to find the square root of $x$, we can try every number from $x$ down to 1 and check each time whether the "candidate" number squared equals $x$.
EXERCISE 2.1: Write a procedure (function) that takes as input an integer greater than or equal to 1 and tries every number from $x$ down to 1. If it finds the exact square root it returns it, otherwise it returns $-1$.
End of explanation
"""
1/10+1/10+1/10 == 3/10
"""
Explanation: QUESTION: How can we find the square root of a positive real number?
QUESTION: What does it mean for two real numbers to be equal?
EXAMPLE: How is the following logical expression evaluated?
$$\frac{1}{10}+\frac{1}{10}+\frac{1}{10} = \frac{3}{10}$$
End of explanation
"""
def Abs(x):
    if x < 0:
        return -x
    return x

def Istess(a,b):
    return Abs(a-b) < 0.0001

def SqrtReals(x, a, b):
    print(x, a, b)
    y = (a+b)/2
    if Istess(x, y*y):
        return y
    else:
        if y*y > x:
            return SqrtReals(x, a, y)
        else:
            return SqrtReals(x, y, b)

print(SqrtReals(36, 0, 36))
"""
Explanation: NOTE: You have to be very careful when working with real numbers on a computer! We therefore revise the definition of the square root slightly, introducing the concept of numerical tolerance. In practice, we look for a positive real number such that
$$\sqrt{x} = y \quad \mbox{ such that } \quad |y^2 - x| < \epsilon, \quad y \geq 0 $$
where $\epsilon$ is a very small constant, for example $10^{-7}$.
Bisection search
We could try to improve on the exhaustive-enumeration procedure by exploiting the fact that the numbers we are checking are ordered. We could avoid checking every number one at a time and instead make "jumps".
EXERCISE 2.2: Write a procedure that, in order to find the square root of a number, splits the search interval into two equal halves at every step and keeps exploring only the half that can actually contain the root we are looking for.
End of explanation
"""
def Newton(x, y):
    # print(x, x/y, y)
    if Istess(x, y*y):
        return y
    return Newton(x, (y+x/y)/2)

print(Newton(2, 1))
"""
Explanation: Newton's method
The most widely used method for computing square roots is the method of successive approximations introduced by Newton. The method finds the solution through successive adjustments of a tentative solution: if we have a value $y$ that is supposed to be a tentative value of the square root of another number $x$, we can obtain a better approximation by taking the average of $y$ and $x/y$.
Newton's method therefore consists of starting from some $y_0$ and computing, at each iteration:
$$y_{i+1} = \frac{y_i + \frac{x}{y_i}}{2}$$
EXAMPLE: Searching for the square root of 2.
| Tentative value $y$ | Quotient $x/y$ | Average of $y$ and $x/y$ |
| :--: | :--: | :--: |
| 1 | 2/1 | (1+2)/2=1.5 |
| 1.5 | 2/1.5=1.3333 | (1.3333+1.5)/2=1.4167 |
| 1.4167 | 2/1.4167=1.4118 | (1.4118 + 1.4167)/2=1.4142 |
| 1.4142 | ... | ... |
EXERCISE 2.3: Write one or more procedures that find the square root of a number using Newton's method described above.
End of explanation
"""
# TO BE COMPLETED
"""
Explanation: QUESTION: Is it possible to find an alternative stopping criterion that is a function of the error made in computing the square root?
Let us now study how we can estimate the error of Newton's method without knowing the exact value of $\sqrt{x}$. Call $E_i$ the approximation error we are making:
$$E_i = y_i - \sqrt{x}$$
from which
$$y_i = \sqrt{x} + E_i$$
The error we make at the next iteration will be
$$E_{i+1} = y_{i+1} - \sqrt{x} = \frac{y_i + \frac{x}{y_i}}{2} - \sqrt{x}$$
Working through the algebra starting from the previous expression, one can show that
$$E_{i+1} = \frac{E_i^2}{2 y_i} > 0$$
In practice, after the first guess $y_0$, all subsequent errors are always positive. Moreover, the error gets smaller at every iteration, since
$$E_i = y_i - \sqrt{x} < y_i \quad \mbox{ and therefore } 0< \frac{E_i}{y_i} <1$$
and also
$$E_{i+1} = \frac{E_i^2}{2 y_i} = \frac{E_i}{y_i} \times \frac{E_i}{2} < \frac{E_i}{2}$$
Summing up, we have shown that the error keeps shrinking, since
$$0 < E_{i+1} < \frac{E_i}{2} < E_i.$$
Let us now see how we can use these expressions to obtain a stopping criterion based on an estimate of the error. From the relations above we have
$$0 < y_{i+1} - \sqrt{x} < y_{i} - \sqrt{x}$$
hence
$$\sqrt{x} < y_{i+1} < y_{i}$$
Going back to the definition of the error at a given iteration, we have
$$E_i = y_i - \sqrt{x}= y_i - y_{i+1} + y_{i+1} -\sqrt{x} = (y_i - y_{i+1}) + E_{i+1} < (y_i - y_{i+1}) + \frac{E_{i}}{2}$$
that is
$$E_{i+1} < \frac{E_i}{2} < y_i - y_{i+1}.$$
In practice, if we want to compute the square root of a positive real number with an error margin smaller than $\epsilon > 0$, it is enough to check the condition
$$y_i - y_{i+1} < \epsilon.$$
Comparing the three functions we found
We can compare the three functions we found in terms of efficiency, for example by counting how many times the recursive function is called. To do this, it is enough to add a formal parameter that is incremented by one every time a recursive call is made. This value can be printed just before returning the final result.
EXERCISE 2.4: Modify the previous procedures to count the number of recursive calls made by the three procedures: (i) exhaustive enumeration, (ii) the bisection method, and (iii) Newton's method.
End of explanation
"""
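One possible way to complete Exercise 2.4 is sketched below, purely for illustration: each procedure gets an extra counter argument that is incremented on every recursive call and printed before the result is returned. The names ending in Count are made up here; Istess is the tolerance check defined earlier in this notebook.
def EnumerateCount(y, x, calls=1):
    # calls counts every invocation, including the first one
    if y == 0:
        print("Enumeration: recursive calls =", calls)
        return -1
    if x == y*y:
        print("Enumeration: recursive calls =", calls)
        return y
    return EnumerateCount(y-1, x, calls+1)

def SqrtRealsCount(x, a, b, calls=1):
    y = (a+b)/2
    if Istess(x, y*y):
        print("Bisection: recursive calls =", calls)
        return y
    if y*y > x:
        return SqrtRealsCount(x, a, y, calls+1)
    return SqrtRealsCount(x, y, b, calls+1)

def NewtonCount(x, y, calls=1):
    if Istess(x, y*y):
        print("Newton: recursive calls =", calls)
        return y
    return NewtonCount(x, (y+x/y)/2, calls+1)

print(EnumerateCount(36, 36))
print(SqrtRealsCount(36, 0, 36))
print(NewtonCount(36, 1))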
mne-tools/mne-tools.github.io
0.24/_downloads/cc2f4b498fc65366ac39d017e939eec5/xdawn_denoising.ipynb
bsd-3-clause
# Authors: Alexandre Barachant <[email protected]> # # License: BSD-3-Clause from mne import (io, compute_raw_covariance, read_events, pick_types, Epochs) from mne.datasets import sample from mne.preprocessing import Xdawn from mne.viz import plot_epochs_image print(__doc__) data_path = sample.data_path() """ Explanation: XDAWN Denoising XDAWN filters are trained from epochs, signal is projected in the sources space and then projected back in the sensor space using only the first two XDAWN components. The process is similar to an ICA, but is supervised in order to maximize the signal to signal + noise ratio of the evoked response :footcite:RivetEtAl2009, RivetEtAl2011. <div class="alert alert-danger"><h4>Warning</h4><p>As this denoising method exploits the known events to maximize SNR of the contrast between conditions it can lead to overfitting. To avoid a statistical analysis problem you should split epochs used in fit with the ones used in apply method.</p></div> End of explanation """ raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif' event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif' tmin, tmax = -0.1, 0.3 event_id = dict(vis_r=4) # Setup for reading the raw data raw = io.read_raw_fif(raw_fname, preload=True) raw.filter(1, 20, fir_design='firwin') # replace baselining with high-pass events = read_events(event_fname) raw.info['bads'] = ['MEG 2443'] # set bad channels picks = pick_types(raw.info, meg=True, eeg=False, stim=False, eog=False, exclude='bads') # Epoching epochs = Epochs(raw, events, event_id, tmin, tmax, proj=False, picks=picks, baseline=None, preload=True, verbose=False) # Plot image epoch before xdawn plot_epochs_image(epochs['vis_r'], picks=[230], vmin=-500, vmax=500) """ Explanation: Set parameters and read data End of explanation """ # Estimates signal covariance signal_cov = compute_raw_covariance(raw, picks=picks) # Xdawn instance xd = Xdawn(n_components=2, signal_cov=signal_cov) # Fit xdawn xd.fit(epochs) """ Explanation: Now, we estimate a set of xDAWN filters for the epochs (which contain only the vis_r class). End of explanation """ epochs_denoised = xd.apply(epochs) # Plot image epoch after Xdawn plot_epochs_image(epochs_denoised['vis_r'], picks=[230], vmin=-500, vmax=500) """ Explanation: Epochs are denoised by calling apply, which by default keeps only the signal subspace corresponding to the first n_components specified in the Xdawn constructor above. End of explanation """
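Following the overfitting warning at the top of this example, one way to split the epochs between fit and apply is sketched below. This is only an illustration using an even/odd split of the same epochs and the variables created above; it is not part of the original example.
# Fit the Xdawn filters on one half of the epochs and denoise the other half,
# so the epochs used for fitting are not the ones being transformed.
epochs_fit = epochs[::2]    # even-numbered epochs, used only for fitting
epochs_apply = epochs[1::2] # odd-numbered epochs, the ones we denoise

xd_split = Xdawn(n_components=2, signal_cov=signal_cov)
xd_split.fit(epochs_fit)
epochs_denoised_split = xd_split.apply(epochs_apply)

plot_epochs_image(epochs_denoised_split['vis_r'], picks=[230], vmin=-500, vmax=500)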
slundberg/shap
notebooks/text_examples/text_entailment/Textual Entailment Explanation Demo.ipynb
mit
import numpy as np from transformers import AutoModelForSequenceClassification, AutoTokenizer import shap from datasets import load_dataset """ Explanation: Multi-Input Text Explanation: Textual Entailment with Facebook BART This notebook demonstrates how to get explanations for the output of the Facebook BART model trained on the mnli dataset and used for textual entailment. We use an example from the snli dataset due to mnli not being supported in the required environment for shap. BART: https://huggingface.co/facebook/bart-large-mnli End of explanation """ model = AutoModelForSequenceClassification.from_pretrained("facebook/bart-large-mnli") tokenizer = AutoTokenizer.from_pretrained("facebook/bart-large-mnli") # load dataset dataset = load_dataset("snli") snli_label_map = {0: 'entailment', 1: 'neutral', 2: 'contradiction'} example_ind = 6 premise, hypothesis, label = ( dataset['train']['premise'][example_ind], dataset['train']['hypothesis'][example_ind], dataset['train']['label'][example_ind] ) print('Premise: ' + premise) print('Hypothesis: ' + hypothesis) true_label = snli_label_map[label] print('The true label is: {true_label}'.format(true_label=true_label)) # test model input_ids = tokenizer.encode(premise, hypothesis, return_tensors='pt') logits = model(input_ids)[0] probs = logits.softmax(dim=1) bart_label_map = {0: 'contradiction', 1: 'neutral', 2: 'entailment'} for i, lab in bart_label_map.items(): print('{lab} probability: {prob:0.2f}%'.format(lab=lab, prob=probs[0][i] * 100)) """ Explanation: Load model and tokenizer End of explanation """ import scipy as sp import torch # wrapper function for model # takes in masked string which is in the form: premise <separator token(s)> hypothesis def f(x): outputs = [] for _x in x: encoding = torch.tensor([tokenizer.encode(_x)]) output = model(encoding)[0].detach().cpu().numpy() outputs.append(output[0]) outputs = np.array(outputs) scores = (np.exp(outputs).T / np.exp(outputs).sum(-1)).T val = sp.special.logit(scores) return val # Construct explainer bart_labels = ['contradiction', 'neutral', 'entailment'] explainer = shap.Explainer(f, tokenizer, output_names=bart_labels) # encode then decode premise, hypothesis to get concatenated sentences encoded = tokenizer(premise, hypothesis)['input_ids'][1:-1] # ignore the start and end tokens, since tokenizer will naturally add them decoded = tokenizer.decode(encoded) print(decoded) shap_values = explainer([decoded]) # wrap input in list print(shap_values) """ Explanation: Run shap values End of explanation """ shap.plots.text(shap_values) """ Explanation: Explanation Visualization End of explanation """ from scipy.cluster.hierarchy import dendrogram, linkage from matplotlib import pyplot as plt Z = shap_values[0].abs.clustering Z[-1][2] = Z[-2][2] + 10 # last row's distance is extremely large, so make it a more reasonable value print(Z) labels_arr = shap_values[0].data # # clean labels of unusal characters (only for slow tokenizer, if use_fast=False) # labels_arr = [] # for token in shap_values[0].data: # if token[0] == 'ฤ ': # labels_arr.append(token[1:]) # else: # labels_arr.append(token) print(labels_arr) fig = plt.figure(figsize=(len(Z) + 20, 15)) dn = dendrogram(Z, labels=labels_arr) plt.show() """ Explanation: Input Partition Tree - Dendrogram End of explanation """ sort_order = 'positive' perturbation = 'keep' from shap import benchmark sper = benchmark.perturbation.SequentialPerturbation(explainer.model, explainer.masker, sort_order, perturbation) xs, ys, auc = 
sper.model_score(shap_values, [decoded]) sper.plot(xs, ys, auc) """ Explanation: Benchmarking End of explanation """
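As a small follow-up (not part of the original demo), the same wrapper, tokenizer and explainer can be reused on any other premise/hypothesis pair; the two sentences below are made up for illustration rather than taken from snli.
premise2 = "A man is playing a guitar on stage."   # hypothetical example sentence
hypothesis2 = "A person is performing music."      # hypothetical example sentence

# Same preprocessing as above: encode the pair, then decode to a single string
encoded2 = tokenizer(premise2, hypothesis2)['input_ids'][1:-1]
decoded2 = tokenizer.decode(encoded2)

shap_values2 = explainer([decoded2])
shap.plots.text(shap_values2)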
shareactorIO/pipeline
source.ml/jupyterhub.ml/notebooks/zz_old/Spark/Intro/Lab 2 - Spark SQL/Lab 2 - Spark SQL - Instructor Notebook.ipynb
apache-2.0
from pyspark.sql import SQLContext sqlContext = SQLContext(sc) """ Explanation: <img src='https://raw.githubusercontent.com/bradenrc/sparksql_pot/master/sparkSQL3.png' width="80%" height="80%"></img> <img src='https://raw.githubusercontent.com/bradenrc/sparksql_pot/master/sparkSQL1.png' width="80%" height="80%"></img> End of explanation """ !rm world_bank* -f !wget https://raw.githubusercontent.com/bradenrc/sparksql_pot/master/world_bank.json.gz """ Explanation: SQL Context queries Dataframes, not RDDs. A data file on world banks will downloaded from GitHub after removing any previous data that may exist End of explanation """ example1_df = sqlContext.read.json("world_bank.json.gz") """ Explanation: A Dataframe will be created using the sqlContext to read the file. Many other types are supported including text and Parquet End of explanation """ print example1_df.printSchema() """ Explanation: Spark SQL has the ability to infer the schema of JSON data and understand the structure of the data End of explanation """ for row in example1_df.take(2): print row print "*" * 20 """ Explanation: Let's take a look at the first two rows of data End of explanation """ #Simply use the Dataframe Object to create the table: example1_df.registerTempTable("world_bank") #now that the table is registered we can execute sql commands #NOTE that the returned object is another Dataframe: temp_df = sqlContext.sql("select * from world_bank limit 2") print type(temp_df) print "*" * 20 print temp_df #one nice feature of the notebooks and python is that we can show it in a table via Pandas sqlContext.sql("select id, borrower from world_bank limit 2").toPandas() #Here is a simple group by example: query = """ select regionname , count(*) as project_count from world_bank group by regionname order by count(*) desc """ sqlContext.sql(query).toPandas() #subselect works as well: query = """ select * from (select regionname , count(*) as project_count from world_bank group by regionname order by count(*) desc) table_alias limit 2 """ sqlContext.sql(query).toPandas() """ Explanation: Now let's register a table which is a pointer to the Dataframe and allows data access via Spark SQL End of explanation """ import random #first let's create a simple RDD #create a Python list of lists for our example data_e2 = [] for x in range(1,6): random_int = int(random.random() * 10) data_e2.append([x, random_int, random_int^2]) #create the RDD with the random list of lists rdd_example2 = sc.parallelize(data_e2) print rdd_example2.collect() from pyspark.sql.types import * #now we can assign some header information # The schema is encoded in a string. schemaString = "ID VAL1 VAL2" fields = [StructField(field_name, StringType(), True) for field_name in schemaString.split()] schema = StructType(fields) # Apply the schema to the RDD. schemaExample = sqlContext.createDataFrame(rdd_example2, schema) # Register the DataFrame as a table. schemaExample.registerTempTable("example2") # Pull the data print schemaExample.collect() #In Dataframes we can reference the columns names for example: for row in schemaExample.take(2): print row.ID, row.VAL1, row.VAL2 #Again a simple sql example: sqlContext.sql("select * from example2").toPandas() """ Explanation: Simple Example of Adding a Schema (headers) to an RDD and using it as a dataframe In the example below a simple RDD is created with Random Data in two columns and an ID column. 
End of explanation """ #Remember this RDD: print type(rdd_example2) print rdd_example2.collect() #we can use Row to specify the name of the columns with a Map, then use that to create the Dataframe from pyspark.sql import Row rdd_example3 = rdd_example2.map(lambda x: Row(id=x[0], val1=x[1], val2=x[2])) print rdd_example3.collect() #now we can convert rdd_example3 to a Dataframe df_example3 = rdd_example3.toDF() df_example3.registerTempTable("df_example3") print type(df_example3) #now a simple SQL statement sqlContext.sql("select * from df_example3").toPandas() """ Explanation: Another Example of creating a Dataframe from an RDD End of explanation """ query = """ select * from example2 e2 inner join df_example3 e3 on e2.id = e3.id """ print sqlContext.sql(query).toPandas() #Alternatively you can join within Python as well df_example4 = df_example3.join(schemaExample, schemaExample["id"] == df_example3["ID"] ) for row in df_example4.take(5): print row """ Explanation: Joins are supported, here is a simple example with our two new tables We can join example2 and example3 on ID End of explanation """ #first we create a Python function: def simple_function(v): return int(v * 10) #test the function print simple_function(3) #now we can register the function for use in SQL sqlContext.registerFunction("simple_function", simple_function) #now we can apply the filter in a SQL Statement query = """ select ID, VAL1, VAL2, simple_function(VAL1) as s_VAL1, simple_function(VAL2) as s_VAL1 from example2 """ sqlContext.sql(query).toPandas() #note that the VAL1 and VAL2 look like strings, we can cast them as well query = """ select ID, VAL1, VAL2, simple_function(cast(VAL1 as int)) as s_VAL1, simple_function(cast(VAL2 as int)) as s_VAL1 from example2 """ sqlContext.sql(query).toPandas() """ Explanation: One of the more powerful features is the ability to create Functions and Use them in SQL Here is a simple example End of explanation """ #import pandas library import pandas as pd print pd """ Explanation: Pandas Example Pandas is a common abstraction for working with data in Python. We can turn Pandas Dataframes into Spark Dataframes, the advantage of this could be scale or allowing us to run SQL statements agains the data. End of explanation """ !rm SIGHTINGS.csv -f !wget https://www.quandl.com/api/v3/datasets/NUFORC/SIGHTINGS.csv #using the CSV file from earlier, we can create a Pandas Dataframe: pandas_df = pd.read_csv("SIGHTINGS.csv") pandas_df.head() #now convert to Spark Dataframe spark_df = sqlContext.createDataFrame(pandas_df) #explore the first two rows: for row in spark_df.take(2): print row #register the Spark Dataframe as a table spark_df.registerTempTable("ufo_sightings") #now a SQL statement print sqlContext.sql("select * from ufo_sightings limit 10").collect() """ Explanation: First, let's grab some UFO data to play with End of explanation """ %matplotlib inline import matplotlib.pyplot as plt, numpy as np """ Explanation: Visualizing the Data Here are some simple ways to create charts using Pandas output In order to display in the notebook we need to tell matplotlib to render inline at this point import the supporting libraries as well End of explanation """ ufos_df = spark_df.toPandas() """ Explanation: Pandas can call a function "plot" to create the charts. 
Since most charts are created from aggregates the record set should be small enough to store in Pandas We can take our UFO data from before and create a Pandas Dataframe from the Spark Dataframe End of explanation """ ufos_df.plot(kind='bar', x='Reports', y='Count', figsize=(12, 5)) """ Explanation: To plot we call the "plot" method and specify the type, x and y axis columns and optionally the size of the chart. Many more details can be found here: http://pandas.pydata.org/pandas-docs/stable/visualization.html End of explanation """ print sqlContext.sql("select count(*) from ufo_sightings limit 10").collect() """ Explanation: This doesn't look good, there are too many observations, let's check how many: End of explanation """ ufos_df = spark_df.map(lambda x: Row(**dict(x.asDict(), year=int(x.Reports[0:4])))) """ Explanation: <h2>Ideally we could just group by year, there are many ways we could solve that:</h2> 1) parse the Reports column in SQL and output the year, then group on it 2) create a simple Python function to parse the year and call it via sql 3) as shown below: use map against the Dataframe and append a new column with "year" Tge example below takes the existing data for each row and appends a new column "year" by taking the first for characters from the Reports column Reports looks like this for example: 2016-01-31 End of explanation """ print ufos_df.take(5) """ Explanation: Quick check to verify we get the expected results End of explanation """ ufos_df.registerTempTable("ufo_withyear") """ Explanation: Register the new Dataframe as a table "ufo_withyear" End of explanation """ query = """ select sum(count) as count, year from ufo_withyear where year > 1950 group by year order by year """ pandas_ufos_withyears = sqlContext.sql(query).toPandas() pandas_ufos_withyears.plot(kind='bar', x='year', y='count', figsize=(12, 5)) """ Explanation: Now we can group by year, order by year and filter to the last 66 years End of explanation """
jseabold/statsmodels
examples/notebooks/rolling_ls.ipynb
bsd-3-clause
import pandas_datareader as pdr import pandas as pd import statsmodels.api as sm from statsmodels.regression.rolling import RollingOLS import matplotlib.pyplot as plt import seaborn seaborn.set_style('darkgrid') pd.plotting.register_matplotlib_converters() %matplotlib inline """ Explanation: Rolling Regression Rolling OLS applies OLS across a fixed windows of observations and then rolls (moves or slides) the window across the data set. They key parameter is window which determines the number of observations used in each OLS regression. By default, RollingOLS drops missing values in the window and so will estimate the model using the available data points. Estimated values are aligned so that models estimated using data points $i, i+1, ... i+window$ are stored in location $i+window$. Start by importing the modules that are used in this notebook. End of explanation """ factors = pdr.get_data_famafrench('F-F_Research_Data_Factors', start='1-1-1926')[0] print(factors.head()) industries = pdr.get_data_famafrench('10_Industry_Portfolios', start='1-1-1926')[0] print(industries.head()) """ Explanation: pandas-datareader is used to download data from Ken French's website. The two data sets downloaded are the 3 Fama-French factors and the 10 industry portfolios. Data is available from 1926. The data are monthly returns for the factors or industry portfolios. End of explanation """ endog = industries.HiTec - factors.RF.values exog = sm.add_constant(factors['Mkt-RF']) rols = RollingOLS(endog, exog, window=60) rres = rols.fit() params = rres.params print(params.head()) print(params.tail()) """ Explanation: The first model estimated is a rolling version of the CAPM that regresses the excess return of Technology sector firms on the excess return of the market. The window is 60 months, and so results are available after the first 60 (window) months. The first 59 (window - 1) estimates are all nan filled. End of explanation """ fig = rres.plot_recursive_coefficient(variables=['Mkt-RF'], figsize=(14,6)) """ Explanation: We next plot the market loading along with a 95% point-wise confidence interval. The alpha=False omits the constant column, if present. End of explanation """ exog_vars = ['Mkt-RF', 'SMB', 'HML'] exog = sm.add_constant(factors[exog_vars]) rols = RollingOLS(endog, exog, window=60) rres = rols.fit() fig = rres.plot_recursive_coefficient(variables=exog_vars, figsize=(14,18)) """ Explanation: Next, the model is expanded to include all three factors, the excess market, the size factor and the value factor. End of explanation """ joined = pd.concat([factors, industries], axis=1) joined['Mkt_RF'] = joined['Mkt-RF'] mod = RollingOLS.from_formula('HiTec ~ Mkt_RF + SMB + HML', data=joined, window=60) rres = mod.fit() print(rres.params.tail()) """ Explanation: Formulas RollingOLS and RollingWLS both support model specification using the formula interface. The example below is equivalent to the 3-factor model estimated previously. Note that one variable is renamed to have a valid Python variable name. End of explanation """ %timeit rols.fit() %timeit rols.fit(params_only=True) """ Explanation: RollingWLS: Rolling Weighted Least Squares The rolling module also provides RollingWLS which takes an optional weights input to perform rolling weighted least squares. It produces results that match WLS when applied to rolling windows of data. Fit Options Fit accepts other optional keywords to set the covariance estimator. 
Only two estimators are supported, 'nonrobust' (the classic OLS estimator) and 'HC0' which is White's heteroskedasticity robust estimator. You can set params_only=True to only estimate the model parameters. This is substantially faster than computing the full set of values required to perform inference. Finally, the parameter reset can be set to a positive integer to control estimation error in very long samples. RollingOLS avoids the full matrix product when rolling by only adding the most recent observation and removing the dropped observation as it rolls through the sample. Setting reset uses the full inner product every reset periods. In most applications this parameter can be omitted. End of explanation """
phoebe-project/phoebe2-docs
2.2/tutorials/requiv.ipynb
gpl-3.0
!pip install -I "phoebe>=2.2,<2.3" """ Explanation: Equivalent Radius Setup Let's first make sure we have the latest version of PHOEBE 2.2 installed. (You can comment out this line if you don't use pip for your installation or don't want to update to the latest release). End of explanation """ %matplotlib inline import phoebe from phoebe import u # units import numpy as np import matplotlib.pyplot as plt logger = phoebe.logger() b = phoebe.default_binary() """ Explanation: As always, let's do imports and initialize a logger and a new Bundle. See Building a System for more details. End of explanation """ b.add_dataset('mesh', times=np.linspace(0,1,11), dataset='mesh01') """ Explanation: Now let's add a mesh dataset at a few different times so that we can see how the potentials affect the surfaces of the stars. End of explanation """ print(b['requiv@component']) """ Explanation: Relevant Parameters The 'requiv' parameter defines the stellar surface to have a constant volume of 4./3 pi requiv^3. End of explanation """ print(b['requiv_max@primary@component']) print(b['requiv_max@primary@constraint']) b.set_value('requiv@primary@component', 3) """ Explanation: Critical Potentials and System Checks Additionally, for each detached component, there is an requiv_max Parameter which shows the critical value at which the Roche surface will overflow. Setting requiv to a larger value will fail system checks and raise a warning. End of explanation """ b.set_value('sma@binary@component', 10) """ Explanation: At this time, if you were to call run_compute, an error would be thrown. An error isn't immediately thrown when setting requiv, however, since the overflow can be recitified by changing any of the other relevant parameters. For instance, let's change sma to be large enough to account for this value of rpole and you'll see that the error does not occur again. End of explanation """ print(b.run_checks()) b.set_value('sma@binary@component', 5) print(b.run_checks()) """ Explanation: These logger warnings are handy when running phoebe interactively, but in a script its also handy to be able to check whether the system is currently computable /before/ running run_compute. This can be done by calling run_checks which returns a boolean (whether the system passes all checks) and a message (a string describing the first failed check). End of explanation """
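As a small illustrative guard (not part of the original tutorial), the current value of requiv can be compared against the constrained requiv_max before attempting run_compute:
requiv = b.get_value('requiv@primary@component')
requiv_max = b.get_value('requiv_max@primary@component')

if requiv < requiv_max:
    print('primary is detached: requiv={} < requiv_max={}'.format(requiv, requiv_max))
else:
    print('primary would overflow its Roche lobe: requiv={} >= requiv_max={}'.format(requiv, requiv_max))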
mathnathan/notebooks
.ipynb_checkpoints/Intro to PyTorch-checkpoint.ipynb
mit
import torch as t

# Tensors
a = t.tensor([1,2,3])

# Can specify type during construction
a = t.tensor([1,2,3], dtype=t.half)

# Can cast to different types once constructed
a
a.double()
a.float()
a.short()
a.long()
"""
Explanation: What is PyTorch?
It's a Python based scientific computing package targeted at two sets of audiences:

A replacement for NumPy to use the power of GPUs
a deep learning research platform that provides maximum flexibility and speed

A replacement for NumPy to use the power of GPUs...
End of explanation
"""
The autograd package provides automatic differentiation for all operations on Tensors. It is a define-by-run framework, which means that your backprop is defined by how your code is run, and that every single iteration can be different. End of explanation """
WNoxchi/Kaukasos
FACLA/SVD-NMF-review.ipynb
mit
from scipy.stats import ortho_group import numpy as np Q = ortho_group.rvs(dim=3) B = np.random.randint(0,10,size=(3,3)) A = Q@[email protected] U,S,V = np.linalg.svd(A, full_matrices=False) U S V for i in range(3): print(U[i] @ U[(i+1) % len(U)]) # wraps around # U[0] @ U[1] # U[1] @ U[2] # U[2] @ U[0] for i in range(len(U)): print(U[:,i] @ U[:, (i+1)%len(U[0])]) """ Explanation: SVD Practice. 2018/2/12 - WNixalo Fastai Computational Linear Algebra (2017) ยง2: Topic Modeling w NMF & SVD facebook research: Fast Randomized SVD 1. Singular-Value Decomposition SVD is a factorization of a real or complex matrix. It factorizes a matrix $A$ into one with orthogonal columns $V^T$, one with orthogonal rows $U$, and a diagonal matrix of singular values $ฮฃ$ (aka $S$ or $s$ or $ฯƒ$) which contains the relative importance of each factor. End of explanation """ np.isclose(np.eye(len(U)), U @ U.T) np.isclose(np.eye(len(V)), V.T @ V) """ Explanation: Wait so.. the rows of a matrix $A$ are orthogonal iff $AA^T$ is diagonal? Hmm. Math.StackEx Link End of explanation """ from sklearn import decomposition # ofc this is just dummy data to test it works datavectors = np.random.randint(-1000,1000,size=(10,50)) U,S,V = decomposition.randomized_svd(datavectors, n_components=5) U.shape, S.shape, V.shape """ Explanation: Wait but that also gives True for $VV^T$. Hmmm. 2. Truncated SVD Okay, so SVD is an exact decomposition of a matrix and allows us to pull out distinct topics from data (due to their orthonormality (orthogonality?)). But doing so for a large data corpus is ... bad. Especially if most of the data's meaning / information relevant to us is captured by a small prominent subset. IE: prevalence of articles like a and the are likely poor indicators of any particular meaning in a piece of text since they're everywhere in English. Likewise for other types of data. Hmm, so, if I understood correctly, the ฮฃ/S/s/ฯƒ matrix is ordered by value max$\rightarrow$min.. but computing the SVD of a large dataset $A$ is exactly what we want to avoid using T-SVD. Okay so how? $\rightarrow$Full SVD we're calculating the full dimension of topics -- but its handy to limit to the most important ones -- this is how SVD is used in compression. Aha. This is where I was confused. Truncation is used with Randomization in R-SVD. The Truncated section was just introducing the concept. Got it. So that's where, in R-SVD, we use a buffer in addition to the portion of the dataset we take for SVD. And yay scikit-learn has R-SVD built in. End of explanation """ # workflow w NMF is something like this V = np.random.randint(0, 20, size=(10,10)) m,n = V.shape d = 5 # num_topics clsf = decomposition.NMF(n_components=d, random_state=1) W1 = clsf.fit_transform(V) H1 = clsf.components_ """ Explanation: The idea of T-SVD is that we want to compute an approximation to the range of $A$. The range of $A$ is the space covered by the column basis. ie: Range(A) = {y: Ax = y} that is: all $y$ you can achieve by multiplying $x$ with $A$. Depending on your space, the bases are vectors that you can take linear combinations of to get any value in your space. 3. Details of Randomized SVD (Truncated) Our goal is to have an algorithm to perform Truncated SVD using Randomized values from the dataset matrix. We want to use randomization to calculate the topics we're interested in, instead of calculating all of them. Aha. So.. the way to do that, using randomization, is to have a special kind of randomization. 
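Quick sanity check (added here for illustration, not in the original notes): the three factors recovered above should rebuild $A$ exactly, up to floating point error.
# U @ diag(S) @ V should reproduce A
print(np.allclose(A, U @ np.diag(S) @ V))
print(np.abs(A - U @ np.diag(S) @ V).max())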
Find a matrix $Q$ with some special properties that will allow us to pull a matrix that is a near match to our dataset matrix $A$ in the ways we want it to be. Ie: It'll have the same singular values, meaning the same importance-ordered topics. Wow mathematics is really.. somethin. That process: Compute an approximation to the range of $A$. ie: we want $Q$ with $r$ orthonormal columns st: $$A \approx QQ^TA$$ Construct $B = Q^TA,$, which is small $(r \times n)$ Compute the SVD of $B$ by standard methods (fast since $B$ is smaller than $A$), $B = SฮฃV^T$ Since: $$A \approx QQ^TA = Q(SฮฃV^T)$$ if we set $U = QS$, then we have a low-rank approximation of $A \approx UฮฃV^T$. -- okay so.. confusion here. What is $S$ and $ฮฃ$? Because I see them elsewhere taken to mean the same thing on this subject, but all of a sudden they seem to be totally different things. -- oh, so apparently $S$ here is actually something different. $ฮฃ$ is what's been interchangeably referred to in Hellenic/Latin letters throughout the notebook. NOTE that $A: m \times n$ while $Q: m \times r$, so $Q$ is generally a tall, skinny matrix and therefore much smaller & easier to compute with than $A$. Also, because $S$ & $Q$ are both orthonormal, setting $R = QS$ makes $R$ orthonormal as well. How do we find Q (in step 1)? General Idea: we find this special $Q$, then we do SVD on this smaller matrix $Q^TA$, and we plug that back in to have our Truncated-SVD for $A$. And HERE is where the Random part of Randomized SVD comes in! How do we find $Q$?: We just take a bunch of random vectors $w_i$ and look at / evaluate the subspace formed by $Aw_i$. We form a matrix $W$ with the $w_i$'s as its columns. Then we take the QR Decomposition of $AW = QR$. Then the colunms of $Q$ form an orthonormal basis for $AW$, which is the range of $A$. Basically a QR Decomposition exists for any matrix, and is an orthonormal matrix $\times$ an upper triangular matrix. So basically: we take $AW$, $W$ is random, get the $QR$ -- and a property of the QR-Decomposition is that $Q$ forms an orthonormal basis for $AW$ -- and $AW$ gives the range of $A$. Since $AW$ has far more rows than columns, it turns out in practice that these columns are approximately orthonormal. It's very unlikely you'll get linearly-dependent columns when you choose random values. Aand apparently the QR-Decomp is v.foundational to Numerical Linear Algebra. How do we choose r? We chose $Q$ to have $r$ orthonormal columns, and $r$ gives us the dimension of $B$. We choose $r$ to be the number of topics we want to retrieve $+$ some buffer. See the lesson notebook and accompanying lecture time for an implementatinon of Randomized SVD. NOTE that Scikit-Learn's implementation is more powerful; the example is for example purposes. 4. Non-negative Matrix Factorization Wiki NMF is a group of algorithms in multivariate analysis and linear algebra where a matrix $V$ is factorized into (usually) two matrices $W$ & $H$, with the property that all three matrices have no negative elements. Lecture 2 40:32 The key thing in SVD is orthogonality -- basically everything is orthogonal to eachother -- the key idea in NMF is that nothing is negative. The lower-bound is zero-clamped. NOTE your original dataset shoudl be nonnegative if you use NMF, or else you won't be able to reconstruct it. Idea Rather than constraining our factors to be orthogonal, another idea would be to constrain them to be non-negative. 
NMF is a factorization of a non-negative dataset $V$: $$V=WH$$ into non-negative matrices $W$, $H$. Often positive factors will be more easily interpretable (and this is the reason behind NMF's popularity). huh.. really now.?.. For example, if your dataset is a matrix of faces $V$, where each column holds a vectorized face, then $W$ would be a matrix of column facial features, and $H$ a matrix of the relative importance of those features in each image.
Applications of NMF / Sklearn
NMF is a 'difficult' problem because it is unconstrained and NP-Hard. NMF looks something like this in schematic form:
     Documents    Topics     Topic Importance Indicators
W    ---------     ---       -----------------
o    | | | | |     |||       | | | | | | | | |
r    | | | | |  ≈  |||       -----------------
d    | | | | |     |||
s    ---------     ---
         V          W                 H
End of explanation
"""
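A quick illustrative check (not in the original notes) of how well the factors from the NMF cell above reconstruct V:
# W1 @ H1 is only an approximation of V, unlike the exact SVD factorization above
V_approx = W1 @ H1
print('Frobenius reconstruction error:', np.linalg.norm(V - V_approx))
print('relative error:', np.linalg.norm(V - V_approx) / np.linalg.norm(V))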
jmschrei/pomegranate
examples/bayesnet_monty_hall_train.ipynb
mit
import math from pomegranate import * """ Explanation: Training a Monty Hall Bayesian Network authors:<br> Jacob Schreiber [<a href="mailto:[email protected]">[email protected]</a>]<br> Nicholas Farn [<a href="mailto:[email protected]">[email protected]</a>] Lets test out the Bayesian Network framework to produce the Monty Hall problem, but modified a little. The Monty Hall problem is basically a game show where a guest chooses one of three doors to open, with an unknown one having a prize behind it. Monty then opens another non-chosen door without a prize behind it, and asks the guest if they would like to change their answer. Many people were surprised to find that if the guest changed their answer, there was a 66% chance of success as opposed to a 50% as might be expected if there were two doors. This can be modelled as a Bayesian network with three nodes-- guest, prize, and Monty, each over the domain of door 'A', 'B', 'C'. Monty is dependent on both guest and prize, in that it can't be either of them. Lets extend this a little bit to say the guest has an untrustworthy friend whose answer he will not go with. End of explanation """ guest = DiscreteDistribution( { 'A': 1./3, 'B': 1./3, 'C': 1./3 } ) prize = DiscreteDistribution( { 'A': 1./3, 'B': 1./3, 'C': 1./3 } ) """ Explanation: Let's create the distributions for the guest and the prize. Note that both distributions are independent of one another. End of explanation """ monty = ConditionalProbabilityTable( [[ 'A', 'A', 'A', 0.0 ], [ 'A', 'A', 'B', 0.5 ], [ 'A', 'A', 'C', 0.5 ], [ 'A', 'B', 'A', 0.0 ], [ 'A', 'B', 'B', 0.0 ], [ 'A', 'B', 'C', 1.0 ], [ 'A', 'C', 'A', 0.0 ], [ 'A', 'C', 'B', 1.0 ], [ 'A', 'C', 'C', 0.0 ], [ 'B', 'A', 'A', 0.0 ], [ 'B', 'A', 'B', 0.0 ], [ 'B', 'A', 'C', 1.0 ], [ 'B', 'B', 'A', 0.5 ], [ 'B', 'B', 'B', 0.0 ], [ 'B', 'B', 'C', 0.5 ], [ 'B', 'C', 'A', 1.0 ], [ 'B', 'C', 'B', 0.0 ], [ 'B', 'C', 'C', 0.0 ], [ 'C', 'A', 'A', 0.0 ], [ 'C', 'A', 'B', 1.0 ], [ 'C', 'A', 'C', 0.0 ], [ 'C', 'B', 'A', 1.0 ], [ 'C', 'B', 'B', 0.0 ], [ 'C', 'B', 'C', 0.0 ], [ 'C', 'C', 'A', 0.5 ], [ 'C', 'C', 'B', 0.5 ], [ 'C', 'C', 'C', 0.0 ]], [guest, prize] ) """ Explanation: Now let's create the conditional probability table for our Monty. The table is dependent on both the guest and the prize. End of explanation """ s1 = State( guest, name="guest" ) s2 = State( prize, name="prize" ) s3 = State( monty, name="monty" ) """ Explanation: Now lets create the states for the bayesian network. End of explanation """ network = BayesianNetwork( "test" ) network.add_states( s1, s2, s3 ) """ Explanation: Then the bayesian network itself, adding the states in after. End of explanation """ network.add_transition( s1, s3 ) network.add_transition( s2, s3 ) """ Explanation: Then the transitions. End of explanation """ network.bake() """ Explanation: With a "bake" to finalize the structure of our network. End of explanation """ data = [[ 'A', 'A', 'A' ], [ 'A', 'A', 'A' ], [ 'A', 'A', 'A' ], [ 'A', 'A', 'A' ], [ 'A', 'A', 'A' ], [ 'B', 'B', 'B' ], [ 'B', 'B', 'C' ], [ 'C', 'C', 'A' ], [ 'C', 'C', 'C' ], [ 'C', 'C', 'C' ], [ 'C', 'C', 'C' ], [ 'C', 'B', 'A' ]] network.fit( data ) """ Explanation: Now we can train our network on the following data. 
End of explanation """ observations = { 'guest' : 'A', 'prize' : 'A' } beliefs = map( str, network.predict_proba( observations ) ) print("\n".join( "{}\t{}".format( state.name, belief ) for state, belief in zip( network.states, beliefs ) )) """ Explanation: Now let's see what happens when our Guest says 'A' and the Prize is 'A'. End of explanation """
astroumd/GradMap
notebooks/Lectures2018/Lecture4/Lecture4-2BodyProblem-Student-NEW.ipynb
gpl-3.0
#Physical Constants (SI units) G=6.67e-11 AU=1.5e11 #meters. Distance between sun and earth. daysec=24.0*60*60 #seconds in a day """ Explanation: Welcome to your first numerical simulation! The 2 Body Problem Many problems in statistical physics and astrophysics requiring solving problems consisting of many particles at once (sometimes on the order of thousands or more!). This can't be done by the traditional pen and paper techniques you are all learning in your physics classes. Instead, we must impliment numerical solutions to these problems. Today, you will create your first of many numerical simulation for a simple problem is that solvable by pen and paper already, the 2 body problem in 2D. In this problem, we will describe the motion between two particles that share a force between them (such as Gravity). We'll design the simulation from an astronomer's mindset with their astronomical units in mind. This simulation will be used to confirm the general motion of the earth around the Sun, and later will be used to predict the motion between two stars within relatively close range. <br> <br> <br> We will guide you through the physics and math required to create this simulation. The problem here is designed to use the knowledge of scientific python you have been developing this week. Like any code in python, The first thing we need to do is import the libraries we need. Go ahead and import Numpy and Pyplot below as np and plt respectfully. Don't forget to put matplotlib inline to get everything within the notebook. Now we will define the physical constants of our system, which will also establish the unit system we have chosen. We'll use SI units here. Below, I've already created the constants. Make sure you understannd what they are before moving on. End of explanation """ #####run specfic constants. Change as needed##### #Masses in kg Ma=6.0e24 #always set as smaller mass Mb=2.0e30 #always set as larger mass #Time settings t=0.0 #Starting time dt=.01*daysec #Time set for simulation tend=300*daysec #Time where simulation ends #Intial conditions (posistion [m] and velocities [m/s] in x,y,z coorindates) #For Ma xa=1.0*AU ya=0.0 vxa=0.0 vya=30000.0 #For Mb xb=0.0 yb=0.0 vxb=0.0 vyb=0.0 """ Explanation: Next, we will need parameters for the simulation. These are known as intial condititons. For a 2 body gravitation problem, we'll need to know the masses of the two objects, the starting posistions of the two objects, and the starting velocities of the two objects. Below, I've included the intial conditions for the earth (a) and the Sun (b) at the average distance from the sun and the average velocity around the sun. We also need a starting time, and ending time for the simulation, and a "time-step" for the system. Feel free to adjust all of these as you see fit once you have built the system! <br> <br> <br> <br> a note on dt: As already stated, numeric simulations are approximations. In our case, we are approximating how time flows. We know it flows continious, but the computer cannot work with this. So instead, we break up our time into equal chunks called "dt". The smaller the chunks, the mroe accurate you will become, but at the cost of computer time. End of explanation """ #Function to compute the force between the two objects def FG(xa,xb,ya,yb): #Computer rx and ry between Ma and Mb rx=xb-xa ry=#Write it in #compute r^3 r3=#Write in r^3 using the equation above. Make use of np.sqrt() #Compute the force in Newtons. Use the equations above as a Guide! 
fx=#Write it in fy=-#Write it in return #What do we return? """ Explanation: It will be nice to create a function for the force between Ma and Mb. Below is the physics for the force of Ma on Mb. How the physics works here is not important for the moment. Right now, I want to make sure you can transfer the math shown into a python function. I'll show a picture on the board the physics behind this math for those interested. $$\vec{F_g}=\frac{-GM_aM_b}{r^3}\vec{r}$$ and - $$\vec{r}=(x_b-x_a)\hat{x}+ (y_b-y_a)\hat{y}$$ - $$r^3=((x_b-x_a)^2+(y_b-y_a)^2)^{3/2}$$ If we break Fg into the x and y componets we get: $$Fx=\frac{-GM_aM_b}{r^3}x$$ $$Fy=\frac{-GM_aM_b}{r^3}x$$ <br><br>So, $Fg$ will only need to be a function of xa, xb, ya, and yb. The velocities of the bodies will not be needed. Create a function that calculates the force between the bodies given the posistions of the bodies. My recommendation here will be feed the inputs as seperate componets and also return the force in terms of componets (say, fx and fy). This will make your code easier to make and easier to read. End of explanation """ #Run a loop for the simulation. Keep track of Ma and Mb posistions #Intialize vectors xaAr=np.array([]) yaAr=np.array([]) xbAr=#Write it in for Particle B ybAr=#Write it in for Particle B """ Explanation: Now that we have our function, we need to prepare a loop. Before we do, we need to intialize the loop and choose a loop type, for or while. Below is the general outline for how each type of loop can gp. <br> <br> <br> For loop: intialize posistions and velocities arrays with np.zeros or np.linspace for the amount of steps needed to go through the simulation (which is numSteps=(tend-t)/dt the way we have set up the problem). The for loop condition is based off time and should read rough like: for i in range(numSteps) <br> <br> <br> While loop: intialize posistions and velocities arrays with np.array([]) and use np.append() to tact on new values at each step like so, xaArray=np.append(xaArray,NEWVALUE). The while condition should read, while t&lt;tend My preference here is While since it keeps my calculations and appending seperate. But, feel free to use which ever feels best for you! End of explanation """ #Your loop here #using while loop method with appending. Can also be done with for loops while #What is our condition for ending?: #Compute current force on Ma and Mb. Ma recieves the opposite force of Mb fx,fy=Fg(xa,ya,xb,yb) #Update the velocities and posistions of the particles vxa=vxa-fx*dt/Ma vya=#Write it in for y vxb=#Write it in vyb=#Write it in xa=xa+vxa*dt ya=#Wite it in xb=#Write it in yb=#Write it in #Save data to lists xaAr=np.append(xaAr,xa) yaAr=#How will I save it for yaAr? xbAr=np.append(xbAr,xb) ybAr=np.append(ybAr,yb) #update the time by one time step, dt #How do I update the time? """ Explanation: Now for the actual simulation. This is the hardest part to code in. The general idea behind our loop is that as we step through time, we calculate the force, then calculate the new velocity, then the new posistion for each particle. At the end, we must update our arrays to reflect the new changes and update the time of the system. The time is super important! If we don't (say in a while loop), the simulation would never end and we would never get our result. Outline for the loop (order matters here) Calculate the force with the last known posistions (use your function!) 
Calculate the new velocities using the approximation: vb = vb + dt*fg/Mb and va= va - dt*fg/Ma Note the minus sign here, and the need to do this for the x and y directions! Calculate the new posistions using the approximation: xb = xb + dt*Vb (same for a and for y's. No minus problem here) Update the arrays to reflect our new values Update the time using t=t+dt <br> <br> <br> <br> Now when the loop closes back in, the cycle repeats in a logical way. Go one step at a time when creating this loop and use comments to help guide yourself. Ask for help if it gets tricky! End of explanation """ from IPython.display import Image Image("Earth-Sun-averageResult.jpg") #Your plot here plt.plot(#Particle A plot plt.plot(#Partcile B plot """ Explanation: Now for the fun part (or not so fun part if your simulation had an issue), plot your results! This is something well covered in previous lectures. Show me a plot of (xa,ya) and (xb,yb). Does it look sort of familiar? Hopfully you get something like the below image (in units of AU). End of explanation """
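For reference only, here is one possible completed version of the loop and plot; the blanks above are meant to be filled in by each student, so treat this as a sketch rather than the official solution. It assumes the imports requested at the top (numpy as np, pyplot as plt) were done, that the force function above has been completed to return (fx, fy), and it calls that function with the argument order of its definition, FG(xa, xb, ya, yb).
while t < tend:
    # Force that Mb feels from Ma; Ma feels the equal and opposite force
    fx, fy = FG(xa, xb, ya, yb)

    # Update velocities (note the opposite signs for Ma and Mb)
    vxa = vxa - fx*dt/Ma
    vya = vya - fy*dt/Ma
    vxb = vxb + fx*dt/Mb
    vyb = vyb + fy*dt/Mb

    # Update positions with the new velocities
    xa = xa + vxa*dt
    ya = ya + vya*dt
    xb = xb + vxb*dt
    yb = yb + vyb*dt

    # Save the new positions
    xaAr = np.append(xaAr, xa)
    yaAr = np.append(yaAr, ya)
    xbAr = np.append(xbAr, xb)
    ybAr = np.append(ybAr, yb)

    # Advance the clock by one time step
    t = t + dt

# Plot both orbits in units of AU, matching the reference image above
plt.plot(xaAr/AU, yaAr/AU, label='Ma (Earth)')
plt.plot(xbAr/AU, ybAr/AU, label='Mb (Sun)')
plt.legend()
plt.show()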
darrenxyli/deeplearning
projects/project1/DLND-your-first-network/dlnd-your-first-neural-network.ipynb
apache-2.0
%matplotlib inline %config InlineBackend.figure_format = 'retina' import numpy as np import pandas as pd import matplotlib.pyplot as plt """ Explanation: Your first neural network In this project, you'll build your first neural network and use it to predict daily bike rental ridership. We've provided some of the code, but left the implementation of the neural network up to you (for the most part). After you've submitted this project, feel free to explore the data and the model more. End of explanation """ data_path = 'Bike-Sharing-Dataset/hour.csv' rides = pd.read_csv(data_path) rides.head() """ Explanation: Load and prepare the data A critical step in working with neural networks is preparing the data correctly. Variables on different scales make it difficult for the network to efficiently learn the correct weights. Below, we've written the code to load and prepare the data. You'll learn more about this soon! End of explanation """ rides[:24*10].plot(x='dteday', y='cnt') """ Explanation: Checking out the data This dataset has the number of riders for each hour of each day from January 1 2011 to December 31 2012. The number of riders is split between casual and registered, summed up in the cnt column. You can see the first few rows of the data above. Below is a plot showing the number of bike riders over the first 10 days in the data set. You can see the hourly rentals here. This data is pretty complicated! The weekends have lower over all ridership and there are spikes when people are biking to and from work during the week. Looking at the data above, we also have information about temperature, humidity, and windspeed, all of these likely affecting the number of riders. You'll be trying to capture all this with your model. End of explanation """ dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday'] for each in dummy_fields: dummies = pd.get_dummies(rides[each], prefix=each, drop_first=False) rides = pd.concat([rides, dummies], axis=1) fields_to_drop = ['instant', 'dteday', 'season', 'weathersit', 'weekday', 'atemp', 'mnth', 'workingday', 'hr'] data = rides.drop(fields_to_drop, axis=1) data.head() """ Explanation: Dummy variables Here we have some categorical variables like season, weather, month. To include these in our model, we'll need to make binary dummy variables. This is simple to do with Pandas thanks to get_dummies(). End of explanation """ quant_features = ['casual', 'registered', 'cnt', 'temp', 'hum', 'windspeed'] # Store scalings in a dictionary so we can convert back later scaled_features = {} for each in quant_features: mean, std = data[each].mean(), data[each].std() scaled_features[each] = [mean, std] data.loc[:, each] = (data[each] - mean)/std """ Explanation: Scaling target variables To make training the network easier, we'll standardize each of the continuous variables. That is, we'll shift and scale the variables such that they have zero mean and a standard deviation of 1. The scaling factors are saved so we can go backwards when we use the network for predictions. 
End of explanation """ # Save the last 21 days test_data = data[-21*24:] data = data[:-21*24] # Separate the data into features and targets target_fields = ['cnt', 'casual', 'registered'] features, targets = data.drop(target_fields, axis=1), data[target_fields] test_features, test_targets = test_data.drop(target_fields, axis=1), test_data[target_fields] """ Explanation: Splitting the data into training, testing, and validation sets We'll save the last 21 days of the data to use as a test set after we've trained the network. We'll use this set to make predictions and compare them with the actual number of riders. End of explanation """ # Hold out the last 60 days of the remaining data as a validation set train_features, train_targets = features[:-60*24], targets[:-60*24] val_features, val_targets = features[-60*24:], targets[-60*24:] """ Explanation: We'll split the data into two sets, one for training and one for validating as the network is being trained. Since this is time series data, we'll train on historical data, then try to predict on future data (the validation set). End of explanation """ class NeuralNetwork(object): def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate): # Set number of nodes in input, hidden and output layers. self.input_nodes = input_nodes self.hidden_nodes = hidden_nodes self.output_nodes = output_nodes # Initialize weights self.weights_input_to_hidden = np.random.normal(0.0, self.hidden_nodes**-0.5, (self.hidden_nodes, self.input_nodes)) self.weights_hidden_to_output = np.random.normal(0.0, self.output_nodes**-0.5, (self.output_nodes, self.hidden_nodes)) self.lr = learning_rate # Hidden layer activation function is the sigmoid function f(x) = 1/(1 + exp(-1)) self.activation_function = lambda x: 1/ (1 + np.exp(-x)) self.activation_derivative = lambda x: x * (1 - x) # Output layer activation function is f(x) = x self.output_activation_function = lambda x: x self.output_activation_derivative = lambda x: 1 def train(self, inputs_list, targets_list): # Convert inputs list to 2d array inputs = np.array(inputs_list, ndmin=2).T targets = np.array(targets_list, ndmin = 2).T ### Forward pass ### # signals into hidden layer hidden_inputs = np.dot(self.weights_input_to_hidden, inputs) # signals from hidden layer hidden_outputs = self.activation_function(hidden_inputs) # signals into final output layer final_inputs = np.dot(self.weights_hidden_to_output, hidden_outputs) # signals from final output layer final_outputs = self.output_activation_function(final_inputs) ### Backward pass ### # Output layer error is the difference between desired target and actual output. 
error = targets - final_outputs output_errors = error * self.output_activation_derivative(final_inputs) # errors (back-)propagated to the hidden layer hidden_errors = np.dot(output_errors, self.weights_hidden_to_output) # hidden layer gradients hidden_grad = self.activation_derivative(hidden_outputs) # update hidden-to-output weights with gradient descent step self.weights_hidden_to_output += self.lr * np.dot(output_errors, hidden_outputs.T) # update input-to-hidden weights with gradient descent step self.weights_input_to_hidden += self.lr * np.dot(hidden_errors.T * hidden_grad, inputs.T) def run(self, inputs_list): # Run a forward pass through the network inputs = np.array(inputs_list, ndmin=2).T # signals into hidden layer hidden_inputs = np.dot(self.weights_input_to_hidden, inputs) # signals from hidden layer hidden_outputs = self.activation_function(hidden_inputs) # signals into final output layer final_inputs = np.dot(self.weights_hidden_to_output, hidden_outputs) # signals from final output layer final_outputs = self.output_activation_function(final_inputs) return final_outputs def MSE(y, Y): return np.mean((y-Y)**2) """ Explanation: Time to build the network Below you'll build your network. We've built out the structure and the backwards pass. You'll implement the forward pass through the network. You'll also set the hyperparameters: the learning rate, the number of hidden units, and the number of training passes. The network has two layers, a hidden layer and an output layer. The hidden layer will use the sigmoid function for activations. The output layer has only one node and is used for the regression, the output of the node is the same as the input of the node. That is, the activation function is $f(x)=x$. A function that takes the input signal and generates an output signal, but takes into account the threshold, is called an activation function. We work through each layer of our network calculating the outputs for each neuron. All of the outputs from one layer become inputs to the neurons on the next layer. This process is called forward propagation. We use the weights to propagate signals forward from the input to the output layers in a neural network. We use the weights to also propagate error backwards from the output back into the network to update our weights. This is called backpropagation. Hint: You'll need the derivative of the output activation function ($f(x) = x$) for the backpropagation implementation. If you aren't familiar with calculus, this function is equivalent to the equation $y = x$. What is the slope of that equation? That is the derivative of $f(x)$. Below, you have these tasks: 1. Implement the sigmoid function to use as the activation function. Set self.activation_function in __init__ to your sigmoid function. 2. Implement the forward pass in the train method. 3. Implement the backpropagation algorithm in the train method, including calculating the output error. 4. Implement the forward pass in the run method. 
End of explanation """ import sys ### Set the hyperparameters here ### epochs = 6000 learning_rate = 0.01 hidden_nodes = 28 output_nodes = 1 N_i = train_features.shape[1] network = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate) losses = {'train':[], 'validation':[]} for e in range(epochs): # Go through a random batch of 128 records from the training data set batch = np.random.choice(train_features.index, size=128) for record, target in zip(train_features.ix[batch].values, train_targets.ix[batch]['cnt']): network.train(record, target) # Printing out the training progress train_loss = MSE(network.run(train_features), train_targets['cnt'].values) val_loss = MSE(network.run(val_features), val_targets['cnt'].values) sys.stdout.write("\rProgress: " + str(100 * e/float(epochs))[:4] \ + "% ... Training loss: " + str(train_loss)[:5] \ + " ... Validation loss: " + str(val_loss)[:5]) losses['train'].append(train_loss) losses['validation'].append(val_loss) plt.plot(losses['train'], label='Training loss') plt.plot(losses['validation'], label='Validation loss') plt.legend() plt.ylim(ymax=0.5) """ Explanation: Training the network Here you'll set the hyperparameters for the network. The strategy here is to find hyperparameters such that the error on the training set is low, but you're not overfitting to the data. If you train the network too long or have too many hidden nodes, it can become overly specific to the training set and will fail to generalize to the validation set. That is, the loss on the validation set will start increasing as the training set loss drops. You'll also be using a method know as Stochastic Gradient Descent (SGD) to train the network. The idea is that for each training pass, you grab a random sample of the data instead of using the whole data set. You use many more training passes than with normal gradient descent, but each pass is much faster. This ends up training the network more efficiently. You'll learn more about SGD later. Choose the number of epochs This is the number of times the dataset will pass through the network, each time updating the weights. As the number of epochs increases, the network becomes better and better at predicting the targets in the training set. You'll need to choose enough epochs to train the network well but not too many or you'll be overfitting. Choose the learning rate This scales the size of weight updates. If this is too big, the weights tend to explode and the network fails to fit the data. A good choice to start at is 0.1. If the network has problems fitting the data, try reducing the learning rate. Note that the lower the learning rate, the smaller the steps are in the weight updates and the longer it takes for the neural network to converge. Choose the number of hidden nodes The more hidden nodes you have, the more accurate predictions the model will make. Try a few different numbers and see how it affects the performance. You can look at the losses dictionary for a metric of the network performance. If the number of hidden units is too low, then the model won't have enough space to learn and if it is too high there are too many options for the direction that the learning can take. The trick here is to find the right balance in number of hidden units you choose. 
End of explanation """ fig, ax = plt.subplots(figsize=(8,4)) mean, std = scaled_features['cnt'] predictions = network.run(test_features)*std + mean ax.plot(predictions[0], label='Prediction') ax.plot((test_targets['cnt']*std + mean).values, label='Data') ax.set_xlim(right=len(predictions)) ax.legend() dates = pd.to_datetime(rides.ix[test_data.index]['dteday']) dates = dates.apply(lambda d: d.strftime('%b %d')) ax.set_xticks(np.arange(len(dates))[12::24]) _ = ax.set_xticklabels(dates[12::24], rotation=45) """ Explanation: Check out your predictions Here, use the test data to view how well your network is modeling the data. If something is completely wrong here, make sure each step in your network is implemented correctly. End of explanation """ import unittest inputs = [0.5, -0.2, 0.1] targets = [0.4] test_w_i_h = np.array([[0.1, 0.4, -0.3], [-0.2, 0.5, 0.2]]) test_w_h_o = np.array([[0.3, -0.1]]) class TestMethods(unittest.TestCase): ########## # Unit tests for data loading ########## def test_data_path(self): # Test that file path to dataset has been unaltered self.assertTrue(data_path.lower() == 'bike-sharing-dataset/hour.csv') def test_data_loaded(self): # Test that data frame loaded self.assertTrue(isinstance(rides, pd.DataFrame)) ########## # Unit tests for network functionality ########## def test_activation(self): network = NeuralNetwork(3, 2, 1, 0.5) # Test that the activation function is a sigmoid self.assertTrue(np.all(network.activation_function(0.5) == 1/(1+np.exp(-0.5)))) def test_train(self): # Test that weights are updated correctly on training network = NeuralNetwork(3, 2, 1, 0.5) network.weights_input_to_hidden = test_w_i_h.copy() network.weights_hidden_to_output = test_w_h_o.copy() network.train(inputs, targets) self.assertTrue(np.allclose(network.weights_hidden_to_output, np.array([[ 0.37275328, -0.03172939]]))) self.assertTrue(np.allclose(network.weights_input_to_hidden, np.array([[ 0.10562014, 0.39775194, -0.29887597], [-0.20185996, 0.50074398, 0.19962801]]))) def test_run(self): # Test correctness of run method network = NeuralNetwork(3, 2, 1, 0.5) network.weights_input_to_hidden = test_w_i_h.copy() network.weights_hidden_to_output = test_w_h_o.copy() self.assertTrue(np.allclose(network.run(inputs), 0.09998924)) suite = unittest.TestLoader().loadTestsFromModule(TestMethods()) unittest.TextTestRunner().run(suite) """ Explanation: Thinking about your results Answer these questions about your results. How well does the model predict the data? Where does it fail? Why does it fail where it does? Note: You can edit the text in this cell by double clicking on it. When you want to render the text, press control + enter Your answer below The model does fairly well predicting the Bikeshare data in D.C. until around December 22. It begins to do well again around December 27. It begins to fail around December 22 because this begins the Christmas week. Although the dataset includes a "holiday" variable, there is no discrete indicator for Christmas week -- i.e., the effect of Christmas on the data extends past the 25th. Also, this could coincide with other holidays (e.g., Hanukkah, Kwanzaa) that may or may not have a similar effect as well as not occuring on a single day (e.g., Hanukkah). An improvement for the model would take into account the aforementioned "holiday effect". Unit tests Run these unit tests to check the correctness of your network implementation. These tests must all be successful to pass the project. End of explanation """
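# --- Illustrative sketch (not part of the original project code) ---
# A self-contained finite-difference check of the backpropagation math used
# above (sigmoid hidden layer, identity output, squared-error loss).  The tiny
# synthetic input, target and weight shapes below are assumptions made only
# for this example; they are not taken from the bike-sharing data.
import numpy as np

def forward(x, W_ih, W_ho):
    """Forward pass: x has shape (n_in,), W_ih (n_hidden, n_in), W_ho (1, n_hidden)."""
    hidden_out = 1.0 / (1.0 + np.exp(-(W_ih @ x)))   # sigmoid hidden activations
    y_hat = W_ho @ hidden_out                        # identity output activation
    return hidden_out, y_hat

def analytic_grads(x, t, W_ih, W_ho):
    """Backprop gradients of L = 0.5*(t - y_hat)**2 for both weight matrices."""
    hidden_out, y_hat = forward(x, W_ih, W_ho)
    delta_out = y_hat - t                                   # output delta (f'(x) = 1)
    grad_ho = np.outer(delta_out, hidden_out)
    delta_hid = (W_ho.T @ delta_out) * hidden_out * (1.0 - hidden_out)
    grad_ih = np.outer(delta_hid, x)
    return grad_ih, grad_ho

def numeric_grad(loss_fn, W, eps=1e-6):
    """Central-difference gradient of loss_fn with respect to matrix W (modified in place)."""
    grad = np.zeros_like(W)
    for idx in np.ndindex(*W.shape):
        old = W[idx]
        W[idx] = old + eps
        loss_plus = loss_fn()
        W[idx] = old - eps
        loss_minus = loss_fn()
        W[idx] = old
        grad[idx] = (loss_plus - loss_minus) / (2.0 * eps)
    return grad

rng = np.random.RandomState(0)
x = rng.randn(3)
t = np.array([0.4])
W_ih = 0.1 * rng.randn(2, 3)
W_ho = 0.1 * rng.randn(1, 2)

loss = lambda: 0.5 * ((t - forward(x, W_ih, W_ho)[1]) ** 2).sum()
g_ih, g_ho = analytic_grads(x, t, W_ih, W_ho)
print("input-to-hidden gradients match:", np.allclose(g_ih, numeric_grad(loss, W_ih), atol=1e-5))
print("hidden-to-output gradients match:", np.allclose(g_ho, numeric_grad(loss, W_ho), atol=1e-5))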
mwickert/SP-Comm-Tutorial-using-scikit-dsp-comm
tutorial_part3/Arduino_FSK.ipynb
bsd-2-clause
import sk_dsp_comm.pyaudio_helper as pah """ Explanation: Record a Short Message to be PCM Encoded Using pyaudio_helper record a short message that will ultimately be PCM encoded and stored in C header file along with a preamble/frame sync pattern. End of explanation """ pah.available_devices() """ Explanation: Find the mircophone inputs available on this PC for making the recording: End of explanation """ # define an audio record callback # Here we configure the callback to capture a one channel input def callback(in_data, frame_count, time_info, status): DSP_IO.DSP_callback_tic() # convert byte data to ndarray in_data_nda = np.fromstring(in_data, dtype=np.int16) #*********************************************** # DSP operations here # Here we apply a linear filter to the input x = in_data_nda.astype(float32) y = x #*********************************************** # Save data for later analysis # accumulate a new frame of samples DSP_IO.DSP_capture_add_samples(y) #*********************************************** # Convert from float back to int16 y = 0*y.astype(int16) DSP_IO.DSP_callback_toc() # Convert ndarray back to bytes #return (in_data_nda.tobytes(), pyaudio.paContinue) return y.tobytes(), pah.pyaudio.paContinue DSP_IO = pah.DSP_io_stream(callback,0,1,fs=8000) DSP_IO.stream(5) speech = DSP_IO.data_capture[4000:26000] speech /= max(abs(speech)) #ss.to_wav('speech.wav',8000,speech) Audio('speech.wav') fs, speech = ss.from_wav("speech.wav") plot(arange(len(speech))/8000,speech) title(r'Audio Capture for Arduino FSK') ylabel(r'Amplitude') xlabel(r'Time (s)') grid(); x_bits = dc.PCM_encode(speech,8) x_PN63 = dc.m_seq(6) x_PN63.shape def Bits_header(fname_out,h): """ Write a 0/1 data bits Header Files Mark Wickert July 2017 """ M = len(h) N = 25 # Coefficients per line f = open(fname_out,'wt') f.write('//define a Data Bits Array\n\n') f.write('#include <stdint.h>\n\n') f.write('#ifndef M_BIT\n') f.write('#define M_BIT %d\n' % M) f.write('#endif\n') f.write('/************************************************************************/\n'); f.write('/* The Data Bits */\n'); f.write('int8_t x_BITS[M_BIT] = {') kk = 0; for k in range(M): #k_mod = k % M if (kk < N-1) and (k < M-1): f.write('%1d,' % h[k]) kk += 1 elif (kk == N-1) & (k < M-1): f.write('%1d,\n' % h[k]) if k < M: f.write(' ') kk = 0 else: f.write('%1d' % h[k]) f.write('};\n') f.write('/************************************************************************/\n') f.close() #x_tx_bits = hstack((x_PN63,x_bits)) x_tx_bits = x_PN63 Bits_header('x_tx_bits.h',x_tx_bits) """ Explanation: We will use device index 0, which here is an on-board mic End of explanation """ # From the docstring #x = sdr.capture(Tc, fo=88700000.0, fs=2400000.0, gain=40, device_index=0) x = sdr.capture(Tc=8,fo=106.9e6,fs=2.4e6,gain=40,device_index=0) sdr.complex2wav('FSK_1kbps_biphase.wav',2400000,x) fs, x = sdr.wav2complex('FSK_1kbps_biphase.wav') # Plot the power spectral density of the raw capture psd(x,2**12,2400); title(r'FSK Captured at 106.9 MHz') xlabel(r'Frequency (kHz)'); xlim([-200,200]) def FSK_demod(x,fs=2.4e6,B1=200e3,N1=6,B2=10e3,N2=5): """ FSK Demod Defaults ok for Rb = 1 kbps bi-phase encoding """ b = signal.firwin(64,2*B1/float(fs)) # Filter and decimate (should be polyphase) y = signal.lfilter(b,1,x) z = ss.downsample(y,N1) # Apply complex baseband discriminator z_bb = sdr.discrim(z) z_bb -= mean(z_bb) # Design 2nd decimation lowpass filter bb = signal.firwin(64,2*B2/(float(fs)/N1)) # Filter and decimate zz_bb = signal.lfilter(bb,1,z_bb) # Decimate by 
N2 z_out = ss.downsample(zz_bb,N2) print('Done!') return z_bb, z_out # Demodulate the FSK signal z_bb, z_demod = FSK_demod(x,B1=200e3,N1=6,B2=10e3,N2=5) psd(z_demod,2**10,2400000/60); xlabel(r'Frequency (kHz)') """ Explanation: ** Arduino Maximum array lengths C char a[ 32767 ]; short b[ 16383 ]; int c[ 16383 ]; long d[ 8191 ]; long long e[ 4095 ]; float f[ 8191 ]; double g[ 8191 ]; //double is the same size as float The bit stream sent over the air is: ```C //define a Data Bits Array include <stdint.h> ifndef M_BIT define M_BIT 63 endif /**********/ / The Data Bits */ int16_t x_BITS[M_BIT] = {1,1,1,1,1,1,0,1,0,1,0,1,1,0,0,1,1,0,1,1,1,0,1,1,0, 1,0,0,1,0,0,1,1,1,0,0,0,1,0,1,1,1,1,0,0,1,0,1,0,0, 0,1,1,0,0,0,0,1,0,0,0,0,0}; /***********/ ``` End of explanation """ 2400000//30//1000 # Create a matched filter to recover the bi-phase encoded FSK Nb2 = 40 zf = signal.lfilter(hstack((ones(Nb2)/Nb2,-ones(Nb2)/Nb2)),1,z_demod) subplot(311) plot(z_demod[1000:2000]) grid(); subplot(312) plot(zf[1000:2000]) grid(); subplot(313) plot(sign(zf[1000:2000])) grid(); tight_layout() """ Explanation: The half bit period is $T_b/2 = 500\mu\text{s}$, but with biphase encoding the bit rate is $R_b = 1/T_b = 1$ kbps. The sampling rate following the final decimation stage is $2400000/(30) = 80$ ksps, so we have nominally $80000/1000 = 80$ samples per bit. End of explanation """ rx_symb_d,clk,track = sdr.sccs_bit_sync(sign(zf),80) # 80 = ~number of samples per bit plot(track[:500]) title(r'Bit Synch Sampling Index') ylabel(r'Per Bit Sampling Index') xlabel(r'Bits') grid() dc.strips(rx_symb_d[:100],63) title(r'Recovered Bits: A PN 63 Sequence') grid() """ Explanation: The Arduino clock and SDR clock are asynchronous, so over time the sampling instant used to convert the waveform back to 1's and 0's will have to advance of retard. It is the job of a bit synchronizer to manage this. End of explanation """
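# --- Illustrative sketch (not part of the original notebook) ---
# A pure-NumPy toy version of the bi-phase (Manchester) matched filter used a
# few cells above, so the detection idea can be run without an SDR capture.
# The bit pattern, the 80 samples-per-bit value and the noise level are
# arbitrary assumptions for this example.
import numpy as np

np.random.seed(1)
bits = np.array([1, 0, 1, 1, 0, 0, 1, 0])
Ns = 80                              # samples per bit, comparable to the ~80 sps above
half = Ns // 2

# Bi-phase encode: a '1' is +1 for the first half bit and -1 for the second,
# a '0' is the opposite polarity.
symbol = np.r_[np.ones(half), -np.ones(half)]
waveform = np.concatenate([symbol * (1.0 if b else -1.0) for b in bits])
waveform += 0.2 * np.random.randn(len(waveform))    # a little noise

# Matched filter = correlate with the bi-phase symbol template
h = symbol / Ns
mf_out = np.convolve(waveform, h[::-1], mode='full')

# Sample the matched-filter output at the end of each bit period and slice
samples = mf_out[Ns - 1::Ns][:len(bits)]
decoded = (samples > 0).astype(int)
print("sent bits   :", bits)
print("decoded bits:", decoded)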
pkreissl/espresso
doc/tutorials/visualization/visualization.ipynb
gpl-3.0
import numpy as np import sys import tqdm import logging import matplotlib.pyplot as plt import espressomd logging.basicConfig(level=logging.INFO, stream=sys.stdout) np.random.seed(42) matplotlib_notebook = True # toggle this off when outside IPython/Jupyter espressomd.assert_features("WCA") # interaction parameters (purely repulsive Lennard-Jones) lj_eps = 1.0 lj_sig = 1.0 # system system = espressomd.System(box_l=[10, 10, 10]) system.time_step = 0.0001 system.cell_system.skin = 0.4 # particle parameters (dense liquid) density = 0.7 n_part = int(system.volume() * density) # integration int_steps = 500 int_n_times = 100 ############################################################# # Setup System # ############################################################# # interaction setup system.non_bonded_inter[0, 0].wca.set_params(epsilon=lj_eps, sigma=lj_sig) # particle setup system.part.add(pos=np.random.random((n_part, 3)) * system.box_l) ############################################################# # Energy Minimization # ############################################################# system.integrator.set_steepest_descent(f_max=0, gamma=1.0, max_displacement=lj_eps * 0.01) # minimize until energy difference < 5% or energy < 1e-3 relative_energy_change = float('inf') relative_energy_change_threshold = 0.05 energy_threshold = 1e-3 energy_old = system.analysis.energy()['total'] logging.info(f'Energy minimization starts') logging.info(f'energy: {energy_old:.2e}') for i in range(20): system.integrator.run(50) energy = system.analysis.energy()['total'] logging.info(f'energy: {energy:.2e}') relative_energy_change = (energy_old - energy) / energy_old if relative_energy_change < relative_energy_change_threshold or energy < energy_threshold: break energy_old = energy else: logging.info(f'Energy minimization did not converge in {i + 1} cycles') system.integrator.set_vv() ############################################################# # Thermalization # ############################################################# system.thermostat.set_langevin(kT=1.0, gamma=1.0, seed=42) system.time = 0 # reset system timer energies = np.zeros((int_n_times, 2)) logging.info(f'Thermalization starts') for i in tqdm.tqdm(range(int_n_times)): system.integrator.run(int_steps) energies[i] = (system.time, system.analysis.energy()['total']) plt.plot(energies[:,0], energies[:,1]) plt.xlabel("Time") plt.ylabel("Energy") plt.show() """ Explanation: Visualization Introduction When you are running a simulation, it is often useful to see what is going on by visualizing particles in a 3D view or by plotting observables over time. That way, you can easily determine things like whether your choice of parameters has led to a stable simulation or whether your system has equilibrated. You may even be able to do your complete data analysis in real time as the simulation progresses. Thanks to ESPResSo's Python interface, we can make use of standard libraries like Mayavi or OpenGL (for interactive 3D views) and Matplotlib (for line graphs) for this purpose. We will also use NumPy, which both of these libraries depend on, to store data and perform some basic analysis. Simulation First, we need to set up a simulation. We will simulate a simple Lennard-Jones liquid. Particles will be placed randomly in the simulation box. We will energy-minimize the system to remove overlaps, and then thermalize the system with Langevin dynamics. We can measure the energy as a function of time using <tt>system.analysis.energy()</tt>. 
End of explanation """ # setup matplotlib canvas plt.xlabel("Time") plt.ylabel("Energy") plot, = plt.plot([0], [0]) if matplotlib_notebook: from IPython import display else: plt.show(block=False) energies = np.zeros((int_n_times, 2)) # setup matplotlib update function current_time = -1 def update_plot(): i = current_time if i < 3: return None plot.set_xdata(energies[:i + 1, 0]) plot.set_ydata(energies[:i + 1, 1]) plt.xlim(0, energies[i, 0]) plt.ylim(energies[:i + 1, 1].min(), energies[:i + 1, 1].max()) # refresh matplotlib GUI if matplotlib_notebook: display.clear_output(wait=True) display.display(plt.gcf()) else: plt.draw() plt.pause(0.01) # define a callback function def main(): global current_time for i in range(int_n_times): system.integrator.run(int_steps) energies[i] = (system.time, system.analysis.energy()['total']) current_time = i update_plot() if matplotlib_notebook: display.clear_output(wait=True) system.time = 0 # reset system timer main() if not matplotlib_notebook: plt.close() """ Explanation: Live plotting We will write a <tt>main()</tt> callback function to store the total energy at each integration run into a NumPy array. We will also create a function to draw a plot after each integration run. End of explanation """ import espressomd.visualization import threading visualizer = espressomd.visualization.openGLLive(system) # alternative: espressomd.visualization.mayaviLive(system) """ Explanation: Live visualization and plotting To interact with a live visualization, we need to move the main integration loop into a secondary thread and run the visualizer in the main thread (note that visualization or plotting cannot be run in secondary threads). First, choose a visualizer: End of explanation """ def main(): global current_time for i in range(int_n_times): system.integrator.run(int_steps) energies[i] = (system.time, system.analysis.energy()['total']) current_time = i visualizer.update() system.time = 0 # reset system timer """ Explanation: Then, re-define the <tt>main()</tt> function to run the visualizer: End of explanation """ # setup new matplotlib canvas if matplotlib_notebook: plt.xlabel("Time") plt.ylabel("Energy") plot, = plt.plot([0], [0]) # execute main() in a secondary thread t = threading.Thread(target=main) t.daemon = True t.start() # execute the visualizer in the main thread visualizer.register_callback(update_plot, interval=int_steps // 2) visualizer.start() """ Explanation: Next, create a secondary thread for the <tt>main()</tt> function. However, as we now have multiple threads, and the first thread is already used by the visualizer, we cannot call <tt>update_plot()</tt> from the <tt>main()</tt> anymore. The solution is to register the <tt>update_plot()</tt> function as a callback of the visualizer: End of explanation """
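# --- Illustrative sketch (not part of the original tutorial) ---
# The same "integration loop in a secondary thread, monitoring from the main
# thread" pattern as above, but with the ESPResSo integration replaced by a
# dummy workload so the control flow can be tried without a simulation.
# All names, timings and the fake observable are assumptions for this example.
import threading
import time
import collections

samples = collections.deque()        # appends from the worker are thread-safe
stop_requested = threading.Event()

def integration_loop(n_steps=50):
    """Stand-in for the main() integration loop running in the worker thread."""
    for step in range(n_steps):
        if stop_requested.is_set():
            break
        time.sleep(0.01)                            # pretend to integrate
        samples.append((step, float(step) ** 0.5))  # pretend "energy" sample

worker = threading.Thread(target=integration_loop)
worker.daemon = True
worker.start()

# The main thread plays the role of the visualizer/plot callback: it
# periodically consumes whatever the worker has produced so far.
while worker.is_alive():
    time.sleep(0.05)
    print("collected {} samples so far".format(len(samples)))

worker.join()
print("last sample:", samples[-1] if samples else None)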
qutip/qutip-notebooks
examples/landau-zener.ipynb
lgpl-3.0
%matplotlib inline import matplotlib.pyplot as plt import numpy as np from qutip import * import time def hamiltonian_t(t, args): """ evaluate the hamiltonian at time t. """ H0 = args[0] H1 = args[1] return H0 + t * H1 def qubit_integrate(delta, eps0, A, gamma1, gamma2, psi0, tlist): # Hamiltonian sx = sigmax() sz = sigmaz() sm = destroy(2) H0 = - delta/2.0 * sx - eps0/2.0 * sz H1 = - A/2.0 * sz # collapse operators c_op_list = [] n_th = 0.0 # zero temperature # relaxation rate = gamma1 * (1 + n_th) if rate > 0.0: c_op_list.append(sqrt(rate) * sm) # excitation rate = gamma1 * n_th if rate > 0.0: c_op_list.append(sqrt(rate) * sm.dag()) # dephasing rate = gamma2 if rate > 0.0: c_op_list.append(sqrt(rate) * sz) # evolve and calculate expectation values # method 1: function callback which returns the time-depdent qobj #H_args = (H0, H1) #output = mesolve(hamiltonian_t, psi0, tlist, c_op_list, [sm.dag() * sm], H_args) # method 2: a function callback that returns the coefficient for a qobj #H = [H0, [H1, lambda x,y: x]] #output = mesolve(H, psi0, tlist, c_op_list, [sm.dag() * sm], {}) # method 3: a string that defines the coefficient. The solver generates # and compiles C code using cython. This method is usually the fastest # for large systems or long time evolutions, but there is fixed-time # overhead that makes it inefficient for small and short-time evolutions. H = [H0, [H1, 't']] output = mesolve(H, psi0, tlist, c_op_list, [sm.dag() * sm], {}) return output.expect[0] # # set up the calculation # delta = 0.5 * 2 * np.pi # qubit sigma_x coefficient eps0 = 0.0 * 2 * np.pi # qubit sigma_z coefficient A = 2.0 * 2 * np.pi # sweep rate gamma1 = 0.0 # relaxation rate gamma2 = 0.0 # dephasing rate psi0 = basis(2,0) # initial state tlist = np.linspace(-20.0, 20.0, 5000) start_time = time.time() p_ex = qubit_integrate(delta, eps0, A, gamma1, gamma2, psi0, tlist) print('time elapsed = ' + str(time.time() - start_time)) fig, ax = plt.subplots(figsize=(12,8)) ax.plot(tlist, np.real(p_ex), 'b', tlist, np.real(1-p_ex), 'r') ax.plot(tlist, 1 - np.exp(-np.pi * delta **2 / (2 * A)) * np.ones(shape(tlist)), 'k') ax.set_xlabel('Time') ax.set_ylabel('Occupation probability') ax.set_title('Landau-Zener transition') ax.legend(("Excited state", "Ground state", "Landau-Zener formula"), loc=0); """ Explanation: QuTiP example: Landau-Zener transitions J.R. Johansson and P.D. 
Nation For more information about QuTiP see http://qutip.org End of explanation """ def qubit_integrate(delta, eps0, A, omega, gamma1, gamma2, psi0, tlist, option): # Hamiltonian sx = sigmax() sz = sigmaz() sm = destroy(2) H0 = - delta/2.0 * sx - eps0/2.0 * sz H1 = - A/2.0 * sz H = [H0, [H1, 'cos(w*t)']] H_args = {'w' : omega} # collapse operators c_op_list = [] n_th = 0.0 # zero temperature # relaxation rate = gamma1 * (1 + n_th) if rate > 0.0: c_op_list.append(np.sqrt(rate) * sm) # excitation rate = gamma1 * n_th if rate > 0.0: c_op_list.append(np.sqrt(rate) * sm.dag()) # dephasing rate = gamma2 if rate > 0.0: c_op_list.append(np.sqrt(rate) * sz) if option == "dynamics": # evolve and calculate expectation values output = mesolve(H, psi0, tlist, c_op_list, [sm.dag() * sm], H_args) return output.expect[0] else: # option = steadystate # find the propagator for one driving period T = 2*np.pi / omega U = propagator(H, T, c_op_list, H_args, options=Odeoptions(nsteps=5000)) # find the steady state of successive application of the propagator rho_ss = propagator_steadystate(U) return np.real(expect(sm.dag() * sm, rho_ss)) # # set up the calculation: a strongly driven two-level system # (repeated LZ transitions) # delta = 0.05 * 2 * np.pi # qubit sigma_x coefficient eps0 = 0.0 * 2 * np.pi # qubit sigma_z coefficient A = 2.0 * 2 * np.pi # sweep rate gamma1 = 0.0001 # relaxation rate gamma2 = 0.005 # dephasing rate psi0 = basis(2,0) # initial state omega = 0.05 * 2 * np.pi # driving frequency T = (2*np.pi)/omega # driving period tlist = np.linspace(0.0, 3 * T, 1500) """ Explanation: Steady state of strongly driven two-level system (repeated LZ transitions) End of explanation """ start_time = time.time() p_ex = qubit_integrate(delta, eps0, A, omega, gamma1, gamma2, psi0, tlist, "dynamics") print('dynamics: time elapsed = ' + str(time.time() - start_time)) start_time = time.time() p_ex_ss = qubit_integrate(delta, eps0, A, omega, gamma1, gamma2, psi0, tlist, "steadystate") print('steady state: time elapsed = ' + str(time.time() - start_time)) fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(12, 8)) ax1.plot(tlist, np.real(p_ex), 'b', tlist, np.real(1-p_ex), 'r', tlist, np.ones(np.shape(tlist)) * p_ex_ss, 'k') ax1.set_xlabel('Time') ax1.set_ylabel('Probability') ax1.set_title('Repeated Landau-Zener-like transitions') ax1.legend(("Excited state", "Ground state", "Excited steady state"), loc=0) ax2.plot(tlist, -delta/2.0 * np.ones(np.shape(tlist)), 'r') ax2.plot(tlist, -(eps0/2.0 + A/2.0 * np.cos(omega * tlist)), 'b') ax2.legend(("sx coeff", "sz coeff")) ax2.set_xlabel('Time') ax2.set_ylabel('Coefficients in the Hamiltonian'); """ Explanation: Steady state and dynamics for a fixed driving amplitude End of explanation """ start_time = time.time() A_vec = 2 * np.pi * np.linspace(0.0, 5.0, 100) p_ex_ss_vec = np.zeros(len(A_vec)) idx = 0 start_time = time.time() for A in A_vec: p_ex_ss_vec[idx] = qubit_integrate(delta, eps0, A, omega, gamma1, gamma2, psi0, tlist, "steadystate") idx += 1 print('time elapsed = ' + str(time.time() - start_time)) fig, ax = plt.subplots() ax.plot(A_vec/(2*np.pi), p_ex_ss_vec, 'b.-') ax.set_title("Steady state of repeated LZ transitions") ax.set_xlabel("driving amplitude") ax.set_ylabel("Occupation probability"); """ Explanation: Steady state as a function of driving amplitude End of explanation """ def hamiltonian_t(t, args): # # evaluate the hamiltonian at time t. 
# H0 = args[0] H1 = args[1] w = args[2] return H0 + H1 * np.sin(w * t) def sd_qubit_integrate(delta, eps0_vec, A_vec, w, gamma1, gamma2): # Hamiltonian sx = sigmax() sz = sigmaz() sm = destroy(2) # collapse operators c_op_list = [] n_th = 0.0 # zero temperature # relaxation rate = gamma1 * (1 + n_th) if rate > 0.0: c_op_list.append(np.sqrt(rate) * sm) # excitation rate = gamma1 * n_th if rate > 0.0: c_op_list.append(np.sqrt(rate) * sm.dag()) # dephasing rate = gamma2 if rate > 0.0: c_op_list.append(np.sqrt(rate) * sz) N = len(A_vec) M = len(eps0_vec) p_ex = np.zeros([N, M]) #, dtype=complex) T = 2 * np.pi / w sn = sm.dag() * sm # sweep over the driving amplitude and bias point, find the steady state # for each point and store in a matrix for n in range(0, N): for m in range(0, M): H0 = - delta/2.0 * sx - eps0_vec[m]/2.0 * sz H1 = - A_vec[n] * sx H = [H0, [H1, 'sin(w * t)']] H_args = {'w': omega} # find the propagator for one period of the time-dependent # hamiltonian U = propagator(H, T, c_op_list, H_args) # find the steady state of the driven system rho_ss = propagator_steadystate(U) p_ex[n, m] = np.real(expect(sn, rho_ss)) return p_ex # # set up the parameters # delta = 0.2 * 2 * np.pi # qubit sigma_x coefficient w = 1.0 * 2 * np.pi # qubit sigma_z coefficient A_vec = np.linspace(0.0, 4.0, 100) * 2 * np.pi # driving amplitude eps0_vec = np.linspace(0.0, 4.0, 100) * 2 * np.pi # qubit sigma-z bias point gamma1 = 0.05 # relaxation rate gamma2 = 0.0 # dephasing rate start_time = time.time() p_ex = sd_qubit_integrate(delta, eps0_vec, A_vec, w, gamma1, gamma2) print('time elapsed = ' + str(time.time() - start_time)) fig, ax = plt.subplots(figsize=(10,10)) p = ax.pcolor(A_vec, eps0_vec, np.real(p_ex), edgecolors='none') p.set_cmap('RdYlBu_r') ax.set_ylabel(r'$A/\omega$', fontsize=20) ax.set_xlabel(r'$\epsilon_0/\omega$', fontsize=20) ax.axis('tight') ax.set_title('Excitation probabilty of qubit, in steady state', fontsize=16); """ Explanation: Steadystate of a strongly driven two-level system as a function of driving amplitude and qubit bias Find the steady state of a strongly driven qubit as a function of driving amplitude and qubit bias. Note: This calculation can takes a long time. End of explanation """ from qutip.ipynbtools import version_table version_table() """ Explanation: Versions End of explanation """
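# --- Illustrative sketch (not part of the original notebook) ---
# Quick stand-alone evaluation of the Landau-Zener expression that the first
# figure above is compared against: the exponential factor
# exp(-pi * delta**2 / (2 * A)) and the corresponding 1 - exp(...) asymptote.
# The delta value mirrors the one used in that cell; the sweep rates A below
# are arbitrary choices for illustration.
import numpy as np

delta = 0.5 * 2 * np.pi                       # sigma_x coefficient, as above
for A in np.array([0.5, 1.0, 2.0, 5.0]) * 2 * np.pi:
    p_exp = np.exp(-np.pi * delta**2 / (2 * A))
    print("A = {:4.1f} * 2*pi : exp factor = {:.4f}, "
          "1 - exp factor (asymptote plotted above) = {:.4f}".format(
              A / (2 * np.pi), p_exp, 1 - p_exp))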
Upward-Spiral-Science/spect-team
Code/Assignment-11/GridSearch_YatingJing.ipynb
apache-2.0
import pandas as pd import numpy as np df_adhd = pd.read_csv('ADHD_Gender_rCBF.csv') df_bipolar = pd.read_csv('Bipolar_Gender_rCBF.csv') n1, n2 = df_adhd.shape[0], df_bipolar.shape[0] print 'Number of ADHD patients (without Bipolar) is', n1 print 'Number of Bipolar patients (without ADHD) is', n2 print 'Chance before gender separation is', float(n1) / (n1 + n2) # Separate the genders adhd1_id, adhd2_id = list(), list() bipolar1_id, bipolar2_id = list(), list() for i, g in df_adhd[['Patient_ID', 'Gender_id']].values: if g == 1: adhd1_id.append(i) elif g == 2: adhd2_id.append(i) for i, g in df_bipolar[['Patient_ID', 'Gender_id']].values: if g == 1: bipolar1_id.append(i) elif g == 2: bipolar2_id.append(i) print 'Number of Gender 1 ADHD patients (without Bipolar) is', len(adhd1_id) print 'Number of Gender 2 ADHD patients (without Bipolar) is', len(adhd2_id) print 'Number of Gender 1 Bipolar patients (without ADHD) is', len(bipolar1_id) print 'Number of Gender 2 Bipolar patients (without ADHD) is', len(bipolar2_id) # Separate ADHD data gender-wise df_adhd1 = df_adhd.loc[df_adhd['Patient_ID'].isin(adhd1_id)].drop(['Patient_ID', 'Gender_id'], axis=1) df_adhd2 = df_adhd.loc[df_adhd['Patient_ID'].isin(adhd2_id)].drop(['Patient_ID', 'Gender_id'], axis=1) # Separate Bipolar data gender-wise df_bipolar1 = df_bipolar.loc[df_bipolar['Patient_ID'].isin(bipolar1_id)].drop(['Patient_ID', 'Gender_id'], axis=1) df_bipolar2 = df_bipolar.loc[df_bipolar['Patient_ID'].isin(bipolar2_id)].drop(['Patient_ID', 'Gender_id'], axis=1) # Create disorder labels for classification # ADHD: 0, Bipolar: 1 n1_adhd, n1_bipolar = len(adhd1_id), len(bipolar1_id) n2_adhd, n2_bipolar = len(adhd2_id), len(bipolar2_id) # Labels for gender 1 y1 = [0] * n1_adhd + [1] * n1_bipolar # Labels for gender 2 y2 = [0] * n2_adhd + [1] * n2_bipolar print 'Shape check:' print 'ADHD:', df_adhd1.shape, df_adhd2.shape print 'Bipolar:', df_bipolar1.shape, df_bipolar2.shape # Gender1 data df1_all = pd.concat([df_adhd1, df_bipolar1], axis=0) # Gender2 data df2_all = pd.concat([df_adhd2, df_bipolar2], axis=0) print '\nDouble shape check:' print 'Gender 1:', df1_all.shape, len(y1) print 'Gender 2:', df2_all.shape, len(y2) # Compute chances chance1 = float(n1_adhd)/(n1_adhd + n1_bipolar) chance2 = float(n2_adhd)/(n2_adhd + n2_bipolar) print 'Chance for gender 1 is', chance1 print 'Chance for gender 2 is', chance2 """ Explanation: Grid Search for Hyperparameters - by Yating Jing From previous experiments, LLE pipelined with QDA in a gender-distinguished manner gave the best prediction result on the task of separating ADHD and Bipolar disorders. This notebook focuses on selecting the best hyperparameters for LLE, SVM and Random Forest using Grid Search method. 
End of explanation """ %matplotlib inline import matplotlib.pyplot as plt from sklearn.pipeline import Pipeline from sklearn import preprocessing from sklearn.manifold import LocallyLinearEmbedding from sklearn.grid_search import GridSearchCV from sklearn.qda import QDA from sklearn import svm from sklearn.ensemble import RandomForestClassifier from sklearn.cross_validation import KFold def lle_clf_pipe(X, y, gender, n_feats, k=10): ''' Train and test an LLE - Classifier Pipeline Args: X: training data (2D numpy matrix) y: labels (1D vector) gender: current gender genre (for plotting) n_feats: number of features used in classification k: number of folds (default=10) ''' lle = LocallyLinearEmbedding(n_components=n_feats) qda = QDA() svc = svm.SVC() rf = RandomForestClassifier() pipe_qda = Pipeline(steps=[('lle', lle), ('qda', qda)]) pipe_svc = Pipeline(steps=[('lle', lle), ('svc', svc)]) pipe_rf = Pipeline(steps=[('lle', lle), ('rf', rf)]) X_all, y_all = np.array(X), np.array(y) kf = KFold(len(X), n_folds=k) scores_qda, scores_svm, scores_rf = [], [], [] for i, (train_index, test_index) in enumerate(kf): print 'Fold ' + str(i) + ':', # Train test split X_train, X_test = X_all[train_index], X_all[test_index] y_train, y_test = y_all[train_index], y_all[test_index] ############################ QDA ############################ grid_qda = GridSearchCV(pipe_qda, dict(lle__n_neighbors=[25, 30], lle__method=['ltsa'])) grid_qda.fit(X_train, y_train) best_est_qda = grid_qda.best_estimator_ score_qda = best_est_qda.score(X_test, y_test) scores_qda.append(score_qda) print 'QDA accuracy is', score_qda ############################ SVM ############################ grid_svm = GridSearchCV(pipe_svc, dict(lle__n_neighbors=[25, 30], lle__method=['modified', 'ltsa'], svc__kernel=('linear', 'rbf'), svc__C=[1, 5, 10])) grid_svm.fit(X_train, y_train) best_est_svm = grid_svm.best_estimator_ score_svm = best_est_svm.score(X_test, y_test) scores_svm.append(score_svm) print ' SVM accuracy is', score_svm ############################ Random Forest ############################ # grid_search_estimator = GridSearchCV(pipe, # dict(lle__n_neighbors=[20, 25, 30], # lle__eigen_solver=['auto', 'arpack', 'dense'], # lle__method=['standard', 'hessian', 'modified', 'ltsa'], # lle__neighbors_algorithm=['kd_tree', 'ball_tree'], # rf__criterion=('gini', 'entropy'), # rf__max_features=['sqrt', 'log2', None], # rf__min_samples_leaf=[1, 3, 5], # rf__bootstrap=[True, False])) grid_rf = GridSearchCV(pipe_rf, dict(lle__n_neighbors=[25, 30], lle__method=['modified', 'ltsa'], rf__criterion=('gini', 'entropy'), rf__max_features=['sqrt', 'log2', None], rf__bootstrap=[True, False])) grid_rf.fit(X_train, y_train) best_est_rf = grid_rf.best_estimator_ score_rf = best_est_rf.score(X_test, y_test) scores_rf.append(score_rf) print ' Random Forest accuracy is', score_rf scores_qda = np.array(scores_qda) scores_svm = np.array(scores_svm) scores_rf = np.array(scores_rf) scores = [scores_qda, scores_svm, scores_rf] clf_names = ['QDA', 'SVM', 'Random Forest'] accs = [] print '\n' for clf_name, scores in zip(clf_names, scores): acc, acc_std = scores.mean(), scores.std() accs.append(acc) print clf_name + ' accuracy is %0.4f (+/- %0.3f)' % (acc, acc_std) # Visualize classifier performance x = range(len(accs)) width = 0.1/0.3 plt.bar(x, accs, width) # Compute chance n0, n1 = y.count(0), y.count(1) chance = max(n0, n1) / float(n0 + n1) print 'Chance is', chance fig_title = gender + ' Classifier Performance on ' + 'LLE features' plt.title(fig_title, y=1.08) 
plt.xticks(x, clf_names, rotation=50) plt.xlabel('Classifier') plt.gca().xaxis.set_label_coords(1.1, -0.025) plt.ylabel('Accuracy') plt.axhline(chance, color='red', linestyle='--', label='chance') # plot chance plt.legend(loc='center left', bbox_to_anchor=(1, 0.85)) """ Explanation: Dimensionality Reduction - Classification Pipeline End of explanation """ print 'Gender 1 Analysis using LLE features:\n' X1 = df1_all.values lle_clf_pipe(X1, y1, 'Gender 1', 15, k=10) """ Explanation: Gender 1 ADHD v.s. Bipolar Analysis End of explanation """ print 'Gender 2 Analysis using LLE features:\n' X2 = df2_all.values lle_clf_pipe(X2, y2, 'Gender 2', 15, k=10) """ Explanation: Gender 2 ADHD v.s. Bipolar Analysis End of explanation """
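# --- Illustrative sketch (not part of the original analysis) ---
# A minimal pipeline + grid-search example on synthetic data, showing the same
# idea (tuning the LLE embedding and the classifier jointly) without the rCBF
# CSV files.  Note two assumptions: it is written for Python 3 and for the
# current scikit-learn module layout (model_selection, svm), whereas the
# notebook above targets an older release (grid_search, qda) and Python 2.
import numpy as np
from sklearn.datasets import make_classification
from sklearn.pipeline import Pipeline
from sklearn.manifold import LocallyLinearEmbedding
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV, cross_val_score

X_toy, y_toy = make_classification(n_samples=200, n_features=50,
                                   n_informative=10, random_state=0)

pipe = Pipeline([('lle', LocallyLinearEmbedding(n_components=5)),
                 ('svc', SVC())])

param_grid = {'lle__n_neighbors': [10, 20, 30],
              'svc__C': [1, 10],
              'svc__kernel': ['linear', 'rbf']}

search = GridSearchCV(pipe, param_grid, cv=3)
search.fit(X_toy, y_toy)
print('best parameters:', search.best_params_)
print('cross-validated accuracy: %.3f'
      % cross_val_score(search.best_estimator_, X_toy, y_toy, cv=5).mean())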
stinebuu/nest-simulator
doc/userdoc/model_details/aeif_models_implementation.ipynb
gpl-2.0
# Install assimulo package in the current Jupyter kernel import sys !{sys.executable} -m pip install assimulo import numpy as np from scipy.integrate import odeint import matplotlib.pyplot as plt %matplotlib inline plt.rcParams['figure.figsize'] = (15, 6) """ Explanation: NEST implementation of the aeif models Hans Ekkehard Plesser and Tanguy Fardet, 2016-09-09 This notebook provides a reference solution for the Adaptive Exponential Integrate and Fire (AEIF) neuronal model and compares it with several numerical implementations using simpler solvers. In particular this justifies the change of implementation in September 2016 to make the simulation closer to the reference solution. Position of the problem Basics The equations governing the evolution of the AEIF model are $$\left\lbrace\begin{array}{rcl} C_m\dot{V} &=& -g_L(V-E_L) + g_L \Delta_T e^{\frac{V-V_T}{\Delta_T}} + I_e + I_s(t) -w\ \tau_s\dot{w} &=& a(V-E_L) - w \end{array}\right.$$ when $V < V_{peak}$ (threshold/spike detection). Once a spike occurs, we apply the reset conditions: $$V = V_r \quad \text{and} \quad w = w + b$$ Divergence In the AEIF model, the spike is generated by the exponential divergence. In practice, this means that just before threshold crossing (threshpassing), the argument of the exponential can become very large. This can lead to numerical overflow or numerical instabilities in the solver, all the more if $V_{peak}$ is large, or if $\Delta_T$ is small. Tested solutions Old implementation (before September 2016) The orginal solution was to bind the exponential argument to be smaller than 10 (ad hoc value to be close to the original implementation in BRIAN). As will be shown in the notebook, this solution does not converge to the reference LSODAR solution. New implementation The new implementation does not bind the argument of the exponential, but the potential itself, since according to the theoretical model, $V$ should never get larger than $V_{peak}$. We will show that this solution is not only closer to the reference solution in general, but also converges towards it as the timestep gets smaller. Reference solution The reference solution is implemented using the LSODAR solver which is described and compared in the following references: http://www.radford.edu/~thompson/RP/eventlocation.pdf (papers citing this one) http://www.sciencedirect.com/science/article/pii/S0377042712000684 http://www.radford.edu/~thompson/RP/rootfinding.pdf https://computation.llnl.gov/casc/nsde/pubs/u88007.pdf http://www.cs.ucsb.edu/~cse/Files/SCE000136.pdf http://www.sciencedirect.com/science/article/pii/0377042789903348 http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.455.2976&rep=rep1&type=pdf https://theses.lib.vt.edu/theses/available/etd-12092002-105032/unrestricted/etd.pdf Technical details and requirements Implementation of the functions The old and new implementations are reproduced using Scipy and are called by the scipy_aeif function The NEST implementations are not shown here, but keep in mind that for a given time resolution, they are closer to the reference result than the scipy implementation since the GSL implementation uses a RK45 adaptive solver. The reference solution using LSODAR, called reference_aeif, is implemented through the assimulo package. 
Requirements To run this notebook, you need: numpy and scipy assimulo matplotlib End of explanation """ def rhs_aeif_new(y, _, p): ''' New implementation bounding V < V_peak Parameters ---------- y : list Vector containing the state variables [V, w] _ : unused var p : Params instance Object containing the neuronal parameters. Returns ------- dv : double Derivative of V dw : double Derivative of w ''' v = min(y[0], p.Vpeak) w = y[1] Ispike = 0. if p.DeltaT != 0.: Ispike = p.gL * p.DeltaT * np.exp((v-p.vT)/p.DeltaT) dv = (-p.gL*(v-p.EL) + Ispike - w + p.Ie)/p.Cm dw = (p.a * (v-p.EL) - w) / p.tau_w return dv, dw def rhs_aeif_old(y, _, p): ''' Old implementation bounding the argument of the exponential function (e_arg < 10.). Parameters ---------- y : list Vector containing the state variables [V, w] _ : unused var p : Params instance Object containing the neuronal parameters. Returns ------- dv : double Derivative of V dw : double Derivative of w ''' v = y[0] w = y[1] Ispike = 0. if p.DeltaT != 0.: e_arg = min((v-p.vT)/p.DeltaT, 10.) Ispike = p.gL * p.DeltaT * np.exp(e_arg) dv = (-p.gL*(v-p.EL) + Ispike - w + p.Ie)/p.Cm dw = (p.a * (v-p.EL) - w) / p.tau_w return dv, dw """ Explanation: Scipy functions mimicking the NEST code Right hand side functions End of explanation """ def scipy_aeif(p, f, simtime, dt): ''' Complete aeif model using scipy `odeint` solver. Parameters ---------- p : Params instance Object containing the neuronal parameters. f : function Right-hand side function (either `rhs_aeif_old` or `rhs_aeif_new`) simtime : double Duration of the simulation (will run between 0 and tmax) dt : double Time increment. Returns ------- t : list Times at which the neuronal state was evaluated. y : list State values associated to the times in `t` s : list Spike times. vs : list Values of `V` just before the spike. ws : list Values of `w` just before the spike fos : list List of dictionaries containing additional output information from `odeint` ''' t = np.arange(0, simtime, dt) # time axis n = len(t) y = np.zeros((n, 2)) # V, w y[0, 0] = p.EL # Initial: (V_0, w_0) = (E_L, 5.) y[0, 1] = 5. # Initial: (V_0, w_0) = (E_L, 5.) s = [] # spike times vs = [] # membrane potential at spike before reset ws = [] # w at spike before step fos = [] # full output dict from odeint() # imitate NEST: update time-step by time-step for k in range(1, n): # solve ODE from t_k-1 to t_k d, fo = odeint(f, y[k-1, :], t[k-1:k+1], (p, ), full_output=True) y[k, :] = d[1, :] fos.append(fo) # check for threshold crossing if y[k, 0] >= p.Vpeak: s.append(t[k]) vs.append(y[k, 0]) ws.append(y[k, 1]) y[k, 0] = p.Vreset # reset y[k, 1] += p.b # step return t, y, s, vs, ws, fos """ Explanation: Complete model End of explanation """ from assimulo.solvers import LSODAR from assimulo.problem import Explicit_Problem class Extended_Problem(Explicit_Problem): # need variables here for access sw0 = [ False ] ts_spikes = [] ws_spikes = [] Vs_spikes = [] def __init__(self, p): self.p = p self.y0 = [self.p.EL, 5.] # V, w # reset variables self.ts_spikes = [] self.ws_spikes = [] self.Vs_spikes = [] #The right-hand-side function (rhs) def rhs(self, t, y, sw): """ This is the function we are trying to simulate (aeif model). """ V, w = y[0], y[1] Ispike = 0. 
if self.p.DeltaT != 0.: Ispike = self.p.gL * self.p.DeltaT * np.exp((V-self.p.vT)/self.p.DeltaT) dotV = ( -self.p.gL*(V-self.p.EL) + Ispike + self.p.Ie - w ) / self.p.Cm dotW = ( self.p.a*(V-self.p.EL) - w ) / self.p.tau_w return np.array([dotV, dotW]) # Sets a name to our function name = 'AEIF_nosyn' # The event function def state_events(self, t, y, sw): """ This is our function that keeps track of our events. When the sign of any of the events has changed, we have an event. """ event_0 = -5 if y[0] >= self.p.Vpeak else 5 # spike if event_0 < 0: if not self.ts_spikes: self.ts_spikes.append(t) self.Vs_spikes.append(y[0]) self.ws_spikes.append(y[1]) elif self.ts_spikes and not np.isclose(t, self.ts_spikes[-1], 0.01): self.ts_spikes.append(t) self.Vs_spikes.append(y[0]) self.ws_spikes.append(y[1]) return np.array([event_0]) #Responsible for handling the events. def handle_event(self, solver, event_info): """ Event handling. This functions is called when Assimulo finds an event as specified by the event functions. """ ev = event_info event_info = event_info[0] # only look at the state events information. if event_info[0] > 0: solver.sw[0] = True solver.y[0] = self.p.Vreset solver.y[1] += self.p.b else: solver.sw[0] = False def initialize(self, solver): solver.h_sol=[] solver.nq_sol=[] def handle_result(self, solver, t, y): Explicit_Problem.handle_result(self, solver, t, y) # Extra output for algorithm analysis if solver.report_continuously: h, nq = solver.get_algorithm_data() solver.h_sol.extend([h]) solver.nq_sol.extend([nq]) """ Explanation: LSODAR reference solution Setting assimulo class End of explanation """ def reference_aeif(p, simtime): ''' Reference aeif model using LSODAR. Parameters ---------- p : Params instance Object containing the neuronal parameters. f : function Right-hand side function (either `rhs_aeif_old` or `rhs_aeif_new`) simtime : double Duration of the simulation (will run between 0 and tmax) dt : double Time increment. Returns ------- t : list Times at which the neuronal state was evaluated. y : list State values associated to the times in `t` s : list Spike times. vs : list Values of `V` just before the spike. ws : list Values of `w` just before the spike h : list List of the minimal time increment at each step. ''' #Create an instance of the problem exp_mod = Extended_Problem(p) #Create the problem exp_sim = LSODAR(exp_mod) #Create the solver exp_sim.atol=1.e-8 exp_sim.report_continuously = True exp_sim.store_event_points = True exp_sim.verbosity = 30 #Simulate t, y = exp_sim.simulate(simtime) #Simulate 10 seconds return t, y, exp_mod.ts_spikes, exp_mod.Vs_spikes, exp_mod.ws_spikes, exp_sim.h_sol """ Explanation: LSODAR reference model End of explanation """ # Regular spiking aeif_param = { 'V_reset': -58., 'V_peak': 0.0, 'V_th': -50., 'I_e': 420., 'g_L': 11., 'tau_w': 300., 'E_L': -70., 'Delta_T': 2., 'a': 3., 'b': 0., 'C_m': 200., 'V_m': -70., #! must be equal to E_L 'w': 5., #! must be equal to 5. 'tau_syn_ex': 0.2 } # Bursting aeif_param2 = { 'V_reset': -46., 'V_peak': 0.0, 'V_th': -50., 'I_e': 500.0, 'g_L': 10., 'tau_w': 120., 'E_L': -58., 'Delta_T': 2., 'a': 2., 'b': 100., 'C_m': 200., 'V_m': -58., #! must be equal to E_L 'w': 5., #! must be equal to 5. } # Close to chaos (use resol < 0.005 and simtime = 200) aeif_param3 = { 'V_reset': -48., 'V_peak': 0.0, 'V_th': -50., 'I_e': 160., 'g_L': 12., 'tau_w': 130., 'E_L': -60., 'Delta_T': 2., 'a': -11., 'b': 30., 'C_m': 100., 'V_m': -60., #! must be equal to E_L 'w': 5., #! must be equal to 5. 
} class Params(object): ''' Class giving access to the neuronal parameters. ''' def __init__(self): self.params = aeif_param self.Vpeak = aeif_param["V_peak"] self.Vreset = aeif_param["V_reset"] self.gL = aeif_param["g_L"] self.Cm = aeif_param["C_m"] self.EL = aeif_param["E_L"] self.DeltaT = aeif_param["Delta_T"] self.tau_w = aeif_param["tau_w"] self.a = aeif_param["a"] self.b = aeif_param["b"] self.vT = aeif_param["V_th"] self.Ie = aeif_param["I_e"] p = Params() """ Explanation: Set the parameters and simulate the models Params (chose a dictionary) End of explanation """ # Parameters of the simulation simtime = 100. resol = 0.01 t_old, y_old, s_old, vs_old, ws_old, fo_old = scipy_aeif(p, rhs_aeif_old, simtime, resol) t_new, y_new, s_new, vs_new, ws_new, fo_new = scipy_aeif(p, rhs_aeif_new, simtime, resol) t_ref, y_ref, s_ref, vs_ref, ws_ref, h_ref = reference_aeif(p, simtime) """ Explanation: Simulate the 3 implementations End of explanation """ fig, ax = plt.subplots() ax2 = ax.twinx() # Plot the potentials ax.plot(t_ref, y_ref[:,0], linestyle="-", label="V ref.") ax.plot(t_old, y_old[:,0], linestyle="-.", label="V old") ax.plot(t_new, y_new[:,0], linestyle="--", label="V new") # Plot the adaptation variables ax2.plot(t_ref, y_ref[:,1], linestyle="-", c="k", label="w ref.") ax2.plot(t_old, y_old[:,1], linestyle="-.", c="m", label="w old") ax2.plot(t_new, y_new[:,1], linestyle="--", c="y", label="w new") # Show ax.set_xlim([0., simtime]) ax.set_ylim([-65., 40.]) ax.set_xlabel("Time (ms)") ax.set_ylabel("V (mV)") ax2.set_ylim([-20., 20.]) ax2.set_ylabel("w (pA)") ax.legend(loc=6) ax2.legend(loc=2) plt.show() """ Explanation: Plot the results Zoom out End of explanation """ fig, ax = plt.subplots() ax2 = ax.twinx() # Plot the potentials ax.plot(t_ref, y_ref[:,0], linestyle="-", label="V ref.") ax.plot(t_old, y_old[:,0], linestyle="-.", label="V old") ax.plot(t_new, y_new[:,0], linestyle="--", label="V new") # Plot the adaptation variables ax2.plot(t_ref, y_ref[:,1], linestyle="-", c="k", label="w ref.") ax2.plot(t_old, y_old[:,1], linestyle="-.", c="y", label="w old") ax2.plot(t_new, y_new[:,1], linestyle="--", c="m", label="w new") ax.set_xlim([90., 92.]) ax.set_ylim([-65., 40.]) ax.set_xlabel("Time (ms)") ax.set_ylabel("V (mV)") ax2.set_ylim([17.5, 18.5]) ax2.set_ylabel("w (pA)") ax.legend(loc=5) ax2.legend(loc=2) plt.show() """ Explanation: Zoom in End of explanation """ print("spike times:\n-----------") print("ref", np.around(s_ref, 3)) # ref lsodar print("old", np.around(s_old, 3)) print("new", np.around(s_new, 3)) print("\nV at spike time:\n---------------") print("ref", np.around(vs_ref, 3)) # ref lsodar print("old", np.around(vs_old, 3)) print("new", np.around(vs_new, 3)) print("\nw at spike time:\n---------------") print("ref", np.around(ws_ref, 3)) # ref lsodar print("old", np.around(ws_old, 3)) print("new", np.around(ws_new, 3)) """ Explanation: Compare properties at spike times End of explanation """ plt.semilogy(t_ref, h_ref, label='Reference') plt.semilogy(t_old[1:], [d['hu'] for d in fo_old], linewidth=2, label='Old') plt.semilogy(t_new[1:], [d['hu'] for d in fo_new], label='New') plt.legend(loc=6) plt.show(); """ Explanation: Size of minimal integration timestep End of explanation """ plt.plot(t_ref, y_ref[:,0], label="V ref.") resolutions = (0.1, 0.01, 0.001) di_res = {} for resol in resolutions: t_old, y_old, _, _, _, _ = scipy_aeif(p, rhs_aeif_old, simtime, resol) t_new, y_new, _, _, _, _ = scipy_aeif(p, rhs_aeif_new, simtime, resol) di_res[resol] = (t_old, y_old, t_new, 
y_new) plt.plot(t_old, y_old[:,0], linestyle=":", label="V old, r={}".format(resol)) plt.plot(t_new, y_new[:,0], linestyle="--", linewidth=1.5, label="V new, r={}".format(resol)) plt.xlim(0., simtime) plt.xlabel("Time (ms)") plt.ylabel("V (mV)") plt.legend(loc=2) plt.show(); """ Explanation: Convergence towards LSODAR reference with step size Zoom out End of explanation """ plt.plot(t_ref, y_ref[:,0], label="V ref.") for resol in resolutions: t_old, y_old = di_res[resol][:2] t_new, y_new = di_res[resol][2:] plt.plot(t_old, y_old[:,0], linestyle="--", label="V old, r={}".format(resol)) plt.plot(t_new, y_new[:,0], linestyle="-.", linewidth=2., label="V new, r={}".format(resol)) plt.xlim(90., 92.) plt.ylim([-62., 2.]) plt.xlabel("Time (ms)") plt.ylabel("V (mV)") plt.legend(loc=2) plt.show(); """ Explanation: Zoom in End of explanation """
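# --- Illustrative sketch (not part of the original notebook) ---
# Stand-alone comparison of the two ways the spike-generating exponential term
# is bounded, evaluated at a few membrane potentials.  The parameter values are
# taken from the "regular spiking" dictionary above; the list of test voltages
# is an arbitrary choice for illustration (quantities treated as dimensionless
# here, as in the model equations).
import numpy as np

gL, DeltaT, vT, Vpeak = 11.0, 2.0, -50.0, 0.0

def I_spike_old(V):
    """Old scheme: clip the exponent at 10 (as in rhs_aeif_old)."""
    return gL * DeltaT * np.exp(min((V - vT) / DeltaT, 10.0))

def I_spike_new(V):
    """New scheme: clip the membrane potential at V_peak (as in rhs_aeif_new)."""
    return gL * DeltaT * np.exp((min(V, Vpeak) - vT) / DeltaT)

for V in (-55.0, -45.0, -20.0, 0.0, 20.0):
    print("V = {:6.1f} : old = {:12.4g}, new = {:12.4g}".format(
        V, I_spike_old(V), I_spike_new(V)))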
phoebe-project/phoebe2-docs
development/examples/extinction_BK_binary.ipynb
gpl-3.0
#!pip install -I "phoebe>=2.4,<2.5" """ Explanation: Extinction: B-K Binary In this example, we'll reproduce Figures 1 and 2 in the extinction release paper (Jones et al. 2020). "Let us begin with a rather extreme case, a synthetic binary comprised of a hot, B-type main sequence star(M=6.5 Msol,Teff=17000 K, and R=4.2 Rsol) anda cool K-type giant (M=1.8 Msol,Teff=4000 K, and R=39.5 Rsol)vin a 1000 day orbit -- a system where, while the temperature difference is large, the luminosities are similar." (Jones et al. 2020) <img src="jones+20_fig1.png" alt="Figure 1" width="800px"/> <img src="jones+20_fig2.png" alt="Figure 2" width="400px"/> Setup Let's first make sure we have the latest version of PHOEBE 2.4 installed (uncomment this line if running in an online notebook session such as colab). End of explanation """ import matplotlib matplotlib.rcParams['text.usetex'] = True matplotlib.rcParams['pdf.fonttype'] = 42 matplotlib.rcParams['ps.fonttype'] = 42 matplotlib.rcParams['mathtext.fontset'] = 'stix' matplotlib.rcParams['font.family'] = 'STIXGeneral' from matplotlib import gridspec %matplotlib inline import phoebe from phoebe import u # units import numpy as np import matplotlib.pyplot as plt logger = phoebe.logger('error') b = phoebe.default_binary() """ Explanation: As always, let's do imports and initialize a logger and a new bundle. End of explanation """ b.set_value('period', component='binary', value=1000.0*u.d) b.set_value('teff', component='primary', value=17000*u.K) b.set_value('teff', component='secondary', value=4000*u.K) b.set_value('requiv', component='primary', value=4.22173036*u.solRad) b.set_value('requiv', component='secondary', value=40.732435*u.solRad) b.flip_constraint('mass@primary', solve_for='sma@binary') b.set_value('mass', component='primary', value=6.5*u.solMass) b.flip_constraint('mass@secondary', solve_for='q') b.set_value('mass', component='secondary', value=1.9145*u.solMass) """ Explanation: First we'll define the system parameters End of explanation """ times = phoebe.linspace(-20, 20, 101) b.add_dataset('lc', times=times, dataset='B', passband="Johnson:B") b.add_dataset('lc', times=times, dataset='R', passband="Cousins:R") b.add_dataset('lc', times=times, dataset='KEP', passband="Kepler:mean") """ Explanation: And then create three light curve datasets at the same times, but in different passbands End of explanation """ b.set_value_all('atm', 'ck2004') b.set_value_all('gravb_bol', 0.0) b.set_value_all('ld_mode_bol', 'manual') b.set_value_all('ld_func_bol', 'linear') b.set_value_all('ld_coeffs_bol', [0.0]) """ Explanation: Now we'll set some atmosphere and limb-darkening options End of explanation """ b.flip_constraint('ebv', solve_for='Av') """ Explanation: And flip the extinction constraint so we can provide E(B-V). 
End of explanation """ b.set_value('ebv', 0.0) b.run_compute(distortion_method='rotstar', irrad_method='none', model='noext') """ Explanation: For comparison, we'll run a model without extinction End of explanation """ b.set_value('ebv', 1.0) b.run_compute(distortion_method='rotstar', irrad_method='none', model='ext') """ Explanation: and then another model with extinction End of explanation """ Bextmags=-2.5*np.log10(b['value@fluxes@B@ext@model']) Bnoextmags=-2.5*np.log10(b['value@fluxes@B@noext@model']) Bextmags_norm=Bextmags-Bextmags.min()+1 Bnoextmags_norm=Bnoextmags-Bnoextmags.min()+1 Bresid=Bextmags_norm-Bnoextmags_norm Rextmags=-2.5*np.log10(b['value@fluxes@R@ext@model']) Rnoextmags=-2.5*np.log10(b['value@fluxes@R@noext@model']) Rextmags_norm=Rextmags-Rextmags.min()+1 Rnoextmags_norm=Rnoextmags-Rnoextmags.min()+1 Rresid=Rextmags_norm-Rnoextmags_norm fig=plt.figure(figsize=(12,6)) gs=gridspec.GridSpec(2,2,height_ratios=[4,1],width_ratios=[1,1]) ax=plt.subplot(gs[0,0]) ax.plot(b['value@times@B@noext@model']/1000,Bnoextmags_norm,color='k',linestyle="--") ax.plot(b['value@times@B@ext@model']/1000,Bextmags_norm,color='k',linestyle="-") ax.set_ylabel('Magnitude') ax.set_xticklabels([]) ax.set_xlim([-0.02,0.02]) ax.set_ylim([3.5,0.8]) ax.set_title('(a) Johnson B') ax2=plt.subplot(gs[0,1]) ax2.plot(b['value@times@R@noext@model']/1000,Rnoextmags_norm,color='k',linestyle="--") ax2.plot(b['value@times@R@ext@model']/1000,Rextmags_norm,color='k',linestyle="-") ax2.set_ylabel('Magnitude') ax2.set_xticklabels([]) ax2.set_xlim([-0.02,0.02]) ax2.set_ylim([3.5,0.8]) ax2.set_title('(b) Cousins Rc') ax_1=plt.subplot(gs[1,0]) ax_1.plot(b['value@times@B@noext@model']/1000,Bresid,color='k',linestyle='-') ax_1.set_ylabel(r'$\Delta m$') ax_1.set_xlabel('Phase') ax_1.set_xlim([-0.02,0.02]) ax_1.set_ylim([0.05,-0.3]) ax_1.axhline(y=0., linestyle='dashed',color='k',linewidth=0.5) ax2_1=plt.subplot(gs[1,1]) ax2_1.plot(b['value@times@R@noext@model']/1000,Rresid,color='k',linestyle='-') ax2_1.set_ylabel(r'$\Delta m$') ax2_1.set_xlabel('Phase') ax2_1.set_xlim([-0.02,0.02]) ax2_1.set_ylim([0.05,-0.3]) ax2_1.axhline(y=0., linestyle='dashed',color='k',linewidth=0.5) plt.tight_layout() fig.canvas.draw() KEPextmags=-2.5*np.log10(b['value@fluxes@KEP@ext@model']) KEPnoextmags=-2.5*np.log10(b['value@fluxes@KEP@noext@model']) KEPextmags_norm=KEPextmags-KEPextmags.min()+1 KEPnoextmags_norm=KEPnoextmags-KEPnoextmags.min()+1 KEPresid=KEPextmags_norm-KEPnoextmags_norm fig=plt.figure(figsize=(6,6)) gs=gridspec.GridSpec(2,1,height_ratios=[4,1]) ax=plt.subplot(gs[0]) ax.plot(b['value@times@KEP@noext@model']/1000,KEPnoextmags_norm,color='k',linestyle="--") ax.plot(b['value@times@KEP@ext@model']/1000,KEPextmags_norm,color='k',linestyle="-") ax.set_ylabel('Magnitude') ax.set_xticklabels([]) ax.set_xlim([-0.02,0.02]) ax.set_ylim([3.5,0.8]) ax.set_title('Kepler K') ax_1=plt.subplot(gs[1]) ax_1.plot(b['value@times@KEP@noext@model']/1000,KEPresid,color='k',linestyle='-') ax_1.set_ylabel(r'$\Delta m$') ax_1.set_xlabel('Phase') ax_1.set_xlim([-0.02,0.02]) ax_1.set_ylim([0.05,-0.3]) ax_1.axhline(y=0., linestyle='dashed',color='k',linewidth=0.5) plt.tight_layout() fig.canvas.draw() """ Explanation: Lastly, we'll convert the model fluxes into magnitudes and format the figures. End of explanation """
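# --- Illustrative sketch (not part of the original example) ---
# The post-processing above converts fluxes to magnitudes with
# m = -2.5*log10(flux) and then differences the extinguished and
# unextinguished curves.  The toy arrays below repeat that bookkeeping to show
# that a wavelength-independent (grey) dimming cancels in the residuals, plus
# the usual A_V = R_V * E(B-V) relation with R_V = 3.1 assumed for
# illustration.  None of the numbers here come from the PHOEBE model.
import numpy as np

def flux_to_relative_mag(flux):
    """Magnitudes normalised so the brightest point sits at m = 1, as above."""
    mags = -2.5 * np.log10(np.asarray(flux, dtype=float))
    return mags - mags.min() + 1.0

f_noext = np.array([10.0, 9.5, 4.0, 9.5, 10.0])   # toy light curve
f_ext = 0.4 * f_noext                             # uniform (grey) dimming
residuals = flux_to_relative_mag(f_ext) - flux_to_relative_mag(f_noext)
print("residuals for grey dimming (should be ~0):", residuals)

ebv = 1.0
Rv = 3.1
print("A_V = R_V * E(B-V) =", Rv * ebv, "mag")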
OpenAstronomy/workshop_sunpy_astropy
05-images-and-plotting-instructor.ipynb
mit
# Get some import statements out of the way. from __future__ import division, print_function %matplotlib inline import matplotlib.pyplot as plt from skimage import data # Load the data.moon() image and print it moon = data.moon() print(moon) """ Explanation: Images and Image Plotting <section class="objectives panel panel-warning"> <div class="panel-heading"> <h3><span class="fa fa-certificate"></span> Learning Objectives </h3> </div> <ul> <li>Understand the concept of arrays as images</li> <li>Load and display an image</li> <li>Use array slicing operations to crop an image</li> <li>Animate an image plot</li> </ul> </section> Arrays as images All photographic images represent a measurement of how much light hits the receiver. For instance, the Hubble image below is obtained by measuring the brightnesses of distant stars: With traditional optical cameras, this measurement results in an image which is continuous, as it is projected directly onto paper. In order to store images digitally, they need to be divided into discrete chunks, pixels, each of which contains the value of the measurement in that small portion of the image. In this representation, an image is simply a grid of numbers, which allows it to be easily stored as an array with a shape equal to the resolution of the image. The scikit-image (abbreviated to skimage in code) module contains some sample images in the data submodule that we can use to demonstrate this principle. End of explanation """ # Output the image minimum, mean and maximum. print('Image min:', moon.min(), '; Image mean:', moon.mean(), '; Image max: ', moon.max()) # Output the array dtype. print('Data type:', moon.dtype) # Output image size. print('Image size:', moon.shape) """ Explanation: Once read in as an array, the image can be processed in the same ways as any other array. For instance, we can easily find the highest, lowest and mean values of the image, the type of the variables stored in the array, and the resolution of the image: End of explanation """ # Display image array with imshow() plt.imshow(moon) """ Explanation: This tells us that the image has a resolution of 512 x 512 pixels, and is stored as integers between 0 and 255. This is a common way of normalising images, but they can just as easily be stored as floats between 0 and 1. More commonly with astronomical data though, an image will consist of photon counts (i.e. integers), so the minimum will be 0 and any upper limit will likely be defined by the capabilities of the instrument. Plotting images While storing an image as a grid of numbers is very useful for analysis, we still need to be able to visually inspect the image. This can be achieved with plt.imshow(), which allocates a colour to every element in the array according to its value. End of explanation """ plt.imshow(moon) plt.colorbar() """ Explanation: When plotting an image in this way, you will often need to know what actual values correspond to the colours. To find this out, we can draw a colour bar alongside the image which indicates the mapping of values to colours: End of explanation """ # Display the image with a better colour map. plt.imshow(moon, cmap='gray') plt.colorbar() """ Explanation: You may notice that the default mapping of values to colours doesn't show the features of this image very well. Fortunately, matplotlib provides a large variety of colour maps which are suitable for various different purposes (more on this later). 
plt.imshow() has a cmap keyword argument which can be passed a string defining the desired colour map. End of explanation """ # 1 # Load an image from skimage.data my_image = data.coins() # Print image shape and size print(my_image.shape, my_image.size) # Print data type and min&max of array print(my_image.dtype, my_image.min(), my_image.max()) # 2 # Display my image plt.imshow(my_image, cmap='cubehelix') plt.colorbar() """ Explanation: The full list of available colour maps (for matplotlib 1.5) can be found here. <section class="callout panel panel-info"> <div class="panel-heading"> <h3><span class="fa fa-certificate"></span> Colour maps </h3> </div> As the images above demonstrate, the choice of colour map can make a significant difference to how your image appears, and is therefore extremely important. This is partly due to discrepancies between how quickly the colour map changes and how quickly the data changes, and partly due to the fact that [different people see colour differently](https://en.wikipedia.org/wiki/The_dress_%28viral_phenomenon%29).<br/><br/> In particular, matplotlib's default `'jet'` colour map is notoriously bad for displaying data. This is because it is not designed taking into account how the human eye percieves colour. This leads to some parts of the colour map appearing to change very slowly, while other parts of the colour map shift from one hue to another in a very short space. The practical effect of this is to both smooth some parts of the image, obscuring the data, and to create artificial features in the image where the data is smooth.<br/><br/> There is no single 'best' colour map - different colour maps display different kinds of image most clearly - but the `jet` map is almost never an appropriate choice for displaying any data. In general, colour maps which vary luminosity uniformly (such as the `'gray'` colour map above or the `'cubehelix'` colour map) tend to be better. Plots of various colour maps' luminosities can be found [here](http://matplotlib.org/users/colormaps.html).<br/><br/> For a good background on this topic and a description of a decent all-round colour map scheme, see [this paper](http://www.kennethmoreland.com/color-maps/ColorMapsExpanded.pdf). </section> <section class="challenges panel panel-success"> <div class="panel-heading"> <h3><span class="fa fa-pencil"></span> Load and plot an image </h3> </div> <ol> <li> Try loading and plotting some other image arrays from `skimage.data`. Choose one of these images and print some basic information about the values it contains.</li> <li> Plot your chosen image with `imshow()`. Apply a colour map of your choice and display a colour bar.</li> </ol> </section> End of explanation """ plt.imshow(moon, cmap='gray', vmin=75, vmax=150) plt.colorbar() """ Explanation: Value limits The default behaviour of imshow() in terms of colour mapping is that the colours cover the full range of the data so that the lower end (blue, in the plots above) represents the smallest value in the array, and the higher end (red) represents the greatest value. This is fine if rest of the values are fairly evenly spaced between these extremes. However, if we have a very low minimum or very high maximum compared to the rest of the image, this default scaling is unhelpful. To deal with this problem, imshow() allows you to set the minimum and maximum values used for the scaling with the vmin and vmax keywords. 
End of explanation """ plt.imshow(moon, cmap='gray', origin='lower') """ Explanation: As you can see, this allows us to increase the contrast of the image at the cost of discounting extreme values, or we can include a broader range of values but see less detail. Similar effects can also be achieved with the norm keyword, which allows you to set how imshow() scales values in order to map them to colours (linear or logarithmic scaling, for example). Axes You will notice in the above plots that the axes are labelled with the pixel coordinates of the image. You will also notice that the origin of the axes is in the top left corner rather than the bottom left. This is a convention in image-drawing, but can be changed if necessary by setting the origin keyword to 'lower' when calling imshow(): End of explanation """ plt.imshow(moon, cmap='gray', origin='lower', extent=[-1, 1, 0, 2]) """ Explanation: imshow() also allows you to change the upper and lower values of each axis, and the appropriate tick labels will be drawn. This feature can be used to apply physical spatial scales to the image (if you know them) rather than going purely on pixel positions, which may be less useful. This is done with the extent keyword, which takes a list of values corresponding to lower and upper x values and the lower and upper y values (in that order). End of explanation """ # 1 # Display the coins image with adjusted value range plt.imshow(my_image, cmap='cubehelix', vmin=60, vmax=180) plt.colorbar() # 2 pixelsize = 0.1 plt.imshow(my_image, cmap='cubehelix', vmin=60, vmax=180, extent=[0, my_image.shape[1]*pixelsize, 0, my_image.shape[0]*pixelsize]) plt.xlabel('x (cm)') plt.ylabel('y (cm)') plt.colorbar() """ Explanation: <section class="objectives panel panel-success"> <div class="panel-heading"> <h3><span class="fa fa-pencil"></span> Value and axes limits </h3> </div> <ol> <li>Plot your chosen image again. Try changing the upper and lower limits of the plotted values to adjust how the image appears.</li> <li>Assume that each pixel of your image has some defined size (you decide a value - not unity). Adjust the axis limits accordingly so that the ticks correspond to physical distances rather than pixel values.</li> </ol> </section> End of explanation """ # Load the Hubble image from fig/galaxy.jpg galaxy_image = plt.imread('fig/galaxy.jpg') # PLot the image with imshow() plt.imshow(galaxy_image) """ Explanation: Loading an image from a file The image used in the examples above uses an image which is already supplied as an array by scikit-image. But what if we have been given an image file and we want to read it into Python? There are many ways to do this, depending on the type of file. Typically in astronomy, images are stored in FITS format, which will be covered in detail later on. For now, we will return to the example of the Hubble image from earlier, which is stored in this repo in fig/galaxy.jpg. To load image data from a JPEG, we need the plt.imread() function. This takes a filename which points at an image file and loads it into Python as a NumPy array. End of explanation """ galaxy_image.shape """ Explanation: You may notice that instead of using a colour map, this image has been plotted in full colour so it looks the same as the original image above. 
We can see why if we inspect the shape of the image array: End of explanation """ plt.imshow(galaxy_image[..., 0], cmap='Reds') # Plot the red layer of the image plt.show() plt.imshow(galaxy_image[..., 1], cmap='Greens') # Plot the green layer of the image plt.show() plt.imshow(galaxy_image[..., 2], cmap='Blues') # Plot the blue layer of the image plt.show() """ Explanation: Rather than just being a 2D array with a shape equivalent to the image resolution, the array has an extra dimension of length 3. This is because the image has been split into red, blue and green components, each of which are stored in a slice of the array. When given an n x m x 3 array like this, imshow() interprets it as an RGB image and combines the layers into a single image. However, if we wish to see the individual components they can be accessed and displayed by taking a slice of the array corresponding to the layer we wish to use. End of explanation """ # Make a grid of 1 x 3 plots and show the Hubble image on the right. fig, ax = plt.subplots(1, 3) ax[2].imshow(galaxy_image) plt.show() """ Explanation: plt.subplots() As we've already seen, multiple axes can be added to a single figure using plt.add_subplot(). There is also a function that allows you to define several axes and their arrangement at the same time as the figure, plt.subplots(). This function returns a tuple of two objects - the figure and an array of axes objects with the specified shape. Referencing the axes array allows things to be plotted on the individual subplots. End of explanation """ # 1 my_image = data.coffee() # Create 2x2 grid of subplots fig, axes = plt.subplots(2, 2) # Plot image and image components with appropriate colour maps. axes[0, 0].imshow(my_image) axes[0, 1].imshow(my_image[..., 0], cmap='Reds') axes[1, 0].imshow(my_image[..., 1], cmap='Greens') axes[1, 1].imshow(my_image[..., 2], cmap='Blues') """ Explanation: <section class="objectives panel panel-success"> <div class="panel-heading"> <h3><span class="fa fa-pencil"></span> Image components </h3> </div> <ol> <li>Create a 2x2 grid of plots using `plt.subplots()`. For either the Hubble image or another RGB image of your choice from `skimage.data`, plot the true colour image and each RGB component on one of these subplots.</li> </ol> </section> End of explanation """ # Crop the image in x and y directions but keep all three colour components. cropped_galaxy = galaxy_image[240:, 200:400, :] plt.imshow(cropped_galaxy) """ Explanation: Slicing images We saw above that an RGB image array can be sliced to access one colour component. But the array can also be sliced in one or both of the image dimensions to crop the image. For instance, the smaller galaxy at the bottom of the image above occupies the space between about 200 and 400 pixels in the x direction, and stretches from about 240 pixels to the edge of the image in the y direction. This information allows us to slice the array appropriately: End of explanation """ # Crop athe image and use only every other pixel in each direction to reduce the resolution. lowres_galaxy = galaxy_image[240::2, 200:400:2, :] plt.imshow(lowres_galaxy) """ Explanation: Similarly, if we need to reduce the image resolution for whatever reason, this can be done using array slicing operations. 
End of explanation """ # Crop the image and use only every other pixel in each direction to reduce the resolution. lowres_galaxy = galaxy_image[240::2, 200:400:2, :] plt.imshow(lowres_galaxy) """ Explanation: IMPORTANT NOTE: you should probably never do the above with actual astronomical data, because you're throwing away three quarters of your measurement. There are better ways to reduce image resolution which preserve much more of the data's integrity, and we will talk about these later. But it's useful to remember you can reduce an image's size like this, as long as you don't need that image for any actual science. Interpolation In order to display a smooth image, imshow() automatically interpolates to find what values should be displayed between the given data points. The default interpolation scheme is 'linear', which interpolates linearly between points, as you might expect. The interpolation can be changed with yet another keyword in imshow(). Here are a few examples: End of explanation """ # Image with default interpolation fig, ax = plt.subplots(2, 2, figsize=(16, 16)) smallim = galaxy_image[:100, 250:350, :] ax[0, 0].imshow(smallim) # Default (linear) interpolation ax[0, 1].imshow(smallim, interpolation='bicubic') # Bicubic interpolation ax[1, 0].imshow(smallim, interpolation='nearest') # Nearest-neighbour interpolation ax[1, 1].imshow(smallim, interpolation='none') # No interpolation """ Explanation: This can be a useful way to change how the image appears. For instance, if the exact values of the data are extremely important, little or no interpolation may be appropriate so the original values are easier to discern, whereas a high level of interpolation can be used if the smoothness of the image is more important than the actual numbers. Note that the 'none' in the imshow() call above is NOT the same as None. None tells imshow() you are not passing it a variable for the interpolation keyword, so it uses the default, whereas 'none' explicitly tells it not to interpolate. Animation We have already seen animation of data points on basic plots in a previous lesson. Animating an image is no different in principle. To demonstrate this, we'll set up an animation that shows the Hubble image and then cycles through each of the RGB components. This task requires all the same parts as an animation of a line or scatter plot: First, we'll need matplotlib.animation, a figure and an axes. Then we'll plot the initial image we want to display and return the plot object to a variable we can use for the animation. Now we need to define the function that will adjust the image. This function, like the ones we used for line plots, needs to take as input an integer which counts the number of 'frames', adjust the displayed data and return the adjusted object. Then we can define the animation object and plot it to see the finished product.
End of explanation """ # 1 fig, ax = plt.subplots() y0 = 0 y_ext = 120 display = plt.imshow(galaxy_image[y0:y0+y_ext, 200:400]) def pan(i): y1 = y0 + i display.set_data(galaxy_image[y1:y1+y_ext, 200:400]) return display panimation = ani.FuncAnimation(fig, pan, range(galaxy_image.shape[0]-y_ext), interval=10) plt.show() """ Explanation: <section class="objectives panel panel-success"> <div class="panel-heading"> <h3><span class="fa fa-pencil"></span> Moving around an image </h3> </div> <ol> <li>Plot a small portion at one end of your chosen image. Then animate this plot so that it pans across to the other side of the image.</li> </ol> </section> End of explanation """ from astropy.io import fits import sunpy.data #sunpy.data.download_sample_data() aia_file = fits.open('/home/drew/sunpy/data/sample_data/aia.lev1.193A_2013-09-21T16_00_06.84Z.image_lev1.fits') aia_file.verify('fix') print(type(aia_file)) print(aia_file) print(type(aia_file[1].data), type(aia_file[1].header)) print(aia_file[1].data) print(aia_file[1].header['NAXIS1']) print(aia_file[1].header['NAXIS2']) print(aia_file[1].header['DATE-OBS']) for tag in aia_file[1].header.keys(): print(tag, aia_file[1].header[tag]) """ Explanation: FITS files A type of image file that you are quite likely to come across in astronomy is FITS (Flexible Image Transport System) files. This file type is used for storing various types of astronomical image data, including solar images. The advantage of FITS files is that as well as storing the numerical data which makes up the image, they also store a header associated with these data. The header usually contains information such as the spatial extent of the image, the resolution, the time at which the observation was taken, and various other properties of the data which may be useful when using the image for research. These pairs of data arrays and associated headers are stored in a HDU (Header-Data Unit). Several HDUs can be stored in a FITS file, so they are kept in a container called HDUList. End of explanation """
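# A short illustrative follow-up (assuming the `aia_file` HDUList opened above is still
# available): the data attribute of an HDU is just a NumPy array, so everything covered
# earlier about imshow(), colour maps and value limits applies to FITS images directly.
# The percentile-based limits are only one possible choice of scaling.
import numpy as np
import matplotlib.pyplot as plt

aia_data = aia_file[1].data
vmin, vmax = np.percentile(aia_data, [1, 99])  # clip extreme values, as with vmin/vmax earlier

fig, ax = plt.subplots(1)
im = ax.imshow(aia_data, cmap='gray', origin='lower', vmin=vmin, vmax=vmax)
ax.set_title(aia_file[1].header['DATE-OBS'])  # one of the header keywords printed above
fig.colorbar(im)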
opalytics/opalytics-ticdat
examples/expert_section/notebooks/gurobi_toehold_problem.ipynb
bsd-2-clause
def exception_thrown(f): try: f() except Exception as e: return str(e) """ Explanation: The toehold problem The "toehold problem" is named after a tech support response from Gurobi. The nature of the problem is that in order to take advantage of the algebraic constraint modeling provided by gurobipy, then the Model.addConstr function needs a "toehold" with which to build a Constr. (Note that Constr is not part of the public package. You shouldn't try to build it directly, but instead let gurobipy create it for you as part of writing out algebraic constraints). So what do I mean, specifically? To begin, let's make a function that captures exceptions, since I'm going to be making mistakes and deliberately throwing exceptions. End of explanation """ import gurobipy as gu m = gu.Model() v = m.addVar(name = "goodstuff") m.update() exception_thrown(lambda : m.addConstr(v <= 100, name = "c1")) m.update() m.getConstrs() """ Explanation: Let's make a constraint without creating any problems. (You'll need to understand lambda to understand this code). End of explanation """ exception_thrown(lambda : m.addConstr(0 <= 300, name = "not_going_to_be_added_to_model")) """ Explanation: Ok, now let's screw up and make a bad constraint. This might happen to you, so pay attention please. End of explanation """ exception_thrown(lambda : m.addConstr(10 == 30, name = "not_going_to_be_added_to_model")) """ Explanation: The numbers and constraint type aren't important. End of explanation """ exception_thrown(lambda : m.addConstr(sum(_ for x in m.getVars() if "bad" in x.VarName.lower()) <= 100, name = "not_going_to_be_added_either")) """ Explanation: Now, why would you ever try to write a dumb constraint like that? Well, it happens naturally in the real world quite easily. Suppose you were summing over a set of variables that happened to be empty as part of building a constraint. End of explanation """ [_ for x in m.getVars() if "bad" in x.VarName.lower()] sum(_ for x in m.getVars() if "bad" in x.VarName.lower()) """ Explanation: How did this happen? It's because we used sum. This returns the number zero if it is passed an empty sequence. End of explanation """ gu.quicksum(_ for x in m.getVars() if "bad" in x.VarName.lower()) """ Explanation: So what's the solution? Usually, it just involves using gurobipy.quicksum. End of explanation """ exception_thrown(lambda : m.addConstr(gu.quicksum(_ for x in m.getVars() if "bad" in x.VarName.lower()) <= 100, name = "c2")) m.update() m.getConstrs() """ Explanation: See what happened there? gu.quicksum will give us a toehold. It's not just faster than sum, it's smarter too. So when we use quicksum, the constraint can be added. End of explanation """
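# A small defensive pattern, sketched here as one possible convention (the constraint
# name "c3" and the variable filter are purely illustrative): always build sums over
# variables with gu.quicksum, even when the sequence might turn out to be empty, so
# that addConstr receives an expression it can get a toehold on rather than the plain
# integer 0.
terms = [x for x in m.getVars() if "bad" in x.VarName.lower()]  # may well be empty
lhs = gu.quicksum(terms)  # a (possibly empty) linear expression, never the number 0
m.addConstr(lhs <= 100, name="c3")
m.update()
print(m.getConstrs())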
timkpaine/lantern
experimental/widgets/Using Interact.ipynb
apache-2.0
from __future__ import print_function from ipywidgets import interact, interactive, fixed, interact_manual import ipywidgets as widgets """ Explanation: Using Interact The interact function (ipywidgets.interact) automatically creates user interface (UI) controls for exploring code and data interactively. It is the easiest way to get started using IPython's widgets. End of explanation """ def f(x): return x """ Explanation: Basic interact At the most basic level, interact autogenerates UI controls for function arguments, and then calls the function with those arguments when you manipulate the controls interactively. To use interact, you need to define a function that you want to explore. Here is a function that prints its only argument x. End of explanation """ interact(f, x=10); """ Explanation: When you pass this function as the first argument to interact along with an integer keyword argument (x=10), a slider is generated and bound to the function parameter. End of explanation """ interact(f, x=True); """ Explanation: When you move the slider, the function is called, which prints the current value of x. If you pass True or False, interact will generate a checkbox: End of explanation """ interact(f, x='Hi there!'); """ Explanation: If you pass a string, interact will generate a text area. End of explanation """ @interact(x=True, y=1.0) def g(x, y): return (x, y) """ Explanation: interact can also be used as a decorator. This allows you to define a function and interact with it in a single shot. As this example shows, interact also works with functions that have multiple arguments. End of explanation """ def h(p, q): return (p, q) """ Explanation: Fixing arguments using fixed There are times when you may want to explore a function using interact, but fix one or more of its arguments to specific values. This can be accomplished by wrapping values with the fixed function. End of explanation """ interact(h, p=5, q=fixed(20)); """ Explanation: When we call interact, we pass fixed(20) for q to hold it fixed at a value of 20. End of explanation """ interact(f, x=widgets.IntSlider(min=-10,max=30,step=1,value=10)); """ Explanation: Notice that a slider is only produced for p as the value of q is fixed. Widget abbreviations When you pass an integer-valued keyword argument of 10 (x=10) to interact, it generates an integer-valued slider control with a range of [-10,+3*10]. In this case, 10 is an abbreviation for an actual slider widget: python IntSlider(min=-10,max=30,step=1,value=10) In fact, we can get the same result if we pass this IntSlider as the keyword argument for x: End of explanation """ interact(f, x=(0,4)); """ Explanation: This examples clarifies how interact proceses its keyword arguments: If the keyword argument is a Widget instance with a value attribute, that widget is used. Any widget with a value attribute can be used, even custom ones. Otherwise, the value is treated as a widget abbreviation that is converted to a widget before it is used. 
The following table gives an overview of different widget abbreviations: <table class="table table-condensed table-bordered"> <tr><td><strong>Keyword argument</strong></td><td><strong>Widget</strong></td></tr> <tr><td>`True` or `False`</td><td>Checkbox</td></tr> <tr><td>`'Hi there'`</td><td>Text</td></tr> <tr><td>`value` or `(min,max)` or `(min,max,step)` if integers are passed</td><td>IntSlider</td></tr> <tr><td>`value` or `(min,max)` or `(min,max,step)` if floats are passed</td><td>FloatSlider</td></tr> <tr><td>`['orange','apple']` or `{'one':1,'two':2}`</td><td>Dropdown</td></tr> </table> Note that a dropdown is used if a list or a dict is given (signifying discrete choices), and a slider is used if a tuple is given (signifying a range). You have seen how the checkbox and textarea widgets work above. Here, more details about the different abbreviations for sliders and dropdowns are given. If a 2-tuple of integers is passed (min,max), an integer-valued slider is produced with those minimum and maximum values (inclusively). In this case, the default step size of 1 is used. End of explanation """ interact(f, x=(0,8,2)); """ Explanation: If a 3-tuple of integers is passed (min,max,step), the step size can also be set. End of explanation """ interact(f, x=(0.0,10.0)); """ Explanation: A float-valued slider is produced if the elements of the tuples are floats. Here the minimum is 0.0, the maximum is 10.0 and step size is 0.1 (the default). End of explanation """ interact(f, x=(0.0,10.0,0.01)); """ Explanation: The step size can be changed by passing a third element in the tuple. End of explanation """ @interact(x=(0.0,20.0,0.5)) def h(x=5.5): return x """ Explanation: For both integer and float-valued sliders, you can pick the initial value of the widget by passing a default keyword argument to the underlying Python function. Here we set the initial value of a float slider to 5.5. End of explanation """ interact(f, x=['apples','oranges']); """ Explanation: Dropdown menus are constructed by passing a list of strings. In this case, the strings are both used as the names in the dropdown menu UI and passed to the underlying Python function. End of explanation """ interact(f, x=[('one', 10), ('two', 20)]); """ Explanation: If you want a dropdown menu that passes non-string values to the Python function, you can pass a list of (label, value) pairs. End of explanation """ from IPython.display import display def f(a, b): display(a + b) return a+b """ Explanation: interactive In addition to interact, IPython provides another function, interactive, that is useful when you want to reuse the widgets that are produced or access the data that is bound to the UI controls. Note that unlike interact, the return value of the function will not be displayed automatically, but you can display a value inside the function with IPython.display.display. Here is a function that returns the sum of its two arguments and displays them. The display line may be omitted if you don't want to show the result of the function. End of explanation """ w = interactive(f, a=10, b=20) """ Explanation: Unlike interact, interactive returns a Widget instance rather than immediately displaying the widget. End of explanation """ type(w) """ Explanation: The widget is an interactive, a subclass of VBox, which is a container for other widgets. End of explanation """ w.children """ Explanation: The children of the interactive are two integer-valued sliders and an output widget, produced by the widget abbreviations above. 
End of explanation """ display(w) """ Explanation: To actually display the widgets, you can use IPython's display function. End of explanation """ w.kwargs """ Explanation: At this point, the UI controls work just like they would if interact had been used. You can manipulate them interactively and the function will be called. However, the widget instance returned by interactive also gives you access to the current keyword arguments and return value of the underlying Python function. Here are the current keyword arguments. If you rerun this cell after manipulating the sliders, the values will have changed. End of explanation """ w.result """ Explanation: Here is the current return value of the function. End of explanation """ def slow_function(i): print(int(i),list(x for x in range(int(i)) if str(x)==str(x)[::-1] and str(x**2)==str(x**2)[::-1])) return %%time slow_function(1e6) """ Explanation: Disabling continuous updates When interacting with long running functions, realtime feedback is a burden instead of being helpful. See the following example: End of explanation """ from ipywidgets import FloatSlider interact(slow_function,i=FloatSlider(min=1e5, max=1e7, step=1e5)); """ Explanation: Notice that the output is updated even while dragging the mouse on the slider. This is not useful for long running functions due to lagging: End of explanation """ interact_manual(slow_function,i=FloatSlider(min=1e5, max=1e7, step=1e5)); """ Explanation: There are two ways to mitigate this. You can either only execute on demand, or restrict execution to mouse release events. interact_manual The interact_manual function provides a variant of interaction that allows you to restrict execution so it is only done on demand. A button is added to the interact controls that allows you to trigger an execute event. End of explanation """ interact(slow_function,i=FloatSlider(min=1e5, max=1e7, step=1e5, continuous_update=False)); """ Explanation: continuous_update If you are using slider widgets, you can set the continuous_update kwarg to False. continuous_update is a kwarg of slider widgets that restricts executions to mouse release events. End of explanation """ a = widgets.IntSlider() b = widgets.IntSlider() c = widgets.IntSlider() ui = widgets.HBox([a, b, c]) def f(a, b, c): print((a, b, c)) out = widgets.interactive_output(f, {'a': a, 'b': b, 'c': c}) display(ui, out) """ Explanation: interactive_output interactive_output provides additional flexibility: you can control how the UI elements are laid out. Unlike interact, interactive, and interact_manual, interactive_output does not generate a user interface for the widgets. This is powerful, because it means you can create a widget, put it in a box, and then pass the widget to interactive_output, and have control over the widget and its layout. End of explanation """ x_widget = FloatSlider(min=0.0, max=10.0, step=0.05) y_widget = FloatSlider(min=0.5, max=10.0, step=0.05, value=5.0) def update_x_range(*args): x_widget.max = 2.0 * y_widget.value y_widget.observe(update_x_range, 'value') def printer(x, y): print(x, y) interact(printer,x=x_widget, y=y_widget); """ Explanation: Arguments that are dependent on each other Arguments that are dependent on each other can be expressed manually using observe. See the following example, where one variable is used to describe the bounds of another. For more information, please see the widget events example notebook. 
End of explanation """ %matplotlib inline from ipywidgets import interactive import matplotlib.pyplot as plt import numpy as np def f(m, b): plt.figure(2) x = np.linspace(-10, 10, num=1000) plt.plot(x, m * x + b) plt.ylim(-5, 5) plt.show() interactive_plot = interactive(f, m=(-2.0, 2.0), b=(-3, 3, 0.5)) output = interactive_plot.children[-1] output.layout.height = '350px' interactive_plot """ Explanation: Flickering and jumping output On occasion, you may notice interact output flickering and jumping, causing the notebook scroll position to change as the output is updated. The interactive control has a layout, so we can set its height to an appropriate value (currently chosen manually) so that it will not change size as it is updated. End of explanation """
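# A short sketch combining pieces shown above (continuous_update=False sliders,
# interactive_output, and a fixed output height) into one layout. The slider ranges,
# the 50px height and the HBox arrangement are arbitrary choices, not requirements.
import ipywidgets as widgets
from IPython.display import display

m_slider = widgets.FloatSlider(min=-2.0, max=2.0, step=0.1, value=1.0,
                               description='m', continuous_update=False)
b_slider = widgets.FloatSlider(min=-3.0, max=3.0, step=0.5, value=0.0,
                               description='b', continuous_update=False)

def show_line(m, b):
    print('y = {}x + {}'.format(m, b))

out = widgets.interactive_output(show_line, {'m': m_slider, 'b': b_slider})
out.layout.height = '50px'  # fix the output height so the page does not jump on update
display(widgets.HBox([m_slider, b_slider]), out)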
boada/planckClusters
analysis_ir/notebooks/05. Make real models.ipynb
mit
cosmo = LambdaCDM(H0=70, Om0=0.3, Ode0=0.7, Tcmb0=2.725) """ Explanation: Setup Cosmology End of explanation """ # check to make sure we have defined the bpz filter path if not os.getenv('EZGAL_FILTERS'): os.environ['EZGAL_FILTERS'] = (f'{os.environ["HOME"]}/Projects/planckClusters/MOSAICpipe/bpz-1.99.3/FILTER/') model = ezgal.model('bc03_ssp_z_0.02_salp.model') model = model.make_exponential(1) model.set_cosmology(Om=cosmo.Om0, Ol=cosmo.Ode0, h=cosmo.h, w=cosmo.w(0)) model.add_filter('g_MOSAICII.res', name='g') model.add_filter('r_MOSAICII.res', name='r') model.add_filter('i_MOSAICII.res', name='i') model.add_filter('z_MOSAICII.res', name='z') model.add_filter('K_KittPeak.res', name='K') # Blanton 2003 Normalization Mr_star = -20.44 + 5 * np.log10(cosmo.h) # abs mag. # set the normalization model.set_normalization('sloan_r', 0.1, Mr_star, vega=False) """ Explanation: Create Stellar Population End of explanation """ # desired formation redshift zf = 6.0 # fetch an array of redshifts out to given formation redshift zs = model.get_zs(zf) # Calculate some cosmological stuff DM = cosmo.distmod(zs) dlum = cosmo.luminosity_distance(zs) """ Explanation: Calculate a few things to get going. End of explanation """ def rho_crit(z, cosmo): # convert G into better units: G = const.G.to(u.km**2 * u.Mpc/(u.M_sun * u.s**2)) return 3 / (8 * np.pi * G) * cosmo.H0**2 * cosmo.efunc(z)**2 # Mpc^3 def schechterL(luminosity, phiStar, alpha, LStar): """Schechter luminosity function.""" LOverLStar = (luminosity/LStar) return (phiStar/LStar) * LOverLStar**alpha * np.exp(- LOverLStar) def schechterM(magnitude, phiStar, alpha, MStar): """Schechter luminosity function by magnitudes.""" MStarMinM = 0.4 * (MStar - magnitude) return (0.4 * np.log(10) * phiStar * 10.0**(MStarMinM * (alpha + 1.)) * np.exp(-10.**MStarMinM)) """ Explanation: Define the functions that we'll need Need to compute the cluster volume... $M_{vir} = 4/3 \pi r^3_{vir} \rho_c(r<r_{vir}) = 4/3 \pi r^3_{vir} \Delta_c \rho_c$ if we let $\Delta_c = 200$ then $M_{200} = 4/3 \pi r^3_{200} 200 \rho_c$ with $\rho_c = \frac{3H(z)^2}{8\pi G}$ or just $M_{200} = V_{200}200\rho_c$. So we'll make a function to calculate $\rho_c$. And we'll make use of the astropy units package to do all the unit analysis for us. Don't forget that $H(z) = H_0E(z)$ We also need to integrate the Schechter luminosity functions.. 
The Schechter Function: For Luminosity: $\Phi(L) = \phi^\star \frac{L}{L_\star}^\alpha e^{-\frac{L}{L_\star}}$ For Magnitudes: $\Phi(M) = \phi^\star\frac{2}{5}log(10) (10^{\frac{2}{5}(M_\star - M)})^{\alpha+1} e^{-10^{\frac{2}{5}(M_\star - M)}}$ End of explanation """ from astropy.table import Table from scipy.interpolate import interp1d z1 = 0 z2 = 2 dz = 0.025 # build the mass array zarr = np.arange(z1, z2 + dz, dz) ps2 = Table.read('../../catalogs/PSZ2v1.fits') df2 = ps2.to_pandas() data = df2[['REDSHIFT', 'MSZ']] data['REDSHIFT'].replace(-1, np.nan, inplace=True) # redshift bins zbins = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 3] nMasses = 100 big_mass = [] for j in range(nMasses): mass = np.ones_like(zarr) * 1e14 for i in range(len(zbins) - 1): mask = (zbins[i] <= zarr) & (zarr < zbins[i + 1]) mass[mask] *= float(data.loc[(zbins[i] <= data['REDSHIFT']) & (data['REDSHIFT'] < zbins[i + 1]), 'MSZ'].sample()) * cosmo.h big_mass.append(mass) mass = np.vstack(big_mass) mass_func = interp1d(zarr, np.median(mass, axis=0)) """ Explanation: Mass limits from PSZ2 End of explanation """ # So now we are going to calculate the volumes as a function of z #M200 = mass_func(zarr) * u.solMass M200 = 1e15 * u.solMass V200 = M200/ (200 * rho_crit(zs, cosmo)) # Calculate the M_star values Mstar = model.get_absolute_mags(zf, filters='i', zs=zs) # calculate the abs mag of our limiting magnitude as a function of z mlim = 23.5 #Mlim = Mstar - 2.5 * np.log10(0.4) Mlim = mlim - cosmo.distmod(zs).value - model.get_kcorrects(zf, filters='i', zs=zs) # Here are the Schechter function stuff from Liu et al. phi_star = 3.6 * cosmo.efunc(zs)**2 alpha = -1.05 * (1 + zs)**(-2/3) fr = 0.8*(1 + zs)**(-1/2) #alpha = np.ones_like(alpha) * -1 #Mpiv = 6e14 * u.solMass #zpiv = 0.6 #alpha = -0.96 * (M200 / Mpiv)**0.01 * ((1 + zs)/ (1 + zpiv))**-0.94 #phi_star = 1.68 * (M200 / Mpiv)**0.09 * ((1 + zs)/ (1 + zpiv))**0.09 * cosmo.efunc(zs)**2 #fr = 0.62 * (M200 / Mpiv)**0.08 * ((1 + zs)/ (1 + zpiv))** -0.80 LF = [] for phi, a, M_star, M_lim in zip(phi_star, alpha, Mstar, Mlim): if M_lim < M_star - 2.5 * np.log10(0.4): Mlimit = M_lim else: Mlimit = M_star - 2.5 * np.log10(0.4) y, err = quad(schechterM, -30, Mlimit, args=(phi, a, M_star)) #print(M_star - M_lim, y) LF.append(y) plt.figure() plt.plot(zs, (LF * V200.value + 1) * fr) ax = plt.gca() ax.set_yticks(np.arange(0, 75, 10)) plt.xlim(0.1, 5) plt.ylim(0, 80) plt.xlabel('redshift') plt.ylabel('N (r < r200)') plt.grid() # calculate the abs mag of our limiting magnitude as a function of z mlim = 23.5 #Mlim = model.get_absolute_mags(zf, filters='i', zs=zs) - 2.5 * np.log10(0.4) Mlim = mlim - cosmo.distmod(zs).value - model.get_kcorrects(zf, filters='i', zs=zs) plt.figure() plt.plot(zs, model.get_absolute_mags(zf, filters='i', zs=zs), label='Lstar') plt.plot(zs, Mlim, label='Mlimit') plt.plot(zs, model.get_absolute_mags(zf, filters='i', zs=zs) - 2.5 * np.log10(0.4), label='0.4Lstar') plt.grid() plt.xlabel('redshift') plt.ylabel('abs Mag') plt.legend() Mlim Mstar - 2.5 * np.log10(0.4) # 0.4L* magnitudes np.array(LF) # LF integration output alpha phi_star fr # red fraction zs # redshift array V200.value # cluster volume 200 * rho_crit(zs, cosmo) plt.plot(zs, (V200/(4/3 * np.pi))**(1/3)) """ Explanation: Start Calculating things End of explanation """
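# A minimal sketch, assuming the same conventions as above: inverting the relation
# M200 = 4/3 * pi * r200**3 * 200 * rho_c(z) quoted earlier to pull out r200 directly.
# It reuses the rho_crit() helper defined above; the function name itself is only
# illustrative.
import numpy as np
import astropy.units as u

def r200_from_m200(M200, z, cosmo):
    """Radius enclosing a mean density of 200 * rho_crit(z); comes out in Mpc for the units used above."""
    V200 = M200 / (200 * rho_crit(z, cosmo))
    return (3 * V200 / (4 * np.pi))**(1 / 3)

# e.g. the radius plotted at the end of the previous cell, for a single redshift:
# r200_from_m200(1e15 * u.solMass, 0.5, cosmo)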
ohbm/brain-hacking-101
beginner-python/002-plots.ipynb
apache-2.0
import numpy as np import nibabel as nib import matplotlib as mpl import matplotlib.pyplot as plt %matplotlib inline img = nib.load('./data/run1.nii.gz') data = img.get_data() fig, ax = plt.subplots(1) ax.plot(data[32, 32, 15, :]) """ Explanation: Brain-hacking 101 Author: Ariel Rokem, The University of Washington eScience Institute Hack 2: Look at your data A picture is worth a thousand words. Data visualization allows you to look directly at different aspects of the data that are not readily available to you by just looking at the numbers. In this tutorial, we will look at the FIAC data using the Matplotlib software library. Matplotlib is an open-source software library that can be used to create beautiful 2-d data visualizations, such as lines, scatter-plots, and images. It can be used to produce publication-quality figures in a variety of file-formats. Let's End of explanation """ mpl.style.use('bmh') fig, ax = plt.subplots(1) ax.plot(data[32, 32, 15, :]) ax.set_xlabel('Time (TR)') ax.set_ylabel('MRI signal (a.u.)') ax.set_title('Time-series from voxel [32, 32, 15]') fig.set_size_inches([12, 6]) """ Explanation: Congratulations! You first MPL plot. Let's make this a little bit larger, use a style to make it look better, and add some annotations. End of explanation """ fig, ax = plt.subplots(1) ax.plot(data[32, 32, 15, :]) ax.plot(data[32, 32, 14, :]) ax.plot(data[32, 32, 13, :]) ax.plot(data[32, 32, 12, :]) ax.set_xlabel('Time (TR)') ax.set_ylabel('MRI signal (a.u.)') ax.set_title('Time-series from a few voxels') fig.set_size_inches([12, 6]) """ Explanation: Impressions about the data? If we want to compare several voxels side by side we can plot them on the same axis: End of explanation """ fig, ax = plt.subplots(2, 2) # ax is now an array! ax[0, 0].plot(data[32, 32, 15, :]) ax[0, 1].plot(data[32, 32, 14, :]) ax[1, 0].plot(data[32, 32, 13, :]) ax[1, 1].plot(data[32, 32, 12, :]) ax[1, 0].set_xlabel('Time (TR)') ax[1, 1].set_xlabel('Time (TR)') ax[0, 0].set_ylabel('MRI signal (a.u.)') ax[1, 0].set_ylabel('MRI signal (a.u.)') # Note that we now set the title through the fig object! fig.suptitle('Time-series from a few voxels') fig.set_size_inches([12, 6]) """ Explanation: Alternatively, we can create different subplots for each time-series End of explanation """ fig, ax = plt.subplots(1, 2) # We'll use a reasonable colormap, and no smoothing: ax[0].matshow(np.mean(data[:, :, 15], -1), cmap=mpl.cm.hot) ax[0].axis('off') ax[1].matshow(np.std(data[:, :, 15], -1), cmap=mpl.cm.hot) ax[1].axis('off') fig.set_size_inches([12, 6]) # You can save the figure to file: fig.savefig('mean_and_std.png') """ Explanation: Another kind of plot is an image. 
For example, we can take a look at the mean and standard deviation of the time-series for one entire slice: End of explanation """ fig, ax = plt.subplots(2, 2) # Note the use of `ravel` to create a 1D array: ax[0, 0].hist(np.ravel(data)) ax[0, 0].set_xlabel("fMRI signal") ax[0, 0].set_ylabel("# voxels") # Bars are 0.8 wide: ax[0, 1].bar([0.6, 1.6, 2.6, 3.6], [np.mean(data[:, :, 15]), np.mean(data[:, :, 14]), np.mean(data[:, :, 13]), np.mean(data[:, :, 12])]) ax[0, 1].set_ylabel("Average signal in the slice") ax[0, 1].set_xticks([1,2,3,4]) ax[0, 1].set_xticklabels(["15", "14", "13", "12"]) ax[0, 1].set_xlabel("Slice #") # Compares subsequent time-points: ax[1, 0].scatter(data[:, :, 15, 0], data[:, :, 15, 1]) ax[1, 0].set_xlabel("fMRI signal (time-point 0)") ax[1, 0].set_ylabel("fMRI signal (time-point 1)") # `.T` denotes a transposition ax[1, 1].boxplot(data[32, 32].T) fig.set_size_inches([12, 12]) ax[1, 1].set_xlabel("Position") ax[1, 1].set_ylabel("fMRI signal") """ Explanation: There are many other kinds of figures you could create: End of explanation """
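# A small convenience wrapper for the voxel time-series plots made earlier in this
# notebook, purely a refactor of the same matplotlib calls; the function name and the
# default figure size are arbitrary choices.
import matplotlib.pyplot as plt

def plot_voxel_timeseries(data, voxels, figsize=(12, 6)):
    """Plot the time-series of one or more (i, j, k) voxel coordinates on a single axis."""
    fig, ax = plt.subplots(1)
    for (i, j, k) in voxels:
        ax.plot(data[i, j, k, :], label='[%d, %d, %d]' % (i, j, k))
    ax.set_xlabel('Time (TR)')
    ax.set_ylabel('MRI signal (a.u.)')
    ax.legend()
    fig.set_size_inches(figsize)
    return fig, ax

# For example: plot_voxel_timeseries(data, [(32, 32, 15), (32, 32, 14)])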
ES-DOC/esdoc-jupyterhub
notebooks/bcc/cmip6/models/sandbox-2/atmos.ipynb
gpl-3.0
# DO NOT EDIT ! from pyesdoc.ipython.model_topic import NotebookOutput # DO NOT EDIT ! DOC = NotebookOutput('cmip6', 'bcc', 'sandbox-2', 'atmos') """ Explanation: ES-DOC CMIP6 Model Properties - Atmos MIP Era: CMIP6 Institute: BCC Source ID: SANDBOX-2 Topic: Atmos Sub-Topics: Dynamical Core, Radiation, Turbulence Convection, Microphysics Precipitation, Cloud Scheme, Observation Simulation, Gravity Waves, Solar, Volcanos. Properties: 156 (127 required) Model descriptions: Model description details Initialized From: -- Notebook Help: Goto notebook help page Notebook Initialised: 2018-02-15 16:53:39 Document Setup IMPORTANT: to be executed each time you run the notebook End of explanation """ # Set as follows: DOC.set_author("name", "email") # TODO - please enter value(s) """ Explanation: Document Authors Set document authors End of explanation """ # Set as follows: DOC.set_contributor("name", "email") # TODO - please enter value(s) """ Explanation: Document Contributors Specify document contributors End of explanation """ # Set publication status: # 0=do not publish, 1=publish. DOC.set_publication_status(0) """ Explanation: Document Publication Specify document publication status End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.key_properties.overview.model_overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: Document Table of Contents 1. Key Properties --&gt; Overview 2. Key Properties --&gt; Resolution 3. Key Properties --&gt; Timestepping 4. Key Properties --&gt; Orography 5. Grid --&gt; Discretisation 6. Grid --&gt; Discretisation --&gt; Horizontal 7. Grid --&gt; Discretisation --&gt; Vertical 8. Dynamical Core 9. Dynamical Core --&gt; Top Boundary 10. Dynamical Core --&gt; Lateral Boundary 11. Dynamical Core --&gt; Diffusion Horizontal 12. Dynamical Core --&gt; Advection Tracers 13. Dynamical Core --&gt; Advection Momentum 14. Radiation 15. Radiation --&gt; Shortwave Radiation 16. Radiation --&gt; Shortwave GHG 17. Radiation --&gt; Shortwave Cloud Ice 18. Radiation --&gt; Shortwave Cloud Liquid 19. Radiation --&gt; Shortwave Cloud Inhomogeneity 20. Radiation --&gt; Shortwave Aerosols 21. Radiation --&gt; Shortwave Gases 22. Radiation --&gt; Longwave Radiation 23. Radiation --&gt; Longwave GHG 24. Radiation --&gt; Longwave Cloud Ice 25. Radiation --&gt; Longwave Cloud Liquid 26. Radiation --&gt; Longwave Cloud Inhomogeneity 27. Radiation --&gt; Longwave Aerosols 28. Radiation --&gt; Longwave Gases 29. Turbulence Convection 30. Turbulence Convection --&gt; Boundary Layer Turbulence 31. Turbulence Convection --&gt; Deep Convection 32. Turbulence Convection --&gt; Shallow Convection 33. Microphysics Precipitation 34. Microphysics Precipitation --&gt; Large Scale Precipitation 35. Microphysics Precipitation --&gt; Large Scale Cloud Microphysics 36. Cloud Scheme 37. Cloud Scheme --&gt; Optical Cloud Properties 38. Cloud Scheme --&gt; Sub Grid Scale Water Distribution 39. Cloud Scheme --&gt; Sub Grid Scale Ice Distribution 40. Observation Simulation 41. Observation Simulation --&gt; Isscp Attributes 42. Observation Simulation --&gt; Cosp Attributes 43. Observation Simulation --&gt; Radar Inputs 44. Observation Simulation --&gt; Lidar Inputs 45. Gravity Waves 46. Gravity Waves --&gt; Orographic Gravity Waves 47. Gravity Waves --&gt; Non Orographic Gravity Waves 48. Solar 49. Solar --&gt; Solar Pathways 50. Solar --&gt; Solar Constant 51. Solar --&gt; Orbital Parameters 52. 
Solar --&gt; Insolation Ozone 53. Volcanos 54. Volcanos --&gt; Volcanoes Treatment 1. Key Properties --&gt; Overview Top level key properties 1.1. Model Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of atmosphere model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.key_properties.overview.model_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 1.2. Model Name Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Name of atmosphere model code (CAM 4.0, ARPEGE 3.2,...) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.key_properties.overview.model_family') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "AGCM" # "ARCM" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 1.3. Model Family Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of atmospheric model. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.key_properties.overview.basic_approximations') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "primitive equations" # "non-hydrostatic" # "anelastic" # "Boussinesq" # "hydrostatic" # "quasi-hydrostatic" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 1.4. Basic Approximations Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Basic approximations made in the atmosphere. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.key_properties.resolution.horizontal_resolution_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 2. Key Properties --&gt; Resolution Characteristics of the model resolution 2.1. Horizontal Resolution Name Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 This is a string usually used by the modelling group to describe the resolution of the model grid, e.g. T42, N48. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.key_properties.resolution.canonical_horizontal_resolution') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 2.2. Canonical Horizontal Resolution Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Expression quoted for gross comparisons of resolution, e.g. 2.5 x 3.75 degrees lat-lon. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.key_properties.resolution.range_horizontal_resolution') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 2.3. Range Horizontal Resolution Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Range of horizontal resolution with spatial details, eg. 1 deg (Equator) - 0.5 deg End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.key_properties.resolution.number_of_vertical_levels') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 2.4. Number Of Vertical Levels Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Number of vertical levels resolved on the computational grid. 
End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.key_properties.resolution.high_top') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 2.5. High Top Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Does the atmosphere have a high-top? High-Top atmospheres have a fully resolved stratosphere with a model top above the stratopause. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_dynamics') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 3. Key Properties --&gt; Timestepping Characteristics of the atmosphere model time stepping 3.1. Timestep Dynamics Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Timestep for the dynamics, e.g. 30 min. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_shortwave_radiative_transfer') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 3.2. Timestep Shortwave Radiative Transfer Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Timestep for the shortwave radiative transfer, e.g. 1.5 hours. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_longwave_radiative_transfer') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 3.3. Timestep Longwave Radiative Transfer Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Timestep for the longwave radiative transfer, e.g. 3 hours. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.key_properties.orography.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "present day" # "modified" # TODO - please enter value(s) """ Explanation: 4. Key Properties --&gt; Orography Characteristics of the model orography 4.1. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Time adaptation of the orography. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.key_properties.orography.changes') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "related to ice sheets" # "related to tectonics" # "modified mean" # "modified variance if taken into account in model (cf gravity waves)" # TODO - please enter value(s) """ Explanation: 4.2. Changes Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N If the orography type is modified describe the time adaptation changes. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.grid.discretisation.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5. Grid --&gt; Discretisation Atmosphere grid discretisation 5.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview description of grid discretisation in the atmosphere End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "spectral" # "fixed grid" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 6. Grid --&gt; Discretisation --&gt; Horizontal Atmosphere discretisation in the horizontal 6.1. Scheme Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Horizontal discretisation type End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "finite elements" # "finite volumes" # "finite difference" # "centered finite difference" # TODO - please enter value(s) """ Explanation: 6.2. Scheme Method Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Horizontal discretisation method End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_order') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "second" # "third" # "fourth" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 6.3. Scheme Order Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Horizontal discretisation function order End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.horizontal_pole') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "filter" # "pole rotation" # "artificial island" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 6.4. Horizontal Pole Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Horizontal discretisation pole singularity treatment End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.grid_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Gaussian" # "Latitude-Longitude" # "Cubed-Sphere" # "Icosahedral" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 6.5. Grid Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Horizontal grid type End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.grid.discretisation.vertical.coordinate_type') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "isobaric" # "sigma" # "hybrid sigma-pressure" # "hybrid pressure" # "vertically lagrangian" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 7. Grid --&gt; Discretisation --&gt; Vertical Atmosphere discretisation in the vertical 7.1. Coordinate Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Type of vertical coordinate system End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.dynamical_core.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8. Dynamical Core Characteristics of the dynamical core 8.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview description of atmosphere dynamical core End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmos.dynamical_core.name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.2. Name Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Commonly used name for the dynamical core of the model. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.dynamical_core.timestepping_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Adams-Bashforth" # "explicit" # "implicit" # "semi-implicit" # "leap frog" # "multi-step" # "Runge Kutta fifth order" # "Runge Kutta second order" # "Runge Kutta third order" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 8.3. Timestepping Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Timestepping framework type End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.dynamical_core.prognostic_variables') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "surface pressure" # "wind components" # "divergence/curl" # "temperature" # "potential temperature" # "total water" # "water vapour" # "water liquid" # "water ice" # "total water moments" # "clouds" # "radiation" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 8.4. Prognostic Variables Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N List of the model prognostic variables End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_boundary_condition') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "sponge layer" # "radiation boundary condition" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 9. Dynamical Core --&gt; Top Boundary Type of boundary layer at the top of the model 9.1. Top Boundary Condition Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Top boundary condition End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_heat') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9.2. Top Heat Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Top boundary heat treatment End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_wind') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9.3. Top Wind Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Top boundary wind treatment End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.dynamical_core.lateral_boundary.condition') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "sponge layer" # "radiation boundary condition" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 10. Dynamical Core --&gt; Lateral Boundary Type of lateral boundary condition (if the model is a regional model) 10.1. Condition Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Type of lateral boundary condition End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 11. Dynamical Core --&gt; Diffusion Horizontal Horizontal diffusion scheme 11.1. Scheme Name Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Horizontal diffusion scheme name End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "iterated Laplacian" # "bi-harmonic" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 11.2. Scheme Method Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Horizontal diffusion scheme method End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Heun" # "Roe and VanLeer" # "Roe and Superbee" # "Prather" # "UTOPIA" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 12. Dynamical Core --&gt; Advection Tracers Tracer advection scheme 12.1. Scheme Name Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Tracer advection scheme name End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_characteristics') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Eulerian" # "modified Euler" # "Lagrangian" # "semi-Lagrangian" # "cubic semi-Lagrangian" # "quintic semi-Lagrangian" # "mass-conserving" # "finite volume" # "flux-corrected" # "linear" # "quadratic" # "quartic" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 12.2. Scheme Characteristics Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Tracer advection scheme characteristics End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conserved_quantities') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "dry mass" # "tracer mass" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 12.3. Conserved Quantities Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Tracer advection scheme conserved quantities End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conservation_method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "conservation fixer" # "Priestley algorithm" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 12.4. Conservation Method Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Tracer advection scheme conservation method End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "VanLeer" # "Janjic" # "SUPG (Streamline Upwind Petrov-Galerkin)" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 13. Dynamical Core --&gt; Advection Momentum Momentum advection scheme 13.1. 
Scheme Name Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Momentum advection scheme name End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_characteristics') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "2nd order" # "4th order" # "cell-centred" # "staggered grid" # "semi-staggered grid" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 13.2. Scheme Characteristics Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Momentum advection scheme characteristics End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_staggering_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Arakawa B-grid" # "Arakawa C-grid" # "Arakawa D-grid" # "Arakawa E-grid" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 13.3. Scheme Staggering Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Momentum advection scheme staggering type End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conserved_quantities') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Angular momentum" # "Horizontal momentum" # "Enstrophy" # "Mass" # "Total energy" # "Vorticity" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 13.4. Conserved Quantities Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Momentum advection scheme conserved quantities End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conservation_method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "conservation fixer" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 13.5. Conservation Method Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Momentum advection scheme conservation method End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.aerosols') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "sulphate" # "nitrate" # "sea salt" # "dust" # "ice" # "organic" # "BC (black carbon / soot)" # "SOA (secondary organic aerosols)" # "POM (particulate organic matter)" # "polar stratospheric ice" # "NAT (nitric acid trihydrate)" # "NAD (nitric acid dihydrate)" # "STS (supercooled ternary solution aerosol particle)" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 14. Radiation Characteristics of the atmosphere radiation process 14.1. Aerosols Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Aerosols whose radiative effect is taken into account in the atmosphere model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 15. Radiation --&gt; Shortwave Radiation Properties of the shortwave radiation scheme 15.1.
Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview description of shortwave radiation in the atmosphere End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 15.2. Name Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Commonly used name for the shortwave radiation scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_integration') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "wide-band model" # "correlated-k" # "exponential sum fitting" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 15.3. Spectral Integration Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Shortwave radiation scheme spectral integration End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.transport_calculation') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "two-stream" # "layer interaction" # "bulk" # "adaptive" # "multi-stream" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 15.4. Transport Calculation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Shortwave radiation transport calculation methods End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_intervals') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 15.5. Spectral Intervals Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Shortwave radiation scheme number of spectral intervals End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.greenhouse_gas_complexity') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "CO2" # "CH4" # "N2O" # "CFC-11 eq" # "CFC-12 eq" # "HFC-134a eq" # "Explicit ODSs" # "Explicit other fluorinated gases" # "O3" # "H2O" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 16. Radiation --&gt; Shortwave GHG Representation of greenhouse gases in the shortwave radiation scheme 16.1. Greenhouse Gas Complexity Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Complexity of greenhouse gases whose shortwave radiative effects are taken into account in the atmosphere model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.ODS') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "CFC-12" # "CFC-11" # "CFC-113" # "CFC-114" # "CFC-115" # "HCFC-22" # "HCFC-141b" # "HCFC-142b" # "Halon-1211" # "Halon-1301" # "Halon-2402" # "methyl chloroform" # "carbon tetrachloride" # "methyl chloride" # "methylene chloride" # "chloroform" # "methyl bromide" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 16.2. 
ODS Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Ozone depleting substances whose shortwave radiative effects are explicitly taken into account in the atmosphere model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.other_flourinated_gases') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "HFC-134a" # "HFC-23" # "HFC-32" # "HFC-125" # "HFC-143a" # "HFC-152a" # "HFC-227ea" # "HFC-236fa" # "HFC-245fa" # "HFC-365mfc" # "HFC-43-10mee" # "CF4" # "C2F6" # "C3F8" # "C4F10" # "C5F12" # "C6F14" # "C7F16" # "C8F18" # "c-C4F8" # "NF3" # "SF6" # "SO2F2" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 16.3. Other Fluorinated Gases Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Other fluorinated gases whose shortwave radiative effects are explicitly taken into account in the atmosphere model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.general_interactions') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "scattering" # "emission/absorption" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 17. Radiation --&gt; Shortwave Cloud Ice Shortwave radiative properties of ice crystals in clouds 17.1. General Interactions Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N General shortwave radiative interactions with cloud ice crystals End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.physical_representation') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "bi-modal size distribution" # "ensemble of ice crystals" # "mean projected area" # "ice water path" # "crystal asymmetry" # "crystal aspect ratio" # "effective crystal radius" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 17.2. Physical Representation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Physical representation of cloud ice crystals in the shortwave radiation scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.optical_methods') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "T-matrix" # "geometric optics" # "finite difference time domain (FDTD)" # "Mie theory" # "anomalous diffraction approximation" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 17.3. Optical Methods Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Optical methods applicable to cloud ice crystals in the shortwave radiation scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.general_interactions') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "scattering" # "emission/absorption" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 18. Radiation --&gt; Shortwave Cloud Liquid Shortwave radiative properties of liquid droplets in clouds 18.1. General Interactions Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N General shortwave radiative interactions with cloud liquid droplets End of explanation """ # PROPERTY ID - DO NOT EDIT !
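# Illustrative sketch for the multi-valued ENUM property below (Cardinality 1.N):
# the selections shown are hypothetical picks from the Valid Choices list, not this
# model's documented configuration; a 1.N property is presumably filled with one
# DOC.set_value call per selected choice, e.g.
#     DOC.set_value("cloud droplet number concentration")
#     DOC.set_value("effective cloud droplet radii")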
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.physical_representation') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "cloud droplet number concentration" # "effective cloud droplet radii" # "droplet size distribution" # "liquid water path" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 18.2. Physical Representation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Physical representation of cloud liquid droplets in the shortwave radiation scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.optical_methods') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "geometric optics" # "Mie theory" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 18.3. Optical Methods Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Optical methods applicable to cloud liquid droplets in the shortwave radiation scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_inhomogeneity.cloud_inhomogeneity') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Monte Carlo Independent Column Approximation" # "Triplecloud" # "analytic" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 19. Radiation --&gt; Shortwave Cloud Inhomogeneity Cloud inhomogeneity in the shortwave radiation scheme 19.1. Cloud Inhomogeneity Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Method for taking into account horizontal cloud inhomogeneity End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.general_interactions') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "scattering" # "emission/absorption" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 20. Radiation --&gt; Shortwave Aerosols Shortwave radiative properties of aerosols 20.1. General Interactions Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N General shortwave radiative interactions with aerosols End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.physical_representation') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "number concentration" # "effective radii" # "size distribution" # "asymmetry" # "aspect ratio" # "mixing state" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 20.2. Physical Representation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Physical representation of aerosols in the shortwave radiation scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.optical_methods') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "T-matrix" # "geometric optics" # "finite difference time domain (FDTD)" # "Mie theory" # "anomalous diffraction approximation" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 20.3. 
Optical Methods Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Optical methods applicable to aerosols in the shortwave radiation scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.shortwave_gases.general_interactions') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "scattering" # "emission/absorption" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 21. Radiation --&gt; Shortwave Gases Shortwave radiative properties of gases 21.1. General Interactions Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N General shortwave radiative interactions with gases End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.longwave_radiation.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 22. Radiation --&gt; Longwave Radiation Properties of the longwave radiation scheme 22.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview description of longwave radiation in the atmosphere End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.longwave_radiation.name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 22.2. Name Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Commonly used name for the longwave radiation scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_integration') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "wide-band model" # "correlated-k" # "exponential sum fitting" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 22.3. Spectral Integration Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Longwave radiation scheme spectral integration End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.longwave_radiation.transport_calculation') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "two-stream" # "layer interaction" # "bulk" # "adaptive" # "multi-stream" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 22.4. Transport Calculation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Longwave radiation transport calculation methods End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_intervals') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 22.5. Spectral Intervals Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Longwave radiation scheme number of spectral intervals End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.longwave_GHG.greenhouse_gas_complexity') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "CO2" # "CH4" # "N2O" # "CFC-11 eq" # "CFC-12 eq" # "HFC-134a eq" # "Explicit ODSs" # "Explicit other fluorinated gases" # "O3" # "H2O" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 23. 
Radiation --&gt; Longwave GHG Representation of greenhouse gases in the longwave radiation scheme 23.1. Greenhouse Gas Complexity Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Complexity of greenhouse gases whose longwave radiative effects are taken into account in the atmosphere model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.longwave_GHG.ODS') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "CFC-12" # "CFC-11" # "CFC-113" # "CFC-114" # "CFC-115" # "HCFC-22" # "HCFC-141b" # "HCFC-142b" # "Halon-1211" # "Halon-1301" # "Halon-2402" # "methyl chloroform" # "carbon tetrachloride" # "methyl chloride" # "methylene chloride" # "chloroform" # "methyl bromide" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 23.2. ODS Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Ozone depleting substances whose longwave radiative effects are explicitly taken into account in the atmosphere model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.longwave_GHG.other_flourinated_gases') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "HFC-134a" # "HFC-23" # "HFC-32" # "HFC-125" # "HFC-143a" # "HFC-152a" # "HFC-227ea" # "HFC-236fa" # "HFC-245fa" # "HFC-365mfc" # "HFC-43-10mee" # "CF4" # "C2F6" # "C3F8" # "C4F10" # "C5F12" # "C6F14" # "C7F16" # "C8F18" # "c-C4F8" # "NF3" # "SF6" # "SO2F2" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 23.3. Other Fluorinated Gases Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Other fluorinated gases whose longwave radiative effects are explicitly taken into account in the atmosphere model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.general_interactions') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "scattering" # "emission/absorption" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 24. Radiation --&gt; Longwave Cloud Ice Longwave radiative properties of ice crystals in clouds 24.1. General Interactions Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N General longwave radiative interactions with cloud ice crystals End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.physical_reprenstation') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "bi-modal size distribution" # "ensemble of ice crystals" # "mean projected area" # "ice water path" # "crystal asymmetry" # "crystal aspect ratio" # "effective crystal radius" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 24.2. Physical Representation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Physical representation of cloud ice crystals in the longwave radiation scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.optical_methods') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "T-matrix" # "geometric optics" # "finite difference time domain (FDTD)" # "Mie theory" # "anomalous diffraction approximation" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 24.3.
Optical Methods Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Optical methods applicable to cloud ice crystals in the longwave radiation scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.general_interactions') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "scattering" # "emission/absorption" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 25. Radiation --&gt; Longwave Cloud Liquid Longwave radiative properties of liquid droplets in clouds 25.1. General Interactions Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N General longwave radiative interactions with cloud liquid droplets End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.physical_representation') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "cloud droplet number concentration" # "effective cloud droplet radii" # "droplet size distribution" # "liquid water path" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 25.2. Physical Representation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Physical representation of cloud liquid droplets in the longwave radiation scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.optical_methods') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "geometric optics" # "Mie theory" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 25.3. Optical Methods Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Optical methods applicable to cloud liquid droplets in the longwave radiation scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.longwave_cloud_inhomogeneity.cloud_inhomogeneity') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Monte Carlo Independent Column Approximation" # "Triplecloud" # "analytic" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 26. Radiation --&gt; Longwave Cloud Inhomogeneity Cloud inhomogeneity in the longwave radiation scheme 26.1. Cloud Inhomogeneity Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Method for taking into account horizontal cloud inhomogeneity End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.general_interactions') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "scattering" # "emission/absorption" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 27. Radiation --&gt; Longwave Aerosols Longwave radiative properties of aerosols 27.1. General Interactions Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N General longwave radiative interactions with aerosols End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.physical_representation') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "number concentration" # "effective radii" # "size distribution" # "asymmetry" # "aspect ratio" # "mixing state" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 27.2. Physical Representation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Physical representation of aerosols in the longwave radiation scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.optical_methods') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "T-matrix" # "geometric optics" # "finite difference time domain (FDTD)" # "Mie theory" # "anomalous diffraction approximation" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 27.3. Optical Methods Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Optical methods applicable to aerosols in the longwave radiation scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.longwave_gases.general_interactions') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "scattering" # "emission/absorption" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 28. Radiation --&gt; Longwave Gases Longwave radiative properties of gases 28.1. General Interactions Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N General longwave radiative interactions with gases End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.turbulence_convection.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 29. Turbulence Convection Atmosphere Convective Turbulence and Clouds 29.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview description of atmosphere convection and turbulence End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Mellor-Yamada" # "Holtslag-Boville" # "EDMF" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 30. Turbulence Convection --&gt; Boundary Layer Turbulence Properties of the boundary layer turbulence scheme 30.1. Scheme Name Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Boundary layer turbulence scheme name End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_type') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "TKE prognostic" # "TKE diagnostic" # "TKE coupled with water" # "vertical profile of Kz" # "non-local diffusion" # "Monin-Obukhov similarity" # "Coastal Buddy Scheme" # "Coupled with convection" # "Coupled with gravity waves" # "Depth capped at cloud base" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 30.2. Scheme Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Boundary layer turbulence scheme type End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
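# Illustrative sketch for the INTEGER property below (Cardinality 1.1): numeric
# values are passed unquoted, and the closure order shown is a hypothetical
# placeholder, not this model's documented value, e.g.
#     DOC.set_value(2)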
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.closure_order') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 30.3. Closure Order Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Boundary layer turbulence scheme closure order End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.counter_gradient') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 30.4. Counter Gradient Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Uses boundary layer turbulence scheme counter gradient End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 31. Turbulence Convection --&gt; Deep Convection Properties of the deep convection scheme 31.1. Scheme Name Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Deep convection scheme name End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_type') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "mass-flux" # "adjustment" # "plume ensemble" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 31.2. Scheme Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Deep convection scheme type End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_method') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "CAPE" # "bulk" # "ensemble" # "CAPE/WFN based" # "TKE/CIN based" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 31.3. Scheme Method Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Deep convection scheme method End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.processes') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "vertical momentum transport" # "convective momentum transport" # "entrainment" # "detrainment" # "penetrative convection" # "updrafts" # "downdrafts" # "radiative effect of anvils" # "re-evaporation of convective precipitation" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 31.4. Processes Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Physical processes taken into account in the parameterisation of deep convection End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.microphysics') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "tuning parameter based" # "single moment" # "two moment" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 31.5. Microphysics Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Microphysics scheme for deep convection. 
Microphysical processes directly control the amount of detrainment of cloud hydrometeors and water vapor from updrafts End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 32. Turbulence Convection --&gt; Shallow Convection Properties of the shallow convection scheme 32.1. Scheme Name Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Shallow convection scheme name End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_type') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "mass-flux" # "cumulus-capped boundary layer" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 32.2. Scheme Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Shallow convection scheme type End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "same as deep (unified)" # "included in boundary layer turbulence" # "separate diagnosis" # TODO - please enter value(s) """ Explanation: 32.3. Scheme Method Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Shallow convection scheme method End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.processes') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "convective momentum transport" # "entrainment" # "detrainment" # "penetrative convection" # "re-evaporation of convective precipitation" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 32.4. Processes Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Physical processes taken into account in the parameterisation of shallow convection End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.microphysics') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "tuning parameter based" # "single moment" # "two moment" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 32.5. Microphysics Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Microphysics scheme for shallow convection End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.microphysics_precipitation.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 33. Microphysics Precipitation Large Scale Cloud Microphysics and Precipitation 33.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview description of large scale cloud microphysics and precipitation End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.scheme_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 34. Microphysics Precipitation --&gt; Large Scale Precipitation Properties of the large scale precipitation scheme 34.1.
Scheme Name Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Commonly used name of the large scale precipitation parameterisation scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.hydrometeors') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "liquid rain" # "snow" # "hail" # "graupel" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 34.2. Hydrometeors Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Precipitating hydrometeors taken into account in the large scale precipitation scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.scheme_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 35. Microphysics Precipitation --&gt; Large Scale Cloud Microphysics Properties of the large scale cloud microphysics scheme 35.1. Scheme Name Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Commonly used name of the microphysics parameterisation scheme used for large scale clouds. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.processes') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "mixed phase" # "cloud droplets" # "cloud ice" # "ice nucleation" # "water vapour deposition" # "effect of raindrops" # "effect of snow" # "effect of graupel" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 35.2. Processes Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Large scale cloud microphysics processes End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.cloud_scheme.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 36. Cloud Scheme Characteristics of the cloud scheme 36.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview description of the atmosphere cloud scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.cloud_scheme.name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 36.2. Name Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Commonly used name for the cloud scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.cloud_scheme.atmos_coupling') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "atmosphere_radiation" # "atmosphere_microphysics_precipitation" # "atmosphere_turbulence_convection" # "atmosphere_gravity_waves" # "atmosphere_solar" # "atmosphere_volcano" # "atmosphere_cloud_simulator" # TODO - please enter value(s) """ Explanation: 36.3. Atmos Coupling Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Atmosphere components that are linked to the cloud scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
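# Illustrative sketch for the BOOLEAN property below (Cardinality 1.1): booleans
# are passed unquoted as True or False, and the value shown is a hypothetical
# placeholder, not this model's documented choice, e.g.
#     DOC.set_value(True)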
DOC.set_id('cmip6.atmos.cloud_scheme.uses_separate_treatment') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 36.4. Uses Separate Treatment Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Different cloud schemes for the different types of clouds (convective, stratiform and boundary layer) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.cloud_scheme.processes') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "entrainment" # "detrainment" # "bulk cloud" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 36.5. Processes Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Processes included in the cloud scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 36.6. Prognostic Scheme Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is the cloud scheme a prognostic scheme? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.cloud_scheme.diagnostic_scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 36.7. Diagnostic Scheme Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is the cloud scheme a diagnostic scheme? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_variables') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "cloud amount" # "liquid" # "ice" # "rain" # "snow" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 36.8. Prognostic Variables Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N List the prognostic variables used by the cloud scheme, if applicable. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_overlap_method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "random" # "maximum" # "maximum-random" # "exponential" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 37. Cloud Scheme --&gt; Optical Cloud Properties Optical cloud properties 37.1. Cloud Overlap Method Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Method for taking into account overlapping of cloud layers End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_inhomogeneity') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 37.2. Cloud Inhomogeneity Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Method for taking into account cloud inhomogeneity End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "prognostic" # "diagnostic" # TODO - please enter value(s) """ Explanation: 38. 
Cloud Scheme --&gt; Sub Grid Scale Water Distribution Sub-grid scale water distribution 38.1. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Sub-grid scale water distribution type End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 38.2. Function Name Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Sub-grid scale water distribution function name End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_order') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 38.3. Function Order Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Sub-grid scale water distribution function type End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.convection_coupling') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "coupled with deep" # "coupled with shallow" # "not coupled with convection" # TODO - please enter value(s) """ Explanation: 38.4. Convection Coupling Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Sub-grid scale water distribution coupling with convection End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "prognostic" # "diagnostic" # TODO - please enter value(s) """ Explanation: 39. Cloud Scheme --&gt; Sub Grid Scale Ice Distribution Sub-grid scale ice distribution 39.1. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Sub-grid scale ice distribution type End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 39.2. Function Name Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Sub-grid scale ice distribution function name End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_order') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 39.3. Function Order Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Sub-grid scale ice distribution function type End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.convection_coupling') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "coupled with deep" # "coupled with shallow" # "not coupled with convection" # TODO - please enter value(s) """ Explanation: 39.4. Convection Coupling Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Sub-grid scale ice distribution coupling with convection End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmos.observation_simulation.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 40. Observation Simulation Characteristics of observation simulation 40.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview description of observation simulator characteristics End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_estimation_method') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "no adjustment" # "IR brightness" # "visible optical depth" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 41. Observation Simulation --&gt; Isscp Attributes ISSCP Characteristics 41.1. Top Height Estimation Method Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Cloud simulator ISSCP top height estimation method End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_direction') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "lowest altitude level" # "highest altitude level" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 41.2. Top Height Direction Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Cloud simulator ISSCP top height direction End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.run_configuration') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Inline" # "Offline" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 42. Observation Simulation --&gt; Cosp Attributes CFMIP Observational Simulator Package attributes 42.1. Run Configuration Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Cloud simulator COSP run configuration End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_grid_points') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 42.2. Number Of Grid Points Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Cloud simulator COSP number of grid points End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_sub_columns') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 42.3. Number Of Sub Columns Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Cloud simulator COSP number of sub-columns used to simulate sub-grid variability End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_levels') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 42.4. Number Of Levels Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Cloud simulator COSP number of levels End of explanation """ # PROPERTY ID - DO NOT EDIT !
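# Illustrative sketch for the FLOAT property below (Cardinality 1.1): the description
# asks for the radar frequency in Hz, so a hypothetical 94 GHz cloud radar would be
# entered as shown (placeholder value, not this model's documented configuration)
#     DOC.set_value(94.0e9)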
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.frequency') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 43. Observation Simulation --&gt; Radar Inputs Characteristics of the cloud radar simulator 43.1. Frequency Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Cloud simulator radar frequency (Hz) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "surface" # "space borne" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 43.2. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Cloud simulator radar type End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.gas_absorption') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 43.3. Gas Absorption Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Cloud simulator radar uses gas absorption End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.effective_radius') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 43.4. Effective Radius Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Cloud simulator radar uses effective radius End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.ice_types') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "ice spheres" # "ice non-spherical" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 44. Observation Simulation --&gt; Lidar Inputs Characteristics of the cloud lidar simulator 44.1. Ice Types Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Cloud simulator lidar ice type End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.overlap') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "max" # "random" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 44.2. Overlap Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Cloud simulator lidar overlap End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.gravity_waves.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 45. Gravity Waves Characteristics of the parameterised gravity waves in the atmosphere, whether from orography or other sources. 45.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview description of gravity wave parameterisation in the atmosphere End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.gravity_waves.sponge_layer') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Rayleigh friction" # "Diffusive sponge layer" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 45.2. 
Sponge Layer Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Sponge layer in the upper levels in order to avoid gravity wave reflection at the top. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.gravity_waves.background') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "continuous spectrum" # "discrete spectrum" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 45.3. Background Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Background wave distribution End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.gravity_waves.subgrid_scale_orography') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "effect on drag" # "effect on lifting" # "enhanced topography" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 45.4. Subgrid Scale Orography Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Subgrid scale orography effects taken into account. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 46. Gravity Waves --&gt; Orographic Gravity Waves Gravity waves generated due to the presence of orography 46.1. Name Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Commonly used name for the orographic gravity wave scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.source_mechanisms') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "linear mountain waves" # "hydraulic jump" # "envelope orography" # "low level flow blocking" # "statistical sub-grid scale variance" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 46.2. Source Mechanisms Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Orographic gravity wave source mechanisms End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.calculation_method') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "non-linear calculation" # "more than two cardinal directions" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 46.3. Calculation Method Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Orographic gravity wave calculation method End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.propagation_scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "linear theory" # "non-linear theory" # "includes boundary layer ducting" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 46.4. Propagation Scheme Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Orographic gravity wave propagation scheme End of explanation """ # PROPERTY ID - DO NOT EDIT !
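# Illustrative sketch for the single-valued ENUM property below (Cardinality 1.1):
# exactly one of the listed Valid Choices is entered; the choice shown is a
# hypothetical placeholder, not this model's documented scheme, e.g.
#     DOC.set_value("wave saturation vs Richardson number")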
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.dissipation_scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "total wave" # "single wave" # "spectral" # "linear" # "wave saturation vs Richardson number" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 46.5. Dissipation Scheme Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Orographic gravity wave dissipation scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 47. Gravity Waves --&gt; Non Orographic Gravity Waves Gravity waves generated by non-orographic processes. 47.1. Name Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Commonly used name for the non-orographic gravity wave scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.source_mechanisms') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "convection" # "precipitation" # "background spectrum" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 47.2. Source Mechanisms Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Non-orographic gravity wave source mechanisms End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.calculation_method') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "spatially dependent" # "temporally dependent" # TODO - please enter value(s) """ Explanation: 47.3. Calculation Method Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Non-orographic gravity wave calculation method End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.propagation_scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "linear theory" # "non-linear theory" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 47.4. Propagation Scheme Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Non-orographic gravity wave propagation scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.dissipation_scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "total wave" # "single wave" # "spectral" # "linear" # "wave saturation vs Richardson number" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 47.5. Dissipation Scheme Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Non-orographic gravity wave dissipation scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.solar.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 48. Solar Top of atmosphere solar insolation characteristics 48.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview description of solar insolation of the atmosphere End of explanation """ # PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_pathways.pathways') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "SW radiation" # "precipitating energetic particles" # "cosmic rays" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 49. Solar --&gt; Solar Pathways Pathways for solar forcing of the atmosphere 49.1. Pathways Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Pathways for the solar forcing of the atmosphere model domain End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.solar.solar_constant.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "fixed" # "transient" # TODO - please enter value(s) """ Explanation: 50. Solar --&gt; Solar Constant Solar constant and top of atmosphere insolation characteristics 50.1. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Time adaptation of the solar constant. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.solar.solar_constant.fixed_value') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 50.2. Fixed Value Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If the solar constant is fixed, enter the value of the solar constant (W m-2). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.solar.solar_constant.transient_characteristics') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 50.3. Transient Characteristics Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 solar constant transient characteristics (W m-2) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.solar.orbital_parameters.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "fixed" # "transient" # TODO - please enter value(s) """ Explanation: 51. Solar --&gt; Orbital Parameters Orbital parameters and top of atmosphere insolation characteristics 51.1. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Time adaptation of orbital parameters End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.solar.orbital_parameters.fixed_reference_date') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 51.2. Fixed Reference Date Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Reference date for fixed orbital parameters (yyyy) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.solar.orbital_parameters.transient_method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 51.3. Transient Method Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Description of transient orbital parameters End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.solar.orbital_parameters.computation_method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Berger 1978" # "Laskar 2004" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 51.4. 
Computation Method Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Method used for computing orbital parameters. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.solar.insolation_ozone.solar_ozone_impact') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 52. Solar --&gt; Insolation Ozone Impact of solar insolation on stratospheric ozone 52.1. Solar Ozone Impact Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Does top of atmosphere insolation impact on stratospheric ozone? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.volcanos.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 53. Volcanos Characteristics of the implementation of volcanoes 53.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview description of the implementation of volcanic effects in the atmosphere End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.volcanos.volcanoes_treatment.volcanoes_implementation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "high frequency solar constant anomaly" # "stratospheric aerosols optical thickness" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 54. Volcanos --&gt; Volcanoes Treatment Treatment of volcanoes in the atmosphere 54.1. Volcanoes Implementation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 How volcanic effects are modeled in the atmosphere. End of explanation """
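The template cells above all follow the same fill-in pattern, so as a worked illustration (not part of the original ES-DOC notebook) here is how two of the properties might be completed, assuming the DOC object created in the notebook's set-up cells. The property ids and the ENUM choice string are taken from the cells above; the particular selections are illustrative only and would need to reflect the model actually being documented.

# Hypothetical completion of two of the properties above (illustrative values only).
# ENUM property: choose one of the listed valid choices.
DOC.set_id('cmip6.atmos.solar.solar_constant.type')
DOC.set_value("transient")

# STRING property: free-text description.
DOC.set_id('cmip6.atmos.solar.solar_constant.transient_characteristics')
DOC.set_value("Annually varying total solar irradiance, illustrative description only")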
davidchall/topas2numpy
docs/usage.ipynb
mit
import numpy as np import matplotlib.pyplot as plt %matplotlib inline """ Explanation: Usage Here are some examples of how to use topas2numpy in an IPython notebook. Before starting, we setup plotting with matplotlib. End of explanation """ from topas2numpy import read_ntuple x = read_ntuple('../tests/data/ascii-phasespace.phsp') """ Explanation: Reading ntuples TOPAS ntuples store data in columns, with each row representing a different event in a particle's history. The exact details of what data is written to file and when this is triggered depends upon the scorer implementation. topas2numpy.read_ntuple() allows us to read TOPAS ntuples in ASCII, binary and limited formats. It returns a NumPy structured array. Record arrays are not supported because TOPAS column name can contain characters that are illegal for attribute names. If desired, I can add this feature at the expense of changing column names. Onwards to the example usage! First, we read in the data: End of explanation """ x.dtype.names """ Explanation: Next, we want to check what columns are available in this ntuple: End of explanation """ energy_label = 'Energy (MeV)' species_label = 'Particle Type (in PDG Format)' energy = x[energy_label] species = x[species_label] proton_energy = energy[species==2212] n, bins, patches = plt.hist(proton_energy) plt.xlabel(energy_label) plt.ylabel('Histories') plt.title('Proton energy') """ Explanation: Now we want to make a histogram of the energy of the protons contained in the ntuple. Note that the PDG code for protons is 2212. End of explanation """ from topas2numpy import BinnedResult dose = BinnedResult('../tests/data/Dose.csv') print '{0} [{1}]'.format(dose.quantity, dose.unit) print 'Statistics: {0}'.format(dose.statistics) for dim in dose.dimensions: print '{0} [{1}]: {2} bins'.format(dim.name, dim.unit, dim.n_bins) """ Explanation: Reading binned results TOPAS scorers can also accumulate quantities within binned geometry components. By default a sum is accumulated, but it is also possible to accumulate additional statistics such as the mean, standard deviation, etc. As more metadata is available for binned scorers than ntuple scorers, they are stored in an instance of the BinnedScorer class. The raw data is still kept in a NumPy array, which is kept in the data attribute. Our first example is a Bragg peak caused by a proton beam stopping in water. First we read in the data and see what is available: End of explanation """ ax = plt.subplot(111) z = dose.dimensions[2].get_bin_centers() plt.plot(z, np.squeeze(dose.data['Sum'])) plt.xlabel('Depth [cm]') plt.ylabel('Dose [Gy]') ax.set_xlim(xmax=17) """ Explanation: Since the dose is only binned in the z-dimension, we can numpy.squeeze() the array to remove the other dimensions. Then we plot the dose against the z distance (i.e. depth). In this example, only the Sum statistic is available, but TOPAS can also provide Standard_Deviation, etc. End of explanation """ ntracks = BinnedResult('../tests/data/SurfaceTracks.csv') print '{0} [{1}]'.format(ntracks.quantity, ntracks.unit) print 'Statistics: {0}'.format(ntracks.statistics) for dim in ntracks.dimensions: print '{0} [{1}]: {2} bins'.format(dim.name, dim.unit, dim.n_bins) """ Explanation: Our second example is the number of tracks passing through a surface as a beam of protons passes through a dipole magnet. The charged particles are deflected by the Lorentz force. Let's investigate what data is available. 
End of explanation """ plt.imshow(np.squeeze(ntracks.data['Sum'])) plt.colorbar() """ Explanation: So the number of tracks is scored on a two-dimensional surface and we can plot this as an image. End of explanation """
cstrelioff/ARM-ipynb
Chapter3/chptr3.1-R.ipynb
mit
%%R # I had to import foreign to get access to read.dta library("foreign") kidiq <- read.dta("../../ARM_Data/child.iq/kidiq.dta") # I won't attach kidiq-- i generally don't attach to avoid confusion(s) #attach(kidiq) """ Explanation: 3.1: One predictor A note on R packages If the arm library is not installed in your R setup, you'll need to figure out how to install the package. On Ubuntu 14.04 I do something like this: Start R using sudo: sudo R Install the arm package using install.packages('arm', dep = TRUE) Read the data Data are in the child.iq directory of the ARM_Data download-- you might have to change the path I use below to reflect the path on your computer. End of explanation """ %%R library("arm") """ Explanation: First regression-- binary predictor, Pg 31 Loading the arm library -- this produces lots of warnings on the first load that I can't figure out how suppress at this point. However, things work just fine despite the distraction of the warnings. Just rerun the cell to clean up the mess. Also, let me know if you can figure out how get rid of the warnings. End of explanation """ %%R fit0 <- lm(kidiq$kid_score ~ kidiq$mom_hs) display(fit0) """ Explanation: Fit the regression using the non-jittered data End of explanation """ %%R # -- note that I use kidiq$kid_score because I did not attach kidiq above kidscore.jitter <- jitter(kidiq$kid_score) """ Explanation: Plot Figure 3.1, Pg 32 Add some jitter to kid_score End of explanation """ %%R # define function jitter.binary <- function(a, jitt=.05){ ifelse (a==0, runif (length(a), 0, jitt), runif (length(a), 1-jitt, 1)) } # use a slightly different name mom_hs.jitter <- jitter.binary(kidiq$mom_hs) """ Explanation: Add some 'binary jitter' to mom_hs End of explanation """ %%R plot(mom_hs.jitter, kidscore.jitter, xlab="Mother completed high school", ylab="Child test score", pch=20, xaxt="n", yaxt="n") axis(1, seq(0,1)) axis(2, c(20,60,100,140)) abline(fit0) """ Explanation: Plot using the jittered data End of explanation """ %%R fit1 <- lm (kidiq$kid_score ~ kidiq$mom_iq) display(fit1) """ Explanation: Second regression -- continuous predictor, Pg 32 End of explanation """ %%R plot(kidiq$mom_iq, kidiq$kid_score, xlab="Mother IQ score", ylab="Child test score", pch=20, xaxt="n", yaxt="n") axis(1, c(80,100,120,140)) axis(2, c(20,60,100,140)) abline (fit1) """ Explanation: Figure 3.2, Pg 33 End of explanation """
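The lab itself is written in R; purely as an illustrative aside (not part of the original chapter), the same two regressions can be reproduced in Python with pandas and statsmodels, assuming the kidiq.dta file sits at the path used above.

import pandas as pd
import statsmodels.formula.api as smf

# Read the Stata file used by the R code above.
kidiq = pd.read_stata("../../ARM_Data/child.iq/kidiq.dta")

# Regression on the binary predictor (mother completed high school).
fit0 = smf.ols("kid_score ~ mom_hs", data=kidiq).fit()
print(fit0.summary())

# Regression on the continuous predictor (mother's IQ).
fit1 = smf.ols("kid_score ~ mom_iq", data=kidiq).fit()
print(fit1.summary())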
chetnapriyadarshini/deep-learning
gan_mnist/Intro_to_GANs_Solution.ipynb
mit
%matplotlib inline import pickle as pkl import numpy as np import tensorflow as tf import matplotlib.pyplot as plt from tensorflow.examples.tutorials.mnist import input_data mnist = input_data.read_data_sets('MNIST_data') """ Explanation: Generative Adversarial Network In this notebook, we'll be building a generative adversarial network (GAN) trained on the MNIST dataset. From this, we'll be able to generate new handwritten digits! GANs were first reported on in 2014 from Ian Goodfellow and others in Yoshua Bengio's lab. Since then, GANs have exploded in popularity. Here are a few examples to check out: Pix2Pix CycleGAN A whole list The idea behind GANs is that you have two networks, a generator $G$ and a discriminator $D$, competing against each other. The generator makes fake data to pass to the discriminator. The discriminator also sees real data and predicts if the data it's received is real or fake. The generator is trained to fool the discriminator, it wants to output data that looks as close as possible to real data. And the discriminator is trained to figure out which data is real and which is fake. What ends up happening is that the generator learns to make data that is indistiguishable from real data to the discriminator. The general structure of a GAN is shown in the diagram above, using MNIST images as data. The latent sample is a random vector the generator uses to contruct it's fake images. As the generator learns through training, it figures out how to map these random vectors to recognizable images that can foold the discriminator. The output of the discriminator is a sigmoid function, where 0 indicates a fake image and 1 indicates an real image. If you're interested only in generating new images, you can throw out the discriminator after training. Now, let's see how we build this thing in TensorFlow. End of explanation """ def model_inputs(real_dim, z_dim): inputs_real = tf.placeholder(tf.float32, (None, real_dim), name='input_real') inputs_z = tf.placeholder(tf.float32, (None, z_dim), name='input_z') return inputs_real, inputs_z """ Explanation: Model Inputs First we need to create the inputs for our graph. We need two inputs, one for the discriminator and one for the generator. Here we'll call the discriminator input inputs_real and the generator input inputs_z. We'll assign them the appropriate sizes for each of the networks. End of explanation """ def generator(z, out_dim, n_units=128, reuse=False, alpha=0.01): with tf.variable_scope('generator', reuse=reuse): # Hidden layer h1 = tf.layers.dense(z, n_units, activation=None) # Leaky ReLU h1 = tf.maximum(alpha * h1, h1) # Logits and tanh output logits = tf.layers.dense(h1, out_dim, activation=None) out = tf.tanh(logits) return out """ Explanation: Generator network Here we'll build the generator network. To make this network a universal function approximator, we'll need at least one hidden layer. We should use a leaky ReLU to allow gradients to flow backwards through the layer unimpeded. A leaky ReLU is like a normal ReLU, except that there is a small non-zero output for negative input values. Variable Scope Here we need to use tf.variable_scope for two reasons. Firstly, we're going to make sure all the variable names start with generator. Similarly, we'll prepend discriminator to the discriminator variables. This will help out later when we're training the separate networks. We could just use tf.name_scope to set the names, but we also want to reuse these networks with different inputs. 
For the generator, we're going to train it, but also sample from it as we're training and after training. The discriminator will need to share variables between the fake and real input images. So, we can use the reuse keyword for tf.variable_scope to tell TensorFlow to reuse the variables instead of creating new ones if we build the graph again. To use tf.variable_scope, you use a with statement: python with tf.variable_scope('scope_name', reuse=False): # code here Here's more from the TensorFlow documentation to get another look at using tf.variable_scope. Leaky ReLU TensorFlow doesn't provide an operation for leaky ReLUs, so we'll need to make one . For this you can use take the outputs from a linear fully connected layer and pass them to tf.maximum. Typically, a parameter alpha sets the magnitude of the output for negative values. So, the output for negative input (x) values is alpha*x, and the output for positive x is x: $$ f(x) = max(\alpha * x, x) $$ Tanh Output The generator has been found to perform the best with $tanh$ for the generator output. This means that we'll have to rescale the MNIST images to be between -1 and 1, instead of 0 and 1. End of explanation """ def discriminator(x, n_units=128, reuse=False, alpha=0.01): with tf.variable_scope('discriminator', reuse=reuse): # Hidden layer h1 = tf.layers.dense(x, n_units, activation=None) # Leaky ReLU h1 = tf.maximum(alpha * h1, h1) logits = tf.layers.dense(h1, 1, activation=None) out = tf.sigmoid(logits) return out, logits """ Explanation: Discriminator The discriminator network is almost exactly the same as the generator network, except that we're using a sigmoid output layer. End of explanation """ # Size of input image to discriminator input_size = 784 # Size of latent vector to generator z_size = 100 # Sizes of hidden layers in generator and discriminator g_hidden_size = 128 d_hidden_size = 128 # Leak factor for leaky ReLU alpha = 0.01 # Smoothing smooth = 0.1 """ Explanation: Hyperparameters End of explanation """ tf.reset_default_graph() # Create our input placeholders input_real, input_z = model_inputs(input_size, z_size) # Build the model g_model = generator(input_z, input_size) # g_model is the generator output d_model_real, d_logits_real = discriminator(input_real) d_model_fake, d_logits_fake = discriminator(g_model, reuse=True) """ Explanation: Build network Now we're building the network from the functions defined above. First is to get our inputs, input_real, input_z from model_inputs using the sizes of the input and z. Then, we'll create the generator, generator(input_z, input_size). This builds the generator with the appropriate input and output sizes. Then the discriminators. We'll build two of them, one for real data and one for fake data. Since we want the weights to be the same for both real and fake data, we need to reuse the variables. For the fake data, we're getting it from the generator as g_model. So the real data discriminator is discriminator(input_real) while the fake discriminator is discriminator(g_model, reuse=True). 
End of explanation """ # Calculate losses d_loss_real = tf.reduce_mean( tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_real, labels=tf.ones_like(d_logits_real) * (1 - smooth))) d_loss_fake = tf.reduce_mean( tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake, labels=tf.zeros_like(d_logits_real))) d_loss = d_loss_real + d_loss_fake g_loss = tf.reduce_mean( tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake, labels=tf.ones_like(d_logits_fake))) """ Explanation: Discriminator and Generator Losses Now we need to calculate the losses, which is a little tricky. For the discriminator, the total loss is the sum of the losses for real and fake images, d_loss = d_loss_real + d_loss_fake. The losses will by sigmoid cross-entropys, which we can get with tf.nn.sigmoid_cross_entropy_with_logits. We'll also wrap that in tf.reduce_mean to get the mean for all the images in the batch. So the losses will look something like python tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels)) For the real image logits, we'll use d_logits_real which we got from the discriminator in the cell above. For the labels, we want them to be all ones, since these are all real images. To help the discriminator generalize better, the labels are reduced a bit from 1.0 to 0.9, for example, using the parameter smooth. This is known as label smoothing, typically used with classifiers to improve performance. In TensorFlow, it looks something like labels = tf.ones_like(tensor) * (1 - smooth) The discriminator loss for the fake data is similar. The logits are d_logits_fake, which we got from passing the generator output to the discriminator. These fake logits are used with labels of all zeros. Remember that we want the discriminator to output 1 for real images and 0 for fake images, so we need to set up the losses to reflect that. Finally, the generator losses are using d_logits_fake, the fake image logits. But, now the labels are all ones. The generator is trying to fool the discriminator, so it wants to discriminator to output ones for fake images. End of explanation """ # Optimizers learning_rate = 0.002 # Get the trainable_variables, split into G and D parts t_vars = tf.trainable_variables() g_vars = [var for var in t_vars if var.name.startswith('generator')] d_vars = [var for var in t_vars if var.name.startswith('discriminator')] d_train_opt = tf.train.AdamOptimizer(learning_rate).minimize(d_loss, var_list=d_vars) g_train_opt = tf.train.AdamOptimizer(learning_rate).minimize(g_loss, var_list=g_vars) """ Explanation: Optimizers We want to update the generator and discriminator variables separately. So we need to get the variables for each part build optimizers for the two parts. To get all the trainable variables, we use tf.trainable_variables(). This creates a list of all the variables we've defined in our graph. For the generator optimizer, we only want to generator variables. Our past selves were nice and used a variable scope to start all of our generator variable names with generator. So, we just need to iterate through the list from tf.trainable_variables() and keep variables to start with generator. Each variable object has an attribute name which holds the name of the variable as a string (var.name == 'weights_0' for instance). We can do something similar with the discriminator. All the variables in the discriminator start with discriminator. Then, in the optimizer we pass the variable lists to var_list in the minimize method. 
This tells the optimizer to only update the listed variables. Something like tf.train.AdamOptimizer().minimize(loss, var_list=var_list) will only train the variables in var_list. End of explanation """ !mkdir checkpoints batch_size = 100 epochs = 100 samples = [] losses = [] # Only save generator variables saver = tf.train.Saver(var_list=g_vars) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) for e in range(epochs): for ii in range(mnist.train.num_examples//batch_size): batch = mnist.train.next_batch(batch_size) # Get images, reshape and rescale to pass to D batch_images = batch[0].reshape((batch_size, 784)) batch_images = batch_images*2 - 1 # Sample random noise for G batch_z = np.random.uniform(-1, 1, size=(batch_size, z_size)) # Run optimizers _ = sess.run(d_train_opt, feed_dict={input_real: batch_images, input_z: batch_z}) _ = sess.run(g_train_opt, feed_dict={input_z: batch_z}) # At the end of each epoch, get the losses and print them out train_loss_d = sess.run(d_loss, {input_z: batch_z, input_real: batch_images}) train_loss_g = g_loss.eval({input_z: batch_z}) print("Epoch {}/{}...".format(e+1, epochs), "Discriminator Loss: {:.4f}...".format(train_loss_d), "Generator Loss: {:.4f}".format(train_loss_g)) # Save losses to view after training losses.append((train_loss_d, train_loss_g)) # Sample from generator as we're training for viewing afterwards sample_z = np.random.uniform(-1, 1, size=(16, z_size)) gen_samples = sess.run( generator(input_z, input_size, reuse=True), feed_dict={input_z: sample_z}) samples.append(gen_samples) saver.save(sess, './checkpoints/generator.ckpt') # Save training generator samples with open('train_samples.pkl', 'wb') as f: pkl.dump(samples, f) """ Explanation: Training End of explanation """ fig, ax = plt.subplots() losses = np.array(losses) plt.plot(losses.T[0], label='Discriminator') plt.plot(losses.T[1], label='Generator') plt.title("Training Losses") plt.legend() """ Explanation: Training loss Here we'll check out the training losses for the generator and discriminator. End of explanation """ def view_samples(epoch, samples): fig, axes = plt.subplots(figsize=(7,7), nrows=4, ncols=4, sharey=True, sharex=True) for ax, img in zip(axes.flatten(), samples[epoch]): ax.xaxis.set_visible(False) ax.yaxis.set_visible(False) im = ax.imshow(img.reshape((28,28)), cmap='Greys_r') return fig, axes # Load samples from generator taken while training with open('train_samples.pkl', 'rb') as f: samples = pkl.load(f) """ Explanation: Generator samples from training Here we can view samples of images from the generator. First we'll look at images taken while training. End of explanation """ _ = view_samples(-1, samples) """ Explanation: These are samples from the final training epoch. You can see the generator is able to reproduce numbers like 1, 7, 3, 2. Since this is just a sample, it isn't representative of the full range of images this generator can make. End of explanation """ rows, cols = 10, 6 fig, axes = plt.subplots(figsize=(7,12), nrows=rows, ncols=cols, sharex=True, sharey=True) for sample, ax_row in zip(samples[::int(len(samples)/rows)], axes): for img, ax in zip(sample[::int(len(sample)/cols)], ax_row): ax.imshow(img.reshape((28,28)), cmap='Greys_r') ax.xaxis.set_visible(False) ax.yaxis.set_visible(False) """ Explanation: Below I'm showing the generated images as the network was training, every 10 epochs. With bonus optical illusion! 
End of explanation """ saver = tf.train.Saver(var_list=g_vars) with tf.Session() as sess: saver.restore(sess, tf.train.latest_checkpoint('checkpoints')) sample_z = np.random.uniform(-1, 1, size=(16, z_size)) gen_samples = sess.run( generator(input_z, input_size, reuse=True), feed_dict={input_z: sample_z}) _ = view_samples(0, [gen_samples]) """ Explanation: It starts out as all noise. Then it learns to make only the center white and the rest black. You can start to see some number like structures appear out of the noise like 1s and 9s. Sampling from the generator We can also get completely new images from the generator by using the checkpoint we saved after training. We just need to pass in a new latent vector $z$ and we'll get new samples! End of explanation """
tensorflow/hub
examples/colab/tf_hub_generative_image_module.ipynb
apache-2.0
# Copyright 2018 The TensorFlow Hub Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """ Explanation: Copyright 2018 The TensorFlow Hub Authors. Licensed under the Apache License, Version 2.0 (the "License"); End of explanation """ # Install imageio for creating animations. !pip -q install imageio !pip -q install scikit-image !pip install git+https://github.com/tensorflow/docs #@title Imports and function definitions from absl import logging import imageio import PIL.Image import matplotlib.pyplot as plt import numpy as np import tensorflow as tf tf.random.set_seed(0) import tensorflow_hub as hub from tensorflow_docs.vis import embed import time try: from google.colab import files except ImportError: pass from IPython import display from skimage import transform # We could retrieve this value from module.get_input_shapes() if we didn't know # beforehand which module we will be using. latent_dim = 512 # Interpolates between two vectors that are non-zero and don't both lie on a # line going through origin. First normalizes v2 to have the same norm as v1. # Then interpolates between the two vectors on the hypersphere. def interpolate_hypersphere(v1, v2, num_steps): v1_norm = tf.norm(v1) v2_norm = tf.norm(v2) v2_normalized = v2 * (v1_norm / v2_norm) vectors = [] for step in range(num_steps): interpolated = v1 + (v2_normalized - v1) * step / (num_steps - 1) interpolated_norm = tf.norm(interpolated) interpolated_normalized = interpolated * (v1_norm / interpolated_norm) vectors.append(interpolated_normalized) return tf.stack(vectors) # Simple way to display an image. def display_image(image): image = tf.constant(image) image = tf.image.convert_image_dtype(image, tf.uint8) return PIL.Image.fromarray(image.numpy()) # Given a set of images, show an animation. 
def animate(images): images = np.array(images) converted_images = np.clip(images * 255, 0, 255).astype(np.uint8) imageio.mimsave('./animation.gif', converted_images) return embed.embed_file('./animation.gif') logging.set_verbosity(logging.ERROR) """ Explanation: Generate Artificial Faces with CelebA Progressive GAN Model <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/hub/tutorials/tf_hub_generative_image_module"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/hub/blob/master/examples/colab/tf_hub_generative_image_module.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/hub/blob/master/examples/colab/tf_hub_generative_image_module.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View on GitHub</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/hub/examples/colab/tf_hub_generative_image_module.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> </td> <td> <a href="https://tfhub.dev/google/progan-128/1"><img src="https://www.tensorflow.org/images/hub_logo_32px.png" />See TF Hub model</a> </td> </table> This Colab demonstrates use of a TF Hub module based on a generative adversarial network (GAN). The module maps from N-dimensional vectors, called latent space, to RGB images. Two examples are provided: * Mapping from latent space to images, and * Given a target image, using gradient descent to find a latent vector that generates an image similar to the target image. Optional prerequisites Familiarity with low level Tensorflow concepts. Generative Adversarial Network on Wikipedia. Paper on Progressive GANs: Progressive Growing of GANs for Improved Quality, Stability, and Variation. More models Here you can find all models currently hosted on tfhub.dev that can generate images. Setup End of explanation """ progan = hub.load("https://tfhub.dev/google/progan-128/1").signatures['default'] def interpolate_between_vectors(): v1 = tf.random.normal([latent_dim]) v2 = tf.random.normal([latent_dim]) # Creates a tensor with 25 steps of interpolation between v1 and v2. vectors = interpolate_hypersphere(v1, v2, 50) # Uses module to generate images from the latent space. interpolated_images = progan(vectors)['default'] return interpolated_images interpolated_images = interpolate_between_vectors() animate(interpolated_images) """ Explanation: Latent space interpolation Random vectors Latent space interpolation between two randomly initialized vectors. We will use a TF Hub module progan-128 that contains a pre-trained Progressive GAN. End of explanation """ image_from_module_space = True # @param { isTemplate:true, type:"boolean" } def get_module_space_image(): vector = tf.random.normal([1, latent_dim]) images = progan(vector)['default'][0] return images def upload_image(): uploaded = files.upload() image = imageio.imread(uploaded[list(uploaded.keys())[0]]) return transform.resize(image, [128, 128]) if image_from_module_space: target_image = get_module_space_image() else: target_image = upload_image() display_image(target_image) """ Explanation: Finding closest vector in latent space Fix a target image. As an example use an image generated from the module or upload your own. 
End of explanation """ tf.random.set_seed(42) initial_vector = tf.random.normal([1, latent_dim]) display_image(progan(initial_vector)['default'][0]) def find_closest_latent_vector(initial_vector, num_optimization_steps, steps_per_image): images = [] losses = [] vector = tf.Variable(initial_vector) optimizer = tf.optimizers.Adam(learning_rate=0.01) loss_fn = tf.losses.MeanAbsoluteError(reduction="sum") for step in range(num_optimization_steps): if (step % 100)==0: print() print('.', end='') with tf.GradientTape() as tape: image = progan(vector.read_value())['default'][0] if (step % steps_per_image) == 0: images.append(image.numpy()) target_image_difference = loss_fn(image, target_image[:,:,:3]) # The latent vectors were sampled from a normal distribution. We can get # more realistic images if we regularize the length of the latent vector to # the average length of vector from this distribution. regularizer = tf.abs(tf.norm(vector) - np.sqrt(latent_dim)) loss = target_image_difference + regularizer losses.append(loss.numpy()) grads = tape.gradient(loss, [vector]) optimizer.apply_gradients(zip(grads, [vector])) return images, losses num_optimization_steps=200 steps_per_image=5 images, loss = find_closest_latent_vector(initial_vector, num_optimization_steps, steps_per_image) plt.plot(loss) plt.ylim([0,max(plt.ylim())]) animate(np.stack(images)) """ Explanation: After defining a loss function between the target image and the image generated by a latent space variable, we can use gradient descent to find variable values that minimize the loss. End of explanation """ display_image(np.concatenate([images[-1], target_image], axis=1)) """ Explanation: Compare the result to the target: End of explanation """
openstreams/wflow
notebooks/BMI-Test.ipynb
gpl-3.0
import wflow.wflow_bmi as bmi import logging reload(bmi) %pylab inline import datetime from IPython.html.widgets import interact """ Explanation: <h1>Basic test of the wflow BMI interface End of explanation """ # This is the LAnd Atmosphere (LA) model LA_model = bmi.wflowbmi_csdms() LA_model.initialize('../examples/wflow_rhine_sbm/wflow_sbm_bmi.ini',loglevel=logging.ERROR) # This is the routing (RT) model RT_model = bmi.wflowbmi_csdms() RT_model.initialize('../examples/wflow_rhine_sbm/wflow_routing_bmi.ini',loglevel=logging.ERROR) """ Explanation: Start up two models: The wflow_sbm model calculates the runoff from each cell (the LA land-atmosphere model) the wflow_routing model that uses a kinematic wave for routing the flow (the RT routing model) End of explanation """ print(LA_model.get_value("timestepsecs")) print LA_model.get_start_time() aa = LA_model.get_attribute_names() LA_model.get_attribute_value("run:reinit") LA_model.set_attribute_value("run:reinit",'1') LA_model.get_attribute_value("run:reinit") imshow(LA_model.get_value("Altitude")) # Save the old dem, change the dem in the model and set it back origdem = LA_model.get_value("Altitude") newdem = origdem * 1.6 LA_model.set_value('Altitude',newdem) diff = origdem - LA_model.get_value("Altitude") imshow(diff) imshow(LA_model.get_value("FirstZoneDepth")) imshow(LA_model.get_value("River")) """ Explanation: <h3>Now we can investigate some model parameters End of explanation """ t_end = RT_model.get_end_time() t_start = RT_model.get_start_time() t = RT_model.get_current_time() (t_end - t_start)/(86400) """ Explanation: <h3>Start and end times End of explanation """ t_end = RT_model.get_end_time() t = RT_model.get_start_time() res = [] resq = [] # Loop in time and put the output of SBM into the separate routing module - one-way link while t < t_end: LA_model.update() # Now set the output from the LA model (specific Q) as input to the RT model thevar = LA_model.get_value("InwaterMM") RT_model.set_value("IW",thevar) # The IW is set in the wflow_routing.ini var as a forcing RT_model.update() resq.append(RT_model.get_value("SurfaceRunoff")) res.append(thevar) t = RT_model.get_current_time() print datetime.datetime.fromtimestamp(t) LA_model.finalize() RT_model.finalize() """ Explanation: <h3>Now start the models End of explanation """ def browse_res(digits): n = len(digits) def view_image(i): plt.imshow(log(digits[i]+1)) plt.title('Step: %d' % i) plt.colorbar() plt.show() interact(view_image, i=(0,n-1)) browse_res(res) browse_res(resq) """ Explanation: <h4>Define function to view the results End of explanation """
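As an illustrative refactor (not in the original notebook), the one-way coupling loop above can be wrapped in a small helper so the exchanged variable names are explicit. It only relies on the BMI calls already used: update, get_value, set_value, get_current_time and get_end_time.

def run_one_way_coupling(src_model, dst_model,
                         src_var="InwaterMM", dst_var="IW",
                         out_var="SurfaceRunoff"):
    """Run both models to the end time, passing src_var from src to dst each step."""
    outputs = []
    t = dst_model.get_current_time()
    t_end = dst_model.get_end_time()
    while t < t_end:
        src_model.update()                         # land-atmosphere step
        dst_model.set_value(dst_var, src_model.get_value(src_var))
        dst_model.update()                         # routing step
        outputs.append(dst_model.get_value(out_var))
        t = dst_model.get_current_time()
    return outputs

# outputs = run_one_way_coupling(LA_model, RT_model)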
mdda/fossasia-2016_deep-learning
notebooks/2-CNN/6-StyleTransfer/2-Art-Style-Transfer-googlenet_theano.ipynb
mit
import theano import theano.tensor as T import lasagne from lasagne.utils import floatX import numpy as np import scipy import matplotlib.pyplot as plt %matplotlib inline import os # for directory listings import pickle import time AS_PATH='./images/art-style' from model import googlenet net = googlenet.build_model() net_input_var = net['input'].input_var net_output_layer = net['prob'] """ Explanation: Art Style Transfer This notebook is an implementation of the algorithm described in "A Neural Algorithm of Artistic Style" (http://arxiv.org/abs/1508.06576) by Gatys, Ecker and Bethge. Additional details of their method are available at http://arxiv.org/abs/1505.07376 and http://bethgelab.org/deepneuralart/. An image is generated which combines the content of a photograph with the "style" of a painting. This is accomplished by jointly minimizing the squared difference between feature activation maps of the photo and generated image, and the squared difference of feature correlation between painting and generated image. A total variation penalty is also applied to reduce high frequency noise. This notebook was originally sourced from Lasagne Recipes, but has been modified to use a GoogLeNet network (pre-trained and pre-loaded), and given some features to make it easier to experiment with. End of explanation """ params = pickle.load(open('./data/googlenet/blvc_googlenet.pkl', 'rb'), encoding='iso-8859-1') model_param_values = params['param values'] #classes = params['synset words'] lasagne.layers.set_all_param_values(net_output_layer, model_param_values) IMAGE_W=224 print("Loaded Model parameters") """ Explanation: Load the pretrained weights into the network : End of explanation """ photos = [ '%s/photos/%s' % (AS_PATH, f) for f in os.listdir('%s/photos/' % AS_PATH) if not f.startswith('.')] photo_i=-1 # will be incremented in next cell (i.e. to start at [0]) """ Explanation: Choose the Photo to be Enhanced End of explanation """ photo_i += 1 photo = plt.imread(photos[photo_i % len(photos)]) photo_rawim, photo = googlenet.prep_image(photo) plt.imshow(photo_rawim) """ Explanation: Executing the cell below will iterate through the images in the ./images/art-style/photos directory, so you can choose the one you want End of explanation """ styles = [ '%s/styles/%s' % (AS_PATH, f) for f in os.listdir('%s/styles/' % AS_PATH) if not f.startswith('.')] style_i=-1 # will be incremented in next cell (i.e. 
to start at [0]) """ Explanation: Choose the photo with the required 'Style' End of explanation """ style_i += 1 art = plt.imread(styles[style_i % len(styles)]) art_rawim, art = googlenet.prep_image(art) plt.imshow(art_rawim) """ Explanation: Executing the cell below will iterate through the images in the ./images/art-style/styles directory, so you can choose the one you want End of explanation """ def plot_layout(combined): def no_axes(): plt.gca().xaxis.set_visible(False) plt.gca().yaxis.set_visible(False) plt.figure(figsize=(9,6)) plt.subplot2grid( (2,3), (0,0) ) no_axes() plt.imshow(photo_rawim) plt.subplot2grid( (2,3), (1,0) ) no_axes() plt.imshow(art_rawim) plt.subplot2grid( (2,3), (0,1), colspan=2, rowspan=2 ) no_axes() plt.imshow(combined, interpolation='nearest') plt.tight_layout() def gram_matrix(x): x = x.flatten(ndim=3) g = T.tensordot(x, x, axes=([2], [2])) return g def content_loss(P, X, layer): p = P[layer] x = X[layer] loss = 1./2 * ((x - p)**2).sum() return loss def style_loss(A, X, layer): a = A[layer] x = X[layer] A = gram_matrix(a) G = gram_matrix(x) N = a.shape[1] M = a.shape[2] * a.shape[3] loss = 1./(4 * N**2 * M**2) * ((G - A)**2).sum() return loss def total_variation_loss(x): return (((x[:,:,:-1,:-1] - x[:,:,1:,:-1])**2 + (x[:,:,:-1,:-1] - x[:,:,:-1,1:])**2)**1.25).sum() """ Explanation: This defines various measures of difference that we'll use to compare the current output image with the original sources. End of explanation """ layers = [ # used for 'content' in photo - a mid-tier convolutional layer 'inception_4b/output', # used for 'style' - conv layers throughout model (not same as content one) 'conv1/7x7_s2', 'conv2/3x3', 'inception_3b/output', 'inception_4d/output', ] #layers = [ # # used for 'content' in photo - a mid-tier convolutional layer # 'pool4/3x3_s2', # # # used for 'style' - conv layers throughout model (not same as content one) # 'conv1/7x7_s2', 'conv2/3x3', 'pool3/3x3_s2', 'inception_5b/output', #] layers = {k: net[k] for k in layers} """ Explanation: Here are the GoogLeNet layers that we're going to pay attention to : End of explanation """ input_im_theano = T.tensor4() outputs = lasagne.layers.get_output(layers.values(), input_im_theano) photo_features = {k: theano.shared(output.eval({input_im_theano: photo})) for k, output in zip(layers.keys(), outputs)} art_features = {k: theano.shared(output.eval({input_im_theano: art})) for k, output in zip(layers.keys(), outputs)} # Get expressions for layer activations for generated image generated_image = theano.shared(floatX(np.random.uniform(-128, 128, (1, 3, IMAGE_W, IMAGE_W)))) gen_features = lasagne.layers.get_output(layers.values(), generated_image) gen_features = {k: v for k, v in zip(layers.keys(), gen_features)} """ Explanation: Precompute layer activations for photo and artwork This takes ~ 20 seconds End of explanation """ losses = [] # content loss cl = 10 /1000. losses.append(cl * content_loss(photo_features, gen_features, 'inception_4b/output')) # style loss sl = 20 *1000. losses.append(sl * style_loss(art_features, gen_features, 'conv1/7x7_s2')) losses.append(sl * style_loss(art_features, gen_features, 'conv2/3x3')) losses.append(sl * style_loss(art_features, gen_features, 'inception_3b/output')) losses.append(sl * style_loss(art_features, gen_features, 'inception_4d/output')) #losses.append(sl * style_loss(art_features, gen_features, 'inception_5b/output')) # total variation penalty vp = 0.01 /1000. /1000. 
losses.append(vp * total_variation_loss(generated_image)) total_loss = sum(losses) """ Explanation: Define the overall loss / badness function End of explanation """ grad = T.grad(total_loss, generated_image) """ Explanation: The Famous Symbolic Gradient operation End of explanation """ # Theano functions to evaluate loss and gradient - takes around 1 minute (!) f_loss = theano.function([], total_loss) f_grad = theano.function([], grad) # Helper functions to interface with scipy.optimize def eval_loss(x0): x0 = floatX(x0.reshape((1, 3, IMAGE_W, IMAGE_W))) generated_image.set_value(x0) return f_loss().astype('float64') def eval_grad(x0): x0 = floatX(x0.reshape((1, 3, IMAGE_W, IMAGE_W))) generated_image.set_value(x0) return np.array(f_grad()).flatten().astype('float64') """ Explanation: Get Ready for Optimisation by SciPy End of explanation """ generated_image.set_value(photo) #generated_image.set_value(floatX(np.random.uniform(-128, 128, (1, 3, IMAGE_W, IMAGE_W)))) x0 = generated_image.get_value().astype('float64') iteration=0 """ Explanation: Initialize with the original photo, since going from noise (the code that's commented out) takes many more iterations. End of explanation """ t0 = time.time() scipy.optimize.fmin_l_bfgs_b(eval_loss, x0.flatten(), fprime=eval_grad, maxfun=40) x0 = generated_image.get_value().astype('float64') iteration += 1 if False: plt.figure(figsize=(8,8)) plt.imshow(googlenet.deprocess(x0), interpolation='nearest') plt.axis('off') plt.text(270, 25, '# {} in {:.1f}sec'.format(iteration, (float(time.time() - t0))), fontsize=14) else: plot_layout(googlenet.deprocess(x0)) print('Iteration {}, ran in {:.1f}sec'.format(iteration, float(time.time() - t0))) """ Explanation: Optimize all those losses, and show the image To refine the result, just keep hitting 'run' on this cell (each iteration is about 60 seconds) : End of explanation """
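A small convenience wrapper (not part of the original notebook): instead of re-running the optimisation cell by hand, the refinement rounds can be looped and the intermediate images kept, reusing eval_loss, eval_grad, generated_image and googlenet.deprocess from above.

import scipy.optimize

def refine(x0, rounds=5, maxfun_per_round=40):
    """Run several L-BFGS rounds and collect a snapshot after each one."""
    snapshots = []
    for _ in range(rounds):
        scipy.optimize.fmin_l_bfgs_b(eval_loss, x0.flatten(),
                                     fprime=eval_grad, maxfun=maxfun_per_round)
        x0 = generated_image.get_value().astype('float64')
        snapshots.append(googlenet.deprocess(x0))
    return x0, snapshots

# x0, snapshots = refine(x0, rounds=3)
# plt.imshow(snapshots[-1], interpolation='nearest')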
GHorace/ma2823_2016
lab_notebooks/Lab 4 2016-10-07 Regularized logistic regression.ipynb
mit
import numpy as np %pylab inline # Load the data as usual (here the code for Python 2.7) X = np.loadtxt('data/small_Endometrium_Uterus.csv', delimiter=',', skiprows=1, usecols=range(1, 3001)) y = np.loadtxt('data/small_Endometrium_Uterus.csv', delimiter=',', skiprows=1, usecols=[3001], converters={3001: lambda s: 0 if s=='Endometrium' else 1}, dtype='int') # Set up a stratified 10-fold cross-validation from sklearn import cross_validation folds = cross_validation.StratifiedKFold(y, 10, shuffle=True) # Create a function that does cross-validation and scales the features on each training set. from sklearn import preprocessing def cross_validate_with_scaling(design_matrix, labels, classifier, cv_folds): """ Perform a cross-validation and returns the predictions. Use a scaler to scale the features to mean 0, standard deviation 1. Parameters: ----------- design_matrix: (n_samples, n_features) np.array Design matrix for the experiment. labels: (n_samples, ) np.array Vector of labels. classifier: sklearn classifier object Classifier instance; must have the following methods: - fit(X, y) to train the classifier on the data X, y - predict_proba(X) to apply the trained classifier to the data X and return probability estimates cv_folds: sklearn cross-validation object Cross-validation iterator. Return: ------- pred: (n_samples, ) np.array Vectors of predictions (same order as labels). """ pred = np.zeros(labels.shape) # vector of 0 in which to store the predictions for tr, te in cv_folds: # Restrict data to train/test folds Xtr = design_matrix[tr, :] ytr = labels[tr] Xte = design_matrix[te, :] #print Xtr.shape, ytr.shape, Xte.shape # Scale data scaler = preprocessing.StandardScaler() # create scaler Xtr = scaler.fit_transform(Xtr) # fit the scaler to the training data and transform training data Xte = scaler.transform(Xte) # transform test data # Fit classifier classifier.fit(Xtr, ytr) # Predict probabilities (of belonging to +1 class) on test data yte_pred = classifier.predict_proba(Xte) # two-dimensional array # Identify the index, in yte_pred, of the positive class (y=1) # index_of_class_1 = np.nonzero(classifier.classes_ == 1)[0][0] index_of_class_1 = 1 - ytr[0] # 0 if the first sample is positive, 1 otherwise pred[te] = yte_pred[:, index_of_class_1] return pred """ Explanation: 2016-10-07: Regularized Logistic Regression In this lab, we will appply logistic regression to the Endometrium vs. Uterus cancer data. Let us start by setting up our environment, loading the data, and setting up our cross-validation. End of explanation """ from sklearn import linear_model clf = linear_model.LogisticRegression(penalty='l1') """ Explanation: 1. L1-Regularized Logistic Regression Let us start with default parameters. End of explanation """ from sklearn import grid_search param_grid = {'C':[1e-3, 1e-2, 1e-1, 1., 1e2, 1e3]} clf = grid_search.GridSearchCV(linear_model.LogisticRegression(penalty='l1'), param_grid) """ Explanation: Question Compute the cross-validated predictions of the l1-regularized logistic regression with default parameters on our data. Question Plot the corresponding ROC curve, and compare it to that obtained for non-regularized logistic regression. Setting the C parameter What does the C parameter correspond to? See the documentation at http://scikit-learn.org/stable/modules/linear_model.html#logistic-regression for help. Scikit-learn makes it really easy to use a nested cross-validation to choose a good value for C among a grid of several choices. 
End of explanation """ print clf.best_estimator_ """ Explanation: Question What criterion is used to chose the optimal C? See the documentation at http://scikit-learn.org/0.17/modules/generated/sklearn.grid_search.GridSearchCV.html#sklearn.grid_search.GridSearchCV. Try changing this criterion http://scikit-learn.org/stable/modules/model_evaluation.html#scoring-parameter Question Compute the cross-validated predictions of the l1-regularized logistic regression with optimized C parameter on our data. GridSearchCV also uses the optimal parameter(s) it detected to fit a model to its entire training data again, generating a "best model" that is accessible via the best_estimator_ attribute. In our case, because we called GridSearchCV from inside a cross-validation loop, clf.best_estimator_ is the "best model" on the last training fold. End of explanation """ # This code plots the regression weights of the classifier 'clf' plt.plot(range(len(clf.best_estimator_.coef_[0])), clf.best_estimator_.coef_[0], color='blue', marker='+', linestyle='') plt.xlabel('Genes', fontsize=16) plt.ylabel('Weights', fontsize=16) plt.title('Logistic regression weights', fontsize=16) plt.xlim([0, X.shape[1]]) """ Explanation: Question Plot the corresponding ROC curve, and compare to that obtained for * non-regularized logistic regression. * l1-regularized logistic regression with default C parameter. Regression weights Remember the goal of l1-regularization is to build sparse models. End of explanation """ fig = plt.figure(figsize=(10, 5)) ax = fig.add_subplot(121) # use a 1x2 subplot grid; ax will refer to the 1st subplot number_of_weights = #TODO logreg_weights = #TODO ax.plot(range(number_of_weights), logreg_weights, color='blue', marker='+', linestyle='') ax.set_xlabel('Genes', fontsize=16) ax.set_ylabel('Weights', fontsize=16) ax.set_title('Logistic regression weights', fontsize=16) ax.set_xlim([0, X.shape[1]]) ax = fig.add_subplot(122) # use a 1x2 subplot grid; ax will refer to the 2nd subplot l1_logreg_weights = #TODO ax.plot(ange(number_of_weights), l1_logreg_weights, color='blue', marker='+', linestyle='') ax.set_xlabel('Genes', fontsize=16) ax.set_ylabel('Weights', fontsize=16) ax.set_title('Regularized Logistic regression weights', fontsize=16) ax.set_xlim([0, X.shape[1]]) plt.tight_layout() """ Explanation: Question Compare the regression weights obtained with and without l1-regularization, in two side-by-side plots. End of explanation """ clf = grid_search.GridSearchCV(linear_model.LogisticRegression(penalty='l2'), param_grid) """ Explanation: 2. L2-regularized logistic regression Question What is the role of l2 regularization? End of explanation """
mne-tools/mne-tools.github.io
0.16/_downloads/plot_head_positions.ipynb
bsd-3-clause
# Authors: Eric Larson <[email protected]> # # License: BSD (3-clause) from os import path as op import mne print(__doc__) data_path = op.join(mne.datasets.testing.data_path(verbose=True), 'SSS') pos = mne.chpi.read_head_pos(op.join(data_path, 'test_move_anon_raw.pos')) """ Explanation: Visualize subject head movement Show how subjects move as a function of time. End of explanation """ mne.viz.plot_head_positions(pos, mode='traces') """ Explanation: Visualize the subject head movements as traces: End of explanation """ mne.viz.plot_head_positions(pos, mode='field') """ Explanation: Or we can visualize them as a continuous field (with the vectors pointing in the head-upward direction): End of explanation """
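A small illustrative addition (not in the original example): the translation columns of the position array can be used to quantify how far the head drifts from its starting position. This assumes the standard MaxFilter-style layout returned by read_head_pos, i.e. columns [t, q1, q2, q3, x, y, z, gof, err, v] with translations in metres.

import numpy as np

translations = pos[:, 4:7]  # x, y, z head position over time
displacement = np.linalg.norm(translations - translations[0], axis=1)
print('Maximum drift from the initial head position: %0.1f mm'
      % (1e3 * displacement.max()))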
GoogleCloudPlatform/asl-ml-immersion
notebooks/jax/solutions/flax.ipynb
apache-2.0
# from typing import Callable, Sequence # used ? import flax from flax import linen as nn """ Explanation: See go/flax-air Flax You probably want to keep the Flax documentation ready in another tab: https://flax.readthedocs.io/ End of explanation """ # Simple module with matmul layer. Note that we could build this in many # different ways using the `scope` for parameter handling. class Matmul: def __init__(self, features): self.features = features def kernel_init(self, key, shape): return jax.random.normal(key, shape) def __call__(self, scope, x): kernel = scope.param( "kernel", self.kernel_init, (x.shape[1], self.features) ) return x @ kernel class Model: def __init__(self, features): self.matmuls = [Matmul(f) for f in features] def __call__(self, scope, x): x = x.reshape([len(x), -1]) for i, matmul in enumerate(self.matmuls): x = scope.child(matmul, f"matmul_{i + 1}")(x) if i < len(self.matmuls) - 1: x = jax.nn.relu(x) x = jax.nn.log_softmax(x) return x model = Model([ds_info.features["label"].num_classes]) y, variables = flax.core.init(model)(key, train_images[:1]) assert (y == flax.core.apply(model)(variables, train_images[:1])).all() # YOUR ACTION REQUIRED: # Check out the parameter structure, try adding/removing "layers" and see how it # changes ##-snip model = Model([50, ds_info.features["label"].num_classes]) _, variables = flax.core.init(model)(key, train_images[:1]) jax.tree_map(jnp.shape, variables) # YOUR ACTION REQUIRED: # Redefine loss_fun(), update_step(), and train() from above to train the new # model. ##-snip @jax.jit def update_step(variables, inputs, targets): def loss_fun(variables): logits = flax.core.apply(model)(variables, inputs) logprobs = logits - jax.scipy.special.logsumexp( logits, axis=-1, keepdims=True ) return -logprobs[jnp.arange(len(targets)), targets].mean() loss, grads = jax.value_and_grad(loss_fun)(variables) updated_variables = jax.tree_multimap( lambda variable, grad: variable - 0.05 * grad, variables, grads ) return updated_variables, loss def train(variables, steps, batch_size=128): losses = [] steps_per_epoch = len(train_images) // batch_size for step in range(steps): i0 = (step % steps_per_epoch) * batch_size variables, loss = update_step( variables, train_images[i0 : i0 + batch_size], train_labels[i0 : i0 + batch_size], ) losses.append(float(loss)) return variables, jnp.array(losses) learnt_variables, losses = train(variables, steps=1_000) plt.plot(losses) print("final loss:", np.mean(losses[-100])) """ Explanation: Functional core End of explanation """ # Reimplementation of above model using the Linen API. class Model(nn.Module): num_classes: int def setup(self): self.dense = nn.Dense(self.num_classes) def __call__(self, x): x = x.reshape([len(x), -1]) x = self.dense(x) x = nn.log_softmax(x) return x model = Model(num_classes=ds_info.features["label"].num_classes) variables = model.init(jax.random.PRNGKey(0), train_images[:1]) jax.tree_map(jnp.shape, variables) # YOUR ACTION REQUIRED: # 1. Rewrite above model using the @nn.compact notation. # 2. Extend the model to use additional layers, see e.g. 
# convolutions in # http://google3/third_party/py/flax/linen/linear.py ##-snip class Model(nn.Module): num_classes: int @nn.compact def __call__(self, x): x = nn.Conv(features=32, kernel_size=(3, 3))(x) x = nn.relu(x) x = nn.avg_pool(x, window_shape=(2, 2), strides=(2, 2)) x = nn.Conv(features=64, kernel_size=(3, 3))(x) x = nn.relu(x) x = nn.avg_pool(x, window_shape=(2, 2), strides=(2, 2)) x = x.reshape((x.shape[0], -1)) # flatten x = nn.Dense(features=256)(x) x = nn.relu(x) x = nn.Dense(features=10)(x) x = nn.log_softmax(x) return x model = Model(ds_info.features["label"].num_classes) variables = model.init(key, train_images[:1]) jax.tree_map(jnp.shape, variables) # Reimplementation of training loop using a Flax optimizer. @jax.jit def update_step_optim(optim, inputs, targets): def loss_fun(params): logits = model.apply(dict(params=params), inputs) logprobs = logits - jax.scipy.special.logsumexp( logits, axis=-1, keepdims=True ) return -logprobs[jnp.arange(len(targets)), targets].mean() loss, grads = jax.value_and_grad(loss_fun)(optim.target) return optim.apply_gradient(grads), loss def train_optim(optim, steps, batch_size=128): losses = [] steps_per_epoch = len(train_images) // batch_size for step in range(steps): i0 = (step % steps_per_epoch) * batch_size optim, loss = update_step_optim( optim, train_images[i0 : i0 + batch_size], train_labels[i0 : i0 + batch_size], ) losses.append(float(loss)) return optim, jnp.array(losses) optim = flax.optim.adam.Adam(learning_rate=0.01).create(variables["params"]) learnt_optim, losses = train_optim(optim, steps=1_000) plt.plot(losses) print("final loss:", np.mean(losses[-100])) # Re-evaluate accuracy. ( model.apply(dict(params=learnt_optim.target), test_images).argmax(axis=-1) == test_labels ).mean() """ Explanation: Stateless Linen module End of explanation """ # Let's add batch norm! # I'm not saying it's a good idea here, but it will allow us study the changes # we need to make for models that have state. class Model(nn.Module): num_classes: int @nn.compact def __call__(self, x, *, train): x = x.reshape([len(x), -1]) x = nn.BatchNorm(use_running_average=not train)(x) x = nn.Dense(self.num_classes)(x) x = nn.log_softmax(x) return x model = Model(num_classes=ds_info.features["label"].num_classes) variables = model.init(jax.random.PRNGKey(0), train_images[:1], train=True) jax.tree_map(jnp.shape, variables) # Note the new "batch_stats" collection ! # YOUR ACTION REQUIRED: # Check below code and add comments for every change compared to the model above # without state. 
@jax.jit
def update_step_optim(optim, batch_stats, inputs, targets):
    def loss_fun(params):
        # "batch_stats" is marked mutable so apply() also returns the updated statistics.
        logits, mutated_state = model.apply(
            dict(params=params, batch_stats=batch_stats),
            inputs,
            mutable="batch_stats",
            train=True,
        )
        logprobs = logits - jax.scipy.special.logsumexp(
            logits, axis=-1, keepdims=True
        )
        # Return the updated batch statistics as an auxiliary output.
        return (
            -logprobs[jnp.arange(len(targets)), targets].mean(),
            mutated_state["batch_stats"],
        )

    # has_aux=True because loss_fun returns (loss, batch_stats).
    (loss, state), grads = jax.value_and_grad(loss_fun, has_aux=True)(
        optim.target
    )
    # Thread the updated batch statistics back to the caller.
    return optim.apply_gradient(grads), state, loss


def train_optim(optim, batch_stats, steps, batch_size=128):
    losses = []
    steps_per_epoch = len(train_images) // batch_size
    for step in range(steps):
        i0 = (step % steps_per_epoch) * batch_size
        optim, batch_stats, loss = update_step_optim(
            optim,
            batch_stats,
            train_images[i0 : i0 + batch_size],
            train_labels[i0 : i0 + batch_size],
        )
        losses.append(float(loss))
    return optim, batch_stats, jnp.array(losses)


optim = flax.optim.adam.Adam(learning_rate=0.01).create(variables["params"])
learnt_optim, batch_stats, losses = train_optim(
    optim, variables["batch_stats"], steps=1_000
)
plt.plot(losses)
print("final loss:", np.mean(losses[-100:]))

# YOUR ACTION REQUIRED:
# Make predictions with above model with state
##-snip
(
    model.apply(
        dict(params=learnt_optim.target, batch_stats=batch_stats),
        test_images,
        train=False,
    ).argmax(axis=-1)
    == test_labels
).mean()

"""
Explanation: Linen module with state
End of explanation
"""

# YOUR ACTION REQUIRED:
# Store the Colab in your personal drive and modify it to use the dataset from
# above.
# While this might sound boring, you will learn the following things:
# - how to load files in public Colab from Github, modify them in the UI and
# optionally store them on your personal Google Drive.
# - how to use inline TensorBoard on public Colab and export it to tensorboard.dev

"""
Explanation: Modify MNIST example
Check out the Flax MNIST example Colab - you can find a link on Github
https://github.com/google/flax/tree/master/linen_examples/mnist
End of explanation
"""
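"""
Explanation: The action item above mentions inline TensorBoard; the cell below is a minimal sketch of that workflow, not part of the original Colab. It assumes flax.metrics.tensorboard is available in this Flax version, uses an arbitrary log directory, and shows the Colab magics and the tensorboard.dev upload command only as comments.
End of explanation
"""

# Sketch only: write the training losses from above with Flax's SummaryWriter.
# The log directory '/tmp/flax_intro_logs' is an arbitrary choice.
from flax.metrics import tensorboard

summary_writer = tensorboard.SummaryWriter('/tmp/flax_intro_logs')
for step, loss in enumerate(losses):
    summary_writer.scalar('train_loss', float(loss), step)
summary_writer.flush()

# In Colab, TensorBoard can then be loaded inline and the logs exported:
# %load_ext tensorboard
# %tensorboard --logdir /tmp/flax_intro_logs
# !tensorboard dev upload --logdir /tmp/flax_intro_logs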
wenduowang/git_home
python/MSBA/intro/group_project/Project_Expedia_test1_yawen.ipynb
gpl-3.0
train["date_time"] = pd.to_datetime(train["date_time"]) train["year"] = train["date_time"].dt.year train["month"] = train["date_time"].dt.month """ Explanation: Convert date time type to seperate the train and test set. becasue the test set data time have to be come later than the train set End of explanation """ import random unique_users = train.user_id.unique() sel_user_ids = [unique_users[i] for i in sorted(random.sample(range(len(unique_users)), 10000)) ] sel_train = train[train.user_id.isin(sel_user_ids)] t1 = sel_train[((sel_train.year == 2013) | ((sel_train.year == 2014) & (sel_train.month < 8)))] t2 = sel_train[((sel_train.year == 2014) & (sel_train.month >= 8))] # remove the empty bookinf in test set t2 = t2[t2.is_booking == True] """ Explanation: pick random 10000 users row as our train data set End of explanation """ t2[:10] most_common_clusters = list(train.hotel_cluster.value_counts().head().index) predictions = [most_common_clusters for i in range(t2.shape[0])] from sklearn.decomposition import PCA pca = PCA(n_components=3) dest_small = pca.fit_transform(destinations[["d{0}".format(i + 1) for i in range(149)]]) dest_small = pd.DataFrame(dest_small) dest_small["srch_destination_id"] = destinations["srch_destination_id"] """ Explanation: Simple predication: use the most 5 common cluster as predication for each data in test End of explanation """
kimkipyo/dss_git_kkp
ํ†ต๊ณ„, ๋จธ์‹ ๋Ÿฌ๋‹ ๋ณต์Šต/160502์›”_1์ผ์ฐจ_๋ถ„์„ ํ™˜๊ฒฝ, ์†Œ๊ฐœ/15.Pandas ํ”ผ๋ด‡๊ณผ ๊ทธ๋ฃน ์—ฐ์‚ฐ.ipynb
mit
data = { 'state': ['Ohio', 'Ohio', 'Ohio', 'Nevada', 'Nevada'], 'year': [2000, 2001, 2002, 2001, 2002], 'pop': [1.5, 2.5, 3.0, 2.5, 3.5] } df = pd.DataFrame(data, columns=["state", "year", "pop"]) df df.pivot("state", "year", "pop") """ Explanation: Pandas ํ”ผ๋ด‡๊ณผ ๊ทธ๋ฃน ์—ฐ์‚ฐ ํ”ผ๋ด‡ ํ…Œ์ด๋ธ” ํ”ผ๋ด‡ ํ…Œ์ด๋ธ”(pivot table)์ด๋ž€ ๋ฐ์ดํ„ฐ ์—ด(column) ์ค‘์—์„œ ๋‘ ๊ฐœ๋ฅผ ํ‚ค(key)๋กœ ์‚ฌ์šฉํ•˜์—ฌ ๋ฐ์ดํ„ฐ๋ฅผ ์„ ํƒํ•˜๋Š” ๋ฐฉ๋ฒ•์„ ๋งํ•œ๋‹ค. ํ”ผ๋ด‡ ํ…Œ์ด๋ธ”์„ ์‚ฌ์šฉํ•˜๊ธฐ ์œ„ํ•ด์„œ๋Š” ํ‚ค๊ฐ€ ๋  ์ˆ˜ ์žˆ๋Š” ๋‘ ๊ฐœ์˜ ์—ด(column) ํ˜น์€ ํ•„๋“œ(field)๋ฅผ ์„ ํƒํ•˜์—ฌ ์ด ๋‘ ์—ด์„ ํ–‰ ์ธ๋ฑ์Šค (row index) ์—ด ์ธ๋ฑ์Šค (column index) ๋กœ ๋ณ€๊ฒฝํ•ด์•ผ ํ•œ๋‹ค. <img src="https://datascienceschool.net/upfiles/f281a1a51f834b72add6abc73f9c95a0.png"> pivot ๋ฉ”์„œ๋“œ๋ฅผ ์‚ฌ์šฉํ•˜๋ฉด ํ–‰ ์ธ๋ฑ์Šค, ์—ด ์ธ๋ฑ์Šค, ์ž๋ฃŒ๊ฐ€ ๋  3๊ฐ€์ง€์˜ ์—ด(column)์„ ์ง€์ •ํ•  ์ˆ˜ ์žˆ๋‹ค. End of explanation """ df.pivot("year", "pop", "state") df.set_index(["state", "year"]) df.set_index(["state", "year"]).unstack() """ Explanation: ํ–‰ ์ธ๋ฑ์Šค์™€, ์—ด ์ธ๋ฑ์Šค๊ฐ€ ๋  ์ž๋ฃŒ๋Š” ํ‚ค(key)์˜ ์—ญํ• ์„ ํ•ด์•ผ ํ•œ๋‹ค. ์ฆ‰, ์ด ๊ฐ’์œผ๋กœ ๋ฐ์ดํ„ฐ๊ฐ€ ์œ ์ผํ•˜๊ฒŒ(unique) ๊ฒฐ์ •๋˜์–ด์•ผ ํ•œ๋‹ค. End of explanation """ np.random.seed(0) df = pd.DataFrame({'key1': ['a', 'a', 'b', 'b', 'a'], 'key2': ['one', 'two', 'one', 'two', 'one'], 'data1': np.random.randn(5), 'data2': np.random.randn(5)}) df """ Explanation: ๊ทธ๋ฃน ์—ฐ์‚ฐ ๊ทธ๋ฃน ์—ฐ์‚ฐ์€ ํ”ผ๋ด‡ ํ…Œ์ด๋ธ”๊ณผ ๋‹ฌ๋ฆฌ ํ‚ค์— ์˜ํ•ด์„œ ๊ฒฐ์ •๋˜๋Š” ๋ฐ์ดํ„ฐ๊ฐ€ ๋ณต์ˆ˜๊ฐœ๊ฐ€ ์žˆ์–ด๋„ ๊ดœ์ฐฎ๋‹ค. ๋Œ€์‹  ์—ฐ์‚ฐ์„ ํ†ตํ•ด ๋ณต์ˆ˜๊ฐœ์˜ ๊ทธ๋ฃน ๋ฐ์ดํ„ฐ์— ๋Œ€ํ•œ ๋Œ€ํ‘œ๊ฐ’์„ ์ •ํ•œ๋‹ค. ์ด๋ฅผ split-apply-combine ์—ฐ์‚ฐ์ด๋ผ๊ณ ๋„ ํ•œ๋‹ค. split ๋‹จ๊ณ„ ํŠน์ • Key ๊ฐ’์— ๋”ฐ๋ผ ๋ฐ์ดํ„ฐ ๊ทธ๋ฃน์„ ๋งŒ๋“ ๋‹ค. apply ๋‹จ๊ณ„ ๊ฐ๊ฐ์˜ ๊ทธ๋ฃน์— ๋Œ€ํ•ด ์›ํ•˜๋Š” ์—ฐ์‚ฐ์„ ํ•˜์—ฌ ๋Œ€ํ‘œ๊ฐ’์„ ์ƒ์„ฑํ•œ๋‹ค. count(), mean(), median(), min(), max() sum(), prod(), std(), var(), quantile() first(), last() combine ๋‹จ๊ณ„ ๊ทธ๋ฃน์˜ Key ๊ฐ’์— ๋Œ€ํ•ด ์›ํ•˜๋Š” ์—ฐ์‚ฐ์˜ ๊ฒฐ๊ณผ๋ฅผ Value๋กœ ์ง€์ •ํ•œ dict๋ฅผ ์ƒ์„ฑํ•œ๋‹ค. <img src="https://datascienceschool.net/upfiles/5cf33c481e8041ebbf56a5af1f84d487.png" style="width:80%;"> End of explanation """ df.data1.groupby(df.key1).mean() gs = df.data1.groupby(df.key1) gs print("="*50) for n, g in gs: print("[key]:", n) print("[group]:", type(g)) print("-"*50) print(g) print("-"*50) print("[mean]:", g.mean()) print("="*50) gs.mean() """ Explanation: ๋ฌธ์ œ: key1 ๊ฐ’์— ๋”ฐ๋ฅธ data1์˜ ํ‰๊ท ์€? End of explanation """ means = df.data1.groupby([df.key1, df.key2]).mean() means means = df.data1.groupby([df.key1, df.key2]).mean() means """ Explanation: ๋ฌธ์ œ: ๋ณตํ•ฉ key (key1, key2) ๊ฐ’์— ๋”ฐ๋ฅธ data1์˜ ํ‰๊ท ์€? 
End of explanation """ np.random.seed(0) people = pd.DataFrame(np.random.randn(5,5), columns=['a','b','c','d','e'], index=['Joe','Steve','Wes','Jim','Travis']) people.ix[2:3, ['b', 'c']] = np.nan people print("="*80) for n, g in people.groupby(people.index): print("[key]:", n) print("[group]:", type(g)) print("-"*80) print(g) print("="*80) mapping = {'Joe': 'J', 'Jim': 'J', 'Steve': 'S', 'Wes': 'S', 'Travis': 'S'} print("="*80) for n, g in people.groupby(mapping): print("[key]:", n) print("[group]:", type(g)) print("-"*80) print(g) print("="*80) cap1 = lambda x: x[0].upper() print("="*80) for n, g in people.groupby(cap1): print("[key]:", n) print("[group]:", type(g)) print("-"*80) print(g) print("="*80) print("="*80) for n, g in people.groupby(people.columns, axis=1): print("[key]:", n) print("[group]:", type(g)) print("-"*80) print(g) print("="*80) mapping = {'a': 'red', 'b': 'red', 'c': 'blue', 'd': 'blue', 'e': 'red', 'f' : 'orange'} for n, g in people.groupby(mapping, axis=1): print("[key]:", n) print("[group]:", type(g)) print("-"*80) print(g) print("="*80) """ Explanation: groupby ๋ช…๋ น์˜ ์ธ์ˆ˜ groupby ๋ช…๋ น์—์„œ Key ์ธ์ˆ˜๋กœ ์ž…๋ ฅํ•  ์ˆ˜ ์žˆ๋Š” ๊ฐ’์€ ๋‹ค์Œ๊ณผ ๊ฐ™๋‹ค. ์—ด ๋˜๋Š” ์—ด์˜ ๋ฆฌ์ŠคํŠธ ํ–‰ ์ธ๋ฑ์Šค ์‚ฌ์ „/ํ•จ์ˆ˜: Column์˜ ๊ฐ’์„ ์‚ฌ์ „์— ๋งคํ•‘(mapping)ํ•˜๊ฑฐ๋‚˜ ํ•จ์ˆ˜ ์ฒ˜๋ฆฌํ•˜์—ฌ ๋‚˜์˜จ ๊ฒฐ๊ด๊ฐ’์„ ํ‚ค๋กœ ์ธ์‹ End of explanation """ %cd /home/dockeruser/data/pydata-book-master tips = pd.read_csv('../../pydata-book-master/ch08/tips.csv') tips.head() tips['tip_pct'] = tips['tip'] / tips['total_bill'] tips.tail() tips.describe() """ Explanation: ํŠน๋ณ„ํ•œ group ๋ณ„ ์—ฐ์‚ฐ ํ†ต๊ณ„ describe() ๊ทธ๋ฃน์„ ๋Œ€ํ‘œํ•˜๋Š” ํ•˜๋‚˜์˜ ๊ฐ’์„ ๊ณ„์‚ฐ agg(), aggregate() ๋Œ€ํ‘œ๊ฐ’์œผ๋กœ ํ•„๋“œ๋ฅผ ๊ต์ฒด transform() ๊ทธ๋ฃน ์ „์ฒด๋ฅผ ๋ณ€ํ˜•ํ•˜๋Š” ๊ณ„์‚ฐ apply() TIP ๋ฐ์ดํ„ฐ ์˜ˆ์ œ End of explanation """ tips.groupby(["sex", "smoker"])[["tip", "tip_pct"]].describe() """ Explanation: ๊ทธ๋ฃน๋ณ„ ํ†ต๊ณ„ End of explanation """ gs = tips.groupby(["sex", "smoker"]) gs_pct = gs["tip_pct"] gs_pct.mean() gs_pct.agg('mean') def peak_to_peak(arr): return arr.max() - arr.min() gs_pct.agg(['mean', 'std', peak_to_peak]) gs.agg({'tip_pct': 'mean', 'total_bill': peak_to_peak}) """ Explanation: ๊ทธ๋ฃน๋ณ„ ์—ฐ์‚ฐ End of explanation """ gs.agg("mean") tips2 = tips.copy() tips2["tips"] = gs.transform("mean")["tip_pct"] tips2.tail(15) """ Explanation: ๊ทธ๋ฃน์˜ ๊ฐ’์„ ๋Œ€ํ‘œ๊ฐ’์œผ๋กœ ๋Œ€์ฒด End of explanation """ def top(df, n=5, column='tip_pct'): return df.sort_values(by=column)[-n:] top(tips, n=6) tips.groupby('smoker').apply(top) tips.groupby(['smoker', 'day']).apply(top, n=1, column='total_bill') f = lambda x: x.describe() tips.groupby(['smoker']).apply(f) """ Explanation: ๊ทธ๋ฃน ์ž์ฒด๋ฅผ ๋Œ€์ฒด apply ๋ฉ”์†Œ๋“œ๋Š” ์ˆ˜์น˜๊ฐ’์ด ์•„๋‹Œ Group์„ ์ถœ๋ ฅ ๋‹จ์ˆœํžˆ ๋Œ€ํ‘œ๊ฐ’์„ ๊ณ„์‚ฐํ•˜๋Š” ๊ฒƒ ๋ฟ ์•„๋‹ˆ๋ผ ์ˆœ์„œ ์ •๋ ฌ, ์ผ๋ถ€ ์‚ญ์ œ ๋“ฑ ๊ทธ๋ฃน ๋‚ด์˜ ๋ ˆ์ฝ”๋“œ ์ž์ฒด๋ฅผ ๋ณ€ํ˜•ํ•˜๋Š” ๊ฒƒ๋„ ๊ฐ€๋Šฅ End of explanation """ tips.pivot_table(index=['sex', 'smoker']) tips.pivot_table(['tip_pct', 'size'], index=['sex', 'day'], columns='smoker') tips.pivot_table(['tip_pct', 'size'], index=['sex', 'day'], columns='smoker', margins=True) tips.pivot_table('tip_pct', index=['sex', 'smoker'], columns='day', aggfunc=len, margins=True) tips.pivot_table('size', index=['time', 'sex', 'smoker'], columns='day', aggfunc='sum', fill_value=0) """ Explanation: pivot_table pivot ๋ช…๋ น๊ณผ groupby ๋ช…๋ น์˜ ์ค‘๊ฐ„์  ์„ฑ๊ฒฉ pivot์„ ์ˆ˜ํ–‰ํ•˜์ง€๋งŒ ๋ฐ์ดํ„ฐ๊ฐ€ ์œ ๋‹ˆํฌํ•˜๊ฒŒ ์„ ํƒ๋˜์ง€ ์•Š์œผ๋ฉด aggfunc ์ธ์ˆ˜๋กœ ์ 
•์˜๋œ ํ•จ์ˆ˜๋ฅผ ์ˆ˜ํ–‰ํ•˜์—ฌ ๋Œ€ํ‘œ๊ฐ’ ๊ณ„์‚ฐ ๋””ํดํŠธ aggfunc ์€ ํ‰๊ท  ๊ณ„์‚ฐ End of explanation """
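"""
Explanation: A small illustrative check that is not part of the original notebook: with the default aggfunc, pivot_table is equivalent to a groupby followed by unstack. It assumes the tips DataFrame loaded above.
End of explanation
"""

# pivot_table with the default aggfunc (mean) vs. the equivalent groupby + unstack.
pt = tips.pivot_table('tip_pct', index='sex', columns='smoker')
gb = tips.groupby(['sex', 'smoker'])['tip_pct'].mean().unstack()
np.allclose(pt.values, gb.values)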
saashimi/code_guild
wk5/notebooks/wk5.1.ipynb
mit
class Car(object): wheels = 4 def __init__(self, make, model): self.make = make self.model = model mustang = Car('Ford', 'Mustang') print(mustang.wheels) # 4 print(Car.wheels) # 4 """ Explanation: Instance Attributes and Methods (source: Jeff Knupp) Class attributes Class attributes are attributes that are set at the class-level, as opposed to the instance-level. Normal attributes are introduced in the init method, but some attributes of a class hold for all instances in all cases. For example, consider the following definition of a Car object: End of explanation """ class Car(object): ... def make_car_sound(): print('VRooooommmm!') Car.make_car_sound() # This works ford = Car() ford.make_car_sound() # This doesn't """ Explanation: Static Methods Don't have access to self. End of explanation """ class Car(object): wheels = 4 @staticmethod def make_car_sound(): print('VRooooommmm!') def __init__(self, make, model): self.make = make self.model = model Car.make_car_sound(6) ford = Car('Ford', 'Falcon') ford.make_car_sound() # This works now! """ Explanation: Our make_car_sound static method does not work on an instance of our Car class because the instance tries to pass a self arg. To get around this, we use something called a decorator. In python, decorators start with the @ sign. End of explanation """ class Vehicle(object): ... wheels = 4 @classmethod def is_motorcycle(cls): return cls.wheels == 2 Vehicle.is_motorcycle() """ Explanation: Class Methods Previously we passed instances of a class to our methods using self. For class methods, we pass the whole class (not just a specific instance): End of explanation """ class Car(object): """A car for sale by Jeffco Car Dealership. Attributes: wheels: An integer representing the number of wheels the car has. miles: The integral number of miles driven on the car. make: The make of the car as a string. model: The model of the car as a string. year: The integral year the car was built. sold_on: The date the vehicle was sold. """ def __init__(self, wheels, miles, make, model, year, sold_on): """Return a new Car object.""" self.wheels = wheels self.miles = miles self.make = make self.model = model self.year = year self.sold_on = sold_on def sale_price(self): """Return the sale price for this car as a float amount.""" if self.sold_on is not None: return 0.0 # Already sold return 5000.0 * self.wheels def purchase_price(self): """Return the price for which we would pay to purchase the car.""" if self.sold_on is None: return 0.0 # Not yet sold return 8000 - (.10 * self.miles) ... """ Explanation: Inheritance While Object-oriented Programming is useful as a modeling tool, it truly gains power when the concept of inheritance is introduced. Inherticance is the process by which a "child" class derives the data and behavior of a "parent" class. An example will definitely help us here. Imagine we run a car dealership. We sell all types of vehicles, from motorcycles to trucks. We set ourselves apart from the competition by our prices. Specifically, how we determine the price of a vehicle on our lot: \$5,000 x number of wheels a vehicle has. We love buying back our vehicles as well. We offer a flat rate - 10% of the miles driven on the vehicle. For trucks, that rate is \$10,000. For cars, \$8,000. For motorcycles, \$4,000. If we wanted to create a sales system for our dealership using Object-oriented techniques, how would we do so? What would the objects be? 
We might have a Sale class, a Customer class, an Inventory class, and so forth, but we'd almost certainly have a Car, Truck, and Motorcycle class. What would these classes look like? Using what we've learned, here's a possible implementation of the Car class: End of explanation """ class Truck(object): """A truck for sale by Jeffco Car Dealership. Attributes: wheels: An integer representing the number of wheels the truck has. miles: The integral number of miles driven on the truck. make: The make of the truck as a string. model: The model of the truck as a string. year: The integral year the truck was built. sold_on: The date the vehicle was sold. """ def __init__(self, wheels, miles, make, model, year, sold_on): """Return a new Truck object.""" self.wheels = wheels self.miles = miles self.make = make self.model = model self.year = year self.sold_on = sold_on def sale_price(self): """Return the sale price for this truck as a float amount.""" if self.sold_on is not None: return 0.0 # Already sold return 5000.0 * self.wheels def purchase_price(self): """Return the price for which we would pay to purchase the truck.""" if self.sold_on is None: return 0.0 # Not yet sold return 10000 - (.10 * self.miles) """ Explanation: Okay, now let's create a truck class: End of explanation """ class Vehicle(object): """A vehicle for sale by Jeffco Car Dealership. Attributes: wheels: An integer representing the number of wheels the vehicle has. miles: The integral number of miles driven on the vehicle. make: The make of the vehicle as a string. model: The model of the vehicle as a string. year: The integral year the vehicle was built. sold_on: The date the vehicle was sold. """ base_sale_price = 0 def __init__(self, wheels, miles, make, model, year, sold_on): """Return a new Vehicle object.""" self.wheels = wheels self.miles = miles self.make = make self.model = model self.year = year self.sold_on = sold_on def sale_price(self): """Return the sale price for this vehicle as a float amount.""" if self.sold_on is not None: return 0.0 # Already sold return 5000.0 * self.wheels def purchase_price(self): """Return the price for which we would pay to purchase the vehicle.""" if self.sold_on is None: return 0.0 # Not yet sold return self.base_sale_price - (.10 * self.miles) """ Explanation: There's a lot of overlap between the car class and the truck class. In the spirit of DRY (don't repeat yourself), we'll try and abstract away the specific differences between car and truck and instead implement an abstract vehicle class. Abstract Classes End of explanation """ class Car(Vehicle): def __init__(self, wheels, miles, make, model, year, sold_on): """Return a new Car object.""" self.wheels = wheels self.miles = miles self.make = make self.model = model self.year = year self.sold_on = sold_on self.base_sale_price = 8000 class Truck(Vehicle): def __init__(self, wheels, miles, make, model, year, sold_on): """Return a new Truck object.""" self.wheels = wheels self.miles = miles self.make = make self.model = model self.year = year self.sold_on = sold_on self.base_sale_price = 10000 """ Explanation: Now we can make the Car and Truck class inherit from the Vehicle class by replacing object in the line class Car(object). The class in parenthesis is the class that is inherited from (object essentially means "no inheritance". We'll discuss exactly why we write that in a bit). 
End of explanation """ v = Vehicle(4, 0, 'Honda', 'Accord', 2014, None) print(v.purchase_price()) """ Explanation: This is okay, but we're still repeating a lot of code. We'd really like to get rid of all repetition. More problematic is people will be able to make a vehicle object. Do we really want people to make abstract objects (as opposed to cars and trucks) in this case? A Vehicle is just a concept, not a real thing, so what does it mean to say the following: End of explanation """ v = Vehicle(4, 0, 'Honda', 'Accord', 2014, None) """ Explanation: A Vehicle doesn't have a base_sale_price, only the individual child classes like Car and Truck do. The issue is that Vehicle should really be an Abstract Base Class. Abstract Base Classes are classes that are only meant to be inherited from; you can't create instance of an ABC. That means that, if Vehicle is an ABC, the following is illegal: End of explanation """ from abc import ABCMeta, abstractmethod class Vehicle(object): """A vehicle for sale by Jeffco Car Dealership. Attributes: wheels: An integer representing the number of wheels the vehicle has. miles: The integral number of miles driven on the vehicle. make: The make of the vehicle as a string. model: The model of the vehicle as a string. year: The integral year the vehicle was built. sold_on: The date the vehicle was sold. """ __metaclass__ = ABCMeta base_sale_price = 0 def sale_price(self): """Return the sale price for this vehicle as a float amount.""" if self.sold_on is not None: return 0.0 # Already sold return 5000.0 * self.wheels def purchase_price(self): """Return the price for which we would pay to purchase the vehicle.""" if self.sold_on is None: return 0.0 # Not yet sold return self.base_sale_price - (.10 * self.miles) @abstractmethod def vehicle_type(): """"Return a string representing the type of vehicle this is.""" pass v = Vehicle() v.base_sale_price """ Explanation: It makes sense to disallow this, as we never meant for vehicles to be used directly. We just wanted to use it to abstract away some common data and behavior. So how do we make a class an ABC? Simple! The abc module contains a metaclass called ABCMeta (metaclasses are a bit outside the scope of this article). Setting a class's metaclass to ABCMeta and making one of its methods virtual makes it an ABC. A virtual method is one that the ABC says must exist in child classes, but doesn't necessarily actually implement. For example, the Vehicle class may be defined as follows: End of explanation """ from abc import ABCMeta, abstractmethod class Vehicle(object): """A vehicle for sale by Jeffco Car Dealership. Attributes: wheels: An integer representing the number of wheels the vehicle has. miles: The integral number of miles driven on the vehicle. make: The make of the vehicle as a string. model: The model of the vehicle as a string. year: The integral year the vehicle was built. sold_on: The date the vehicle was sold. 
""" __metaclass__ = ABCMeta base_sale_price = 0 wheels = 0 def __init__(self, miles, make, model, year, sold_on): self.miles = miles self.make = make self.model = model self.year = year self.sold_on = sold_on def sale_price(self): """Return the sale price for this vehicle as a float amount.""" if self.sold_on is not None: return 0.0 # Already sold return 5000.0 * self.wheels def purchase_price(self): """Return the price for which we would pay to purchase the vehicle.""" if self.sold_on is None: return 0.0 # Not yet sold return self.base_sale_price - (.10 * self.miles) @abstractmethod def vehicle_type(self): """"Return a string representing the type of vehicle this is.""" pass class Car(Vehicle): """A car for sale by Jeffco Car Dealership.""" base_sale_price = 8000 wheels = 4 def vehicle_type(self): """"Return a string representing the type of vehicle this is.""" return 'car' class Truck(Vehicle): """A truck for sale by Jeffco Car Dealership.""" base_sale_price = 10000 wheels = 4 def vehicle_type(self): """"Return a string representing the type of vehicle this is.""" return 'truck' """ Explanation: Now, since vehicle_type is an abstractmethod, we can't directly create an instance of Vehicle. As long as Car and Truck inherit from Vehicle and define vehicle_type, we can instantiate those classes just fine. Returning to the repetition in our Car and Truck classes, let see if we can't remove that by hoisting up common functionality to the base class, Vehicle: End of explanation """ class Motorcycle(Vehicle): """A motorcycle for sale by Jeffco Car Dealership.""" base_sale_price = 4000 wheels = 2 def vehicle_type(self): """"Return a string representing the type of vehicle this is.""" return 'motorcycle' honda = Car(0, 'Honda', 'Accord', 2014, None) honda.wheels suzuki = Motorcycle(0, 'Suzuki', 'Ninja', 2015, None) suzuki.wheels """ Explanation: This fits perfectly with our intuition: as far as our system is concerned, the only difference between a car and truck is the base sale price. Defining a Motorcycle class, then, is similarly simple: End of explanation """ def can_speak(animal): if isinstance(animal, Person): return True elif isinstance(animal, Dog): return False else: raise RuntimeError('Unknown animal!') """ Explanation: Inheritance and the LSP So far, we've seen how using inheritance reduced duplication. What we were really doing was providing the proper level of abstraction. We've been working from the perspective of the object builder but what about the object caller's perspective? Quite a bit, it turns out. Imagine we have two classes, Dog and Person, and we want to write a function that takes either type of object and prints out whether or not the instance in question can speak (a dog can't, a person can). We might write code like the following: End of explanation """ def can_speak(animal): return animal.can_speak() """ Explanation: That works when we only have two types of animals, but what if we have twenty, or two hundred? That if...elif chain is going to get quite long. The key insight here is that can_speak shouldn't care what type of animal it's dealing with, the animal class itself should tell us if it can speak. By introducing a common base class, Animal, that defines can_speak, we relieve the function of it's type-checking burden. Now, as long as it knows it was an Animal that was passed in, determining if it can speak is trivial: End of explanation """
metpy/MetPy
v0.7/_downloads/Wind_SLP_Interpolation.ipynb
bsd-3-clause
import cartopy import cartopy.crs as ccrs from matplotlib.colors import BoundaryNorm import matplotlib.pyplot as plt import numpy as np import pandas as pd from metpy.calc import get_wind_components from metpy.cbook import get_test_data from metpy.gridding.gridding_functions import interpolate, remove_nan_observations from metpy.plots import add_metpy_logo from metpy.units import units to_proj = ccrs.AlbersEqualArea(central_longitude=-97., central_latitude=38.) """ Explanation: Wind and Sea Level Pressure Interpolation Interpolate sea level pressure, as well as wind component data, to make a consistent looking analysis, featuring contours of pressure and wind barbs. End of explanation """ with get_test_data('station_data.txt') as f: data = pd.read_csv(f, header=0, usecols=(2, 3, 4, 5, 18, 19), names=['latitude', 'longitude', 'slp', 'temperature', 'wind_dir', 'wind_speed'], na_values=-99999) """ Explanation: Read in data End of explanation """ lon = data['longitude'].values lat = data['latitude'].values xp, yp, _ = to_proj.transform_points(ccrs.Geodetic(), lon, lat).T """ Explanation: Project the lon/lat locations to our final projection End of explanation """ x_masked, y_masked, pres = remove_nan_observations(xp, yp, data['slp'].values) """ Explanation: Remove all missing data from pressure End of explanation """ slpgridx, slpgridy, slp = interpolate(x_masked, y_masked, pres, interp_type='cressman', minimum_neighbors=1, search_radius=400000, hres=100000) """ Explanation: Interpolate pressure using Cressman interpolation End of explanation """ wind_speed = (data['wind_speed'].values * units('m/s')).to('knots') wind_dir = data['wind_dir'].values * units.degree good_indices = np.where((~np.isnan(wind_dir)) & (~np.isnan(wind_speed))) x_masked = xp[good_indices] y_masked = yp[good_indices] wind_speed = wind_speed[good_indices] wind_dir = wind_dir[good_indices] """ Explanation: Get wind information and mask where either speed or direction is unavailable End of explanation """ u, v = get_wind_components(wind_speed, wind_dir) windgridx, windgridy, uwind = interpolate(x_masked, y_masked, np.array(u), interp_type='cressman', search_radius=400000, hres=100000) _, _, vwind = interpolate(x_masked, y_masked, np.array(v), interp_type='cressman', search_radius=400000, hres=100000) """ Explanation: Calculate u and v components of wind and then interpolate both. Both will have the same underlying grid so throw away grid returned from v interpolation. 
End of explanation """ x_masked, y_masked, t = remove_nan_observations(xp, yp, data['temperature'].values) tempx, tempy, temp = interpolate(x_masked, y_masked, t, interp_type='cressman', minimum_neighbors=3, search_radius=400000, hres=35000) temp = np.ma.masked_where(np.isnan(temp), temp) """ Explanation: Get temperature information End of explanation """ levels = list(range(-20, 20, 1)) cmap = plt.get_cmap('viridis') norm = BoundaryNorm(levels, ncolors=cmap.N, clip=True) fig = plt.figure(figsize=(20, 10)) add_metpy_logo(fig, 360, 120, size='large') view = fig.add_subplot(1, 1, 1, projection=to_proj) view.set_extent([-120, -70, 20, 50]) view.add_feature(cartopy.feature.NaturalEarthFeature(category='cultural', name='admin_1_states_provinces_lakes', scale='50m', facecolor='none')) view.add_feature(cartopy.feature.OCEAN) view.add_feature(cartopy.feature.COASTLINE) view.add_feature(cartopy.feature.BORDERS, linestyle=':') cs = view.contour(slpgridx, slpgridy, slp, colors='k', levels=list(range(990, 1034, 4))) plt.clabel(cs, inline=1, fontsize=12, fmt='%i') mmb = view.pcolormesh(tempx, tempy, temp, cmap=cmap, norm=norm) plt.colorbar(mmb, shrink=.4, pad=0.02, boundaries=levels) view.barbs(windgridx, windgridy, uwind, vwind, alpha=.4, length=5) plt.title('Surface Temperature (shaded), SLP, and Wind.') plt.show() """ Explanation: Set up the map and plot the interpolated grids appropriately. End of explanation """
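"""
Explanation: Two optional finishing touches, shown as a sketch rather than as part of the original example: thin the wind barbs by slicing the interpolated grids if the plot looks crowded, and save the figure to disk. The slice step and the output filename are arbitrary choices.
End of explanation
"""

# Optional: plot every other barb instead of all of them (an alternative to the
# barbs call above; shown commented out so the original plot is unchanged).
# view.barbs(windgridx[::2, ::2], windgridy[::2, ::2],
#            uwind[::2, ::2], vwind[::2, ::2], alpha=.4, length=5)

# Save the finished analysis to a PNG file.
fig.savefig('wind_slp_analysis.png', dpi=150, bbox_inches='tight')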
transcranial/keras-js
notebooks/layers/convolutional/Cropping3D.ipynb
mit
data_in_shape = (3, 5, 3, 3) L = Cropping3D(cropping=((1,1), (1,1), (1,1)), data_format='channels_last') layer_0 = Input(shape=data_in_shape) layer_1 = L(layer_0) model = Model(inputs=layer_0, outputs=layer_1) # set weights to random (use seed for reproducibility) np.random.seed(260) data_in = 2 * np.random.random(data_in_shape) - 1 result = model.predict(np.array([data_in])) data_out_shape = result[0].shape data_in_formatted = format_decimal(data_in.ravel().tolist()) data_out_formatted = format_decimal(result[0].ravel().tolist()) print('') print('in shape:', data_in_shape) print('in:', data_in_formatted) print('out shape:', data_out_shape) print('out:', data_out_formatted) DATA['convolutional.Cropping3D.0'] = { 'input': {'data': data_in_formatted, 'shape': data_in_shape}, 'expected': {'data': data_out_formatted, 'shape': data_out_shape} } """ Explanation: Cropping3D [convolutional.Cropping3D.0] cropping ((1,1), (1,1), (1,1)) on 3x5x3x3 input, data_format='channels_last' End of explanation """ data_in_shape = (3, 5, 3, 3) L = Cropping3D(cropping=((1,1), (1,1), (1,1)), data_format='channels_first') layer_0 = Input(shape=data_in_shape) layer_1 = L(layer_0) model = Model(inputs=layer_0, outputs=layer_1) # set weights to random (use seed for reproducibility) np.random.seed(261) data_in = 2 * np.random.random(data_in_shape) - 1 result = model.predict(np.array([data_in])) data_out_shape = result[0].shape data_in_formatted = format_decimal(data_in.ravel().tolist()) data_out_formatted = format_decimal(result[0].ravel().tolist()) print('') print('in shape:', data_in_shape) print('in:', data_in_formatted) print('out shape:', data_out_shape) print('out:', data_out_formatted) DATA['convolutional.Cropping3D.1'] = { 'input': {'data': data_in_formatted, 'shape': data_in_shape}, 'expected': {'data': data_out_formatted, 'shape': data_out_shape} } """ Explanation: [convolutional.Cropping3D.1] cropping ((1,1), (1,1), (1,1)) on 3x5x3x3 input, data_format='channels_first' End of explanation """ data_in_shape = (7, 6, 6, 6) L = Cropping3D(cropping=((3,2), (2,1), (2,3)), data_format='channels_last') layer_0 = Input(shape=data_in_shape) layer_1 = L(layer_0) model = Model(inputs=layer_0, outputs=layer_1) # set weights to random (use seed for reproducibility) np.random.seed(262) data_in = 2 * np.random.random(data_in_shape) - 1 result = model.predict(np.array([data_in])) data_out_shape = result[0].shape data_in_formatted = format_decimal(data_in.ravel().tolist()) data_out_formatted = format_decimal(result[0].ravel().tolist()) print('') print('in shape:', data_in_shape) print('in:', data_in_formatted) print('out shape:', data_out_shape) print('out:', data_out_formatted) DATA['convolutional.Cropping3D.2'] = { 'input': {'data': data_in_formatted, 'shape': data_in_shape}, 'expected': {'data': data_out_formatted, 'shape': data_out_shape} } """ Explanation: [convolutional.Cropping3D.2] cropping ((3,2), (2,1), (2,3)) on 7x6x6x6 input, data_format='channels_last' End of explanation """ data_in_shape = (7, 6, 6, 6) L = Cropping3D(cropping=((3,2), (2,1), (2,3)), data_format='channels_first') layer_0 = Input(shape=data_in_shape) layer_1 = L(layer_0) model = Model(inputs=layer_0, outputs=layer_1) # set weights to random (use seed for reproducibility) np.random.seed(263) data_in = 2 * np.random.random(data_in_shape) - 1 result = model.predict(np.array([data_in])) data_out_shape = result[0].shape data_in_formatted = format_decimal(data_in.ravel().tolist()) data_out_formatted = format_decimal(result[0].ravel().tolist()) print('') 
print('in shape:', data_in_shape) print('in:', data_in_formatted) print('out shape:', data_out_shape) print('out:', data_out_formatted) DATA['convolutional.Cropping3D.3'] = { 'input': {'data': data_in_formatted, 'shape': data_in_shape}, 'expected': {'data': data_out_formatted, 'shape': data_out_shape} } """ Explanation: [convolutional.Cropping3D.3] cropping ((3,2), (2,1), (2,3)) on 7x6x6x6 input, data_format='channels_first' End of explanation """ data_in_shape = (7, 6, 8, 3) L = Cropping3D(cropping=(1, 2, 3), data_format='channels_last') layer_0 = Input(shape=data_in_shape) layer_1 = L(layer_0) model = Model(inputs=layer_0, outputs=layer_1) # set weights to random (use seed for reproducibility) np.random.seed(264) data_in = 2 * np.random.random(data_in_shape) - 1 result = model.predict(np.array([data_in])) data_out_shape = result[0].shape data_in_formatted = format_decimal(data_in.ravel().tolist()) data_out_formatted = format_decimal(result[0].ravel().tolist()) print('') print('in shape:', data_in_shape) print('in:', data_in_formatted) print('out shape:', data_out_shape) print('out:', data_out_formatted) DATA['convolutional.Cropping3D.4'] = { 'input': {'data': data_in_formatted, 'shape': data_in_shape}, 'expected': {'data': data_out_formatted, 'shape': data_out_shape} } """ Explanation: [convolutional.Cropping3D.4] cropping (1, 2, 3) on 7x6x8x3 input, data_format='channels_last' End of explanation """ data_in_shape = (7, 6, 8, 3) L = Cropping3D(cropping=2, data_format='channels_last') layer_0 = Input(shape=data_in_shape) layer_1 = L(layer_0) model = Model(inputs=layer_0, outputs=layer_1) # set weights to random (use seed for reproducibility) np.random.seed(265) data_in = 2 * np.random.random(data_in_shape) - 1 result = model.predict(np.array([data_in])) data_out_shape = result[0].shape data_in_formatted = format_decimal(data_in.ravel().tolist()) data_out_formatted = format_decimal(result[0].ravel().tolist()) print('') print('in shape:', data_in_shape) print('in:', data_in_formatted) print('out shape:', data_out_shape) print('out:', data_out_formatted) DATA['convolutional.Cropping3D.5'] = { 'input': {'data': data_in_formatted, 'shape': data_in_shape}, 'expected': {'data': data_out_formatted, 'shape': data_out_shape} } """ Explanation: [convolutional.Cropping3D.5] cropping 2 on 7x6x8x3 input, data_format='channels_last' End of explanation """ import os filename = '../../../test/data/layers/convolutional/Cropping3D.json' if not os.path.exists(os.path.dirname(filename)): os.makedirs(os.path.dirname(filename)) with open(filename, 'w') as f: json.dump(DATA, f) print(json.dumps(DATA)) """ Explanation: export for Keras.js tests End of explanation """
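"""
Explanation: A small consistency check that is not part of the original notebook: verify that every exported fixture's flattened output has exactly as many values as its recorded output shape implies. It only assumes the DATA dict built above.
End of explanation
"""

# Sanity-check the generated fixtures before committing the JSON file.
for name, fixture in DATA.items():
    expected = fixture['expected']
    assert len(expected['data']) == int(np.prod(expected['shape'])), name
print('checked', len(DATA), 'fixtures')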