repo_name: string (6 to 77 chars)
path: string (8 to 215 chars)
license: string (15 classes)
cells: sequence
types: sequence
tensorflow/probability
tensorflow_probability/python/experimental/nn/examples/vib_dose.ipynb
apache-2.0
[ "Copyright 2020 The TensorFlow Authors.\nLicensed under the Apache License, Version 2.0 (the \"License\");", "#@title ##### Licensed under the Apache License, Version 2.0 (the \"License\"); { display-mode: \"form\" }\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "VIB + DoSE\n<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/probability/blob/main/tensorflow_probability/python/experimental/nn/examples/vib_dose.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/probability/blob/main/tensorflow_probability/python/experimental/nn/examples/vib_dose.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\n </td>\n</table>\n\nIn this example, we train a deep variational information bottleneck model (VIB) on the MNIST dataset. We then use density of states estimation to turn our VIB model into an Out-of-distribution (OOD) detector. Our current implementation achieves near-SOTA performance on both OOD detection and classification simultaneously without any exposure to OOD data during training.\nReferences\nThe VIB paper (Alemi et al. 2016) can be found Here\nThe DoSE paper (Morningstar et al. 2020) can be found Here\n1 Imports", "import functools\nimport sys\nimport time\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport tensorflow.compat.v2 as tf\ntf.enable_v2_behavior()\n\nimport tensorflow_datasets as tfds\nimport tensorflow_probability as tfp\n\n# Globally Enable XLA.\n# tf.config.optimizer.set_jit(True)\n\ntry:\n physical_devices = tf.config.list_physical_devices('GPU')\n tf.config.experimental.set_memory_growth(physical_devices[0], True)\nexcept:\n # Invalid device or cannot modify virtual devices once initialized.\n pass\n\ntfb = tfp.bijectors\ntfd = tfp.distributions\ntfn = tfp.experimental.nn", "2 Load Dataset", "[train_dataset, eval_dataset], datasets_info = tfds.load(\n name='mnist',\n split=['train', 'test'],\n with_info=True,\n shuffle_files=True)\n\ndef _preprocess(sample):\n return (tf.cast(sample['image'], tf.float32) * 2 / 255. 
- 1.,\n tf.cast(sample['label'], tf.int32))\n\ntrain_size = datasets_info.splits['train'].num_examples\nbatch_size = 32\n\ntrain_dataset = tfn.util.tune_dataset(\n train_dataset,\n batch_size=batch_size,\n shuffle_size=int(train_size / 7),\n preprocess_fn=_preprocess)\n\neval_dataset = tfn.util.tune_dataset(\n eval_dataset,\n repeat_count=1,\n preprocess_fn=_preprocess)\n\nx = next(iter(eval_dataset.batch(10)))[0]\ntfn.util.display_imgs(x)", "3 Define Model", "input_shape = datasets_info.features['image'].shape\nencoded_size = 16\nbase_depth = 32\n\nprior = tfd.MultivariateNormalDiag(\n loc=tf.zeros(encoded_size),\n scale_diag=tf.ones(encoded_size))\n\nConv = functools.partial(\n tfn.Convolution,\n init_bias_fn=tf.zeros_initializer(),\n init_kernel_fn=tf.initializers.he_uniform()) # Better for leaky_relu.\n\nencoder = tfn.Sequential([\n lambda x: 2. * tf.cast(x, tf.float32) - 1., # Center.\n Conv(1, 1 * base_depth, 5, strides=1, padding='same'),\n tf.nn.leaky_relu,\n Conv(1 * base_depth, 1 * base_depth, 5, strides=2, padding='same'),\n tf.nn.leaky_relu,\n Conv(1 * base_depth, 2 * base_depth, 5, strides=1, padding='same'),\n tf.nn.leaky_relu,\n Conv(2 * base_depth, 2 * base_depth, 5, strides=2, padding='same'),\n tf.nn.elu,\n Conv(2 * base_depth, 4 * encoded_size, 7, strides=1, padding='valid'),\n tf.nn.leaky_relu,\n tfn.util.flatten_rightmost(ndims=3),\n tfn.Affine(4*encoded_size, encoded_size + encoded_size * (encoded_size + 1) // 2),\n lambda x: tfd.MultivariateNormalTriL(\n loc=x[..., :encoded_size],\n scale_tril=tfb.FillScaleTriL()(x[..., encoded_size:]))\n], name='encoder')\n\nprint(encoder.summary())\n\nDeConv = functools.partial(\n tfn.ConvolutionTranspose,\n init_kernel_fn=tf.initializers.he_uniform()) # Better for leaky_relu.\n \nAffine = functools.partial(\n tfn.Affine,\n init_kernel_fn=tf.initializers.he_uniform())\n\ndecoder = tfn.Sequential([\n Affine(encoded_size, 10),\n lambda x: tfd.Categorical(logits=x)])\n\nprint(decoder.summary())", "4 Loss / Eval", "def compute_loss(x, y, beta=1.):\n q = encoder(x)\n z = q.sample()\n p = decoder(z)\n kl = tf.reduce_mean(q.log_prob(z) - prior.log_prob(z), axis=-1)\n # Note: we could use exact KL divergence, eg:\n # kl = tf.reduce_mean(tfd.kl_divergence(q, prior))\n # however we generally find that using the Monte Carlo approximation has\n # lower variance.\n nll = -tf.reduce_mean(p.log_prob(y), axis=-1)\n loss = nll + beta * kl\n return loss, (nll, kl), (q, z, p)\n\ntrain_iter = iter(train_dataset)\n\ndef loss():\n x, y = next(train_iter)\n loss, (nll, kl), _ = compute_loss(x, y, beta=0.075)\n return loss, (nll, kl)\n\nopt = tf.optimizers.Adam(learning_rate=1e-3, decay=0.00005)\n\nfit = tfn.util.make_fit_op(\n loss,\n opt,\n decoder.trainable_variables + encoder.trainable_variables,\n grad_summary_fn=lambda gs: tf.nest.map_structure(tf.norm, gs))\n\neval_iter = iter(eval_dataset.batch(5000).repeat())\n\[email protected]\ndef eval():\n x, y = next(eval_iter)\n loss, (nll, kl), _ = compute_loss(x, y, beta=0.05)\n return loss, (nll, kl)", "5 Train", "DEBUG_MODE = False\ntf.config.experimental_run_functions_eagerly(DEBUG_MODE)\n\nnum_train_epochs = 25. 
# @param { isTemplate: true}\nnum_evals = 200 # @param { isTemplate: true}\n\ndur_sec = dur_num = 0\nnum_train_steps = int(num_train_epochs * train_size // batch_size)\nfor i in range(num_train_steps):\n start = time.time()\n trn_loss, (trn_nll, trn_kl), g = fit()\n stop = time.time()\n dur_sec += stop - start\n dur_num += 1\n if i % int(num_train_steps / num_evals) == 0 or i == num_train_steps - 1:\n tst_loss, (tst_nll, tst_kl) = eval()\n f, x = zip(*[\n ('it:{:5}', opt.iterations),\n ('ms/it:{:6.4f}', dur_sec / max(1., dur_num) * 1000.),\n ('trn_loss:{:6.4f}', trn_loss),\n ('tst_loss:{:6.4f}', tst_loss),\n ('tst_nll:{:6.4f}', tst_nll),\n ('tst_kl:{:6.4f}', tst_kl),\n ('sum_norm_grad:{:6.4f}', sum(g)),\n\n ])\n print(' '.join(f).format(*[getattr(x_, 'numpy', lambda: x_)()\n for x_ in x]))\n sys.stdout.flush()\n dur_sec = dur_num = 0\n # if i % 1000 == 0 or i == maxiter - 1:\n # encoder.save('/tmp/encoder.npz')\n # decoder.save('/tmp/decoder.npz')", "6 Evaluate Classification Accuracy", "def evaluate_accuracy(dataset, encoder, decoder):\n \"\"\"Evaluate the accuracy of your model on a dataset.\n \"\"\"\n this_it = iter(dataset)\n num_correct = 0\n num_total = 0\n attempts = 0\n for xin, xout in this_it:\n xin, xout = next(this_it)\n e = encoder(xin)\n z = e.sample(10000) # 10K samples should have low variance.\n d = decoder(z)\n yhat = d.sample()\n confidence = tf.reduce_mean(d.probs_parameter(), axis=0)\n most_likely = tf.cast(tf.math.argmax(confidence, axis=-1), tf.int32)\n num_correct += np.sum(most_likely == xout, axis=0)\n num_total += xout.shape[0]\n attempts +=1\n return num_correct, num_total\n\nnc, nt = evaluate_accuracy(eval_dataset.batch(100), encoder, decoder)\nprint(\"Accuracy: %.4f\"%(nc/nt))", "The accuracy of one training run with this particular model and training setup was 99.15%, which is within a half of a percent of the state of the art, and comparable to the mnist accuracy reported in Alemi et al. (2016).\nOOD detection using DoSE\nFrom the previous section, we have trained a variational classifier. However, this classifier was trained assuming that all of the inputs are from the distribution which generated the training set. In general, we may not always receive images drawn from this distribution. In these situations, our model prediction is unreliable. We want to be able to identify when this may be the case to avoid serving these flawed predictions. \nIn this section, we turn the VIB classifier into an OOD detector using DoSE.\n1 Get statistics", "def get_statistics(encoder, decoder, prior):\n \"\"\"Setup a function to evaluate statistics given model components.\n \n Args:\n encoder: Callable neural network which takes in an image and \n returns a tfp.distributions.Distribution object.\n decoder: Callable neural network which takes in a vector and \n returns a tfp.distributions.Distribution object.\n prior: A tfp.distributions.Distribution object which operates \n on the same spaces as the encoder.\n Returns:\n T: A function, which takes in a tensor containing an image (or \n batch of images) and evaluates statistics on the model. 
\n Optionally it also returns the prediction, under the assumption \n that the DoSE model will only dress an actual classifier.\n \"\"\"\n def T(x, return_pred=False):\n \"\"\"Evaluate statistics on an input image or batch of images.\n \n Given an input tensor `x` containing either an image or a batch of \n images, this function evaluates 4 statistics on a VIB model; the\n kl-divergence between the posterior and prior, the expected entropy\n of the decoder computed using samples from the posterior, the \n posterior entopy, and the cross-entropy between the posterior and\n the prior. We also allow for the prediction to be optionally\n returned.\n\n Args: \n x: rank 4 tensor containing a batch of images\n return_pred: Bool indicating whether to return the model\n prediction.\n Returns:\n tf.tensor containing the 4 statistics evaluated on the input.\n pred (optional): The prediction of the model.\n \"\"\"\n pzgx = encoder(x)\n z = pzgx.sample(100, seed=42) # Seed is fixed for determinism.\n pxgz = decoder(z)\n\n kl = pzgx.kl_divergence(prior)[tf.newaxis,...]\n dent = tf.reduce_mean(pxgz.entropy(), axis=0)[tf.newaxis,...]\n eent = pzgx.entropy()[tf.newaxis,...]\n xent = pzgx.cross_entropy(prior)[tf.newaxis,...]\n if return_pred:\n pred = tf.math.argmax(\n tf.reduce_mean(pxgz.probs_parameter, axis=0), \n axis=-1)\n return tf.concat([kl, dent, eent, xent], axis=0), pred\n else:\n return tf.concat([kl, dent, eent, xent], axis=0)\n return T\n\nT = get_statistics(encoder, decoder, prior)", "2 Define DoSE helper classes and functions", "def get_DoSE_KDE(T, dataset):\n \"\"\"Get a distribution and decision rule for OOD detection using DoSE.\n\n Given a tensor of statistics tx, compute a Kernel Density Estimate (KDE) of\n the statistics. This uses a quantiles trick to cut down the number of \n samples used in the KDE (to lower the cost of evaluating a trial point).\n\n Args:\n T: A function which takes an input image and returns a vector of\n statistics evaluated using the model.\n dataset: A tensorflow_datasets `Dataset` which will be used to evaluate\n statistics to construct the estimator.\n Returns:\n is_ood: A function which takes a new point `x` and `threshold`, and \n computes the decision rule KDE.log_prob(T(x)) < threshold\n dose_kde: A tfd.MixtureSameFamily object. 
The distribution used as the KDE\n from which the log_prob of a batch of statistics can be computed.\n \"\"\"\n\n # First we should evaluate the statistics on the training set.\n it = iter(dataset)\n for x, y in it:\n if not \"tx\" in locals():\n tx = T(x)\n else:\n tx = tf.concat([tx, T(x)], axis=-1)\n\n n = tf.cast(tf.shape(tx)[-1], tx.dtype)\n num_quantiles = int(25)\n q = tfp.stats.quantiles(tx, num_quantiles, axis=-1)\n q = tf.transpose(q, tf.roll(tf.range(tf.rank(q)), shift=-1, axis=0))\n # Scott's Rule:\n h = 3.49 * tf.math.reduce_std(tx, axis=-1, keepdims=True) * (n)**(-1./3.)\n h *= n / num_quantiles\n \n dose_kde = tfd.Independent(\n tfd.MixtureSameFamily(\n mixture_distribution=tfd.Categorical(logits=tf.zeros(num_quantiles + 1)),\n components_distribution=tfd.Normal(loc=q, scale=h)),\n reinterpreted_batch_ndims=1)\n is_ood = lambda x, threshold: dose_kde.log_prob(tf.transpose(T(x), [1, 0])) < tf.math.log(threshold)\n dose_log_prob = lambda x: dose_kde.log_prob(tf.transpose(T(x), [1, 0]))\n\n # T(x) returns shape [T, N], but dose_kde works on shape [N, T]\n return is_ood, dose_kde, dose_log_prob, tx\n\n\nclass DoSE_administrator(object):\n def __init__(self, T, train_dataset, eval_dataset):\n \"\"\"Administrate DoSE for model evaluation in a more efficient way.\n\n This high level object just calls the lower level DoSE methods, but\n also evaluates the DoSE log-probabilities on the evaluation dataset.\n Using these, we can do things like compute auroc much more efficiently.\n \"\"\"\n dose_build = get_DoSE_KDE(T, train_dataset)\n \n # Call DoSE on an image/batch x for lp threshold `threshold`\n self.is_ood = dose_build[0]\n\n # Actual dose distribution\n self.dose_dist = dose_build[1]\n self.dose_lp = lambda t: self.dose_dist.log_prob(tf.transpose(t, [1, 0]))\n\n # Get the log-probability of a batch from dose\n self.dose_log_prob = dose_build[2] \n\n # This helps us evaluate auroc more reliably.\n self.training_stats = dose_build[3]\n \n # Get training_log probs efficiently\n train_size = self.training_stats.shape[-1]\n bs = train_size // 1000\n for i in range(1000):\n tlp = self.dose_lp(self.training_stats[..., bs*i:bs*(i+1)])\n if not hasattr(self, 'training_lp'):\n self.training_lp = tlp\n else:\n self.training_lp = tf.concat([self.training_lp, tlp], axis=0)\n\n # Get log_probs, images, labels, and statistics \n # on the evaluation dataset.\n eval_it = iter(eval_dataset)\n for x, y in eval_it:\n if not hasattr(self, 'eval_lp'):\n self.eval_stats = T(x)\n self.eval_lp = self.dose_lp(self.eval_stats)\n self.eval_label = y\n self.eval_ims = x\n else:\n tx = T(x)\n self.eval_stats = tf.concat([self.eval_stats,\n tx], \n axis=0)\n self.eval_lp = tf.concat([self.eval_lp,\n self.dose_lp(tx)],\n axis=0)\n self.eval_label = tf.concat([self.eval_label, y], axis=0)\n self.eval_ims = tf.concat([self.eval_ims, x], axis=0)\n\n \n def get_acc(self, threshold):\n \"\"\"Evaluate the OOD accuracy for a certain threshold probability.\n \n This computes the decision rule: `log q(x) < tf.math.log(thresh)`\n on the eval dataset. 
It uses this decision rule to evaluate the\n number of correct predictions, along with the 4 components of the\n confusion matrix.\n\n Args:\n threshold: A threshold on the DoSE probability density.\n Returns:\n nc: Number of correct predictions\n nt: Number of total predictions\n tp: Number of true positives\n tn: Number of true negatives\n fp: Number of false positives\n fn: Number of false negatives\n \"\"\"\n\n yhat = self.eval_lp < tf.math.log(threshold)\n\n fp = tf.reduce_sum(tf.cast(\n tf.logical_and(tf.math.not_equal(yhat, self.eval_label), \n tf.equal(self.eval_label, False)),\n tf.int32),axis=0)\n fn = tf.reduce_sum(tf.cast(\n tf.logical_and(tf.math.not_equal(yhat, self.eval_label), \n tf.equal(self.eval_label, True)),\n tf.int32),axis=0)\n tp = tf.reduce_sum(tf.cast(\n tf.logical_and(tf.equal(yhat, self.eval_label), \n tf.equal(self.eval_label, True)),\n tf.int32),axis=0)\n tn = tf.reduce_sum(tf.cast(\n tf.logical_and(tf.equal(yhat, self.eval_label), \n tf.equal(self.eval_label, False)),\n tf.int32),axis=0)\n nc = tp+tn\n nt = tf.cast(tf.size(self.eval_label), tf.float32) \n return nc, nt, tp, tn, fp, fn\n \n def roc_curve(self, nbins):\n \"\"\"Get the roc curve for the model.\"\"\"\n\n nc, nt, tp, tn, fp, fn = self.get_acc(\n np.float32(np.exp(np.percentile(self.eval_lp, 0.))))\n fpr = [fp.numpy() / (fp.numpy()+tn.numpy())]\n tpr = [tp.numpy()/ (tp.numpy() + fn.numpy())]\n for i in range(1, nbins+1):\n nc, nt, tp, tn, fp, fn = self.get_acc(\n np.float32(np.exp(np.percentile(self.eval_lp, i/float(nbins)*100.))))\n fpr.append(fp.numpy()/ (fp.numpy() + tn.numpy()))\n tpr.append(tp.numpy()/ (tp.numpy() + fn.numpy()))\n return fpr, tpr\n\n def precision_recall_curve(self, nbins):\n \"\"\"Get the precision-recall curve for the model.\"\"\"\n\n nc, nt, tp, tn, fp, fn = self.get_acc(\n np.float32(np.exp(np.percentile(self.eval_lp, 0.))))\n precision = [tp.numpy()/ (tp.numpy() + fp.numpy())]\n recall = [tp.numpy() / (tp.numpy() + fn.numpy())]\n for i in range(1, nbins+1):\n nc, nt, tp, tn, fp, fn = self.get_acc(\n np.float32(np.exp(np.percentile(self.eval_lp, i/float(nbins)*100.))))\n precision.append(tp.numpy()/ (tp.numpy() + fp.numpy()))\n recall.append(tp.numpy() / (tp.numpy() + fn.numpy()))\n return precision, recall", "3 Setup OOD dataset", "# For evaluating statistics on the training set, we need to perform a\n# pass through the dataset.\ntrain_one_pass = tfds.load('mnist')['train']\ntrain_one_pass = tfn.util.tune_dataset(train_one_pass, \n batch_size=1000,\n repeat_count=None,\n preprocess_fn=_preprocess)\n\n# OOD dataset is Fashion_MNIST\nood_data = tfds.load('fashion_mnist')['test'].map(_preprocess).map(\n lambda x, y: (x, tf.ones_like(y, dtype=tf.bool)))\n\n# In-distribution data is the MNIST test set.\nind_data = tfds.load('mnist')['test'].map(_preprocess).map(\n lambda x, y: (x, tf.zeros_like(y, dtype=tf.bool)))\n\n# Our trial dataset is a 50-50 split of the two.\nhybrid_data = ind_data.concatenate(ood_data)\nhybrid_data = tfn.util.tune_dataset(hybrid_data, batch_size=100,\n shuffle_size=20000,repeat_count=None)", "4 Administer DoSE", "DoSE_admin = DoSE_administrator(T, train_one_pass, hybrid_data)", "5 Evaluate OOD performance", "fp, tp = DoSE_admin.roc_curve(10000)\nprecision, recall = DoSE_admin.precision_recall_curve(10000)\nplt.figure(figsize=[10,5])\nplt.subplot(121)\nplt.plot(fp, tp, 'b-')\nplt.xlim(0, 1.)\nplt.ylim(0., 1.)\nplt.xlabel('FPR', fontsize=12)\nplt.ylabel('TPR', fontsize=12)\nplt.title(\"AUROC: %.4f\"%np.trapz(tp, fp), 
fontsize=12)\nplt.subplot(122)\nplt.plot(recall, precision, 'b-')\nplt.xlim(0, 1.)\nplt.ylim(0., 1.)\nplt.xlabel('Recall', fontsize=12)\nplt.ylabel('Precision', fontsize=12)\nplt.title(\"AUPRC: %.4f\"%np.trapz(precision[1:], recall[1:]), fontsize=12)\n\nSorted_ims = tf.gather(DoSE_admin.eval_ims, tf.argsort(DoSE_admin.eval_lp))\nSorted_labels = tf.gather(DoSE_admin.eval_label, tf.argsort(DoSE_admin.eval_lp))\nsorted_ind = tf.gather(Sorted_ims, tf.where(Sorted_labels == False))[:,0]\nsorted_ood = tf.gather(Sorted_ims, tf.where(Sorted_labels == True))[:,0]\n\nprint(\"Most False Positive\")\ntfn.util.display_imgs(sorted_ind[:20])\nprint(\"Most True Negative\")\ntfn.util.display_imgs(sorted_ind[-20:])\nprint(\"Most False Negative\")\ntfn.util.display_imgs(sorted_ood[-20:])\nprint(\"Most True Positive\")\ntfn.util.display_imgs(sorted_ood[:20])", "DoSE on this particular classifier appears to get 0.982-0.993 AUROC and 0.982-0.993 AUPRC. This is comparable to the accuracy of the classifier itself, but was trained without access to the OOD dataset in question." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
mne-tools/mne-tools.github.io
dev/_downloads/c6baf7c1a2f53fda44e93271b91f45b8/50_beamformer_lcmv.ipynb
bsd-3-clause
[ "%matplotlib inline", "Source reconstruction using an LCMV beamformer\nThis tutorial gives an overview of the beamformer method and shows how to\nreconstruct source activity using an LCMV beamformer.", "# Authors: Britta Westner <[email protected]>\n# Eric Larson <[email protected]>\n#\n# License: BSD-3-Clause\n\nimport matplotlib.pyplot as plt\nimport mne\nfrom mne.datasets import sample, fetch_fsaverage\nfrom mne.beamformer import make_lcmv, apply_lcmv", "Introduction to beamformers\nA beamformer is a spatial filter that reconstructs source activity by\nscanning through a grid of pre-defined source points and estimating activity\nat each of those source points independently. A set of weights is\nconstructed for each defined source location which defines the contribution\nof each sensor to this source.\nBeamformers are often used for their focal reconstructions and their ability\nto reconstruct deeper sources. They can also suppress external noise sources.\nThe beamforming method applied in this tutorial is the linearly constrained\nminimum variance (LCMV) beamformer :footcite:VanVeenEtAl1997 operates on\ntime series.\nFrequency-resolved data can be reconstructed with the dynamic imaging of\ncoherent sources (DICS) beamforming method :footcite:GrossEtAl2001.\nAs we will see in the following, the spatial filter is computed from two\ningredients: the forward model solution and the covariance matrix of the\ndata.\nData processing\nWe will use the sample data set for this tutorial and reconstruct source\nactivity on the trials with left auditory stimulation.", "data_path = sample.data_path()\nsubjects_dir = data_path / 'subjects'\nmeg_path = data_path / 'MEG' / 'sample'\nraw_fname = meg_path / 'sample_audvis_filt-0-40_raw.fif'\n\n# Read the raw data\nraw = mne.io.read_raw_fif(raw_fname)\nraw.info['bads'] = ['MEG 2443'] # bad MEG channel\n\n# Set up the epoching\nevent_id = 1 # those are the trials with left-ear auditory stimuli\ntmin, tmax = -0.2, 0.5\nevents = mne.find_events(raw)\n\n# pick relevant channels\nraw.pick(['meg', 'eog']) # pick channels of interest\n\n# Create epochs\nproj = False # already applied\nepochs = mne.Epochs(raw, events, event_id, tmin, tmax,\n baseline=(None, 0), preload=True, proj=proj,\n reject=dict(grad=4000e-13, mag=4e-12, eog=150e-6))\n\n# for speed purposes, cut to a window of interest\nevoked = epochs.average().crop(0.05, 0.15)\n\n# Visualize averaged sensor space data\nevoked.plot_joint()\n\ndel raw # save memory", "Computing the covariance matrices\nSpatial filters use the data covariance to estimate the filter\nweights. The data covariance matrix will be inverted_ during the spatial\nfilter computation, so it is valuable to plot the covariance matrix and its\neigenvalues to gauge whether matrix inversion will be possible.\nAlso, because we want to combine different channel types (magnetometers and\ngradiometers), we need to account for the different amplitude scales of these\nchannel types. To do this we will supply a noise covariance matrix to the\nbeamformer, which will be used for whitening.\nThe data covariance matrix should be estimated from a time window that\nincludes the brain signal of interest,\nand incorporate enough samples for a stable estimate. A rule of thumb is to\nuse more samples than there are channels in the data set; see\n:footcite:BrookesEtAl2008 for more detailed advice on covariance estimation\nfor beamformers. 
Here, we use a time\nwindow incorporating the expected auditory response at around 100 ms post\nstimulus and extend the period to account for a low number of trials (72) and\nlow sampling rate of 150 Hz.", "data_cov = mne.compute_covariance(epochs, tmin=0.01, tmax=0.25,\n method='empirical')\nnoise_cov = mne.compute_covariance(epochs, tmin=tmin, tmax=0,\n method='empirical')\ndata_cov.plot(epochs.info)\ndel epochs", "When looking at the covariance matrix plots, we can see that our data is\nslightly rank-deficient as the rank is not equal to the number of channels.\nThus, we will have to regularize the covariance matrix before inverting it\nin the beamformer calculation. This can be achieved by setting the parameter\nreg=0.05 when calculating the spatial filter with\n:func:~mne.beamformer.make_lcmv. This corresponds to loading the diagonal\nof the covariance matrix with 5% of the sensor power.\nThe forward model\nThe forward model is the other important ingredient for the computation of a\nspatial filter. Here, we will load the forward model from disk; more\ninformation on how to create a forward model can be found in this tutorial:\ntut-forward.\nNote that beamformers are usually computed in a :class:volume source space\n&lt;mne.VolSourceEstimate&gt;, because estimating only cortical surface\nactivation can misrepresent the data.", "# Read forward model\n\nfwd_fname = meg_path / 'sample_audvis-meg-vol-7-fwd.fif'\nforward = mne.read_forward_solution(fwd_fname)", "Handling depth bias\nThe forward model solution is inherently biased toward superficial sources.\nWhen analyzing single conditions it is best to mitigate the depth bias\nsomehow. There are several ways to do this:\n\n:func:mne.beamformer.make_lcmv has a depth parameter that normalizes\n the forward model prior to computing the spatial filters. See the docstring\n for details.\nUnit-noise gain beamformers handle depth bias by normalizing the\n weights of the spatial filter. Choose this by setting\n weight_norm='unit-noise-gain'.\nWhen computing the Neural activity index, the depth bias is handled by\n normalizing both the weights and the estimated noise (see\n :footcite:VanVeenEtAl1997). Choose this by setting weight_norm='nai'.\n\nNote that when comparing conditions, the depth bias will cancel out and it is\npossible to set both parameters to None.\nCompute the spatial filter\nNow we can compute the spatial filter. We'll use a unit-noise gain beamformer\nto deal with depth bias, and will also optimize the orientation of the\nsources such that output power is maximized.\nThis is achieved by setting pick_ori='max-power'.\nThis gives us one source estimate per source (i.e., voxel), which is known\nas a scalar beamformer.", "filters = make_lcmv(evoked.info, forward, data_cov, reg=0.05,\n noise_cov=noise_cov, pick_ori='max-power',\n weight_norm='unit-noise-gain', rank=None)\n\n# You can save the filter for later use with:\n# filters.save('filters-lcmv.h5')", "It is also possible to compute a vector beamformer, which gives back three\nestimates per voxel, corresponding to the three direction components of the\nsource. This can be achieved by setting\npick_ori='vector' and will yield a :class:volume vector source estimate\n&lt;mne.VolVectorSourceEstimate&gt;. 
So we will compute another set of filters\nusing the vector beamformer approach:", "filters_vec = make_lcmv(evoked.info, forward, data_cov, reg=0.05,\n noise_cov=noise_cov, pick_ori='vector',\n weight_norm='unit-noise-gain', rank=None)\n# save a bit of memory\nsrc = forward['src']\ndel forward", "Apply the spatial filter\nThe spatial filter can be applied to different data types: raw, epochs,\nevoked data or the data covariance matrix to gain a static image of power.\nThe function to apply the spatial filter to :class:~mne.Evoked data is\n:func:~mne.beamformer.apply_lcmv which is\nwhat we will use here. The other functions are\n:func:~mne.beamformer.apply_lcmv_raw,\n:func:~mne.beamformer.apply_lcmv_epochs, and\n:func:~mne.beamformer.apply_lcmv_cov.", "stc = apply_lcmv(evoked, filters)\nstc_vec = apply_lcmv(evoked, filters_vec)\ndel filters, filters_vec", "Visualize the reconstructed source activity\nWe can visualize the source estimate in different ways, e.g. as a volume\nrendering, an overlay onto the MRI, or as an overlay onto a glass brain.\nThe plots for the scalar beamformer show brain activity in the right temporal\nlobe around 100 ms post stimulus. This is expected given the left-ear\nauditory stimulation of the experiment.", "lims = [0.3, 0.45, 0.6]\nkwargs = dict(src=src, subject='sample', subjects_dir=subjects_dir,\n initial_time=0.087, verbose=True)", "On MRI slices (orthoview; 2D)", "stc.plot(mode='stat_map', clim=dict(kind='value', pos_lims=lims), **kwargs)", "On MNI glass brain (orthoview; 2D)", "stc.plot(mode='glass_brain', clim=dict(kind='value', lims=lims), **kwargs)", "Volumetric rendering (3D) with vectors\nThese plots can also be shown using a volumetric rendering via\n:meth:~mne.VolVectorSourceEstimate.plot_3d. Let's try visualizing the\nvector beamformer case. Here we get three source time courses out per voxel\n(one for each component of the dipole moment: x, y, and z), which appear\nas small vectors in the visualization (in the 2D plotters, only the\nmagnitude can be shown):", "brain = stc_vec.plot_3d(\n clim=dict(kind='value', lims=lims), hemi='both', size=(600, 600),\n views=['sagittal'],\n # Could do this for a 3-panel figure:\n # view_layout='horizontal', views=['coronal', 'sagittal', 'axial'],\n brain_kwargs=dict(silhouette=True),\n **kwargs)", "Visualize the activity of the maximum voxel with all three components\nWe can also visualize all three components in the peak voxel. For this, we\nwill first find the peak voxel and then plot the time courses of this voxel.", "peak_vox, _ = stc_vec.get_peak(tmin=0.08, tmax=0.1, vert_as_index=True)\n\nori_labels = ['x', 'y', 'z']\nfig, ax = plt.subplots(1)\nfor ori, label in zip(stc_vec.data[peak_vox, :, :], ori_labels):\n ax.plot(stc_vec.times, ori, label='%s component' % label)\nax.legend(loc='lower right')\nax.set(title='Activity per orientation in the peak voxel', xlabel='Time (s)',\n ylabel='Amplitude (a. u.)')\nmne.viz.utils.plt_show()\ndel stc_vec", "Morph the output to fsaverage\nWe can also use volumetric morphing to get the data to fsaverage space. This\nis for example necessary when comparing activity across subjects. Here, we\nwill use the scalar beamformer example.\nWe pass a :class:mne.SourceMorph as the src argument to\nmne.VolSourceEstimate.plot. 
To save some computational load when applying\nthe morph, we will crop the stc:", "fetch_fsaverage(subjects_dir) # ensure fsaverage src exists\nfname_fs_src = subjects_dir / 'fsaverage' / 'bem' / 'fsaverage-vol-5-src.fif'\n\nsrc_fs = mne.read_source_spaces(fname_fs_src)\nmorph = mne.compute_source_morph(\n src, subject_from='sample', src_to=src_fs, subjects_dir=subjects_dir,\n niter_sdr=[5, 5, 2], niter_affine=[5, 5, 2], zooms=7, # just for speed\n verbose=True)\nstc_fs = morph.apply(stc)\ndel stc\n\nstc_fs.plot(\n src=src_fs, mode='stat_map', initial_time=0.085, subjects_dir=subjects_dir,\n clim=dict(kind='value', pos_lims=lims), verbose=True)", "References\n.. footbibliography::\n.. LINKS" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
4dsolutions/Python5
Comparing JavaScript with Python.ipynb
mit
[ "Python for Everyone!<br/>Oregon Curriculum Network\nPython and JavaScript\nJavaScript has been moving a lot closer to Python, nowadays supporting classes, with a constructor like __init__ and this for self within that construct (ES has had this for awhile). \nHere in a Jupyter Notebook, we have a way to run both JavaScript and Python together.\nI'm using source code from this article, A Gentle Introduction to Data Structures: How Queues Work by Michael Olorunnisola, to show off the similarities between the two languages.", "%%javascript\n\nclass Queue {\n constructor(){\n this._storage = {};\n this._start = -1; //replicating 0 index used for arrays\n this._end = -1; //replicating 0 index used for arrays\n }\n\n enqueue(val){\n this._storage[++this._end] = val; \n }\n\n dequeue(){\n if(this.size()){ \n let nextUp = this._storage[++this._start];\n delete this._storage[this._start];\n\n if(!this.size()){ \n this._start = -1;\n this._end = -1; \n }\n\n return nextUp;\n }\n }\n \n size(){\n return this._end - this._start;\n }\n} //end Queue\n\nvar microsoftQueue = new Queue();\n\nmicrosoftQueue.enqueue(\"{user: [email protected]}\");\nmicrosoftQueue.enqueue(\"{user: [email protected]}\");\nmicrosoftQueue.enqueue(\"{user: [email protected]}\");\nmicrosoftQueue.enqueue(\"{user: [email protected]}\");\n\nvar sendTo = function(s){\n element.append(s + \" gets a Surface Studio<br />\");\n}\n\n//Function to send everyone their Surface Studio!\nlet sendSurface = recepient => {\n sendTo(recepient);\n}\n\n//When your server is ready to handle this queue, execute this:\nwhile(microsoftQueue.size() > 0){\n sendSurface(microsoftQueue.dequeue());\n}", "If you don't see four lines of output above, you might be rendering this on Github. If you want to see the output, same as the Python output below, cut and paste the Github URL to nbviewer.jupyter.org, which will do a more thorough rendering job.\nNow lets do the same thing in Python. Yes, Python has it's own collections.deque or we could use a list object as a queue, but the point here is to show off similarities, so lets stick with a dict-based implementation, mirroring the JavaScript.", "class Queue:\n \n def __init__(self):\n self._storage = {}\n self._start = -1 # replicating 0 index used for arrays\n self._end = -1 # replicating 0 index used for arrays\n \n def size(self):\n return self._end - self._start\n\n def enqueue(self, val):\n self._end += 1\n self._storage[self._end] = val\n\n def dequeue(self):\n if self.size():\n self._start += 1\n nextUp = self._storage[self._start]\n del self._storage[self._start]\n \n if not self.size(): \n self._start = -1\n self._end = -1\n return nextUp\n \nmicrosoftQueue = Queue()\n\nmicrosoftQueue.enqueue(\"{user: [email protected]}\")\nmicrosoftQueue.enqueue(\"{user: [email protected]}\")\nmicrosoftQueue.enqueue(\"{user: [email protected]}\")\nmicrosoftQueue.enqueue(\"{user: [email protected]}\") \n\ndef sendTo(recipient):\n print(recipient, \"gets a Surface Studio\")\n\n# Function to send everyone their Surface Studio!\ndef sendSurface(recepient):\n sendTo(recepient)\n\n# When your server is ready to handle this queue, execute this:\n\nwhile microsoftQueue.size() > 0:\n sendSurface(microsoftQueue.dequeue())", "Another example of features JavaScript is acquiring with ES6 (Sixth Edition we might call it), are rest and default parameters. 
A \"rest\" parameter has nothing to do with RESTful, and everything to do with \"the rest\" as in \"whatever is left over.\"\nFor example, in the function below, we pass in more ingredients than some recipe requires, yet because of the rest argument, which has to be the last, the extra ingredients are kept. Pre ES6, JavaScript had no simple mechanism for allowing parameters to \"rise to the occasion.\" Instead they would match up, or stay undefined.", "%%javascript\nvar sendTo = function(s){\n element.append(s + \"<br />\");\n}\n\n//Function to send everyone their Surface Studio!\nlet sendSurface = recepient => {\n sendTo(recepient);\n}\n\nfunction recipe(ingredient0, ingre1, ing2, ...more){\n sendSurface(ingredient0 + \" is one ingredient.\");\n sendSurface(more[1] + \" is another.\");\n}\nrecipe(\"shrimp\", \"avocado\", \"tomato\", \"potato\", \"squash\", \"peanuts\");", "In Python we have both sequence and dictionary parameters, which we could say are both rest parameters, one for scooping up positionals, the other for gathering the named. Here's how that looks:", "def recipe(ingr0, *more, ingr1, meat=\"turkey\", **others):\n print(more)\n print(others)\n \nrecipe(\"avocado\", \"tomato\", \"potato\", ingr1=\"squash\", dessert=\"peanuts\", meat = \"shrimp\")", "Thanks to *more being a sequence parameter, the parameter ingr1 may only be reached with a named argument, yet need have no default value. **others scoops up anything named that doesn't match anything explicitly required as such.\nYou'll see the sequence parameter *more begets a tuple with its contents, whereas the dict parameter **others begets a dict." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
alepoydes/introduction-to-numerical-simulation
practice/What does mean mean mean.ipynb
mit
[ "%matplotlib inline\nimport numpy as np\nimport matplotlib.pyplot as plt", "Вычисление сумм\nВ статистике часто приходится считать выборочное среднее, т.е. по данной выборке значений $x_k$, $k=1..N$, нужно вычислить\n$$\\bar x=\\frac1N\\sum_{k=1}^N x_k.$$\nС точки зрения математики не имеет значения, как считать указанную сумму, так как результат сложения всегда будет один и тот же.\nОднако при вычислениях с плавающей запятой ответ будет зависеть от порядка выполнения операций, хотя бы потому, что сложения чисел с плавающей запятой не ассоциативно.\nНо будет ли зависеть точность вычислений от порядка операций?\nДавайте это проверим.\nСконструируем выборку таким образом, что сумма всех элементов равна $1$, и порядок элементов меняется в широком диапазоне.\nДля этого разобьем единицу на $K$ частей, и $k$-ую часть разобьем на $7^k$ равных значений.\nПолученные элементы перемешаем.", "base=10 # параметр, может принимать любые целые значения > 1\n\ndef exact_sum(K):\n \"\"\"Точное значение суммы всех элементов.\"\"\"\n return 1.\n\ndef samples(K):\n \"\"\"\"Элементы выборки\".\"\"\"\n # создаем K частей из base^k одинаковых значений\n parts=[np.full((base**k,), float(base)**(-k)/K) for k in range(0, K)] \n # создаем выборку объединяя части\n samples=np.concatenate(parts) \n # перемешиваем элементы выборки и возвращаем\n return np.random.permutation(samples)\n\ndef direct_sum(x):\n \"\"\"Последовательная сумма всех элементов вектора x\"\"\"\n s=0.\n for e in x: \n s+=e\n return s\n\ndef number_of_samples(K):\n \"\"\"Число элементов в выборке\"\"\"\n return np.sum([base**k for k in range(0, K)])\n\ndef exact_mean(K):\n \"\"\"Значение среднего арифметического по выборке с близкой к машинной точностью.\"\"\"\n return 1./number_of_samples(K)\n\ndef exact_variance(K):\n \"\"\"Значение оценки дисперсии с близкой к машинной точностью.\"\"\"\n # разные значения элементов выборки\n values=np.asarray([float(base)**(-k)/K for k in range(0, K)], dtype=np.double)\n # сколько раз значение встречается в выборке\n count=np.asarray([base**k for k in range(0, K)])\n return np.sum(count*(values-exact_mean(K))**2)/number_of_samples(K)", "Создадим выборку из значений, отличающихся на 6 порядков, и просуммируем элементы выборки.", "K=7 # число слагаемых\nx=samples(K) # сохраняем выборку в массив\nprint(\"Число элементов:\", len(x))\nprint(\"Самое маленькое и большое значения:\", np.min(x), np.max(x))\n\nexact_sum_for_x=exact_sum(K) # значение суммы с близкой к машинной погрешностью\ndirect_sum_for_x=direct_sum(x) # сумма всех элементов по порядку\n\ndef relative_error(x0, x):\n \"\"\"Погрешность x при точном значении x0\"\"\"\n return np.abs(x0-x)/np.abs(x)\n\nprint(\"Погрешность прямого суммирования:\", relative_error(exact_sum_for_x, direct_sum_for_x))", "Попробуем теперь просуммировать элементы в порядке возрастания.", "sorted_x=x[np.argsort(x)]\nsorted_sum_for_x=direct_sum(sorted_x)\nprint(\"Погрешность суммирования по возрастанию:\", relative_error(exact_sum_for_x, sorted_sum_for_x))", "Попробуем просуммировать в порядке убывания.", "sorted_x=x[np.argsort(x)[::-1]]\nsorted_sum_for_x=direct_sum(sorted_x)\nprint(\"Погрешность суммирования по убыванию:\", relative_error(exact_sum_for_x, sorted_sum_for_x))", "Таким образом погрешность результата зависит от порядка суммирования. \nКак можно объяснить этот эффект?\nНа практике суммирование предпочтительно проводить не наивным способом, а компенсационным суммированием (см. 
алгоритм Кэхэна.", "def Kahan_sum(x):\n s=0.0 # частичная сумма\n c=0.0 # сумма погрешностей\n for i in x:\n y=i-c # первоначально y равно следующему элементу последовательности\n t=s+y # сумма s может быть велика, поэтому младшие биты y будут потеряны\n c=(t-s)-y # (t-s) отбрасывает старшие биты, вычитание y восстанавливает младшие биты\n s=t # новое значение старших битов суммы\n return s\n\nKahan_sum_for_x=Kahan_sum(x) # сумма всех элементов по порядку\nprint(\"Погрешность суммирования по Кэхэну:\", relative_error(exact_sum_for_x, Kahan_sum_for_x))", "Задания\n\nОбъясните различие в погрешностях при различных порядках суммирования.\nПочему алгорит Кэхэна имеет значительно лучшую точность, чем последовательное суммирование?\nПолучим ли мы те же значения погрешностей, если будем суммировать последовательность со слагаемыми разных знаков? Проверьте на следующей последовательности: \n$$x_k=\\sin k.$$\nЧто произойдет с погрешностью, если элементы выборки с разными знаками упорядочить по возрастанию? По возрастанию абсолютной величины? Проверьте экспериментально.\n\nПодсказка\nСумма первых $N$ элементов последовательности из задания 4 может быть найдена явна:\n$$\\sum_{k=1}^N\\sin k=\\frac{1}{2}\\bigg(\\sin n-\\mathrm{ctg}\\frac{1}{2}\\cos n+\\mathrm{ctg}\\frac{1}{2}\\bigg).$$\nВычисление дисперсии\nКроме вычисления оценки математического ожидания, часто требуется вычислить оценку среднеквадратического отклонения или его квадрата - дисперсии.\nДисперсия $D[X]$ случайной величины $X$ определена через математическое ожидание $E[X]$ следующим образом:\n$$D[X]=E[(X-E[X])^2].$$\nДля оценки дисперсии мы можем воспользоваться формулой для оценки математического ожидания через выборочное среднее:\n$$E[X]\\approx\\frac1N\\sum_{n=1}^N x_n,$$\nт.е. можно предложить следующую формулу для оценки дисперсии (первая формула):\n$$D[X]\\approx\\frac1N\\sum_{n=1}^N\\left(x_n-\\frac1N\\sum_{n=1}^Nx_n\\right)^2.$$\nПолученная оценка является смещенной, т.е. ее мат. ожидание не совпадает с верным значением дисперсии, поэтому на практике нужно использовать следующую несмещенную оценку:\n$$D[X]\\approx\\frac1{N-1}\\sum_{n=1}^N\\left(x_n-\\frac1N\\sum_{n=1}^Nx_n\\right)^2,$$\nоднако в этой работе мы удовлетворимся смещенной оценкой.\nК сожалению, наша формула не позволяет обновлять значения дисперсии при добавлении значения в выборку, так как требует двух проходов по выборке: сначала считается среднее, затем считается дисперсия.\nОднако в учебниках теории вероятности можно встретить и другую эквивалентную формулу для дисперсии, получим ее, опираясь на свойства мат. ожидания:\n$$D[X]=E[(X-E[X])^2]=E[X^2-2E[X]X+E[X]^2]=E[X^2]-2E[X]E[X]+E[E[X]^2]=E[X^2]-E[X]^2.$$\nСнова заменяя мат. ожидание на выборочное среднее, получаем новую оценку для дисперсии (вторая формула):\n$$D[X]\\approx \\frac1N\\sum_{n=1}^N x_n^2-\\left(\\frac1N\\sum_{n=1}^Nx_n\\right)^2.$$\nВторая формулы для вычисления дисперсии более привлекательна, так как обе суммы могут вычисляться одновременно, а значения мат. ожидания и дисперсии вычислить, последовательно добавляя значения.\nДействительно, введем обозначения для оценок мат. ожидания и дисперсии по первым $n$ членам выборки:\n$$E_n=\\frac1n\\sum_{k=1}^n x_n,\\quad D_n=\\frac1n\\sum_{k=1}^n x_n^2-E_n^2.$$\nОтсюда легко вывести рекуррентные формулы:\n$$E_{n}=\\frac{x_{n}+(n-1)E_{n-1}}{n},\\quad D_{n}=\\frac{x_{n}^2+(n-1)D_{n-1}}{n}-E_{n}^2.$$\nХотя эти формулы и просты, погрешность вычислений по второй формуле может быть значительно выше, чем по первой. 
Проверим это.\nРассмотрим выборку, среднее для которой на порядки больше среднеквадратического отклонения. Пусть ровно половина значений больше среднего на $delta$, а половина меньше на $delta$.\nОценка дисперсии и мат. ожидания в этом случае легко вычисляются явно.", "# параметры выборки\nmean=1e6 # среднее\ndelta=1e-5 # величина отклонения от среднего\n\ndef samples(N_over_two):\n \"\"\"Генерирует выборку из 2*N_over_two значений с данным средним и среднеквадратическим \n отклонением.\"\"\"\n x=np.full((2*N_over_two,), mean, dtype=np.double)\n x[:N_over_two]+=delta\n x[N_over_two:]-=delta\n return np.random.permutation(x)\n\ndef exact_mean():\n \"\"\"Значение среднего арифметического по выборке с близкой к машинной точностью.\"\"\"\n return mean\n\ndef exact_variance():\n \"\"\"Значение оценки дисперсии с близкой к машинной точностью.\"\"\"\n return delta**2\n\nx=samples(1000000)\n\nprint(\"Размер выборки:\", len(x))\nprint(\"Среднее значение:\", exact_mean())\nprint(\"Оценка дисперсии:\", exact_variance())\nprint(\"Ошибка среднего для встроенной функции:\",relative_error(exact_mean(),np.mean(x)))\nprint(\"Ошибка дисперсии для встроенной функции:\",relative_error(exact_variance(),np.var(x)))\n\ndef direct_mean(x):\n \"\"\"Среднее через последовательное суммирование.\"\"\"\n return direct_sum(x)/len(x)\n\nprint(\"Ошибка среднего для последовательного суммирования:\",relative_error(exact_mean(),direct_mean(x)))\n\ndef direct_second_var(x):\n \"\"\"Вторая оценка дисперсии через последовательное суммирование.\"\"\"\n return direct_mean(x**2)-direct_mean(x)**2\n\ndef online_second_var(x):\n \"\"\"Вторая оценка дисперсии через один проход по выборке\"\"\"\n m=x[0] # накопленное среднее \n m2=x[0]**2 # накопленное среднее квадратов\n for n in range(1,len(x)):\n m=(m*(n-1)+x[n])/n\n m2=(m2*(n-1)+x[n]**2)/n\n return m2-m**2\n\nprint(\"Ошибка второй оценки дисперсии для последовательного суммирования:\",relative_error(exact_variance(),direct_second_var(x)))\nprint(\"Ошибка второй оценки дисперсии для однопроходного суммирования:\",relative_error(exact_variance(),online_second_var(x)))\n\ndef direct_first_var(x):\n \"\"\"Первая оценка дисперсии через последовательное суммирование.\"\"\"\n return direct_mean((x-direct_mean(x))**2)\n\nprint(\"Ошибка первой оценки дисперсии для последовательного суммирования:\",relative_error(exact_variance(),direct_first_var(x)))\n", "Как мы видим, суммирование по первой формуле дает наиболее точный результат, суммирование по второй формуле менее точно, а однопроходная формула наименее точна.\nЗадания\n\nОбьясните, почему формулы оценки дисперсии имеют разные погрешности, хотя чтобы их применить, нужно выполнить одни и те же действия, но в разном порядке? Оцените погрешности обоих формул.\nПредложите однопроходную формулу для оценки мат. ожидания и дисперсии, основанную на первой формуле для дисперсии. Воспользуйтесь компенсационным суммированием, чтобы увеличить точность. 
Попробуйте увеличить точность вычисления по сравнению со второй формулой хотя бы на два порядка.\n\nСуммирование ряда для экспоненты\nПоказательная функция имеет одно из самых простых разложений в ряд Тейлора:\n$$e^x = \\sum_{k=0}^\\infty \\frac{x^k}{k!}.$$\nЕстественным желанием при решении задачи вычисления показательной функции является воспользоваться этим рядом.\nВ данном разделе мы рассмотрим результативность этого подхода.\nТак как на практике мы не можем суммировать бесконечное число слагаемых, то будем приближать ряд его частичной суммой:\n$$e^x \\approx \\sum_{k=0}^N \\frac{x^k}{k!}.$$\nТак как частичная сумма является многочленом, то для практического счета удобно воспользоваться (схемой Горнера)[ru.wikipedia.org/wiki/Схема_Горнера]:\n$$e^x \\approx 1+x\\bigg(1+\\frac{x}{2}\\bigg(1+\\frac{x}{3}\\bigg(1+\\frac{x}{4}\\bigg(\\ldots+\\frac{x}{N}\\bigg(1\\bigg)\\ldots\\bigg)\\bigg)\\bigg)\\bigg).$$\nПроведем эксперимент по оценки точности этого разложения.\nСравнивать будем с библиотечной функцией numpy.exp, которая не дает совершенно точный ответ.\nОценим погрешность библитечной функции, предполагая, что она вычисляется с максимальной возможной точностью.\nЧисло обусловленности показательной функции для относительной погрешности равно $\\kappa_{exp}(x)=|x|$,\nтогда учитывая погрешности округления до числа с плавающей запятой, мы ожидаем предельную погрешность результата не менее $|x|\\epsilon/2+\\epsilon$.", "def exp_taylor(x, N=None):\n \"\"\"N-ая частичная сумма ряда Тейлора для экспоненты.\"\"\"\n acc = 1 # k-ая частичная сумму. Начинаем с k=0.\n xk = 1 # Степени x^k.\n inv_fact = 1 # 1/k!.\n for k in range(1, N+1):\n xk = xk*x\n inv_fact /= k\n acc += xk*inv_fact\n return acc\n\ndef exp_horner(x, N=None):\n \"\"\"N-ая частичная сумма ряда Тейлора для экспоненты методом Горнера.\"\"\"\n if N<=0: return 1 # Избегаем деления на ноль.\n acc = 1 # Выражение во вложенных скобках в схеме Горнера\n for k in range(N, 0, -1):\n acc = acc/k*x + 1\n return acc\n\ndef make_exp_test(fns, args={}, xmin=-1, xmax=1):\n \"\"\"Проводит тест приближения fn показательной функции.\"\"\"\n x = np.linspace(xmin, xmax, 1000)\n standard = np.exp(x)\n \n theoretical_relative_error = (np.abs(x)/2+1)*np.finfo(float).eps\n theoretical_absolute_error = theoretical_relative_error * standard\n \n fig, ax1 = plt.subplots(1,1,figsize=(10,5))\n ax2 = plt.twinx(ax1)\n ax1.set_xlabel(\"Argument\")\n ax1.set_ylabel(\"Absolute error\")\n ax2.set_ylabel(\"Relative error\")\n\n ax1.semilogy(x, theoretical_absolute_error, '-r')\n \n line, = ax2.semilogy(x, theoretical_relative_error, '--r')\n line.set_label(\"theory (relative)\")\n \n for fn in fns:\n subject = fn(x, **args)\n absolute_error = np.abs(standard-subject)\n relative_error = absolute_error/standard\n \n ax1.semilogy(x, absolute_error, '-')\n \n line, = ax2.semilogy(x, relative_error, '--')\n line.set_label(\"{} (relative)\".format(fn.__name__))\n \n \n plt.legend()\n plt.show()\n \n \nmake_exp_test([exp_taylor, exp_horner], args={\"N\": 3}, xmin=-0.001, xmax=0.001) \nmake_exp_test([exp_taylor, exp_horner], args={\"N\": 3}, xmin=-1, xmax=1)\nmake_exp_test([exp_taylor, exp_horner], args={\"N\": 3}, xmin=-10, xmax=10)", "Ясно, что 4-x слагаемых слишком мало, чтобы хорошо приблизить ряд. 
Попробуем взять больше.", "make_exp_test([exp_taylor, exp_horner], args={\"N\": 15}, xmin=-0.001, xmax=0.001) \nmake_exp_test([exp_taylor, exp_horner], args={\"N\": 15}, xmin=-1, xmax=1)\nmake_exp_test([exp_taylor, exp_horner], args={\"N\": 15}, xmin=-10, xmax=10)", "Точность приближения растет с увеличением числа слагаемых, однако даже для умеренно больших аргументов ни одного верного знака в ответе не получается. Посмотрим, как погрешность изменяется в зависимости от числа слагаемых.", "def cum_exp_taylor(x, N=None):\n \"\"\"Вычисляет все частичные суммы ряда Тейлора для экспоненты по N-ую включительно.\"\"\"\n acc = np.empty(N+1, dtype=float)\n acc[0] = 1 # k-ая частичная сумму. Начинаем с k=0.\n xk = 1 # Степени x^k.\n inv_fact = 1 # 1/k!.\n for k in range(1, N+1):\n xk = xk*x\n inv_fact /= k\n acc[k] = acc[k-1]+xk*inv_fact\n return acc\n\nx = -10\nstandard = np.exp(x)\ntheoretical_relative_error = (np.abs(x)/2+1)*np.finfo(float).eps\ntheoretical_absolute_error = theoretical_relative_error * standard\nNs = np.arange(100)\n\npartial_sums = cum_exp_taylor(x, N=Ns[-1])\nabsolute_error = np.abs(partial_sums-standard)\nrelative_error = absolute_error/standard\n\nfig, ax1 = plt.subplots(1,1,figsize=(10,5))\nax2 = plt.twinx(ax1)\nax1.set_xlabel(\"Argument\")\nax1.set_ylabel(\"Absolute error\")\nax2.set_ylabel(\"Relative error\")\n\nax1.semilogy(Ns, Ns*0+theoretical_absolute_error, '-r')\n\nline, = ax2.semilogy(Ns, Ns*0+theoretical_relative_error, '--r')\nline.set_label(\"theory (relative)\")\n\nax1.semilogy(Ns, absolute_error, '-')\n \nline, = ax2.semilogy(Ns, relative_error, '--')\nline.set_label(\"experiment (relative)\")\n\nplt.legend()\nplt.show()", "Оказывается, что даже суммируя очень большое число слагаемых, мы не можем достигнуть максимальной точности. \nЗадания\n\nОтносительная ошибка приближения частичной суммой ряда Тейлора показательной функцией много больше для отрицательных аргументов. Объясните причину этого. Воспользуйтесь свойствами показательной функции, чтобы выравнить точность вычислений при положительных и отрицательных аргументах.\nПочему абсолютная погрешность мала при аргументах близких к нулю? Как именно погрешность зависит от аргумента? \nАбсолютная погрешность приближения функции частичной суммой ряда равна остатку этого ряда. Оцените остаток ряда Тейлора для экспоненты и найдите число слагаемых, необходимое для вычисления экспоненты с наперед заданной точностью. Проведите эксперимент и убедитесь, что предсказанная вами точность отличается от фактической не более чем на порядок.\nОшибка вычисления через частичную сумму складывается из ошибки отбрасывание остатка ряда и ошибки вычисления умножений и сложений. При увеличинии числа слагаемых первая ошибка уменьшается, но вторая растет. Для произвольного x оцените число слагаемых, при которых точность вычисления показательной функции максимальна.\nСхема Горнера дает несколько меньшую погрешность, чем суммирование одночленов. Почему?\nМожете предложить лучший способ вычисления показательной функции?" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
yuvrajsingh86/DeepLearning_Udacity
sentiment-network/Sentiment_Classification_Projects.ipynb
mit
[ "Sentiment Classification & How To \"Frame Problems\" for a Neural Network\nby Andrew Trask\n\nTwitter: @iamtrask\nBlog: http://iamtrask.github.io\n\nWhat You Should Already Know\n\nneural networks, forward and back-propagation\nstochastic gradient descent\nmean squared error\nand train/test splits\n\nWhere to Get Help if You Need it\n\nRe-watch previous Udacity Lectures\nLeverage the recommended Course Reading Material - Grokking Deep Learning (Check inside your classroom for a discount code)\nShoot me a tweet @iamtrask\n\nTutorial Outline:\n\n\nIntro: The Importance of \"Framing a Problem\" (this lesson)\n\n\nCurate a Dataset\n\nDeveloping a \"Predictive Theory\"\n\nPROJECT 1: Quick Theory Validation\n\n\nTransforming Text to Numbers\n\n\nPROJECT 2: Creating the Input/Output Data\n\n\nPutting it all together in a Neural Network (video only - nothing in notebook)\n\n\nPROJECT 3: Building our Neural Network\n\n\nUnderstanding Neural Noise\n\n\nPROJECT 4: Making Learning Faster by Reducing Noise\n\n\nAnalyzing Inefficiencies in our Network\n\n\nPROJECT 5: Making our Network Train and Run Faster\n\n\nFurther Noise Reduction\n\n\nPROJECT 6: Reducing Noise by Strategically Reducing the Vocabulary\n\n\nAnalysis: What's going on in the weights?\n\n\nLesson: Curate a Dataset<a id='lesson_1'></a>\nThe cells from here until Project 1 include code Andrew shows in the videos leading up to mini project 1. We've included them so you can run the code along with the videos without having to type in everything.", "def pretty_print_review_and_label(i):\n print(labels[i] + \"\\t:\\t\" + reviews[i][:80] + \"...\")\n\ng = open('reviews.txt','r') # What we know!\nreviews = list(map(lambda x:x[:-1],g.readlines()))\ng.close()\n\ng = open('labels.txt','r') # What we WANT to know!\nlabels = list(map(lambda x:x[:-1].upper(),g.readlines()))\ng.close()", "Note: The data in reviews.txt we're using has already been preprocessed a bit and contains only lower case characters. If we were working from raw data, where we didn't know it was all lower case, we would want to add a step here to convert it. That's so we treat different variations of the same word, like The, the, and THE, all the same way.", "len(reviews)\n\nreviews[0]\n\nlabels[0]", "Lesson: Develop a Predictive Theory<a id='lesson_2'></a>", "print(\"labels.txt \\t : \\t reviews.txt\\n\")\npretty_print_review_and_label(2137)\npretty_print_review_and_label(12816)\npretty_print_review_and_label(6267)\npretty_print_review_and_label(21934)\npretty_print_review_and_label(5297)\npretty_print_review_and_label(4998)", "Project 1: Quick Theory Validation<a id='project_1'></a>\nThere are multiple ways to implement these projects, but in order to get your code closer to what Andrew shows in his solutions, we've provided some hints and starter code throughout this notebook.\nYou'll find the Counter class to be useful in this exercise, as well as the numpy library.", "from collections import Counter\nimport numpy as np", "We'll create three Counter objects, one for words from postive reviews, one for words from negative reviews, and one for all the words.", "# Create three Counter objects to store positive, negative and total counts\npositive_counts = Counter()\nnegative_counts = Counter()\ntotal_counts = Counter()", "TODO: Examine all the reviews. 
For each word in a positive review, increase the count for that word in both your positive counter and the total words counter; likewise, for each word in a negative review, increase the count for that word in both your negative counter and the total words counter.\nNote: Throughout these projects, you should use split(' ') to divide a piece of text (such as a review) into individual words. If you use split() instead, you'll get slightly different results than what the videos and solutions show.", "# TODO: Loop over all the words in all the reviews and increment the counts in the appropriate counter objects", "Run the following two cells to list the words used in positive reviews and negative reviews, respectively, ordered from most to least commonly used.", "# Examine the counts of the most common words in positive reviews\npositive_counts.most_common()\n\n# Examine the counts of the most common words in negative reviews\nnegative_counts.most_common()", "As you can see, common words like \"the\" appear very often in both positive and negative reviews. Instead of finding the most common words in positive or negative reviews, what you really want are the words found in positive reviews more often than in negative reviews, and vice versa. To accomplish this, you'll need to calculate the ratios of word usage between positive and negative reviews.\nTODO: Check all the words you've seen and calculate the ratio of postive to negative uses and store that ratio in pos_neg_ratios. \n\nHint: the positive-to-negative ratio for a given word can be calculated with positive_counts[word] / float(negative_counts[word]+1). Notice the +1 in the denominator – that ensures we don't divide by zero for words that are only seen in positive reviews.", "# Create Counter object to store positive/negative ratios\npos_neg_ratios = Counter()\n\n# TODO: Calculate the ratios of positive and negative uses of the most common words\n# Consider words to be \"common\" if they've been used at least 100 times", "Examine the ratios you've calculated for a few words:", "print(\"Pos-to-neg ratio for 'the' = {}\".format(pos_neg_ratios[\"the\"]))\nprint(\"Pos-to-neg ratio for 'amazing' = {}\".format(pos_neg_ratios[\"amazing\"]))\nprint(\"Pos-to-neg ratio for 'terrible' = {}\".format(pos_neg_ratios[\"terrible\"]))", "Looking closely at the values you just calculated, we see the following:\n\nWords that you would expect to see more often in positive reviews – like \"amazing\" – have a ratio greater than 1. The more skewed a word is toward postive, the farther from 1 its positive-to-negative ratio will be.\nWords that you would expect to see more often in negative reviews – like \"terrible\" – have positive values that are less than 1. The more skewed a word is toward negative, the closer to zero its positive-to-negative ratio will be.\nNeutral words, which don't really convey any sentiment because you would expect to see them in all sorts of reviews – like \"the\" – have values very close to 1. A perfectly neutral word – one that was used in exactly the same number of positive reviews as negative reviews – would be almost exactly 1. The +1 we suggested you add to the denominator slightly biases words toward negative, but it won't matter because it will be a tiny bias and later we'll be ignoring words that are too close to neutral anyway.\n\nOk, the ratios tell us which words are used more often in postive or negative reviews, but the specific values we've calculated are a bit difficult to work with. 
A very positive word like \"amazing\" has a value above 4, whereas a very negative word like \"terrible\" has a value around 0.18. Those values aren't easy to compare for a couple of reasons:\n\nRight now, 1 is considered neutral, but the absolute value of the postive-to-negative rations of very postive words is larger than the absolute value of the ratios for the very negative words. So there is no way to directly compare two numbers and see if one word conveys the same magnitude of positive sentiment as another word conveys negative sentiment. So we should center all the values around netural so the absolute value fro neutral of the postive-to-negative ratio for a word would indicate how much sentiment (positive or negative) that word conveys.\nWhen comparing absolute values it's easier to do that around zero than one. \n\nTo fix these issues, we'll convert all of our ratios to new values using logarithms.\nTODO: Go through all the ratios you calculated and convert them to logarithms. (i.e. use np.log(ratio))\nIn the end, extremely positive and extremely negative words will have positive-to-negative ratios with similar magnitudes but opposite signs.", "# TODO: Convert ratios to logs", "Examine the new ratios you've calculated for the same words from before:", "print(\"Pos-to-neg ratio for 'the' = {}\".format(pos_neg_ratios[\"the\"]))\nprint(\"Pos-to-neg ratio for 'amazing' = {}\".format(pos_neg_ratios[\"amazing\"]))\nprint(\"Pos-to-neg ratio for 'terrible' = {}\".format(pos_neg_ratios[\"terrible\"]))", "If everything worked, now you should see neutral words with values close to zero. In this case, \"the\" is near zero but slightly positive, so it was probably used in more positive reviews than negative reviews. But look at \"amazing\"'s ratio - it's above 1, showing it is clearly a word with positive sentiment. And \"terrible\" has a similar score, but in the opposite direction, so it's below -1. It's now clear that both of these words are associated with specific, opposing sentiments.\nNow run the following cells to see more ratios. \nThe first cell displays all the words, ordered by how associated they are with postive reviews. (Your notebook will most likely truncate the output so you won't actually see all the words in the list.)\nThe second cell displays the 30 words most associated with negative reviews by reversing the order of the first list and then looking at the first 30 words. (If you want the second cell to display all the words, ordered by how associated they are with negative reviews, you could just write reversed(pos_neg_ratios.most_common()).)\nYou should continue to see values similar to the earlier ones we checked – neutral words will be close to 0, words will get more positive as their ratios approach and go above 1, and words will get more negative as their ratios approach and go below -1. 
That's why we decided to use the logs instead of the raw ratios.", "# words most frequently seen in a review with a \"POSITIVE\" label\npos_neg_ratios.most_common()\n\n# words most frequently seen in a review with a \"NEGATIVE\" label\nlist(reversed(pos_neg_ratios.most_common()))[0:30]\n\n# Note: Above is the code Andrew uses in his solution video, \n# so we've included it here to avoid confusion.\n# If you explore the documentation for the Counter class, \n# you will see you could also find the 30 least common\n# words like this: pos_neg_ratios.most_common()[:-31:-1]", "End of Project 1.\nWatch the next video to see Andrew's solution, then continue on to the next lesson.\nTransforming Text into Numbers<a id='lesson_3'></a>\nThe cells here include code Andrew shows in the next video. We've included it so you can run the code along with the video without having to type in everything.", "from IPython.display import Image\n\nreview = \"This was a horrible, terrible movie.\"\n\nImage(filename='sentiment_network.png')\n\nreview = \"The movie was excellent\"\n\nImage(filename='sentiment_network_pos.png')", "Project 2: Creating the Input/Output Data<a id='project_2'></a>\nTODO: Create a set named vocab that contains every word in the vocabulary.", "# TODO: Create set named \"vocab\" containing all of the words from all of the reviews\nvocab = None", "Run the following cell to check your vocabulary size. If everything worked correctly, it should print 74074", "vocab_size = len(vocab)\nprint(vocab_size)", "Take a look at the following image. It represents the layers of the neural network you'll be building throughout this notebook. layer_0 is the input layer, layer_1 is a hidden layer, and layer_2 is the output layer.", "from IPython.display import Image\nImage(filename='sentiment_network_2.png')", "TODO: Create a numpy array called layer_0 and initialize it to all zeros. You will find the zeros function particularly helpful here. Be sure you create layer_0 as a 2-dimensional matrix with 1 row and vocab_size columns.", "# TODO: Create layer_0 matrix with dimensions 1 by vocab_size, initially filled with zeros\nlayer_0 = None", "Run the following cell. It should display (1, 74074)", "layer_0.shape\n\nfrom IPython.display import Image\nImage(filename='sentiment_network.png')", "layer_0 contains one entry for every word in the vocabulary, as shown in the above image. We need to make sure we know the index of each word, so run the following cell to create a lookup table that stores the index of every word.", "# Create a dictionary of words in the vocabulary mapped to index positions\n# (to be used in layer_0)\nword2index = {}\nfor i,word in enumerate(vocab):\n word2index[word] = i\n \n# display the map of words to indices\nword2index", "TODO: Complete the implementation of update_input_layer. 
It should count \n how many times each word is used in the given review, and then store\n those counts at the appropriate indices inside layer_0.", "def update_input_layer(review):\n \"\"\" Modify the global layer_0 to represent the vector form of review.\n The element at a given index of layer_0 should represent\n how many times the given word occurs in the review.\n Args:\n review(string) - the string of the review\n Returns:\n None\n \"\"\"\n global layer_0\n # clear out previous state by resetting the layer to be all 0s\n layer_0 *= 0\n \n # TODO: count how many times each word is used in the given review and store the results in layer_0 ", "Run the following cell to test updating the input layer with the first review. The indices assigned may not be the same as in the solution, but hopefully you'll see some non-zero values in layer_0.", "update_input_layer(reviews[0])\nlayer_0", "TODO: Complete the implementation of get_target_for_labels. It should return 0 or 1, \n depending on whether the given label is NEGATIVE or POSITIVE, respectively.", "def get_target_for_label(label):\n \"\"\"Convert a label to `0` or `1`.\n Args:\n label(string) - Either \"POSITIVE\" or \"NEGATIVE\".\n Returns:\n `0` or `1`.\n \"\"\"\n # TODO: Your code here", "Run the following two cells. They should print out'POSITIVE' and 1, respectively.", "labels[0]\n\nget_target_for_label(labels[0])", "Run the following two cells. They should print out 'NEGATIVE' and 0, respectively.", "labels[1]\n\nget_target_for_label(labels[1])", "End of Project 2.\nWatch the next video to see Andrew's solution, then continue on to the next lesson.\nProject 3: Building a Neural Network<a id='project_3'></a>\nTODO: We've included the framework of a class called SentimentNetork. Implement all of the items marked TODO in the code. These include doing the following:\n- Create a basic neural network much like the networks you've seen in earlier lessons and in Project 1, with an input layer, a hidden layer, and an output layer. \n- Do not add a non-linearity in the hidden layer. That is, do not use an activation function when calculating the hidden layer outputs.\n- Re-use the code from earlier in this notebook to create the training data (see TODOs in the code)\n- Implement the pre_process_data function to create the vocabulary for our training data generating functions\n- Ensure train trains over the entire corpus\nWhere to Get Help if You Need it\n\nRe-watch earlier Udacity lectures\nChapters 3-5 - Grokking Deep Learning - (Check inside your classroom for a discount code)", "import time\nimport sys\nimport numpy as np\n\n# Encapsulate our neural network in a class\nclass SentimentNetwork:\n def __init__(self, reviews, labels, hidden_nodes = 10, learning_rate = 0.1):\n \"\"\"Create a SentimenNetwork with the given settings\n Args:\n reviews(list) - List of reviews used for training\n labels(list) - List of POSITIVE/NEGATIVE labels associated with the given reviews\n hidden_nodes(int) - Number of nodes to create in the hidden layer\n learning_rate(float) - Learning rate to use while training\n \n \"\"\"\n # Assign a seed to our random number generator to ensure we get\n # reproducable results during development \n np.random.seed(1)\n\n # process the reviews and their associated labels so that everything\n # is ready for training\n self.pre_process_data(reviews, labels)\n \n # Build the network to have the number of hidden nodes and the learning rate that\n # were passed into this initializer. 
Make the same number of input nodes as\n # there are vocabulary words and create a single output node.\n self.init_network(len(self.review_vocab),hidden_nodes, 1, learning_rate)\n\n def pre_process_data(self, reviews, labels):\n \n review_vocab = set()\n # TODO: populate review_vocab with all of the words in the given reviews\n # Remember to split reviews into individual words \n # using \"split(' ')\" instead of \"split()\".\n \n # Convert the vocabulary set to a list so we can access words via indices\n self.review_vocab = list(review_vocab)\n \n label_vocab = set()\n # TODO: populate label_vocab with all of the words in the given labels.\n # There is no need to split the labels because each one is a single word.\n \n # Convert the label vocabulary set to a list so we can access labels via indices\n self.label_vocab = list(label_vocab)\n \n # Store the sizes of the review and label vocabularies.\n self.review_vocab_size = len(self.review_vocab)\n self.label_vocab_size = len(self.label_vocab)\n \n # Create a dictionary of words in the vocabulary mapped to index positions\n self.word2index = {}\n # TODO: populate self.word2index with indices for all the words in self.review_vocab\n # like you saw earlier in the notebook\n \n # Create a dictionary of labels mapped to index positions\n self.label2index = {}\n # TODO: do the same thing you did for self.word2index and self.review_vocab, \n # but for self.label2index and self.label_vocab instead\n \n \n def init_network(self, input_nodes, hidden_nodes, output_nodes, learning_rate):\n # Store the number of nodes in input, hidden, and output layers.\n self.input_nodes = input_nodes\n self.hidden_nodes = hidden_nodes\n self.output_nodes = output_nodes\n\n # Store the learning rate\n self.learning_rate = learning_rate\n\n # Initialize weights\n \n # TODO: initialize self.weights_0_1 as a matrix of zeros. These are the weights between\n # the input layer and the hidden layer.\n self.weights_0_1 = None\n \n # TODO: initialize self.weights_1_2 as a matrix of random values. \n # These are the weights between the hidden layer and the output layer.\n self.weights_1_2 = None\n \n # TODO: Create the input layer, a two-dimensional matrix with shape \n # 1 x input_nodes, with all values initialized to zero\n self.layer_0 = np.zeros((1,input_nodes))\n \n \n def update_input_layer(self,review):\n # TODO: You can copy most of the code you wrote for update_input_layer \n # earlier in this notebook. \n #\n # However, MAKE SURE YOU CHANGE ALL VARIABLES TO REFERENCE\n # THE VERSIONS STORED IN THIS OBJECT, NOT THE GLOBAL OBJECTS.\n # For example, replace \"layer_0 *= 0\" with \"self.layer_0 *= 0\"\n pass\n \n def get_target_for_label(self,label):\n # TODO: Copy the code you wrote for get_target_for_label \n # earlier in this notebook. 
\n pass\n \n def sigmoid(self,x):\n # TODO: Return the result of calculating the sigmoid activation function\n # shown in the lectures\n pass\n \n def sigmoid_output_2_derivative(self,output):\n # TODO: Return the derivative of the sigmoid activation function, \n # where \"output\" is the original output from the sigmoid fucntion \n pass\n\n def train(self, training_reviews, training_labels):\n \n # make sure out we have a matching number of reviews and labels\n assert(len(training_reviews) == len(training_labels))\n \n # Keep track of correct predictions to display accuracy during training \n correct_so_far = 0\n \n # Remember when we started for printing time statistics\n start = time.time()\n\n # loop through all the given reviews and run a forward and backward pass,\n # updating weights for every item\n for i in range(len(training_reviews)):\n \n # TODO: Get the next review and its correct label\n \n # TODO: Implement the forward pass through the network. \n # That means use the given review to update the input layer, \n # then calculate values for the hidden layer,\n # and finally calculate the output layer.\n # \n # Do not use an activation function for the hidden layer,\n # but use the sigmoid activation function for the output layer.\n \n # TODO: Implement the back propagation pass here. \n # That means calculate the error for the forward pass's prediction\n # and update the weights in the network according to their\n # contributions toward the error, as calculated via the\n # gradient descent and back propagation algorithms you \n # learned in class.\n \n # TODO: Keep track of correct predictions. To determine if the prediction was\n # correct, check that the absolute value of the output error \n # is less than 0.5. If so, add one to the correct_so_far count.\n \n # For debug purposes, print out our prediction accuracy and speed \n # throughout the training process. \n\n elapsed_time = float(time.time() - start)\n reviews_per_second = i / elapsed_time if elapsed_time > 0 else 0\n \n sys.stdout.write(\"\\rProgress:\" + str(100 * i/float(len(training_reviews)))[:4] \\\n + \"% Speed(reviews/sec):\" + str(reviews_per_second)[0:5] \\\n + \" #Correct:\" + str(correct_so_far) + \" #Trained:\" + str(i+1) \\\n + \" Training Accuracy:\" + str(correct_so_far * 100 / float(i+1))[:4] + \"%\")\n if(i % 2500 == 0):\n print(\"\")\n \n def test(self, testing_reviews, testing_labels):\n \"\"\"\n Attempts to predict the labels for the given testing_reviews,\n and uses the test_labels to calculate the accuracy of those predictions.\n \"\"\"\n \n # keep track of how many correct predictions we make\n correct = 0\n\n # we'll time how many predictions per second we make\n start = time.time()\n\n # Loop through each of the given reviews and call run to predict\n # its label. \n for i in range(len(testing_reviews)):\n pred = self.run(testing_reviews[i])\n if(pred == testing_labels[i]):\n correct += 1\n \n # For debug purposes, print out our prediction accuracy and speed \n # throughout the prediction process. 
\n\n elapsed_time = float(time.time() - start)\n reviews_per_second = i / elapsed_time if elapsed_time > 0 else 0\n \n sys.stdout.write(\"\\rProgress:\" + str(100 * i/float(len(testing_reviews)))[:4] \\\n + \"% Speed(reviews/sec):\" + str(reviews_per_second)[0:5] \\\n + \" #Correct:\" + str(correct) + \" #Tested:\" + str(i+1) \\\n + \" Testing Accuracy:\" + str(correct * 100 / float(i+1))[:4] + \"%\")\n \n def run(self, review):\n \"\"\"\n Returns a POSITIVE or NEGATIVE prediction for the given review.\n \"\"\"\n # TODO: Run a forward pass through the network, like you did in the\n # \"train\" function. That means use the given review to \n # update the input layer, then calculate values for the hidden layer,\n # and finally calculate the output layer.\n #\n # Note: The review passed into this function for prediction \n # might come from anywhere, so you should convert it \n # to lower case prior to using it.\n \n # TODO: The output layer should now contain a prediction. \n # Return `POSITIVE` for predictions greater-than-or-equal-to `0.5`, \n # and `NEGATIVE` otherwise.\n pass\n", "Run the following cell to create a SentimentNetwork that will train on all but the last 1000 reviews (we're saving those for testing). Here we use a learning rate of 0.1.", "mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.1)", "Run the following cell to test the network's performance against the last 1000 reviews (the ones we held out from our training set). \nWe have not trained the model yet, so the results should be about 50% as it will just be guessing and there are only two possible values to choose from.", "mlp.test(reviews[-1000:],labels[-1000:])", "Run the following cell to actually train the network. During training, it will display the model's accuracy repeatedly as it trains so you can see how well it's doing.", "mlp.train(reviews[:-1000],labels[:-1000])", "That most likely didn't train very well. Part of the reason may be because the learning rate is too high. Run the following cell to recreate the network with a smaller learning rate, 0.01, and then train the new network.", "mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.01)\nmlp.train(reviews[:-1000],labels[:-1000])", "That probably wasn't much different. Run the following cell to recreate the network one more time with an even smaller learning rate, 0.001, and then train the new network.", "mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.001)\nmlp.train(reviews[:-1000],labels[:-1000])", "With a learning rate of 0.001, the network should finall have started to improve during training. It's still not very good, but it shows that this solution has potential. We will improve it in the next lesson.\nEnd of Project 3.\nWatch the next video to see Andrew's solution, then continue on to the next lesson.\nUnderstanding Neural Noise<a id='lesson_4'></a>\nThe following cells include includes the code Andrew shows in the next video. 
We've included it here so you can run the cells along with the video without having to type in everything.", "from IPython.display import Image\nImage(filename='sentiment_network.png')\n\ndef update_input_layer(review):\n \n global layer_0\n \n # clear out previous state, reset the layer to be all 0s\n layer_0 *= 0\n for word in review.split(\" \"):\n layer_0[0][word2index[word]] += 1\n\nupdate_input_layer(reviews[0])\n\nlayer_0\n\nreview_counter = Counter()\n\nfor word in reviews[0].split(\" \"):\n review_counter[word] += 1\n\nreview_counter.most_common()", "Project 4: Reducing Noise in Our Input Data<a id='project_4'></a>\nTODO: Attempt to reduce the noise in the input data like Andrew did in the previous video. Specifically, do the following:\n* Copy the SentimentNetwork class you created earlier into the following cell.\n* Modify update_input_layer so it does not count how many times each word is used, but rather just stores whether or not a word was used.", "# TODO: -Copy the SentimentNetwork class from Projet 3 lesson\n# -Modify it to reduce noise, like in the video ", "Run the following cell to recreate the network and train it. Notice we've gone back to the higher learning rate of 0.1.", "mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.1)\nmlp.train(reviews[:-1000],labels[:-1000])", "That should have trained much better than the earlier attempts. It's still not wonderful, but it should have improved dramatically. Run the following cell to test your model with 1000 predictions.", "mlp.test(reviews[-1000:],labels[-1000:])", "End of Project 4.\nAndrew's solution was actually in the previous video, so rewatch that video if you had any problems with that project. Then continue on to the next lesson.\nAnalyzing Inefficiencies in our Network<a id='lesson_5'></a>\nThe following cells include the code Andrew shows in the next video. We've included it here so you can run the cells along with the video without having to type in everything.", "Image(filename='sentiment_network_sparse.png')\n\nlayer_0 = np.zeros(10)\n\nlayer_0\n\nlayer_0[4] = 1\nlayer_0[9] = 1\n\nlayer_0\n\nweights_0_1 = np.random.randn(10,5)\n\nlayer_0.dot(weights_0_1)\n\nindices = [4,9]\n\nlayer_1 = np.zeros(5)\n\nfor index in indices:\n layer_1 += (1 * weights_0_1[index])\n\nlayer_1\n\nImage(filename='sentiment_network_sparse_2.png')\n\nlayer_1 = np.zeros(5)\n\nfor index in indices:\n layer_1 += (weights_0_1[index])\n\nlayer_1", "Project 5: Making our Network More Efficient<a id='project_5'></a>\nTODO: Make the SentimentNetwork class more efficient by eliminating unnecessary multiplications and additions that occur during forward and backward propagation. To do that, you can do the following:\n* Copy the SentimentNetwork class from the previous project into the following cell.\n* Remove the update_input_layer function - you will not need it in this version.\n* Modify init_network:\n\n\nYou no longer need a separate input layer, so remove any mention of self.layer_0\nYou will be dealing with the old hidden layer more directly, so create self.layer_1, a two-dimensional matrix with shape 1 x hidden_nodes, with all values initialized to zero\nModify train:\nChange the name of the input parameter training_reviews to training_reviews_raw. This will help with the next step.\nAt the beginning of the function, you'll want to preprocess your reviews to convert them to a list of indices (from word2index) that are actually used in the review. 
This is equivalent to what you saw in the video when Andrew set specific indices to 1. Your code should create a local list variable named training_reviews that should contain a list for each review in training_reviews_raw. Those lists should contain the indices for words found in the review.\nRemove call to update_input_layer\nUse self's layer_1 instead of a local layer_1 object.\nIn the forward pass, replace the code that updates layer_1 with new logic that only adds the weights for the indices used in the review.\nWhen updating weights_0_1, only update the individual weights that were used in the forward pass.\nModify run:\nRemove call to update_input_layer \nUse self's layer_1 instead of a local layer_1 object.\nMuch like you did in train, you will need to pre-process the review so you can work with word indices, then update layer_1 by adding weights for the indices used in the review.", "# TODO: -Copy the SentimentNetwork class from Project 4 lesson\n# -Modify it according to the above instructions ", "Run the following cell to recreate the network and train it once again.", "mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.1)\nmlp.train(reviews[:-1000],labels[:-1000])", "That should have trained much better than the earlier attempts. Run the following cell to test your model with 1000 predictions.", "mlp.test(reviews[-1000:],labels[-1000:])", "End of Project 5.\nWatch the next video to see Andrew's solution, then continue on to the next lesson.\nFurther Noise Reduction<a id='lesson_6'></a>", "Image(filename='sentiment_network_sparse_2.png')\n\n# words most frequently seen in a review with a \"POSITIVE\" label\npos_neg_ratios.most_common()\n\n# words most frequently seen in a review with a \"NEGATIVE\" label\nlist(reversed(pos_neg_ratios.most_common()))[0:30]\n\nfrom bokeh.models import ColumnDataSource, LabelSet\nfrom bokeh.plotting import figure, show, output_file\nfrom bokeh.io import output_notebook\noutput_notebook()\n\nhist, edges = np.histogram(list(map(lambda x:x[1],pos_neg_ratios.most_common())), density=True, bins=100, normed=True)\n\np = figure(tools=\"pan,wheel_zoom,reset,save\",\n toolbar_location=\"above\",\n title=\"Word Positive/Negative Affinity Distribution\")\np.quad(top=hist, bottom=0, left=edges[:-1], right=edges[1:], line_color=\"#555555\")\nshow(p)\n\nfrequency_frequency = Counter()\n\nfor word, cnt in total_counts.most_common():\n frequency_frequency[cnt] += 1\n\nhist, edges = np.histogram(list(map(lambda x:x[1],frequency_frequency.most_common())), density=True, bins=100, normed=True)\n\np = figure(tools=\"pan,wheel_zoom,reset,save\",\n toolbar_location=\"above\",\n title=\"The frequency distribution of the words in our corpus\")\np.quad(top=hist, bottom=0, left=edges[:-1], right=edges[1:], line_color=\"#555555\")\nshow(p)", "Project 6: Reducing Noise by Strategically Reducing the Vocabulary<a id='project_6'></a>\nTODO: Improve SentimentNetwork's performance by reducing more noise in the vocabulary. Specifically, do the following:\n* Copy the SentimentNetwork class from the previous project into the following cell.\n* Modify pre_process_data:\n\n\nAdd two additional parameters: min_count and polarity_cutoff\nCalculate the positive-to-negative ratios of words used in the reviews. (You can use code you've written elsewhere in the notebook, but we are moving it into the class like we did with other helper code earlier.)\nAndrew's solution only calculates a postive-to-negative ratio for words that occur at least 50 times. 
This keeps the network from attributing too much sentiment to rarer words. You can choose to add this to your solution if you would like. \nChange so words are only added to the vocabulary if they occur in the vocabulary more than min_count times.\nChange so words are only added to the vocabulary if the absolute value of their postive-to-negative ratio is at least polarity_cutoff\nModify __init__:\nAdd the same two parameters (min_count and polarity_cutoff) and use them when you call pre_process_data", "# TODO: -Copy the SentimentNetwork class from Project 5 lesson\n# -Modify it according to the above instructions ", "Run the following cell to train your network with a small polarity cutoff.", "mlp = SentimentNetwork(reviews[:-1000],labels[:-1000],min_count=20,polarity_cutoff=0.05,learning_rate=0.01)\nmlp.train(reviews[:-1000],labels[:-1000])", "And run the following cell to test it's performance. It should be", "mlp.test(reviews[-1000:],labels[-1000:])", "Run the following cell to train your network with a much larger polarity cutoff.", "mlp = SentimentNetwork(reviews[:-1000],labels[:-1000],min_count=20,polarity_cutoff=0.8,learning_rate=0.01)\nmlp.train(reviews[:-1000],labels[:-1000])", "And run the following cell to test it's performance.", "mlp.test(reviews[-1000:],labels[-1000:])", "End of Project 6.\nWatch the next video to see Andrew's solution, then continue on to the next lesson.\nAnalysis: What's Going on in the Weights?<a id='lesson_7'></a>", "mlp_full = SentimentNetwork(reviews[:-1000],labels[:-1000],min_count=0,polarity_cutoff=0,learning_rate=0.01)\n\nmlp_full.train(reviews[:-1000],labels[:-1000])\n\nImage(filename='sentiment_network_sparse.png')\n\ndef get_most_similar_words(focus = \"horrible\"):\n most_similar = Counter()\n\n for word in mlp_full.word2index.keys():\n most_similar[word] = np.dot(mlp_full.weights_0_1[mlp_full.word2index[word]],mlp_full.weights_0_1[mlp_full.word2index[focus]])\n \n return most_similar.most_common()\n\nget_most_similar_words(\"excellent\")\n\nget_most_similar_words(\"terrible\")\n\nimport matplotlib.colors as colors\n\nwords_to_visualize = list()\nfor word, ratio in pos_neg_ratios.most_common(500):\n if(word in mlp_full.word2index.keys()):\n words_to_visualize.append(word)\n \nfor word, ratio in list(reversed(pos_neg_ratios.most_common()))[0:500]:\n if(word in mlp_full.word2index.keys()):\n words_to_visualize.append(word)\n\npos = 0\nneg = 0\n\ncolors_list = list()\nvectors_list = list()\nfor word in words_to_visualize:\n if word in pos_neg_ratios.keys():\n vectors_list.append(mlp_full.weights_0_1[mlp_full.word2index[word]])\n if(pos_neg_ratios[word] > 0):\n pos+=1\n colors_list.append(\"#00ff00\")\n else:\n neg+=1\n colors_list.append(\"#000000\")\n\nfrom sklearn.manifold import TSNE\ntsne = TSNE(n_components=2, random_state=0)\nwords_top_ted_tsne = tsne.fit_transform(vectors_list)\n\np = figure(tools=\"pan,wheel_zoom,reset,save\",\n toolbar_location=\"above\",\n title=\"vector T-SNE for most polarized words\")\n\nsource = ColumnDataSource(data=dict(x1=words_top_ted_tsne[:,0],\n x2=words_top_ted_tsne[:,1],\n names=words_to_visualize,\n color=colors_list))\n\np.scatter(x=\"x1\", y=\"x2\", size=8, source=source, fill_color=\"color\")\n\nword_labels = LabelSet(x=\"x1\", y=\"x2\", text=\"names\", y_offset=6,\n text_font_size=\"8pt\", text_color=\"#555555\",\n source=source, text_align='center')\np.add_layout(word_labels)\n\nshow(p)\n\n# green indicates positive words, black indicates negative words" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
hmenke/pairinteraction
doc/sphinx/examples_python/vdw_near_surface.ipynb
gpl-3.0
[ "Dispersion Coefficients Near Surfaces\nIn this tutorial we reproduce the results depicted in Figure 5 from J. Block and S. Scheel \"van der Waals interaction potential between Rydberg atoms near surfaces\" Phys. Rev. A 96, 062509 (2017). We calculate the van der Waals $C_6$-coefficient between two Rubidium Rydberg atoms that are equidistantly placed in front of a perfect mirror (i.e. in horizontal alignment in front of a perfectly conducting plate). One finds that the relevant length scale is interatomic distance devided by distance from surface and that for decreasing surface distance the $C_6$ coefficient is significantly reduced.\nAs described in the introduction, we start our code with some preparations and load the necessary modules.", "%matplotlib inline\n\n# Arrays\nimport numpy as np\n\n# Plotting\nimport matplotlib.pyplot as plt\n\n# Operating system interfaces\nimport os\n\n# pairinteraction :-)\nfrom pairinteraction import pireal as pi\n\n# Create cache for matrix elements\nif not os.path.exists(\"./cache\"):\n os.makedirs(\"./cache\")\ncache = pi.MatrixElementCache(\"./cache\")", "The plate lies in the $xy$-plane with the surface at $z = 0$. The atoms lie in the $xz$-plane with $z>0$.\nWe can set the angle between the interatomic axis and the z-axis theta and the center of mass distance from the surface distance_surface. distance_atom defines the interatomic distances for which the pair potential is plotted. The units of the respective quantities are given as comments.\nBe careful: theta = np.pi/2 corresponds to horizontal alignment of the two atoms with respect to the surface. For different angles, large interatomic distances distance_atom might lead to one of the atoms being placed inside the plate. Make sure that distance_surface is larger than distance_atom*np.cos(theta)/2", "theta = np.pi/2 # rad\ndistance_atoms = 10 # µm\ndistance_surface = np.linspace(distance_atoms*np.abs(np.cos(theta))/2, 2*distance_atoms,30) # µm", "Next we define the state that we are interested in using pairinteraction's StateOne class . As shown in Figures 4 and 5 of Phys. Rev. A 96, 062509 (2017) we expect changes of about 50% for the $C_6$ coefficient of the $|69s_{1/2},m_j=1/2;72s_{1/2},m_j=1/2\\rangle$ pair state of Rubidium, so this provides a good example. \nWe set up the one-atom system using restrictions of energy, main quantum number n and angular momentum l. This is done by means of the restrict... functions in SystemOne.", "state_one1 = pi.StateOne(\"Rb\", 69, 0, 0.5, 0.5)\nstate_one2 = pi.StateOne(\"Rb\", 72, 0, 0.5, 0.5)\n\n# Set up one-atom system\nsystem_one = pi.SystemOne(state_one1.getSpecies(), cache)\nsystem_one.restrictEnergy(min(state_one1.getEnergy(),state_one2.getEnergy()) - 30, \\\n max(state_one1.getEnergy(),state_one2.getEnergy()) + 30)\nsystem_one.restrictN(min(state_one1.getN(),state_one2.getN()) - 3, \\\n max(state_one1.getN(),state_one2.getN()) + 3)\nsystem_one.restrictL(min(state_one1.getL(),state_one2.getL()) - 1, \\\n max(state_one1.getL(),state_one2.getL()) + 1)", "The pair state state_two is created from the one atom states state_one1 and state_one2 using the StateTwo class.\nFrom the previously set up system_one we define system_two using SystemTwo class. This class also contains methods set.. 
to set angle, distance, surface distance and to enableGreenTensor in order implement a surface.", "# Set up pair state\nstate_two = pi.StateTwo(state_one1, state_one2)\n \n# Set up two-atom system\nsystem_two = pi.SystemTwo(system_one, system_one, cache)\nsystem_two.restrictEnergy(state_two.getEnergy() - 3, state_two.getEnergy() + 3)\n\nsystem_two.setAngle(theta)\nsystem_two.setDistance(distance_atoms)\nsystem_two.setSurfaceDistance(distance_surface[0])\nsystem_two.enableGreenTensor(True)\nsystem_two.buildInteraction()", "We calculate the $C_6$ coefficients. The energyshift is given by the difference between the interaction energy at given surface_distance and the unperturbed energy of the two atom state state_two.getEnergy(). The $C_6$ coefficient is then given by the product of energyshift and distance_atoms**6.\nidx is the index of the two atom state. The command getOverlap(state_two, 0, -theta, 0) rotates the quantisation axis of state_two by theta around the y-axis. The rotation is given by the Euler angles (0, -theta, 0) in zyz convention. The negative sign of theta is needed because the Euler angles used by pairinteraction represent a rotation of the coordinate system. Thus, the quantisation axis has to be rotated by the inverse angle.", "# Calculate C6 coefficients\nC6 = []\nfor d in distance_surface:\n system_two.setSurfaceDistance(d)\n system_two.diagonalize()\n idx = np.argmax(system_two.getOverlap(state_two, 0, -theta, 0))\n energyshift = system_two.getHamiltonian().diagonal()[idx]-state_two.getEnergy()\n C6.append(energyshift*distance_atoms**6)\n\n# Plot results\nplt.plot(distance_surface/distance_atoms, np.abs(C6))\nplt.xlim(min(distance_surface/distance_atoms), max(distance_surface/distance_atoms))\nplt.xlabel(\"distance to surface / interatomic distance\")\nplt.ylabel(\"|C$_6$| (GHz $\\mu m^6$)\");" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
metpy/MetPy
v0.9/_downloads/ef4bfbf049be071a6c648d7918a50105/Simple_Sounding.ipynb
bsd-3-clause
[ "%matplotlib inline", "Simple Sounding\nUse MetPy as straightforward as possible to make a Skew-T LogP plot.", "import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\nimport metpy.calc as mpcalc\nfrom metpy.cbook import get_test_data\nfrom metpy.plots import add_metpy_logo, SkewT\nfrom metpy.units import units\n\n# Change default to be better for skew-T\nplt.rcParams['figure.figsize'] = (9, 9)\n\n# Upper air data can be obtained using the siphon package, but for this example we will use\n# some of MetPy's sample data.\n\ncol_names = ['pressure', 'height', 'temperature', 'dewpoint', 'direction', 'speed']\n\ndf = pd.read_fwf(get_test_data('jan20_sounding.txt', as_file_obj=False),\n skiprows=5, usecols=[0, 1, 2, 3, 6, 7], names=col_names)\n\ndf['u_wind'], df['v_wind'] = mpcalc.wind_components(df['speed'],\n np.deg2rad(df['direction']))\n\n# Drop any rows with all NaN values for T, Td, winds\ndf = df.dropna(subset=('temperature', 'dewpoint', 'direction', 'speed',\n 'u_wind', 'v_wind'), how='all').reset_index(drop=True)", "We will pull the data out of the example dataset into individual variables and\nassign units.", "p = df['pressure'].values * units.hPa\nT = df['temperature'].values * units.degC\nTd = df['dewpoint'].values * units.degC\nwind_speed = df['speed'].values * units.knots\nwind_dir = df['direction'].values * units.degrees\nu, v = mpcalc.wind_components(wind_speed, wind_dir)\n\nskew = SkewT()\n\n# Plot the data using normal plotting functions, in this case using\n# log scaling in Y, as dictated by the typical meteorological plot\nskew.plot(p, T, 'r')\nskew.plot(p, Td, 'g')\nskew.plot_barbs(p, u, v)\n\n# Add the relevant special lines\nskew.plot_dry_adiabats()\nskew.plot_moist_adiabats()\nskew.plot_mixing_lines()\nskew.ax.set_ylim(1000, 100)\n\n# Add the MetPy logo!\nfig = plt.gcf()\nadd_metpy_logo(fig, 115, 100)\n\n# Example of defining your own vertical barb spacing\nskew = SkewT()\n\n# Plot the data using normal plotting functions, in this case using\n# log scaling in Y, as dictated by the typical meteorological plot\nskew.plot(p, T, 'r')\nskew.plot(p, Td, 'g')\n\n# Set spacing interval--Every 50 mb from 1000 to 100 mb\nmy_interval = np.arange(100, 1000, 50) * units('mbar')\n\n# Get indexes of values closest to defined interval\nix = mpcalc.resample_nn_1d(p, my_interval)\n\n# Plot only values nearest to defined interval values\nskew.plot_barbs(p[ix], u[ix], v[ix])\n\n# Add the relevant special lines\nskew.plot_dry_adiabats()\nskew.plot_moist_adiabats()\nskew.plot_mixing_lines()\nskew.ax.set_ylim(1000, 100)\n\n# Add the MetPy logo!\nfig = plt.gcf()\nadd_metpy_logo(fig, 115, 100)\n\n# Show the plot\nplt.show()" ]
[ "code", "markdown", "code", "markdown", "code" ]
intel-analytics/BigDL
apps/ray/parameter_server/sharded_parameter_server.ipynb
apache-2.0
[ "This notebook is adapted from:\nhttps://github.com/ray-project/tutorial/tree/master/examples/sharded_parameter_server.ipynb\nSharded Parameter Servers\nGOAL: The goal of this exercise is to use actor handles to implement a sharded parameter server example for distributed asynchronous stochastic gradient descent.\nBefore doing this exercise, make sure you understand the concepts from the exercise on Actor Handles.\nParameter Servers\nA parameter server is simply an object that stores the parameters (or \"weights\") of a machine learning model (this could be a neural network, a linear model, or something else). It exposes two methods: one for getting the parameters and one for updating the parameters.\nIn a typical machine learning training application, worker processes will run in an infinite loop that does the following:\n1. Get the latest parameters from the parameter server.\n2. Compute an update to the parameters (using the current parameters and some data).\n3. Send the update to the parameter server.\nThe workers can operate synchronously (that is, in lock step), in which case distributed training with multiple workers is algorithmically equivalent to serial training with a larger batch of data. Alternatively, workers can operate independently and apply their updates asynchronously. The main benefit of asynchronous training is that a single slow worker will not slow down the other workers. The benefit of synchronous training is that the algorithm behavior is more predictable and reproducible.", "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport ray\nimport time", "Init SparkContext", "from bigdl.dllib.nncontext import init_spark_on_local, init_spark_on_yarn\nimport numpy as np\nimport os\nhadoop_conf_dir = os.environ.get('HADOOP_CONF_DIR')\n\nif hadoop_conf_dir:\n sc = init_spark_on_yarn(\n hadoop_conf=hadoop_conf_dir,\n conda_name=os.environ.get(\"ZOO_CONDA_NAME\", \"zoo\"), # The name of the created conda-env\n num_executors=2,\n executor_cores=4,\n executor_memory=\"2g\",\n driver_memory=\"2g\",\n driver_cores=1,\n extra_executor_memory_for_ray=\"3g\")\nelse:\n sc = init_spark_on_local(cores = 8, conf = {\"spark.driver.memory\": \"2g\"})\n\n# It may take a while to ditribute the local environment including python and java to cluster\nimport ray\nfrom bigdl.orca.ray import OrcaRayContext\nray_ctx = OrcaRayContext(sc=sc, object_store_memory=\"4g\")\nray_ctx.init()\n#ray.init(num_cpus=30, include_webui=False, ignore_reinit_error=True)", "A simple parameter server can be implemented as a Python class in a few lines of code.\nEXERCISE: Make the ParameterServer class an actor.", "dim = 10\[email protected]\nclass ParameterServer(object):\n def __init__(self, dim):\n self.parameters = np.zeros(dim)\n \n def get_parameters(self):\n return self.parameters\n \n def update_parameters(self, update):\n self.parameters += update\n\n\nps = ParameterServer.remote(dim)\n", "A worker can be implemented as a simple Python function that repeatedly gets the latest parameters, computes an update to the parameters, and sends the update to the parameter server.", "@ray.remote\ndef worker(ps, dim, num_iters):\n for _ in range(num_iters):\n # Get the latest parameters.\n parameters = ray.get(ps.get_parameters.remote())\n # Compute an update.\n update = 1e-3 * parameters + np.ones(dim)\n # Update the parameters.\n ps.update_parameters.remote(update)\n # Sleep a little to simulate a real workload.\n 
time.sleep(0.5)\n\n# Test that worker is implemented correctly. You do not need to change this line.\nray.get(worker.remote(ps, dim, 1))\n\n# Start two workers.\nworker_results = [worker.remote(ps, dim, 100) for _ in range(2)]", "As the worker tasks are executing, you can query the parameter server from the driver and see the parameters changing in the background.", "print(ray.get(ps.get_parameters.remote()))", "Sharding a Parameter Server\nAs the number of workers increases, the volume of updates being sent to the parameter server will increase. At some point, the network bandwidth into the parameter server machine or the computation down by the parameter server may be a bottleneck.\nSuppose you have $N$ workers and $1$ parameter server, and suppose each of these is an actor that lives on its own machine. Furthermore, suppose the model size is $M$ bytes. Then sending all of the parameters from the workers to the parameter server will mean that $N * M$ bytes in total are sent to the parameter server. If $N = 100$ and $M = 10^8$, then the parameter server must receive ten gigabytes, which, assuming a network bandwidth of 10 gigabits per second, would take 8 seconds. This would be prohibitive.\nOn the other hand, if the parameters are sharded (that is, split) across K parameter servers, K is 100, and each parameter server lives on a separate machine, then each parameter server needs to receive only 100 megabytes, which can be done in 80 milliseconds. This is much better.\nEXERCISE: The code below defines a parameter server shard class. Modify this class to make ParameterServerShard an actor. We will need to revisit this code soon and increase num_shards.", "@ray.remote\nclass ParameterServerShard(object):\n def __init__(self, sharded_dim):\n self.parameters = np.zeros(sharded_dim)\n \n def get_parameters(self):\n return self.parameters\n \n def update_parameters(self, update):\n self.parameters += update\n\n\ntotal_dim = (10 ** 8) // 8 # This works out to 100MB (we have 25 million\n # float64 values, which are each 8 bytes).\nnum_shards = 2 # The number of parameter server shards.\n\nassert total_dim % num_shards == 0, ('In this exercise, the number of shards must '\n 'perfectly divide the total dimension.')\n\n# Start some parameter servers.\nps_shards = [ParameterServerShard.remote(total_dim // num_shards) for _ in range(num_shards)]\n\nassert hasattr(ParameterServerShard, 'remote'), ('You need to turn ParameterServerShard into an '\n 'actor (by using the ray.remote keyword).')", "The code below implements a worker that does the following.\n1. Gets the latest parameters from all of the parameter server shards.\n2. Concatenates the parameters together to form the full parameter vector.\n3. Computes an update to the parameters.\n4. Partitions the update into one piece for each parameter server.\n5. Applies the right update to each parameter server shard.", "@ray.remote\ndef worker_task(total_dim, num_iters, *ps_shards):\n # Note that ps_shards are passed in using Python's variable number\n # of arguments feature. We do this because currently actor handles\n # cannot be passed to tasks inside of lists or other objects.\n for _ in range(num_iters):\n # Get the current parameters from each parameter server.\n parameter_shards = [ray.get(ps.get_parameters.remote()) for ps in ps_shards]\n assert all([isinstance(shard, np.ndarray) for shard in parameter_shards]), (\n 'The parameter shards must be numpy arrays. 
Did you forget to call ray.get?')\n # Concatenate them to form the full parameter vector.\n parameters = np.concatenate(parameter_shards)\n assert parameters.shape == (total_dim,)\n\n # Compute an update.\n update = np.ones(total_dim)\n # Shard the update.\n update_shards = np.split(update, len(ps_shards))\n \n # Apply the updates to the relevant parameter server shards.\n for ps, update_shard in zip(ps_shards, update_shards):\n ps.update_parameters.remote(update_shard)\n\n\n# Test that worker_task is implemented correctly. You do not need to change this line.\nray.get(worker_task.remote(total_dim, 1, *ps_shards))", "EXERCISE: Experiment by changing the number of parameter server shards, the number of workers, and the size of the data.\nNOTE: Because these processes are all running on the same machine, network bandwidth will not be a limitation and sharding the parameter server will not help. To see the difference, you would need to run the application on multiple machines. There are still regimes where sharding a parameter server can help speed up computation on the same machine (by parallelizing the computation that the parameter server processes have to do). If you want to see this effect, you should implement a synchronous training application. In the asynchronous setting, the computation is staggered and so speeding up the parameter server usually does not matter.", "num_workers = 4\n\n# Start some workers. Try changing various quantities and see how the\n# duration changes.\nstart = time.time()\nray.get([worker_task.remote(total_dim, 5, *ps_shards) for _ in range(num_workers)])\nprint('This took {} seconds.'.format(time.time() - start))" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
llooker/public-datasets-pipelines
samples/tutorial.ipynb
apache-2.0
[ "Overview\nAdd a brief description of this tutorial here.", "%%capture\n\n# Installing the required libraries:\n!pip install matplotlib pandas scikit-learn tensorflow pyarrow tqdm\n!pip install google-cloud-bigquery google-cloud-bigquery-storage\n!pip install flake8 pycodestyle pycodestyle_magic\n\n# Python Builtin Libraries\nfrom datetime import datetime\n\n# Third Party Libraries\nfrom google.cloud import bigquery\n\n# Configurations\n%matplotlib inline", "Authentication\nIn order to run this tutorial successfully, we need to be authenticated first. \nDepending on where we are running this notebook, the authentication steps may vary:\n| Runner | Authentiction Steps |\n| ----------- | ----------- |\n| Local Computer | Use a service account, or run the following command: <br><br>gcloud auth login |\n| Colab | Run the following python code and follow the instructions: <br><br>from google.colab import auth <br> auth.authenticate_user() |\n| Vertext AI (Workbench) | Authentication is provided by Workbench |", "try:\n from google.colab import auth\n\n print(\"Authenticating in Colab\")\n auth.authenticate_user()\n print(\"Authenticated\")\nexcept: # noqa\n print(\"This notebook is not running on Colab.\")\n print(\"Please make sure to follow the authentication steps.\")", "Configurations\nLet's make sure we enter the name of our GCP project in the next cell.", "# ENTER THE GCP PROJECT HERE\ngcp_project = \"YOUR-GCP-PROJECT\"\nprint(f\"gcp_project is set to {gcp_project}\")\n\ndef helper_function():\n \"\"\"\n Add a description about what this function does.\n \"\"\"\n return None", "Data Preparation\nQuery the Data", "query = \"\"\"\n SELECT\n created_date, category, complaint_type, neighborhood, latitude, longitude\n FROM\n `bigquery-public-data.san_francisco_311.311_service_requests`\n LIMIT 1000;\n\"\"\"\n\nbqclient = bigquery.Client(project=gcp_project)\ndataframe = bqclient.query(query).result().to_dataframe()", "Check the Dataframe", "print(dataframe.shape)\ndataframe.head()", "Process the Dataframe", "# Convert the datetime to date\ndataframe['created_date'] = dataframe['created_date'].apply(datetime.date)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
stereoboy/Study
Issues/algorithms/Linked Lists.ipynb
mit
[ "Add two numbers represented by linked lists", "class Node():\n def __init__(self, value, Next=None):\n self.value = value\n self.next = Next\n def __str__(self):\n if self.next == None:\n return str(self.value)\n return str(self.value) + '-' + self.next.__str__()\n\ndef getSize(node):\n count = 0\n while node != None:\n count += 1\n node = node.next\n return count\n \ndef _add(node0, size0, node1, size1):\n \n if node0 == None:\n return None, 0\n \n if size0 > size1:\n node, carry = _add(node0.next, size0 - 1, node1, size1)\n \n new = node0.value + carry\n else:\n node, carry = _add(node0.next, size0 - 1, node1.next, size1 -1)\n \n new = node0.value + node1.value + carry\n \n carry = new//10 \n new = new%10\n print(new, carry)\n return Node(new, node), carry\n\ndef add(node0, size0, node1, size1):\n \n node, carry = _add(node0, size0, node1, size1)\n if carry > 0:\n node = Node(1, node)\n \n return node\n\na = Node(1, Node(2, Node(5, Node(6, Node(3, None)))))\nb = Node(8, Node(4, Node(2, None)))\n\nprint(add(a, getSize(a), b, getSize(b)))", "2.1 Remove Dups:\nWrite code to remove duplicates from an unsorted linked list.\nFOLLOW UP\nHow would you solve this problem if a temporary buffer is not allowed?", " \nList = Node(1, Node(2, Node(3, Node(4, Node(4, Node(4, Node(3, Node(2, Node(1)))))))))\n\ndef remove_dups(List):\n marks = {}\n cur = List\n prev = None\n while cur != None:\n if marks.get(cur.value, 0) == 0: # not duplicated\n marks[cur.value] = 1\n else: # duplicated\n prev.next = cur.next\n cur = prev\n \n prev = cur\n cur = cur.next\n\nprint('input:' + str(List))\nremove_dups(List)\nprint('output:' + str(List))\n\ndef remove_dups_wo_buffer(List):\n cur0 = List\n while cur0 != None:\n prev = cur0\n cur1 = cur0.next\n while cur1 != None:\n if cur1.value == cur0.value:\n prev.next = cur1.next\n cur1 = prev\n prev = cur1\n cur1 = cur1.next\n \n cur0 = cur0.next\n \nList = Node(1, Node(2, Node(3, Node(4, Node(4, Node(4, Node(3, Node(2, Node(1, Node(3, Node(2)))))))))))\n\nprint('input:' + str(List))\nremove_dups_wo_buffer(List)\nprint('output:' + str(List))", "2.2 Return Kth to Last:\nImplement an algorithm to find the kth to last element of a singly linked list.", "List = Node(1, Node(2, Node(3, Node(4, Node(4, Node(4, Node(3, Node(2, Node(1, Node(3, Node(2)))))))))))\n\ndef kth_to_last(List, k):\n cur = List\n size = 0\n while cur != None:\n size += 1\n cur = cur.next\n if size < k:\n return None\n \n cur = List\n for _ in range(size - k):\n cur = cur.next\n return cur.value\n\nprint(kth_to_last(List, 4))\n\ndef kth_to_last(head, k, i):\n\n if head == None:\n return None\n \n node = kth_to_last(head.next, k, i)\n i[0] = i[0] + 1\n if i[0] == k:\n return head\n else:\n return node\n \nprint(kth_to_last(List, 4, [0]))" ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
TimothyADavis/KinMSpy
kinms/docs/KinMSpy_tutorial.ipynb
mit
[ "KinMS galaxy fitting tutorial\nThis tutorial aims at getting you up and running with galaxy kinematic modelling using KinMS! To start you will need to download the KinMSpy code and have it in your python path. \nTo do this you can simply call pip install kinms\nTo get started with kinematic modelling we will complete the following steps:\n1. Generate a model to fit (can be skipped if you have your own observed data cube)\n2. Read in that cube, and extract the important information from the header\n3. Fit the data using an MCMC code\nWe will start by importing a variety of modules we will need to work with KinMS, and plot its output.", "from kinms import KinMS\nimport numpy as np\nfrom astropy.io import fits\nfrom kinms.utils.KinMS_figures import KinMS_plotter", "Generate a model\nFirst we will generate a simple galaxy model using KinMS itself, that we can attempt to determine the parameters of later. If you have your own observed galaxy to fit then of course this step can be skipped!\nThe make_model function below creates a simple exponential disc:\n$\n\\begin{align}\n\\large \\Sigma_{H2}(r) \\propto e^{\\frac{-r}{d_{scale}}}\n\\end{align}\n$\nwith a circular velocity profile which is parameterized using an arctan function:\n$\n\\begin{align}\n\\large V(r) = \\frac{2V_{flat}}{\\pi} \\arctan\\left(\\frac{r}{r_{turn}}\\right)\n\\end{align}\n$", "def make_model(param,obspars,rad,filename=None,plot=False):\n '''\n This function takes in the `param` array (along with obspars; the observational setup,\n and a radius vector `rad`) and uses it to create a KinMS model.\n '''\n \n total_flux=param[0]\n posAng=param[1]\n inc=param[2]\n v_flat=param[3]\n r_turn=param[4]\n scalerad=param[5]\n \n ### Here we use an exponential disk model for the surface brightness of the gas ###\n sbprof = np.exp((-1)*rad/scalerad)\n\n ### We use a very simple arctan rotation curve model with two free parameters. ###\n vel=(v_flat*2/np.pi)*np.arctan(rad/r_turn)\n\n ### This returns the model\n return KinMS(obspars['xsize'],obspars['ysize'],obspars['vsize'],obspars['cellsize'],obspars['dv'],\\\n obspars['beamsize'],inc,sbProf=sbprof,sbRad=rad,velRad=rad,velProf=vel,\\\n intFlux=total_flux,posAng=posAng,fixSeed=True,fileName=filename).model_cube(toplot=plot)\n ", "Note that we have set fixSeed=True in the KinMS call - this is crucial if you are fitting with KinMS. It ensures if you generate two models with the same input parameters you will get an identical output model! \nNow we have our model function, lets use it to generate a model which we will later fit. The first thing we need is to define the setup of our desired datacube (typically if you are fitting real data this will all be determined from the header keywords- see below).", "### Setup cube parameters ###\nobspars={}\nobspars['xsize']=64.0 # arcseconds\nobspars['ysize']=64.0 # arcseconds\nobspars['vsize']=500.0 # km/s\nobspars['cellsize']=1.0 # arcseconds/pixel\nobspars['dv']=20.0 # km/s/channel\nobspars['beamsize']=np.array([4.0,4.0,0]) # [bmaj,bmin,bpa] in (arcsec, arcsec, degrees)", "We also need to create a radius vector- you ideally want this to oversample your pixel grid somewhat to avoid interpolation errors!", "rad=np.arange(0,100,0.3)", "Now we have all the ingredients we can create our data to fit. 
Here we will also output the model to disc, so we can demonstrate how to read in the header keywords from real ALMA/VLA etc data.", "'''\nTrue values for the flux, posang, inc etc, as defined in the model function\n'''\n\nguesses=np.array([30.,270.,45.,200.,2.,5.]) \n\n'''\nRMS of data. Here we are making our own model so this is arbitary. \nWhen fitting real data this should be the observational RMS\n'''\nerror=np.array(1e-3)\n\n\nfdata=make_model(guesses,obspars,rad, filename=\"Test\",plot=True)", "Read in the data\nIn this example we already have our data in memory. But if you are fitting a real datacube this wont be the case! Here we read in the model we just created from a FITS file to make it clear how to do this.", "### Load in your observational data ###\nhdulist = fits.open('Test_simcube.fits',ignore_blank=True)\nfdata = hdulist[0].data.T \n\n\n### Setup cube parameters ###\nobspars={}\nobspars['cellsize']=np.abs(hdulist[0].header['cdelt1']*3600.) # arcseconds/pixel\nobspars['dv']=np.abs(hdulist[0].header['cdelt3']/1e3) # km/s/channel\nobspars['xsize']=hdulist[0].header['naxis1']*obspars['cellsize'] # arcseconds\nobspars['ysize']=hdulist[0].header['naxis2']*obspars['cellsize'] # arcseconds\nobspars['vsize']=hdulist[0].header['naxis3']*obspars['dv'] # km/s\nobspars['beamsize']=np.array([hdulist[0].header['bmaj']*3600.,hdulist[0].header['bmin']*3600.,hdulist[0].header['bpa']])# [bmaj,bmin,bpa] in (arcsec, arcsec, degrees)\n", "Fit the model\nNow we have our 'observational' data read into memory, and a model function defined, we can fit one to the other! As our fake model is currently noiseless, lets add some gaussian noise (obviously dont do this if your data is from a real telecope!):", "fdata+=(np.random.normal(size=fdata.shape)*error)", "Below we will proceed using the MCMC code GAStimator which was specifically designed to work with KinMS, however any minimiser should work in principle. For full details of how this code works, and a tutorial, see https://github.com/TimothyADavis/GAStimator .", "from gastimator import gastimator,corner_plot\n\nmcmc = gastimator(make_model,obspars,rad)\n\nmcmc.labels=np.array(['Flux','posAng',\"Inc\",\"VFlat\",\"R_turn\",\"scalerad\"])\nmcmc.min=np.array([30.,1.,10,50,0.1,0.1])\nmcmc.max=np.array([30.,360.,80,400,20,10])\nmcmc.fixed=np.array([True,False,False,False,False,False])\nmcmc.precision=np.array([1.,1.,1.,10,0.1,0.1])\nmcmc.guesses=np.array([30.,275.,55.,210.,2.5,4.5]) #starting guesses, purposefully off!", "Setting good priors on the flux of your source is crucial to ensure the model outputs are physical. Luckily the integrated flux of your source should be easy to measure from your datacube! If you have a good measurement of this, then I would recommend forcing the total flux to that value by fixing it in the model (set mcmc.fixed=True for that parameter). If you can only get a guess then set as tight a prior as you can. This stops the model hiding bad fitting components below the noise level. \nIts always a good idea to plot your model over your data before you start a fitting processes. That allows you to check that the model is reasonable, and tweak the parameters by hand to get good starting guesses. 
First you should generate a cube from your model function, then you can overplot it on your data using the simple plotting tool included with KinMS:", "model=make_model(mcmc.guesses,obspars,rad) # make a model from your guesses\n\nKinMS_plotter(fdata, obspars['xsize'], obspars['ysize'], obspars['vsize'], obspars['cellsize'],\\\n              obspars['dv'], obspars['beamsize'], posang=guesses[1],overcube=model,rms=error).makeplots()", "As you can see, the black contours of the model aren't a perfect match to the moment zero, spectrum and position-velocity diagram extracted from our \"observed\" datacube. One could tweak by hand, but as these are already close we can go on to do a fit! \nIf you are experimenting then running until convergence should be good enough to get an idea of whether the model is physical (setting a low number of iterations, ~3000 works for me).", "outputvalue, outputll= mcmc.run(fdata,error,3000,plot=False)", "As you can see, the final parameters (listed in the output with their 1$\\sigma$ errors) are pretty close to those we input! One could use the corner_plot routine shipped with GAStimator to visualize our results, but with only 3000 steps (and a $\\approx$30% acceptance rate) these won't be very pretty. If you need good error estimates/nice-looking corner plots for publication then I recommend at least 30,000 iterations, which may take several hours/days depending on your system and the size of your datacube. \nOne can visualize the best-fit model again to check how we did - turns out pretty well! (Note the flux in the integrated spectrum isn't perfect; this is because of the masking of the noisy data).", "bestmodel=make_model(np.median(outputvalue,1),obspars,rad) # make a model from the median best-fit parameters\n\nKinMS_plotter(fdata, obspars['xsize'], obspars['ysize'], obspars['vsize'], obspars['cellsize'],\\\n              obspars['dv'], obspars['beamsize'], posang=guesses[1],overcube=bestmodel,rms=error).makeplots()", "Tiny error problem\nI have found that fitting whole datacubes with kinematic modelling tools such as KinMS can yield unphysically small uncertainties, for instance constraining inclination to $\\pm\\approx0.1^{\\circ}$ in the fit example performed above. This is essentially a form of model mismatch - you are finding the very best model of a given type that fits the data - and as a data cube gives you a very large number of constraints you can pin down that best model (no matter how bad it is at actually fitting the data!) really well. \nIn works such as Smith et al. (2019) we have attempted to get around this by taking into account the variance of the $\\chi^2$ statistic.\nAs observed data are noisy, the $\\chi^2$ statistic has an additional uncertainty associated with it, following the chi-squared distribution (Andrae 2010). This distribution has a variance of $2(N - P)$, where $N$ is the number of constraints and $P$ the number of inferred parameters. For fitting datacubes $N$ is very large, so the variance becomes $\\approx2N$. \nSystematic effects can produce variations of $\\chi^2$ of the order of this variance, and ignoring this effect yields unrealistically small uncertainty estimates. In order to mitigate this effect van den Bosch & van de Ven (2009) proposed to increase the $1\\sigma$ confidence interval to $\\Delta\\chi^2=\\sqrt{2N}$. To achieve the same effect within the Bayesian MCMC approach discussed above we need to scale the log-likelihood by increasing the RMS estimate provided to GAStimator by $(2N)^{1/4}$. 
This approach appears to yield physically credible formal uncertainties in the inferred parameters, whereas otherwise these uncertainties are unphysically small. \nLet's try that with the example above:", "error*=((2.0*fdata.size)**(0.25))\noutputvalue, outputll= mcmc.run(fdata,error,3000,plot=False)", "As you can see we now get much more reasonable error estimates, for instance a 1$\\sigma$ inclination error of $\\pm2^{\\circ}$. \nIf you want to implement this fix yourself there is a wrinkle to consider. You need to be careful choosing $N$. Formally this should be the number of constraints - i.e. the total number of pixels in your cube. But consider a large datacube with signal only in a small section (although fitting such a datacube would be inefficient anyway; see the speed tips below): all of the actual constraints are coming from a small number of pixels. If you find yourself in this situation I would recommend setting $N$ to the number of pixels with actual detected flux in your datacube. \nSpeed tips\nThere are some common ways to make sure you don't spend a lot of time MCMCing rather than doing science. \n\n\nCut down your observed data to only include spaxels and frequency channels near signal. Ideally you want some padding around your observed signal so the model knows it must not include flux in those positions, but not so much as to drastically increase runtime! On a similar note...\n\n\nMake sure your observed data (and thus models) have spatial dimensions that are $2^n$. If this is impossible then $3^n$ and $6^n$ are pretty good too. This is because convolving the KinMS model with the beam takes the majority of the computation time, and FFTs are faster when working with such dimensions.\n\n\nDon't provide a radius vector that is very oversampled, or that extends well beyond the projected dimensions of the cube. This can slow down internal interpolation routines." ]
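The $(2N)^{1/4}$ RMS inflation and the effective-$N$ suggestion from the "Tiny error problem" section above can be wrapped in a small helper. The sketch below is an editorial addition rather than part of the original tutorial: the function name `scaled_rms`, the `use_detected_only` switch and the 3-sigma detection threshold are illustrative assumptions, and you should adapt the detection criterion to your own data.

```python
# Minimal sketch (not from the original tutorial) of the error scaling discussed
# above, optionally counting only pixels with detected flux as the constraints N.
import numpy as np

def scaled_rms(fdata, rms, detection_threshold=3.0, use_detected_only=False):
    """Inflate the RMS by (2N)**0.25, mirroring the Delta-chi^2 = sqrt(2N) argument."""
    if use_detected_only:
        # Assumption: pixels above a simple S/N cut are the pixels that actually constrain the fit.
        n_constraints = int(np.sum(np.abs(fdata) > detection_threshold * rms))
    else:
        # Formal choice: every pixel in the cube counts as a constraint.
        n_constraints = fdata.size
    return rms * (2.0 * n_constraints) ** 0.25

# Hypothetical usage: pass the inflated RMS to mcmc.run in place of `error`,
# which reproduces the scaling applied in the cell above when use_detected_only=False.
# error_for_fit = scaled_rms(fdata, 1e-3, use_detected_only=True)
```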
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
napsternxg/ipython-notebooks
Monte Carlo Integration.ipynb
apache-2.0
[ "Introduction to Monte Carlo Integration\nInspired from the following posts:\n\nhttp://nbviewer.jupyter.org/github/cs109/content/blob/master/labs/lab7/GibbsSampler.ipynb\nhttp://twiecki.github.io/blog/2015/11/10/mcmc-sampling/\nhttps://en.wikipedia.org/wiki/Monte_Carlo_integration", "%matplotlib inline\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom numba import jit # Use it for speed\n\nfrom scipy import stats", "What is Monte Carlo (MC) Integration?\nLet us say that we want to approximate the area between the curve defined by $f(x) = x^2 + 3x + \\ln{x}$ between $x\\in (0,5]$ and the x-axis.", "def f(x):\n return x**2 + 3*x + np.log(x)\n\nstep= 0.001\nx = np.arange(1,5+step*0.1,step)\ny = f(x)\nprint x.min(), x.max()\nprint y.min(), y.max()\nplt.plot(x, y, lw=2., color=\"r\")\nplt.fill_between(x, 0, y, color=\"r\", alpha=0.5)\nplt.axhline(y=0, lw=1., color=\"k\", linestyle=\"--\")\nplt.axhline(y=y.max(), lw=1., color=\"k\", linestyle=\"--\")\nplt.axvline(x=x.min(), lw=1., color=\"k\", linestyle=\"--\")\nplt.axvline(x=x.max(), lw=1., color=\"k\", linestyle=\"--\")\nplt.xlabel(\"x\")\nplt.ylabel(\"y\")\nplt.title(\"$f(x) = x^2 + 3x + \\ln{x}, x\\in[1,5]$\")", "Concretely, we are interested in knowing the area of the red-shaded region in the above figure. Furthermore, I have also provided a rectangular bounding box for the range of values of $x$ and $y$. The true value of the area under the curve is $\\sim{81.381}$ using its analytic integral formula (see http://www.wolframalpha.com/input/?i=integrate+x%5E2+%2B+3x+%2B+ln(x),+x+in+%5B1,5%5D).\nThe most accurate way to get the value of the area is to find the value of the definite integral $\\int_{1}^{5} f(x) dx$. However, in many cases analytically finding this integral is very tough, especially if the function is not easily integrable. This is where numerical methods for approximating the integral come handy. Monte Carlo (MC) techniques are one of the most popular form of numerical solution used for definite integral calculation.\nA basic intuition of the Monte Carlo Integration is as follows:\n* Define the input domain $[a, b]$ of the integral $\\int_{a}^{b} f(x) dx$.\n* Uniformly, sample $N$ points from rectangular region between $[a, b)$ and $[\\min(f(x)), \\max(f(x)))$\n* Find the proportion of points that lie in the region included in the area of $f(x)$, call it $p$\n* Multiply the area of the rectangular region ($A$) by $p$ to get the area under the curve $A^=pA$\n* As $N \\to \\infty$, the area of the shaded region $A^* \\to \\int_{a}^{b} f(x) dx$\n* Usually, a much smaller value of $N$ will give approximate value within a reasonable error span.\nBelow, we will try to approximate the area of the curve using the MC integration method described above. 
We will use $N = 10^5$, and plot the points which fall in the region of the area in red and the other points in grey.", "@jit\ndef get_MC_area(x, y, f, N=10**5, plot=False):\n    x_rands = x.min() + np.random.rand(N) * (x.max() - x.min())\n    y_rands = np.random.rand(N) * y.max()\n    y_true = f(x_rands)\n    integral_idx = (y_rands <= y_true)\n    if plot:\n        plt.plot(x_rands[integral_idx], y_rands[integral_idx],\n                 alpha=0.3, color=\"r\", linestyle='none',\n                 marker='.', markersize=0.5)\n        plt.plot(x_rands[~integral_idx], y_rands[~integral_idx],\n                 alpha=0.3, color=\"0.5\", linestyle='none',\n                 marker='.', markersize=0.5)\n        plt.axhline(y=0, lw=1., color=\"k\", linestyle=\"--\")\n        plt.axhline(y=y.max(), lw=1., color=\"k\", linestyle=\"--\")\n        plt.axvline(x=x.min(), lw=1., color=\"k\", linestyle=\"--\")\n        plt.axvline(x=x.max(), lw=1., color=\"k\", linestyle=\"--\")\n        plt.xlabel(\"x\")\n        plt.ylabel(\"y\")\n        plt.title(\"$f(x) = x^2 + 3x + \\\\ln{x}, x\\\\in[1,5]; N=%s$\" % N)\n    print(\"Proportion points in space: %.3f\" % (integral_idx).mean())\n    area = (integral_idx).mean() * (\n        (x_rands.max() - x_rands.min()) * (y_rands.max() - y_rands.min())\n    )\n    return area\n    \n\narea = get_MC_area(x, y, f, N=10**5, plot=True)\nprint(\"Area is: %.3f\" % area)", "As we can observe, the number of points which fall inside the region of interest is proportional to the area of the region. The area, however, is only marginally close to the true area of $81.38$. Let us also try with a higher value of $N=10^7$.", "area = get_MC_area(x, y, f, N=10**7, plot=True)\nprint(\"Area is: %.3f\" % area)", "The above figure shows that for $N=10^7$, the region covered by the sampled points is almost as smooth as the shaded region. Furthermore, the area is closer to the true value of $81.38$.\nNow, let us also analyze how the value of the calculated area changes with the order of magnitude of the number of sampled points.", "for i in range(2,8):\n    area = get_MC_area(x, y, f, N=10**i, plot=False)\n    print(i, area)", "Clearly, as the number of points increases, the area becomes closer to the true value.\nLet us further examine this change by starting with $10^3$ points and then going all the way up to $10^6$ points.", "%%time\nN_vals = 1000 + np.arange(1000)*1000\nareas = np.zeros_like(N_vals, dtype=\"float\")\nfor i, N in enumerate(N_vals):\n    area = get_MC_area(x, y, f, N=N, plot=False)\n    areas[i] = area\n\nprint(\"Mean area of last 100 points: %.3f\" % np.mean(areas[-100:]))\nprint(\"Areas of last 10 points: \", areas[-10:])\n\nplt.plot(N_vals, areas, color=\"0.1\", alpha=0.7)\nplt.axhline(y=np.mean(areas[100:]), linestyle=\"--\", lw=1., color=\"k\")\nplt.ylabel(\"Area\")\nplt.xlabel(\"Number of samples\")\n#plt.xscale(\"log\")", "As we can observe from the figure above, for lower numbers of sampled points the estimates of MC integration are quite noisy. However, for larger numbers of points the estimate converges to the true value." ]
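The noisiness at small $N$ and the settling at large $N$ seen above follow the usual Monte Carlo behaviour, where the spread of the estimate shrinks roughly like $1/\sqrt{N}$. The short sketch below is an addition rather than part of the original notebook; it reuses `get_MC_area`, `x`, `y` and `f` defined earlier, and the choice of 20 repeats per sample size is an arbitrary assumption.

```python
# Illustrative addition: repeat the hit-or-miss estimate several times per N
# and print the spread, which should shrink roughly like 1/sqrt(N).
import numpy as np

n_repeats = 20  # assumption: enough repeats to see the trend without a long runtime
for N in [10**3, 10**4, 10**5]:
    estimates = np.array([get_MC_area(x, y, f, N=N, plot=False) for _ in range(n_repeats)])
    print("N=%7d  mean=%8.3f  std=%6.3f" % (N, estimates.mean(), estimates.std()))
```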
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
ES-DOC/esdoc-jupyterhub
notebooks/nasa-giss/cmip6/models/giss-e2-1h/aerosol.ipynb
gpl-3.0
[ "ES-DOC CMIP6 Model Properties - Aerosol\nMIP Era: CMIP6\nInstitute: NASA-GISS\nSource ID: GISS-E2-1H\nTopic: Aerosol\nSub-Topics: Transport, Emissions, Concentrations, Optical Radiative Properties, Model. \nProperties: 69 (37 required)\nModel descriptions: Model description details\nInitialized From: -- \nNotebook Help: Goto notebook help page\nNotebook Initialised: 2018-02-15 16:54:20\nDocument Setup\nIMPORTANT: to be executed each time you run the notebook", "# DO NOT EDIT ! \nfrom pyesdoc.ipython.model_topic import NotebookOutput \n\n# DO NOT EDIT ! \nDOC = NotebookOutput('cmip6', 'nasa-giss', 'giss-e2-1h', 'aerosol')", "Document Authors\nSet document authors", "# Set as follows: DOC.set_author(\"name\", \"email\") \n# TODO - please enter value(s)", "Document Contributors\nSpecify document contributors", "# Set as follows: DOC.set_contributor(\"name\", \"email\") \n# TODO - please enter value(s)", "Document Publication\nSpecify document publication status", "# Set publication status: \n# 0=do not publish, 1=publish. \nDOC.set_publication_status(0)", "Document Table of Contents\n1. Key Properties\n2. Key Properties --&gt; Software Properties\n3. Key Properties --&gt; Timestep Framework\n4. Key Properties --&gt; Meteorological Forcings\n5. Key Properties --&gt; Resolution\n6. Key Properties --&gt; Tuning Applied\n7. Transport\n8. Emissions\n9. Concentrations\n10. Optical Radiative Properties\n11. Optical Radiative Properties --&gt; Absorption\n12. Optical Radiative Properties --&gt; Mixtures\n13. Optical Radiative Properties --&gt; Impact Of H2o\n14. Optical Radiative Properties --&gt; Radiative Scheme\n15. Optical Radiative Properties --&gt; Cloud Interactions\n16. Model \n1. Key Properties\nKey properties of the aerosol model\n1.1. Model Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of aerosol model.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.model_overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.2. Model Name\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nName of aerosol model code", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.model_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.3. Scheme Scope\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nAtmospheric domains covered by the aerosol model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.scheme_scope') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"troposhere\" \n# \"stratosphere\" \n# \"mesosphere\" \n# \"mesosphere\" \n# \"whole atmosphere\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "1.4. Basic Approximations\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nBasic approximations made in the aerosol model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.basic_approximations') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.5. Prognostic Variables Form\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nPrognostic variables in the aerosol model", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.aerosol.key_properties.prognostic_variables_form') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"3D mass/volume ratio for aerosols\" \n# \"3D number concenttration for aerosols\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "1.6. Number Of Tracers\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nNumber of tracers in the aerosol model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.number_of_tracers') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "1.7. Family Approach\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nAre aerosol calculations generalized into families of species?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.family_approach') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "2. Key Properties --&gt; Software Properties\nSoftware properties of aerosol code\n2.1. Repository\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nLocation of code for this component.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.software_properties.repository') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "2.2. Code Version\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nCode version identifier.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.software_properties.code_version') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "2.3. Code Languages\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nCode language(s).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.software_properties.code_languages') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "3. Key Properties --&gt; Timestep Framework\nPhysical properties of seawater in ocean\n3.1. Method\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nMathematical method deployed to solve the time evolution of the prognostic variables", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.timestep_framework.method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Uses atmospheric chemistry time stepping\" \n# \"Specific timestepping (operator splitting)\" \n# \"Specific timestepping (integrated)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "3.2. Split Operator Advection Timestep\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nTimestep for aerosol advection (in seconds)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_advection_timestep') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "3.3. Split Operator Physical Timestep\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nTimestep for aerosol physics (in seconds).", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_physical_timestep') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "3.4. Integrated Timestep\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTimestep for the aerosol model (in seconds)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_timestep') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "3.5. Integrated Scheme Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nSpecify the type of timestep scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_scheme_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Explicit\" \n# \"Implicit\" \n# \"Semi-implicit\" \n# \"Semi-analytic\" \n# \"Impact solver\" \n# \"Back Euler\" \n# \"Newton Raphson\" \n# \"Rosenbrock\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "4. Key Properties --&gt; Meteorological Forcings\n**\n4.1. Variables 3D\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nThree dimensionsal forcing variables, e.g. U, V, W, T, Q, P, conventive mass flux", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_3D') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4.2. Variables 2D\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nTwo dimensionsal forcing variables, e.g. land-sea mask definition", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_2D') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4.3. Frequency\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nFrequency with which meteological forcings are applied (in seconds).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.frequency') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "5. Key Properties --&gt; Resolution\nResolution in the aersosol model grid\n5.1. Name\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nThis is a string usually used by the modelling group to describe the resolution of this grid, e.g. ORCA025, N512L180, T512L70 etc.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.resolution.name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "5.2. Canonical Horizontal Resolution\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nExpression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.resolution.canonical_horizontal_resolution') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "5.3. 
Number Of Horizontal Gridpoints\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nTotal number of horizontal (XY) points (or degrees of freedom) on computational grid.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_horizontal_gridpoints') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "5.4. Number Of Vertical Levels\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nNumber of vertical levels resolved on computational grid.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_vertical_levels') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "5.5. Is Adaptive Grid\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDefault is False. Set true if grid resolution changes during execution.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.resolution.is_adaptive_grid') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "6. Key Properties --&gt; Tuning Applied\nTuning methodology for aerosol model\n6.1. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nGeneral overview description of tuning: explain and motivate the main targets and metrics retained. &amp;Document the relative weight given to climate performance metrics versus process oriented metrics, &amp;and on the possible conflicts with parameterization level tuning. In particular describe any struggle &amp;with a parameter value that required pushing it to its limits to solve a particular model deficiency.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.tuning_applied.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6.2. Global Mean Metrics Used\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nList set of metrics of the global mean state used in tuning model/component", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.tuning_applied.global_mean_metrics_used') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6.3. Regional Metrics Used\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nList of regional metrics of mean state used in tuning model/component", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.tuning_applied.regional_metrics_used') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6.4. Trend Metrics Used\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nList observed trend metrics used in tuning model/component", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.tuning_applied.trend_metrics_used') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7. Transport\nAerosol transport\n7.1. 
Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of transport in atmosperic aerosol model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.transport.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7.2. Scheme\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nMethod for aerosol transport modeling", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.transport.scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Uses Atmospheric chemistry transport scheme\" \n# \"Specific transport scheme (eulerian)\" \n# \"Specific transport scheme (semi-lagrangian)\" \n# \"Specific transport scheme (eulerian and semi-lagrangian)\" \n# \"Specific transport scheme (lagrangian)\" \n# TODO - please enter value(s)\n", "7.3. Mass Conservation Scheme\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nMethod used to ensure mass conservation.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.transport.mass_conservation_scheme') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Uses Atmospheric chemistry transport scheme\" \n# \"Mass adjustment\" \n# \"Concentrations positivity\" \n# \"Gradients monotonicity\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "7.4. Convention\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nTransport by convention", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.transport.convention') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Uses Atmospheric chemistry transport scheme\" \n# \"Convective fluxes connected to tracers\" \n# \"Vertical velocities connected to tracers\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "8. Emissions\nAtmospheric aerosol emissions\n8.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of emissions in atmosperic aerosol model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.emissions.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.2. Method\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nMethod used to define aerosol species (several methods allowed because the different species may not use the same method).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.emissions.method') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"None\" \n# \"Prescribed (climatology)\" \n# \"Prescribed CMIP6\" \n# \"Prescribed above surface\" \n# \"Interactive\" \n# \"Interactive above surface\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "8.3. Sources\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nSources of the aerosol species are taken into account in the emissions scheme", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.aerosol.emissions.sources') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Vegetation\" \n# \"Volcanos\" \n# \"Bare ground\" \n# \"Sea surface\" \n# \"Lightning\" \n# \"Fires\" \n# \"Aircraft\" \n# \"Anthropogenic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "8.4. Prescribed Climatology\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nSpecify the climatology type for aerosol emissions", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.emissions.prescribed_climatology') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Constant\" \n# \"Interannual\" \n# \"Annual\" \n# \"Monthly\" \n# \"Daily\" \n# TODO - please enter value(s)\n", "8.5. Prescribed Climatology Emitted Species\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList of aerosol species emitted and prescribed via a climatology", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.emissions.prescribed_climatology_emitted_species') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.6. Prescribed Spatially Uniform Emitted Species\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList of aerosol species emitted and prescribed as spatially uniform", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.emissions.prescribed_spatially_uniform_emitted_species') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.7. Interactive Emitted Species\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList of aerosol species emitted and specified via an interactive method", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.emissions.interactive_emitted_species') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.8. Other Emitted Species\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList of aerosol species emitted and specified via an &quot;other method&quot;", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.emissions.other_emitted_species') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.9. Other Method Characteristics\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nCharacteristics of the &quot;other method&quot; used for aerosol emissions", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.emissions.other_method_characteristics') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9. Concentrations\nAtmospheric aerosol concentrations\n9.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of concentrations in atmosperic aerosol model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.concentrations.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9.2. Prescribed Lower Boundary\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList of species prescribed at the lower boundary.", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.aerosol.concentrations.prescribed_lower_boundary') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9.3. Prescribed Upper Boundary\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList of species prescribed at the upper boundary.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.concentrations.prescribed_upper_boundary') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9.4. Prescribed Fields Mmr\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList of species prescribed as mass mixing ratios.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9.5. Prescribed Fields Mmr\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList of species prescribed as AOD plus CCNs.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "10. Optical Radiative Properties\nAerosol optical and radiative properties\n10.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of optical and radiative properties", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "11. Optical Radiative Properties --&gt; Absorption\nAbsortion properties in aerosol scheme\n11.1. Black Carbon\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAbsorption mass coefficient of black carbon at 550nm (if non-absorbing enter 0)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.black_carbon') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "11.2. Dust\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAbsorption mass coefficient of dust at 550nm (if non-absorbing enter 0)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.dust') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "11.3. Organics\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAbsorption mass coefficient of organics at 550nm (if non-absorbing enter 0)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.organics') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "12. Optical Radiative Properties --&gt; Mixtures\n**\n12.1. External\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs there external mixing with respect to chemical composition?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.external') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "12.2. 
Internal\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs there internal mixing with respect to chemical composition?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.internal') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "12.3. Mixing Rule\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf there is internal mixing with respect to chemical composition then indicate the mixinrg rule", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.mixing_rule') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "13. Optical Radiative Properties --&gt; Impact Of H2o\n**\n13.1. Size\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDoes H2O impact size?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.size') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "13.2. Internal Mixture\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDoes H2O impact internal mixture?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.internal_mixture') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "14. Optical Radiative Properties --&gt; Radiative Scheme\nRadiative scheme for aerosol\n14.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of radiative scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "14.2. Shortwave Bands\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nNumber of shortwave bands", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.shortwave_bands') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "14.3. Longwave Bands\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nNumber of longwave bands", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.longwave_bands') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "15. Optical Radiative Properties --&gt; Cloud Interactions\nAerosol-cloud interactions\n15.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of aerosol-cloud interactions", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "15.2. Twomey\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs the Twomey effect included?", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "15.3. Twomey Minimum Ccn\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf the Twomey effect is included, then what is the minimum CCN number?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey_minimum_ccn') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "15.4. Drizzle\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDoes the scheme affect drizzle?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.drizzle') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "15.5. Cloud Lifetime\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDoes the scheme affect cloud lifetime?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.cloud_lifetime') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "15.6. Longwave Bands\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nNumber of longwave bands", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.longwave_bands') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "16. Model\nAerosol model\n16.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of atmosperic aerosol model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.model.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "16.2. Processes\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nProcesses included in the Aerosol model.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.model.processes') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Dry deposition\" \n# \"Sedimentation\" \n# \"Wet deposition (impaction scavenging)\" \n# \"Wet deposition (nucleation scavenging)\" \n# \"Coagulation\" \n# \"Oxidation (gas phase)\" \n# \"Oxidation (in cloud)\" \n# \"Condensation\" \n# \"Ageing\" \n# \"Advection (horizontal)\" \n# \"Advection (vertical)\" \n# \"Heterogeneous chemistry\" \n# \"Nucleation\" \n# TODO - please enter value(s)\n", "16.3. Coupling\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nOther model components coupled to the Aerosol model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.model.coupling') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Radiation\" \n# \"Land surface\" \n# \"Heterogeneous chemistry\" \n# \"Clouds\" \n# \"Ocean\" \n# \"Cryosphere\" \n# \"Gas phase chemistry\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "16.4. 
Gas Phase Precursors\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nList of gas phase aerosol precursors.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.model.gas_phase_precursors') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"DMS\" \n# \"SO2\" \n# \"Ammonia\" \n# \"Iodine\" \n# \"Terpene\" \n# \"Isoprene\" \n# \"VOC\" \n# \"NOx\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "16.5. Scheme Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nType(s) of aerosol scheme used by the aerosols model (potentially multiple: some species may be covered by one type of aerosol scheme and other species covered by another type).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.model.scheme_type') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Bulk\" \n# \"Modal\" \n# \"Bin\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "16.6. Bulk Scheme Species\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nList of species covered by the bulk scheme.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.model.bulk_scheme_species') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Sulphate\" \n# \"Nitrate\" \n# \"Sea salt\" \n# \"Dust\" \n# \"Ice\" \n# \"Organic\" \n# \"Black carbon / soot\" \n# \"SOA (secondary organic aerosols)\" \n# \"POM (particulate organic matter)\" \n# \"Polar stratospheric ice\" \n# \"NAT (Nitric acid trihydrate)\" \n# \"NAD (Nitric acid dihydrate)\" \n# \"STS (supercooled ternary solution aerosol particule)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "©2017 ES-DOC" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
ES-DOC/esdoc-jupyterhub
notebooks/cnrm-cerfacs/cmip6/models/cnrm-cm6-1/seaice.ipynb
gpl-3.0
[ "ES-DOC CMIP6 Model Properties - Seaice\nMIP Era: CMIP6\nInstitute: CNRM-CERFACS\nSource ID: CNRM-CM6-1\nTopic: Seaice\nSub-Topics: Dynamics, Thermodynamics, Radiative Processes. \nProperties: 80 (63 required)\nModel descriptions: Model description details\nInitialized From: -- \nNotebook Help: Goto notebook help page\nNotebook Initialised: 2018-02-15 16:53:52\nDocument Setup\nIMPORTANT: to be executed each time you run the notebook", "# DO NOT EDIT ! \nfrom pyesdoc.ipython.model_topic import NotebookOutput \n\n# DO NOT EDIT ! \nDOC = NotebookOutput('cmip6', 'cnrm-cerfacs', 'cnrm-cm6-1', 'seaice')", "Document Authors\nSet document authors", "# Set as follows: DOC.set_author(\"name\", \"email\") \n# TODO - please enter value(s)", "Document Contributors\nSpecify document contributors", "# Set as follows: DOC.set_contributor(\"name\", \"email\") \n# TODO - please enter value(s)", "Document Publication\nSpecify document publication status", "# Set publication status: \n# 0=do not publish, 1=publish. \nDOC.set_publication_status(0)", "Document Table of Contents\n1. Key Properties --&gt; Model\n2. Key Properties --&gt; Variables\n3. Key Properties --&gt; Seawater Properties\n4. Key Properties --&gt; Resolution\n5. Key Properties --&gt; Tuning Applied\n6. Key Properties --&gt; Key Parameter Values\n7. Key Properties --&gt; Assumptions\n8. Key Properties --&gt; Conservation\n9. Grid --&gt; Discretisation --&gt; Horizontal\n10. Grid --&gt; Discretisation --&gt; Vertical\n11. Grid --&gt; Seaice Categories\n12. Grid --&gt; Snow On Seaice\n13. Dynamics\n14. Thermodynamics --&gt; Energy\n15. Thermodynamics --&gt; Mass\n16. Thermodynamics --&gt; Salt\n17. Thermodynamics --&gt; Salt --&gt; Mass Transport\n18. Thermodynamics --&gt; Salt --&gt; Thermodynamics\n19. Thermodynamics --&gt; Ice Thickness Distribution\n20. Thermodynamics --&gt; Ice Floe Size Distribution\n21. Thermodynamics --&gt; Melt Ponds\n22. Thermodynamics --&gt; Snow Processes\n23. Radiative Processes \n1. Key Properties --&gt; Model\nName of seaice model used.\n1.1. Model Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of sea ice model.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.model.model_overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.2. Model Name\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nName of sea ice model code (e.g. CICE 4.2, LIM 2.1, etc.)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.model.model_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "2. Key Properties --&gt; Variables\nList of prognostic variable in the sea ice model.\n2.1. Prognostic\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nList of prognostic variables in the sea ice component.", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.seaice.key_properties.variables.prognostic') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Sea ice temperature\" \n# \"Sea ice concentration\" \n# \"Sea ice thickness\" \n# \"Sea ice volume per grid cell area\" \n# \"Sea ice u-velocity\" \n# \"Sea ice v-velocity\" \n# \"Sea ice enthalpy\" \n# \"Internal ice stress\" \n# \"Salinity\" \n# \"Snow temperature\" \n# \"Snow depth\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "3. Key Properties --&gt; Seawater Properties\nProperties of seawater relevant to sea ice\n3.1. Ocean Freezing Point\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nEquation used to compute the freezing point (in deg C) of seawater, as a function of salinity and pressure", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"TEOS-10\" \n# \"Constant\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "3.2. Ocean Freezing Point Value\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf using a constant seawater freezing point, specify this value.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point_value') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "4. Key Properties --&gt; Resolution\nResolution of the sea ice grid\n4.1. Name\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nThis is a string usually used by the modelling group to describe the resolution of this grid e.g. N512L180, T512L70, ORCA025 etc.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.resolution.name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4.2. Canonical Horizontal Resolution\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nExpression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.resolution.canonical_horizontal_resolution') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4.3. Number Of Horizontal Gridpoints\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTotal number of horizontal (XY) points (or degrees of freedom) on computational grid.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.resolution.number_of_horizontal_gridpoints') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "5. Key Properties --&gt; Tuning Applied\nTuning applied to sea ice model component\n5.1. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nGeneral overview description of tuning: explain and motivate the main targets and metrics retained. Document the relative weight given to climate performance metrics versus process oriented metrics, and on the possible conflicts with parameterization level tuning. 
In particular describe any struggle with a parameter value that required pushing it to its limits to solve a particular model deficiency.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.tuning_applied.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "5.2. Target\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat was the aim of tuning, e.g. correct sea ice minima, correct seasonal cycle.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.tuning_applied.target') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "5.3. Simulations\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\n*Which simulations had tuning applied, e.g. all, not historical, only pi-control? *", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.tuning_applied.simulations') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "5.4. Metrics Used\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nList any observed metrics used in tuning model/parameters", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.tuning_applied.metrics_used') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "5.5. Variables\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nWhich variables were changed during the tuning process?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.tuning_applied.variables') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6. Key Properties --&gt; Key Parameter Values\nValues of key parameters\n6.1. Typical Parameters\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nWhat values were specificed for the following parameters if used?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.key_parameter_values.typical_parameters') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Ice strength (P*) in units of N m{-2}\" \n# \"Snow conductivity (ks) in units of W m{-1} K{-1} \" \n# \"Minimum thickness of ice created in leads (h0) in units of m\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "6.2. Additional Parameters\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nIf you have any additional paramterised values that you have used (e.g. minimum open water fraction or bare ice albedo), please provide them here as a comma separated list", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.key_parameter_values.additional_parameters') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7. Key Properties --&gt; Assumptions\nAssumptions made in the sea ice model\n7.1. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nGeneral overview description of any key assumptions made in this model.", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.seaice.key_properties.assumptions.description') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7.2. On Diagnostic Variables\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nNote any assumptions that specifically affect the CMIP6 diagnostic sea ice variables.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.assumptions.on_diagnostic_variables') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7.3. Missing Processes\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nList any key processes missing in this model configuration? Provide full details where this affects the CMIP6 diagnostic sea ice variables?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.assumptions.missing_processes') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8. Key Properties --&gt; Conservation\nConservation in the sea ice component\n8.1. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nProvide a general description of conservation methodology.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.conservation.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.2. Properties\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nProperties conserved in sea ice by the numerical schemes.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.conservation.properties') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Energy\" \n# \"Mass\" \n# \"Salt\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "8.3. Budget\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nFor each conserved property, specify the output variables which close the related budgets. as a comma separated list. For example: Conserved property, variable1, variable2, variable3", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.conservation.budget') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.4. Was Flux Correction Used\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDoes conservation involved flux correction?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.conservation.was_flux_correction_used') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "8.5. Corrected Conserved Prognostic Variables\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nList any variables which are conserved by more than the numerical scheme alone.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.conservation.corrected_conserved_prognostic_variables') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9. Grid --&gt; Discretisation --&gt; Horizontal\nSea ice discretisation in the horizontal\n9.1. 
Grid\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nGrid on which sea ice is horizontally discretised?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Ocean grid\" \n# \"Atmosphere Grid\" \n# \"Own Grid\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "9.2. Grid Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the type of sea ice grid?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Structured grid\" \n# \"Unstructured grid\" \n# \"Adaptive grid\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "9.3. Scheme\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the advection scheme?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.horizontal.scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Finite differences\" \n# \"Finite elements\" \n# \"Finite volumes\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "9.4. Thermodynamics Time Step\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the time step in the sea ice model thermodynamic component, in seconds?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.horizontal.thermodynamics_time_step') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "9.5. Dynamics Time Step\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the time step in the sea ice model dynamic component, in seconds?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.horizontal.dynamics_time_step') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "9.6. Additional Details\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nSpecify any additional horizontal discretisation details.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.horizontal.additional_details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "10. Grid --&gt; Discretisation --&gt; Vertical\nSea ice vertical properties\n10.1. Layering\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nWhat type of sea ice vertical layers are implemented for purposes of thermodynamic calculations?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.vertical.layering') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Zero-layer\" \n# \"Two-layers\" \n# \"Multi-layers\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "10.2. Number Of Layers\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIf using multi-layers specify how many.", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.seaice.grid.discretisation.vertical.number_of_layers') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "10.3. Additional Details\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nSpecify any additional vertical grid details.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.vertical.additional_details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "11. Grid --&gt; Seaice Categories\nWhat method is used to represent sea ice categories?\n11.1. Has Mulitple Categories\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nSet to true if the sea ice model has multiple sea ice categories.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.seaice_categories.has_mulitple_categories') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "11.2. Number Of Categories\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIf using sea ice categories specify how many.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.seaice_categories.number_of_categories') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "11.3. Category Limits\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIf using sea ice categories specify each of the category limits.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.seaice_categories.category_limits') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "11.4. Ice Thickness Distribution Scheme\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the sea ice thickness distribution scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.seaice_categories.ice_thickness_distribution_scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "11.5. Other\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf the sea ice model does not use sea ice categories specify any additional details. For example, models that parameterise the ice thickness distribution (ITD), i.e. there is no explicit ITD, but a distribution is assumed and fluxes are computed accordingly.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.seaice_categories.other') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "12. Grid --&gt; Snow On Seaice\nSnow on sea ice details\n12.1. Has Snow On Ice\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs snow on ice represented in this model?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.snow_on_seaice.has_snow_on_ice') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "12.2. Number Of Snow Levels\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nNumber of vertical levels of snow on ice?", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.seaice.grid.snow_on_seaice.number_of_snow_levels') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "12.3. Snow Fraction\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe how the snow fraction on sea ice is determined", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.snow_on_seaice.snow_fraction') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "12.4. Additional Details\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nSpecify any additional details related to snow on ice.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.snow_on_seaice.additional_details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "13. Dynamics\nSea Ice Dynamics\n13.1. Horizontal Transport\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the method of horizontal advection of sea ice?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.dynamics.horizontal_transport') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Incremental Re-mapping\" \n# \"Prather\" \n# \"Eulerian\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "13.2. Transport In Thickness Space\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the method of sea ice transport in thickness space (i.e. in thickness categories)?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.dynamics.transport_in_thickness_space') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Incremental Re-mapping\" \n# \"Prather\" \n# \"Eulerian\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "13.3. Ice Strength Formulation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhich method of sea ice strength formulation is used?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.dynamics.ice_strength_formulation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Hibler 1979\" \n# \"Rothrock 1975\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "13.4. Redistribution\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nWhich processes can redistribute sea ice (including thickness)?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.dynamics.redistribution') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Rafting\" \n# \"Ridging\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "13.5. Rheology\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nRheology, what is the ice deformation formulation?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.dynamics.rheology') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Free-drift\" \n# \"Mohr-Coloumb\" \n# \"Visco-plastic\" \n# \"Elastic-visco-plastic\" \n# \"Elastic-anisotropic-plastic\" \n# \"Granular\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "14. Thermodynamics --&gt; Energy\nProcesses related to energy in sea ice thermodynamics\n14.1. 
Enthalpy Formulation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the energy formulation?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.energy.enthalpy_formulation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Pure ice latent heat (Semtner 0-layer)\" \n# \"Pure ice latent and sensible heat\" \n# \"Pure ice latent and sensible heat + brine heat reservoir (Semtner 3-layer)\" \n# \"Pure ice latent and sensible heat + explicit brine inclusions (Bitz and Lipscomb)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "14.2. Thermal Conductivity\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat type of thermal conductivity is used?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.energy.thermal_conductivity') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Pure ice\" \n# \"Saline ice\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "14.3. Heat Diffusion\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the method of heat diffusion?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.energy.heat_diffusion') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Conduction fluxes\" \n# \"Conduction and radiation heat fluxes\" \n# \"Conduction, radiation and latent heat transport\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "14.4. Basal Heat Flux\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nMethod by which basal ocean heat flux is handled?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.energy.basal_heat_flux') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Heat Reservoir\" \n# \"Thermal Fixed Salinity\" \n# \"Thermal Varying Salinity\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "14.5. Fixed Salinity Value\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf you have selected {Thermal properties depend on S-T (with fixed salinity)}, supply fixed salinity value for each sea ice layer.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.energy.fixed_salinity_value') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "14.6. Heat Content Of Precipitation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the method by which the heat content of precipitation is handled.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.energy.heat_content_of_precipitation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "14.7. Precipitation Effects On Salinity\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf precipitation (freshwater) that falls on sea ice affects the ocean surface salinity please provide further details.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.energy.precipitation_effects_on_salinity') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "15. 
Thermodynamics --&gt; Mass\nProcesses related to mass in sea ice thermodynamics\n15.1. New Ice Formation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the method by which new sea ice is formed in open water.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.mass.new_ice_formation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "15.2. Ice Vertical Growth And Melt\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the method that governs the vertical growth and melt of sea ice.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.mass.ice_vertical_growth_and_melt') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "15.3. Ice Lateral Melting\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the method of sea ice lateral melting?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.mass.ice_lateral_melting') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Floe-size dependent (Bitz et al 2001)\" \n# \"Virtual thin ice melting (for single-category)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "15.4. Ice Surface Sublimation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the method that governs sea ice surface sublimation.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.mass.ice_surface_sublimation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "15.5. Frazil Ice\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the method of frazil ice formation.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.mass.frazil_ice') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "16. Thermodynamics --&gt; Salt\nProcesses related to salt in sea ice thermodynamics.\n16.1. Has Multiple Sea Ice Salinities\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDoes the sea ice model use two different salinities: one for thermodynamic calculations; and one for the salt budget?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.salt.has_multiple_sea_ice_salinities') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "16.2. Sea Ice Salinity Thermal Impacts\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDoes sea ice salinity impact the thermal properties of sea ice?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.salt.sea_ice_salinity_thermal_impacts') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "17. Thermodynamics --&gt; Salt --&gt; Mass Transport\nMass transport of salt\n17.1. Salinity Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHow is salinity determined in the mass transport of salt calculation?", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.salinity_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Constant\" \n# \"Prescribed salinity profile\" \n# \"Prognostic salinity profile\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "17.2. Constant Salinity Value\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf using a constant salinity value specify this value in PSU?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.constant_salinity_value') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "17.3. Additional Details\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the salinity profile used.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.additional_details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "18. Thermodynamics --&gt; Salt --&gt; Thermodynamics\nSalt thermodynamics\n18.1. Salinity Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHow is salinity determined in the thermodynamic calculation?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.salinity_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Constant\" \n# \"Prescribed salinity profile\" \n# \"Prognostic salinity profile\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "18.2. Constant Salinity Value\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf using a constant salinity value specify this value in PSU?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.constant_salinity_value') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "18.3. Additional Details\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the salinity profile used.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.additional_details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "19. Thermodynamics --&gt; Ice Thickness Distribution\nIce thickness distribution details.\n19.1. Representation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHow is the sea ice thickness distribution represented?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.ice_thickness_distribution.representation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Explicit\" \n# \"Virtual (enhancement of thermal conductivity, thin ice melting)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "20. Thermodynamics --&gt; Ice Floe Size Distribution\nIce floe-size distribution details.\n20.1. Representation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHow is the sea ice floe-size represented?", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.representation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Explicit\" \n# \"Parameterised\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "20.2. Additional Details\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nPlease provide further details on any parameterisation of floe-size.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.additional_details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "21. Thermodynamics --&gt; Melt Ponds\nCharacteristics of melt ponds.\n21.1. Are Included\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nAre melt ponds included in the sea ice model?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.are_included') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "21.2. Formulation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat method of melt pond formulation is used?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.formulation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Flocco and Feltham (2010)\" \n# \"Level-ice melt ponds\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "21.3. Impacts\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nWhat do melt ponds have an impact on?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.impacts') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Albedo\" \n# \"Freshwater\" \n# \"Heat\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "22. Thermodynamics --&gt; Snow Processes\nThermodynamic processes in snow on sea ice\n22.1. Has Snow Aging\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nSet to True if the sea ice model has a snow aging scheme.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_aging') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "22.2. Snow Aging Scheme\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the snow aging scheme.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_aging_scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "22.3. Has Snow Ice Formation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nSet to True if the sea ice model has snow ice formation.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_ice_formation') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "22.4. 
Snow Ice Formation Scheme\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the snow ice formation scheme.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_ice_formation_scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "22.5. Redistribution\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the impact of ridging on snow cover?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.snow_processes.redistribution') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "22.6. Heat Diffusion\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the heat diffusion through snow methodology in sea ice thermodynamics?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.snow_processes.heat_diffusion') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Single-layered heat diffusion\" \n# \"Multi-layered heat diffusion\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "23. Radiative Processes\nSea Ice Radiative Processes\n23.1. Surface Albedo\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nMethod used to handle surface albedo.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.radiative_processes.surface_albedo') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Delta-Eddington\" \n# \"Parameterized\" \n# \"Multi-band albedo\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "23.2. Ice Radiation Transmission\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nMethod by which solar radiation through sea ice is handled.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.radiative_processes.ice_radiation_transmission') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Delta-Eddington\" \n# \"Exponential attenuation\" \n# \"Ice radiation transmission per category\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "©2017 ES-DOC" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
google/starthinker
colabs/cm360_segmentology.ipynb
apache-2.0
[ "CM360 Segmentology\nCM360 funnel analysis using Census data.\nLicense\nCopyright 2020 Google LLC,\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\nhttps://www.apache.org/licenses/LICENSE-2.0\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\nDisclaimer\nThis is not an officially supported Google product. It is a reference implementation. There is absolutely NO WARRANTY provided for using this code. The code is Apache Licensed and CAN BE fully modified, white labeled, and disassembled by your team.\nThis code generated (see starthinker/scripts for possible source):\n - Command: \"python starthinker_ui/manage.py colab\"\n - Command: \"python starthinker/tools/colab.py [JSON RECIPE]\"\n1. Install Dependencies\nFirst install the libraries needed to execute recipes, this only needs to be done once, then click play.", "!pip install git+https://github.com/google/starthinker\n", "2. Set Configuration\nThis code is required to initialize the project. Fill in required fields and press play.\n\nIf the recipe uses a Google Cloud Project:\n\nSet the configuration project value to the project identifier from these instructions.\n\n\nIf the recipe has auth set to user:\n\nIf you have user credentials:\nSet the configuration user value to your user credentials JSON.\n\n\n\nIf you DO NOT have user credentials:\n\nSet the configuration client value to downloaded client credentials.\n\n\n\nIf the recipe has auth set to service:\n\nSet the configuration service value to downloaded service credentials.", "from starthinker.util.configuration import Configuration\n\n\nCONFIG = Configuration(\n project=\"\",\n client={},\n service={},\n user=\"/content/user.json\",\n verbose=True\n)\n\n", "3. Enter CM360 Segmentology Recipe Parameters\n\nWait for BigQuery->->->Census_Join to be created.\nJoin the StarThinker Assets Group to access the following assets\nCopy CM360 Segmentology Sample. Leave the Data Source as is, you will change it in the next step.\nClick Edit Connection, and change to BigQuery->->->Census_Join.\nOr give these intructions to the client.\nModify the values below for your use case, can be done multiple times, then click play.", "FIELDS = {\n 'account':'',\n 'auth_read':'user', # Credentials used for reading data.\n 'auth_write':'service', # Authorization used for writing data.\n 'recipe_name':'', # Name of report, not needed if ID used.\n 'date_range':'LAST_365_DAYS', # Timeframe to run report for.\n 'recipe_slug':'', # Name of Google BigQuery dataset to create.\n 'advertisers':[], # Comma delimited list of CM360 advertiser ids.\n}\n\nprint(\"Parameters Set To: %s\" % FIELDS)\n", "4. 
Execute CM360 Segmentology\nThis does NOT need to be modified unless you are changing the recipe, click play.", "from starthinker.util.configuration import execute\nfrom starthinker.util.recipe import json_set_fields\n\nTASKS = [\n {\n 'dataset':{\n 'description':'Create a dataset for bigquery tables.',\n 'hour':[\n 4\n ],\n 'auth':{'field':{'name':'auth_write','kind':'authentication','order':1,'default':'service','description':'Credentials used for writing data.'}},\n 'dataset':{'field':{'name':'recipe_slug','kind':'string','suffix':'_Segmentology','description':'Place where tables will be created in BigQuery.'}}\n }\n },\n {\n 'bigquery':{\n 'auth':{'field':{'name':'auth_write','kind':'authentication','order':1,'default':'service','description':'Credentials used for writing function.'}},\n 'function':'Pearson Significance Test',\n 'to':{\n 'dataset':{'field':{'name':'recipe_slug','kind':'string','suffix':'_Segmentology','order':4,'default':'','description':'Name of Google BigQuery dataset to create.'}}\n }\n }\n },\n {\n 'google_api':{\n 'auth':'user',\n 'api':'dfareporting',\n 'version':'v3.4',\n 'function':'accounts.get',\n 'kwargs':{\n 'id':{'field':{'name':'account','kind':'integer','order':5,'default':'','description':'Campaign Manager Account ID'}},\n 'fields':'id,name'\n },\n 'results':{\n 'bigquery':{\n 'auth':{'field':{'name':'auth_write','kind':'authentication','order':1,'default':'service','description':'Credentials used for writing function.'}},\n 'dataset':{'field':{'name':'recipe_slug','kind':'string','suffix':'_Segmentology','order':4,'default':'','description':'Name of Google BigQuery dataset to create.'}},\n 'table':'CM360_Account'\n }\n }\n }\n },\n {\n 'dcm':{\n 'auth':{'field':{'name':'auth_read','kind':'authentication','order':0,'default':'user','description':'Credentials used for reading data.'}},\n 'report':{\n 'filters':{\n 'advertiser':{\n 'values':{'field':{'name':'advertisers','kind':'integer_list','order':6,'default':[],'description':'Comma delimited list of CM360 advertiser ids.'}}\n }\n },\n 'account':{'field':{'name':'account','kind':'string','order':5,'default':'','description':'Campaign Manager Account ID'}},\n 'body':{\n 'name':{'field':{'name':'recipe_name','kind':'string','suffix':' Segmentology','description':'The report name.','default':''}},\n 'criteria':{\n 'dateRange':{\n 'kind':'dfareporting#dateRange',\n 'relativeDateRange':{'field':{'name':'date_range','kind':'choice','order':3,'default':'LAST_365_DAYS','choices':['LAST_7_DAYS','LAST_14_DAYS','LAST_30_DAYS','LAST_365_DAYS','LAST_60_DAYS','LAST_7_DAYS','LAST_90_DAYS','LAST_24_MONTHS','MONTH_TO_DATE','PREVIOUS_MONTH','PREVIOUS_QUARTER','PREVIOUS_WEEK','PREVIOUS_YEAR','QUARTER_TO_DATE','WEEK_TO_DATE','YEAR_TO_DATE'],'description':'Timeframe to run report for.'}}\n },\n 'dimensions':[\n {\n 'kind':'dfareporting#sortedDimension',\n 'name':'advertiserId'\n },\n {\n 'kind':'dfareporting#sortedDimension',\n 'name':'advertiser'\n },\n {\n 'kind':'dfareporting#sortedDimension',\n 'name':'zipCode'\n }\n ],\n 'metricNames':[\n 'impressions',\n 'clicks',\n 'totalConversions'\n ]\n },\n 'type':'STANDARD',\n 'delivery':{\n 'emailOwner':False\n },\n 'format':'CSV'\n }\n }\n }\n },\n {\n 'dcm':{\n 'auth':{'field':{'name':'auth_read','kind':'authentication','order':0,'default':'user','description':'Credentials used for reading data.'}},\n 'report':{\n 'account':{'field':{'name':'account','kind':'string','default':''}},\n 'name':{'field':{'name':'recipe_name','kind':'string','order':3,'suffix':' 
Segmentology','default':'','description':'Name of report, not needed if ID used.'}}\n },\n 'out':{\n 'bigquery':{\n 'auth':{'field':{'name':'auth_write','kind':'authentication','order':1,'default':'service','description':'Authorization used for writing data.'}},\n 'dataset':{'field':{'name':'recipe_slug','kind':'string','suffix':'_Segmentology','order':4,'default':'','description':'Name of Google BigQuery dataset to create.'}},\n 'table':'CM360_KPI',\n 'header':True\n }\n }\n }\n },\n {\n 'bigquery':{\n 'auth':{'field':{'name':'auth_write','kind':'authentication','order':1,'default':'service','description':'Authorization used for writing data.'}},\n 'from':{\n 'query':'SELECT Id AS Partner_Id, Name AS Partner, Advertiser_Id, Advertiser, Zip_Postal_Code AS Zip, SAFE_DIVIDE(Impressions, SUM(Impressions) OVER(PARTITION BY Advertiser_Id)) AS Impression, SAFE_DIVIDE(Clicks, Impressions) AS Click, SAFE_DIVIDE(Total_Conversions, Impressions) AS Conversion, Impressions AS Impressions FROM `{dataset}.CM360_KPI` CROSS JOIN `{dataset}.CM360_Account` ',\n 'parameters':{\n 'dataset':{'field':{'name':'recipe_slug','kind':'string','suffix':'_Segmentology','description':'Place where tables will be created in BigQuery.'}}\n },\n 'legacy':False\n },\n 'to':{\n 'dataset':{'field':{'name':'recipe_slug','kind':'string','suffix':'_Segmentology','description':'Place where tables will be written in BigQuery.'}},\n 'view':'CM360_KPI_Normalized'\n }\n }\n },\n {\n 'census':{\n 'auth':{'field':{'name':'auth_write','kind':'authentication','order':1,'default':'service','description':'Authorization used for writing data.'}},\n 'normalize':{\n 'census_geography':'zip_codes',\n 'census_year':'2018',\n 'census_span':'5yr'\n },\n 'to':{\n 'dataset':{'field':{'name':'recipe_slug','kind':'string','suffix':'_Segmentology','order':4,'default':'','description':'Name of Google BigQuery dataset to create.'}},\n 'type':'view'\n }\n }\n },\n {\n 'census':{\n 'auth':{'field':{'name':'auth_write','kind':'authentication','order':1,'default':'service','description':'Authorization used for writing data.'}},\n 'correlate':{\n 'join':'Zip',\n 'pass':[\n 'Partner_Id',\n 'Partner',\n 'Advertiser_Id',\n 'Advertiser'\n ],\n 'sum':[\n 'Impressions'\n ],\n 'correlate':[\n 'Impression',\n 'Click',\n 'Conversion'\n ],\n 'dataset':{'field':{'name':'recipe_slug','kind':'string','suffix':'_Segmentology','order':4,'default':'','description':'Name of Google BigQuery dataset to create.'}},\n 'table':'CM360_KPI_Normalized',\n 'significance':80\n },\n 'to':{\n 'dataset':{'field':{'name':'recipe_slug','kind':'string','suffix':'_Segmentology','order':4,'default':'','description':'Name of Google BigQuery dataset to create.'}},\n 'type':'view'\n }\n }\n }\n]\n\njson_set_fields(TASKS, FIELDS)\n\nexecute(CONFIG, TASKS, force=True)\n" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
aborgher/Main-useful-functions-for-ML
.ipynb_checkpoints/NLP-checkpoint.ipynb
gpl-3.0
[ "Table of Contents\n<p><div class=\"lev1 toc-item\"><a href=\"#Correction-with-enchant\" data-toc-modified-id=\"Correction-with-enchant-1\"><span class=\"toc-item-num\">1&nbsp;&nbsp;</span>Correction with enchant</a></div><div class=\"lev2 toc-item\"><a href=\"#Add-your-own-dictionary\" data-toc-modified-id=\"Add-your-own-dictionary-11\"><span class=\"toc-item-num\">1.1&nbsp;&nbsp;</span>Add your own dictionary</a></div><div class=\"lev2 toc-item\"><a href=\"#check-entire-phrase\" data-toc-modified-id=\"check-entire-phrase-12\"><span class=\"toc-item-num\">1.2&nbsp;&nbsp;</span>check entire phrase</a></div><div class=\"lev2 toc-item\"><a href=\"#tokenization\" data-toc-modified-id=\"tokenization-13\"><span class=\"toc-item-num\">1.3&nbsp;&nbsp;</span>tokenization</a></div><div class=\"lev1 toc-item\"><a href=\"#Word2vec\" data-toc-modified-id=\"Word2vec-2\"><span class=\"toc-item-num\">2&nbsp;&nbsp;</span>Word2vec</a></div><div class=\"lev1 toc-item\"><a href=\"#Translate-using-google-translate\" data-toc-modified-id=\"Translate-using-google-translate-3\"><span class=\"toc-item-num\">3&nbsp;&nbsp;</span>Translate using google translate</a></div><div class=\"lev1 toc-item\"><a href=\"#TreeTagger-usage-to-tag-an-italian-(or-other-languages)-sentence\" data-toc-modified-id=\"TreeTagger-usage-to-tag-an-italian-(or-other-languages)-sentence-4\"><span class=\"toc-item-num\">4&nbsp;&nbsp;</span>TreeTagger usage to tag an italian (or other languages) sentence</a></div>\n\n# Correction with enchant\n- install via pip install pyenchant\n- add ita dictionary: sudo apt-get install myspell-it myspell-es\n- Tutorial at: http://pythonhosted.org/pyenchant/tutorial.html", "import enchant\n\n# The underlying programming model provided by the Enchant library is based on the notion of Providers. \n# A provider is a piece of code that provides spell-checking services which Enchant can use to perform its work. \n# Different providers exist for performing spellchecking using different frameworks - \n# for example there is an aspell provider and a MySpell provider.\n## no need to check brokers while running enchant, this is just a simple check if all is installed\nb = enchant.Broker()\nprint(b.describe())\nb.list_dicts()\n\nenchant.list_languages()\n\nd = enchant.Dict(\"it_IT\")\n\nd.check('Giulia'), d.check('pappapero')\n\nprint( d.suggest(\"potreima\") )\nprint( d.suggest(\"marema\") )\nprint( d.suggest(\"se metto troppe parole lo impallo\") )\nprint( d.suggest(\"van no\") )\nprint( d.suggest(\"due parole\") )", "Add your own dictionary", "# Dict objects can also be used to check words against a custom list of correctly-spelled words \n# known as a Personal Word List. This is simply a file listing the words to be considered, one word per line. 
\n# The following example creates a Dict object for the personal word list stored in “mywords.txt”:\npwl = enchant.request_pwl_dict(\"../Data_nlp/mywords.txt\")\n\npwl.check('pappapero'), pwl.suggest('cittin'), pwl.check('altro')\n\n# PyEnchant also provides the class DictWithPWL which can be used to combine a language dictionary \n# and a personal word list file:\nd2 = enchant.DictWithPWL(\"it_IT\", \"../Data_nlp/mywords.txt\")\n\nd2.check('altro') & d2.check('pappapero'), d2.suggest('cittin')\n\n%%timeit\nd2.suggest('poliza')", "check entire phrase", "from enchant.checker import SpellChecker\nchkr = SpellChecker(\"it_IT\")\n\nchkr.set_text(\"questo è un picclo esmpio per dire cm funziona\")\nfor err in chkr:\n print(err.word)\n print(chkr.suggest(err.word))\n\nprint(chkr.word, chkr.wordpos)\n\nchkr.replace('pippo')\nchkr.get_text()", "tokenization\nAs explained above, the module enchant.tokenize provides the ability to split text into its component words. The current implementation is based only on the rules for the English language, and so might not be completely suitable for your language of choice. Fortunately, it is straightforward to extend the functionality of this module.\nTo implement a new tokenization routine for the language TAG, simply create a class/function “tokenize” within the module “enchant.tokenize.TAG”. This function will automatically be detected by the module’s get_tokenizer function and used when appropriate. The easiest way to accomplish this is to copy the module “enchant.tokenize.en” and modify it to suit your needs.", "from enchant.tokenize import get_tokenizer\ntknzr = get_tokenizer(\"en_US\") # not tak for it_IT up to now\n[w for w in tknzr(\"this is some simple text\")]\n\nfrom enchant.tokenize import get_tokenizer, HTMLChunker\ntknzr = get_tokenizer(\"en_US\")\n[w for w in tknzr(\"this is <span class='important'>really important</span> text\")]\n\ntknzr = get_tokenizer(\"en_US\",chunkers=(HTMLChunker,))\n[w for w in tknzr(\"this is <span class='important'>really important</span> text\")]\n\nfrom enchant.tokenize import get_tokenizer, EmailFilter\ntknzr = get_tokenizer(\"en_US\")\n[w for w in tknzr(\"send an email to [email protected] please\")]\n\ntknzr = get_tokenizer(\"en_US\", filters = [EmailFilter])\n[w for w in tknzr(\"send an email to [email protected] please\")]", "Other modules:\n- CmdLineChecker\nThe module enchant.checker.CmdLineChecker provides the class CmdLineChecker which can be used to interactively check the spelling of some text. It uses standard input and standard output to interact with the user through a command-line interface. The code below shows how to create and use this class from within a python application, along with a short sample checking session:\n\nwxSpellCheckerDialog\n\nThe module enchant.checker.wxSpellCheckerDialog provides the class wxSpellCheckerDialog which can be used to interactively check the spelling of some text. 
The code below shows how to create and use such a dialog from within a wxPython application.\nWord2vec\n\npip install gensim\npip install pyemd\nhttps://radimrehurek.com/gensim/models/word2vec.html", "import gensim, logging\nfrom gensim.models import Word2Vec\n\nmodel = gensim.models.KeyedVectors.load_word2vec_format(\n '../Data_nlp/GoogleNews-vectors-negative300.bin.gz', binary=True)\n\nmodel.doesnt_match(\"breakfast brian dinner lunch\".split())\n\n# give text with w1 w2 your_distance to check if model and w1-w2 have give the same distance\nmodel.evaluate_word_pairs() \n\nlen(model.index2word)\n\n# check accuracy against a premade grouped words\nquestions_words = model.accuracy('../Data_nlp/word2vec/trunk/questions-words.txt')\nphrases_words = model.accuracy('../Data_nlp/word2vec/trunk/questions-phrases.txt')\n\nquestions_words[4]['incorrect']\n\nprint( model.n_similarity(['pasta'], ['spaghetti']) )\nprint( model.n_similarity(['pasta'], ['tomato']) )\nprint( model.n_similarity(['pasta'], ['car']) )\nprint( model.n_similarity(['cat'], ['dog']) )\n\nmodel.similar_by_vector( model.word_vec('welcome') )\n\nmodel.similar_by_word('welcome')\n\nmodel.syn0[4,]\n\nmodel.index2word[4]\n\nmodel.word_vec('is')\n\nmodel.syn0norm[4,]\n\nmodel.vector_size\n\nimport numpy as np\nmodel.similar_by_vector( (model.word_vec('Goofy') + model.word_vec('Minni'))/2 )\n\nimport pyemd\n# This method only works if `pyemd` is installed (can be installed via pip, but requires a C compiler).\n\nsentence_obama = 'Obama speaks to the media in Illinois'.lower().split()\nsentence_president = 'The president greets the press in Chicago'.lower().split()\n\n# Remove their stopwords.\nimport nltk\nstopwords = nltk.corpus.stopwords.words('english')\nsentence_obama = [w for w in sentence_obama if w not in stopwords]\nsentence_president = [w for w in sentence_president if w not in stopwords]\n\n# Compute WMD.\ndistance = model.wmdistance(sentence_obama, sentence_president)\nprint(distance)\n\nimport nltk\nstopwords = nltk.corpus.stopwords.words('english')\n\ndef sentence_distance(s1, s2):\n sentence_obama = [w for w in s1.split() if w not in stopwords]\n sentence_president = [w for w in s2.split() if w not in stopwords]\n print(sentence_obama, sentence_president, sep='\\t')\n print(model.wmdistance(sentence_obama, sentence_president), end='\\n\\n')\n\nsentence_distance('I run every day in the morning', 'I like football')\nsentence_distance('I run every day in the morning', 'I run since I was born')\nsentence_distance('I run every day in the morning', 'you are idiot')\nsentence_distance('I run every day in the morning', 'Are you idiot?')\nsentence_distance('I run every day in the morning', 'Is it possible to die?')\nsentence_distance('I run every day in the morning', 'Is it possible to die')\nsentence_distance('I run every day in the morning', 'I run every day')\nsentence_distance('I run every day in the morning', 'I eat every day')\nsentence_distance('I run every day in the morning', 'I have breakfast in the morning')\nsentence_distance('I run every day in the morning', 'I have breakfast every day in the morning')\nsentence_distance('I run every day in the morning', 'Each day I run')\nsentence_distance('I run every day in the morning', 'I run every day in the morning')\n\nsentence_distance('I run every day in the morning', 'Each day I run')\nsentence_distance('I run every day in the morning', 'Each I run')\nsentence_distance('I run every day in the morning', 'Each day run')\nsentence_distance('I run every day in the morning', 
'Each day I')\nsentence_distance('I every day in the morning', 'Each day I run')\nsentence_distance('I run day in the morning', 'Each day I run')\nsentence_distance('I run every in morning', 'Each day I run')\nsentence_distance('I run every in', 'Each day I run')\n\ndef get_vect(w):\n try:\n return model.word_vec(w)\n except KeyError:\n return np.zeros(model.vector_size)\n \ndef calc_avg(s):\n ws = [get_vect(w) for w in s.split() if w not in stopwords]\n avg_vect = sum(ws)/len(ws)\n return avg_vect\n\n\nfrom scipy.spatial import distance\ndef get_euclidean(s1, s2):\n return distance.euclidean(calc_avg(s1), calc_avg(s2))\n\n# same questions\ns1 = 'Astrology: I am a Capricorn Sun Cap moon and cap rising...what does that say about me?'\ns2 = \"I'm a triple Capricorn (Sun, Moon and ascendant in Capricorn) What does this say about me?\"\nsentence_distance(s1, s2)\nprint(get_euclidean(s1, s2))\n\n# same questions as above without punctuations\ns1 = 'Astrology I am a Capricorn Sun Cap moon and cap rising what does that say about me'\ns2 = \"I am a triple Capricorn Sun Moon and ascendant in Capricorn What does this say about me\"\nsentence_distance(s1, s2)\nprint(get_euclidean(s1, s2))\n\n# same questions\ns1 = 'What is best way to make money online'\ns2 = 'What is best way to ask for money online?'\nsentence_distance(s1,s2)\nprint(get_euclidean(s1, s2))\n\n# different questions\ns1 = 'How did Darth Vader fought Darth Maul in Star Wars Legends?'\ns2 = 'Does Quora have a character limit for profile descriptions?'\nsentence_distance(s1,s2)\nprint(get_euclidean(s1, s2))\n\n# the order of the words doesn't change the distanace bewteeen the two phrases\ns1ws = [w for w in s1.split() if w not in stopwords]\ns2ws = [w for w in s2.split() if w not in stopwords]\nprint(model.wmdistance(s1ws, s2ws) )\nprint(model.wmdistance(s1ws[::-1], s2ws) )\nprint(model.wmdistance(s1ws, s2ws[::-1]) )\nprint(model.wmdistance(s1ws[3:]+s1ws[0:3], s2ws[::-1]) )", "conclusion:\n- distance work well\n- the order of the words is not taken into account\nTranslate using google translate\n\nhttps://github.com/ssut/py-googletrans\nshould be free and unlimted, interned connection required\npip install googletrans", "from googletrans import Translator\n\no = open(\"../AliceNelPaeseDelleMeraviglie.txt\")\nall = ''\nfor l in o: all += l\n\ntranslator = Translator()\n\nfor i in range(42, 43, 1):\n print(all[i * 1000:i * 1000 + 1000], end='\\n\\n')\n print(translator.translate(all[i * 1000:i * 1000 + 1000], dest='en').text)\n\n## if language is not passed it is guessed, so it can detect a language\nfrase = \"Ciao Giulia, ti va un gelato?\"\ndet = translator.detect(frase)\nprint(\"Languge:\", det.lang, \" with confidence:\", det.confidence)\n\n# command line usage, but it seems to don't work to me\n!translate \"veritas lux mea\" -s la -d en\n\ntranslations = translator.translate(\n ['The quick brown fox', 'jumps over', 'the lazy dog'], dest='ko')\nfor translation in translations:\n print(translation.origin, ' -> ', translation.text)\n\nphrase = translator.translate(frase, 'en')\nphrase.origin, phrase.text, phrase.src, phrase.pronunciation, phrase.dest", "TreeTagger usage to tag an italian (or other languages) sentence\nHow To install:\n- nltk need to be already installed and working\n- follow the instruction from http://www.cis.uni-muenchen.de/~schmid/tools/TreeTagger/ \n- run TreeTagger on terminal (echo 'Ciao Giulia come stai?' 
| tree-tagger-italian) to see if everything is working\n- download the github to get the python support from: https://github.com/miotto/treetagger-python\n- run /home/ale/anaconda3/bin/python setup.py install and everything should work (note that you need to specify which python you want, the default is python2)\nInfos:\n- The maximum character limit on a single text is 15k.\n- this API does not guarantee that the library would work properly at all times\n- for a more stability API use the non-free https://cloud.google.com/translate/docs/\n- If you get HTTP 5xx error or errors like #6, it's probably because Google has banned your client IP address", "from treetagger import TreeTagger\ntt = TreeTagger(language='english')\ntt.tag('What is the airspeed of an unladen swallow?')\n\ntt = TreeTagger(language='italian')\ntt.tag('Proviamo a vedere un pò se funziona bene questo tagger')" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
ES-DOC/esdoc-jupyterhub
notebooks/nerc/cmip6/models/ukesm1-0-ll/aerosol.ipynb
gpl-3.0
[ "ES-DOC CMIP6 Model Properties - Aerosol\nMIP Era: CMIP6\nInstitute: NERC\nSource ID: UKESM1-0-LL\nTopic: Aerosol\nSub-Topics: Transport, Emissions, Concentrations, Optical Radiative Properties, Model. \nProperties: 69 (37 required)\nModel descriptions: Model description details\nInitialized From: -- \nNotebook Help: Goto notebook help page\nNotebook Initialised: 2018-02-15 16:54:26\nDocument Setup\nIMPORTANT: to be executed each time you run the notebook", "# DO NOT EDIT ! \nfrom pyesdoc.ipython.model_topic import NotebookOutput \n\n# DO NOT EDIT ! \nDOC = NotebookOutput('cmip6', 'nerc', 'ukesm1-0-ll', 'aerosol')", "Document Authors\nSet document authors", "# Set as follows: DOC.set_author(\"name\", \"email\") \n# TODO - please enter value(s)", "Document Contributors\nSpecify document contributors", "# Set as follows: DOC.set_contributor(\"name\", \"email\") \n# TODO - please enter value(s)", "Document Publication\nSpecify document publication status", "# Set publication status: \n# 0=do not publish, 1=publish. \nDOC.set_publication_status(0)", "Document Table of Contents\n1. Key Properties\n2. Key Properties --&gt; Software Properties\n3. Key Properties --&gt; Timestep Framework\n4. Key Properties --&gt; Meteorological Forcings\n5. Key Properties --&gt; Resolution\n6. Key Properties --&gt; Tuning Applied\n7. Transport\n8. Emissions\n9. Concentrations\n10. Optical Radiative Properties\n11. Optical Radiative Properties --&gt; Absorption\n12. Optical Radiative Properties --&gt; Mixtures\n13. Optical Radiative Properties --&gt; Impact Of H2o\n14. Optical Radiative Properties --&gt; Radiative Scheme\n15. Optical Radiative Properties --&gt; Cloud Interactions\n16. Model \n1. Key Properties\nKey properties of the aerosol model\n1.1. Model Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of aerosol model.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.model_overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.2. Model Name\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nName of aerosol model code", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.model_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.3. Scheme Scope\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nAtmospheric domains covered by the aerosol model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.scheme_scope') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"troposhere\" \n# \"stratosphere\" \n# \"mesosphere\" \n# \"mesosphere\" \n# \"whole atmosphere\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "1.4. Basic Approximations\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nBasic approximations made in the aerosol model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.basic_approximations') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.5. Prognostic Variables Form\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nPrognostic variables in the aerosol model", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.aerosol.key_properties.prognostic_variables_form') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"3D mass/volume ratio for aerosols\" \n# \"3D number concenttration for aerosols\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "1.6. Number Of Tracers\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nNumber of tracers in the aerosol model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.number_of_tracers') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "1.7. Family Approach\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nAre aerosol calculations generalized into families of species?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.family_approach') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "2. Key Properties --&gt; Software Properties\nSoftware properties of aerosol code\n2.1. Repository\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nLocation of code for this component.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.software_properties.repository') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "2.2. Code Version\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nCode version identifier.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.software_properties.code_version') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "2.3. Code Languages\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nCode language(s).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.software_properties.code_languages') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "3. Key Properties --&gt; Timestep Framework\nPhysical properties of seawater in ocean\n3.1. Method\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nMathematical method deployed to solve the time evolution of the prognostic variables", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.timestep_framework.method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Uses atmospheric chemistry time stepping\" \n# \"Specific timestepping (operator splitting)\" \n# \"Specific timestepping (integrated)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "3.2. Split Operator Advection Timestep\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nTimestep for aerosol advection (in seconds)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_advection_timestep') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "3.3. Split Operator Physical Timestep\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nTimestep for aerosol physics (in seconds).", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_physical_timestep') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "3.4. Integrated Timestep\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTimestep for the aerosol model (in seconds)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_timestep') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "3.5. Integrated Scheme Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nSpecify the type of timestep scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_scheme_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Explicit\" \n# \"Implicit\" \n# \"Semi-implicit\" \n# \"Semi-analytic\" \n# \"Impact solver\" \n# \"Back Euler\" \n# \"Newton Raphson\" \n# \"Rosenbrock\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "4. Key Properties --&gt; Meteorological Forcings\n**\n4.1. Variables 3D\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nThree dimensionsal forcing variables, e.g. U, V, W, T, Q, P, conventive mass flux", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_3D') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4.2. Variables 2D\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nTwo dimensionsal forcing variables, e.g. land-sea mask definition", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_2D') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4.3. Frequency\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nFrequency with which meteological forcings are applied (in seconds).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.frequency') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "5. Key Properties --&gt; Resolution\nResolution in the aersosol model grid\n5.1. Name\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nThis is a string usually used by the modelling group to describe the resolution of this grid, e.g. ORCA025, N512L180, T512L70 etc.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.resolution.name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "5.2. Canonical Horizontal Resolution\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nExpression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.resolution.canonical_horizontal_resolution') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "5.3. 
Number Of Horizontal Gridpoints\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nTotal number of horizontal (XY) points (or degrees of freedom) on computational grid.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_horizontal_gridpoints') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "5.4. Number Of Vertical Levels\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nNumber of vertical levels resolved on computational grid.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_vertical_levels') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "5.5. Is Adaptive Grid\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDefault is False. Set true if grid resolution changes during execution.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.resolution.is_adaptive_grid') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "6. Key Properties --&gt; Tuning Applied\nTuning methodology for aerosol model\n6.1. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nGeneral overview description of tuning: explain and motivate the main targets and metrics retained. &amp;Document the relative weight given to climate performance metrics versus process oriented metrics, &amp;and on the possible conflicts with parameterization level tuning. In particular describe any struggle &amp;with a parameter value that required pushing it to its limits to solve a particular model deficiency.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.tuning_applied.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6.2. Global Mean Metrics Used\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nList set of metrics of the global mean state used in tuning model/component", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.tuning_applied.global_mean_metrics_used') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6.3. Regional Metrics Used\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nList of regional metrics of mean state used in tuning model/component", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.tuning_applied.regional_metrics_used') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6.4. Trend Metrics Used\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nList observed trend metrics used in tuning model/component", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.tuning_applied.trend_metrics_used') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7. Transport\nAerosol transport\n7.1. 
Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of transport in atmosperic aerosol model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.transport.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7.2. Scheme\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nMethod for aerosol transport modeling", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.transport.scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Uses Atmospheric chemistry transport scheme\" \n# \"Specific transport scheme (eulerian)\" \n# \"Specific transport scheme (semi-lagrangian)\" \n# \"Specific transport scheme (eulerian and semi-lagrangian)\" \n# \"Specific transport scheme (lagrangian)\" \n# TODO - please enter value(s)\n", "7.3. Mass Conservation Scheme\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nMethod used to ensure mass conservation.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.transport.mass_conservation_scheme') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Uses Atmospheric chemistry transport scheme\" \n# \"Mass adjustment\" \n# \"Concentrations positivity\" \n# \"Gradients monotonicity\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "7.4. Convention\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nTransport by convention", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.transport.convention') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Uses Atmospheric chemistry transport scheme\" \n# \"Convective fluxes connected to tracers\" \n# \"Vertical velocities connected to tracers\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "8. Emissions\nAtmospheric aerosol emissions\n8.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of emissions in atmosperic aerosol model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.emissions.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.2. Method\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nMethod used to define aerosol species (several methods allowed because the different species may not use the same method).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.emissions.method') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"None\" \n# \"Prescribed (climatology)\" \n# \"Prescribed CMIP6\" \n# \"Prescribed above surface\" \n# \"Interactive\" \n# \"Interactive above surface\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "8.3. Sources\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nSources of the aerosol species are taken into account in the emissions scheme", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.aerosol.emissions.sources') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Vegetation\" \n# \"Volcanos\" \n# \"Bare ground\" \n# \"Sea surface\" \n# \"Lightning\" \n# \"Fires\" \n# \"Aircraft\" \n# \"Anthropogenic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "8.4. Prescribed Climatology\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nSpecify the climatology type for aerosol emissions", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.emissions.prescribed_climatology') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Constant\" \n# \"Interannual\" \n# \"Annual\" \n# \"Monthly\" \n# \"Daily\" \n# TODO - please enter value(s)\n", "8.5. Prescribed Climatology Emitted Species\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList of aerosol species emitted and prescribed via a climatology", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.emissions.prescribed_climatology_emitted_species') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.6. Prescribed Spatially Uniform Emitted Species\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList of aerosol species emitted and prescribed as spatially uniform", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.emissions.prescribed_spatially_uniform_emitted_species') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.7. Interactive Emitted Species\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList of aerosol species emitted and specified via an interactive method", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.emissions.interactive_emitted_species') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.8. Other Emitted Species\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList of aerosol species emitted and specified via an &quot;other method&quot;", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.emissions.other_emitted_species') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.9. Other Method Characteristics\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nCharacteristics of the &quot;other method&quot; used for aerosol emissions", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.emissions.other_method_characteristics') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9. Concentrations\nAtmospheric aerosol concentrations\n9.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of concentrations in atmosperic aerosol model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.concentrations.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9.2. Prescribed Lower Boundary\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList of species prescribed at the lower boundary.", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.aerosol.concentrations.prescribed_lower_boundary') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9.3. Prescribed Upper Boundary\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList of species prescribed at the upper boundary.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.concentrations.prescribed_upper_boundary') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9.4. Prescribed Fields Mmr\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList of species prescribed as mass mixing ratios.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9.5. Prescribed Fields Mmr\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList of species prescribed as AOD plus CCNs.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "10. Optical Radiative Properties\nAerosol optical and radiative properties\n10.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of optical and radiative properties", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "11. Optical Radiative Properties --&gt; Absorption\nAbsortion properties in aerosol scheme\n11.1. Black Carbon\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAbsorption mass coefficient of black carbon at 550nm (if non-absorbing enter 0)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.black_carbon') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "11.2. Dust\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAbsorption mass coefficient of dust at 550nm (if non-absorbing enter 0)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.dust') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "11.3. Organics\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAbsorption mass coefficient of organics at 550nm (if non-absorbing enter 0)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.organics') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "12. Optical Radiative Properties --&gt; Mixtures\n**\n12.1. External\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs there external mixing with respect to chemical composition?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.external') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "12.2. 
Internal\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs there internal mixing with respect to chemical composition?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.internal') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "12.3. Mixing Rule\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf there is internal mixing with respect to chemical composition then indicate the mixinrg rule", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.mixing_rule') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "13. Optical Radiative Properties --&gt; Impact Of H2o\n**\n13.1. Size\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDoes H2O impact size?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.size') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "13.2. Internal Mixture\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDoes H2O impact internal mixture?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.internal_mixture') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "14. Optical Radiative Properties --&gt; Radiative Scheme\nRadiative scheme for aerosol\n14.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of radiative scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "14.2. Shortwave Bands\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nNumber of shortwave bands", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.shortwave_bands') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "14.3. Longwave Bands\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nNumber of longwave bands", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.longwave_bands') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "15. Optical Radiative Properties --&gt; Cloud Interactions\nAerosol-cloud interactions\n15.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of aerosol-cloud interactions", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "15.2. Twomey\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs the Twomey effect included?", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "15.3. Twomey Minimum Ccn\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf the Twomey effect is included, then what is the minimum CCN number?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey_minimum_ccn') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "15.4. Drizzle\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDoes the scheme affect drizzle?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.drizzle') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "15.5. Cloud Lifetime\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDoes the scheme affect cloud lifetime?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.cloud_lifetime') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "15.6. Longwave Bands\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nNumber of longwave bands", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.longwave_bands') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "16. Model\nAerosol model\n16.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of atmosperic aerosol model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.model.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "16.2. Processes\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nProcesses included in the Aerosol model.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.model.processes') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Dry deposition\" \n# \"Sedimentation\" \n# \"Wet deposition (impaction scavenging)\" \n# \"Wet deposition (nucleation scavenging)\" \n# \"Coagulation\" \n# \"Oxidation (gas phase)\" \n# \"Oxidation (in cloud)\" \n# \"Condensation\" \n# \"Ageing\" \n# \"Advection (horizontal)\" \n# \"Advection (vertical)\" \n# \"Heterogeneous chemistry\" \n# \"Nucleation\" \n# TODO - please enter value(s)\n", "16.3. Coupling\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nOther model components coupled to the Aerosol model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.model.coupling') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Radiation\" \n# \"Land surface\" \n# \"Heterogeneous chemistry\" \n# \"Clouds\" \n# \"Ocean\" \n# \"Cryosphere\" \n# \"Gas phase chemistry\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "16.4. 
Gas Phase Precursors\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nList of gas phase aerosol precursors.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.model.gas_phase_precursors') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"DMS\" \n# \"SO2\" \n# \"Ammonia\" \n# \"Iodine\" \n# \"Terpene\" \n# \"Isoprene\" \n# \"VOC\" \n# \"NOx\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "16.5. Scheme Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nType(s) of aerosol scheme used by the aerosols model (potentially multiple: some species may be covered by one type of aerosol scheme and other species covered by another type).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.model.scheme_type') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Bulk\" \n# \"Modal\" \n# \"Bin\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "16.6. Bulk Scheme Species\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nList of species covered by the bulk scheme.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.model.bulk_scheme_species') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Sulphate\" \n# \"Nitrate\" \n# \"Sea salt\" \n# \"Dust\" \n# \"Ice\" \n# \"Organic\" \n# \"Black carbon / soot\" \n# \"SOA (secondary organic aerosols)\" \n# \"POM (particulate organic matter)\" \n# \"Polar stratospheric ice\" \n# \"NAT (Nitric acid trihydrate)\" \n# \"NAD (Nitric acid dihydrate)\" \n# \"STS (supercooled ternary solution aerosol particule)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "©2017 ES-DOC" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
minh5/cpsc
reports/neiss.ipynb
mit
[ "Table of Contents\n<p><div class=\"lev2 toc-item\"><a href=\"#Are-there-products-we-should-be-aware-of?\" data-toc-modified-id=\"Are-there-products-we-should-be-aware-of?-01\"><span class=\"toc-item-num\">0.1&nbsp;&nbsp;</span>Are there products we should be aware of?</a></div><div class=\"lev2 toc-item\"><a href=\"#Could-be-useful-to-compare-stratum-types---Do-large-hospitals-see-different-rates-of-injury-than-small-hospitals?\" data-toc-modified-id=\"Could-be-useful-to-compare-stratum-types---Do-large-hospitals-see-different-rates-of-injury-than-small-hospitals?-02\"><span class=\"toc-item-num\">0.2&nbsp;&nbsp;</span>Could be useful to compare stratum types - Do large hospitals see different rates of injury than small hospitals?</a></div><div class=\"lev2 toc-item\"><a href=\"#Do-we-see-meaningful-trends-when-race-is-reported?\" data-toc-modified-id=\"Do-we-see-meaningful-trends-when-race-is-reported?-03\"><span class=\"toc-item-num\">0.3&nbsp;&nbsp;</span>Do we see meaningful trends when race is reported?</a></div><div class=\"lev2 toc-item\"><a href=\"#Conclusion\" data-toc-modified-id=\"Conclusion-04\"><span class=\"toc-item-num\">0.4&nbsp;&nbsp;</span>Conclusion</a></div>", "import pandas as pd\nimport statsmodels.formula.api as smf\nimport statsmodels.api as sm\nimport numpy as np\n\nimport neiss\nimport plotly.offline\n\nplotly.offline\n\n#loading in data and preparations\nraw = pd.read_csv('/home/datauser/cpsc/data/processed/neiss/neiss-2015.csv')\ncleaned = neiss.cleaner(raw)\ndata = neiss.query(cleaned.processed_data, cleaned.crosstab)", "This analysis was done by DataKind DC on behalf of the Consumer Product Safety Commission. This serves as a preliminary study of the NEISS dataset. We have been been contact with the CPSC and figuring out what questions of importance that we can offer insight to. The questions that were analyzed were:\n\nAre there products we should be aware of?\nAre there differences between the sizes of hospitals?\nAre there differences where race was reported or between different races?\n\nAre there products we should be aware of?\nTo answer this question, I approached it two ways. One way is to tabulate the total number of producted queried by hospitals and another is to look at the top items reported by each item.\nThe top ten producted reported by hospitals are listed below. It appears that 1842 and 1807 are the top products that most hospital report.", "data.data['product'].value_counts()[0:9]", "Looking further, I examine what hospitals report this the most, so we can examine hospitals that report these products the most.", "data.get_hospitals_by_product('product_1842')\n\ndata.get_hospitals_by_product('product_1807')", "We can also view these as plots and compare the incident rates of these products through different hospitals", "data.plot_product('product_1842')\n\ndata.plot_product('product_1807')", "Looking at these, it appears that there are some overlap between the hospitals. Hospital 17, 21, 42, and 95 are the 4 common hospital that are in the top ten of both these products. We will turn to a hospital examination down the road.", "set(data.get_hospitals_by_product('product_1842').index.tolist()) & set(data.get_hospitals_by_product('product_1807').index.tolist())", "Could be useful to compare stratum types - Do large hospitals see different rates of injury than small hospitals?\nAnother way of examining product harm would not only to count the total numbers of products but also to see what is the top product that is reported for each hosptial. 
Here we can look beyond the sheer number of reports, which could be inflated by over-reporting or awareness, and see whether there are geographic differences in product harm. However, after examining this, we see that 70 out of the 82 hospitals surveyed have product 1842 or 1807 as their top product.\nAn interesting finding, though, is that product_1267, product_3299, and product_3283 appear in the list of top products by hospital but not in the top ten overall. The number is small, as it only affects 5 hospitals and 14,844 reported cases. It would be interesting to see where these five hospitals are located and why these products top their reported harm.", "data.top_product_for_hospital()", "Another approach is to fit a negative binomial regression to see whether there are any meaningful differences between the sizes of the hospitals. I use a negative binomial regression rather than a Poisson regression because there is strong evidence of overdispersion; that is, the variance of the data is much higher than the mean, as shown below. This also occurs across all strata (only shown for small, medium, and large). A compact, self-contained sketch of this dispersion check and model fit on synthetic data is included as an appendix at the end of this notebook.", "counts = data.data.loc[data.data['product'] == 'product_1842', :]['hospital'].value_counts()\nprint('variance of product 1842 counts:', np.var(counts.values))\nprint('mean of product 1842 counts:', np.mean(counts.values))\n\n\ndata.plot_stratum_dist('product_1842', 'S')\n\ndata.plot_stratum_dist('product_1842', 'M')\n\ndata.plot_stratum_dist('product_1842', 'L')\n\ndf = data.prepare_stratum_modeling('product_1842')\ndf.head()\n\n\nmodel = smf.glm(\"counts ~ stratum\", data=df,\n                family=sm.families.NegativeBinomial()).fit()\nmodel.summary()", "From the model, we see significant differences only between Medium and Small hospitals. Given the coefficients, the log count difference between Medium and Small hospitals is -1.55. Other than that, there do not appear to be any significant differences between hospital sizes for Product 1842. \nWe can do the same to examine the second most reported product, Product 1807. Below I check the assumption for fitting a negative binomial regression, namely that the variance is far greater than the mean. It holds in this case as well.", "data.plot_stratum_dist('product_1807', 'S')\n\n\ndata.plot_stratum_dist('product_1807', 'M')\n\ndata.plot_stratum_dist('product_1807', 'L')", "The assumptions have been met, and after building the model we see results very similar to the previous model: there are only significant differences between the small and large hospitals. For future research, we can use similar techniques to test for significant differences between hospital sizes for all products.", "df2 = data.prepare_stratum_modeling('product_1807')\n# Fit on the product_1807 data prepared above (df2), not the earlier product_1842 frame.\nmodel = smf.glm(\"counts ~ stratum\", data=df2,\n                family=sm.families.NegativeBinomial()).fit()\nmodel.summary()", "Do we see meaningful trends when race is reported?\nFrom the top items, we don't see any meaningful differences between the top ten items for people who have race reported and those who do not.
Even among the data where we do have race reported, there doesn't seem to be much variation when it comes to the top ten products causing the most harm.", "data.retrieve_query('race_reported', 'reported', 'product')\n\ndata.retrieve_query('race_reported', 'not reported', 'product')\n\nraces = ['white', 'black', 'hispanic', 'other']\nfor race in races:\n    print(race)\n    print(data.retrieve_query('new_race', race, 'product'))", "Conclusion\nThe analysis here is still preliminary and exploratory. Most of the analysis revolved around two products, Product 1842 and Product 1807, because they vastly outnumbered all the other reported products. Future analysis could include running more negative binomial or Poisson (if the mean and variance are similar) regressions and more standard hypothesis tests to evaluate statistically significant differences. One question that I could not answer is whether there are regional differences, because we do not know the exact locations of the hospitals. \nWe have also attached a document that conducts a much more comprehensive breakdown of product harm by various segments. That document serves as a starting point for all the analysis done here and will be a valuable reference for any future research on the NEISS dataset.",
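"Appendix: dispersion check and negative binomial fit (sketch)\nAs referenced above, this is a minimal, self-contained sketch of the overdispersion check and negative binomial GLM used in this analysis. It runs on synthetic counts drawn from a negative binomial distribution, so the stratum labels 'S', 'M', 'L', the sample sizes, and the distribution parameters below are illustrative assumptions only and are not NEISS results.", "import numpy as np\nimport pandas as pd\nimport statsmodels.api as sm\nimport statsmodels.formula.api as smf\n\n# Synthetic, deliberately overdispersed counts for three illustrative strata.\nrng = np.random.RandomState(0)\nsynthetic = pd.DataFrame({\n    'stratum': np.repeat(['S', 'M', 'L'], 30),\n    'counts': rng.negative_binomial(n=2, p=0.1, size=90)})\n\n# Dispersion check: for overdispersed data the variance greatly exceeds the mean.\nprint('mean:', synthetic['counts'].mean())\nprint('variance:', synthetic['counts'].var())\n\n# Same model form as used above, fit to the synthetic counts.\nsketch = smf.glm('counts ~ stratum', data=synthetic,\n                 family=sm.families.NegativeBinomial()).fit()\nprint(sketch.summary())" ]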
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
JAmarel/Phys202
Integration/IntegrationEx02.ipynb
mit
[ "Integration Exercise 2\nImports", "%matplotlib inline\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\nfrom scipy import integrate", "Indefinite integrals\nHere is a table of definite integrals. Many of these integrals has a number of parameters $a$, $b$, etc.\nFind five of these integrals and perform the following steps:\n\nTypeset the integral using LateX in a Markdown cell.\nDefine an integrand function that computes the value of the integrand.\nDefine an integral_approx funciton that uses scipy.integrate.quad to peform the integral.\nDefine an integral_exact function that computes the exact value of the integral.\nCall and print the return value of integral_approx and integral_exact for one set of parameters.\n\nHere is an example to show what your solutions should look like:\nExample\nHere is the integral I am performing:\n$$ I_1 = \\int_0^\\infty \\frac{dx}{x^2 + a^2} = \\frac{\\pi}{2a} $$", "def integrand(x, a):\n return 1.0/(x**2 + a**2)\n\ndef integral_approx(a):\n # Use the args keyword argument to feed extra arguments to your integrand\n I, e = integrate.quad(integrand, 0, np.inf, args=(a,))\n return I\n\ndef integral_exact(a):\n return 0.5*np.pi/a\n\nprint(\"Numerical: \", integral_approx(1.0))\nprint(\"Exact : \", integral_exact(1.0))\n\n\nassert True # leave this cell to grade the above integral", "Integral 1\n$$ I_1 = \\int_0^a {\\sqrt{a^2-x^2} dx} = \\frac{\\pi a^2}{4} $$", "def integrand(x, a):\n return np.sqrt(a**2 - x**2)\n\ndef integral_approx(a):\n # Use the args keyword argument to feed extra arguments to your integrand\n I, e = integrate.quad(integrand, 0, a, args=(a,))\n return I\n\ndef integral_exact(a):\n return 0.25*np.pi\n\nprint(\"Numerical: \", integral_approx(1.0))\nprint(\"Exact : \", integral_exact(1.0))\n\nassert True # leave this cell to grade the above integral", "Integral 2\n$$ I_2 = \\int_0^{\\frac{\\pi}{2}} {\\sin^2{x}}{ } {dx} = \\frac{\\pi}{4} $$", "def integrand(x):\n return np.sin(x)**2\n\ndef integral_approx():\n I, e = integrate.quad(integrand, 0, np.pi/2)\n return I\n\ndef integral_exact():\n return 0.25*np.pi\n\nprint(\"Numerical: \", integral_approx())\nprint(\"Exact : \", integral_exact())\n\nassert True # leave this cell to grade the above integral", "Integral 3\n$$ I_3 = \\int_0^{2\\pi} \\frac{dx}{a+b\\sin{x}} = {\\frac{2\\pi}{\\sqrt{a^2-b^2}}} $$", "def integrand(x,a,b):\n return 1/(a+ b*np.sin(x))\n\ndef integral_approx(a,b):\n I, e = integrate.quad(integrand, 0, 2*np.pi,args=(a,b))\n return I\n\ndef integral_exact(a,b):\n return 2*np.pi/np.sqrt(a**2-b**2)\n\nprint(\"Numerical: \", integral_approx(10,0))\nprint(\"Exact : \", integral_exact(10,0))\n\nassert True # leave this cell to grade the above integral", "Integral 4\n$$ I_4 = \\int_0^{\\infty} \\frac{x}{e^{x}+1} = {\\frac{\\pi^2}{12}} $$", "def integrand(x):\n return x/(np.exp(x)+1)\n\ndef integral_approx():\n I, e = integrate.quad(integrand, 0, np.inf)\n return I\n\ndef integral_exact():\n return (1/12)*np.pi**2\n\nprint(\"Numerical: \", integral_approx())\nprint(\"Exact : \", integral_exact())\n\nassert True # leave this cell to grade the above integral", "Integral 5\n$$ I_5 = \\int_0^{\\infty} \\frac{x}{e^{x}-1} = {\\frac{\\pi^2}{6}} $$", "def integrand(x):\n return x/(np.exp(x)-1)\n\ndef integral_approx():\n I, e = integrate.quad(integrand, 0, np.inf)\n return I\n\ndef integral_exact():\n return (1/6)*np.pi**2\n\nprint(\"Numerical: \", integral_approx())\nprint(\"Exact : \", integral_exact())\n\nassert True # leave this cell to grade the above 
integral" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
ES-DOC/esdoc-jupyterhub
notebooks/inm/cmip6/models/sandbox-2/ocean.ipynb
gpl-3.0
[ "ES-DOC CMIP6 Model Properties - Ocean\nMIP Era: CMIP6\nInstitute: INM\nSource ID: SANDBOX-2\nTopic: Ocean\nSub-Topics: Timestepping Framework, Advection, Lateral Physics, Vertical Physics, Uplow Boundaries, Boundary Forcing. \nProperties: 133 (101 required)\nModel descriptions: Model description details\nInitialized From: -- \nNotebook Help: Goto notebook help page\nNotebook Initialised: 2018-02-15 16:54:05\nDocument Setup\nIMPORTANT: to be executed each time you run the notebook", "# DO NOT EDIT ! \nfrom pyesdoc.ipython.model_topic import NotebookOutput \n\n# DO NOT EDIT ! \nDOC = NotebookOutput('cmip6', 'inm', 'sandbox-2', 'ocean')", "Document Authors\nSet document authors", "# Set as follows: DOC.set_author(\"name\", \"email\") \n# TODO - please enter value(s)", "Document Contributors\nSpecify document contributors", "# Set as follows: DOC.set_contributor(\"name\", \"email\") \n# TODO - please enter value(s)", "Document Publication\nSpecify document publication status", "# Set publication status: \n# 0=do not publish, 1=publish. \nDOC.set_publication_status(0)", "Document Table of Contents\n1. Key Properties\n2. Key Properties --&gt; Seawater Properties\n3. Key Properties --&gt; Bathymetry\n4. Key Properties --&gt; Nonoceanic Waters\n5. Key Properties --&gt; Software Properties\n6. Key Properties --&gt; Resolution\n7. Key Properties --&gt; Tuning Applied\n8. Key Properties --&gt; Conservation\n9. Grid\n10. Grid --&gt; Discretisation --&gt; Vertical\n11. Grid --&gt; Discretisation --&gt; Horizontal\n12. Timestepping Framework\n13. Timestepping Framework --&gt; Tracers\n14. Timestepping Framework --&gt; Baroclinic Dynamics\n15. Timestepping Framework --&gt; Barotropic\n16. Timestepping Framework --&gt; Vertical Physics\n17. Advection\n18. Advection --&gt; Momentum\n19. Advection --&gt; Lateral Tracers\n20. Advection --&gt; Vertical Tracers\n21. Lateral Physics\n22. Lateral Physics --&gt; Momentum --&gt; Operator\n23. Lateral Physics --&gt; Momentum --&gt; Eddy Viscosity Coeff\n24. Lateral Physics --&gt; Tracers\n25. Lateral Physics --&gt; Tracers --&gt; Operator\n26. Lateral Physics --&gt; Tracers --&gt; Eddy Diffusity Coeff\n27. Lateral Physics --&gt; Tracers --&gt; Eddy Induced Velocity\n28. Vertical Physics\n29. Vertical Physics --&gt; Boundary Layer Mixing --&gt; Details\n30. Vertical Physics --&gt; Boundary Layer Mixing --&gt; Tracers\n31. Vertical Physics --&gt; Boundary Layer Mixing --&gt; Momentum\n32. Vertical Physics --&gt; Interior Mixing --&gt; Details\n33. Vertical Physics --&gt; Interior Mixing --&gt; Tracers\n34. Vertical Physics --&gt; Interior Mixing --&gt; Momentum\n35. Uplow Boundaries --&gt; Free Surface\n36. Uplow Boundaries --&gt; Bottom Boundary Layer\n37. Boundary Forcing\n38. Boundary Forcing --&gt; Momentum --&gt; Bottom Friction\n39. Boundary Forcing --&gt; Momentum --&gt; Lateral Friction\n40. Boundary Forcing --&gt; Tracers --&gt; Sunlight Penetration\n41. Boundary Forcing --&gt; Tracers --&gt; Fresh Water Forcing \n1. Key Properties\nOcean key properties\n1.1. Model Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of ocean model.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.model_overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.2. 
Model Name\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nName of ocean model code (NEMO 3.6, MOM 5.0,...)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.model_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.3. Model Family\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of ocean model.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.model_family') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"OGCM\" \n# \"slab ocean\" \n# \"mixed layer ocean\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "1.4. Basic Approximations\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nBasic approximations made in the ocean.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.basic_approximations') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Primitive equations\" \n# \"Non-hydrostatic\" \n# \"Boussinesq\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "1.5. Prognostic Variables\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nList of prognostic variables in the ocean component.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.prognostic_variables') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Potential temperature\" \n# \"Conservative temperature\" \n# \"Salinity\" \n# \"U-velocity\" \n# \"V-velocity\" \n# \"W-velocity\" \n# \"SSH\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "2. Key Properties --&gt; Seawater Properties\nPhysical properties of seawater in ocean\n2.1. Eos Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of EOS for sea water", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Linear\" \n# \"Wright, 1997\" \n# \"Mc Dougall et al.\" \n# \"Jackett et al. 2006\" \n# \"TEOS 2010\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "2.2. Eos Functional Temp\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTemperature used in EOS for sea water", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_temp') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Potential temperature\" \n# \"Conservative temperature\" \n# TODO - please enter value(s)\n", "2.3. Eos Functional Salt\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nSalinity used in EOS for sea water", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_salt') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Practical salinity Sp\" \n# \"Absolute salinity Sa\" \n# TODO - please enter value(s)\n", "2.4. Eos Functional Depth\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDepth or pressure used in EOS for sea water ?", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_depth') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Pressure (dbars)\" \n# \"Depth (meters)\" \n# TODO - please enter value(s)\n", "2.5. Ocean Freezing Point\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nEquation used to compute the freezing point (in deg C) of seawater, as a function of salinity and pressure", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_freezing_point') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"TEOS 2010\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "2.6. Ocean Specific Heat\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nSpecific heat in ocean (cpocean) in J/(kg K)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_specific_heat') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "2.7. Ocean Reference Density\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nBoussinesq reference density (rhozero) in kg / m3", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_reference_density') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "3. Key Properties --&gt; Bathymetry\nProperties of bathymetry in ocean\n3.1. Reference Dates\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nReference date of bathymetry", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.bathymetry.reference_dates') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Present day\" \n# \"21000 years BP\" \n# \"6000 years BP\" \n# \"LGM\" \n# \"Pliocene\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "3.2. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs the bathymetry fixed in time in the ocean ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.bathymetry.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "3.3. Ocean Smoothing\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe any smoothing or hand editing of bathymetry in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.bathymetry.ocean_smoothing') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "3.4. Source\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe source of bathymetry in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.bathymetry.source') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4. Key Properties --&gt; Nonoceanic Waters\nNon oceanic waters treatement in ocean\n4.1. Isolated Seas\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe if/how isolated seas is performed", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.isolated_seas') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4.2. River Mouth\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe if/how river mouth mixing or estuaries specific treatment is performed", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.river_mouth') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "5. Key Properties --&gt; Software Properties\nSoftware properties of ocean code\n5.1. Repository\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nLocation of code for this component.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.software_properties.repository') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "5.2. Code Version\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nCode version identifier.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.software_properties.code_version') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "5.3. Code Languages\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nCode language(s).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.software_properties.code_languages') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6. Key Properties --&gt; Resolution\nResolution in the ocean grid\n6.1. Name\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nThis is a string usually used by the modelling group to describe the resolution of this grid, e.g. ORCA025, N512L180, T512L70 etc.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.resolution.name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6.2. Canonical Horizontal Resolution\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nExpression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.resolution.canonical_horizontal_resolution') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6.3. Range Horizontal Resolution\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nRange of horizontal resolution with spatial details, eg. 50(Equator)-100km or 0.1-0.5 degrees etc.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.resolution.range_horizontal_resolution') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6.4. Number Of Horizontal Gridpoints\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTotal number of horizontal (XY) points (or degrees of freedom) on computational grid.", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.ocean.key_properties.resolution.number_of_horizontal_gridpoints') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "6.5. Number Of Vertical Levels\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nNumber of vertical levels resolved on computational grid.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.resolution.number_of_vertical_levels') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "6.6. Is Adaptive Grid\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDefault is False. Set true if grid resolution changes during execution.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.resolution.is_adaptive_grid') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "6.7. Thickness Level 1\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nThickness of first surface ocean level (in meters)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.resolution.thickness_level_1') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "7. Key Properties --&gt; Tuning Applied\nTuning methodology for ocean component\n7.1. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nGeneral overview description of tuning: explain and motivate the main targets and metrics retained. &amp;Document the relative weight given to climate performance metrics versus process oriented metrics, &amp;and on the possible conflicts with parameterization level tuning. In particular describe any struggle &amp;with a parameter value that required pushing it to its limits to solve a particular model deficiency.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.tuning_applied.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7.2. Global Mean Metrics Used\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nList set of metrics of the global mean state used in tuning model/component", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.tuning_applied.global_mean_metrics_used') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7.3. Regional Metrics Used\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nList of regional metrics of mean state (e.g THC, AABW, regional means etc) used in tuning model/component", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.tuning_applied.regional_metrics_used') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7.4. Trend Metrics Used\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nList observed trend metrics used in tuning model/component", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.tuning_applied.trend_metrics_used') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8. 
Key Properties --&gt; Conservation\nConservation in the ocean component\n8.1. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nBrief description of conservation methodology", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.conservation.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.2. Scheme\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nProperties conserved in the ocean by the numerical schemes", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.conservation.scheme') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Energy\" \n# \"Enstrophy\" \n# \"Salt\" \n# \"Volume of ocean\" \n# \"Momentum\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "8.3. Consistency Properties\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAny additional consistency properties (energy conversion, pressure gradient discretisation, ...)?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.conservation.consistency_properties') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.4. Corrected Conserved Prognostic Variables\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nSet of variables which are conserved by more than the numerical scheme alone.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.conservation.corrected_conserved_prognostic_variables') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.5. Was Flux Correction Used\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDoes conservation involve flux correction ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.conservation.was_flux_correction_used') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "9. Grid\nOcean grid\n9.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of grid in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.grid.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "10. Grid --&gt; Discretisation --&gt; Vertical\nProperties of vertical discretisation in ocean\n10.1. Coordinates\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of vertical coordinates in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.grid.discretisation.vertical.coordinates') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Z-coordinate\" \n# \"Z*-coordinate\" \n# \"S-coordinate\" \n# \"Isopycnic - sigma 0\" \n# \"Isopycnic - sigma 2\" \n# \"Isopycnic - sigma 4\" \n# \"Isopycnic - other\" \n# \"Hybrid / Z+S\" \n# \"Hybrid / Z+isopycnic\" \n# \"Hybrid / other\" \n# \"Pressure referenced (P)\" \n# \"P*\" \n# \"Z**\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "10.2. 
Partial Steps\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nUsing partial steps with Z or Z vertical coordinate in ocean ?*", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.grid.discretisation.vertical.partial_steps') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "11. Grid --&gt; Discretisation --&gt; Horizontal\nType of horizontal discretisation scheme in ocean\n11.1. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHorizontal grid type", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.grid.discretisation.horizontal.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Lat-lon\" \n# \"Rotated north pole\" \n# \"Two north poles (ORCA-style)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "11.2. Staggering\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nHorizontal grid staggering type", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.grid.discretisation.horizontal.staggering') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Arakawa B-grid\" \n# \"Arakawa C-grid\" \n# \"Arakawa E-grid\" \n# \"N/a\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "11.3. Scheme\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHorizontal discretisation scheme in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.grid.discretisation.horizontal.scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Finite difference\" \n# \"Finite volumes\" \n# \"Finite elements\" \n# \"Unstructured grid\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "12. Timestepping Framework\nOcean Timestepping Framework\n12.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of time stepping in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.timestepping_framework.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "12.2. Diurnal Cycle\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDiurnal cycle type", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.timestepping_framework.diurnal_cycle') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"None\" \n# \"Via coupling\" \n# \"Specific treatment\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "13. Timestepping Framework --&gt; Tracers\nProperties of tracers time stepping in ocean\n13.1. Scheme\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTracers time stepping scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.timestepping_framework.tracers.scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Leap-frog + Asselin filter\" \n# \"Leap-frog + Periodic Euler\" \n# \"Predictor-corrector\" \n# \"Runge-Kutta 2\" \n# \"AM3-LF\" \n# \"Forward-backward\" \n# \"Forward operator\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "13.2. 
Time Step\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTracers time step (in seconds)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.timestepping_framework.tracers.time_step') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "14. Timestepping Framework --&gt; Baroclinic Dynamics\nBaroclinic dynamics in ocean\n14.1. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nBaroclinic dynamics type", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Preconditioned conjugate gradient\" \n# \"Sub cyling\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "14.2. Scheme\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nBaroclinic dynamics scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Leap-frog + Asselin filter\" \n# \"Leap-frog + Periodic Euler\" \n# \"Predictor-corrector\" \n# \"Runge-Kutta 2\" \n# \"AM3-LF\" \n# \"Forward-backward\" \n# \"Forward operator\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "14.3. Time Step\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nBaroclinic time step (in seconds)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.time_step') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "15. Timestepping Framework --&gt; Barotropic\nBarotropic time stepping in ocean\n15.1. Splitting\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTime splitting method", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.timestepping_framework.barotropic.splitting') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"None\" \n# \"split explicit\" \n# \"implicit\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "15.2. Time Step\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nBarotropic time step (in seconds)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.timestepping_framework.barotropic.time_step') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "16. Timestepping Framework --&gt; Vertical Physics\nVertical physics time stepping in ocean\n16.1. Method\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDetails of vertical time stepping in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.timestepping_framework.vertical_physics.method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "17. Advection\nOcean advection\n17.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of advection in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "18. 
Advection --&gt; Momentum\nProperties of lateral momemtum advection scheme in ocean\n18.1. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of lateral momemtum advection scheme in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.momentum.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Flux form\" \n# \"Vector form\" \n# TODO - please enter value(s)\n", "18.2. Scheme Name\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nName of ocean momemtum advection scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.momentum.scheme_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "18.3. ALE\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nUsing ALE for vertical advection ? (if vertical coordinates are sigma)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.momentum.ALE') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "19. Advection --&gt; Lateral Tracers\nProperties of lateral tracer advection scheme in ocean\n19.1. Order\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOrder of lateral tracer advection scheme in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.lateral_tracers.order') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "19.2. Flux Limiter\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nMonotonic flux limiter for lateral tracer advection scheme in ocean ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.lateral_tracers.flux_limiter') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "19.3. Effective Order\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nEffective order of limited lateral tracer advection scheme in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.lateral_tracers.effective_order') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "19.4. Name\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescriptive text for lateral tracer advection scheme in ocean (e.g. MUSCL, PPM-H5, PRATHER,...)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.lateral_tracers.name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "19.5. Passive Tracers\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nPassive tracers advected", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Ideal age\" \n# \"CFC 11\" \n# \"CFC 12\" \n# \"SF6\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "19.6. Passive Tracers Advection\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIs advection of passive tracers different than active ? 
if so, describe.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers_advection') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "20. Advection --&gt; Vertical Tracers\nProperties of vertical tracer advection scheme in ocean\n20.1. Name\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescriptive text for vertical tracer advection scheme in ocean (e.g. MUSCL, PPM-H5, PRATHER,...)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.vertical_tracers.name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "20.2. Flux Limiter\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nMonotonic flux limiter for vertical tracer advection scheme in ocean ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.vertical_tracers.flux_limiter') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "21. Lateral Physics\nOcean lateral physics\n21.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of lateral physics in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "21.2. Scheme\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of transient eddy representation in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"None\" \n# \"Eddy active\" \n# \"Eddy admitting\" \n# TODO - please enter value(s)\n", "22. Lateral Physics --&gt; Momentum --&gt; Operator\nProperties of lateral physics operator for momentum in ocean\n22.1. Direction\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDirection of lateral physics momemtum scheme in the ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.direction') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Horizontal\" \n# \"Isopycnal\" \n# \"Isoneutral\" \n# \"Geopotential\" \n# \"Iso-level\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "22.2. Order\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOrder of lateral physics momemtum scheme in the ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.order') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Harmonic\" \n# \"Bi-harmonic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "22.3. Discretisation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDiscretisation of lateral physics momemtum scheme in the ocean", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.discretisation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Second order\" \n# \"Higher order\" \n# \"Flux limiter\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "23. Lateral Physics --&gt; Momentum --&gt; Eddy Viscosity Coeff\nProperties of eddy viscosity coeff in lateral physics momemtum scheme in the ocean\n23.1. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nLateral physics momemtum eddy viscosity coeff type in the ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Constant\" \n# \"Space varying\" \n# \"Time + space varying (Smagorinsky)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "23.2. Constant Coefficient\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf constant, value of eddy viscosity coeff in lateral physics momemtum scheme (in m2/s)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.constant_coefficient') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "23.3. Variable Coefficient\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf space-varying, describe variations of eddy viscosity coeff in lateral physics momemtum scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.variable_coefficient') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "23.4. Coeff Background\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe background eddy viscosity coeff in lateral physics momemtum scheme (give values in m2/s)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_background') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "23.5. Coeff Backscatter\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs there backscatter in eddy viscosity coeff in lateral physics momemtum scheme ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_backscatter') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "24. Lateral Physics --&gt; Tracers\nProperties of lateral physics for tracers in ocean\n24.1. Mesoscale Closure\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs there a mesoscale closure in the lateral physics tracers scheme ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.mesoscale_closure') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "24.2. Submesoscale Mixing\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs there a submesoscale mixing parameterisation (i.e Fox-Kemper) in the lateral physics tracers scheme ?", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.ocean.lateral_physics.tracers.submesoscale_mixing') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "25. Lateral Physics --&gt; Tracers --&gt; Operator\nProperties of lateral physics operator for tracers in ocean\n25.1. Direction\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDirection of lateral physics tracers scheme in the ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.direction') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Horizontal\" \n# \"Isopycnal\" \n# \"Isoneutral\" \n# \"Geopotential\" \n# \"Iso-level\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "25.2. Order\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOrder of lateral physics tracers scheme in the ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.order') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Harmonic\" \n# \"Bi-harmonic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "25.3. Discretisation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDiscretisation of lateral physics tracers scheme in the ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.discretisation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Second order\" \n# \"Higher order\" \n# \"Flux limiter\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "26. Lateral Physics --&gt; Tracers --&gt; Eddy Diffusity Coeff\nProperties of eddy diffusity coeff in lateral physics tracers scheme in the ocean\n26.1. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nLateral physics tracers eddy diffusity coeff type in the ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Constant\" \n# \"Space varying\" \n# \"Time + space varying (Smagorinsky)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "26.2. Constant Coefficient\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf constant, value of eddy diffusity coeff in lateral physics tracers scheme (in m2/s)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.constant_coefficient') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "26.3. Variable Coefficient\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf space-varying, describe variations of eddy diffusity coeff in lateral physics tracers scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.variable_coefficient') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "26.4. 
Coeff Background\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe background eddy diffusity coeff in lateral physics tracers scheme (give values in m2/s)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_background') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "26.5. Coeff Backscatter\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs there backscatter in eddy diffusity coeff in lateral physics tracers scheme ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_backscatter') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "27. Lateral Physics --&gt; Tracers --&gt; Eddy Induced Velocity\nProperties of eddy induced velocity (EIV) in lateral physics tracers scheme in the ocean\n27.1. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of EIV in lateral physics tracers in the ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"GM\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "27.2. Constant Val\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf EIV scheme for tracers is constant, specify coefficient value (M2/s)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.constant_val') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "27.3. Flux Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of EIV flux (advective or skew)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.flux_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "27.4. Added Diffusivity\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of EIV added diffusivity (constant, flow dependent or none)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.added_diffusivity') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "28. Vertical Physics\nOcean Vertical Physics\n28.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of vertical physics in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "29. Vertical Physics --&gt; Boundary Layer Mixing --&gt; Details\nProperties of vertical physics in ocean\n29.1. Langmuir Cells Mixing\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs there Langmuir cells mixing in upper ocean ?", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.details.langmuir_cells_mixing') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "30. Vertical Physics --&gt; Boundary Layer Mixing --&gt; Tracers\n*Properties of boundary layer (BL) mixing on tracers in the ocean *\n30.1. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of boundary layer mixing for tracers in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Constant value\" \n# \"Turbulent closure - TKE\" \n# \"Turbulent closure - KPP\" \n# \"Turbulent closure - Mellor-Yamada\" \n# \"Turbulent closure - Bulk Mixed Layer\" \n# \"Richardson number dependent - PP\" \n# \"Richardson number dependent - KT\" \n# \"Imbeded as isopycnic vertical coordinate\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "30.2. Closure Order\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf turbulent BL mixing of tracers, specific order of closure (0, 1, 2.5, 3)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.closure_order') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "30.3. Constant\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf constant BL mixing of tracers, specific coefficient (m2/s)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.constant') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "30.4. Background\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nBackground BL mixing of tracers coefficient, (schema and value in m2/s - may by none)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.background') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "31. Vertical Physics --&gt; Boundary Layer Mixing --&gt; Momentum\n*Properties of boundary layer (BL) mixing on momentum in the ocean *\n31.1. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of boundary layer mixing for momentum in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Constant value\" \n# \"Turbulent closure - TKE\" \n# \"Turbulent closure - KPP\" \n# \"Turbulent closure - Mellor-Yamada\" \n# \"Turbulent closure - Bulk Mixed Layer\" \n# \"Richardson number dependent - PP\" \n# \"Richardson number dependent - KT\" \n# \"Imbeded as isopycnic vertical coordinate\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "31.2. Closure Order\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf turbulent BL mixing of momentum, specific order of closure (0, 1, 2.5, 3)", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.closure_order') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "31.3. Constant\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf constant BL mixing of momentum, specific coefficient (m2/s)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.constant') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "31.4. Background\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nBackground BL mixing of momentum coefficient, (schema and value in m2/s - may by none)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.background') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "32. Vertical Physics --&gt; Interior Mixing --&gt; Details\n*Properties of interior mixing in the ocean *\n32.1. Convection Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of vertical convection in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.convection_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Non-penetrative convective adjustment\" \n# \"Enhanced vertical diffusion\" \n# \"Included in turbulence closure\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "32.2. Tide Induced Mixing\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe how tide induced mixing is modelled (barotropic, baroclinic, none)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.tide_induced_mixing') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "32.3. Double Diffusion\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs there double diffusion", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.double_diffusion') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "32.4. Shear Mixing\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs there interior shear mixing", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.shear_mixing') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "33. Vertical Physics --&gt; Interior Mixing --&gt; Tracers\n*Properties of interior mixing on tracers in the ocean *\n33.1. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of interior mixing for tracers in ocean", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Constant value\" \n# \"Turbulent closure / TKE\" \n# \"Turbulent closure - Mellor-Yamada\" \n# \"Richardson number dependent - PP\" \n# \"Richardson number dependent - KT\" \n# \"Imbeded as isopycnic vertical coordinate\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "33.2. Constant\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf constant interior mixing of tracers, specific coefficient (m2/s)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.constant') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "33.3. Profile\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs the background interior mixing using a vertical profile for tracers (i.e is NOT constant) ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.profile') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "33.4. Background\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nBackground interior mixing of tracers coefficient, (schema and value in m2/s - may by none)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.background') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "34. Vertical Physics --&gt; Interior Mixing --&gt; Momentum\n*Properties of interior mixing on momentum in the ocean *\n34.1. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of interior mixing for momentum in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Constant value\" \n# \"Turbulent closure / TKE\" \n# \"Turbulent closure - Mellor-Yamada\" \n# \"Richardson number dependent - PP\" \n# \"Richardson number dependent - KT\" \n# \"Imbeded as isopycnic vertical coordinate\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "34.2. Constant\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf constant interior mixing of momentum, specific coefficient (m2/s)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.constant') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "34.3. Profile\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs the background interior mixing using a vertical profile for momentum (i.e is NOT constant) ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.profile') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "34.4. Background\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nBackground interior mixing of momentum coefficient, (schema and value in m2/s - may by none)", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.background') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "35. Uplow Boundaries --&gt; Free Surface\nProperties of free surface in ocean\n35.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of free surface in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "35.2. Scheme\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nFree surface scheme in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Linear implicit\" \n# \"Linear filtered\" \n# \"Linear semi-explicit\" \n# \"Non-linear implicit\" \n# \"Non-linear filtered\" \n# \"Non-linear semi-explicit\" \n# \"Fully explicit\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "35.3. Embeded Seaice\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs the sea-ice embeded in the ocean model (instead of levitating) ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.embeded_seaice') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "36. Uplow Boundaries --&gt; Bottom Boundary Layer\nProperties of bottom boundary layer in ocean\n36.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of bottom boundary layer in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "36.2. Type Of Bbl\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of bottom boundary layer in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.type_of_bbl') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Diffusive\" \n# \"Acvective\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "36.3. Lateral Mixing Coef\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf bottom BL is diffusive, specify value of lateral mixing coefficient (in m2/s)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.lateral_mixing_coef') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "36.4. Sill Overflow\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe any specific treatment of sill overflows", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.sill_overflow') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "37. Boundary Forcing\nOcean boundary forcing\n37.1. 
Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of boundary forcing in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "37.2. Surface Pressure\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe how surface pressure is transmitted to ocean (via sea-ice, nothing specific,...)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.surface_pressure') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "37.3. Momentum Flux Correction\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe any type of ocean surface momentum flux correction and, if applicable, how it is applied and where.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.momentum_flux_correction') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "37.4. Tracers Flux Correction\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe any type of ocean surface tracers flux correction and, if applicable, how it is applied and where.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.tracers_flux_correction') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "37.5. Wave Effects\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe if/how wave effects are modelled at ocean surface.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.wave_effects') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "37.6. River Runoff Budget\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe how river runoff from land surface is routed to ocean and any global adjustment done.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.river_runoff_budget') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "37.7. Geothermal Heating\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe if/how geothermal heating is present at ocean bottom.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.geothermal_heating') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "38. Boundary Forcing --&gt; Momentum --&gt; Bottom Friction\nProperties of momentum bottom friction in ocean\n38.1. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of momentum bottom friction in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.momentum.bottom_friction.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Linear\" \n# \"Non-linear\" \n# \"Non-linear (drag function of speed of tides)\" \n# \"Constant drag coefficient\" \n# \"None\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "39. 
Boundary Forcing --&gt; Momentum --&gt; Lateral Friction\nProperties of momentum lateral friction in ocean\n39.1. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of momentum lateral friction in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.momentum.lateral_friction.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"None\" \n# \"Free-slip\" \n# \"No-slip\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "40. Boundary Forcing --&gt; Tracers --&gt; Sunlight Penetration\nProperties of sunlight penetration scheme in ocean\n40.1. Scheme\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of sunlight penetration scheme in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"1 extinction depth\" \n# \"2 extinction depth\" \n# \"3 extinction depth\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "40.2. Ocean Colour\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs the ocean sunlight penetration scheme ocean colour dependent ?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.ocean_colour') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "40.3. Extinction Depth\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe and list extinctions depths for sunlight penetration scheme (if applicable).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.extinction_depth') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "41. Boundary Forcing --&gt; Tracers --&gt; Fresh Water Forcing\nProperties of surface fresh water forcing in ocean\n41.1. From Atmopshere\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of surface fresh water forcing from atmos in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_atmopshere') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Freshwater flux\" \n# \"Virtual salt flux\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "41.2. From Sea Ice\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of surface fresh water forcing from sea-ice in ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_sea_ice') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Freshwater flux\" \n# \"Virtual salt flux\" \n# \"Real salt flux\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "41.3. Forced Mode Restoring\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nType of surface salinity restoring in forced mode (OMIP)", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.forced_mode_restoring') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "©2017 ES-DOC" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
mmckerns/tutmom
solutions.ipynb
bsd-3-clause
[ "Solutions to exercises\nEXERCISE: Solve the constrained programming problem by any of the means above.\nMinimize: f = -1x[0] + 4x[1]\nSubject to: <br>\n-3x[0] + 1x[1] <= 6 <br>\n1x[0] + 2x[1] <= 4 <br>\nx[1] >= -3 <br>\nwhere: -inf <= x[0] <= inf", "import cvxopt as cvx\nfrom cvxopt import solvers as cvx_solvers\nQ = cvx.matrix([[0.,0.],[0.,0.]])\np = cvx.matrix([-1., 4.])\nG = cvx.matrix([[-3., 1., 0.],[1., 2., -1.]])\nh = cvx.matrix([6., 4., 3.])\nsol = cvx_solvers.qp(Q, p, G, h)\nprint(sol['x'])", "EXERCISE: Use any of the solvers we've seen thus far to find the minimum of the zimmermann function (i.e. use mystic.models.zimmermann as the objective). Use the bounds suggested below, if your choice of solver allows it.", "import scipy.optimize as opt\nimport mystic.models\nresult = opt.minimize(mystic.models.zimmermann, [10., 1.], method='powell')\nprint(result.x)", "EXERCISE: Do the same for the fosc3d function found at mystic.models.fosc3d, using the bounds suggested by the documentation, if your chosen solver accepts bounds or constraints.", "import scipy.optimize as opt\nimport mystic.models\nresult = opt.minimize(mystic.models.fosc3d, [-5., 0.5], method='powell')\nprint(result.x)", "EXERCISE: Use mystic to find the minimum for the peaks test function, with the bound specified by the mystic.models.peaks documentation.", "import mystic\nimport mystic.models\nresult = mystic.solvers.fmin_powell(mystic.models.peaks, [0., -2.], bounds=[(-5.,5.)]*2)\nprint(result)", "EXERCISE: Use mystic to do a fit to the noisy data in the scipy.optimize.curve_fit example (the least squares fit).", "import numpy as np\nimport scipy.stats as stats\nfrom mystic.solvers import fmin_powell\nfrom mystic import reduced\n\n# Define the function to fit.\ndef function(coeffs, x):\n a,b,f,phi = coeffs\n return a * np.exp(-b * np.sin(f * x + phi))\n\n# Create a noisy data set around the actual parameters\ntrue_params = [3, 2, 1, np.pi/4]\nprint(\"target parameters: {}\".format(true_params))\nx = np.linspace(0, 2*np.pi, 25)\nexact = function(true_params, x)\nnoisy = exact + 0.3*stats.norm.rvs(size=len(x))\n\n# Define an objective that fits against the noisy data\n@reduced(lambda x,y: abs(x)+abs(y))\ndef objective(coeffs, x, y):\n return function(coeffs, x) - y\n\n# Use curve_fit to estimate the function parameters from the noisy data.\ninitial_guess = [1,1,1,1]\nargs = (x, noisy)\nestimated_params = fmin_powell(objective, initial_guess, args=args)\nprint(\"solved parameters: {}\".format(estimated_params))", "EXERCISE: Solve the chebyshev8.cost example exactly, by applying the knowledge that the last term in the chebyshev polynomial will always be be one. Use numpy.round or mystic.constraints.integers or to constrain solutions to the set of integers. 
Does using mystic.suppressed to supress small numbers accelerate the solution?", "# Differential Evolution solver\nfrom mystic.solvers import DifferentialEvolutionSolver2\n\n# Chebyshev polynomial and cost function\nfrom mystic.models.poly import chebyshev8, chebyshev8cost\nfrom mystic.models.poly import chebyshev8coeffs\n\n# tools\nfrom mystic.termination import VTR, CollapseAt, Or\nfrom mystic.strategy import Best1Exp\nfrom mystic.monitors import VerboseMonitor\nfrom mystic.tools import random_seed\nfrom mystic.math import poly1d\nimport numpy as np\n\n\nif __name__ == '__main__':\n\n print(\"Differential Evolution\")\n print(\"======================\")\n ndim = 9\n random_seed(123)\n\n # configure monitor\n stepmon = VerboseMonitor(50,50)\n\n # build a constraints function\n def constraints(x):\n x[-1] = 1.\n return np.round(x)\n\n stop = Or(VTR(0.0001), CollapseAt(0.0, generations=2))\n\n # use DE to solve 8th-order Chebyshev coefficients\n npop = 10*ndim\n solver = DifferentialEvolutionSolver2(ndim,npop)\n solver.SetRandomInitialPoints(min=[-100]*ndim, max=[100]*ndim)\n solver.SetGenerationMonitor(stepmon)\n solver.SetConstraints(constraints)\n solver.enable_signal_handler()\n solver.Solve(chebyshev8cost, termination=stop, strategy=Best1Exp, \\\n CrossProbability=1.0, ScalingFactor=0.9)\n solution = solver.Solution()\n\n # use monitor to retrieve results information\n iterations = len(stepmon)\n cost = stepmon.y[-1]\n print(\"Generation %d has best Chi-Squared: %f\" % (iterations, cost))\n\n # use pretty print for polynomials\n print(poly1d(solution))\n\n # compare solution with actual 8th-order Chebyshev coefficients\n print(\"\\nActual Coefficients:\\n %s\\n\" % poly1d(chebyshev8coeffs))", "EXERCISE: Replace the symbolic constraints in the following \"Pressure Vessel Design\" code with explicit penalty functions (i.e. use a compound penalty built with mystic.penalty.quadratic_inequality).", "\"Pressure Vessel Design\"\n\ndef objective(x):\n x0,x1,x2,x3 = x\n return 0.6224*x0*x2*x3 + 1.7781*x1*x2**2 + 3.1661*x0**2*x3 + 19.84*x0**2*x2\n\nbounds = [(0,1e6)]*4\n# with penalty='penalty' applied, solution is:\nxs = [0.72759093, 0.35964857, 37.69901188, 240.0]\nys = 5804.3762083\n\nfrom mystic.constraints import as_constraint\nfrom mystic.penalty import quadratic_inequality\n\ndef penalty1(x): # <= 0.0\n return -x[0] + 0.0193*x[2]\n\ndef penalty2(x): # <= 0.0\n return -x[1] + 0.00954*x[2]\n\ndef penalty3(x): # <= 0.0\n from math import pi\n return -pi*x[2]**2*x[3] - (4/3.)*pi*x[2]**3 + 1296000.0\n\ndef penalty4(x): # <= 0.0\n return x[3] - 240.0\n\n@quadratic_inequality(penalty1, k=1e12)\n@quadratic_inequality(penalty2, k=1e12)\n@quadratic_inequality(penalty3, k=1e12)\n@quadratic_inequality(penalty4, k=1e12)\ndef penalty(x):\n return 0.0\n\n\nif __name__ == '__main__':\n\n from mystic.solvers import diffev2\n from mystic.math import almostEqual\n\n result = diffev2(objective, x0=bounds, bounds=bounds, penalty=penalty,\n npop=40, gtol=500, disp=True, full_output=True)\n print(result[0])", "EXERCISE: Solve the cvxopt \"qp\" example with mystic. Use symbolic constaints, penalty functions, or constraints operators. 
If you get it quickly, do all three methods.", "def objective(x):\n x0,x1 = x\n return 2*x0**2 + x1**2 + x0*x1 + x0 + x1\n\nbounds = [(0.0, None),(0.0, None)]\n\n# with penalty='penalty' applied, solution is:\nxs = [0.25, 0.75]\nys = 1.875\n\nfrom mystic.math.measures import normalize\n\ndef constraint(x): # impose exactly\n return normalize(x, 1.0)\n\n\nif __name__ == '__main__':\n\n from mystic.solvers import diffev2, fmin_powell\n\n result = diffev2(objective, x0=bounds, bounds=bounds, npop=40,\n constraints=constraint, disp=False, full_output=True)\n print(result[0])", "EXERCISE: Convert one of our previous mystic examples to use parallel computing. Note that if the solver has a SetMapper method, it can take a parallel map.", "from mystic.termination import VTR, ChangeOverGeneration, And, Or\nstop = Or(And(VTR(), ChangeOverGeneration()), VTR(1e-8))\n\nfrom mystic.models import rosen\nfrom mystic.monitors import VerboseMonitor\nfrom mystic.solvers import DifferentialEvolutionSolver2\n\nfrom pathos.pools import ThreadPool\n\n\nif __name__ == '__main__':\n\n solver = DifferentialEvolutionSolver2(3,40)\n solver.SetRandomInitialPoints([-10,-10,-10],[10,10,10])\n solver.SetGenerationMonitor(VerboseMonitor(10))\n solver.SetMapper(ThreadPool().map) #NOTE: evaluation of objective in parallel\n solver.SetTermination(stop)\n solver.SetObjective(rosen)\n solver.SetStrictRanges([-10,-10,-10],[10,10,10])\n solver.SetEvaluationLimits(generations=600)\n solver.Solve()\n\n print(solver.bestSolution)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
Krastanov/cutiepy
examples/Lindblad_Master_Equation_Solver_Examples.ipynb
bsd-3-clause
[ "Table of Contents\n\nRabi Oscillations\nSimulating the Full Hamiltonian\nWith Rotating Wave Approximation\nWith $\\gamma_1$ collapse\nWith $\\gamma_2$ collapse\n\n\n\n\nCoherent State in a Harmonic Oscillator", "from cutiepy import *\n%matplotlib inline\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# TODO: implement sparse Lindblad operator\ncutiepy.operators.SPARSITY_N_CUTOFF = 60", "Rabi Oscillations\n$\\hat{H} = \\hat{H}_0 + \\Omega \\sin((\\omega_0+\\Delta)t) \\hat{\\sigma}_x$\n$\\hat{H}_0 = \\frac{\\omega_0}{2}\\hat{\\sigma}_z$", "initial_state = basis(2, 0)\ninitial_state\n\nω0 = 1\nΔ = 0.002\nΩ = 0.005\nts = 6*np.pi/Ω*np.linspace(0,1,120)\nH = ω0/2 * sigmaz() + Ω * sigmax() * sin((ω0+Δ)*t)\nH\n\nres = mesolve(H, [], initial_state, ts)\n\nσz_expect = expect(sigmaz(), res)\n\nres[20]\n\nplt.plot(ts*Ω/np.pi, σz_expect, 'r.', label='numerical result')\nΩp = (Ω**2+Δ**2)**0.5\nplt.plot(ts*Ω/np.pi, 1-(Ω/Ωp)**2*2*np.sin(Ωp*ts/2)**2, 'b-',\n label=r'$1-2(\\Omega^\\prime/\\Omega)^2\\sin^2(\\Omega^\\prime t/2)$')\nplt.title(r'$\\langle\\sigma_z\\rangle$-vs-$t\\Omega/\\pi$ at '\n r'$\\Delta/\\Omega=%.2f$, $\\omega_0/\\Omega=%.2f$'%(Δ/Ω, ω0/Ω))\nplt.ylim(-1,1)\nplt.legend(loc=3);", "With Rotating Wave Approximation\n$\\hat{H}^\\prime = e^{i\\hat{H}_0 t}\\hat{H} e^{-i\\hat{H}_0 t} \\approx \\frac{\\Delta}{2} \\hat{\\sigma}_z + \\frac{\\Omega}{2} \\hat{\\sigma}_x$", "Hp = Δ/2 * sigmaz() + Ω/2 * sigmax()\nHp\n\nres = mesolve(Hp, [], initial_state, ts)\n\nσz_expect = expect(sigmaz(), res)\n\nplt.plot(ts*Ω/np.pi, σz_expect, 'r.', label='numerical result')\nΩp = (Ω**2+Δ**2)**0.5\nplt.plot(ts*Ω/np.pi, 1-(Ω/Ωp)**2*2*np.sin(Ωp*ts/2)**2, 'b-',\n label=r'$1-2(\\Omega^\\prime/\\Omega)^2\\sin^2(\\Omega^\\prime t/2)$')\nplt.title(r'$\\langle\\sigma_z\\rangle$-vs-$t\\Omega/\\pi$ at '\n r'$\\Delta/\\Omega=%.2f$ in RWA'%(Δ/Ω))\nplt.ylim(-1,1)\nplt.legend(loc=3);", "With $\\gamma_1$ collapse", "γ1 = 0.2*Ω\nc1 = γ1**0.5 * sigmam()\nc1\n\nres = mesolve(Hp, [c1], initial_state, ts)\n\nσz_expect = expect(sigmaz(), res)\n\nplt.plot(ts*Ω/np.pi, σz_expect, 'r.', label='numerical result')\nplt.ylim(-1,1)\nplt.title(r'$\\langle\\sigma_z\\rangle$-vs-$t\\Omega/\\pi$ at '\n r'$\\Delta/\\Omega=%.2f$ in RWA'%(Δ/Ω) + '\\n' +\n r'with $\\gamma_1 \\hat{\\sigma}_-$ at $\\gamma_1=0.2\\Omega$')\nplt.hlines(0,0,ts[-1]*Ω/np.pi)\nplt.legend(loc=3);", "With $\\gamma_2$ collapse", "γ2 = 0.2*Ω\nc2 = γ2**0.5 * sigmaz()\nc2\n\nres = mesolve(Hp, [c2], initial_state, ts)\n\nσz_expect = expect(sigmaz(), res)\n\nplt.plot(ts*Ω/np.pi, σz_expect, 'r.', label='numerical result')\nplt.ylim(-1,1)\nplt.title(r'$\\langle\\sigma_z\\rangle$-vs-$t\\Omega/\\pi$ at '\n r'$\\Delta/\\Omega=%.2f$ in RWA'%(Δ/Ω) + '\\n' +\n r'with $\\gamma_2 \\hat{\\sigma}_z$ at $\\gamma_2=0.2\\Omega$')\nplt.hlines(0,0,ts[-1]*Ω/np.pi)\nplt.legend(loc=3);", "Coherent State in a Harmonic Oscillator\n$|\\alpha\\rangle$ evolving under $\\hat{H} = \\hat{n}$ coupled to a zero temperature heat bath $\\kappa = 0.5$", "N_cutoff = 40\nα = 2.5\ninitial_state = coherent(N_cutoff, α)\ninitial_state\n\nH = num(N_cutoff)\nH\n\nκ = 0.5\nn_th = 0\n\nc_down = (κ * (1 + n_th))**2 * destroy(N_cutoff)\nc_down\n\nts = 2*np.pi*np.linspace(0,1,41)\nres = mesolve(H, [c_down], initial_state, ts)\na = destroy(N_cutoff)\na_expect = expect(a, res, keep_complex=True)\n\nplt.figure(figsize=(4,4))\nplt.plot(np.real(a_expect), np.imag(a_expect), 'b-')\nfor t, alpha in list(zip(ts,a_expect))[:40:4]:\n plt.plot(np.real(alpha), np.imag(alpha), 'r.')\n plt.text(np.real(alpha), np.imag(alpha), 
r'$t=%.1f\\pi$'%(t/np.pi), fontsize=14)\nplt.title(r'$\\langle\\hat{a}\\rangle$-vs-$t$')\nplt.ylabel(r'$\\mathcal{I}(\\alpha)$')\nplt.xlabel(r'$\\mathcal{R}(\\alpha)$')\nl = abs(a_expect[0])\nplt.xlim(-l,l)\nplt.ylim(-l,l);" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
albahnsen/ML_SecurityInformatics
notebooks/14-KaggleCompetition.ipynb
mit
[ "14 - Kaggle Competition\nFraud Detection\nhttps://inclass.kaggle.com/c/easy-ml-class\nby Alejandro Correa Bahnsen\nversion 0.1, May 2016\nPart of the class Machine Learning for Security Informatics\nThis notebook is licensed under a [Creative Commons Attribution-ShareAlike 3.0 Unported License]\npip install tqdm\nFraud Detection", "import pandas as pd\nimport zipfile\nwith zipfile.ZipFile('../datasets/fraud_transactions_kaggle.csv.zip', 'r') as z:\n f = z.open('fraud_transactions_kaggle.csv')\n data = pd.read_csv(f, index_col=0)\n\ndata.head()\n\ndata.tail()\n\ndata.fraud.value_counts(dropna=False)", "Estimate aggregated features", "from datetime import datetime, timedelta\nfrom tqdm import tqdm", "Split for each account and create the date as index", "card_numbers = data['card_number'].unique()\ndata['trx_id'] = data.index\ndata.index = pd.DatetimeIndex(data['date'])\n\ndata_ = []\nfor card_number in tqdm(card_numbers):\n data_.append(data.query('card_number == ' + str(card_number)))", "Create Aggregated Features for one account", "res_agg = pd.DataFrame(index=data['trx_id'].values, \n columns=['Trx_sum_7D', 'Trx_count_1D'])\n\ntrx = data_[0]\n\nfor i in range(trx.shape[0]):\n date = trx.index[i]\n trx_id = int(trx.ix[i, 'trx_id'])\n # Sum 7 D\n agg_ = trx[date-pd.datetools.to_offset('7D').delta:date-timedelta(0,0,1)]\n res_agg.loc[trx_id, 'Trx_sum_7D'] = agg_['amount'].sum()\n # Count 1D\n agg_ = trx[date-pd.datetools.to_offset('1D').delta:date-timedelta(0,0,1)]\n res_agg.loc[trx_id, 'Trx_count_1D'] = agg_['amount'].shape[0]\n\nres_agg.mean()", "All accounts", "for trx in tqdm(data_):\n for i in range(trx.shape[0]):\n date = trx.index[i]\n trx_id = int(trx.ix[i, 'trx_id'])\n # Sum 7 D\n agg_ = trx[date-pd.datetools.to_offset('7D').delta:date-timedelta(0,0,1)]\n res_agg.loc[trx_id, 'Trx_sum_7D'] = agg_['amount'].sum()\n # Count 1D\n agg_ = trx[date-pd.datetools.to_offset('1D').delta:date-timedelta(0,0,1)]\n res_agg.loc[trx_id, 'Trx_count_1D'] = agg_['amount'].shape[0]\n\nres_agg.head()\n\ndata.index = data.trx_id\n\ndata = data.join(res_agg)\n\ndata.sample(15, random_state=42).sort_index()", "Split train and test", "X = data.loc[~data.fraud.isnull()]\n\ny = X.fraud\n\nX = X.drop(['fraud', 'date', 'card_number'], axis=1)\n\nX_kaggle = data.loc[data.fraud.isnull()]\nX_kaggle = X_kaggle.drop(['fraud', 'date', 'card_number'], axis=1)\n\nX_kaggle.head()", "Simple Random Forest", "from sklearn.ensemble import RandomForestClassifier\n\nclf = RandomForestClassifier(n_estimators=100, n_jobs=-1, class_weight='balanced')\n\nfrom sklearn.metrics import fbeta_score", "KFold cross-validation", "from sklearn.cross_validation import KFold\n\nkf = KFold(X.shape[0], n_folds=5)\nres = []\nfor train, test in kf:\n X_train, X_test, y_train, y_test = X.iloc[train], X.iloc[test], y.iloc[train], y.iloc[test]\n clf.fit(X_train, y_train)\n y_pred_proba = clf.predict_proba(X_test)[:, 1]\n y_pred = (y_pred_proba>0.05).astype(int)\n res.append(fbeta_score(y_test, y_pred, beta=2))\n\npd.Series(res).describe()", "Train with all\nPredict and send to Kaggle", "clf.fit(X, y)\n\ny_pred = clf.predict_proba(X_kaggle)[:, 1]\n\ny_pred = (y_pred>0.05).astype(int)\n\ny_pred = pd.Series(y_pred,name='fraud', index=X_kaggle.index)\n\ny_pred.head(10)\n\ny_pred.to_csv('fraud_transactions_kaggle_1.csv', header=True, index_label='ID')", "Main Issues\n\nClass imbalance\nFeature creation\nModel selection\nThreshold selection" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
GoogleCloudPlatform/vertex-ai-samples
notebooks/official/pipelines/google_cloud_pipeline_components_automl_text.ipynb
apache-2.0
[ "# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "Vertex AI Pipelines: AutoML text classification pipelines using google-cloud-pipeline-components\n<table align=\"left\">\n <td>\n <a href=\"https://colab.research.google.com/github/GoogleCloudPlatform/vertex-ai-samples/blob/master/notebooks/official/pipelines/google_cloud_pipeline_components_automl_text.ipynb\">\n <img src=\"https://cloud.google.com/ml-engine/images/colab-logo-32px.png\" alt=\"Colab logo\"> Run in Colab\n </a>\n </td>\n <td>\n <a href=\"https://github.com/GoogleCloudPlatform/vertex-ai-samples/blob/master/notebooks/official/pipelines/google_cloud_pipeline_components_automl_text.ipynb\">\n <img src=\"https://cloud.google.com/ml-engine/images/github-logo-32px.png\" alt=\"GitHub logo\">\n View on GitHub\n </a>\n </td>\n <td>\n <a href=\"https://console.cloud.google.com/vertex-ai/notebooks/deploy-notebook?download_url=https://github.com/GoogleCloudPlatform/vertex-ai-samples/blob/master/notebooks/official/pipelines/google_cloud_pipeline_components_automl_text.ipynb\">\n Open in Vertex AI Workbench\n </a>\n </td>\n</table>\n<br/><br/><br/>\nOverview\nThis notebook shows how to use the components defined in google_cloud_pipeline_components to build an AutoML text classification workflow on Vertex AI Pipelines.\nDataset\nThe dataset used for this tutorial is the Happy Moments dataset from Kaggle Datasets. The version of the dataset you will use in this tutorial is stored in a public Cloud Storage bucket.\nObjective\nIn this tutorial, you create an AutoML text classification using a pipeline with components from google_cloud_pipeline_components.\nThe steps performed include:\n\nCreate a Dataset resource.\nTrain an AutoML Model resource.\nCreates an Endpoint resource.\nDeploys the Model resource to the Endpoint resource.\n\nThe components are documented here.\nCosts\nThis tutorial uses billable components of Google Cloud:\n\nVertex AI\nCloud Storage\n\nLearn about Vertex AI\npricing and Cloud Storage\npricing, and use the Pricing\nCalculator\nto generate a cost estimate based on your projected usage.\nSet up your local development environment\nIf you are using Colab or Google Cloud Notebook, your environment already meets all the requirements to run this notebook. You can skip this step.\nOtherwise, make sure your environment meets this notebook's requirements. You need the following:\n\nThe Cloud Storage SDK\nGit\nPython 3\nvirtualenv\nJupyter notebook running in a virtual environment with Python 3\n\nThe Cloud Storage guide to Setting up a Python development environment and the Jupyter installation guide provide detailed instructions for meeting these requirements. 
The following steps provide a condensed set of instructions:\n\n\nInstall and initialize the SDK.\n\n\nInstall Python 3.\n\n\nInstall virtualenv and create a virtual environment that uses Python 3.\n\n\nActivate that environment and run pip3 install Jupyter in a terminal shell to install Jupyter.\n\n\nRun jupyter notebook on the command line in a terminal shell to launch Jupyter.\n\n\nOpen this notebook in the Jupyter Notebook Dashboard.\n\n\nInstallation\nInstall the latest version of Vertex AI SDK for Python.", "import os\n\n# Google Cloud Notebook\nif os.path.exists(\"/opt/deeplearning/metadata/env_version\"):\n USER_FLAG = \"--user\"\nelse:\n USER_FLAG = \"\"\n\n! pip3 install --upgrade google-cloud-aiplatform $USER_FLAG", "Install the latest GA version of google-cloud-storage library as well.", "! pip3 install -U google-cloud-storage $USER_FLAG", "Install the latest GA version of google-cloud-pipeline-components library as well.", "! pip3 install $USER kfp google-cloud-pipeline-components --upgrade", "Restart the kernel\nOnce you've installed the additional packages, you need to restart the notebook kernel so it can find the packages.", "import os\n\nif not os.getenv(\"IS_TESTING\"):\n # Automatically restart kernel after installs\n import IPython\n\n app = IPython.Application.instance()\n app.kernel.do_shutdown(True)", "Check the versions of the packages you installed. The KFP SDK version should be >=1.6.", "! python3 -c \"import kfp; print('KFP SDK version: {}'.format(kfp.__version__))\"\n! python3 -c \"import google_cloud_pipeline_components; print('google_cloud_pipeline_components version: {}'.format(google_cloud_pipeline_components.__version__))\"", "Before you begin\nGPU runtime\nThis tutorial does not require a GPU runtime.\nSet up your Google Cloud project\nThe following steps are required, regardless of your notebook environment.\n\n\nSelect or create a Google Cloud project. When you first create an account, you get a $300 free credit towards your compute/storage costs.\n\n\nMake sure that billing is enabled for your project.\n\n\nEnable the Vertex AI APIs, Compute Engine APIs, and Cloud Storage.\n\n\nThe Google Cloud SDK is already installed in Google Cloud Notebook.\n\n\nEnter your project ID in the cell below. Then run the cell to make sure the\nCloud SDK uses the right project for all the commands in this notebook.\n\n\nNote: Jupyter runs lines prefixed with ! as shell commands, and it interpolates Python variables prefixed with $.", "PROJECT_ID = \"[your-project-id]\" # @param {type:\"string\"}\n\nif PROJECT_ID == \"\" or PROJECT_ID is None or PROJECT_ID == \"[your-project-id]\":\n # Get your GCP project id from gcloud\n shell_output = ! gcloud config list --format 'value(core.project)' 2>/dev/null\n PROJECT_ID = shell_output[0]\n print(\"Project ID:\", PROJECT_ID)\n\n! gcloud config set project $PROJECT_ID", "Region\nYou can also change the REGION variable, which is used for operations\nthroughout the rest of this notebook. Below are regions supported for Vertex AI. We recommend that you choose the region closest to you.\n\nAmericas: us-central1\nEurope: europe-west4\nAsia Pacific: asia-east1\n\nYou may not use a multi-regional bucket for training with Vertex AI. Not all regions provide support for all Vertex AI services.\nLearn more about Vertex AI regions", "REGION = \"us-central1\" # @param {type: \"string\"}", "Timestamp\nIf you are in a live tutorial session, you might be using a shared test account or project. 
To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append the timestamp onto the name of resources you create in this tutorial.", "from datetime import datetime\n\nTIMESTAMP = datetime.now().strftime(\"%Y%m%d%H%M%S\")", "Authenticate your Google Cloud account\nIf you are using Google Cloud Notebook, your environment is already authenticated. Skip this step.\nIf you are using Colab, run the cell below and follow the instructions when prompted to authenticate your account via oAuth.\nOtherwise, follow these steps:\nIn the Cloud Console, go to the Create service account key page.\nClick Create service account.\nIn the Service account name field, enter a name, and click Create.\nIn the Grant this service account access to project section, click the Role drop-down list. Type \"Vertex\" into the filter box, and select Vertex Administrator. Type \"Storage Object Admin\" into the filter box, and select Storage Object Admin.\nClick Create. A JSON file that contains your key downloads to your local environment.\nEnter the path to your service account key as the GOOGLE_APPLICATION_CREDENTIALS variable in the cell below and run the cell.", "# If you are running this notebook in Colab, run this cell and follow the\n# instructions to authenticate your GCP account. This provides access to your\n# Cloud Storage bucket and lets you submit training jobs and prediction\n# requests.\n\nimport os\nimport sys\n\n# If on Google Cloud Notebook, then don't execute this code\nif not os.path.exists(\"/opt/deeplearning/metadata/env_version\"):\n if \"google.colab\" in sys.modules:\n from google.colab import auth as google_auth\n\n google_auth.authenticate_user()\n\n # If you are running this notebook locally, replace the string below with the\n # path to your service account key and run this cell to authenticate your GCP\n # account.\n elif not os.getenv(\"IS_TESTING\"):\n %env GOOGLE_APPLICATION_CREDENTIALS ''", "Create a Cloud Storage bucket\nThe following steps are required, regardless of your notebook environment.\nWhen you initialize the Vertex AI SDK for Python, you specify a Cloud Storage staging bucket. The staging bucket is where all the data associated with your dataset and model resources are retained across sessions.\nSet the name of your Cloud Storage bucket below. Bucket names must be globally unique across all Google Cloud projects, including those outside of your organization.", "BUCKET_NAME = \"gs://[your-bucket-name]\" # @param {type:\"string\"}\n\nif BUCKET_NAME == \"\" or BUCKET_NAME is None or BUCKET_NAME == \"gs://[your-bucket-name]\":\n BUCKET_NAME = \"gs://\" + PROJECT_ID + \"aip-\" + TIMESTAMP", "Only if your bucket doesn't already exist: Run the following cell to create your Cloud Storage bucket.", "! gsutil mb -l $REGION $BUCKET_NAME", "Finally, validate access to your Cloud Storage bucket by examining its contents:", "! 
gsutil ls -al $BUCKET_NAME", "Service Account\nIf you don't know your service account, try to get your service account using gcloud command by executing the second cell below.", "SERVICE_ACCOUNT = \"[your-service-account]\" # @param {type:\"string\"}\n\nif (\n SERVICE_ACCOUNT == \"\"\n or SERVICE_ACCOUNT is None\n or SERVICE_ACCOUNT == \"[your-service-account]\"\n):\n # Get your GCP project id from gcloud\n shell_output = !gcloud auth list 2>/dev/null\n SERVICE_ACCOUNT = shell_output[2].strip()\n print(\"Service Account:\", SERVICE_ACCOUNT)", "Set service account access for Vertex AI Pipelines\nRun the following commands to grant your service account access to read and write pipeline artifacts in the bucket that you created in the previous step -- you only need to run these once per service account.", "! gsutil iam ch serviceAccount:{SERVICE_ACCOUNT}:roles/storage.objectCreator $BUCKET_NAME\n\n! gsutil iam ch serviceAccount:{SERVICE_ACCOUNT}:roles/storage.objectViewer $BUCKET_NAME", "Set up variables\nNext, set up some variables used throughout the tutorial.\nImport libraries and define constants", "import google.cloud.aiplatform as aip", "Vertex AI Pipelines constants\nSetup up the following constants for Vertex AI Pipelines:", "PIPELINE_ROOT = \"{}/pipeline_root/happydb\".format(BUCKET_NAME)", "Additional imports.", "import kfp", "Initialize Vertex AI SDK for Python\nInitialize the Vertex AI SDK for Python for your project and corresponding bucket.", "aip.init(project=PROJECT_ID, staging_bucket=BUCKET_NAME)", "Define AutoML text classification model pipeline that uses components from google_cloud_pipeline_components\nNext, you define the pipeline.\nCreate and deploy an AutoML text classification Model resource using a Dataset resource.", "IMPORT_FILE = \"gs://cloud-ml-data/NL-classification/happiness.csv\"\n\n\[email protected](name=\"automl-text-classification\" + TIMESTAMP)\ndef pipeline(\n project: str = PROJECT_ID, region: str = REGION, import_file: str = IMPORT_FILE\n):\n from google_cloud_pipeline_components import aiplatform as gcc_aip\n from google_cloud_pipeline_components.v1.endpoint import (EndpointCreateOp,\n ModelDeployOp)\n\n dataset_create_task = gcc_aip.TextDatasetCreateOp(\n display_name=\"train-automl-happydb\",\n gcs_source=import_file,\n import_schema_uri=aip.schema.dataset.ioformat.text.multi_label_classification,\n project=project,\n )\n\n training_run_task = gcc_aip.AutoMLTextTrainingJobRunOp(\n dataset=dataset_create_task.outputs[\"dataset\"],\n display_name=\"train-automl-happydb\",\n prediction_type=\"classification\",\n multi_label=True,\n training_fraction_split=0.6,\n validation_fraction_split=0.2,\n test_fraction_split=0.2,\n model_display_name=\"train-automl-happydb\",\n project=project,\n )\n\n endpoint_op = EndpointCreateOp(\n project=project,\n location=region,\n display_name=\"train-automl-flowers\",\n )\n\n ModelDeployOp(\n model=training_run_task.outputs[\"model\"],\n endpoint=endpoint_op.outputs[\"endpoint\"],\n automatic_resources_min_replica_count=1,\n automatic_resources_max_replica_count=1,\n )", "Compile the pipeline\nNext, compile the pipeline.", "from kfp.v2 import compiler # noqa: F811\n\ncompiler.Compiler().compile(\n pipeline_func=pipeline,\n package_path=\"text classification_pipeline.json\".replace(\" \", \"_\"),\n)", "Run the pipeline\nNext, run the pipeline.", "DISPLAY_NAME = \"happydb_\" + TIMESTAMP\n\njob = aip.PipelineJob(\n display_name=DISPLAY_NAME,\n template_path=\"text classification_pipeline.json\".replace(\" \", \"_\"),\n 
pipeline_root=PIPELINE_ROOT,\n enable_caching=False,\n)\n\njob.run()\n\n! rm text_classification_pipeline.json", "Click on the generated link to see your run in the Cloud Console.\n<!-- It should look something like this as it is running:\n\n<a href=\"https://storage.googleapis.com/amy-jo/images/mp/automl_tabular_classif.png\" target=\"_blank\"><img src=\"https://storage.googleapis.com/amy-jo/images/mp/automl_tabular_classif.png\" width=\"40%\"/></a> -->\n\nIn the UI, many of the pipeline DAG nodes will expand or collapse when you click on them. Here is a partially-expanded view of the DAG (click image to see larger version).\n<a href=\"https://storage.googleapis.com/amy-jo/images/mp/automl_text_classif.png\" target=\"_blank\"><img src=\"https://storage.googleapis.com/amy-jo/images/mp/automl_text_classif.png\" width=\"40%\"/></a>\nCleaning up\nTo clean up all Google Cloud resources used in this project, you can delete the Google Cloud\nproject you used for the tutorial.\nOtherwise, you can delete the individual resources you created in this tutorial -- Note: this is auto-generated and not all resources may be applicable for this tutorial:\n\nDataset\nPipeline\nModel\nEndpoint\nBatch Job\nCustom Job\nHyperparameter Tuning Job\nCloud Storage Bucket", "delete_dataset = True\ndelete_pipeline = True\ndelete_model = True\ndelete_endpoint = True\ndelete_batchjob = True\ndelete_customjob = True\ndelete_hptjob = True\ndelete_bucket = True\n\ntry:\n if delete_model and \"DISPLAY_NAME\" in globals():\n models = aip.Model.list(\n filter=f\"display_name={DISPLAY_NAME}\", order_by=\"create_time\"\n )\n model = models[0]\n aip.Model.delete(model)\n print(\"Deleted model:\", model)\nexcept Exception as e:\n print(e)\n\ntry:\n if delete_endpoint and \"DISPLAY_NAME\" in globals():\n endpoints = aip.Endpoint.list(\n filter=f\"display_name={DISPLAY_NAME}_endpoint\", order_by=\"create_time\"\n )\n endpoint = endpoints[0]\n endpoint.undeploy_all()\n aip.Endpoint.delete(endpoint.resource_name)\n print(\"Deleted endpoint:\", endpoint)\nexcept Exception as e:\n print(e)\n\nif delete_dataset and \"DISPLAY_NAME\" in globals():\n if \"text\" == \"tabular\":\n try:\n datasets = aip.TabularDataset.list(\n filter=f\"display_name={DISPLAY_NAME}\", order_by=\"create_time\"\n )\n dataset = datasets[0]\n aip.TabularDataset.delete(dataset.resource_name)\n print(\"Deleted dataset:\", dataset)\n except Exception as e:\n print(e)\n\n if \"text\" == \"image\":\n try:\n datasets = aip.ImageDataset.list(\n filter=f\"display_name={DISPLAY_NAME}\", order_by=\"create_time\"\n )\n dataset = datasets[0]\n aip.ImageDataset.delete(dataset.resource_name)\n print(\"Deleted dataset:\", dataset)\n except Exception as e:\n print(e)\n\n if \"text\" == \"text\":\n try:\n datasets = aip.TextDataset.list(\n filter=f\"display_name={DISPLAY_NAME}\", order_by=\"create_time\"\n )\n dataset = datasets[0]\n aip.TextDataset.delete(dataset.resource_name)\n print(\"Deleted dataset:\", dataset)\n except Exception as e:\n print(e)\n\n if \"text\" == \"video\":\n try:\n datasets = aip.VideoDataset.list(\n filter=f\"display_name={DISPLAY_NAME}\", order_by=\"create_time\"\n )\n dataset = datasets[0]\n aip.VideoDataset.delete(dataset.resource_name)\n print(\"Deleted dataset:\", dataset)\n except Exception as e:\n print(e)\n\ntry:\n if delete_pipeline and \"DISPLAY_NAME\" in globals():\n pipelines = aip.PipelineJob.list(\n filter=f\"display_name={DISPLAY_NAME}\", order_by=\"create_time\"\n )\n pipeline = pipelines[0]\n 
aip.PipelineJob.delete(pipeline.resource_name)\n print(\"Deleted pipeline:\", pipeline)\nexcept Exception as e:\n print(e)\n\nif delete_bucket and \"BUCKET_NAME\" in globals():\n ! gsutil rm -r $BUCKET_NAME" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
zzsza/Datascience_School
16. 과최적화와 정규화/02. 교차 검증.ipynb
mit
[ "교차 검증\n모형 검증\n예측 모형의 최종 성능을 객관적으로 측정하려면 모수 추정(parameter fitting) 즉 트레이닝(training)에 사용되지 않은 새로운 데이터, 즉 테스트 데이터를 사용해야 한다. 모형의 모수 갯수를 증가시킨다든가 커널 모형, 신경망 모형과 같은 비선형 모형을 사용하게 되면 트레이닝 데이터에 대한 예측 성능을 얼마든지 높일 수 있기 때문이다. 이러한 방법에 의해 과최적화(overfitting)가 일어나면 트레이닝 데이터에 대해서는 예측이 잘되지만 테스트 데이터에 대해서는 예측 성능이 급격히 떨어지는 현상이 발생한다.\n교차 검증\n위에서 지적한 바와 같이 모형 성능을 정상적으로 검사하려면 테스트 데이터가 별도로 있어야 하기 때문에 현실에서는 확보한 데이터 중 일부를 떼어내어 테스트 데이터로 사용한다. 그런데 테스트 데이터를 어떻게 골라내느냐에 따라 모형의 성능이 달라지므로 한 개의 테스트 데이터만 사용하는 것이 아니라 각기 다른 방법으로 서로 다른 테스트 데이터를 여러번 골라내서 복수의 테스트를 실시하는 것이 일반적이다.\n이러한 테스트 방법을 교차 검증(cross validation)이라고 한다. 교차 검증을 통한 모형 성능은 보통 다음과 같은 두 가지 값으로 나타난다.\n\n오차 평균(mean performance): 트레이닝에 사용되지 않은 테스트 데이터(test data)에 대해서 평균 오차의 크기가 얼마나 작은가?\n오차 분산(variance): 트레이닝에 사용되지 않은 테스트 데이터(test data)에 대해 오차의 크기가 얼마나 달라지는가?\n\n이 중에서 오차 분산을 계산하려면 테스트 데이터 셋이 최소한 세 개 세트가 있어야 한다.\nScikit-Learn의 교차 검증 기능\nScikit-Learn에서는 교차 검증을 위해 전체 데이터 셋에서 트레이닝용 데이터나 테스트용 데이터를 분리해 내는 여러가지 방법을 제공한다.\n\ndata를 train set과 test set으로 단순 분리\ndata splitter\n\ntrain_test_split() 명령\n\n\n복수의 test set 준비\n\ncross validation iterator\nKFold\nStratifiedKFold\nLabelKFold\nLeaveOneOut\nLeavePOut\nLeaveOneLabelOut\nLeavePLabelOut\nShuffleSplit\n\nLabelShuffleSplit\n\n\n복수의 test set 사용하여 평가 과정 반복\n\ncross validation calculator\ncross_val_score() \n\n단순 데이터 분리\ntrain_test_split() 명령은 데이터를 단순히 트레이닝 데이터와 테스트 데이터로 분리한다.\n\n인수\narrays : 데이터\ntest_size : 테스트 데이터 사이즈\ntrain_size : 사이즈\n\nrandom_state : 난수 시드\n\n\n반환값\n\n배열 리스트", "X = np.arange(10).reshape((5, 2))\nX\n\ny = np.arange(5)\ny\n\nfrom sklearn.cross_validation import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)\n\nX_train\n\ny_train\n\nX_test\n\ny_test", "K-fold CV\nK-fold CV(cross-validation) 방법은 데이터 셋을 K개의 sub-set로 분리하는 방법이다. 분리된 K개의 sub-set 중 하나만 제외한 K-1개의 sub-sets를 training set으로 이용하여 K개의 모형 추정한다. 
\n<img src=\"https://docs.google.com/drawings/d/1JdgUDzuE75LBxqT5sKOhlPgP6umEkvD3Sm-gKnu-jqA/pub?w=762&h=651\" style=\"margin: 0 auto 0 auto;\">\nScikit-Learn 의 cross_validation 서브 패키지는 K-Fold를 위한 KFold 클래스를 제공한다.", "N = 5\nX = np.arange(8 * N).reshape(-1, 2) * 10\ny = np.hstack([np.ones(N), np.ones(N) * 2, np.ones(N) * 3, np.ones(N) * 4])\nprint(\"X:\\n\", X, sep=\"\")\nprint(\"y:\\n\", y, sep=\"\")\n\nfrom sklearn.cross_validation import KFold\ncv = KFold(len(X), n_folds=3, random_state=0)\nfor train_index, test_index in cv:\n print(\"test y:\", y[test_index])\n print(\".\" * 80 ) \n print(\"train y:\", y[train_index])\n print(\"=\" * 80 )", "Stratified K-Fold\n\ntarget class가 어느 한 data set에 몰리지 않도록 한다", "from sklearn.cross_validation import StratifiedKFold\ncv = StratifiedKFold(y, n_folds=3, random_state=0)\nfor train_index, test_index in cv:\n print(\"test X:\\n\", X[test_index])\n print(\".\" * 80 ) \n print(\"test y:\", y[test_index])\n print(\"=\" * 80 )", "Leave-One-Out (LOO)\n\n하나의 sample만을 test set으로 남긴다.", "from sklearn.cross_validation import LeaveOneOut\ncv = LeaveOneOut(5)\nfor train_index, test_index in cv:\n print(\"test X:\", X[test_index])\n print(\".\" * 80 ) \n print(\"test y:\", y[test_index])\n print(\"=\" * 80 )", "Label K-Fold\n\n같은 label이 test와 train에 동시에 들어가지 않게 조절\nlabel에 의한 영향을 최소화", "from sklearn.cross_validation import LabelKFold\ncv = LabelKFold(y, n_folds=3)\nfor train_index, test_index in cv:\n print(\"test y:\", y[test_index])\n print(\".\" * 80 ) \n print(\"train y:\", y[train_index])\n print(\"=\" * 80 )", "ShuffleSplit\n\n중복된 데이터를 허용", "from sklearn.cross_validation import ShuffleSplit\ncv = ShuffleSplit(5)\nfor train_index, test_index in cv:\n print(\"test X:\", X[test_index])\n print(\"=\" * 20 ) ", "교차 평가 시행\nCV는 단순히 데이터 셋을 나누는 역할을 수행할 뿐이다. 실제로 모형의 성능(편향 오차 및 분산)을 구하려면 이렇게 나누어진 데이터셋을 사용하여 평가를 반복하여야 한다. 이 과정을 자동화하는 명령이 cross_val_score() 이다.\n\ncross_val_score(estimator, X, y=None, scoring=None, cv=None) \n\ncross validation iterator cv를 이용하여 X, y data 를 분할하고 estimator에 넣어서 scoring metric을 구하는 과정을 반복\n\n\n인수\n\nestimator : ‘fit’메서드가 제공되는 모형\nX : 배열\n독립 변수 데이터\ny : 배열\n종속 변수 데이터\nscoring : 문자열\n성능 검증에 사용할 함수\ncv : Cross Validator\nNone 이면 디폴트인 3-폴드 CV\n숫자 K 이면 K-폴드 CV\n\nCross Validator 클래스 객체\n\n\n반환값\n\nscores \n계산된 성능 값의 리스트", "from sklearn.datasets import make_regression\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import mean_squared_error\nX, y, coef = make_regression(n_samples=1000, n_features=1, noise=20, coef=True, random_state=0)\nmodel = LinearRegression()\ncv = KFold(1000, 10)\n\nscores = np.zeros(10)\nfor i, (train_index, test_index) in enumerate(cv):\n X_train = X[train_index]\n y_train = y[train_index]\n X_test = X[test_index]\n y_test = y[test_index]\n model.fit(X_train, y_train)\n y_pred = model.predict(X_test)\n scores[i] = mean_squared_error(y_test, y_pred)\n\nscores\n\nfrom sklearn.cross_validation import cross_val_score\ncross_val_score(model, X, y, \"mean_squared_error\", cv)", "회귀 분석에 사용되는 성능 함수들\n\nr2_score(y_true, y_pred[, ...]): R^2 (coefficient of determination) regression score function.\nexplained_variance_score(y_true, y_pred): Explained variance regression score function\nmean_squared_error(y_true, y_pred[, ...]): Mean squared error regression loss\nmean_absolute_error(y_true, y_pred): Mean absolute error regression loss\nmedian_absolute_error(y_true, y_pred): Median absolute error regression loss" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
phuongxuanpham/SelfDrivingCar
CarND-Camera-Calibration/camera_calibration.ipynb
gpl-3.0
[ "%%HTML\n<style> code {background-color : pink !important;} </style>", "Camera Calibration with OpenCV\nRun the code in the cell below to extract object points and image points for camera calibration.", "import numpy as np\nimport cv2\nimport glob\nimport matplotlib.pyplot as plt\n%matplotlib qt\n\n# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)\nobjp = np.zeros((6*8,3), np.float32)\nobjp[:,:2] = np.mgrid[0:8, 0:6].T.reshape(-1,2)\n\n# Arrays to store object points and image points from all the images.\nobjpoints = [] # 3d points in real world space\nimgpoints = [] # 2d points in image plane.\n\n# Make a list of calibration images\nimages = glob.glob('calibration_wide/GO*.jpg')\n\n# Step through the list and search for chessboard corners\nfor idx, fname in enumerate(images):\n img = cv2.imread(fname)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n # Find the chessboard corners\n ret, corners = cv2.findChessboardCorners(gray, (8,6), None)\n\n # If found, add object points, image points\n if ret == True:\n objpoints.append(objp)\n imgpoints.append(corners)\n\n # Draw and display the corners\n cv2.drawChessboardCorners(img, (8,6), corners, ret)\n #write_name = 'corners_found'+str(idx)+'.jpg'\n #cv2.imwrite(write_name, img)\n cv2.imshow('img', img)\n cv2.waitKey(500)\n\ncv2.destroyAllWindows()", "If the above cell ran sucessfully, you should now have objpoints and imgpoints needed for camera calibration. Run the cell below to calibrate, calculate distortion coefficients, and test undistortion on an image!", "import pickle\n%matplotlib inline\n\n# Test undistortion on an image\nimg = cv2.imread('calibration_wide/test_image.jpg')\nimg_size = (img.shape[1], img.shape[0])\n\n# Do camera calibration given object points and image points\nret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img_size,None,None)\n\n\ndst = cv2.undistort(img, mtx, dist, None, mtx)\ncv2.imwrite('calibration_wide/test_undist.jpg',dst)\n\n# Save the camera calibration result for later use (we won't worry about rvecs / tvecs)\ndist_pickle = {}\ndist_pickle[\"mtx\"] = mtx\ndist_pickle[\"dist\"] = dist\npickle.dump( dist_pickle, open( \"calibration_wide/wide_dist_pickle.p\", \"wb\" ) )\n#dst = cv2.cvtColor(dst, cv2.COLOR_BGR2RGB)\n# Visualize undistortion\nf, (ax1, ax2) = plt.subplots(1, 2, figsize=(20,10))\nax1.imshow(img)\nax1.set_title('Original Image', fontsize=30)\nax2.imshow(dst)\nax2.set_title('Undistorted Image', fontsize=30)" ]
[ "code", "markdown", "code", "markdown", "code" ]
paris-saclay-cds/python-workshop
Day_2_Software_engineering_best_practices/solutions/03_code_style.ipynb
bsd-3-clause
[ "import itertools\n\nimport six\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\n# preprocessing\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.decomposition import PCA\n\n# classifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.svm import LinearSVC\n\n# regressor\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.linear_model import RidgeCV\n\n# meta-estimator\nfrom sklearn.pipeline import make_pipeline\n\n# metric\nfrom sklearn.metrics import (r2_score, median_absolute_error,\n confusion_matrix)", "IO: Reading and preprocess the data\nWe can define a function which will read the data and process them.", "def read_spectra(path_csv):\n \"\"\"Read and parse data in pandas DataFrames.\n \n Parameters\n ----------\n path_csv : str\n Path to the CSV file to read.\n \n Returns\n -------\n spectra : pandas DataFrame, shape (n_spectra, n_freq_point)\n DataFrame containing all Raman spectra.\n \n concentration : pandas Series, shape (n_spectra,)\n Series containing the concentration of the molecule.\n \n molecule : pandas Series, shape (n_spectra,)\n Series containing the type of chemotherapeutic agent.\n \n \"\"\"\n if not isinstance(path_csv, six.string_types):\n raise TypeError(\"'path_csv' needs to be string. Got {}\"\n \" instead.\".format(type(path_csv)))\n else:\n if not path_csv.endswith('.csv'):\n raise ValueError('Wrong file format. Expecting csv file')\n \n data = pd.read_csv(path_csv)\n concentration = data['concentration']\n molecule = data['molecule']\n spectra_string = data['spectra']\n spectra = []\n for spec in spectra_string:\n # remove the first and last bracket and convert to a numpy array\n spectra.append(np.fromstring(spec[1:-1], sep=','))\n spectra = pd.DataFrame(spectra)\n \n return spectra, concentration, molecule\n\n# read the frequency and get a pandas serie\nfrequency = pd.read_csv('data/freq.csv')['freqs']\n\n# read all data for training\nfilenames = ['data/spectra_{}.csv'.format(i)\n for i in range(4)]\n\nspectra, concentration, molecule = [], [], []\nfor filename in filenames:\n spectra_file, concentration_file, molecule_file = read_spectra(filename)\n spectra.append(spectra_file)\n concentration.append(concentration_file)\n molecule.append(molecule_file)\n\n# Concatenate in single DataFrame and Serie\nspectra = pd.concat(spectra)\nconcentration = pd.concat(concentration)\nmolecule = pd.concat(molecule)", "Plot helper functions\nWe can create two functions: (i) to plot all spectra and (ii) plot the mean spectra with the std intervals.\nWe will make a \"private\" function which will be used by both plot types.", "def _apply_axis_layout(ax, title):\n \"\"\"Apply despine style and add labels to axis.\"\"\"\n ax.set_xlabel('Frequency')\n ax.set_ylabel('Intensity')\n ax.set_title(title)\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.get_xaxis().tick_bottom()\n ax.get_yaxis().tick_left()\n ax.spines['left'].set_position(('outward', 10))\n ax.spines['bottom'].set_position(('outward', 10))\n\ndef plot_spectra(frequency, spectra, title=''):\n \"\"\"Plot a bunch of Raman spectra.\n \n Parameters\n ----------\n frequency : pandas Series, shape (n_freq_points,)\n Frequencies for which the Raman spectra were acquired.\n \n spectra : pandas DataFrame, shape (n_spectra, n_freq_points)\n DataFrame containing all Raman spectra.\n \n title : str\n Title added to the plot.\n \n Returns\n -------\n None\n \n \"\"\"\n fig, ax = plt.subplots()\n 
ax.plot(frequency, spectra.T)\n _apply_axis_layout(ax, title)\n return fig, ax\n \ndef plot_spectra_by_type(frequency, spectra, classes, title=''):\n \"\"\"Plot mean spectrum with its variance for a given class.\n \n Parameters\n ----------\n frequency : pandas Series, shape (n_freq_points,)\n Frequencies for which the Raman spectra were acquired.\n \n spectra : pandas DataFrame, shape (n_spectra, n_freq_points)\n DataFrame containing all Raman spectra.\n \n classes : array-like, shape (n_classes,)\n Array contining the different spectra class which will be plotted.\n \n title : str\n Title added to the plot.\n \n Returns\n -------\n None\n \n \"\"\"\n fig, ax = plt.subplots()\n for label in np.unique(classes):\n label_index = np.flatnonzero(classes == label)\n spectra_mean = np.mean(spectra.iloc[label_index], axis=0)\n spectra_std = np.std(spectra.iloc[label_index], axis=0)\n ax.plot(frequency, spectra_mean, label=label)\n ax.fill_between(frequency,\n spectra_mean + spectra_std,\n spectra_mean - spectra_std,\n alpha=0.2)\n _apply_axis_layout(ax, title)\n ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))\n return fig, ax\n\nfig, ax = plot_spectra(frequency, spectra, 'All training spectra')\n\nfig, ax = plot_spectra_by_type(frequency, spectra, molecule)\nax.set_title('Mean spectra in function of the molecules')\n\nfig, ax = plot_spectra_by_type(frequency, spectra, concentration,\n 'Mean spectra in function of the concentrations')", "Reusability for new data:", "spectra_test, concentration_test, molecule_test = read_spectra('data/spectra_4.csv')\n\nplot_spectra(frequency, spectra_test,\n 'All training spectra')\nplot_spectra_by_type(frequency, spectra_test, molecule_test,\n 'Mean spectra in function of the molecules')\nplot_spectra_by_type(frequency, spectra_test, concentration_test,\n 'Mean spectra in function of the concentrations');", "Training and testing a machine learning model for classification", "def plot_cm(cm, classes, title):\n \"\"\"Plot a confusion matrix.\n \n Parameters\n ----------\n cm : ndarray, shape (n_classes, n_classes)\n Confusion matrix.\n \n classes : array-like, shape (n_classes,)\n Array contining the different spectra classes used in the\n classification problem.\n \n title : str\n Title added to the plot.\n \n Returns\n -------\n None\n \n \"\"\"\n fig, ax = plt.subplots()\n plt.imshow(cm, interpolation='nearest', cmap='bwr')\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n return fig, ax\n\nfor clf in [RandomForestClassifier(random_state=0),\n LinearSVC(random_state=0)]:\n \n pipeline = make_pipeline(StandardScaler(),\n PCA(n_components=100, random_state=0),\n clf)\n y_pred = pipeline.fit(spectra, molecule).predict(spectra_test)\n plot_cm(confusion_matrix(molecule_test, y_pred),\n pipeline.classes_,\n 'Confusion matrix using {}'.format(clf.__class__.__name__))\n print('Accuracy score: {0:.2f}'.format(pipeline.score(spectra_test,\n molecule_test)))", "Training and testing a machine learning model for regression", "def plot_regression(y_true, y_pred, title):\n \"\"\"Plot actual vs. 
predicted scatter plot.\n \n Parameters\n ----------\n y_true : array-like, shape (n_samples,)\n Ground truth (correct) target values.\n\n y_pred : array-like, shape (n_samples,)\n Estimated targets as returned by a regressor.\n\n title : str\n Title added to the plot.\n \n Returns\n -------\n None\n \n \"\"\" \n fig, ax = plt.subplots()\n ax.scatter(y_true, y_pred)\n ax.plot([0, 25000], [0, 25000], '--k')\n ax.set_ylabel('Target predicted')\n ax.set_xlabel('True Target')\n ax.set_title(title)\n ax.text(1000, 20000, r'$R^2$=%.2f, MAE=%.2f' % (\n r2_score(y_true, y_pred), median_absolute_error(y_true, y_pred)))\n ax.set_xlim([0, 25000])\n ax.set_ylim([0, 25000])\n return fig, ax\n\ndef regression_experiment(X_train, X_test, y_train, y_test):\n \"\"\"Perform regression experiment.\n \n Build a pipeline using PCA and either a Ridge\n or a RandomForestRegressor model.\n \n Parameters\n ----------\n X_train : pandas DataFrame, shape (n_spectra, n_freq_points)\n DataFrame containing training Raman spectra.\n \n X_test : pandas DataFrame, shape (n_spectra, n_freq_points)\n DataFrame containing testing Raman spectra.\n \n y_training : pandas Serie, shape (n_spectra,)\n Serie containing the training concentrations acting as targets.\n \n y_testing : pandas Serie, shape (n_spectra,)\n Serie containing the testing concentrations acting as targets.\n \n Returns\n -------\n None\n \n \"\"\"\n for reg in [RidgeCV(), RandomForestRegressor(random_state=0)]:\n pipeline = make_pipeline(PCA(n_components=100), reg)\n y_pred = pipeline.fit(X_train, y_train).predict(X_test)\n plot_regression(y_test, y_pred,\n 'Regression using {}'.format(reg.__class__.__name__))\n\nregression_experiment(spectra, spectra_test,\n concentration, concentration_test)\n\ndef fit_params(data):\n \"\"\"Compute statistics for robustly scale data.\n \n Compute the median and the variance, i.e. the difference\n between the 75th and 25th percentiles.\n These statistics are used later to scale data.\n \n Parameters\n ----------\n data : pandas DataFrame, shape (n_spectra, n_freq_point)\n DataFrame containing all Raman spectra.\n \n Returns\n -------\n median : ndarray, shape (n_freq_point,)\n Median for each wavelength.\n \n variance : ndarray, shape (n_freq_point,)\n Variance (difference between the 75th and 25th\n percentiles) for each wavelength.\n \n \"\"\"\n median = np.median(data, axis=0)\n percentile_25 = np.percentile(data, 25, axis=0)\n percentile_75 = np.percentile(data, 75, axis=0)\n return median, (percentile_75 - percentile_25)\n\ndef transform(data, median, var_25_75):\n \"\"\"Scale data using robust estimators.\n \n Scale the data by subtracting the median and dividing by the\n variance, i.e. 
the difference between the 75th and 25th percentiles.\n \n Parameters\n ----------\n data : pandas DataFrame, shape (n_spectra, n_freq_point)\n DataFrame containing all Raman spectra.\n \n median : ndarray, shape (n_freq_point,)\n Median for each wavelength.\n \n var_25_75 : ndarray, shape (n_freq_point,)\n Variance (difference between the 75th and 25th\n percentiles) for each wavelength.\n \n Returns\n -------\n data_scaled : pandas DataFrame, shape (n_spectra, n_freq_point)\n DataFrame containing all scaled Raman spectra.\n \n \"\"\"\n return (data - median) / var_25_75\n\n# compute the statistics on the training data\nmed, var = fit_params(spectra)\n# transform the training and testing data\nspectra_scaled = transform(spectra, med, var)\nspectra_test_scaled = transform(spectra_test, med, var)\n\nregression_experiment(spectra_scaled, spectra_test_scaled,\n concentration, concentration_test)" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
luctrudeau/Teaching
IntroductionIOAsync/IntroductionIOAsync.ipynb
lgpl-3.0
[ "Introduction à l'I/O Asynchrone\nLe Socket de Berkeley\nIl est difficile d'imaginer le nombre d'instanciations d'objets de type Socket depuis leur introduction en 1983 à l'université Berkeley.\nLe Socket est l'interface de programmation la plus populaire pour faire de la réseautique.\nElle est si populaire, que tous les systèmes d'exploitation l'offrent et tous les étudiants sont introduits à la programmation réseau avec les Sockets.\nExemple d'un Socket client qui se connecte", "from IPython.display import Image\nfrom IPython.display import display\n\nimport socket\n\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nsock.connect((\"etsmtl.ca\" , 80))", "Exemple d'un Socket client qui envoi des données", "msg = b'GET /ETS/media/Prive/logo/ETS-rouge-devise-ecran.jpg HTTP/1.1\\r\\nHost:etsmtl.ca\\r\\n\\r\\n'\nsock.sendall(msg)", "Exemple d'un Socket qui reçoit des données", "recvd = b''\nwhile True:\n data = sock.recv(1024)\n if not data: \n break\n recvd += data\n\nsock.shutdown(1)\nsock.close()\n\nresponse = recvd.split(b'\\r\\n\\r\\n', 1)\nImage(data=response[1])", "Quoique les versions de l'interface Socket ont évolué avec les années, surtout sur les plateformes orientées-objet, l'essence de l'interface de 1983 reste très présente dans les implémentations modernes.\n2.3.1.5. Making connections\n connect(s, name, namelen);\n2.3.1.6. Sending and receiving data\n cc = sendto(s, buf, len, flags, to, tolen);\n msglen = recvfrom(s, buf, len, flags, from, fromlenaddr);\nExtrait du manuel system de BSD 4.2 [1983]\nSocket Synchrone\nLe Socket Berkeley de 1983 est synchrone\nCeci implique que lorsqu'une fonction comme connect, sendto, recvfrom... est invoquée, le processus bloque jusqu'à l'obtention de la réponse. \nNotons la présence dans le même document d'une fonction d'I/O asynchrone. \nFail Whale\nLe Socket synchrone n'est pas efficace en conditions de charge élevée\nLe déploiement de réseaux haute vitesse combiné à l'explosion de la popularité des réseaux sociaux le démontre bien\n\nLes sites de réseau sociaux ne savent pas comment gérer cette impasse. \nLa situation est telle, que les pages d'erreurs de Twitter deviennent célèbres.\nLe Problème:\nLors d'un appel bloquant, le processus et ses ressources sont suspendus. Lorsque la charge augmente, la quantité de ressources suspendue devient ingérable pour le système d'exploitation\nLa Solution:\nIl ne faut pas bloquer\nLe Socket Asynchrone\nDès 1983, le socket de Berkley offre un mode asynchrones. 
Cependant, il n'est pas très utilisé, car ils sont beaucoup plus complexes et prône à l'erreur.\nLe Patron Reactor\nEn 1995, le Patron Reactor est découvert\nce patron simplifie grandement l'I/O Asynchrone\nhttp://www.dre.vanderbilt.edu/~schmidt/PDF/reactor-siemens.pdf\nUne influence est du patron Reactor est la fonction Select\nSelect est la fonction asynchrone présentée dans le même document que le Socket\nExemple d'un Socket client asynchrone qui se connecte\n(Avec le Patron Reactor)", "import selectors\nimport socket\nimport errno \n\nsel = selectors.DefaultSelector()\n\ndef connector(sock, mask):\n msg = b'GET /ETS/media/Prive/logo/ETS-rouge-devise-ecran.jpg HTTP/1.1\\r\\nHost:etsmtl.ca\\r\\n\\r\\n'\n sock.sendall(msg)\n # Le connector a pour responsabilité \n # d'instancier un nouveau Handler\n # et de l'ajouter au Selector\n h = HTTPHandler()\n sel.modify(sock, selectors.EVENT_READ, h.handle)\n\nclass HTTPHandler:\n \n recvd = b''\n \n def handle(self, sock, mask):\n data = sock.recv(1024)\n if not data:\n # Le Handler se retire \n # lorsqu'il a terminé.\n sel.unregister(sock)\n response = self.recvd.split(b'\\r\\n\\r\\n', 1)\n display(Image(data=response[1]))\n \n else:\n self.recvd += data", "Création d'un Socket Asynchrone", "sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\nsock.setblocking(False)\ntry:\n sock.connect((\"etsmtl.ca\" , 80))\nexcept socket.error:\n pass # L'exception est toujours lancé!\n # C'est normal, l'OS veut nous avertir que \n # nous ne sommes pas encore connecté\n", "Enregistrement du Connector", "# L'application enregistre le Connector\nsel.register(sock, selectors.EVENT_WRITE, connector)\n\n# Le Reactor\nwhile len(sel.get_map()):\n events = sel.select()\n for key, mask in events:\n handleEvent = key.data\n handleEvent(key.fileobj, mask)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
ThunderShiviah/code_guild
interactive-coding-challenges/graphs_trees/check_balance/check_balance_challenge.ipynb
mit
[ "<small><i>This notebook was prepared by Donne Martin. Source and license info is on GitHub.</i></small>\nChallenge Notebook\nProblem: Check if a binary tree is balanced.\n\nConstraints\nTest Cases\nAlgorithm\nCode\nUnit Test\nSolution Notebook\n\nConstraints\n\nIs a balanced tree one where the heights of two sub trees of any node doesn't differ by more than 1?\nYes\n\n\nCan we assume we already have a Node class with an insert method?\nYes\n\n\n\nTest Cases\n\n5, 3, 8, 1, 4 -> Yes\n5, 3, 8, 9, 10 -> No\n\nAlgorithm\nRefer to the Solution Notebook. If you are stuck and need a hint, the solution notebook's algorithm discussion might be a good place to start.\nCode", "%run ../bst/bst.py\n%load ../bst/bst.py\n\ndef check_balance(root):\n # TODO: Implement me\n pass", "Unit Test\nThe following unit test is expected to fail until you solve the challenge.", "# %load test_check_balance.py\nfrom nose.tools import assert_equal\n\n\nclass TestCheckBalance(object):\n\n def test_check_balance(self):\n node = Node(5)\n insert(node, 3)\n insert(node, 8)\n insert(node, 1)\n insert(node, 4)\n assert_equal(check_balance(node), True)\n\n node = Node(5)\n insert(node, 3)\n insert(node, 8)\n insert(node, 9)\n insert(node, 10)\n assert_equal(check_balance(node), False)\n\n print('Success: test_check_balance')\n\n\ndef main():\n test = TestCheckBalance()\n test.test_check_balance()\n\n\nif __name__ == '__main__':\n main()", "Solution Notebook\nReview the Solution Notebook for a discussion on algorithms and code solutions." ]
[ "markdown", "code", "markdown", "code", "markdown" ]
trangel/Data-Science
deep_learning_ai/Tensorflow+Tutorial+dropout.ipynb
gpl-3.0
[ "TensorFlow Tutorial\nWelcome to this week's programming assignment. Until now, you've always used numpy to build neural networks. Now we will step you through a deep learning framework that will allow you to build neural networks more easily. Machine learning frameworks like TensorFlow, PaddlePaddle, Torch, Caffe, Keras, and many others can speed up your machine learning development significantly. All of these frameworks also have a lot of documentation, which you should feel free to read. In this assignment, you will learn to do the following in TensorFlow: \n\nInitialize variables\nStart your own session\nTrain algorithms \nImplement a Neural Network\n\nPrograming frameworks can not only shorten your coding time, but sometimes also perform optimizations that speed up your code. \n1 - Exploring the Tensorflow Library\nTo start, you will import the library:", "import math\nimport numpy as np\nimport h5py\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nfrom tensorflow.python.framework import ops\nfrom tf_utils import load_dataset, random_mini_batches, convert_to_one_hot, predict\n\n%matplotlib inline\nnp.random.seed(1)", "Now that you have imported the library, we will walk you through its different applications. You will start with an example, where we compute for you the loss of one training example. \n$$loss = \\mathcal{L}(\\hat{y}, y) = (\\hat y^{(i)} - y^{(i)})^2 \\tag{1}$$", "y_hat = tf.constant(36, name='y_hat') # Define y_hat constant. Set to 36.\ny = tf.constant(39, name='y') # Define y. Set to 39\n\nloss = tf.Variable((y - y_hat)**2, name='loss') # Create a variable for the loss\n\ninit = tf.global_variables_initializer() # When init is run later (session.run(init)),\n # the loss variable will be initialized and ready to be computed\nwith tf.Session() as session: # Create a session and print the output\n session.run(init) # Initializes the variables\n print(session.run(loss)) # Prints the loss", "Writing and running programs in TensorFlow has the following steps:\n\nCreate Tensors (variables) that are not yet executed/evaluated. \nWrite operations between those Tensors.\nInitialize your Tensors. \nCreate a Session. \nRun the Session. This will run the operations you'd written above. \n\nTherefore, when we created a variable for the loss, we simply defined the loss as a function of other quantities, but did not evaluate its value. To evaluate it, we had to run init=tf.global_variables_initializer(). That initialized the loss variable, and in the last line we were finally able to evaluate the value of loss and print its value.\nNow let us look at an easy example. Run the cell below:", "a = tf.constant(2)\nb = tf.constant(10)\nc = tf.multiply(a,b)\nprint(c)", "As expected, you will not see 20! You got a tensor saying that the result is a tensor that does not have the shape attribute, and is of type \"int32\". All you did was put in the 'computation graph', but you have not run this computation yet. In order to actually multiply the two numbers, you will have to create a session and run it.", "sess = tf.Session()\nprint(sess.run(c))", "Great! To summarize, remember to initialize your variables, create a session and run the operations inside the session. \nNext, you'll also have to know about placeholders. A placeholder is an object whose value you can specify only later. \nTo specify values for a placeholder, you can pass in values by using a \"feed dictionary\" (feed_dict variable). Below, we created a placeholder for x. 
This allows us to pass in a number later when we run the session.", "# Change the value of x in the feed_dict\n\nx = tf.placeholder(tf.int64, name = 'x')\nprint(sess.run(2 * x, feed_dict = {x: 3}))\nsess.close()", "When you first defined x you did not have to specify a value for it. A placeholder is simply a variable that you will assign data to only later, when running the session. We say that you feed data to these placeholders when running the session. \nHere's what's happening: When you specify the operations needed for a computation, you are telling TensorFlow how to construct a computation graph. The computation graph can have some placeholders whose values you will specify only later. Finally, when you run the session, you are telling TensorFlow to execute the computation graph.\n1.1 - Linear function\nLets start this programming exercise by computing the following equation: $Y = WX + b$, where $W$ and $X$ are random matrices and b is a random vector. \nExercise: Compute $WX + b$ where $W, X$, and $b$ are drawn from a random normal distribution. W is of shape (4, 3), X is (3,1) and b is (4,1). As an example, here is how you would define a constant X that has shape (3,1):\n```python\nX = tf.constant(np.random.randn(3,1), name = \"X\")\n```\nYou might find the following functions helpful: \n- tf.matmul(..., ...) to do a matrix multiplication\n- tf.add(..., ...) to do an addition\n- np.random.randn(...) to initialize randomly", "# GRADED FUNCTION: linear_function\n\ndef linear_function():\n \"\"\"\n Implements a linear function: \n Initializes W to be a random tensor of shape (4,3)\n Initializes X to be a random tensor of shape (3,1)\n Initializes b to be a random tensor of shape (4,1)\n Returns: \n result -- runs the session for Y = WX + b \n \"\"\"\n \n np.random.seed(1)\n \n ### START CODE HERE ### (4 lines of code)\n X = tf.constant(np.random.randn(3,1), name = 'X')\n W = tf.constant(np.random.randn(4,3), name = 'W')\n b = tf.constant(np.random.randn(4,1), name = 'b')\n Y = tf.constant(np.random.randn(4,1), name = 'Y')\n ### END CODE HERE ### \n \n # Create the session using tf.Session() and run it with sess.run(...) on the variable you want to calculate\n \n ### START CODE HERE ###\n sess = tf.Session()\n result = sess.run(tf.add(tf.matmul(W,X),b))\n ### END CODE HERE ### \n \n # close the session \n sess.close()\n\n return result\n\nprint( \"result = \" + str(linear_function()))", "Expected Output : \n<table> \n<tr> \n<td>\n**result**\n</td>\n<td>\n[[-2.15657382]\n [ 2.95891446]\n [-1.08926781]\n [-0.84538042]]\n</td>\n</tr> \n\n</table>\n\n1.2 - Computing the sigmoid\nGreat! You just implemented a linear function. Tensorflow offers a variety of commonly used neural network functions like tf.sigmoid and tf.softmax. For this exercise lets compute the sigmoid function of an input. \nYou will do this exercise using a placeholder variable x. When running the session, you should use the feed dictionary to pass in the input z. In this exercise, you will have to (i) create a placeholder x, (ii) define the operations needed to compute the sigmoid using tf.sigmoid, and then (iii) run the session. \n Exercise : Implement the sigmoid function below. 
You should use the following: \n\ntf.placeholder(tf.float32, name = \"...\")\ntf.sigmoid(...)\nsess.run(..., feed_dict = {x: z})\n\nNote that there are two typical ways to create and use sessions in tensorflow: \nMethod 1:\n```python\nsess = tf.Session()\nRun the variables initialization (if needed), run the operations\nresult = sess.run(..., feed_dict = {...})\nsess.close() # Close the session\n**Method 2:**python\nwith tf.Session() as sess: \n # run the variables initialization (if needed), run the operations\n result = sess.run(..., feed_dict = {...})\n # This takes care of closing the session for you :)\n```", "# GRADED FUNCTION: sigmoid\n\ndef sigmoid(z):\n \"\"\"\n Computes the sigmoid of z\n \n Arguments:\n z -- input value, scalar or vector\n \n Returns: \n results -- the sigmoid of z\n \"\"\"\n \n ### START CODE HERE ### ( approx. 4 lines of code)\n # Create a placeholder for x. Name it 'x'.\n x = tf.placeholder(tf.float32, name='x')\n\n # compute sigmoid(x)\n sigmoid = tf.sigmoid(x)\n\n # Create a session, and run it. Please use the method 2 explained above. \n # You should use a feed_dict to pass z's value to x. \n with tf.Session() as sess:\n # Run session and call the output \"result\"\n result = sess.run(sigmoid, feed_dict = {x: z})\n \n ### END CODE HERE ###\n \n return result\n\nprint (\"sigmoid(0) = \" + str(sigmoid(0)))\nprint (\"sigmoid(12) = \" + str(sigmoid(12)))", "Expected Output : \n<table> \n<tr> \n<td>\n**sigmoid(0)**\n</td>\n<td>\n0.5\n</td>\n</tr>\n<tr> \n<td>\n**sigmoid(12)**\n</td>\n<td>\n0.999994\n</td>\n</tr> \n\n</table>\n\n<font color='blue'>\nTo summarize, you how know how to:\n1. Create placeholders\n2. Specify the computation graph corresponding to operations you want to compute\n3. Create the session\n4. Run the session, using a feed dictionary if necessary to specify placeholder variables' values. \n1.3 - Computing the Cost\nYou can also use a built-in function to compute the cost of your neural network. So instead of needing to write code to compute this as a function of $a^{2}$ and $y^{(i)}$ for i=1...m: \n$$ J = - \\frac{1}{m} \\sum_{i = 1}^m \\large ( \\small y^{(i)} \\log a^{ [2] (i)} + (1-y^{(i)})\\log (1-a^{ [2] (i)} )\\large )\\small\\tag{2}$$\nyou can do it in one line of code in tensorflow!\nExercise: Implement the cross entropy loss. The function you will use is: \n\ntf.nn.sigmoid_cross_entropy_with_logits(logits = ..., labels = ...)\n\nYour code should input z, compute the sigmoid (to get a) and then compute the cross entropy cost $J$. All this can be done using one call to tf.nn.sigmoid_cross_entropy_with_logits, which computes\n$$- \\frac{1}{m} \\sum_{i = 1}^m \\large ( \\small y^{(i)} \\log \\sigma(z^{2}) + (1-y^{(i)})\\log (1-\\sigma(z^{2})\\large )\\small\\tag{2}$$", "# GRADED FUNCTION: cost\n\ndef cost(logits, labels):\n \"\"\"\n    Computes the cost using the sigmoid cross entropy\n    \n    Arguments:\n    logits -- vector containing z, output of the last linear unit (before the final sigmoid activation)\n    labels -- vector of labels y (1 or 0) \n \n Note: What we've been calling \"z\" and \"y\" in this class are respectively called \"logits\" and \"labels\" \n in the TensorFlow documentation. So logits will feed into z, and labels into y. \n    \n    Returns:\n    cost -- runs the session of the cost (formula (2))\n \"\"\"\n \n ### START CODE HERE ### \n \n # Create the placeholders for \"logits\" (z) and \"labels\" (y) (approx. 
2 lines)\n z = tf.placeholder(tf.float32, name='logits')\n y = tf.placeholder(tf.float32, name='labels')\n \n # Use the loss function (approx. 1 line)\n cost = tf.nn.sigmoid_cross_entropy_with_logits(logits = z, labels = y)\n \n # Create a session (approx. 1 line). See method 1 above.\n sess = tf.Session()\n \n # Run the session (approx. 1 line).\n cost = sess.run(cost, feed_dict = {z:logits, y:labels})\n \n # Close the session (approx. 1 line). See method 1 above.\n sess.close()\n \n ### END CODE HERE ###\n \n return cost\n\nlogits = sigmoid(np.array([0.2,0.4,0.7,0.9]))\ncost = cost(logits, np.array([0,0,1,1]))\nprint (\"cost = \" + str(cost))", "Expected Output : \n<table> \n <tr> \n <td>\n **cost**\n </td>\n <td>\n [ 1.00538719 1.03664088 0.41385433 0.39956614]\n </td>\n </tr>\n\n</table>\n\n1.4 - Using One Hot encodings\nMany times in deep learning you will have a y vector with numbers ranging from 0 to C-1, where C is the number of classes. If C is for example 4, then you might have the following y vector which you will need to convert as follows:\n<img src=\"images/onehot.png\" style=\"width:600px;height:150px;\">\nThis is called a \"one hot\" encoding, because in the converted representation exactly one element of each column is \"hot\" (meaning set to 1). To do this conversion in numpy, you might have to write a few lines of code. In tensorflow, you can use one line of code: \n\ntf.one_hot(labels, depth, axis) \n\nExercise: Implement the function below to take one vector of labels and the total number of classes $C$, and return the one hot encoding. Use tf.one_hot() to do this.", "# GRADED FUNCTION: one_hot_matrix\n\ndef one_hot_matrix(labels, C):\n \"\"\"\n Creates a matrix where the i-th row corresponds to the ith class number and the jth column\n corresponds to the jth training example. So if example j had a label i. Then entry (i,j) \n will be 1. \n \n Arguments:\n labels -- vector containing the labels \n C -- number of classes, the depth of the one hot dimension\n \n Returns: \n one_hot -- one hot matrix\n \"\"\"\n \n ### START CODE HERE ###\n \n # Create a tf.constant equal to C (depth), name it 'C'. (approx. 1 line)\n C = tf.constant(C, name='C')\n \n # Use tf.one_hot, be careful with the axis (approx. 1 line)\n one_hot_matrix = tf.one_hot(labels, C, axis=0)\n \n # Create the session (approx. 1 line)\n sess = tf.Session()\n \n # Run the session (approx. 1 line)\n one_hot = sess.run(one_hot_matrix)\n \n # Close the session (approx. 1 line). See method 1 above.\n sess.close()\n \n ### END CODE HERE ###\n \n return one_hot\n\nlabels = np.array([1,2,3,0,2,1])\none_hot = one_hot_matrix(labels, C = 4)\nprint (\"one_hot = \" + str(one_hot))", "Expected Output: \n<table> \n <tr> \n <td>\n **one_hot**\n </td>\n <td>\n [[ 0. 0. 0. 1. 0. 0.]\n [ 1. 0. 0. 0. 0. 1.]\n [ 0. 1. 0. 0. 1. 0.]\n [ 0. 0. 1. 0. 0. 0.]]\n </td>\n </tr>\n\n</table>\n\n1.5 - Initialize with zeros and ones\nNow you will learn how to initialize a vector of zeros and ones. The function you will be calling is tf.ones(). To initialize with zeros you could use tf.zeros() instead. These functions take in a shape and return an array of dimension shape full of zeros and ones respectively. \nExercise: Implement the function below to take in a shape and to return an array (of the shape's dimension of ones). 
\n\ntf.ones(shape)", "# GRADED FUNCTION: ones\n\ndef ones(shape):\n \"\"\"\n Creates an array of ones of dimension shape\n \n Arguments:\n shape -- shape of the array you want to create\n \n Returns: \n ones -- array containing only ones\n \"\"\"\n \n ### START CODE HERE ###\n \n # Create \"ones\" tensor using tf.ones(...). (approx. 1 line)\n ones = tf.ones(shape)\n \n # Create the session (approx. 1 line)\n sess = tf.Session()\n \n # Run the session to compute 'ones' (approx. 1 line)\n ones = sess.run(ones)\n \n # Close the session (approx. 1 line). See method 1 above.\n sess.close()\n \n ### END CODE HERE ###\n return ones\n\nprint (\"ones = \" + str(ones([3])))", "Expected Output:\n<table> \n <tr> \n <td>\n **ones**\n </td>\n <td>\n [ 1. 1. 1.]\n </td>\n </tr>\n\n</table>\n\n2 - Building your first neural network in tensorflow\nIn this part of the assignment you will build a neural network using tensorflow. Remember that there are two parts to implement a tensorflow model:\n\nCreate the computation graph\nRun the graph\n\nLet's delve into the problem you'd like to solve!\n2.0 - Problem statement: SIGNS Dataset\nOne afternoon, with some friends we decided to teach our computers to decipher sign language. We spent a few hours taking pictures in front of a white wall and came up with the following dataset. It's now your job to build an algorithm that would facilitate communications from a speech-impaired person to someone who doesn't understand sign language.\n\nTraining set: 1080 pictures (64 by 64 pixels) of signs representing numbers from 0 to 5 (180 pictures per number).\nTest set: 120 pictures (64 by 64 pixels) of signs representing numbers from 0 to 5 (20 pictures per number).\n\nNote that this is a subset of the SIGNS dataset. The complete dataset contains many more signs.\nHere are examples for each number, and how an explanation of how we represent the labels. These are the original pictures, before we lowered the image resolutoion to 64 by 64 pixels.\n<img src=\"images/hands.png\" style=\"width:800px;height:350px;\"><caption><center> <u><font color='purple'> Figure 1</u><font color='purple'>: SIGNS dataset <br> <font color='black'> </center>\nRun the following code to load the dataset.", "# Loading the dataset\nX_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()", "Change the index below and run the cell to visualize some examples in the dataset.", "# Example of a picture\nindex = 0\nplt.imshow(X_train_orig[index])\nprint (\"y = \" + str(np.squeeze(Y_train_orig[:, index])))", "As usual you flatten the image dataset, then normalize it by dividing by 255. On top of that, you will convert each label to a one-hot vector as shown in Figure 1. Run the cell below to do so.", "# Flatten the training and test images\nX_train_flatten = X_train_orig.reshape(X_train_orig.shape[0], -1).T\nX_test_flatten = X_test_orig.reshape(X_test_orig.shape[0], -1).T\n# Normalize image vectors\nX_train = X_train_flatten/255.\nX_test = X_test_flatten/255.\n# Convert training and test labels to one hot matrices\nY_train = convert_to_one_hot(Y_train_orig, 6)\nY_test = convert_to_one_hot(Y_test_orig, 6)\n\nprint (\"number of training examples = \" + str(X_train.shape[1]))\nprint (\"number of test examples = \" + str(X_test.shape[1]))\nprint (\"X_train shape: \" + str(X_train.shape))\nprint (\"Y_train shape: \" + str(Y_train.shape))\nprint (\"X_test shape: \" + str(X_test.shape))\nprint (\"Y_test shape: \" + str(Y_test.shape))", "Note that 12288 comes from $64 \\times 64 \\times 3$. 
Each image is square, 64 by 64 pixels, and 3 is for the RGB colors. Please make sure all these shapes make sense to you before continuing.\nYour goal is to build an algorithm capable of recognizing a sign with high accuracy. To do so, you are going to build a tensorflow model that is almost the same as one you have previously built in numpy for cat recognition (but now using a softmax output). It is a great occasion to compare your numpy implementation to the tensorflow one. \nThe model is LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SOFTMAX. The SIGMOID output layer has been converted to a SOFTMAX. A SOFTMAX layer generalizes SIGMOID to when there are more than two classes. \n2.1 - Create placeholders\nYour first task is to create placeholders for X and Y. This will allow you to later pass your training data in when you run your session. \nExercise: Implement the function below to create the placeholders in tensorflow.", "# GRADED FUNCTION: create_placeholders\n\ndef create_placeholders(n_x, n_y):\n \"\"\"\n Creates the placeholders for the tensorflow session.\n \n Arguments:\n n_x -- scalar, size of an image vector (num_px * num_px = 64 * 64 * 3 = 12288)\n n_y -- scalar, number of classes (from 0 to 5, so -> 6)\n \n Returns:\n X -- placeholder for the data input, of shape [n_x, None] and dtype \"float\"\n Y -- placeholder for the input labels, of shape [n_y, None] and dtype \"float\"\n \n Tips:\n - You will use None because it lets us be flexible about the number of examples used for the placeholders.\n In fact, the number of examples during test/train is different.\n \"\"\"\n\n ### START CODE HERE ### (approx. 2 lines)\n X = tf.placeholder(dtype=tf.float32, shape=[n_x, None], name='X')\n Y = tf.placeholder(dtype=tf.float32, shape=[n_y, None], name='Y')\n keep_prob = tf.placeholder(dtype=tf.float32, shape=[2], name='keep_prob')\n ### END CODE HERE ###\n \n return X, Y, keep_prob\n\nX, Y, keep_prob = create_placeholders(12288, 6)\nprint (\"X = \" + str(X))\nprint (\"Y = \" + str(Y))", "Expected Output: \n<table> \n <tr> \n <td>\n **X**\n </td>\n <td>\n Tensor(\"Placeholder_1:0\", shape=(12288, ?), dtype=float32) (not necessarily Placeholder_1)\n </td>\n </tr>\n <tr> \n <td>\n **Y**\n </td>\n <td>\n Tensor(\"Placeholder_2:0\", shape=(6, ?), dtype=float32) (not necessarily Placeholder_2)\n </td>\n </tr>\n\n</table>\n\n2.2 - Initializing the parameters\nYour second task is to initialize the parameters in tensorflow.\nExercise: Implement the function below to initialize the parameters in tensorflow. You are going to use Xavier Initialization for weights and Zero Initialization for biases. The shapes are given below. As an example, to help you, for W1 and b1 you could use: \npython\nW1 = tf.get_variable(\"W1\", [25,12288], initializer = tf.contrib.layers.xavier_initializer(seed = 1))\nb1 = tf.get_variable(\"b1\", [25,1], initializer = tf.zeros_initializer())\nPlease use seed = 1 to make sure your results match ours.", "# GRADED FUNCTION: initialize_parameters\n\ndef initialize_parameters():\n \"\"\"\n Initializes parameters to build a neural network with tensorflow. The shapes are:\n W1 : [25, 12288]\n b1 : [25, 1]\n W2 : [12, 25]\n b2 : [12, 1]\n W3 : [6, 12]\n b3 : [6, 1]\n \n Returns:\n parameters -- a dictionary of tensors containing W1, b1, W2, b2, W3, b3\n \"\"\"\n \n tf.set_random_seed(1) # so that your \"random\" numbers match ours\n \n ### START CODE HERE ### (approx.
6 lines of code)\n W1 = tf.get_variable(\"W1\", [25,12288], initializer = tf.contrib.layers.xavier_initializer(seed=1))\n b1 = tf.get_variable(\"b1\", [25,1], initializer = tf.zeros_initializer())\n W2 = tf.get_variable('W2', [12,25], initializer = tf.contrib.layers.xavier_initializer(seed=1))\n b2 = tf.get_variable('b2', [12,1], initializer = tf.zeros_initializer())\n W3 = tf.get_variable('W3', [6,12], initializer = tf.contrib.layers.xavier_initializer(seed=1))\n b3 = tf.get_variable('b3', [6,1], initializer = tf.zeros_initializer())\n ### END CODE HERE ###\n\n parameters = {\"W1\": W1,\n \"b1\": b1,\n \"W2\": W2,\n \"b2\": b2,\n \"W3\": W3,\n \"b3\": b3}\n \n return parameters\n\ntf.reset_default_graph()\nwith tf.Session() as sess:\n parameters = initialize_parameters()\n print(\"W1 = \" + str(parameters[\"W1\"]))\n print(\"b1 = \" + str(parameters[\"b1\"]))\n print(\"W2 = \" + str(parameters[\"W2\"]))\n print(\"b2 = \" + str(parameters[\"b2\"]))", "Expected Output: \n<table> \n <tr> \n <td>\n **W1**\n </td>\n <td>\n < tf.Variable 'W1:0' shape=(25, 12288) dtype=float32_ref >\n </td>\n </tr>\n <tr> \n <td>\n **b1**\n </td>\n <td>\n < tf.Variable 'b1:0' shape=(25, 1) dtype=float32_ref >\n </td>\n </tr>\n <tr> \n <td>\n **W2**\n </td>\n <td>\n < tf.Variable 'W2:0' shape=(12, 25) dtype=float32_ref >\n </td>\n </tr>\n <tr> \n <td>\n **b2**\n </td>\n <td>\n < tf.Variable 'b2:0' shape=(12, 1) dtype=float32_ref >\n </td>\n </tr>\n\n</table>\n\nAs expected, the parameters haven't been evaluated yet.\n2.3 - Forward propagation in tensorflow\nYou will now implement the forward propagation module in tensorflow. The function will take in a dictionary of parameters and it will complete the forward pass. The functions you will be using are: \n\ntf.add(...,...) to do an addition\ntf.matmul(...,...) to do a matrix multiplication\ntf.nn.relu(...) to apply the ReLU activation\n\nQuestion: Implement the forward pass of the neural network. We commented for you the numpy equivalents so that you can compare the tensorflow implementation to numpy. It is important to note that the forward propagation stops at z3. The reason is that in tensorflow the last linear layer output is given as input to the function computing the loss. Therefore, you don't need a3!", "# GRADED FUNCTION: forward_propagation\n\ndef forward_propagation(X, parameters, keep_prob):\n \"\"\"\n Implements the forward propagation for the model: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SOFTMAX\n \n Arguments:\n X -- input dataset placeholder, of shape (input size, number of examples)\n parameters -- python dictionary containing your parameters \"W1\", \"b1\", \"W2\", \"b2\", \"W3\", \"b3\"\n the shapes are given in initialize_parameters\n\n Returns:\n Z3 -- the output of the last LINEAR unit\n \"\"\"\n \n # Retrieve the parameters from the dictionary \"parameters\" \n W1 = parameters['W1']\n b1 = parameters['b1']\n W2 = parameters['W2']\n b2 = parameters['b2']\n W3 = parameters['W3']\n b3 = parameters['b3']\n \n \n ### START CODE HERE ### (approx. 
5 lines) # Numpy Equivalents:\n Z1 = tf.add(tf.matmul(W1, X), b1) # Z1 = np.dot(W1, X) + b1\n A1 = tf.nn.relu(Z1) # A1 = relu(Z1)\n A1_dropout = tf.nn.dropout(A1, keep_prob[0]) # apply dropout (*)\n Z2 = tf.add(tf.matmul(W2, A1_dropout), b2) # Z2 = np.dot(W2, a1) + b2\n A2 = tf.nn.relu(Z2) # A2 = relu(Z2)\n A2_dropout = tf.nn.dropout(A2, keep_prob[1]) # apply dropout (*)\n Z3 = tf.add(tf.matmul(W3, A2_dropout), b3) # Z3 = np.dot(W3,Z2) + b3\n ### END CODE HERE ###\n \n return Z3\n\ntf.reset_default_graph()\n\nwith tf.Session() as sess:\n X, Y, keep_prob = create_placeholders(12288, 6)\n parameters = initialize_parameters()\n Z3 = forward_propagation(X, parameters, keep_prob)\n print(\"Z3 = \" + str(Z3))", "Expected Output: \n<table> \n <tr> \n <td>\n **Z3**\n </td>\n <td>\n Tensor(\"Add_2:0\", shape=(6, ?), dtype=float32)\n </td>\n </tr>\n\n</table>\n\nYou may have noticed that the forward propagation doesn't output any cache. You will understand why below, when we get to brackpropagation.\n2.4 Compute cost\nAs seen before, it is very easy to compute the cost using:\npython\ntf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = ..., labels = ...))\nQuestion: Implement the cost function below. \n- It is important to know that the \"logits\" and \"labels\" inputs of tf.nn.softmax_cross_entropy_with_logits are expected to be of shape (number of examples, num_classes). We have thus transposed Z3 and Y for you.\n- Besides, tf.reduce_mean basically does the summation over the examples.", "# GRADED FUNCTION: compute_cost \n\ndef compute_cost(Z3, Y):\n \"\"\"\n Computes the cost\n \n Arguments:\n Z3 -- output of forward propagation (output of the last LINEAR unit), of shape (6, number of examples)\n Y -- \"true\" labels vector placeholder, same shape as Z3\n \n Returns:\n cost - Tensor of the cost function\n \"\"\"\n \n # to fit the tensorflow requirement for tf.nn.softmax_cross_entropy_with_logits(...,...)\n logits = tf.transpose(Z3)\n labels = tf.transpose(Y)\n \n ### START CODE HERE ### (1 line of code)\n cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits))\n ### END CODE HERE ###\n \n return cost\n\ntf.reset_default_graph()\n\nwith tf.Session() as sess:\n X, Y, keep_prob = create_placeholders(12288, 6)\n parameters = initialize_parameters()\n Z3 = forward_propagation(X, parameters, keep_prob)\n cost = compute_cost(Z3, Y)\n print(\"cost = \" + str(cost))", "Expected Output: \n<table> \n <tr> \n <td>\n **cost**\n </td>\n <td>\n Tensor(\"Mean:0\", shape=(), dtype=float32)\n </td>\n </tr>\n\n</table>\n\n2.5 - Backward propagation & parameter updates\nThis is where you become grateful to programming frameworks. All the backpropagation and the parameters update is taken care of in 1 line of code. It is very easy to incorporate this line in the model.\nAfter you compute the cost function. You will create an \"optimizer\" object. You have to call this object along with the cost when running the tf.session. When called, it will perform an optimization on the given cost with the chosen method and learning rate.\nFor instance, for gradient descent the optimizer would be:\npython\noptimizer = tf.train.GradientDescentOptimizer(learning_rate = learning_rate).minimize(cost)\nTo make the optimization you would do:\npython\n_ , c = sess.run([optimizer, cost], feed_dict={X: minibatch_X, Y: minibatch_Y})\nThis computes the backpropagation by passing through the tensorflow graph in the reverse order. 
From cost to inputs.\nNote When coding, we often use _ as a \"throwaway\" variable to store values that we won't need to use later. Here, _ takes on the evaluated value of optimizer, which we don't need (and c takes the value of the cost variable). \n2.6 - Building the model\nNow, you will bring it all together! \nExercise: Implement the model. You will be calling the functions you had previously implemented.", "def model(X_train, Y_train, X_test, Y_test, learning_rate = 0.0001,\n num_epochs = 3000, minibatch_size = 32, print_cost = True):\n \"\"\"\n Implements a three-layer tensorflow neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SOFTMAX.\n \n Arguments:\n X_train -- training set, of shape (input size = 12288, number of training examples = 1080)\n Y_train -- test set, of shape (output size = 6, number of training examples = 1080)\n X_test -- training set, of shape (input size = 12288, number of training examples = 120)\n Y_test -- test set, of shape (output size = 6, number of test examples = 120)\n learning_rate -- learning rate of the optimization\n num_epochs -- number of epochs of the optimization loop\n minibatch_size -- size of a minibatch\n print_cost -- True to print the cost every 100 epochs\n \n Returns:\n parameters -- parameters learnt by the model. They can then be used to predict.\n \"\"\"\n \n ops.reset_default_graph() # to be able to rerun the model without overwriting tf variables\n tf.set_random_seed(1) # to keep consistent results\n seed = 3 # to keep consistent results\n (n_x, m) = X_train.shape # (n_x: input size, m : number of examples in the train set)\n n_y = Y_train.shape[0] # n_y : output size\n costs = [] # To keep track of the cost\n \n # Create Placeholders of shape (n_x, n_y)\n ### START CODE HERE ### (1 line)\n X, Y, keep_prob = create_placeholders(n_x, n_y)\n ### END CODE HERE ###\n\n # Initialize parameters\n ### START CODE HERE ### (1 line)\n parameters = initialize_parameters()\n ### END CODE HERE ###\n \n # Forward propagation: Build the forward propagation in the tensorflow graph\n ### START CODE HERE ### (1 line)\n Z3 = forward_propagation(X, parameters, keep_prob)\n ### END CODE HERE ###\n \n # Cost function: Add cost function to tensorflow graph\n ### START CODE HERE ### (1 line)\n cost = compute_cost(Z3, Y)\n ### END CODE HERE ###\n \n # Backpropagation: Define the tensorflow optimizer. Use an AdamOptimizer.\n ### START CODE HERE ### (1 line)\n optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(cost)\n ### END CODE HERE ###\n \n # Initialize all the variables\n init = tf.global_variables_initializer()\n\n keep_prob_train = [0.9, 1.0]\n \n # Start the session to compute the tensorflow graph\n with tf.Session() as sess:\n \n # Run the initialization\n sess.run(init)\n \n # Do the training loop\n for epoch in range(num_epochs):\n\n epoch_cost = 0. 
# Defines a cost related to an epoch\n num_minibatches = int(m / minibatch_size) # number of minibatches of size minibatch_size in the train set\n seed = seed + 1\n minibatches = random_mini_batches(X_train, Y_train, minibatch_size, seed)\n\n for minibatch in minibatches:\n\n # Select a minibatch\n (minibatch_X, minibatch_Y) = minibatch\n \n # IMPORTANT: The line that runs the graph on a minibatch.\n # Run the session to execute the \"optimizer\" and the \"cost\", the feedict should contain a minibatch for (X,Y).\n ### START CODE HERE ### (1 line)\n _ , minibatch_cost = sess.run([optimizer, cost], feed_dict={X: minibatch_X, Y: minibatch_Y, keep_prob:keep_prob_train})\n ### END CODE HERE ###\n \n epoch_cost += minibatch_cost / num_minibatches\n\n # Print the cost every epoch\n if print_cost == True and epoch % 100 == 0:\n print (\"Cost after epoch %i: %f\" % (epoch, epoch_cost))\n if print_cost == True and epoch % 5 == 0:\n costs.append(epoch_cost)\n \n # plot the cost\n plt.plot(np.squeeze(costs))\n plt.ylabel('cost')\n plt.xlabel('iterations (per tens)')\n plt.title(\"Learning rate =\" + str(learning_rate))\n plt.show()\n\n # lets save the parameters in a variable\n parameters = sess.run(parameters)\n print (\"Parameters have been trained!\")\n\n # Calculate the correct predictions\n correct_prediction = tf.equal(tf.argmax(Z3), tf.argmax(Y))\n\n # Calculate accuracy on the test set\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\n\n print (\"Train Accuracy:\", accuracy.eval({X: X_train, Y: Y_train, keep_prob : [1.0, 1.0]}))\n print (\"Test Accuracy:\", accuracy.eval({X: X_test, Y: Y_test, keep_prob : [1.0, 1.0]}))\n \n return parameters", "Run the following cell to train your model! On our machine it takes about 5 minutes. Your \"Cost after epoch 100\" should be 1.016458. If it's not, don't waste time; interrupt the training by clicking on the square (⬛) in the upper bar of the notebook, and try to correct your code. If it is the correct cost, take a break and come back in 5 minutes!", "parameters = model(X_train, Y_train, X_test, Y_test)", "Expected Output:\n<table> \n <tr> \n <td>\n **Train Accuracy**\n </td>\n <td>\n 0.999074\n </td>\n </tr>\n <tr> \n <td>\n **Test Accuracy**\n </td>\n <td>\n 0.716667\n </td>\n </tr>\n\n</table>\n\nAmazing, your algorithm can recognize a sign representing a figure between 0 and 5 with 71.7% accuracy.\nInsights:\n- Your model seems big enough to fit the training set well. However, given the difference between train and test accuracy, you could try to add L2 or dropout regularization to reduce overfitting. \n- Think about the session as a block of code to train the model. Each time you run the session on a minibatch, it trains the parameters. In total you have run the session a large number of times (1500 epochs) until you obtained well trained parameters.\n2.7 - Test with your own image (optional / ungraded exercise)\nCongratulations on finishing this assignment. You can now take a picture of your hand and see the output of your model. To do that:\n 1. Click on \"File\" in the upper bar of this notebook, then click \"Open\" to go on your Coursera Hub.\n 2. Add your image to this Jupyter Notebook's directory, in the \"images\" folder\n 3. Write your image's name in the following code\n 4. 
Run the code and check if the algorithm is right!", "import scipy\nfrom PIL import Image\nfrom scipy import ndimage\n\n## START CODE HERE ## (PUT YOUR IMAGE NAME) \nmy_image = \"thumbs_up.jpg\"\n## END CODE HERE ##\n\n# We preprocess your image to fit your algorithm.\nfname = \"images/\" + my_image\nimage = np.array(ndimage.imread(fname, flatten=False))\nmy_image = scipy.misc.imresize(image, size=(64,64)).reshape((1, 64*64*3)).T\nmy_image_prediction = predict(my_image, parameters)\n\nplt.imshow(image)\nprint(\"Your algorithm predicts: y = \" + str(np.squeeze(my_image_prediction)))", "You indeed deserved a \"thumbs-up\" although as you can see the algorithm seems to classify it incorrectly. The reason is that the training set doesn't contain any \"thumbs-up\", so the model doesn't know how to deal with it! We call that a \"mismatched data distribution\" and it is one of the various topics covered in the next course on \"Structuring Machine Learning Projects\".\n<font color='blue'>\nWhat you should remember:\n- Tensorflow is a programming framework used in deep learning\n- The two main object classes in tensorflow are Tensors and Operators. \n- When you code in tensorflow you have to take the following steps:\n - Create a graph containing Tensors (Variables, Placeholders ...) and Operations (tf.matmul, tf.add, ...)\n - Create a session\n - Initialize the session\n - Run the session to execute the graph\n- You can execute the graph multiple times as you've seen in model()\n- The backpropagation and optimization is automatically done when running the session on the \"optimizer\" object." ]
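The optional exercise above calls a `predict` helper that is imported from the course utilities rather than defined in this notebook. As a rough, hypothetical sketch only, such a helper could reuse the `create_placeholders` and `forward_propagation` functions defined earlier (feeding `keep_prob = [1.0, 1.0]` so that dropout is disabled at inference time); the actual course implementation may differ:

```python
def predict(X_sample, parameters):
    """Hypothetical sketch of the prediction helper: one forward pass + argmax.

    Assumes X_sample has shape (12288, m) and parameters holds the trained
    numpy arrays returned by model().
    """
    # Wrap the trained parameter values as constants in the graph
    params = {name: tf.constant(value) for name, value in parameters.items()}

    X, _, keep_prob = create_placeholders(12288, 6)
    Z3 = forward_propagation(X, params, keep_prob)
    prediction = tf.argmax(Z3)  # index of the largest logit for each example (column)

    with tf.Session() as sess:
        # keep_prob = [1.0, 1.0] disables dropout at prediction time
        return sess.run(prediction, feed_dict={X: X_sample, keep_prob: [1.0, 1.0]})
```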
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
anthonyng2/FX-Trading-with-Python-and-Oanda
Oanda v1 REST-oandapy/04.00 Order Management.ipynb
mit
[ "<!--NAVIGATION-->\n< Account Information | Contents | Trade Management >\nOrder Management\nOrders\nCreating Orders\ncreate_order(self, account_id, **params)", "from datetime import datetime, timedelta\nimport pandas as pd\nimport oandapy\nimport configparser\n\nconfig = configparser.ConfigParser()\nconfig.read('../config/config_v1.ini')\naccount_id = config['oanda']['account_id']\napi_key = config['oanda']['api_key']\n\noanda = oandapy.API(environment=\"practice\", \n access_token=api_key)\n\ntrade_expire = datetime.now() + timedelta(days=1)\ntrade_expire = trade_expire.isoformat(\"T\") + \"Z\"\ntrade_expire", "For a detailed explanation of the above, please refer to Rates Information.", "response = oanda.create_order(account_id,\n instrument = \"AUD_USD\",\n units=1000,\n side=\"buy\",\n type=\"limit\",\n price=0.7420,\n expiry=trade_expire)\nprint(response)\n\npd.Series(response[\"orderOpened\"])\n\norder_id = response[\"orderOpened\"]['id']", "Getting Open Orders\nget_orders(self, account_id, **params)", "response = oanda.get_orders(account_id)\nprint(response)\n\npd.DataFrame(response['orders'])", "Getting Specific Order Information\nget_order(self, account_id, order_id, **params)", "response = oanda.get_orders(account_id)\nid = response['orders'][0]['id']\n\noanda.get_order(account_id, order_id=id)", "Modify Order\nmodify_order(self, account_id, order_id, **params)", "response = oanda.get_orders(account_id)\nid = response['orders'][0]['id']\n\noanda.modify_order(account_id, order_id=id, price=0.7040)", "Close Order\nclose_order(self, account_id, order_id, **params)", "response = oanda.get_orders(account_id)\nid = response['orders'][0]['id']\n\noanda.close_order(account_id, order_id=id)", "Now when we check the orders. The above order has been closed and removed without being filled. There is only one outstanding order now.", "oanda.get_orders(account_id)", "<!--NAVIGATION-->\n< Account Information | Contents | Trade Management >" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
justanr/notebooks
hexagonal/refactoring_and_interfaces.ipynb
mit
[ "I've been thinking a lot about software achitecure lately. Not just thinking, because I wouldn't come up with these ideas on my own, but consuming a lot about it -- books, talks, slide decks, blog posts. And while thinking about all this, I've been hacking away at some projects in my spare time. And I noticed something, there's a lot of things in these projects that look a lot like this:", "@app.route('/register', methods=['GET', 'POST'])\ndef register():\n form = RegisterUserForm()\n \n if form.validate_on_submit():\n user = User()\n form.populate_obj(user)\n db.session.add(user)\n db.session.commit()\n return redirect('homepage')\n \n return render_template('register.html', form=form)", "This is a bog standard user registration endpoint. We create a form, check if it's valid, shove that information on a user model and then into the database and redirect off. If it's not valid or if it wasn't submitted (the user just navigated to the page), we render out some HTML.\nIt's all very basic, well trodden code. Besides, who wants to do registration again? It's boring. We want to do the interesting stuff. But there's some very real consequences to this code: \nIt's not testable\nEverything is wrapped up together, form validation, database stuff, rendering. Honestly, I'm not interested in testing if SQLAlchemy, WTForms of Jinja2 work -- they have their own tests. So testing this ends up looking like this:", "@mock.patch('myapp.views.RegisterUserForm')\[email protected]('myapp.views.db')\[email protected]('myapp.views.redirect')\[email protected]('myapp.views.url_for')\[email protected]('myapp.views.render_template')\ndef test_register_new_user(render, url_for, redirect, db, form):\n # TODO: Write test\n assert True", "What's even the point of this? We're just testing if Mock works at this point. There's actual things we can do to make it more testable, but before delving into that, \nIt hides logic\nIf registering a user was solely about, \"Fill this form out and we'll shove it into a database\" there wouldn't be a blog post here. However, there is some logic hiding out here in the form:", "class RegisterUserForm(Form):\n def validate_username(self, field):\n if User.query.filter(User.username == field.data).count():\n raise ValidationError(\"Username in use already\")\n \n def validate_email(self, field):\n if User.query.filter(User.email == field.data).count():\n raise ValidationError(\"Email in use already\")", "When we call RegisterUserForm.validate_on_submit it also runs these two methods. However, I'm not of the opinion that the form should talk to the database at all, let alone run validation against database contents. 
So, let's write a little test harness that can prove that an existing user with a given username and email causes us to not register:", "from myapp.forms import RegisterUserForm\nfrom myapp.models import User\n\nfrom collections import namedtuple\n\nfrom unittest import mock\n\nFakeData = namedtuple('User', ['username', 'email', 'password', 'confirm_password'])\n\ndef test_existing_username_fails_validation():\n test_data = FakeData('fred', '[email protected]', 'a', 'a')\n UserModel = mock.Mock()\n UserModel.query.filter.count.return_value = 1\n form = RegisterUserForm(obj=test_data)\n \n with mock.patch('myapp.forms.User', UserModel):\n form.validate()\n \n assert form.errors['username'] == \"Username in use already\"\n \ndef test_existing_email_fails_validation():\n test_user = FakeUser('fred', '[email protected]', 'a', 'a')\n UserModel = mock.Mock()\n UserModel.query.filter.first.return_value = True\n form = RegisterUserForm(obj=test_user)\n \n with mock.patch('myapp.forms.User', UserModel):\n form.validate()\n \n assert form.errors['username'] == \"Email in use already\"", "If these pass -- which they should, but you may have to install mock if you're not on Python 3 -- I think we should move the username and email validation into their own callables that are independently testable:", "def is_username_free(username):\n return User.query.filter(User.username == username).count() == 0\n\ndef is_email_free(email):\n return User.query.filter(User.email == email).count() == 0", "And then use these in the endpoint itself:", "@app.route('/register', methods=['GET', 'POST'])\ndef register():\n form = RegisterUserForm()\n \n if form.validate_on_submit():\n if not is_username_free(form.username.data):\n form.errors['username'] = ['Username in use already']\n return render_template('register.html', form=form)\n \n if not is_email_free(form.email.data):\n form.errors['email'] = ['Email in use already']\n return render_template('register.html', form=form)\n \n user = User()\n form.populate_obj(user)\n db.session.add(user)\n db.session.commit()\n return redirect('homepage')\n \n return render_template('register.html', form=form)", "This is really hard to test, so instead of even attempting that -- being honest, I spent the better part of an hour attempting to test the actual endpoint and it was just a complete mess -- let's extract out the actual logic and place it into it's own callable:", "class OurValidationError(Exception):\n def __init__(self, msg, field):\n self.msg = msg\n self.field = field\n\ndef register_user(username, email, password):\n if not is_username_free(username):\n raise OurValidationError('Username in use already', 'username')\n \n if not is_email_free(email):\n raise OurValidationError('Email in use already', 'email')\n \n user = User(username=username, email=email, password=password)\n db.session.add(user)\n db.session.commit()\n \n \[email protected]('/register', methods=['GET', 'POST'])\ndef register_user_view():\n form = RegisterUserForm()\n \n if form.validate_on_submit():\n try:\n register_user(form.username.data, form.email.data, form.password.data)\n except OurValidationError as e:\n form.errors[e.field] = [e.msg]\n return render_template('register.html', form=form)\n else:\n return redirect('homepage')\n \n return render_template('register.html', form=form)", "Now we're beginning to see the fruits of our labors. 
These aren't the easiest functions to test, but there's less we need to mock out in order to test the actual logic we're after.", "def test_duplicated_user_raises_error():\n ChasteValidator = mock.Mock(return_value=False)\n \n with mock.patch('myapp.logic.is_username_free', ChasteValidator):\n with pytest.raises(OurValidationError) as excinfo:\n register_user('fred', '[email protected]', 'fredpassword')\n \n assert excinfo.value.msg == 'Username in use already'\n assert excinfo.value.field == 'username'\n\ndef test_duplicated_user_raises_error():\n ChasteValidator = mock.Mock(return_value=False)\n PromisciousValidator = mock.Mock(return_value=True)\n \n with mock.patch('myapp.logic.is_username_free', PromisciousValidator),\n mock.patch('myapp.logic.is_email_free', ChasteValidator):\n with pytest.raises(OurValidationError) as excinfo:\n register_user('fred', '[email protected]', 'fredpassword')\n \n assert excinfo.value.msg == 'Email in use already'\n assert excinfo.value.field == 'email'\n\ndef test_register_user_happy_path():\n PromisciousValidator = mock.Mock(return_value=True)\n MockDB = mock.Mock()\n \n with mock.patch('myapp.logic.is_username_free', PromisciousValidator),\n mock.patch('myapp.logic.is_email_free', ChasteValidator), \n mock.patch('myapp.logic.db', MockDB):\n \n register_user('fred', '[email protected]', 'freddpassword')\n \n assert MockDB.commit.call_count", "Of course, we should also write tests for the controller. I'll leave that as an exercise. However, there's something very important we're learning from these tests. We have to mock.patch everything still. Our validators lean directly on the database, our user creation leans directly on the database, everything leans directly on the database. And I don't want to do that, we've found that it makes testing hard. We're also seeing if we need to add another registration restriction -- say we don't like people named Fred so we won't let anyone register with a username or email containing Fred in it -- we need to crack open the register_user function and add it directly. We can solve both of these problems.\nThe Database Problem\nTo address the database problem we need to realize something. We're not actually interested in the database, we're interested in the data it stores. And since we're interested in finding data rather than where it's stored at, why not stuff an interface in the way?", "from abc import ABC, abstractmethod\n\nclass AbstractUserRepository(ABC):\n \n @abstractmethod\n def find_by_username(self, username):\n pass\n \n @abstractmethod\n def find_by_email(self, email):\n pass\n \n @abstractmethod\n def persist(self, user):\n pass", "Hmm...that's interesting. Since we'll end up depending on this instead of a concrete implementation, we can run our tests completely in memory and production on top of SQLAlchemy, Mongo, a foreign API, whatever.\nBut we need to inject it into our validators instead of reaching out into the global namespace like we currently are.", "def is_username_free(user_repository):\n def is_username_free(username):\n return not user_repository.find_by_username(username)\n return is_username_free\n\ndef is_email_free(user_repository):\n def is_email_free(email):\n return not user_repository.find_by_email(email)\n return is_email_free", "These validators are simple enough that closures work instead of full-fledged objects. 
The important part here is to maintain a consistent interface -- if we need to use classes all of a sudden, we need to define a __call__ on them to maintain this interface.\nWe can also change our register callable to accept the repository as well:", "def register_user(user_repository):\n email_checker = is_email_free(user_repository)\n username_checker = is_username_free(user_repository)\n \n def register_user(username, email, password):\n \n if not username_checker(username):\n raise OurValidationError('Username in use already', 'username')\n\n if not email_checker(email):\n raise OurValidationError('Email in use already', 'email')\n\n user = User(username=username, email=email, password=password)\n user_repository.persist(user)\n \n return register_user", "Of course the tests break now, and that's okay. We made a very sweeping change to the architecture here. We need to go back through and alter the tests one by one, but instead of patching everything out we can do something better: Dependency Injection.", "def test_duplicated_email_causes_false():\n fake_user_repository = mock.create_autospec(AbstractUserRepository)\n fake_user_repository.find_by_email.return_value = True\n checker = is_email_free(fake_user_repository)\n \n assert not checker('[email protected]')\n \ndef test_duplicated_username_causes_false():\n fake_user_repository = mock.create_autospec(AbstractUserRepository)\n fake_user_repository.find_by_username.return_value = True\n checker = is_username_free(fake_user_repository)\n \n assert not checker('fred')\n\n\ndef test_register_user_happy_path():\n fake_user_repository = mock.create_autospec(AbstractUserRepository)\n fake_user_repository.find_by_email.return_value = False\n fake_user_repository.find_by_username.return_value = False\n registrar = register_user(fake_user_repository)\n \n registrar('fred', '[email protected]', 'fredpassword')\n \n assert fake_user_repository.persist.call_count", "But to test that our validators function correctly in this context, we need to fake out find_by_email and find_by_username indpendently. This is a symptom of our code not being Open-Closed.\nThe Open-Closed Problem\nRevisiting the other major issue from how the code is laid out right now is that it's not Open-Closed. If you're not familiar with the principle, Wikipedia says this:\n\n\"software entities (classes, modules, functions, etc.) should be open for extension, but closed for modification\"\n\nOr in a different way, \"You should be able to change functionality without editing existing code.\" -- I believe I need to credit Sandi Metz with this, but I'm not sure. We've actually already used this idea by injecting the User Repository. In tests, we inject a fake or in memory repository, but in production it can be a SQLAlchemy implementation, or maybe wrap that up into a caching repository. 
We can do the same thing with the validators.", "def register_user(user_repository, validator):\n def registrar(username, email, password):\n user = User(username, email, password)\n validator(user)\n user_repository.persist(user)\n return registrar", "Of course, our tests break again, so let's revisit the currently breaking one first:", "def test_register_user_happy_path():\n fake_user_repository = mock.create_autospec(AbstractUserRepository)\n registrar = register_user(fake_user_repository, lambda user: None)\n \n registrar('fred', '[email protected]', 'fredpassword')\n \n assert fake_user_repository.persist.call_count\n \ndef test_register_user_fails_validation():\n fake_user_repository = mock.create_autospec(AbstractUserRepository)\n fake_validator = mock.Mock(side_effect=OurValidationError('username in use already', 'username'))\n registrar = register_user(fake_user_repository, fake_validator)\n \n try:\n registrar('fred', '[email protected]', 'fredpassword')\n except OurValidationError as e:\n assert e.msg == 'username in use already'\n assert e.field == 'username'\n else:\n assert False, \"Did not Raise\"", "We'll need to tweak the validation logic some to make up for the fact that we're passing the whole user object now; each validator raises only when the repository finds an existing match:", "def validate_username(user_repository):\n def validator(user):\n if user_repository.find_by_username(user.username):\n raise OurValidationError('Username in use already', 'username')\n return True\n return validator\n\ndef validate_email(user_repository):\n def validator(user):\n if user_repository.find_by_email(user.email):\n raise OurValidationError(\"Email in use already\", 'email')\n return True\n return validator", "The tests for these are pretty straightforward as well, so I'll omit them. But we need a way to stitch them together...", "def validate_many(*validators):\n def checker(input):\n return all(validator(input) for validator in validators)\n return checker", "And then hook it all up like this:", "validator = validate_many(validate_email(user_repository), validate_username(user_repository))\nregistrar = register_user(user_repository, validator)", "Our neglected Controller\nWe've spent a lot of time looking at how to compartmentalize the registration logic and portion out its concerns. However, the controller itself needs some attention as well. When we last left, it looked like this:", "@app.route('/register', methods=['GET', 'POST'])\ndef register_user_view():\n form = RegisterUserForm()\n \n if form.validate_on_submit():\n try:\n register_user(form.username.data, form.email.data, form.password.data)\n except OurValidationError as e:\n form.errors[e.field] = [e.msg]\n return render_template('register.html', form=form)\n else:\n return redirect('homepage')\n \n return render_template('register.html', form=form)", "But we can do better than that. The problem here is that the logic is set in stone, nested flows of control.
But mostly, I really like any excuse to use class based views.", "class RegisterUser(MethodView):\n def __init__(self, form, registrar, template, redirect):\n self.form = form\n self.registrar = registrar\n self.template = template\n self.redirect = redirect\n \n def get(self):\n return self._render()\n \n def post(self):\n if self.form.validate_on_submit():\n return self._register()\n else:\n return self._render()\n \n def _register(self):\n try:\n self.registrar(self.form.username.data, self.form.email.data, self.form.password.data)\n except OurValidationError as e:\n self._handle_error(e)\n return self._render()\n else:\n return self._redirect()\n \n def _render(self):\n return render_template(self.template, form=self.form)\n\n def _redirect(self):\n return redirect(url_for(self.redirect))\n \n def _handle_error(self, e):\n self.form.errors[e.field] = [e.msg]", "Now that looks like a lot of code. However, each piece is much simpler than the original function. The controller is reduced to handing out actions instead of doing all the work itself. We can test the main logic of it as well. Even though we should test it, I might just leave it alone. Maybe run it through an acceptance test.\nWhat did we gain?\nEverything is much more high level. The controller, validation, registration, even the form. Nothing's concerned with more than it needs to be. Sure, there's still some plumbing to do with the SQLAlchemy implementation of the UserRepository" ]
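The post mentions running the tests against an in-memory repository without ever showing one. A minimal sketch of what such an implementation of `AbstractUserRepository` might look like follows; everything beyond the three abstract methods is an assumption:

```python
class InMemoryUserRepository(AbstractUserRepository):
    """Hypothetical in-memory repository, handy for tests."""

    def __init__(self):
        self._users = []

    def find_by_username(self, username):
        return next((u for u in self._users if u.username == username), None)

    def find_by_email(self, email):
        return next((u for u in self._users if u.email == email), None)

    def persist(self, user):
        self._users.append(user)


# Wiring it up the same way the post does:
# user_repository = InMemoryUserRepository()
# validator = validate_many(validate_email(user_repository), validate_username(user_repository))
# registrar = register_user(user_repository, validator)
```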
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
nicolas998/Analisis_Datos
02_Medidas_Localizacion.ipynb
gpl-3.0
[ "Medidas de Localización\nMedidas típicas, que son paramétricas:\n- Media: $\\mu = \\frac{\\sum_{i=1}^{N} x_i}{N} $\n- Desviación $\\sigma = \\sqrt{ \\frac{1}{N} \\sum_{i=1}^{N} (x_i -\\mu)^2}$\n- Asimetría.\n- etc...\nExisten otras medidas que son no paramétricas, estas pueden ser:\n- Mediana.\n- Quintiles.\n- Deciles.\n- ..\n- etc.\n- Cuantiles.\nDependiendo de la medida esta puede ser o no robusta, esto significa que no se vea afectada fácilmente por aspectos tales como:\n\nCantidad de datos.\nCalidad de los datos.\n\n\nEjemplo de medidas paramétricas:", "%matplotlib inline\nimport numpy as np\nimport pylab as pl\n\nSerie = np.random.normal(0,1,1000)\n# Impresión de la media y la desviación\nprint Serie.mean()\nprint Serie.std()", "Pero miren lo que pasa con la mediana", "print np.percentile(Serie,50)\nprint np.median(Serie)", "Que ocurre\n\nLa media esperada es 0, sin embargo esta presenta una diferencia.\nIgualmente ocurre conla desviación estandar.\n\n\n\nPero no ocurre lo mismo con la mediana, en 1000 datos se presenta dos ordenes de magnitud más cerca de 0 \n\nLa siguiente figura ejemplifica esto:\nDefinición de función para graficar", "def GraficaHistogramaParam(Values,bins=15):\n # Genera el histograma de valores\n h,b = np.histogram(Values,bins=bins)\n h = h.astype(float); h = h / h.sum()\n b = (b[1:]+b[:-1])/2.0\n # Obtiene la figura \n fig=pl.figure(figsize=(10,8))\n ax=fig.add_subplot(111)\n ax.plot(b,h,'b',lw=2)\n ax.fill_between(b,h,color='b',alpha=0.2)\n ax.set_xlabel('$X$',size=15)\n ax.set_ylabel('$f(x)$',size=15)\n ax.set_xlim(-3,3)\n ax.set_ylim(0,h.max()+0.05)\n ax.grid(True)\n ax.legend(loc=0)\n # Grafica las localizaciones\n ax.vlines(Values.mean(),0,h.max()+0.05,lw=2,color='r')\n ax.vlines([Values.mean()+Values.std(),Values.mean()-Values.std()],0,h.max()+0.05,lw=1,color='r')\n pl.show()\n\n\ndef GraficaHistogramaNoParam(Values,bins=15):\n # Genera el histograma de valores\n h,b = np.histogram(Values,bins=bins)\n h = h.astype(float); h = h / h.sum()\n b = (b[1:]+b[:-1])/2.0\n # Obtiene la figura \n fig=pl.figure(figsize=(10,8))\n ax=fig.add_subplot(111)\n ax.plot(b,h,'b',lw=2)\n ax.fill_between(b,h,color='b',alpha=0.2)\n ax.set_xlabel('$X$',size=15)\n ax.set_ylabel('$f(x)$',size=15)\n ax.set_xlim(-3,3)\n ax.set_ylim(0,h.max()+0.05)\n ax.grid(True)\n ax.legend(loc=0)\n # Grafica las localizaciones\n ax.vlines(np.percentile(Values,50),0,h.max()+0.05,lw=2,color='r')\n ax.vlines([np.percentile(Values,10),np.percentile(Values,90)],0,h.max()+0.05,lw=1,color='r')\n pl.show()\n", "Grafica de las medidas de localización paramétricas", "GraficaHistogramaParam(Serie)", "Gráfica de medidas no paramétricas", "GraficaHistogramaNoParam(Serie)", "Caso con menos datos\nEn un caso con menor cantida dde datos se espera tener una mayor diferencia entre ambas medidas, de ahí si inestabilidad:", "Serie = np.random.uniform(2.5,10,2e5)\nprint Serie.mean()\nprint np.median(Serie)\nfrom scipy import stats as st\nprint st.skew(Serie)\n", "Ejercicio para observar robustez en ambas medidas\nEn el siguiente ejercicio generamos 200 veces series aleatorias cada una con 25 entradas, \nluego vamos a comprar como son las diferencias entre las medias y las medianas encontradas para cada uno de los casos.", "medianas = np.zeros(20000)\nmedias=np.zeros(20000)\nfor i in range(20000):\n Serie = np.random.normal(0,1,25)\n medias[i] = Serie.mean()\n medianas[i]=np.median(Serie)\n\ndef ComparaHistogramas(Vec1,Vec2,bins=15):\n # Genera el histograma de valores\n h1,b1 = np.histogram(Vec1,bins=bins)\n h1 = 
h1.astype(float); h1 = h1 / h1.sum()\n b1 = (b1[1:]+b1[:-1])/2.0\n h2,b2 = np.histogram(Vec2,bins=bins)\n h2 = h2.astype(float); h2 = h2 / h2.sum()\n b2 = (b2[1:]+b2[:-1])/2.0\n #Genera la figura \n fig=pl.figure(figsize=(10,8))\n ax=fig.add_subplot(111)\n ax.plot(b1,h1,'b',lw=2,label='Vec 1')\n ax.plot(b2,h2,'r',lw=2,label='Vec 2')\n ax.fill_between(b1,h1,color='b',alpha=0.2)\n ax.fill_between(b2,h2,color='r',alpha=0.2)\n ax.set_xlabel('$X$',size=15)\n ax.set_ylabel('$f(x)$',size=15)\n ax.set_xlim(-1,1)\n ax.set_ylim(0,h1.max()+0.05)\n ax.grid(True)\n ax.legend(loc=0)\n # Grafica las localizaciones\n pl.show()\n return h1,h2\n\nHistMedianas, HistMedias = ComparaHistogramas(medianas,medias)", "Suceptibilidad a Datos Atípicos\nUn dato atípico se define como aquel dato que se encuentra fuera del rango de oscilación de los datos, o bien que no es coherente con la física del fenómeno que se está sensando, los siguientes son ejemplos de datos atípicos:\n\nValores exageradamente altos.\nValores negativos en casos de fenómenos sin valores negativos.\nValores fuera de un rango definido.\nSecuencia de valores con el mismo valor (no es tanto atípico, pero si es un indicio de problemas)\n\nUna forma de identificarlos es a partir de la media de los valores y la desviación, o los percentiles sobre los que se ubiquen.\n\n$ValAtipico > \\mu + N \\sigma$, donde $N$ oscila de acuerdo a lo fuerte que se quiera hacer la pregunta \n$ValAtipico > P_{99.9}$\n\n\nDependiendo de la cantidad de registros en los datos, de la cantidad de valores atípicos y de los valores que estos tengan pueden tener o no consecuencias sobre la serie y sobre posteriores análisis que se realicen sobre la misma.\nEjemplo de robustez ante datos atípicos", "Serie = np.random.normal(0,1,50)\nfig = pl.figure(figsize=(9,7))\npl.plot(Serie)\npl.grid(True)", "Incertemos un dato loco, que se salga", "Serie2 = np.copy(Serie)\nSerie2[10] = 50.0\nfig = pl.figure(figsize=(9,7))\npl.plot(Serie2)\npl.plot(Serie)", "Ahora veamos que ocurre con la media:", "print Serie.mean()\nprint Serie2.mean()", "Y que ocurre con la mediana:", "print np.median(Serie)\nprint np.median(Serie2)", "Introducción de múltiples Outliers\nQue pasa si se introduce una alta cantidad de datos atípicos?, es decir como es la tasa a \nla cual la media puede ir pasando a ser cada ves un estimador con un mayor error?.", "def CreaOutliers(vect,NumOut,Mult=10): \n # Encuentra el rango de oscilacion \n Per = np.array([np.percentile(vect,i) for i in [0.1,99.9]])\n # Genera los aleatorios \n vectOut = np.copy(vect)\n for i in np.random.choice(vect.shape[0],NumOut):\n p = np.random.choice(2,1)[0]\n vectOut[i] = vectOut[i] + Per[p]*Mult*np.random.uniform(2,15,1)[0]\n return vectOut\n\nprint Serie3.mean()\nprint Serie.mean()\nprint '----------'\nprint np.median(Serie3)\nprint np.median(Serie)\n\n# Definición de variables\nN = 1000\nS1 = np.random.normal(0,1,N)\nMedias = []; Std = []\nMedianas = []; R25_75 = []\n# Introduccion de outliers\nfor i in np.arange(5,200):\n S2 = CreaOutliers(S1, i)\n Medias.append(S2.mean())\n Medianas.append(np.median(S2))\n Std.append(S2.std())\n R25_75.append(np.percentile(S2,75)-np.percentile(S2,25))\nMedias = np.array(Medias)\nMedianas = np.array(Medianas)\n", "Resultados:\nSegún lo obtenido la mediana se ve altamente afectada, y la desviación también:\nCaso de una distribución Normal", "# Definición de variables\nN = 1000\nS1 = np.random.uniform(0,1,N)\nMedias = []; Std = []\nMedianas = []; R25_75 = []\n# Introduccion de outliers\nfor i in 
np.arange(5,200):\n S2 = CreaOutliers(S1, i)\n Medias.append(S2.mean())\n Medianas.append(np.median(S2))\n Std.append(S2.std())\n R25_75.append(np.percentile(S2,75)-np.percentile(S2,25))\nMedias = np.array(Medias)\nMedianas = np.array(Medianas)\n\n\nfig = pl.figure(figsize=(13,5))\nax = fig.add_subplot(121)\nax.scatter(Medianas,Medias,c=np.arange(5,200))\nax.set_xlabel('Mediana',size=14)\nax.set_ylabel('Media $\\mu$',size=14)\nax = fig.add_subplot(122)\nax.scatter(R25_75,Std,c=np.arange(5,200))\n#ax.set_xlim(0,1)\nax.set_xlabel('Rango $25%$ - $75\\%$',size=14)\nax.set_ylabel('Desviacion $\\sigma$',size=14)\npl.show()", "Caso de una distribución uniforme", "fig = pl.figure(figsize=(13,5))\nax = fig.add_subplot(121)\nax.scatter(Medianas,Medias,c=np.arange(5,200))\nax.set_xlabel('Mediana',size=14)\nax.set_ylabel('Media $\\mu$',size=14)\nax = fig.add_subplot(122)\nax.scatter(R25_75,Std,c=np.arange(5,200))\n#ax.set_xlim(0,1)\nax.set_xlabel('Rango $25%$ - $75\\%$',size=14)\nax.set_ylabel('Desviacion $\\sigma$',size=14)\npl.show()", "Cuantiles\nComo una medida no paramétrica de la distribución de los datos se encuentran los cuantiles,\nel más conocido es la mediana, sin embargo se pueden obtener cuantiles de cualquier medida.\nQue representan : El cuantil del 25% igual a 3.56, indica que el 25% de los datos son iguales o inferiores a 3.56.\nAl ser una medida no paramétrica se ve poco afectada por errores en los datos y por datos atípicos.", "S1 = np.random.normal(0,1,100)\na=pl.boxplot(S1)\na=pl.xlabel('Serie')", "Caso de Introducción de Outliers\nQQ plot de las series donde se introducen outliers y la serie en donde no", "S1 = np.random.normal(0,1,100)\nS2 = CreaOutliers(S1,10)\nPer1 = np.array([np.percentile(S1,i) for i in range(10,91,10)])\nPer2 = np.array([np.percentile(S2,i) for i in range(10,91,10)])\n\nfig = pl.figure(figsize=(9,7))\nax = fig.add_subplot(111)\nax.scatter(Per1,Per2,s=40)\nax.set_xlim(-2,2)\nax.set_ylim(-2,2)\nax.grid(True)\nax.set_xlabel('Deciles Observados',size =14); ax.set_ylabel('Deciles Alterados',size=14) \nax.plot([-2,2],[-2,2],lw=0.5,c='k')\npl.show()" ]
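As a small complement to the two outlier rules mentioned above ($x > \mu + N\sigma$ and $x > P_{99.9}$), the sketch below shows one way both flags could be computed with numpy; the threshold values are illustrative choices, not part of the original notebook:

```python
import numpy as np

def flag_outliers(x, N=3, upper_percentile=99.9):
    """Return boolean masks flagging outliers by the two rules discussed above."""
    x = np.asarray(x, dtype=float)
    mu, sigma = np.nanmean(x), np.nanstd(x)
    sigma_rule = np.abs(x - mu) > N * sigma                      # |x - mu| > N * sigma
    percentile_rule = x > np.nanpercentile(x, upper_percentile)  # above the P_99.9 cut
    return sigma_rule, percentile_rule

# Example on a series with injected outliers:
# sigma_mask, perc_mask = flag_outliers(CreaOutliers(np.random.normal(0, 1, 1000), 50))
```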
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
HUDataScience/StatisticalMethods2016
notebooks/Basic_Python.ipynb
apache-2.0
[ "Basic of Python.\nThe library we are going to use are the following:\n\nnumpy", "for i in range(10):\n y = i*40 +i**2 + 2\n print(y)\n \ni = i+2\n\nrange(10)\n\nfor i in [\"a\",\"b\",\"tt\"]:\n print i\n\nx = [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],[2, 1, 2, 3, 4, 5, 6, 7, 8, 9],[0,5, 2, 3, 4, 5, 6, 7, 8, 9]]\nprint(x)\n\nnp.mean(x)\n\nx= np.linspace(-1,1,100)\n\nx\n\nx[[34,67]]\n\nx[45:90]\n\nx[::3]\n\nx[10::3]\n\nx[::-1]\n\nx= [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n\nx[::-1]\n\ny = np.linspace(0,10,100)\nx= np.linspace(-1,1,100)\n\nx", "careful with \"=\":\n\nxx= x means they are the same object\nxx = x .... whatever or x.copy() they are two different objects", "xx = x.copy()\n\nxx+=2\n\nxx\n\nx", "Masking\nThis only works with numpy array.\nnumpy array vs. list", "xlist = [3,4,5,6,7,8,9]\nxarray = np.asarray([3,4,5,6,7,8,9]) # np.asarray(xlist)\n\nxlist*2\n\nxarray*2\n\nstrangelist = [\"toto\",3,{},[]]\n\nnp.asarray(strangelist)*2", "how to apply masking?\nUse Numpy ARRAY", "x\n\nmask = x>2\n\nmask\n\nx[mask] # x[x>2]\n\nx[ (x>2) & (x<2.5) ] # x[ (x>2) * (x>1.5) ] # both have to be true\n\nx[ (x>2) | (x>1.5) ] # x[ (x>2) + (x>1.5) ] # any have to be true", "The case of the NaN Value", "iamnan = np.NaN\n\niamnan\n\niamnan==iamnan\n\nnp.inf==np.inf\n\nxwithnan = np.asarray([3,4,5,6,7,2,3,np.NaN,75,75])\n\nxwithnan\n\nxwithnan*2\n\n4+np.NaN\n\n4/np.NaN\n\n4**np.NaN\n\nnp.mean(xwithnan)\n\nnp.nanmean(xwithnan)\n\nnp.mean(xwithnan[xwithnan==xwithnan])\n\n~(xwithnan==xwithnan)\n\nxwithnan!=xwithnan\n\nnp.isnan(xwithnan)\n\nxwithnan = [3,4,5,6,7,2,3,np.NaN,75,75]\n\nxwithnan[xwithnan==xwithnan]\n\n0 == False\n\n1 == True", "Your first plot\nFor ploting we are going to use matplotlib. let's plot 2 random variable a vs. b", "a = np.random.rand(30)\nb = np.random.rand(30)\n\n# plot within the notebook\n%matplotlib inline\nimport matplotlib.pyplot as mpl\n\npl = mpl.hist(a)\n\nmpl.scatter(a,b,s=150, facecolors=\"None\", edgecolors=\"b\",lw=3)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
afeiguin/comp-phys
14_02_multilayer-networks.ipynb
mit
[ "How a regression network is traditionally trained\nThis network is trained using a data set $D = ({{\\bf x}^{(n)}, {\\bf t}^{(n)}})$ by adjusting ${\\bf w}$ so as to minimize an error function, e.g.,\n$$\nE_D({\\bf w}) = \\sum_n\\sum_i (y_i({\\bf x}^{(n)};{\\bf w}) - t_i^{(n)})^2\n$$\nThis objective function is a sum of terms, one for each input/target pair ${ {\\bf x}, {\\bf t} }$, measuring how close the output ${\\bf y}({\\bf x}; {\\bf w})$ is to the target ${\\bf t}$:\n$$\nE_D({\\bf w}) = \\sum_n E_{\\bf x}^{(n)}, \\quad E_{\\bf x}^{(n)}=\\sum_i (y_i({\\bf x}^{(n)};{\\bf w}) - t_i^{(n)})^2\n$$\nThis minimization is based on repeated evaluation of the gradient of $E_D$. This gradient can be efficiently computed using the backpropagation algorithm which uses the chain rule to find the derivatives, as we discuss below.\nOften, regularization (also known as weight decay) is included, modifying\nthe objective function to:\n$$\nM({\\bf w})=\\alpha E_D({\\bf w}) + \\beta E_W({\\bf w}),\n$$\nwhere $E_W = \\frac{1}{2}\\sum_i w_i^2$.\nGradient descent\n(From Wikipedia)\nCool animations at http://www.benfrederickson.com/numerical-optimization/\nGradient descent is a first-order iterative optimization algorithm for finding the minimum of a function. To find a local minimum of a function using gradient descent, one takes steps proportional to the negative of the gradient (or of the approximate gradient) of the function at the current point.\nGradient descent is based on the observation that if the multi-variable function $ F(\\mathbf {x} )$ is defined and differentiable in a neighborhood of a point $ \\mathbf {a}$ , then $ F(\\mathbf {x} )$ decreases fastest if one goes from $ \\mathbf {a}$ in the direction of the negative gradient of $F$ at $ \\mathbf {a}$ , $ -\\nabla F(\\mathbf {a} )$. It follows that, if\n$$\\mathbf {a} {n+1}=\\mathbf {a} {n}-\\eta \\nabla F(\\mathbf {a} _{n})$$ \nfor $\\eta$ small enough, then $F(\\mathbf {a_{n}} )\\geq F(\\mathbf {a_{n+1}} )$. In other words, the term $\\eta \\nabla F(\\mathbf {a} )$ is subtracted from $ \\mathbf {a}$ because we want to move against the gradient, namely down toward the minimum. With this observation in mind, one starts with a guess $\\mathbf {x} {0}$ for a local minimum of $F$, and considers the sequence $\\mathbf {x} {0},\\mathbf {x} {1},\\mathbf {x} {2},\\dots$ such that\n$${x} {n+1}=\\mathbf {x} {n}-\\gamma {n}\\nabla F(\\mathbf {x} {n}),\\ n\\geq 0.$$\nWe have\n$F(\\mathbf {x} {0})\\geq F(\\mathbf {x} {1})\\geq F(\\mathbf {x} {2})\\geq \\cdots$ ,\nso hopefully the sequence $(\\mathbf {x} {n})$ converges to the desired local minimum. Note that the value of the step size $\\eta$ is allowed to change at every iteration.\nThis process is illustrated in the adjacent picture. Here $F$ is assumed to be defined on the plane, and that its graph has a bowl shape. The blue curves are the contour lines, that is, the regions on which the value of $F$ is constant. A red arrow originating at a point shows the direction of the negative gradient at that point. Note that the (negative) gradient at a point is orthogonal to the contour line going through that point. 
We see that gradient descent leads us to the bottom of the bowl, that is, to the point where the value of the function $F$ is minimal.\n<img src=\"figures/Gradient_descent.png\" style=\"width: 350px;\"/>\nIllustration of the gradient descept procedure on a series of iterations down a bowl shaped surface\nThe \"Zig-Zagging\" nature of the method is also evident below, where the gradient descent method is applied to $$F(x,y)=\\sin \\left({\\frac {1}{2}}x^{2}-{\\frac {1}{4}}y^{2}+3\\right)\\cos(2x+1-e^{y})$$", "%matplotlib inline\nfrom matplotlib import pyplot\npyplot.rcParams['image.cmap'] = 'jet'\nimport numpy as np\n\nx0 = -1.4\ny0 = 0.5\nx = [x0] # The algorithm starts at x0, y0\ny = [y0] \n\neta = 0.1 # step size multiplier\nprecision = 0.00001\n\ndef f(x,y):\n f1 = x**2/2-y**2/4+3\n f2 = 2*x+1-np.exp(y)\n return np.sin(f1)*np.cos(f2)\n\ndef gradf(x,y):\n f1 = x**2/2-y**2/4+3\n f2 = 2*x+1-np.exp(y)\n dx = np.cos(f1)*np.cos(f2)*x-np.sin(f1)*np.sin(f2)*2.\n dy = np.cos(f1)*np.cos(f2)*(-y/2.)-np.sin(f1)*np.sin(f2)*(-np.exp(y))\n return (dx,dy)\n\nerr = 100.\nwhile err > precision:\n (step_x, step_y) = gradf(x0, y0)\n x0 -= eta*step_x\n y0 -= eta*step_y\n x.append(x0)\n y.append(y0)\n err = eta*(abs(step_x)+abs(step_y))\n\n\nprint(x0,y0)\n\n#### All this below is just to visualize the process\ndx = 0.05\ndy = 0.05\nxx = np.arange(-1.5, 1.+dx, dx)\nyy = np.arange(0., 2.+dy, dy)\nV = np.zeros(shape=(len(yy),len(xx)))\n\nfor iy in range(0,len(yy)):\n for ix in range(0,len(xx)):\n V[iy,ix] = f(xx[ix],yy[iy])\n\nX, Y = np.meshgrid(xx, yy)\npyplot.contour(X, Y, V)\n\n#pyplot.plot(x,y,linestyle='--', lw=3);\npyplot.scatter(x,y);\n\npyplot.ylabel(\"y\")\npyplot.xlabel(\"x\");", "Stochastic gradient descent (SGD)\nStochastic gradient descent (often shortened to SGD), also known as incremental gradient descent, is a stochastic approximation of the gradient descent optimization and iterative method for minimizing an objective function that is written as a sum of differentiable functions. \nThere are a number of challenges in applying the gradient descent rule. To understand what the problem is, let's look back at the quadratic cost $E_D$. Notice that this cost function has the form $E=\\sum_n E_{\\bf x}^{(n)}$\nIn practice, to compute the gradient $\\nabla E_D$\n we need to compute the gradients $\\nabla E_{\\bf x}^{(n)}$\n separately for each training input, ${\\bf x^{(n)}}$\nand then average them.\n. Unfortunately, when the number of training inputs is very large this can take a long time, and learning thus occurs slowly.\nStochastic gradient descent can be used to speed up learning. The idea is to estimate the gradient $\\nabla E$\n by computing $\\nabla E_{\\bf x}$\nfor a small sample of randomly chosen training inputs. By averaging over this small sample it turns out that we can quickly get a good estimate of the true gradient.\n<!--To make these ideas more precise, stochastic gradient descent works by randomly picking out a small number $m$\n of randomly chosen training inputs. We'll label those random training inputs ${\\bf x^{(1)},x^{(2)},…,x^{(m)}}$\n, and refer to them as a mini-batch. Provided the sample size m\n is large enough we expect that the average value of the $\\nabla E_x$\n will be roughly equal to the average over all of them, that is\n$$\\frac{1}{m}\\sum _{j=1}^m \\nabla E_{x^{j}} \\approx \\frac{1}{n}\\sum _{j=1}^n \\nabla E_{x^{j}}$$\nwhere the second sum is over the entire set of training data. 
\n!-->\n\nTo connect this explicitly to learning in neural networks, suppose $w_k$\n and $b_l$\n denote the weights and biases in our neural network. Then stochastic gradient descent works by picking out a randomly chosen mini-batch of training inputs, and training with those,\n$$\nw_k \\rightarrow w_k - \\eta \\sum_{j=1}^m \\frac{\\partial{E_{\\bf x}^{(j)}}}{\\partial w_k}\n$$\n$$\nb_l \\rightarrow b_l - \\eta \\sum_{j=1}^m \\frac{\\partial{E_{\\bf x}^{(j)}}}{\\partial b_l}\n$$\nwhere the sums are over all the training examples in the current mini-batch. Then we pick out another randomly chosen mini-batch and train with those. And so on, until we have exhausted the training inputs, which is said to complete an epoch of training. At that point we start over with a new training epoch.\nThe pseudocode would look like:\nChoose an initial vector of parameters $w$ and learning rate $\\eta$.\nRepeat until an approximate minimum is obtained:\nRandomly shuffle examples in the training set.\n For i=1,2,...,n , do:\n$\\quad \\quad \\quad \\quad \\quad w:=w-\\eta \\nabla E_{i}(w).$ \nExample: linear regression\nAs seen previously, the objective function to be minimized is:\n$$\n\\begin{aligned}\nE(w)=\\sum {i=1}^{n}E{i}(w)=\\sum {i=1}^{n}\\left(w{1}+w_{2}x_{i}-y_{i}\\right)^{2}.\n\\end{aligned}\n$$\nAnd the gradent descent equations can be written in matrix form as:\n$$\n\\begin{bmatrix}w_{1}\\w_{2}\\end{bmatrix}:={\\begin{bmatrix}w_{1}\\w_{2}\\end{bmatrix}}-\\eta {\\begin{bmatrix}2(w_{1}+w_{2}x_{i}-y_{i})\\2x_{i}(w_{1}+w_{2}x_{i}-y_{i})\\end{bmatrix}}.\n$$\nWe'll generate a series of 100 random points aligned more or less along the line $y=a+bx$ with $a=1$ and $b=2$", "%matplotlib inline\nfrom matplotlib import pyplot\nimport numpy as np\n\na = 1\nb = 2\nnum_points = 100\nnp.random.seed(637163) # we make sure we always generate the same sequence\nx_data = np.random.rand(num_points)*20.\ny_data = x_data*b+a+3*(2.*np.random.rand(num_points)-1)\n\npyplot.scatter(x_data,y_data)\npyplot.plot(x_data, b*x_data+a)\n\n#### Least squares fit\nsum_x = np.sum(x_data)\nsum_y = np.sum(y_data)\nsum_x2 = np.sum(x_data**2)\nsum_xy = np.sum(x_data*y_data)\ndet = num_points*sum_x2-sum_x**2\nfit_a = (sum_y*sum_x2-sum_x*sum_xy)/det\nfit_b = (num_points*sum_xy-sum_x*sum_y)/det\nprint(fit_a,fit_b)\n\npyplot.xlim(-1,22)\npyplot.ylim(-1,24)\npyplot.plot(x_data, fit_b*x_data+fit_a);", "We now write an SGD code for this problem. The training_data is a list of tuples (x, y) representing the training inputs and corresponding desired outputs. The variables epochs and mini_batch_size are what you'd expect - the number of epochs to train for, and the size of the mini-batches to use when sampling. eta is the learning rate, $\\eta$. If the optional argument test_data is supplied, then the program will evaluate the network after each epoch of training, and print out partial progress. This is useful for tracking progress, but slows things down substantially.\nThe code works as follows. In each epoch, it starts by randomly shuffling the training data, and then partitions it into mini-batches of the appropriate size. This is an easy way of sampling randomly from the training data. Then for each mini_batch we apply a single step of gradient descent. 
This is done by the code self.update_mini_batch(mini_batch, eta), which updates the coefficients according to a single iteration of gradient descent, using just the training data in mini_batch.", "epochs = 1000\nmini_batch_size = 10\neta = 0.01/mini_batch_size\n\na = 3.\nb = 3.\ndef update_mini_batch(mini_batch, eta):\n global a, b\n a0 = a\n b0 = b\n for x, y, in mini_batch:\n e = eta*(a0+b0*x-y)\n a -= e\n b -= x*e\n \ntraining_data = list(zip(x_data,y_data))\nfor j in range(epochs):\n np.random.shuffle(training_data)\n mini_batches = [training_data[k:k+mini_batch_size]\n for k in range(0, len(training_data), mini_batch_size)]\n for mini_batch in mini_batches:\n update_mini_batch(mini_batch, eta)\n print (\"Epoch {0}: {1} {2}\".format(j,a,b))", "Challenge 14.2\nUse SGD to train the single neuron in the previous notebook using a linearly separable set of 100 points, divided by the line $-\\frac{5}{2}x+\\frac{3}{2}y+3=0$", "### We provide a set of randomly generated training points \nnum_points = 100\nw1 = -2.5\nw2 = 1.5\nw0 = 3.\nnp.random.seed(637163) # we make sure we always generate the same sequence\nx_data = np.random.rand(num_points)*10.\ny_data = np.random.rand(num_points)*10.\nz_data = np.zeros(num_points)\nfor i in range(len(z_data)):\n if (y_data[i] > (-w0-w1*x_data[i])/w2):\n z_data[i] = 1.\n\npyplot.scatter(x_data,y_data,c=z_data,marker='o',linewidth=1.5,edgecolors='black')\npyplot.plot(x_data,(-w1*x_data-w0)/w2)\npyplot.gray()\npyplot.xlim(0,10)\npyplot.ylim(0,10);", "You will need the following auxiliary functions:", "def sigmoid(z):\n \"\"\"The sigmoid function.\"\"\"\n return 1.0/(1.0+np.exp(-z))\n\ndef sigmoid_prime(z):\n \"\"\"Derivative of the sigmoid function.\"\"\"\n return sigmoid(z)*(1-sigmoid(z))", "A simple network to classify handwritten digits\nMost of this section has been taken from M. Nielsen's free on-line book: \"Neural Networks and Deep Learning\" http://neuralnetworksanddeeplearning.com/\nIn this section we discuss a neural network which can solve the more interesting and difficult problem, namely, recognizing individual handwritten digits.\nThe input layer of the network contains neurons encoding the values of the input pixels. Our training data for the network will consist of many 28 by 28\n pixel images of scanned handwritten digits, and so the input layer contains 784=28×28\n neurons. The input pixels are greyscale, with a value of 0.0\n representing white, a value of 1.0\nrepresenting black, and in between values representing gradually darkening shades of grey.\nThe second layer of the network is a hidden layer. We denote the number of neurons in this hidden layer by $n$\n, and we'll experiment with different values for $n$\n. The example shown illustrates a small hidden layer, containing just $n=15$\n neurons.\nThe output layer of the network contains 10 neurons. If the first neuron fires, i.e., has an output $\\sim 1$\n, then that will indicate that the network thinks the digit is a 0\n. If the second neuron fires then that will indicate that the network thinks the digit is a 1\n. And so on. A little more precisely, we number the output neurons from 0\n through 9\n, and figure out which neuron has the highest activation value. If that neuron is, say, neuron number 6\n, then our network will guess that the input digit was a 6\n. And so on for the other output neurons.\n<img src=\"figures/nnetwork.png\" style=\"width: 500px;\"/>\nNetwork to identify single digits. 
The output layer has 10 neurons, one for each digit.\nThe first thing we'll need is a data set to learn from - a so-called training data set. We'll use the MNIST data set, which contains tens of thousands of scanned images of handwritten digits, together with their correct classifications. MNIST's name comes from the fact that it is a modified subset of two data sets collected by NIST, the United States' National Institute of Standards and Technology. Here's a few images from MNIST:\n<img src=\"figures/digits_separate.png\" style=\"width: 250px;\"/>\nThe MNIST data comes in two parts. The first part contains 60,000 images to be used as training data. These images are scanned handwriting samples from 250 people, half of whom were US Census Bureau employees, and half of whom were high school students. The images are greyscale and 28 by 28 pixels in size. The second part of the MNIST data set is 10,000 images to be used as test data. Again, these are 28 by 28 greyscale images. We'll use the test data to evaluate how well our neural network has learned to recognize digits. To make this a good test of performance, the test data was taken from a different set of 250 people than the original training data (albeit still a group split between Census Bureau employees and high school students). This helps give us confidence that our system can recognize digits from people whose writing it didn't see during training.\nIn practice, we are going to split the data a little differently. We'll leave the test images as is, but split the 60,000-image MNIST training set into two parts: a set of 50,000 images, which we'll use to train our neural network, and a separate 10,000 image validation set.\nWe'll use the notation $x$\n to denote a training input. It'll be convenient to regard each training input $x$\n as a 28×28=784-dimensional vector. Each entry in the vector represents the grey value for a single pixel in the image. We'll denote the corresponding desired output by y=y(x)\n, where y\n is a 10\n-dimensional vector. For example, if a particular training image, $x$\n, depicts a 6\n, then $y(x)=(0,0,0,0,0,0,1,0,0,0)^T$\n is the desired output from the network. Note that T\n here is the transpose operation, turning a row vector into an ordinary (column) vector.", "\"\"\"\nmnist_loader\n~~~~~~~~~~~~\n\nA library to load the MNIST image data. For details of the data\nstructures that are returned, see the doc strings for ``load_data``\nand ``load_data_wrapper``. In practice, ``load_data_wrapper`` is the\nfunction usually called by our neural network code.\n\"\"\"\n\n#### Libraries\n# Standard library\nimport pickle\nimport gzip\n\n# Third-party libraries\nimport numpy as np\n\ndef load_data():\n \"\"\"Return the MNIST data as a tuple containing the training data,\n the validation data, and the test data.\n\n The ``training_data`` is returned as a tuple with two entries.\n The first entry contains the actual training images. This is a\n numpy ndarray with 50,000 entries. Each entry is, in turn, a\n numpy ndarray with 784 values, representing the 28 * 28 = 784\n pixels in a single MNIST image.\n\n The second entry in the ``training_data`` tuple is a numpy ndarray\n containing 50,000 entries. 
Those entries are just the digit\n values (0...9) for the corresponding images contained in the first\n entry of the tuple.\n\n The ``validation_data`` and ``test_data`` are similar, except\n each contains only 10,000 images.\n\n This is a nice data format, but for use in neural networks it's\n helpful to modify the format of the ``training_data`` a little.\n That's done in the wrapper function ``load_data_wrapper()``, see\n below.\n \"\"\"\n f = gzip.open('data/mnist.pkl.gz', 'rb')\n training_data, validation_data, test_data = pickle.load(f, encoding='latin1')\n f.close()\n return (training_data, validation_data, test_data)\n\ndef load_data_wrapper():\n \"\"\"Return a tuple containing ``(training_data, validation_data,\n test_data)``. Based on ``load_data``, but the format is more\n convenient for use in our implementation of neural networks.\n\n In particular, ``training_data`` is a list containing 50,000\n 2-tuples ``(x, y)``. ``x`` is a 784-dimensional numpy.ndarray\n containing the input image. ``y`` is a 10-dimensional\n numpy.ndarray representing the unit vector corresponding to the\n correct digit for ``x``.\n\n ``validation_data`` and ``test_data`` are lists containing 10,000\n 2-tuples ``(x, y)``. In each case, ``x`` is a 784-dimensional\n numpy.ndarry containing the input image, and ``y`` is the\n corresponding classification, i.e., the digit values (integers)\n corresponding to ``x``.\n\n Obviously, this means we're using slightly different formats for\n the training data and the validation / test data. These formats\n turn out to be the most convenient for use in our neural network\n code.\"\"\"\n tr_d, va_d, te_d = load_data()\n training_inputs = [np.reshape(x, (784, 1)) for x in tr_d[0]]\n training_results = [vectorized_result(y) for y in tr_d[1]]\n training_data = list(zip(training_inputs, training_results))\n validation_inputs = [np.reshape(x, (784, 1)) for x in va_d[0]]\n validation_data = list(zip(validation_inputs, va_d[1]))\n test_inputs = [np.reshape(x, (784, 1)) for x in te_d[0]]\n test_data = list(zip(test_inputs, te_d[1]))\n return (training_data, validation_data, test_data)\n\ndef vectorized_result(j):\n \"\"\"Return a 10-dimensional unit vector with a 1.0 in the jth\n position and zeroes elsewhere. This is used to convert a digit\n (0...9) into a corresponding desired output from the neural\n network.\"\"\"\n e = np.zeros((10, 1))\n e[j] = 1.0\n return e", "Note also that the biases and weights are stored as lists of Numpy matrices. So, for example net.weights[1] is a Numpy matrix storing the weights connecting the second and third layers of neurons. (It's not the first and second layers, since Python's list indexing starts at 0.) Since net.weights[1] is rather verbose, let's just denote that matrix $w$\n. It's a matrix such that $w_{jk}$\n is the weight for the connection between the $k^{th}$\n neuron in the second layer, and the $j^{th}$\n neuron in the third layer. This ordering of the $j$\n and $k$\n indices may seem strange. The big advantage of using this ordering is that it means that the vector of activations of the third layer of neurons is:\n$$a'=\\mathrm {sigmoid}(wa+b)$$\nThere's quite a bit going on in this equation, so let's unpack it piece by piece. $a$\n is the vector of activations of the second layer of neurons. To obtain $a'$\n we multiply $a$\n by the weight matrix $w$\n, and add the vector $b$\n of biases. 
We then apply the function sigmoid\nelementwise to every entry in the vector $wa+b$.\nOf course, the main thing we want our Network objects to do is to learn. To that end we'll give them an SGD method which implements stochastic gradient descent. \n<!--\nThe training_data is a list of tuples `(x, y)` representing the training inputs and corresponding desired outputs. The variables `epochs` and `mini_batch_size` are what you'd expect - the number of epochs to train for, and the size of the mini-batches to use when sampling. `eta` is the learning rate, $\\eta$. If the optional argument `test_data` is supplied, then the program will evaluate the network after each epoch of training, and print out partial progress. This is useful for tracking progress, but slows things down substantially.\n\nThe code works as follows. In each epoch, it starts by randomly shuffling the training data, and then partitions it into mini-batches of the appropriate size. This is an easy way of sampling randomly from the training data. Then for each `mini_batch` we apply a single step of gradient descent. This is done by the code `self.update_mini_batch(mini_batch, eta)`, which updates the network weights and biases according to a single iteration of gradient descent, using just the training data in `mini_batch`.\n-->\n\nMost of the work is done by the line\ndelta_nabla_b, delta_nabla_w = self.backprop(x, y)\nThis invokes something called the backpropagation algorithm, which is a fast way of computing the gradient of the cost function. So update_mini_batch works simply by computing these gradients for every training example in the mini_batch, and then updating self.weights and self.biases appropriately.\nThe activation $a^l_j$\n of the $j^{th}$\n neuron in the $l^{th}$\nlayer is related to the activations in the $(l-1)^{th}$\n layer by the equation\n$$a^l_j=\\mathrm{sigmoid}(\\sum_k w_{jk}^l a^{l-1}_k+b^l_j)$$\nwhere the sum is over all neurons $k$\n in the $(l−1)^{th}$\n layer. To rewrite this expression in a matrix form we define a weight matrix $w^l$\n for each layer, $l$\n. The entries of the weight matrix $w^l$\n are just the weights connecting to the $l^{th}$\n layer of neurons, that is, the entry in the $j^{th}$\n row and $k^{th}$\n column is $w^l_{jk}$. Similarly, for each layer $l$\n we define a bias vector, $b^l$. You can probably guess how this works - the components of the bias vector are just the values $b^l_j$\n, one component for each neuron in the $l^{th}$\n layer. And finally, we define an activation vector $a^l$\nwhose components are the activations $a^l_j$.\nWith these notations in mind, these equations can be rewritten in the beautiful and compact vectorized form\n$$a^l=\\mathrm{sigmoid}(w^la^{l-1}+b^l).$$\nThis expression gives us a much more global way of thinking about how the activations in one layer relate to activations in the previous layer: we just apply the weight matrix to the activations, then add the bias vector, and finally apply the sigmoid function.\nApart from self.backprop the program is self-explanatory - all the heavy lifting is done in self.SGD and self.update_mini_batch, which we've already discussed. The self.backprop method makes use of a few extra functions to help in computing the gradient, namely sigmoid_prime, which computes the derivative of the sigmoid\n function, and self.cost_derivative. You can get the gist of these (and perhaps the details) just by looking at the code and documentation strings.
Note that while the program appears lengthy, much of the code is documentation strings intended to make the code easy to understand. In fact, the program contains just 74 lines of non-whitespace, non-comment code.", "\"\"\"\nnetwork.py\n~~~~~~~~~~\n\nA module to implement the stochastic gradient descent learning\nalgorithm for a feedforward neural network. Gradients are calculated\nusing backpropagation. Note that I have focused on making the code\nsimple, easily readable, and easily modifiable. It is not optimized,\nand omits many desirable features.\n\"\"\"\n\n#### Libraries\n# Standard library\nimport random\n\n# Third-party libraries\nimport numpy as np\n\nclass Network(object):\n\n def __init__(self, sizes):\n \"\"\"The list ``sizes`` contains the number of neurons in the\n respective layers of the network. For example, if the list\n was [2, 3, 1] then it would be a three-layer network, with the\n first layer containing 2 neurons, the second layer 3 neurons,\n and the third layer 1 neuron. The biases and weights for the\n network are initialized randomly, using a Gaussian\n distribution with mean 0, and variance 1. Note that the first\n layer is assumed to be an input layer, and by convention we\n won't set any biases for those neurons, since biases are only\n ever used in computing the outputs from later layers.\"\"\"\n self.num_layers = len(sizes)\n self.sizes = sizes\n self.biases = [np.random.randn(y, 1) for y in sizes[1:]]\n self.weights = [np.random.randn(y, x)\n for x, y in zip(sizes[:-1], sizes[1:])]\n\n def feedforward(self, a):\n \"\"\"Return the output of the network if ``a`` is input.\"\"\"\n for b, w in zip(self.biases, self.weights):\n a = sigmoid(np.dot(w, a)+b)\n return a\n\n def SGD(self, training_data, epochs, mini_batch_size, eta,\n test_data=None):\n \"\"\"Train the neural network using mini-batch stochastic\n gradient descent. The ``training_data`` is a list of tuples\n ``(x, y)`` representing the training inputs and the desired\n outputs. The other non-optional parameters are\n self-explanatory. If ``test_data`` is provided then the\n network will be evaluated against the test data after each\n epoch, and partial progress printed out. 
This is useful for\n tracking progress, but slows things down substantially.\"\"\"\n if test_data: n_test = len(test_data)\n n = len(training_data)\n for j in range(epochs):\n random.shuffle(training_data)\n mini_batches = [\n training_data[k:k+mini_batch_size]\n for k in range(0, n, mini_batch_size)]\n for mini_batch in mini_batches:\n self.update_mini_batch(mini_batch, eta)\n if test_data:\n print (\"Epoch {0}: {1} / {2}\".format(\n j, self.evaluate(test_data), n_test))\n else:\n print (\"Epoch {0} complete\".format(j))\n\n def update_mini_batch(self, mini_batch, eta):\n \"\"\"Update the network's weights and biases by applying\n gradient descent using backpropagation to a single mini batch.\n The ``mini_batch`` is a list of tuples ``(x, y)``, and ``eta``\n is the learning rate.\"\"\"\n nabla_b = [np.zeros(b.shape) for b in self.biases]\n nabla_w = [np.zeros(w.shape) for w in self.weights]\n for x, y in mini_batch:\n delta_nabla_b, delta_nabla_w = self.backprop(x, y)\n nabla_b = [nb+dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]\n nabla_w = [nw+dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]\n self.weights = [w-(eta/len(mini_batch))*nw\n for w, nw in zip(self.weights, nabla_w)]\n self.biases = [b-(eta/len(mini_batch))*nb\n for b, nb in zip(self.biases, nabla_b)]\n\n def backprop(self, x, y):\n \"\"\"Return a tuple ``(nabla_b, nabla_w)`` representing the\n gradient for the cost function C_x. ``nabla_b`` and\n ``nabla_w`` are layer-by-layer lists of numpy arrays, similar\n to ``self.biases`` and ``self.weights``.\"\"\"\n nabla_b = [np.zeros(b.shape) for b in self.biases]\n nabla_w = [np.zeros(w.shape) for w in self.weights]\n # feedforward\n activation = x\n activations = [x] # list to store all the activations, layer by layer\n zs = [] # list to store all the z vectors, layer by layer\n for b, w in zip(self.biases, self.weights):\n z = np.dot(w, activation)+b\n zs.append(z)\n activation = sigmoid(z)\n activations.append(activation)\n # backward pass\n delta = self.cost_derivative(activations[-1], y) * \\\n sigmoid_prime(zs[-1])\n nabla_b[-1] = delta\n nabla_w[-1] = np.dot(delta, activations[-2].transpose())\n # Note that the variable l in the loop below is used a little\n # differently to the notation in Chapter 2 of the book. Here,\n # l = 1 means the last layer of neurons, l = 2 is the\n # second-last layer, and so on. It's a renumbering of the\n # scheme in the book, used here to take advantage of the fact\n # that Python can use negative indices in lists.\n for l in range(2, self.num_layers):\n z = zs[-l]\n sp = sigmoid_prime(z)\n delta = np.dot(self.weights[-l+1].transpose(), delta) * sp\n nabla_b[-l] = delta\n nabla_w[-l] = np.dot(delta, activations[-l-1].transpose())\n return (nabla_b, nabla_w)\n\n def evaluate(self, test_data):\n \"\"\"Return the number of test inputs for which the neural\n network outputs the correct result. 
Note that the neural\n network's output is assumed to be the index of whichever\n neuron in the final layer has the highest activation.\"\"\"\n test_results = [(np.argmax(self.feedforward(x)), y)\n for (x, y) in test_data]\n return sum(int(x == y) for (x, y) in test_results)\n\n def cost_derivative(self, output_activations, y):\n \"\"\"Return the vector of partial derivatives \\partial C_x /\n \\partial a for the output activations.\"\"\"\n return (output_activations-y)\n\n#### Miscellaneous functions\ndef sigmoid(z):\n \"\"\"The sigmoid function.\"\"\"\n return 1.0/(1.0+np.exp(-z))\n\ndef sigmoid_prime(z):\n \"\"\"Derivative of the sigmoid function.\"\"\"\n return sigmoid(z)*(1-sigmoid(z))\n", "We first load the MNIST data:", "training_data, validation_data, test_data = load_data_wrapper()", "After loading the MNIST data, we'll set up a Network with 30 hidden neurons.", "net = Network([784, 30, 10])", "Finally, we'll use stochastic gradient descent to learn from the MNIST training_data over 30 epochs, with a mini-batch size of 10, and a learning rate of $\\eta$=3.0:", "net.SGD(training_data, 30, 10, 3.0, test_data=test_data)", "Challenge 14.3\nTry creating a network with just two layers - an input and an output layer, no hidden layer - with 784 and 10 neurons, respectively. Train the network using stochastic gradient descent. What classification accuracy can you achieve?\nNumber of hidden layers\nSuppose that we want to approximate a set of functions to a given accuracy. How many hidden layers do we need? The answer is: At most two layers, with arbitrary accuracy obtained given enough units per layer. It has been also shown that only one layer is enough to approximate any continuous function. Of course, there is no way to know how many units we would need, and this is not known in general, and this number may grow exponentially with the number of input units." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
banbh/little-pythoner
python/Main.ipynb
apache-2.0
[ "On this page you'll find a series of exercises. We'll be using Python for all the code, but not really. You barely need to know any Python at all. In fact here is all you need to know (at least about Python).\nAll You Need to Know\nNumbers: 0, 1, 2, 3, ... (i.e., no negative numbers or decimals)\nStrings: things like 'hello' and 'the cat on the mat' and the empty string ''\nBooleans: True, False\nLists: [], but you can make lists (see cons below)\nFunctions\nis_eq_str(x, y): x and y must both be strings; returns whether x equals y\nis_empty(xx) : xx must be a list; returns whether the xx is the empty list\nhead(xx): xx must be a non-empty list; returns the first item of xx\ntail(xx): xx must be a non-empty list; returns a list with everything after the head\ncons(h, tl): returns a list whose first item is h and whose remaining items are the items of tl (i.e. it put backs a list taken apart by head and tail)\nadd1(n): n must be a number; returns a number one bigger than n\nsub1(n): n must be a number greater than zero; returns a number one less than n\nis_zero(n): n must be a number; returns whether n is zero\nis_str(x): returns whether x is a string\nis_num(x): returns whether x is a number\nGetting Started\nThe above functions, simple though they are, are not built into Python, so you must download a file that defins them. Download basic_functions.py.", "from basic_functions import *\n\nis_empty([1,2])\n\nis_empty([])\n\nhead([1,2])\n\nhead([1])", "Note that head([]) is an error since you can't find the first item in an empty list.", "tail([1,2])\n\ntail([1])", "Note that tail([]) is an error since the tail of a list is what's left over when you remove the head, and the empty list has no head.", "cons(1, [2,3])\n\ncons(1, [])\n\nis_num(99)\n\nis_num('hello')\n\nis_str(99)\n\nis_str('hello')\n\nis_str_eq('hello', 'hello')\n\nis_str_eq('hello', 'goodbye')\n\nadd1(99)\n\nsub1(99)", "Note that sub1(0) is an error because you can't subtract 1 from 0. (Actually it is possible if you allow negative numbers, but in these exercises we will not allow such numbers.)\nAll Strings\nWrite a function, is_list_of_strings, that determines whether a list contains only strings. Below are some examples of how it should behave.", "from solutions import is_list_of_strings\n\nis_list_of_strings(['hello', 'goodbye'])\n\nis_list_of_strings([1, 'aa'])\n\nis_list_of_strings([])", "The last example, is_list_of_strings([]), might seem puzzling at first, but really it's not. Suppose you are flying into a strange island and at customs there is a sign that says \"all food in your suitcase must be cooked\". Then if you have a ham sanswich in your suitcase then you are ok, since ham is cooked. What about if you have no food in your suitcase? Then clearly you are also ok. This because in normal language (and in more mathematical language too) when we say \"every X must be (say) big\", then if there are no X's then the statment is true. In normal language it may be less clear what to do with slightly sillier examples; for example, say someone asks you \"Are all the coins in your pocket quarters?\" and you have no coins in your pocket. Out of politeness you might say \"I have no coins in my pocket\", but if they forced you to say \"yes\" or \"no\", I think you would say \"yes\".\nFollow the pattern below:\n # fill in the blanks\n def is_list_of_strings(l):\n if is_empty(l):\n pass\n else:\n # do something with head(l), tail(l)\n pass\nBefore you try it, here is a hint. 
You can do something magical in a function: you call yourself! For example when filling in the blanks above, a good place to try this is in the else block. In fact what you want to think about in the else block is this: assuming I've broken the list apart into it's first item (head(l)) and a list with all the other items (tail(l)) how can I figure out the answer in terms of these pieces? If the first item is not a string then we are done (the answer if false). However if the first item is a list then we know we have a list of strings if the rest of the items are all strings. So maybe we can 'cheat' and just call is_list_of_strings on the tail to answer that! And in fact you can, and it's not even cheating. In fact it's called recursion." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
ChadFulton/statsmodels
examples/notebooks/generic_mle.ipynb
bsd-3-clause
[ "Maximum Likelihood Estimation (Generic models)\nThis tutorial explains how to quickly implement new maximum likelihood models in statsmodels. We give two examples: \n\nProbit model for binary dependent variables\nNegative binomial model for count data\n\nThe GenericLikelihoodModel class eases the process by providing tools such as automatic numeric differentiation and a unified interface to scipy optimization functions. Using statsmodels, users can fit new MLE models simply by \"plugging-in\" a log-likelihood function. \nExample 1: Probit model", "from __future__ import print_function\nimport numpy as np\nfrom scipy import stats\nimport statsmodels.api as sm\nfrom statsmodels.base.model import GenericLikelihoodModel", "The Spector dataset is distributed with statsmodels. You can access a vector of values for the dependent variable (endog) and a matrix of regressors (exog) like this:", "data = sm.datasets.spector.load_pandas()\nexog = data.exog\nendog = data.endog\nprint(sm.datasets.spector.NOTE)\nprint(data.exog.head())", "Them, we add a constant to the matrix of regressors:", "exog = sm.add_constant(exog, prepend=True)", "To create your own Likelihood Model, you simply need to overwrite the loglike method.", "class MyProbit(GenericLikelihoodModel):\n def loglike(self, params):\n exog = self.exog\n endog = self.endog\n q = 2 * endog - 1\n return stats.norm.logcdf(q*np.dot(exog, params)).sum()", "Estimate the model and print a summary:", "sm_probit_manual = MyProbit(endog, exog).fit()\nprint(sm_probit_manual.summary())", "Compare your Probit implementation to statsmodels' \"canned\" implementation:", "sm_probit_canned = sm.Probit(endog, exog).fit()\n\nprint(sm_probit_canned.params)\nprint(sm_probit_manual.params)\n\nprint(sm_probit_canned.cov_params())\nprint(sm_probit_manual.cov_params())", "Notice that the GenericMaximumLikelihood class provides automatic differentiation, so we didn't have to provide Hessian or Score functions in order to calculate the covariance estimates.\nExample 2: Negative Binomial Regression for Count Data\nConsider a negative binomial regression model for count data with\nlog-likelihood (type NB-2) function expressed as:\n$$\n \\mathcal{L}(\\beta_j; y, \\alpha) = \\sum_{i=1}^n y_i ln \n \\left ( \\frac{\\alpha exp(X_i'\\beta)}{1+\\alpha exp(X_i'\\beta)} \\right ) -\n \\frac{1}{\\alpha} ln(1+\\alpha exp(X_i'\\beta)) + ln \\Gamma (y_i + 1/\\alpha) - ln \\Gamma (y_i+1) - ln \\Gamma (1/\\alpha)\n$$\nwith a matrix of regressors $X$, a vector of coefficients $\\beta$,\nand the negative binomial heterogeneity parameter $\\alpha$. 
\nUsing the nbinom distribution from scipy, we can write this likelihood\nsimply as:", "import numpy as np\nfrom scipy.stats import nbinom\n\ndef _ll_nb2(y, X, beta, alph):\n mu = np.exp(np.dot(X, beta))\n size = 1/alph\n prob = size/(size+mu)\n ll = nbinom.logpmf(y, size, prob)\n return ll", "New Model Class\nWe create a new model class which inherits from GenericLikelihoodModel:", "from statsmodels.base.model import GenericLikelihoodModel\n\nclass NBin(GenericLikelihoodModel):\n def __init__(self, endog, exog, **kwds):\n super(NBin, self).__init__(endog, exog, **kwds)\n \n def nloglikeobs(self, params):\n alph = params[-1]\n beta = params[:-1]\n ll = _ll_nb2(self.endog, self.exog, beta, alph)\n return -ll \n \n def fit(self, start_params=None, maxiter=10000, maxfun=5000, **kwds):\n # we have one additional parameter and we need to add it for summary\n self.exog_names.append('alpha')\n if start_params == None:\n # Reasonable starting values\n start_params = np.append(np.zeros(self.exog.shape[1]), .5)\n # intercept\n start_params[-2] = np.log(self.endog.mean())\n return super(NBin, self).fit(start_params=start_params, \n maxiter=maxiter, maxfun=maxfun, \n **kwds) ", "Two important things to notice: \n\nnloglikeobs: This function should return one evaluation of the negative log-likelihood function per observation in your dataset (i.e. rows of the endog/X matrix). \nstart_params: A one-dimensional array of starting values needs to be provided. The size of this array determines the number of parameters that will be used in optimization.\n\nThat's it! You're done!\nUsage Example\nThe Medpar\ndataset is hosted in CSV format at the Rdatasets repository. We use the read_csv\nfunction from the Pandas library to load the data\nin memory. We then print the first few columns:", "import statsmodels.api as sm\n\nmedpar = sm.datasets.get_rdataset(\"medpar\", \"COUNT\", cache=True).data\n\nmedpar.head()", "The model we are interested in has a vector of non-negative integers as\ndependent variable (los), and 5 regressors: Intercept, type2,\ntype3, hmo, white.\nFor estimation, we need to create two variables to hold our regressors and the outcome variable. These can be ndarrays or pandas objects.", "y = medpar.los\nX = medpar[[\"type2\", \"type3\", \"hmo\", \"white\"]].copy()\nX[\"constant\"] = 1", "Then, we fit the model and extract some information:", "mod = NBin(y, X)\nres = mod.fit()", "Extract parameter estimates, standard errors, p-values, AIC, etc.:", "print('Parameters: ', res.params)\nprint('Standard errors: ', res.bse)\nprint('P-values: ', res.pvalues)\nprint('AIC: ', res.aic)", "As usual, you can obtain a full list of available information by typing\ndir(res).\nWe can also look at the summary of the estimation results.", "print(res.summary())", "Testing\nWe can check the results by using the statsmodels implementation of the Negative Binomial model, which uses the analytic score function and Hessian.", "res_nbin = sm.NegativeBinomial(y, X).fit(disp=0)\nprint(res_nbin.summary())\n\nprint(res_nbin.params)\n\nprint(res_nbin.bse)", "Or we could compare them to results obtained using the MASS implementation for R:\nurl = 'https://raw.githubusercontent.com/vincentarelbundock/Rdatasets/csv/COUNT/medpar.csv'\nmedpar = read.csv(url)\nf = los~factor(type)+hmo+white\n\nlibrary(MASS)\nmod = glm.nb(f, medpar)\ncoef(summary(mod))\n Estimate Std. 
Error z value Pr(&gt;|z|)\n(Intercept) 2.31027893 0.06744676 34.253370 3.885556e-257\nfactor(type)2 0.22124898 0.05045746 4.384861 1.160597e-05\nfactor(type)3 0.70615882 0.07599849 9.291748 1.517751e-20\nhmo -0.06795522 0.05321375 -1.277024 2.015939e-01\nwhite -0.12906544 0.06836272 -1.887951 5.903257e-02\n\nNumerical precision\nThe statsmodels generic MLE and R parameter estimates agree up to the fourth decimal. The standard errors, however, agree only up to the second decimal. This discrepancy is the result of imprecision in our Hessian numerical estimates. In the current context, the difference between MASS and statsmodels standard error estimates is substantively irrelevant, but it highlights the fact that users who need very precise estimates may not always want to rely on default settings when using numerical derivatives. In such cases, it is better to use analytical derivatives with the LikelihoodModel class." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
aattaran/Machine-Learning-with-Python
titanic/titanic_survival_exploration[1].ipynb
bsd-3-clause
[ "Machine Learning Engineer Nanodegree\nIntroduction and Foundations\nProject: Titanic Survival Exploration\nIn 1912, the ship RMS Titanic struck an iceberg on its maiden voyage and sank, resulting in the deaths of most of its passengers and crew. In this introductory project, we will explore a subset of the RMS Titanic passenger manifest to determine which features best predict whether someone survived or did not survive. To complete this project, you will need to implement several conditional predictions and answer the questions below. Your project submission will be evaluated based on the completion of the code and your responses to the questions.\n\nTip: Quoted sections like this will provide helpful instructions on how to navigate and use an iPython notebook. \n\nGetting Started\nTo begin working with the RMS Titanic passenger data, we'll first need to import the functionality we need, and load our data into a pandas DataFrame.\nRun the code cell below to load our data and display the first few entries (passengers) for examination using the .head() function.\n\nTip: You can run a code cell by clicking on the cell and using the keyboard shortcut Shift + Enter or Shift + Return. Alternatively, a code cell can be executed using the Play button in the hotbar after selecting it. Markdown cells (text cells like this one) can be edited by double-clicking, and saved using these same shortcuts. Markdown allows you to write easy-to-read plain text that can be converted to HTML.", "# Import libraries necessary for this project\nimport numpy as np\nimport pandas as pd\nfrom IPython.display import display # Allows the use of display() for DataFrames\n\n# Import supplementary visualizations code visuals.py\nimport visuals as vs\n\n# Pretty display for notebooks\n%matplotlib inline\n\n# Load the dataset\nin_file = 'titanic_data.csv'\nfull_data = pd.read_csv(in_file)\n\n# Print the first few entries of the RMS Titanic data\ndisplay(full_data.head())", "From a sample of the RMS Titanic data, we can see the various features present for each passenger on the ship:\n- Survived: Outcome of survival (0 = No; 1 = Yes)\n- Pclass: Socio-economic class (1 = Upper class; 2 = Middle class; 3 = Lower class)\n- Name: Name of passenger\n- Sex: Sex of the passenger\n- Age: Age of the passenger (Some entries contain NaN)\n- SibSp: Number of siblings and spouses of the passenger aboard\n- Parch: Number of parents and children of the passenger aboard\n- Ticket: Ticket number of the passenger\n- Fare: Fare paid by the passenger\n- Cabin Cabin number of the passenger (Some entries contain NaN)\n- Embarked: Port of embarkation of the passenger (C = Cherbourg; Q = Queenstown; S = Southampton)\nSince we're interested in the outcome of survival for each passenger or crew member, we can remove the Survived feature from this dataset and store it as its own separate variable outcomes. We will use these outcomes as our prediction targets.\nRun the code cell below to remove Survived as a feature of the dataset and store it in outcomes.", "# Store the 'Survived' feature in a new variable and remove it from the dataset\noutcomes = full_data['Survived']\ndata = full_data.drop('Survived', axis = 1)\n\n# Show the new dataset with 'Survived' removed\ndisplay(data.head())", "The very same sample of the RMS Titanic data now shows the Survived feature removed from the DataFrame. Note that data (the passenger data) and outcomes (the outcomes of survival) are now paired. 
That means for any passenger data.loc[i], they have the survival outcome outcomes[i].\nTo measure the performance of our predictions, we need a metric to score our predictions against the true outcomes of survival. Since we are interested in how accurate our predictions are, we will calculate the proportion of passengers where our prediction of their survival is correct. Run the code cell below to create our accuracy_score function and test a prediction on the first five passengers. \nThink: Out of the first five passengers, if we predict that all of them survived, what would you expect the accuracy of our predictions to be?", "def accuracy_score(truth, pred):\n \"\"\" Returns accuracy score for input truth and predictions. \"\"\"\n \n # Ensure that the number of predictions matches number of outcomes\n if len(truth) == len(pred): \n \n # Calculate and return the accuracy as a percent\n return \"Predictions have an accuracy of {:.2f}%.\".format((truth == pred).mean()*100)\n \n else:\n return \"Number of predictions does not match number of outcomes!\"\n \n# Test the 'accuracy_score' function\npredictions = pd.Series(np.ones(5, dtype = int))\nprint accuracy_score(outcomes[:5], predictions)", "Tip: If you save an iPython Notebook, the output from running code blocks will also be saved. However, the state of your workspace will be reset once a new session is started. Make sure that you run all of the code blocks from your previous session to reestablish variables and functions before picking up where you last left off.\n\nMaking Predictions\nIf we were asked to make a prediction about any passenger aboard the RMS Titanic whom we knew nothing about, then the best prediction we could make would be that they did not survive. This is because we can assume that a majority of the passengers (more than 50%) did not survive the ship sinking.\nThe predictions_0 function below will always predict that a passenger did not survive.", "def predictions_0(data):\n \"\"\" Model with no features. Always predicts a passenger did not survive. \"\"\"\n\n predictions = []\n for _, passenger in data.iterrows():\n \n # Predict the survival of 'passenger'\n predictions.append(0)\n \n # Return our predictions\n return pd.Series(predictions)\n\n# Make the predictions\npredictions = predictions_0(data)", "Question 1\nUsing the RMS Titanic data, how accurate would a prediction be that none of the passengers survived?\nHint: Run the code cell below to see the accuracy of this prediction.", "print accuracy_score(outcomes, predictions)", "Answer: Replace this text with the prediction accuracy you found above.\nPredictions have an accuracy of 61.62%.\n\nLet's take a look at whether the feature Sex has any indication of survival rates among passengers using the survival_stats function. This function is defined in the visuals.py Python script included with this project. The first two parameters passed to the function are the RMS Titanic data and passenger survival outcomes, respectively. The third parameter indicates which feature we want to plot survival statistics across.\nRun the code cell below to plot the survival outcomes of passengers based on their sex.", "vs.survival_stats(data, outcomes, 'Sex')", "Examining the survival statistics, a large majority of males did not survive the ship sinking. However, a majority of females did survive the ship sinking. Let's build on our previous prediction: If a passenger was female, then we will predict that they survived. 
Otherwise, we will predict the passenger did not survive.\nFill in the missing code below so that the function will make this prediction.\nHint: You can access the values of each feature for a passenger like a dictionary. For example, passenger['Sex'] is the sex of the passenger.", "def predictions_1(data):\n \"\"\" Model with one feature: \n - Predict a passenger survived if they are female. \"\"\"\n \n predictions = []\n for _, passenger in data.iterrows():\n \n # Remove the 'pass' statement below \n # and write your prediction conditions here\n #pass\n if passenger['Sex']==\"female\":\n predictions.append(1)\n else:\n predictions.append(0)\n \n # Return our predictions\n return pd.Series(predictions)\n\n# Make the predictions\npredictions = predictions_1(data)", "Question 2\nHow accurate would a prediction be that all female passengers survived and the remaining passengers did not survive?\nHint: Run the code cell below to see the accuracy of this prediction.", "print accuracy_score(outcomes, predictions)", "Answer: Replace this text with the prediction accuracy you found above.\nPredictions have an accuracy of 78.68%.\n\nUsing just the Sex feature for each passenger, we are able to increase the accuracy of our predictions by a significant margin. Now, let's consider using an additional feature to see if we can further improve our predictions. For example, consider all of the male passengers aboard the RMS Titanic: Can we find a subset of those passengers that had a higher rate of survival? Let's start by looking at the Age of each male, by again using the survival_stats function. This time, we'll use a fourth parameter to filter out the data so that only passengers with the Sex 'male' will be included.\nRun the code cell below to plot the survival outcomes of male passengers based on their age.", "vs.survival_stats(data, outcomes, 'Age', [\"Sex == 'male'\"])", "Examining the survival statistics, the majority of males younger than 10 survived the ship sinking, whereas most males age 10 or older did not survive the ship sinking. Let's continue to build on our previous prediction: If a passenger was female, then we will predict they survive. If a passenger was male and younger than 10, then we will also predict they survive. Otherwise, we will predict they do not survive.\nFill in the missing code below so that the function will make this prediction.\nHint: You can start your implementation of this function using the prediction code you wrote earlier from predictions_1.", "def predictions_2(data):\n \"\"\" Model with two features: \n - Predict a passenger survived if they are female.\n - Predict a passenger survived if they are male and younger than 10. 
\"\"\"\n \n predictions = []\n for _, passenger in data.iterrows():\n \n # Remove the 'pass' statement below \n # and write your prediction conditions here\n #pass\n if passenger[\"Sex\"]==\"female\":\n predictions.append(1)\n #elif passenger[\"Sex\"]==\"male\":\n # predictions.append(0)\n \n elif passenger[\"Sex\"]==\"male\" and passenger[\"Age\"] < 10:\n predictions.append(1)\n #elif passenger[\"Sex\"]==\"male\" and passenger[\"Age\"] > 10:\n # predictions.append(0)\n else:\n predictions.append(0)\n \n \n # Return our predictions\n return pd.Series(predictions)\n\n# Make the predictions\npredictions = predictions_2(data)", "Question 3\nHow accurate would a prediction be that all female passengers and all male passengers younger than 10 survived?\nHint: Run the code cell below to see the accuracy of this prediction.", "print accuracy_score(outcomes, predictions)", "Predictions have an accuracy of 79.35%.\nAnswer: Replace this text with the prediction accuracy you found above.\n\nAdding the feature Age as a condition in conjunction with Sex improves the accuracy by a small margin more than with simply using the feature Sex alone. Now it's your turn: Find a series of features and conditions to split the data on to obtain an outcome prediction accuracy of at least 80%. This may require multiple features and multiple levels of conditional statements to succeed. You can use the same feature multiple times with different conditions. \nPclass, Sex, Age, SibSp, and Parch are some suggested features to try.\nUse the survival_stats function below to to examine various survival statistics.\nHint: To use mulitple filter conditions, put each condition in the list passed as the last argument. Example: [\"Sex == 'male'\", \"Age &lt; 18\"]", "vs.survival_stats(data, outcomes, 'Sex', [ \"Pclass == 3\" ])", "vs.survival_stats(data, outcomes, 'Age', [\"Sex == 'male'\", \"Age < 18\"])", "vs.survival_stats(data, outcomes, 'Age', [\"Sex == 'female'\" , \"Embarked == C\"])", "After exploring the survival statistics visualization, fill in the missing code below so that the function will make your prediction.\nMake sure to keep track of the various features and conditions you tried before arriving at your final prediction model.\nHint: You can start your implementation of this function using the prediction code you wrote earlier from predictions_2.", "def predictions_3(data):\n \"\"\" Model with multiple features. Makes a prediction with an accuracy of at least 80%. \"\"\"\n \n predictions = []\n for _, passenger in data.iterrows():\n \n # Remove the 'pass' statement below \n # and write your prediction conditions here\n #pass\n #if passenger[\"Sex\"] == \"female\" :\n if passenger[\"Sex\"] == \"female\":\n if passenger[\"Pclass\"] ==3 :\n predictions.append(0)\n else: \n predictions.append(1)\n \n else:\n if passenger['Age'] < 10 and passenger['Pclass'] in (1, 2):\n predictions.append(1)\n elif passenger['Age'] < 18 and passenger['Pclass'] == 1:\n predictions.append(1)\n else:\n predictions.append(0)\n \n \n \n # Return our predictions\n return pd.Series(predictions)\n\n# Make the predictions\npredictions = predictions_3(data)\n\n\n", "Question 4\nDescribe the steps you took to implement the final prediction model so that it got an accuracy of at least 80%. What features did you look at? Were certain features more informative than others? Which conditions did you use to split the survival outcomes in the data? 
How accurate are your predictions?\nHint: Run the code cell below to see the accuracy of your predictions.", "print accuracy_score(outcomes, predictions)", "Answer: Replace this text with your answer to the question above.\nI used my intuition, that women and childerens would be saved. So I started narrowing down the women with lower class. Then I divided the data with age and saw with kids younger than 10 in first and second class, there is a higher precentage of survival. Then I saw with kids younger than 18; and I divided classes. I saw with kids in first class, the precentage of survival increased more than 80%.\nConclusion\nAfter several iterations of exploring and conditioning on the data, you have built a useful algorithm for predicting the survival of each passenger aboard the RMS Titanic. The technique applied in this project is a manual implementation of a simple machine learning model, the decision tree. A decision tree splits a set of data into smaller and smaller groups (called nodes), by one feature at a time. Each time a subset of the data is split, our predictions become more accurate if each of the resulting subgroups are more homogeneous (contain similar labels) than before. The advantage of having a computer do things for us is that it will be more exhaustive and more precise than our manual exploration above. This link provides another introduction into machine learning using a decision tree.\nA decision tree is just one of many models that come from supervised learning. In supervised learning, we attempt to use features of the data to predict or model things with objective outcome labels. That is to say, each of our data points has a known outcome value, such as a categorical, discrete label like 'Survived', or a numerical, continuous value like predicting the price of a house.\nQuestion 5\nThink of a real-world scenario where supervised learning could be applied. What would be the outcome variable that you are trying to predict? Name two features about the data used in this scenario that might be helpful for making the predictions. \nAnswer: Replace this text with your answer to the question above.\nOne application is gesture or speech recognition. For a trained model, we can add a sign language to TVs, where the model by hearing the sounds of the TV contents, could recognize them and translate them in sign language and be displayed in the corner of the TV. Features could be the pitch of the sounds, so it recognized that this is a human voice and not a sound of wind or something else. Another feature could be if the sound is inside the dictionary alphabets or not. \nAnother example is drug discovery. We could train the model to differeciate between the deseased and healthy cells and test it on new subjects (rats).\nOne feature could be the behavior of the healthy cells to the test drug. another feature could be the duration it takes for the cells to react to the drug.\nanother example is behavioral detection, we can train a model and install it in the car and when the driver is drunk or sleepy, it would warn the driver or turn off the ignition until he/she is sober or not sleepy.\nOne feature could be that is the driver's heart beat usual or not, another could be is the driver's movement is usual or not. Is he/she rocking left to right like a drunk person or not.\n\nNote: Once you have completed all of the code implementations and successfully answered each question above, you may finalize your work by exporting the iPython Notebook as an HTML document. 
You can do this by using the menu above and navigating to\nFile -> Download as -> HTML (.html). Include the finished document along with this notebook as your submission." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
NathanYee/ThinkBayes2
code/report03.ipynb
gpl-2.0
[ "Report03 - Nathan Yee\nThis notebook contains report03 for computational baysian statistics fall 2016\nMIT License: https://opensource.org/licenses/MIT", "from __future__ import print_function, division\n\n% matplotlib inline\nimport warnings\nwarnings.filterwarnings('ignore')\n\nimport math\nimport numpy as np\n\nfrom thinkbayes2 import Pmf, Cdf, Suite, Joint\nimport thinkplot", "The sock problem\nCreated by Yuzhong Huang\nThere are two drawers of socks. The first drawer has 40 white socks and 10 black socks; the second drawer has 20 white socks and 30 black socks. We randomly get 2 socks from a drawer, and it turns out to be a pair(same color) but we don't know the color of these socks. What is the chance that we picked the first drawer.\nTo make calculating our likelihood easier, we start by defining a multiply function. The function is written in a functional way primarily for fun.", "from functools import reduce\nimport operator\n\ndef multiply(items):\n \"\"\"\n multiply takes a list of numbers, multiplies all of them, and returns the result\n \n Args:\n items (list): The list of numbers\n \n Return:\n the items multiplied together\n \"\"\"\n return reduce(operator.mul, items, 1)", "Next we define a drawer suite. This suite will allow us to take n socks up to the least number of socks in a drawer. To make our likelihood function simpler, we ignore the case where we take 11 black socks and that only drawer 2 is possible.", "class Drawers(Suite):\n def Likelihood(self, data, hypo):\n \"\"\"\n Likelihood returns the likelihood given a bayesian update \n consisting of a particular hypothesis and new data. In the\n case of our drawer problem, the probabilities change with the\n number of pairs we take (without replacement) so we we start\n by defining lists for each color sock in each drawer.\n \n Args:\n data (int): The number of socks we take\n hypo (str): The hypothesis we are updating\n \n Return:\n the likelihood for a hypothesis\n \"\"\"\n \n drawer1W = []\n drawer1B = []\n drawer2W = []\n drawer2B = []\n for i in range(data):\n drawer1W.append(40-i)\n drawer1B.append(10-i)\n drawer2W.append(20-i)\n drawer2B.append(30-i)\n \n if hypo == 'drawer1':\n return multiply(drawer1W)+multiply(drawer1B)\n if hypo == 'drawer2':\n return multiply(drawer2W)+multiply(drawer2B)\n", "Next, define our hypotheses and create the drawer Suite.", "hypos = ['drawer1','drawer2']\ndrawers = Drawers(hypos)\ndrawers.Print()", "Next, update the drawers by taking two matching socks.", "drawers.Update(2)\ndrawers.Print()", "It seems that the drawer with many of a single sock (40 white 10 black) is more likely after the update. To confirm this suspicion, let's restart the problem by taking 5 pairs of socks.", "hypos = ['drawer1','drawer2']\ndrawers5 = Drawers(hypos)\ndrawers5.Update(5)\ndrawers5.Print()", "We see that after we take 5 pairs of socks, the probability of the socks coming from drawer 1 is 80.6%. We can now conclude that the drawer with a more extreme numbers of socks is more likely be chosen if we are updating with matching color socks.\nChess-playing twins\nAllen Downey\nTwo identical twins are members of my chess club, but they never show up on the same day; in fact, they strictly alternate the days they show up. I can't tell them apart except that one is a better player than the other: Avery beats me 60% of the time and I beat Blake 70% of the time. 
If I play one twin on Monday and win, and the other twin on Tuesday and lose, which twin did I play on which day?\nTo solve this problem, we first need to create our hypothesis. In this case, we have: \nhypo1: Avery Monday, Blake Tuesday\nhypo2: Blake Monday, Avery Tuesday \nWe will abreviate Avery to A and Blake to B.", "twins = Pmf()\ntwins['AB'] = 1\ntwins['BA'] = 1\ntwins.Normalize()\ntwins.Print()", "Now we update our hypotheses with us winning the first day. We have a 40% chance of winning against Avery and a 70% chance of winning against Blake.", "#win day 1\ntwins['AB'] *= .4\ntwins['BA'] *= .7\ntwins.Normalize()\ntwins.Print()", "At this point in time, there is only a 36% chance that we play Avery the first day while a 64% chance that we played Blake the first day.\nHowever, let's see what happens when we update with a loss.", "#lose day 2\ntwins['AB'] *= .6\ntwins['BA'] *= .3\ntwins.Normalize()\ntwins.Print()", "Interesting. Now there is a 53% chance that we played Avery then Blake and a 47% chance that we played Blake then Avery.\nWho saw that movie?\nNathan Yee\nEvery year the MPAA (Motion Picture Association of America) publishes a report about theatrical market statistics. Included in the report, are both the gender and the ethnicity share of the top 5 most grossing films. If a randomly selected person in the United States went to Pixar's \"Inside Out\", what is the probability that they are both female and Asian?\nData:\n| Gender | Male (%) | Female (%) |\n| :-------------------------- | :------- | :---------- |\n| Furious 7 | 56 | 44 |\n| Inside Out | 46 | 54 |\n| Avengers: Age of Ultron | 58 | 42 |\n| Star Wars: The Force Awakens| 58 | 42 |\n| Jurassic World | 55 | 45 |\n| Ethnicity | Caucasian (%) | African-American (%) | Hispanic (%) | Asian (%) | Other (%) |\n| :-------------------------- | :------------ | :------------------- | :----------- | :-------- | :-------- |\n| Furious 7 | 40 | 22 | 25 | 8 | 5 |\n| Inside Out | 54 | 15 | 16 | 9 | 5 |\n| Avengers: Age of Ultron | 50 | 16 | 20 | 10 | 5 |\n| Star Wars: The Force Awakens| 61 | 12 | 15 | 7 | 5 |\n| Jurassic World | 39 | 16 | 19 | 11 | 6 |\nSince we are picking a random person in the United States, we can use demographics of the United States as an informed prior.\n| Demographic | Caucasian (%) | African-American (%) | Hispanic (%) | Asian (%) | Other (%) |\n| :-------------------------- | :------------ | :------------------- | :----------- | :-------- | :-------- |\n| Population United States | 63.7 | 12.2 | 16.3 | 4.7 | 3.1 |\nNote:\nDemographic data was gathered from the US Census Bureau. There may be errors within 2% due to rounding. Also note that certian races were combined to fit our previous demographic groupings.\nTo make writing code easier, we will encoude data in a numerical structure. The first item in the tuple corresponds to gender, the second item in the tuple corresponds to ethnicity.\n| Gender | Male | Female |\n| :-------------------------- | :--- | :----- |\n| Encoding number | 0 | 1 |\n| Ethnicity | Caucasian | African-American | Hispanic | Asian | Other |\n| :-------------------------- | :-------- | :--------------- | :------- | :---- | :---- |\n| Encoding number | 0 | 1 | 2 | 3 | 4 |\nSuch that a (female, asian) = (1, 3)\nThe first piece of code we write will be our Movie class. 
This version of Suite will have a special likelihood function that takes in a movie, and returns the probability of the gender and the ethnicity.", "class Movie(Suite):\n def Likelihood(self, data, hypo):\n \"\"\"\n Likelihood returns the likelihood given a bayesian update consisting of a particular\n hypothesis and data. In this case, we need to calculate the probability of seeing a\n gender seeing a movie. Then we calculat the probability that an ethnicity saw a\n movie. Finally we multiply the two to calculate the a person of a gender and \n ethnicity saw a movie.\n \n Args:\n data (str): The title of the movie\n hypo (str): The hypothesis we are updating\n \n Return:\n the likelihood for a hypothesis\n \"\"\"\n \n movie = data \n gender = hypo[0]\n ethnicity = hypo[1]\n \n # first calculate update based on gender\n movies_gender = {'Furious 7' : {0:56, 1:44},\n 'Inside Out' : {0:46, 1:54},\n 'Avengers: Age of Ultron' : {0:58, 1:42},\n 'Star Wars: The Force Awakens' : {0:58, 1:42},\n 'Jurassic World' : {0:55, 1:45}\n }\n \n like_gender = movies_gender[movie][gender]\n \n # second calculate update based on ethnicity\n movies_ethnicity = {'Furious 7' : {0:40, 1:22, 2:25, 3:8 , 4:5},\n 'Inside Out' : {0:54, 1:15, 2:16, 3:9 , 4:4},\n 'Avengers: Age of Ultron' : {0:50, 1:16, 2:20, 3:10, 4:5},\n 'Star Wars: The Force Awakens' : {0:61, 1:12, 2:15, 3:7 , 4:5},\n 'Jurassic World' : {0:39, 1:16, 2:19, 3:11, 4:6}\n }\n \n like_ethnicity = movies_ethnicity[movie][ethnicity]\n \n # multiply the two together and return\n return like_gender * like_ethnicity\n \n ", "Next we make our hypotheses and input them as tuples into the Movie class.", "genders = range(0,2)\nethnicities = range(0,5)\npairs = [(gender, ethnicity) for gender in genders for ethnicity in ethnicities]\n\nmovie = Movie(pairs)", "We decided that we are picking a random person in the United states. So, we can use population demographics of the United States as an informed prior. We will assume that the United States is 50% male and 50% female. Population percent is defined in the order which we enumerate ethnicities.", "population_percent = [63.7, 12.2, 16.3, 4.7, 3.1, 63.7, 12.2, 16.3, 4.7, 3.1]\n\nfor i in range(len(population_percent)):\n movie[pairs[i]] = population_percent[i]\n\nmovie.Normalize()\nmovie.Print()", "Next update with the two movies", "movie.Update('Inside Out')\n\nmovie.Normalize()\nmovie.Print()", "Given that a random person has seen Inside Out, the probability that the person is both female and Asian is .58%. Interestingly, when we update our hypotheses with our data, the the chance that the randomly selected person is caucasian goes up to 87%. It seems that our model just increases the chance that the randomly selected person is caucasian after seeing a movie.\nValidation:\nTo make ourselves convinced that model is working properly, what happens if we just look at gender data. We know that 54% of people who saw inside out were female. So, if we sum together the female audience, we should get 54%.", "total = 0\nfor pair in pairs:\n if pair[0] == 1:\n total += movie[pair]\n \nprint(total)", "Parking meter theft\nFrom DASL(http://lib.stat.cmu.edu/DASL/Datafiles/brinkdat.html)\n\nThe variable CON in the datafile Parking Meter Theft represents monthly parking meter collections by the principle contractor in New York City from May 1977 to March 1981. In addition to contractor collections, the city made collections from a number of \"control\" meters close to City Hall. These are recorded under the varia- ble CITY. 
From May 1978 to April 1980 the contractor was Brink's. In 1983 the city presented evidence in court that Brink's employees has been stealing parking meter moneys - delivering to the city less than the total collections. The court was satisfied that theft has taken place, but the actual amount of shortage was in question. Assume that there was no theft before or after Brink's tenure and estimate the monthly short- age and its 95% confidence limits.\n\nSo we are asking three questions. What is the probability that that money has been stolen? What is the probability that the variance of the Brink collections is higher. And how much money was stolen?\nThis problem is very similar to that of \"Improving Reading Ability\" by Allen Downey\nTo do this, we want to calculate First we load our data from the csv file.", "import pandas as pd\n\ndf = pd.read_csv('parking.csv', skiprows=17, delimiter='\\t')\ndf.head()", "First we need to normalize the CON (contractor) collections by the amount gathered by the CITY. This will give us a ratio of contractor collections to city collections. If we just use the raw contractor collections, fluctuations throughout the months could mislead us.", "df['RATIO'] = df['CON'] / df['CITY']", "Next, lets see what the means of the RATIO data compare between the general contractors and BRINK.", "grouped = df.groupby('BRINK')\nfor name, group in grouped:\n print(name, group.RATIO.mean())", "We see that for a dollar gathered by the city, general contractors report 244.7 dollars while BRINK only reports 230 dollars.\nNow, we will fit the data to a Normal class to compute the likelihood of a sameple from the normal distribution. This is a similar process to what we did in the improved reading ability problem.", "from scipy.stats import norm\n\nclass Normal(Suite, Joint):\n \n def Likelihood(self, data, hypo):\n \"\"\"\n \n data: sequence of test scores\n hypo: mu, sigma\n \"\"\"\n mu, sigma = hypo\n likes = norm.pdf(data, mu, sigma)\n return np.prod(likes)", "Next, we need to calculate a marginal distribution for both brink and general contractors. To get the marginal distribution of the general contractors, start by generating a bunch of prior distributions for mu and sigma. These will be generated uniformly.", "mus = np.linspace(210, 270, 301)\nsigmas = np.linspace(10, 65, 301)", "Next, use itertools.product to enumerate all pairs of mu and sigma.", "from itertools import product\n\ngeneral = Normal(product(mus, sigmas))\ndata = df[df.BRINK==0].RATIO\ngeneral.Update(data)", "Next we will plot the probability of each mu-sigma pair on a contour plot.", "thinkplot.Contour(general, pcolor=True)\nthinkplot.Config(xlabel='mu', ylabel='sigma')", "Next, extract the marginal distribution of mu from general.", "pmf_mu0 = general.Marginal(0)\nthinkplot.Pdf(pmf_mu0)\nthinkplot.Config(xlabel='mu', ylabel='Pmf')", "And the marginal distribution of sigma from the general.", "pmf_sigma0 = general.Marginal(1)\nthinkplot.Pdf(pmf_sigma0)\nthinkplot.Config(xlabel='sigma', ylabel='Pmf')", "Next, we will run this again for BRINK and see what the difference is between the group. 
This will give us insight into whether or not Brink employees are stealing parking money from the city.\nFirst use the same range of mus and sigmas to calculate the marginal distributions of brink.", "brink = Normal(product(mus, sigmas))\ndata = df[df.BRINK==1].RATIO\nbrink.Update(data)", "Plot the mus and sigmas on a contour plot to see what is going on.", "thinkplot.Contour(brink, pcolor=True)\nthinkplot.Config(xlabel='mu', ylabel='sigma')", "Extract the marginal distribution of mu from brink.", "pmf_mu1 = brink.Marginal(0)\nthinkplot.Pdf(pmf_mu1)\nthinkplot.Config(xlabel='mu', ylabel='Pmf')", "Extract the marginal distribution of sigma from brink.", "pmf_sigma1 = brink.Marginal(1)\nthinkplot.Pdf(pmf_sigma1)\nthinkplot.Config(xlabel='sigma', ylabel='Pmf')", "From here, we want to compare the two distributions. To do this, we will start by taking the difference between the distributions.", "pmf_diff = pmf_mu1 - pmf_mu0\npmf_diff.Mean()", "From here we can calculate the probability that money was stolen from the city.", "cdf_diff = pmf_diff.MakeCdf()\nthinkplot.Cdf(cdf_diff)\ncdf_diff[0]", "So we can calculate that the probability that money was stolen from the city is 93.9%.\nNext, we want to calculate how much money was stolen from the city. We first need to calculate how much money the city collected during the Brink period. Then we can multiply this by our pmf_diff to get a probability distribution of the potential stolen money.", "money_city = np.where(df['BRINK']==1, df['CITY'], 0).sum(0)\nprint((pmf_diff * money_city).CredibleInterval(50))\nthinkplot.Pmf(pmf_diff * money_city)", "Above we see a plot of stolen money in millions. We have also calculated a credible interval that tells us that there is a 50% chance that Brink stole between 1.4 and 3.6 million dollars.\nIn pursuit of more evidence, we find the probability that the standard deviation in the Brink collections is higher than that of the general contractors.", "pmf_sigma1.ProbGreater(pmf_sigma0)", "We see that there is an extremely low chance that the standard deviation of the Brink collections is higher than that of the general collections.\nAt this point, we have seemingly overwhelming evidence that Brink stole money from the city. I solved this problem using tools I learned in class. If I were to do this problem again, I would do it without calculating the variance, because the best evidence is to calculate whether and how much money was stolen.\nFinal project ideas\nOver the past few weeks, I have dabbled in potential final projects. One of my ideas was to predict the content of tweets based on other words in the tweet. For example, if we were to update with the word \"emails\", how does the probability change for the tweet to contain Hillary, Trump, or Hillary and Trump? I mined quite a bit of data over debate night, but I'm not convinced that this is a particularly interesting project." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
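As a quick cross-check of the drawer likelihoods above, here is a minimal sketch that stays outside the thinkbayes2 Suite machinery; the pair_prob helper is only an illustrative name, and it simply mirrors the sampling-without-replacement products used in Drawers.Likelihood for a single matching pair (data = 2).

from fractions import Fraction

def pair_prob(white, black):
    # Probability that two socks drawn without replacement match in color.
    total = white + black
    return (Fraction(white, total) * Fraction(white - 1, total - 1)
            + Fraction(black, total) * Fraction(black - 1, total - 1))

like1 = pair_prob(40, 10)   # drawer 1: 40 white, 10 black
like2 = pair_prob(20, 30)   # drawer 2: 20 white, 30 black

# Bayes' rule with a uniform prior over the two drawers.
print(float(like1), float(like2), float(like1 / (like1 + like2)))  # ~0.67, ~0.51, ~0.57

The posterior of roughly 0.57 for drawer 1 agrees with the direction of the single-pair update produced by the Drawers suite above.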
GoogleCloudPlatform/asl-ml-immersion
notebooks/image_models/solutions/2_mnist_models.ipynb
apache-2.0
[ "MNIST Image Classification with TensorFlow on Cloud AI Platform\nThis notebook demonstrates how to implement different image models on MNIST using the tf.keras API.\nLearning Objectives\n\nUnderstand how to build a Dense Neural Network (DNN) for image classification\nUnderstand how to use dropout (DNN) for image classification\nUnderstand how to use Convolutional Neural Networks (CNN)\nKnow how to deploy and use an image classifcation model using Google Cloud's AI Platform\n\nFirst things first. Configure the parameters below to match your own Google Cloud project details.", "import os\nfrom datetime import datetime\n\nREGION = \"us-central1\"\nPROJECT = !(gcloud config get-value core/project)\nPROJECT = PROJECT[0]\nBUCKET = PROJECT\nMODEL_TYPE = \"cnn\" # \"linear\", \"dnn\", \"dnn_dropout\", or \"cnn\"\n\n# Do not change these\nos.environ[\"PROJECT\"] = PROJECT\nos.environ[\"BUCKET\"] = BUCKET\nos.environ[\"REGION\"] = REGION\nos.environ[\"MODEL_TYPE\"] = MODEL_TYPE\nos.environ[\"IMAGE_URI\"] = os.path.join(\"gcr.io\", PROJECT, \"mnist_models\")", "Building a dynamic model\nIn the previous notebook, <a href=\"mnist_linear.ipynb\">mnist_linear.ipynb</a>, we ran our code directly from the notebook. In order to run it on the AI Platform, it needs to be packaged as a python module.\nThe boilerplate structure for this module has already been set up in the folder mnist_models. The module lives in the sub-folder, trainer, and is designated as a python package with the empty __init__.py (mnist_models/trainer/__init__.py) file. It still needs the model and a trainer to run it, so let's make them.\nLet's start with the trainer file first. This file parses command line arguments to feed into the model.", "%%writefile mnist_models/trainer/task.py\nimport argparse\nimport json\nimport os\nimport sys\n\nfrom . import model\n\n\ndef _parse_arguments(argv):\n \"\"\"Parses command-line arguments.\"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--model_type',\n help='Which model type to use',\n type=str, default='linear')\n parser.add_argument(\n '--epochs',\n help='The number of epochs to train',\n type=int, default=10)\n parser.add_argument(\n '--steps_per_epoch',\n help='The number of steps per epoch to train',\n type=int, default=100)\n parser.add_argument(\n '--job-dir',\n help='Directory where to save the given model',\n type=str, default='mnist_models/')\n return parser.parse_known_args(argv)\n\n\ndef main():\n \"\"\"Parses command line arguments and kicks off model training.\"\"\"\n args = _parse_arguments(sys.argv[1:])[0]\n\n # Configure path for hyperparameter tuning.\n trial_id = json.loads(\n os.environ.get('TF_CONFIG', '{}')).get('task', {}).get('trial', '')\n output_path = args.job_dir if not trial_id else args.job_dir + '/'\n\n model_layers = model.get_layers(args.model_type)\n image_model = model.build_model(model_layers, args.job_dir)\n model_history = model.train_and_evaluate(\n image_model, args.epochs, args.steps_per_epoch, args.job_dir)\n\n\nif __name__ == '__main__':\n main()\n", "Next, let's group non-model functions into a util file to keep the model file simple. 
We'll copy over the scale and load_dataset functions from the previous lab.", "%%writefile mnist_models/trainer/util.py\nimport tensorflow as tf\n\n\ndef scale(image, label):\n \"\"\"Scales images from a 0-255 int range to a 0-1 float range\"\"\"\n image = tf.cast(image, tf.float32)\n image /= 255\n image = tf.expand_dims(image, -1)\n return image, label\n\n\ndef load_dataset(\n data, training=True, buffer_size=5000, batch_size=100, nclasses=10):\n \"\"\"Loads MNIST dataset into a tf.data.Dataset\"\"\"\n (x_train, y_train), (x_test, y_test) = data\n x = x_train if training else x_test\n y = y_train if training else y_test\n # One-hot encode the classes\n y = tf.keras.utils.to_categorical(y, nclasses)\n dataset = tf.data.Dataset.from_tensor_slices((x, y))\n dataset = dataset.map(scale).batch(batch_size)\n if training:\n dataset = dataset.shuffle(buffer_size).repeat()\n return dataset\n", "Finally, let's code the models! The tf.keras API accepts an array of layers into a model object, so we can create a dictionary of layers based on the different model types we want to use. The below file has two functions: get_layers and create_and_train_model. We will build the structure of our model in get_layers. Last but not least, we'll copy over the training code from the previous lab into train_and_evaluate.\nTODO 1: Define the Keras layers for a DNN model \nTODO 2: Define the Keras layers for a dropout model\nTODO 3: Define the Keras layers for a CNN model \nHint: These models progressively build on each other. Look at the imported tensorflow.keras.layers modules and the default values for the variables defined in get_layers for guidance.", "%%writefile mnist_models/trainer/model.py\nimport os\nimport shutil\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.keras import Sequential\nfrom tensorflow.keras.callbacks import TensorBoard\nfrom tensorflow.keras.layers import (\n Conv2D, Dense, Dropout, Flatten, MaxPooling2D, Softmax)\n\nfrom . 
import util\n\n\n# Image Variables\nWIDTH = 28\nHEIGHT = 28\n\n\ndef get_layers(\n model_type,\n nclasses=10,\n hidden_layer_1_neurons=400,\n hidden_layer_2_neurons=100,\n dropout_rate=0.25,\n num_filters_1=64,\n kernel_size_1=3,\n pooling_size_1=2,\n num_filters_2=32,\n kernel_size_2=3,\n pooling_size_2=2):\n \"\"\"Constructs layers for a keras model based on a dict of model types.\"\"\"\n model_layers = {\n 'linear': [\n Flatten(),\n Dense(nclasses),\n Softmax()\n ],\n 'dnn': [\n Flatten(),\n Dense(hidden_layer_1_neurons, activation='relu'),\n Dense(hidden_layer_2_neurons, activation='relu'),\n Dense(nclasses),\n Softmax()\n ],\n 'dnn_dropout': [\n Flatten(),\n Dense(hidden_layer_1_neurons, activation='relu'),\n Dense(hidden_layer_2_neurons, activation='relu'),\n Dropout(dropout_rate),\n Dense(nclasses),\n Softmax()\n ],\n 'cnn': [\n Conv2D(num_filters_1, kernel_size=kernel_size_1,\n activation='relu', input_shape=(WIDTH, HEIGHT, 1)),\n MaxPooling2D(pooling_size_1),\n Conv2D(num_filters_2, kernel_size=kernel_size_2,\n activation='relu'),\n MaxPooling2D(pooling_size_2),\n Flatten(),\n Dense(hidden_layer_1_neurons, activation='relu'),\n Dense(hidden_layer_2_neurons, activation='relu'),\n Dropout(dropout_rate),\n Dense(nclasses),\n Softmax()\n ]\n }\n return model_layers[model_type]\n\n\ndef build_model(layers, output_dir):\n \"\"\"Compiles keras model for image classification.\"\"\"\n model = Sequential(layers)\n model.compile(optimizer='adam',\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n return model\n\n\ndef train_and_evaluate(model, num_epochs, steps_per_epoch, output_dir):\n \"\"\"Compiles keras model and loads data into it for training.\"\"\"\n mnist = tf.keras.datasets.mnist.load_data()\n train_data = util.load_dataset(mnist)\n validation_data = util.load_dataset(mnist, training=False)\n\n callbacks = []\n if output_dir:\n tensorboard_callback = TensorBoard(log_dir=output_dir)\n callbacks = [tensorboard_callback]\n\n history = model.fit(\n train_data,\n validation_data=validation_data,\n epochs=num_epochs,\n steps_per_epoch=steps_per_epoch,\n verbose=2,\n callbacks=callbacks)\n\n if output_dir:\n export_path = os.path.join(output_dir, 'keras_export')\n model.save(export_path, save_format='tf')\n\n return history\n", "Local Training\nWith everything set up, let's run locally to test the code. Some of the previous tests have been copied over into a testing script mnist_models/trainer/test.py to make sure the model still passes our previous checks. On line 13, you can specify which model types you would like to check. line 14 and line 15 has the number of epochs and steps per epoch respectively.\nMoment of truth! Run the code below to check your models against the unit tests. If you see \"OK\" at the end when it's finished running, congrats! You've passed the tests!", "!python3 -m mnist_models.trainer.test", "Now that we know that our models are working as expected, let's run it on the Google Cloud AI Platform. We can run it as a python module locally first using the command line.\nThe below cell transfers some of our variables to the command line as well as create a job directory including a timestamp.", "current_time = datetime.now().strftime(\"%Y%m%d_%H%M%S\")\nmodel_type = \"cnn\"\n\nos.environ[\"MODEL_TYPE\"] = model_type\nos.environ[\"JOB_DIR\"] = \"mnist_models/models/{}_{}/\".format(\n model_type, current_time\n)", "The cell below runs the local version of the code. 
The epochs and steps_per_epoch flag can be changed to run for longer or shorther, as defined in our mnist_models/trainer/task.py file.", "%%bash\npython3 -m mnist_models.trainer.task \\\n --job-dir=$JOB_DIR \\\n --epochs=5 \\\n --steps_per_epoch=50 \\\n --model_type=$MODEL_TYPE", "Training on the cloud\nWe will use a Deep Learning Container to train this model on AI Platform. Below is a simple Dockerlife which copies our code to be used in a TensorFlow 2.3 environment.", "%%writefile mnist_models/Dockerfile\nFROM gcr.io/deeplearning-platform-release/tf2-cpu.2-3\nCOPY mnist_models/trainer /mnist_models/trainer\nENTRYPOINT [\"python3\", \"-m\", \"mnist_models.trainer.task\"]", "The below command builds the image and ships it off to Google Cloud so it can be used for AI Platform. When built, it will show up here with the name mnist_models. (Click here to enable Cloud Build)", "!docker build -f mnist_models/Dockerfile -t $IMAGE_URI ./\n\n!docker push $IMAGE_URI", "Finally, we can kickoff the AI Platform training job. We can pass in our docker image using the master-image-uri flag.", "current_time = datetime.now().strftime(\"%Y%m%d_%H%M%S\")\nmodel_type = \"cnn\"\n\nos.environ[\"MODEL_TYPE\"] = model_type\nos.environ[\"JOB_DIR\"] = \"gs://{}/mnist_{}_{}/\".format(\n BUCKET, model_type, current_time\n)\nos.environ[\"JOB_NAME\"] = f\"mnist_{model_type}_{current_time}\"\n\n%%bash\necho $JOB_DIR $REGION $JOB_NAME\ngcloud ai-platform jobs submit training $JOB_NAME \\\n --staging-bucket=gs://$BUCKET \\\n --region=$REGION \\\n --master-image-uri=$IMAGE_URI \\\n --scale-tier=BASIC_GPU \\\n --job-dir=$JOB_DIR \\\n -- \\\n --model_type=$MODEL_TYPE", "Deploying and predicting with model\nOnce you have a model you're proud of, let's deploy it! All we need to do is give AI Platform the location of the model. Below uses the keras export path of the previous job, but ${JOB_DIR}keras_export/ can always be changed to a different path.", "TIMESTAMP = datetime.now().strftime(\"%Y%m%d%H%M%S\")\nMODEL_NAME = f\"mnist_{TIMESTAMP}\"\n\n%env MODEL_NAME = $MODEL_NAME\n\n%%bash\nMODEL_VERSION=${MODEL_TYPE}\nMODEL_LOCATION=${JOB_DIR}keras_export/\necho \"Deploying $MODEL_NAME $MODEL_VERSION from $MODEL_LOCATION ... this will take a few minutes\"\ngcloud ai-platform models create ${MODEL_NAME} --region $REGION\ngcloud ai-platform versions create ${MODEL_VERSION} \\\n --model ${MODEL_NAME} \\\n --origin ${MODEL_LOCATION} \\\n --framework tensorflow \\\n --runtime-version=2.3", "To predict with the model, let's take one of the example images.\nTODO 4: Write a .json file with image data to send to an AI Platform deployed model", "import codecs\nimport json\n\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\n\nHEIGHT = 28\nWIDTH = 28\nIMGNO = 12\n\nmnist = tf.keras.datasets.mnist.load_data()\n(x_train, y_train), (x_test, y_test) = mnist\ntest_image = x_test[IMGNO]\n\njsondata = test_image.reshape(HEIGHT, WIDTH, 1).tolist()\njson.dump(jsondata, codecs.open(\"test.json\", \"w\", encoding=\"utf-8\"))\nplt.imshow(test_image.reshape(HEIGHT, WIDTH));", "Finally, we can send it to the prediction service. The output will have a 1 in the index of the corresponding digit it is predicting. Congrats! You've completed the lab!", "%%bash\ngcloud ai-platform predict \\\n --model=${MODEL_NAME} \\\n --version=${MODEL_TYPE} \\\n --json-instances=./test.json", "Copyright 2021 Google Inc.\nLicensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at\nhttp://www.apache.org/licenses/LICENSE-2.0\nUnless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
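As a quick sanity check of the preprocessing described in mnist_models/trainer/util.py above, the following minimal sketch (assuming a local TensorFlow 2.x install) applies the same scaling and one-hot encoding to a single MNIST image and prints the shapes the model expects.

import tensorflow as tf

(x_train, y_train), _ = tf.keras.datasets.mnist.load_data()

# Mirror util.scale: cast to float, rescale to [0, 1], and add a channel axis.
image = tf.expand_dims(tf.cast(x_train[0], tf.float32) / 255, -1)
label = tf.keras.utils.to_categorical(y_train[0], 10)

print(image.shape)                   # (28, 28, 1) -- matches WIDTH, HEIGHT in model.py
print(float(tf.reduce_max(image)))   # at most 1.0 after scaling
print(label.shape, label.sum())      # (10,) one-hot vector that sums to 1.0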
jeiros/Jupyter_notebooks
python/markov_analysis/PyEMMA-API.ipynb
mit
[ "Testing the pyEMMA API", "import pyemma\npyemma.__version__", "Now we import a few general packages that we need to start with. The following imports basic numerics and algebra routines (numpy) and plotting routines (matplotlib), and makes sure that all plots are shown inside the notebook rather than in a separate window (nicer that way).", "import matplotlib.pylab as plt\nimport numpy as np\n%pylab inline", "Now we import the pyEMMA package that we will be using in the beginning: the coordinates package. This package contains functions and classes for reading and writing trajectory files, extracting order parameters from them (such as distances or angles), as well as various methods for dimensionality reduction and clustering.\nThe shortcuts module is a bunch of functions specific to this workshop - they help us to visualize some of our results. Some of them might become part of the pyemma package once they are more mature.", "import pyemma.coordinates as coor\nimport pyemma.msm as msm\nimport pyemma.plots as mplt\nfrom pyemma import config\n\n# some helper funcs\ndef average_by_state(dtraj, x, nstates):\n assert(len(dtraj) == len(x))\n N = len(dtraj)\n res = np.zeros((nstates))\n for i in range(nstates):\n I = np.argwhere(dtraj == i)[:,0]\n res[i] = np.mean(x[I])\n return res\n\ndef avg_by_set(x, sets):\n # compute mean positions of sets. This is important because of some technical points the set order\n # in the coarse-grained TPT object can be different from the input order.\n avg = np.zeros(len(sets))\n for i in range(len(sets)):\n I = list(sets[i])\n avg[i] = np.mean(x[I])\n return avg\n\nshortcuts = {'average_by_state': average_by_state, 'avg_by_set': avg_by_set}\n\nimport glob\ntrajfiles = sorted(glob.glob('./*/05*nc'))\nfor file in trajfiles:\n print(\"%s\\n\" % file)\ntopfile = \"./test.pdb\"\n\nfeat = coor.featurizer(topfile)\nfeat.add_backbone_torsions(cossin=True)\nfeat.add_chi1_torsions(cossin=True)\n\ninp = coor.source(trajfiles, feat)\n\n\nprint(\"Number of trajectories: %s\" % inp.number_of_trajectories())\nprint(\"Aggregate simulation time: %.2f ns\" % (inp.n_frames_total() * 0.02))\nprint(\"Number of dimensions: %s\" % inp.dimension())", "TICA and clustering\nSo we would like to first reduce our dimension by throwing out the ‘uninteresting’ ones and only keeping the ‘relevant’ ones. But how do we do that?\nIt turns out that a really good way to do that if you are interesting in the slow kinetics of the molecule - e.g. for constructing a Markov model, is to use the time-lagged independent component analysis (TICA) [2]. Amongst linear methods, TICA is optimal in its ability to approximate the relevant slow coordinates / reaction coordinates from MD simulation [3], and therefore it’s ideal to construct Markov models.", "tica_obj = coor.tica(inp, lag=100)\n\nY = tica_obj.get_output()[0]", "By default, TICA will choose a number of output dimensions to cover 95% of the kinetic variance and scale the output to produce a kinetic map. In this case we retain 575 dimensions, which is a lot but note that they are scaled by eigenvalue, so it’s mostly the first dimensions that contribute.", "print(\"Projected data shape: (%s,%s)\" % (Y.shape[0], Y.shape[1]))\n\nprint('Retained dimensions: %s' % tica_obj.dimension())\nplot(tica_obj.cumvar, linewidth=2)\nplot([tica_obj.dimension(), tica_obj.dimension()], [0, 1], color='black', linewidth=2)\nplot([0, Y.shape[0]], [0.95, 0.95], color='black', linewidth=2)\nxlabel('Number of dimensions'); ylabel('Cum. 
kinetic variance fraction')", "The TICA object has a number of properties that we can extract and work with. We have already obtained the projected trajectory and written it to a variable Y, which is a matrix of size (103125 x 2). The rows are the MD steps, and the 2 columns are the coordinates projected onto the independent components. So each column is a trajectory. Let us plot them:", "mplt.plot_free_energy(np.vstack(Y)[:, 0], np.vstack(Y)[:, 1])\nxlabel('independent component 1'); ylabel('independent component 2')", "A particular thing about the ICs is that they have zero mean and variance one. We can easily check that:", "print(\"Mean values: %s\" % np.mean(Y[0], axis = 0))\nprint(\"Variances: %s\" % np.var(Y[0], axis = 0))", "The small deviations from 0 and 1 come from statistical and numerical issues. That’s not a problem. Note that if we had set kinetic_map=True when doing TICA, then the variances would not be 1 but rather the square of the corresponding TICA eigenvalue.\nTICA is a special transformation because it will project the data such that the autocorrelation along the independent components is as slow as possible. The eigenvalues of the TICA transform are the values of these autocorrelations at the chosen lag time (here 100). We can even interpret them in terms of relaxation timescales:", "print(-100/np.log(tica_obj.eigenvalues[:5]))", "We will see more timescales later when we estimate a Markov model, and there will be some differences. For now you should treat these numbers as a rough guess of your molecule’s timescales, and we will see later that this guess is actually a bit too fast. The timescales are relative to the 10 ns saving interval, so we have:", "subplot2grid((2,1),(0,0))\nplot(Y[:,0])\nylabel('ind. comp. 1')\nsubplot2grid((2,1),(1,0))\nplot(Y[:,1])\nylabel('ind. comp. 2')\nxlabel('time (10 ns)')\n\ntica_obj.chunksize\n\nmplt.plot_implied_timescales(tica_obj)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
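The timescale conversion used above, print(-100/np.log(tica_obj.eigenvalues[:5])), is the usual implied-timescale relation t_i = -tau / ln(lambda_i); a tiny standalone sketch with a made-up eigenvalue, just to make the formula explicit:

import numpy as np

def implied_timescale(eigenvalue, lag):
    # t_i = -lag / ln(lambda_i), in the same units as the lag time.
    return -lag / np.log(eigenvalue)

# e.g. an eigenvalue of 0.95 at lag 100 corresponds to roughly 1950 steps
print(implied_timescale(0.95, lag=100))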
mdiaz236/DeepLearningFoundations
gan_mnist/Intro_to_GANs_Solution.ipynb
mit
[ "Generative Adversarial Network\nIn this notebook, we'll be building a generative adversarial network (GAN) trained on the MNIST dataset. From this, we'll be able to generate new handwritten digits!\nGANs were first reported on in 2014 by Ian Goodfellow and others in Yoshua Bengio's lab. Since then, GANs have exploded in popularity. Here are a few examples to check out:\n\nPix2Pix \nCycleGAN\nA whole list\n\nThe idea behind GANs is that you have two networks, a generator $G$ and a discriminator $D$, competing against each other. The generator makes fake data to pass to the discriminator. The discriminator also sees real data and predicts if the data it's received is real or fake. The generator is trained to fool the discriminator: it wants to output data that looks as close as possible to real data. And the discriminator is trained to figure out which data is real and which is fake. What ends up happening is that the generator learns to make data that is indistinguishable from real data to the discriminator.\n\nThe general structure of a GAN is shown in the diagram above, using MNIST images as data. The latent sample is a random vector the generator uses to construct its fake images. As the generator learns through training, it figures out how to map these random vectors to recognizable images that can fool the discriminator.\nThe output of the discriminator is a sigmoid function, where 0 indicates a fake image and 1 indicates a real image. If you're interested only in generating new images, you can throw out the discriminator after training. Now, let's see how we build this thing in TensorFlow.", "%matplotlib inline\n\nimport pickle as pkl\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets('MNIST_data')", "Model Inputs\nFirst we need to create the inputs for our graph. We need two inputs, one for the discriminator and one for the generator. Here we'll call the discriminator input inputs_real and the generator input inputs_z. We'll assign them the appropriate sizes for each of the networks.", "def model_inputs(real_dim, z_dim):\n    inputs_real = tf.placeholder(tf.float32, (None, real_dim), name='input_real') \n    inputs_z = tf.placeholder(tf.float32, (None, z_dim), name='input_z')\n    \n    return inputs_real, inputs_z", "Generator network\n\nHere we'll build the generator network. To make this network a universal function approximator, we'll need at least one hidden layer. We should use a leaky ReLU to allow gradients to flow backwards through the layer unimpeded. A leaky ReLU is like a normal ReLU, except that there is a small non-zero output for negative input values.\nVariable Scope\nHere we need to use tf.variable_scope for two reasons. Firstly, we're going to make sure all the variable names start with generator. Similarly, we'll prepend discriminator to the discriminator variables. This will help out later when we're training the separate networks.\nWe could just use tf.name_scope to set the names, but we also want to reuse these networks with different inputs. For the generator, we're going to train it, but also sample from it as we're training and after training. The discriminator will need to share variables between the fake and real input images.
So, we can use the reuse keyword for tf.variable_scope to tell TensorFlow to reuse the variables instead of creating new ones if we build the graph again.\nTo use tf.variable_scope, you use a with statement:\npython\nwith tf.variable_scope('scope_name', reuse=False):\n # code here\nHere's more from the TensorFlow documentation to get another look at using tf.variable_scope.\nLeaky ReLU\nTensorFlow doesn't provide an operation for leaky ReLUs, so we'll need to make one . For this you can use take the outputs from a linear fully connected layer and pass them to tf.maximum. Typically, a parameter alpha sets the magnitude of the output for negative values. So, the output for negative input (x) values is alpha*x, and the output for positive x is x:\n$$\nf(x) = max(\\alpha * x, x)\n$$\nTanh Output\nThe generator has been found to perform the best with $tanh$ for the generator output. This means that we'll have to rescale the MNIST images to be between -1 and 1, instead of 0 and 1.", "def generator(z, out_dim, n_units=128, reuse=False, alpha=0.01):\n with tf.variable_scope('generator', reuse=reuse):\n # Hidden layer\n h1 = tf.layers.dense(z, n_units, activation=None)\n # Leaky ReLU\n h1 = tf.maximum(alpha * h1, h1)\n \n # Logits and tanh output\n logits = tf.layers.dense(h1, out_dim, activation=None)\n out = tf.tanh(logits)\n \n return out", "Discriminator\nThe discriminator network is almost exactly the same as the generator network, except that we're using a sigmoid output layer.", "def discriminator(x, n_units=128, reuse=False, alpha=0.01):\n with tf.variable_scope('discriminator', reuse=reuse):\n # Hidden layer\n h1 = tf.layers.dense(x, n_units, activation=None)\n # Leaky ReLU\n h1 = tf.maximum(alpha * h1, h1)\n \n logits = tf.layers.dense(h1, 1, activation=None)\n out = tf.sigmoid(logits)\n \n return out, logits", "Hyperparameters", "# Size of input image to discriminator\ninput_size = 784\n# Size of latent vector to generator\nz_size = 100\n# Sizes of hidden layers in generator and discriminator\ng_hidden_size = 128\nd_hidden_size = 128\n# Leak factor for leaky ReLU\nalpha = 0.01\n# Smoothing \nsmooth = 0.1", "Build network\nNow we're building the network from the functions defined above.\nFirst is to get our inputs, input_real, input_z from model_inputs using the sizes of the input and z.\nThen, we'll create the generator, generator(input_z, input_size). This builds the generator with the appropriate input and output sizes.\nThen the discriminators. We'll build two of them, one for real data and one for fake data. Since we want the weights to be the same for both real and fake data, we need to reuse the variables. For the fake data, we're getting it from the generator as g_model. So the real data discriminator is discriminator(input_real) while the fake discriminator is discriminator(g_model, reuse=True).", "tf.reset_default_graph()\n# Create our input placeholders\ninput_real, input_z = model_inputs(input_size, z_size)\n\n# Build the model\ng_model = generator(input_z, input_size)\n# g_model is the generator output\n\nd_model_real, d_logits_real = discriminator(input_real)\nd_model_fake, d_logits_fake = discriminator(g_model, reuse=True)", "Discriminator and Generator Losses\nNow we need to calculate the losses, which is a little tricky. For the discriminator, the total loss is the sum of the losses for real and fake images, d_loss = d_loss_real + d_loss_fake. The losses will by sigmoid cross-entropys, which we can get with tf.nn.sigmoid_cross_entropy_with_logits. 
We'll also wrap that in tf.reduce_mean to get the mean for all the images in the batch. So the losses will look something like \npython\ntf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))\nFor the real image logits, we'll use d_logits_real which we got from the discriminator in the cell above. For the labels, we want them to be all ones, since these are all real images. To help the discriminator generalize better, the labels are reduced a bit from 1.0 to 0.9, for example, using the parameter smooth. This is known as label smoothing, typically used with classifiers to improve performance. In TensorFlow, it looks something like labels = tf.ones_like(tensor) * (1 - smooth)\nThe discriminator loss for the fake data is similar. The logits are d_logits_fake, which we got from passing the generator output to the discriminator. These fake logits are used with labels of all zeros. Remember that we want the discriminator to output 1 for real images and 0 for fake images, so we need to set up the losses to reflect that.\nFinally, the generator losses are using d_logits_fake, the fake image logits. But, now the labels are all ones. The generator is trying to fool the discriminator, so it wants to discriminator to output ones for fake images.", "# Calculate losses\nd_loss_real = tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_real, \n labels=tf.ones_like(d_logits_real) * (1 - smooth)))\nd_loss_fake = tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake, \n labels=tf.zeros_like(d_logits_real)))\nd_loss = d_loss_real + d_loss_fake\n\ng_loss = tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake,\n labels=tf.ones_like(d_logits_fake)))", "Optimizers\nWe want to update the generator and discriminator variables separately. So we need to get the variables for each part build optimizers for the two parts. To get all the trainable variables, we use tf.trainable_variables(). This creates a list of all the variables we've defined in our graph.\nFor the generator optimizer, we only want to generator variables. Our past selves were nice and used a variable scope to start all of our generator variable names with generator. So, we just need to iterate through the list from tf.trainable_variables() and keep variables to start with generator. Each variable object has an attribute name which holds the name of the variable as a string (var.name == 'weights_0' for instance). \nWe can do something similar with the discriminator. All the variables in the discriminator start with discriminator.\nThen, in the optimizer we pass the variable lists to var_list in the minimize method. This tells the optimizer to only update the listed variables. 
Something like tf.train.AdamOptimizer().minimize(loss, var_list=var_list) will only train the variables in var_list.", "# Optimizers\nlearning_rate = 0.002\n\n# Get the trainable_variables, split into G and D parts\nt_vars = tf.trainable_variables()\ng_vars = [var for var in t_vars if var.name.startswith('generator')]\nd_vars = [var for var in t_vars if var.name.startswith('discriminator')]\n\nd_train_opt = tf.train.AdamOptimizer(learning_rate).minimize(d_loss, var_list=d_vars)\ng_train_opt = tf.train.AdamOptimizer(learning_rate).minimize(g_loss, var_list=g_vars)", "Training", "!mkdir checkpoints\n\nbatch_size = 100\nepochs = 100\nsamples = []\nlosses = []\n# Only save generator variables\nsaver = tf.train.Saver(var_list=g_vars)\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n for e in range(epochs):\n for ii in range(mnist.train.num_examples//batch_size):\n batch = mnist.train.next_batch(batch_size)\n \n # Get images, reshape and rescale to pass to D\n batch_images = batch[0].reshape((batch_size, 784))\n batch_images = batch_images*2 - 1\n \n # Sample random noise for G\n batch_z = np.random.uniform(-1, 1, size=(batch_size, z_size))\n \n # Run optimizers\n _ = sess.run(d_train_opt, feed_dict={input_real: batch_images, input_z: batch_z})\n _ = sess.run(g_train_opt, feed_dict={input_z: batch_z})\n \n # At the end of each epoch, get the losses and print them out\n train_loss_d = sess.run(d_loss, {input_z: batch_z, input_real: batch_images})\n train_loss_g = g_loss.eval({input_z: batch_z})\n \n print(\"Epoch {}/{}...\".format(e+1, epochs),\n \"Discriminator Loss: {:.4f}...\".format(train_loss_d),\n \"Generator Loss: {:.4f}\".format(train_loss_g)) \n # Save losses to view after training\n losses.append((train_loss_d, train_loss_g))\n \n # Sample from generator as we're training for viewing afterwards\n sample_z = np.random.uniform(-1, 1, size=(16, z_size))\n gen_samples = sess.run(\n generator(input_z, input_size, reuse=True),\n feed_dict={input_z: sample_z})\n samples.append(gen_samples)\n saver.save(sess, './checkpoints/generator.ckpt')\n\n# Save training generator samples\nwith open('train_samples.pkl', 'wb') as f:\n pkl.dump(samples, f)", "Training loss\nHere we'll check out the training losses for the generator and discriminator.", "fig, ax = plt.subplots()\nlosses = np.array(losses)\nplt.plot(losses.T[0], label='Discriminator')\nplt.plot(losses.T[1], label='Generator')\nplt.title(\"Training Losses\")\nplt.legend()", "Generator samples from training\nHere we can view samples of images from the generator. First we'll look at images taken while training.", "def view_samples(epoch, samples):\n fig, axes = plt.subplots(figsize=(7,7), nrows=4, ncols=4, sharey=True, sharex=True)\n for ax, img in zip(axes.flatten(), samples[epoch]):\n ax.xaxis.set_visible(False)\n ax.yaxis.set_visible(False)\n im = ax.imshow(img.reshape((28,28)), cmap='Greys_r')\n \n return fig, axes\n\n# Load samples from generator taken while training\nwith open('train_samples.pkl', 'rb') as f:\n samples = pkl.load(f)", "These are samples from the final training epoch. You can see the generator is able to reproduce numbers like 1, 7, 3, 2. Since this is just a sample, it isn't representative of the full range of images this generator can make.", "_ = view_samples(-1, samples)", "Below I'm showing the generated images as the network was training, every 10 epochs. 
With bonus optical illusion!", "rows, cols = 10, 6\nfig, axes = plt.subplots(figsize=(7,12), nrows=rows, ncols=cols, sharex=True, sharey=True)\n\nfor sample, ax_row in zip(samples[::int(len(samples)/rows)], axes):\n for img, ax in zip(sample[::int(len(sample)/cols)], ax_row):\n ax.imshow(img.reshape((28,28)), cmap='Greys_r')\n ax.xaxis.set_visible(False)\n ax.yaxis.set_visible(False)", "It starts out as all noise. Then it learns to make only the center white and the rest black. You can start to see some number like structures appear out of the noise like 1s and 9s.\nSampling from the generator\nWe can also get completely new images from the generator by using the checkpoint we saved after training. We just need to pass in a new latent vector $z$ and we'll get new samples!", "saver = tf.train.Saver(var_list=g_vars)\nwith tf.Session() as sess:\n saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))\n sample_z = np.random.uniform(-1, 1, size=(16, z_size))\n gen_samples = sess.run(\n generator(input_z, input_size, reuse=True),\n feed_dict={input_z: sample_z})\n_ = view_samples(0, [gen_samples])" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
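The leaky-ReLU rule f(x) = max(alpha * x, x) and the label smoothing used in the losses above can be illustrated with a few lines of plain NumPy (the input values here are arbitrary examples):

import numpy as np

alpha, smooth = 0.01, 0.1

x = np.array([-2.0, -0.5, 0.0, 1.5])
print(np.maximum(alpha * x, x))   # approx [-0.02, -0.005, 0.0, 1.5] -- small slope for negative inputs

real_labels = np.ones(4) * (1 - smooth)
print(real_labels)                # smoothed targets of 0.9 instead of 1.0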
ad960009/dist-keras
examples/example_1_analysis.ipynb
gpl-3.0
[ "Model Development and Evaluation\nJoeri Hermans (Technical Student, IT-DB-SAS, CERN) \nDepartement of Knowledge Engineering \nMaastricht University, The Netherlands\nThis notebook is dedicated to the development and evaluation of a Keras model based on a large preprocessed dataset.", "%matplotlib inline \n\nimport numpy as np\n\nimport matplotlib.pyplot as plt\n\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense, Dropout, Activation\n\nfrom pyspark import SparkContext\nfrom pyspark import SparkConf\n\nfrom pyspark.ml.feature import StandardScaler\nfrom pyspark.ml.feature import VectorAssembler\nfrom pyspark.ml.feature import StringIndexer\nfrom pyspark.ml.evaluation import MulticlassClassificationEvaluator\n\nfrom distkeras.transformers import LabelIndexTransformer\nfrom distkeras.predictors import ModelPredictor\nfrom distkeras.trainers import SingleTrainer\nfrom distkeras.trainers import AEASGD\nfrom distkeras.trainers import DOWNPOUR", "Spark Configuration and Preparation\nEdit the variables in the cell below. If you are running Spark in local mode, please set the local flag to true and adjust the resources you wish to use on your local machine. The same goes for the case when you are running Spark 2.0 and higher.", "# Modify these variables according to your needs.\napplication_name = \"Distributed Deep Learning: Analysis\"\nusing_spark_2 = False\nlocal = False\nif local:\n # Tell master to use local resources.\n master = \"local[*]\"\n num_cores = 3\n num_executors = 1\nelse:\n # Tell master to use YARN.\n master = \"yarn-client\"\n num_executors = 8\n num_cores = 2\n\n# This variable is derived from the number of cores and executors, and will be used to assign the number of model trainers.\nnum_workers = num_executors * num_cores\n\nprint(\"Number of desired executors: \" + `num_executors`)\nprint(\"Number of desired cores / executor: \" + `num_cores`)\nprint(\"Total number of workers: \" + `num_workers`)\n\nconf = SparkConf()\nconf.set(\"spark.app.name\", application_name)\nconf.set(\"spark.master\", master)\nconf.set(\"spark.executor.cores\", `num_cores`)\nconf.set(\"spark.executor.instances\", `num_executors`)\nconf.set(\"spark.executor.memory\",\"2g\")\nconf.set(\"spark.serializer\", \"org.apache.spark.serializer.KryoSerializer\");\n\n# Check if the user is running Spark 2.0 +\nif using_spark_2:\n sc = SparkSession.builder.config(conf=conf) \\\n .appName(application_name) \\\n .getOrCreate()\nelse:\n # Create the Spark context.\n sc = SparkContext(conf=conf)\n # Add the missing imports\n from pyspark import SQLContext\n sqlContext = SQLContext(sc)", "Data Preparation\nAfter the Spark Context (or Spark Session if you are using Spark 2.0) has been set up, we can start reading the preprocessed dataset from storage.", "# Check if we are using Spark 2.0\nif using_spark_2:\n reader = sc\nelse:\n reader = sqlContext\n# Read the dataset.\nraw_dataset = reader.read.parquet(\"data/processed.parquet\")\n\n# Check the schema.\nraw_dataset.printSchema()", "After reading the dataset from storage, we will extract several metrics such as nb_features, which basically is the number of input neurons, and nb_classes, which is the number of classes (signal and background).", "nb_features = len(raw_dataset.select(\"features_normalized\").take(1)[0][\"features_normalized\"])\nnb_classes = len(raw_dataset.select(\"label\").take(1)[0][\"label\"])\n\nprint(\"Number of features: \" + str(nb_features))\nprint(\"Number of classes: \" + str(nb_classes))", "Finally, we split up the 
dataset for training and testing purposes, and fetch some additional statistics on the number of training and testing instances.", "# Finally, we create a trainingset and a testset.\n(training_set, test_set) = raw_dataset.randomSplit([0.7, 0.3])\ntraining_set.cache()\ntest_set.cache()\n\n# Distribute the training and test set to the workers.\ntest_set = test_set.repartition(num_workers)\ntraining_set = training_set.repartition(num_workers)\n\nnum_test_set = test_set.count()\nnum_training_set = training_set.count()\n\nprint(\"Number of testset instances: \" + str(num_test_set))\nprint(\"Number of trainingset instances: \" + str(num_training_set))\nprint(\"Total number of instances: \" + str(num_test_set + num_training_set))", "Model construction", "model = Sequential()\nmodel.add(Dense(500, input_shape=(nb_features,)))\nmodel.add(Activation('relu'))\nmodel.add(Dropout(0.4))\nmodel.add(Dense(500))\nmodel.add(Activation('relu'))\nmodel.add(Dropout(0.6))\nmodel.add(Dense(500))\nmodel.add(Activation('relu'))\nmodel.add(Dense(nb_classes))\nmodel.add(Activation('softmax'))\n\n# Summarize the model.\nmodel.summary()\n\noptimizer = 'adagrad'\nloss = 'categorical_crossentropy'", "Model evaluation", "def evaluate(model):\n global test_set\n\n metric_name = \"f1\"\n evaluator = MulticlassClassificationEvaluator(metricName=metric_name, predictionCol=\"prediction_index\", labelCol=\"label_index\")\n # Clear the prediction column from the testset.\n test_set = test_set.select(\"features_normalized\", \"label\", \"label_index\")\n # Apply a prediction from a trained model.\n predictor = ModelPredictor(keras_model=trained_model, features_col=\"features_normalized\")\n test_set = predictor.predict(test_set)\n # Transform the prediction vector to an indexed label.\n index_transformer = LabelIndexTransformer(output_dim=nb_classes)\n test_set = index_transformer.transform(test_set)\n # Store the F1 score of the SingleTrainer.\n score = evaluator.evaluate(test_set)\n \n return score\n\nresults = {}\ntime_spent = {}", "Model training and evaluation\nIn the next sections we train and evaluate the models trained by different (distributed) optimizers.\nSingle Trainer", "trainer = SingleTrainer(keras_model=model, loss=loss, worker_optimizer=optimizer, \n features_col=\"features_normalized\", num_epoch=1, batch_size=64)\ntrained_model = trainer.train(training_set)\n\n# Fetch the training time.\ndt = trainer.get_training_time()\nprint(\"Time spent (SingleTrainer): \" + `dt` + \" seconds.\")\n\n# Evaluate the model.\nscore = evaluate(trained_model)\nprint(\"F1 (SingleTrainer): \" + `score`)\n\n# Store the training metrics.\nresults['single'] = score\ntime_spent['single'] = dt", "Asynchronous EASGD", "trainer = AEASGD(keras_model=model, worker_optimizer=optimizer, loss=loss, num_workers=num_workers, batch_size=64,\n features_col=\"features_normalized\", num_epoch=1, communication_window=32, \n rho=5.0, learning_rate=0.1)\ntrainer.set_parallelism_factor(1)\ntrained_model = trainer.train(training_set)\n\n# Fetch the training time.\ndt = trainer.get_training_time()\nprint(\"Time spent (AEASGD): \" + `dt` + \" seconds.\")\n\n# Evaluate the model.\nscore = evaluate(trained_model)\nprint(\"F1 (AEASGD): \" + `score`)\n\n# Store the training metrics.\nresults['aeasgd'] = score\ntime_spent['aeasgd'] = dt", "DOWNPOUR", "trainer = DOWNPOUR(keras_model=model, worker_optimizer=optimizer, loss=loss, num_workers=num_workers,\n batch_size=64, communication_window=5, learning_rate=0.1, num_epoch=1,\n 
features_col=\"features_normalized\")\ntrainer.set_parallelism_factor(1)\ntrained_model = trainer.train(training_set)\n\n# Fetch the training time.\ndt = trainer.get_training_time()\nprint(\"Time spent (DOWNPOUR): \" + `dt` + \" seconds.\")\n\n# Evaluate the model.\nscore = evaluate(trained_model)\nprint(\"F1 (DOWNPOUR): \" + `score`)\n\n# Store the training metrics.\nresults['downpour'] = score\ntime_spent['downpour'] = dt", "Results\nAs we can see from the plots below, the distributed optimizers finish a single epoch ~7 times faster. However, to do this, the distributed optimizers use 16 times the amount of resources. This is not a very descriptive measure, however, since some of the jobs are scheduled on the same machines, some machines have a higher load, etc. Nevertheless, the statistical performance of the optimizers is within 1% error, which means that the classifiers would have near-identical performance. Furthermore, it is our guess that the statistical performance of the distributed optimizers can be improved by adding adaptive learning rates.", "# Plot the time.\nfig = plt.figure()\nst = fig.suptitle(\"Lower is better.\", fontsize=\"x-small\")\n\nplt.bar(range(len(time_spent)), time_spent.values(), align='center')\nplt.xticks(range(len(time_spent)), time_spent.keys())\nplt.xlabel(\"Optimizers\")\nplt.ylabel(\"Seconds\")\nplt.ylim([0, 7000])\nplt.show()\n\n# Plot the statistical performance of the optimizers.\nfig = plt.figure()\nst = fig.suptitle(\"Higher is better.\", fontsize=\"x-small\")\n\nplt.bar(range(len(results)), results.values(), align='center')\nplt.xticks(range(len(results)), results.keys())\nplt.xlabel(\"Optimizers\")\nplt.ylabel(\"F1\")\nplt.ylim([0.83,0.85])\nplt.show()" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
fionapigott/Data-Science-45min-Intros
neural-networks-101/Neural Networks - Part 1.ipynb
unlicense
[ "Neural Networks - Part 1\n2016-06-17, Josh Montague\nMotivation, a little history, a naive implementation, and a discussion of neural networks.\nLogistic regression\nRecap of the structural pillars of logistic regression for classification (previous RST).\n<img src=\"img/NN-1.jpeg\"> \nLet's see an example where logistic regression works. Consider some two-dimensional data that we'd like to classify.", "import numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\nfrom mlxtend.evaluate import plot_decision_regions\nfrom sklearn.datasets import make_blobs\nfrom sklearn.linear_model import LogisticRegression\n\nsamples = 20\n\nX, y = make_blobs(n_samples=samples, n_features=2, cluster_std=0.25,\n centers=[(0, 0.5), (1.5, 0.5)], shuffle=False, random_state=1)\n\n# fit the LR model\nclf = LogisticRegression().fit(X,y)\n\n# plotting decision regions\nplot_decision_regions(X, y, clf=clf, res=0.02)\nplt.xlabel('x1'); plt.ylabel('x2'); plt.title('LR (linearly separable)')\n\nprint('The model features are weighted according to: {}'.format(clf.coef_))", "A different view of logistic regression\nConsider a schematic reframing of the LR model above. This time we'll treat the inputs as nodes, and they connect to other nodes via vertices that represent the weight coefficients.\n<img src=\"img/NN-2.jpeg\"> \nThe diagram above is a (simplified form of a) single-neuron model in biology. \n<img src=\"img/neuron.gif\"> \nAs a result, this is the same model that is used to demonstrate a computational neural network. \nSo that's great. Logistic regression works, why do we need something like a neural network? To start, consider an example where the LR model breaks down:", "rng = np.random.RandomState(1)\nX = rng.randn(samples, 2)\ny = np.array(np.logical_xor(X[:, 0] > 0, X[:, 1] > 0), dtype=int)\n\nclf = LogisticRegression().fit(X,y)\n\nplot_decision_regions(X=X, y=y, clf=clf, res=0.02, legend=2)\nplt.xlabel('x1'); plt.ylabel('x2'); plt.title('LR (XOR)')", "Why does this matter? Well...\nNeural Networks\nSome history\nIn the 1960s, when the concept of neural networks were first gaining steam, this type of data was a show-stopper. In particular, the reason our model fails to be effective with this data is that it's not linearly separable; it has interaction terms.\nThis is a specific type of data that is representative of an XOR logic gate. It's not magic, just well-known, and a fundamental type of logic in computing. We can say it in words, as approximately: \"label is 1, if either x1 or x2 is 1, but not if both are 1.\"\nAt the time, this led to an interesting split in computational work in the field: on the one hand, some people set off on efforts to design very custom data and feature engineering tactics so that existing models would still work. On the other hand, people set out to solve the challenge of designing new algorithms; for example, this is approximately the era when the support vector machine was developed. Since progress on neural network models slowed significantly in this era (rememeber that computers were entire rooms!), this is often referred to as the first \"AI winter.\" Even though the multi-layer network was designed a few years later, and solved the XOR problem, the attention on the field of AI and neural networks had faded. \nToday, you might (sensibly) suggest something like an 'rbf-kernel SVM' to solve this problem, and that would totally work! But that's not where we're going today. 
\nWith the acceleration of computational power in the last decade, there has been a resurgence in the interest (and capability) of neural network computation.\nSo what does a neural network look like?\nWhat is a multi-layer model, and how does it help solve this problem? Non-linearity and feature mixing leads to new features that we don't have to encode by hand. In particular, we no longer depend just on combinations of input features. We combine input features, apply non-linearities, then combine all of those as new features, apply additional non-linearities, and so on until basically forever. \nIt sounds like a mess, and it pretty much can be. But first, we'll start simply. Imagine that we put just a single layer of \"neurons\" between our input data and output. How would that change the evaluation approach we looked at earlier?\n<img src=\"img/NN-3.jpeg\"> \nDIY neural network!\nReminder: manually writing out algorithms is a terrible idea for using them, but a great idea for learning how they work.\nTo get a sense for how the diagram above works, let's first write out the \"single-layer\" version (which we saw above is equivalent to logistic regression and doesn't work!). We just want to see how it looks in the form of forward- and backward-propagation.\nRemember, we have a (samples x 2) input matrix, so we need a (2x1) matrix of weights. And to save space, we won't use the fully-accurate and correct implementation of backprop and SGD; instead, we'll use a simplified version that's easier to read but has very similar results.", "# make the same data as above (just a little closer so it's easier to find)\nrng = np.random.RandomState(1)\nX = rng.randn(samples, 2)\n\ny = np.array(np.logical_xor(X[:, 0] > 0, X[:, 1] > 0), dtype=int)\n\ndef activate(x, deriv=False):\n \"\"\"sigmoid activation function and its derivative wrt the argument\"\"\"\n if deriv is True:\n return x*(1-x)\n return 1/(1+np.exp(-x))\n\n# initialize synapse0 weights randomly with mean 0\nsyn0 = 2*np.random.random((2,1)) - 1\n\n# nothing to see here... just some numpy vector hijinks for the next code\ny = y[None].T", "This is the iterative phase. We propagate the input data forward through the synapse (weights), calculate the errors, and then back-propogate those errors through the synapses (weights) according to the proper gradients. Note that the number of iterations is arbitary at this point. We'll come back to that.", "for i in range(10000):\n # first \"layer\" is the input data\n l0 = X\n \n # forward propagation\n l1 = activate(np.dot(l0, syn0))\n\n ### \n # this is an oversimplified version of backprop + gradient descent\n #\n # how much did we miss?\n l1_error = y - l1\n #\n # how much should we scale the adjustments?\n # (how much we missed by) * (gradient at l1 value)\n # ~an \"error-weighted derivative\"\n l1_delta = l1_error * activate(l1,True)\n ###\n \n # how much should we update the weight matrix (synapse)? \n syn0 += np.dot(l0.T,l1_delta)\n \n # some insight into the update progress\n if (i% 2000) == 0:\n print(\"Mean error @ iteration {}: {}\".format(i, np.mean(np.abs(l1_error))))", "As expected, this basically didn't work at all! \nEven though we aren't looking at the actual output data, we can use it to look at the accuracy; it never got much better than random guessing. Even after thousands of iterations! But remember, we knew that would be the case, because this single-layer network is functionally the same as vanilla logistic regression, which we saw fail on the xor data above! 
\nBut, now that we have the framework and understanding for how to optimize backprogation, we can add an additional layer to the network (a so-called \"hidden\" layer of neurons), which will introduce the kind of mixing we need to represent this data.\nAs we saw above in the diagram (and talked about), introduction of a new layer means that we get an extra step in both the forward- and backward-propagation steps. This new step means we need an additional weight (synapse) matrix, and an additional derivative calculation. Other than that, the code looks pretty much the same.", "# hold tight, we'll come back to choosing this number\nhidden_layer_width = 3\n\n# initialize synapse (weight) matrices randomly with mean 0\nsyn0 = 2*np.random.random((2,hidden_layer_width)) - 1\nsyn1 = 2*np.random.random((hidden_layer_width,1)) - 1\n\nfor i in range(60000):\n\n # forward propagation through layers 0, 1, and 2\n l0 = X\n l1 = activate(np.dot(l0,syn0))\n l2 = activate(np.dot(l1,syn1))\n\n # how much did we miss the final target value?\n l2_error = y - l2\n \n # how much should we scale the adjustments?\n l2_delta = l2_error*activate(l2,deriv=True)\n\n # project l2 error back onto l1 values according to weights\n l1_error = l2_delta.dot(syn1.T)\n \n # how much should we scale the adjustments?\n l1_delta = l1_error * activate(l1,deriv=True)\n\n # how much should we update the weight matrices (synapses)? \n syn1 += l1.T.dot(l2_delta)\n syn0 += l0.T.dot(l1_delta)\n \n if (i % 10000) == 0:\n print(\"Error @ iteration {}: {}\".format(i, np.mean(np.abs(l2_error))))", "Ok, this time we started at random guessing (sensible), but notice that we quickly reduced our overall error! That's excellent!\nNote: I didn't have time to debug the case where the full XOR data only trained to label one quadrant correctly. To get a sense for how it can look with a smaller set, change the \"fall-back data\" cell to code, and run the cells starting there!\nKnowing that the error is lower is great, but we can also inspect the results of the fit network by looking at the forward propagation results from the trained synapses (weights).", "def forward_prop(X):\n \"\"\"forward-propagate data X through the pre-fit network\"\"\"\n l1 = activate(np.dot(X,syn0))\n l2 = activate(np.dot(l1,syn1))\n return l2\n\n# numpy and plotting shenanigans come from:\n# http://scikit-learn.org/stable/auto_examples/svm/plot_iris.html\n\n# mesh step size\nh = .02 \n\n# create a mesh to plot in\nx_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1\ny_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1\nxx, yy = np.meshgrid(np.arange(x_min, x_max, h),\n np.arange(y_min, y_max, h))\n\n# calculate the surface (by forward-propagating)\nZ = forward_prop(np.c_[xx.ravel(), yy.ravel()])\n\n# reshape the result into a grid\nZ = Z.reshape(xx.shape)\n\nplt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8)\n\n# we can use this to inspect the smaller dataset\n#plt.plot(X[:, 0], X[:, 1], 'o')", "Success! (Possibly... depending on whether Josh debugged the larger network problem :) ). If only one quadrant was trained correctly, go use the smaller dataset!\nWrap-up\nThe stuff in this session is just a very basic start! The limits to the increasing complexity are now at the hardware level! Networks can be amazingly complicated, too. Below is an example from a talk I saw - note how interestingly the layers are building on each other to represent increasingly complicated structure in the context of facial recognition. 
\n<img src=\"img/3l-face.png\"> \nIt's not clear how you'd encode \"this is a face,\" but once you see how the first layer's \"atomic\" components are assembled into abstract parts of a face, and how those parts are combined into representations of kinds of faces, it seems more believable!\nDon't actually do it like this\nAnd, as you probably guessed, what we've done above isn't how you use these in practice. There are many Python libraries for building and using various neural network models. And, as you might expect, many are built with an object-oriented expressiveness:\n```python\npseudo-code (that is actually very nearly valid)\nnn = Network(optimizer='sgd')\nnn.add_layer('fully_connected', name='l0', nodes=4)\nnn.add_layer('fully_connected', name='l1', nodes=5)\nnn.add_layer('fully_connected', name='l2', nodes=2)\nnn.compile()\nnn.fit(X,y)\n```\nIn Neural Networks - Part 2, we'll look at some of these libraries and use them for some learning tasks! (hold me to it!)\nIn addition to using optimized libraries, there are many other issues and topics that go into developing and using neural networks for practical purposes. Below is a bag-of-words approach to some terms and phrases that you'll invariably see when reading about neural networks.\nNeural Network Word Salad\n\n\nGPU (graphical processing unit)\n\nThe matrix manipulations needed for large network training are typically bottlenecked by the compute throughput of a CPU. Starting in ~2013, people figured out the computer graphics chips were much faster at computing these steps and are now the go-to hardware for training networks. CPUs still work! They just tend to be an order of magnitude slower.\n\n\n\narchitecture\n\nWe only looked at so-called \"fully-connected\" networks - that is, every node was connected to every other node downstream. This is not the only way to design the layout! \nAmong many others, so-called \"convolution networks\" are very common in image recognition tasks; each layer combines a region of the previous layer's outputs into a single node in the subsequent layer.\nThere are still other choices to be made in designing a network: the number of nodes in a hidden layer, the activation function, and more. \n\n\n\nbatching\n\nIf you're training a network on the entirety of the internet's search queries, you can't exactly feed it all forward and backward through the network at once. The concept of batching is deciding how much of the input data to feed forward (and backward) before updating your weight matrices.\n\n\n\ntraining epochs\n\nthe magic numbers in our for loops above were chosen arbitrarily. A lot of work has also gone into deciding how to optimize the convergence of network training. \n\n\n\nregularization\n\nNeural networks, too, can suffer from overfitting. There are tactics to \n\"dropout\"\n\"pooling\"\n\n\n\n\"deep learning\"\n\nlots of layers\n\n\n\nLinks\nTo save you some time if you want to learn more, here are some of the references that I found the most helpful while researching for this RST:\n\nHacker's guide to Neural Networks\nDeep Learning Basics: Neural Networks, Backpropagation and Stochastic Gradient Descent\nA Neural Network in 11 lines of Python\nA Neural Network in 13 lines of Python\nIntro to Neural Networks\nSingle-Layer Neural Networks and Gradient Descent\nTensorflow Playground" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
PepSalehi/tuthpc
Untitled5.ipynb
bsd-3-clause
[ "MPI and cluster computing\nWhat is large-scale and cluster computing?\n\n\nMPI: the message passing interface (mpi4py and ...)\n\n\nGPU: graphics processing-based parallelism (pyopencl and pycuda)\n\n\nCloud computing (cloud, mrjob, and Apache's mesos/spark/hadoop/zookeeper/...)\n\n\nWe'll focus on mpi4py, as it's probably the most stable and active of the python MPI modules, and generally provides the most in terms of classic scalability to instutional class resources.\nGetting started with mpi4py\n\n\nTypically not an easy_install -- thanks MPI\n\n\nGetting started: say \"hello\"", "%%file hellompi.py\n\"\"\"\nParallel Hello World\n\"\"\"\n\nfrom mpi4py import MPI\nimport sys\n\nsize = MPI.COMM_WORLD.Get_size()\nrank = MPI.COMM_WORLD.Get_rank()\nname = MPI.Get_processor_name()\n\nsys.stdout.write(\n \"Hello, World! I am process %d of %d on %s.\\n\" \n % (rank, size, name))", "Executes with mpiexec", "!mpiexec -n 4 python2.7 hellompi.py", "Coding for multiple \"personalities\" (nodes, actually)\n\nPoint to point communication", "%%file mpipt2pt.py\nfrom mpi4py import MPI\n\ncomm = MPI.COMM_WORLD\nrank, size = comm.Get_rank(), comm.Get_size()\n\nif rank == 0:\n data = range(10)\n more = range(0,20,2)\n print 'rank %i sends data:' % rank, data\n comm.send(data, dest=1, tag=1337)\n print 'rank %i sends data:' % rank, more\n comm.send(more, dest=2 ,tag=1456)\nelif rank == 1:\n data = comm.recv(source=0, tag=1337)\n print 'rank %i got data:' % rank, data\nelif rank == 2:\n more = comm.recv(source=0, tag=1456)\n print 'rank %i got data:' % rank, more\n\n!mpiexec -n 4 python2.7 mpipt2pt.py\n\n%%file mpipt2pt2.py\n'''nonblocking communication\n'''\nfrom mpi4py import MPI\nimport numpy as np\nimport time\n\ncomm = MPI.COMM_WORLD\nrank, size = comm.Get_rank(), comm.Get_size()\n\npair = {0:1, 1:0} # rank 0 sends to 1 and vice versa\nsendbuf = np.zeros(5) + rank\nrecvbuf = np.empty_like(sendbuf)\n\nprint 'rank %i sends data:' % rank, sendbuf\nsreq = comm.Isend(sendbuf, dest=pair[rank], tag=1337)\nrreq = comm.Irecv(recvbuf, source=pair[rank], tag=1337)\n\n# rreq.Wait(); sreq.Wait()\nMPI.Request.Waitall([rreq, sreq])\nif rank == 1: \n time.sleep(0.001) # delay slightly for better printing\nprint 'rank %i got data:' % rank, recvbuf\n\n!mpiexec -n 2 python2.7 mpipt2pt2.py", "Collective communication", "%%file mpiscattered.py\n'''mpi scatter\n'''\nfrom mpi4py import MPI\nimport numpy as np\nimport time\n\ncomm = MPI.COMM_WORLD\nrank, size = comm.Get_rank(), comm.Get_size()\n\nif rank == 0:\n data = np.arange(10)\n print 'rank %i has data' % rank, data\n data_split_list = np.array_split(data, size)\nelse:\n data_split_list = None\ndata_split = comm.scatter(data_split_list, root=0)\n\n# some delays for printing purposes\nif rank == 1:\n time.sleep(0.001)\nelif rank == 2:\n time.sleep(0.002)\nprint 'rank %i got data' % rank, data_split\n\n!mpiexec -n 3 python2.7 mpiscattered.py\n\n%%file mpibroadcasted.py\n'''mpi broadcast\n'''\nfrom mpi4py import MPI\nimport numpy as np\nimport time\n\ncomm = MPI.COMM_WORLD\nrank, size = comm.Get_rank(), comm.Get_size()\n\nN = 10.\ndata = np.arange(N) if rank == 0 else np.zeros(N)\nif rank == 1:\n time.sleep(0.001)\nelif rank == 2:\n time.sleep(0.002)\nprint 'rank %i has data' % rank, data\n\ncomm.Bcast(data, root=0)\nif rank == 1:\n time.sleep(0.001)\nelif rank == 2:\n time.sleep(0.002)\nprint 'rank %i got data' % rank, data\n\n!mpiexec -n 3 python2.7 mpibroadcasted.py", "Not covered: shared memory and shared objects\nBetter serialization", "from mpi4py import 
MPI\n\n# use dill for (de)serialization when it is available, so more object types can be sent\ntry:\n import dill\n MPI._p_pickle.dumps = dill.dumps\n MPI._p_pickle.loads = dill.loads\nexcept (ImportError, AttributeError):\n pass", "Working with cluster schedulers, the JOB file", "%%file jobscript.sh\n#!/bin/sh\n#PBS -l nodes=1:ppn=4\n#PBS -l walltime=00:03:00\ncd ${PBS_O_WORKDIR} || exit 2\nmpiexec -np 4 python hellompi.py", "Beyond mpi4py\n\nThe task Pool: pyina and emcee.utils", "%%file pyinapool.py\n\ndef test_pool(obj):\n from pyina.launchers import Mpi\n x = range(6)\n p = Mpi(8)\n \n # worker pool strategy + dill\n p.scatter = False\n print p.map(obj, x)\n \n # worker pool strategy + dill.source \n p.source = True\n print p.map(obj, x)\n \n # scatter-gather strategy + dill.source\n p.scatter = True\n print p.map(obj, x)\n \n # scatter-gather strategy + dill\n p.source = False\n print p.map(obj, x)\n\n\nif __name__ == '__main__':\n\n from math import sin\n f = lambda x:x+1\n def g(x):\n return x+2\n\n for func in [g, f, abs, sin]:\n test_pool(func)\n\n!python2.7 pyinapool.py", "For emcee, see: http://dan.iel.fm/emcee/current/user/advanced/\n\n\nLoooooong import times: MPI_import\n\n\nInteractive MPI: pyina and IPython.parallel\n\n\nWorking with schedulers directly: pyina and ipython-cluster-helper\n\n\nIssue: conforming to the multiprocessing interface: ...\n\n\nWe are in the tall weeds here... \nThe other end of the spectrum is high-performance parallel instead of large-scale parallel." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
JonasHarnau/apc
apc/vignettes/vignette_misspecification.ipynb
gpl-3.0
[ "Misspecification Tests for Log-Normal and Over-Dispersed Poisson Chain-Ladder Models\nWe replicate the empirical applications in Harnau (2018) in Section 5.\nThe work on this vignette was supported by the European Research Council, grant AdG 694262.\nFirst, we import the package", "import apc\n\n# Turn off future warnings\nimport warnings\nwarnings.simplefilter('ignore', FutureWarning)", "5.1 Log-Normal Chain-Ladder\nThis corresponds to Section 5.1 in the paper. The data are taken from Verrall et al. (2010). Kuang et al. (2015) fitted a log-normal chain-ladder model to this data. The model is given by\n$$ M^{LN}{\\mu, \\sigma^2}: \\quad \\log(Y{ij}) \\stackrel{D}{=} N(\\alpha_i + \\beta_j + \\delta, \\sigma^2). $$\nThey found that the largest residuals could be found within the first five accident years. Consequently, they raised the question whether the model is misspecified. Here, we investigate this question.\nFull model\nWe set up and estimate the full, most restrictive, model $M^{LN}_{\\mu, \\sigma^2}$.\nWe begin by setting up a model class.", "model_VNJ = apc.Model()", "Next, we attach the data for the model. The data come pre-formatted in the package.", "model_VNJ.data_from_df(apc.loss_VNJ(), data_format='CL')", "We fit a log-normal chain-ladder model to the full data.", "model_VNJ.fit('log_normal_response', 'AC')", "and confirm that we get the same result as in the paper for the log-data variance estimate $\\hat{\\sigma}^{2,LN}$ and the degrees of freedom $df$. This should correspond to the values for $\\mathcal{I}$ in Figure 2(b).", "print('log-data variance full model: {:.3f}'.format(model_VNJ.s2))\nprint('degrees of freedom full model: {:.0f}'.format(model_VNJ.df_resid))", "This matches the results in the paper.\nSub-models\nWe move on to split the data into sub-samples. The sub-samples $\\mathcal{I}_1$ and $\\mathcal{I}_2$ contain the first and the last five accident years, respectively. Accident years correspond to \"cohorts\" in age-period-cohort terminology. Rather than first splitting the sample and the generating a new model and fitting it, we make use of the \"sub_model\" functionality of the package which does all that for us. Combined, the sub-models correspond to $M^{LN}$.", "sub_model_VNJ_1 = model_VNJ.sub_model(coh_from_to=(1,5), fit=True)\nsub_model_VNJ_2 = model_VNJ.sub_model(coh_from_to=(6,10), fit=True)", "We can check that this generated the estimates $\\hat{\\sigma}^{2, LN}\\ell$ and degrees of freedom $df\\ell$ from the paper.", "print('First five accident years (I_1)')\nprint('-------------------------------')\nprint('log-data variance: {:.3f}'.format(sub_model_VNJ_1.s2))\nprint('degrees of freedom: {:.0f}\\n'.format(sub_model_VNJ_1.df_resid))\n\nprint('Last five accident years (I_2)')\nprint('------------------------------')\nprint('log-data variance: {:.3f}'.format(sub_model_VNJ_2.s2))\nprint('degrees of freedom: {:.0f}'.format(sub_model_VNJ_2.df_resid))", "Reassuringly, it does. We can then also compute the weighted average predictor $\\bar{\\sigma}^{2,LN}$", "s2_bar_VNJ = ((sub_model_VNJ_1.s2 * sub_model_VNJ_1.df_resid \n + sub_model_VNJ_2.s2 * sub_model_VNJ_2.df_resid)\n /(sub_model_VNJ_1.df_resid + sub_model_VNJ_2.df_resid))\nprint('Weighted avg of log-data variance: {:.3f}'.format(s2_bar_VNJ))", "Check!\nTesting for common variances\nNow we can move on to test the hypothesis of common variances\n$$ H_{\\sigma^2}: \\sigma^2_1 = \\sigma^2_2. 
$$\nThis corresponds to testing for a reduction from $M^{LN}$ to $M^{LN}_{\\sigma^2}$.\nFirst, we can conduct a Bartlett test. This functionality is pre-implemented in the package.", "bartlett_VNJ = apc.bartlett_test([sub_model_VNJ_1, sub_model_VNJ_2])", "The test statistic $B^{LN}$ is computed as the ratio of $LR^{LN}$ to the Bartlett correction factor $C$. The p-value is computed by the $\\chi^2$ approximation to the distribution of $B^{LN}$. The number of sub-samples is given by $m$.", "for key, value in bartlett_VNJ.items():\n print('{}: {:.2f}'.format(key, value))", "We get the same results as in the paper. Specifically, we get a p-value of $0.09$ for the hypothesis so that the Bartlett test does not arm us with strong evidence against the null hypothesis.\nIn the paper, we also conduct an $F$-test for the same hypothesis. The statistic is computed as\n$$ F_{\\sigma^2}^{LN} = \\frac{\\hat\\sigma^{2,LN}2}{\\hat\\sigma^{2,LN}_1} $$\nwhich, under the null, is distributed as $\\mathrm{F}{df_2, df_1}$. This is not directly implemented in the package but still easily computed. \nFirst we compute the test statistic", "F_VNJ_sigma2 = sub_model_VNJ_2.s2/sub_model_VNJ_1.s2\nprint('F statistic for common variances: {:.2f}'.format(F_VNJ_sigma2))", "Now we can compute p-values in one-sided and two-sided tests. \nFor an (equal-tailed) two-sided test, we first find the percentile $P(F_{\\sigma^2}^{LN} \\leq \\mathrm{F}_{df_2, df_1})$. This is given by", "from scipy import stats\nF_VNJ_sigma2_percentile = stats.f.cdf(\n F_VNJ_sigma2, dfn=sub_model_VNJ_2.df_resid, dfd=sub_model_VNJ_1.df_resid\n)\nprint('Percentile of F statistic: {:.2f}'.format(F_VNJ_sigma2_percentile))", "If this is below the 50th percentile, the p-value is simply twice the percentile, otherwise we subtract the percentile from unity and multiply that by two. For intuition, we can look at the plot below. The green areas in the lower and upper tail of the distribution contain the same probability mass, namely $P(F_{\\sigma^2}^{LN} \\leq \\mathrm{F}_{df_2, df_1})$. The two-sided p-value corresponds to the sum of the two areas.", "import matplotlib.pyplot as plt\nimport numpy as np\n%matplotlib inline\n\nx = np.linspace(0.01,5,1000)\ny = stats.f.pdf(x, \n dfn=sub_model_VNJ_2.df_resid, \n dfd=sub_model_VNJ_1.df_resid)\nplt.figure()\nplt.plot(x, y, label='$\\mathrm{F}_{df_2, df_1}$ density')\n\nplt.axvline(F_VNJ_sigma2, color='black', linewidth=1, label='$F^{LN}_{\\sigma^2}$')\ntmp = stats.f.cdf(F_VNJ_sigma2, \n dfn=sub_model_VNJ_2.df_resid, \n dfd=sub_model_VNJ_1.df_resid)\nplt.fill_between(x[x < F_VNJ_sigma2], y[x < F_VNJ_sigma2], color='green', alpha=0.3)\ntmp = stats.f.ppf(1-tmp,\n dfn=sub_model_VNJ_2.df_resid, \n dfd=sub_model_VNJ_1.df_resid)\nplt.fill_between(x[x > tmp], y[x > tmp], color='green', alpha=0.3)\nplt.annotate('Area 0.06', xy=(0.15, 0.1), \n xytext=(0.75, 0.15), arrowprops=dict(facecolor='black'))\nplt.annotate('Area 0.06', xy=(2.75, 0.025), \n xytext=(3, 0.2), arrowprops=dict(facecolor='black'))\nplt.legend()\nplt.title('Two-sided F-test')\nplt.show()", "Since $F_{\\sigma^2}^{LN}$ is below the 50th percentile, the two-sided equal tailed p-value is in our case given by", "print('F test two-sided p-value: {:.2f}'.format(\n 2*np.min([F_VNJ_sigma2_percentile, 1-F_VNJ_sigma2_percentile])\n)\n )", "The one-sided p-value for the hypothesis $H_{\\sigma^2}: \\sigma^2_1 \\leq \\sigma^2_2$ simply corresponds to the area in the lower tail of the distribution. 
This is because the statistic is $\\hat\\sigma^{2,LN}_2/\\hat\\sigma^{2,LN}_1$ so that smaller values work against our hypothesis. Thus, the rejection region is the lower tail.\nRemark: in the paper, the one-sided hypothesis is given as $H_{\\sigma^2}: \\sigma^2_1 > \\sigma^2_2$. This is a mistake as this corresponds to the alternative.", "print('F statistic one-sided p-value: {:.2f}'.format(F_VNJ_sigma2_percentile))", "Testing for common linear predictors\nWe can move on to test for common linear predictors:\n$$ H_{\\mu, \\sigma^2}: \\sigma^2_1 = \\sigma^2_2 \\quad \\text{and} \\quad \\alpha_{i,\\ell} + \\beta_{j,\\ell} + \\delta_\\ell = \\alpha_i + \\beta_j + \\delta $$\nIf we are happy to accept the hypothesis of common variances $H_{\\sigma^2}: \\sigma^2_1 = \\sigma^2_2$, we can test $H_{\\mu, \\sigma^2}: \\sigma^2_1$ with a simple $F$-test; corresponding to a reduction from $M^{LN}{\\sigma^2}$ to $M^{LN}{\\mu, \\sigma^2}$ The test is implemented in the package.", "f_linpred_VNJ = apc.f_test(model_VNJ, [sub_model_VNJ_1, sub_model_VNJ_2])", "This returns the test statistic $F_\\mu^{LN}$ along with the p-value.", "for key, value in f_linpred_VNJ.items():\n print('{}: {:.2f}'.format(key, value))", "These results, too, much those from the paper.\n5.2 Over-dispersed Poisson Chain-Ladder\nThis corresponds to Section 5.2 in the paper. The data are taken from Taylor and Ashe (1983). For this data, the desired full model is an over-dispersed Poisson model given by\n$$ M^{ODP}{\\mu, \\sigma^2}: \\quad E(Y{ij}) = \\exp(\\alpha_i + \\beta_j + \\delta), \\quad \\frac{\\mathrm{var}(Y_{ij})}{E(Y_{ij})} = \\sigma^2. $$\nWe proceed just as we did above. First, we set up and estimate the full model and the sub-models. Second, we compute the Bartlett test for common over-dispersion. Third, we test for common linear predictors.\nFinally, we repeat the testing procedure for different sub-sample structures.\nFull model\nWe set up and estimate the model $M^{ODP}_{\\mu, \\sigma^2}$ on the full data set.", "model_TA = apc.Model()\n\nmodel_TA.data_from_df(apc.data.pre_formatted.loss_TA(), data_format='CL')\n\nmodel_TA.fit('od_poisson_response', 'AC')\n\nprint('log-data variance full model: {:.0f}'.format(model_TA.s2))\nprint('degrees of freedom full model: {:.0f}'.format(model_TA.df_resid))", "Sub-models\nWe set up and estimate the models on the four sub-samples. Combined, these models correspond to $M^{ODP}$.", "sub_model_TA_1 = model_TA.sub_model(per_from_to=(1,5), fit=True)\nsub_model_TA_2 = model_TA.sub_model(coh_from_to=(1,5), age_from_to=(1,5),\n per_from_to=(6,10), fit=True)\nsub_model_TA_3 = model_TA.sub_model(age_from_to=(6,10), fit=True)\nsub_model_TA_4 = model_TA.sub_model(coh_from_to=(6,10), fit=True)\n\nsub_models_TA = [sub_model_TA_1, sub_model_TA_2, \n sub_model_TA_3, sub_model_TA_4]\n\nfor i, sm in enumerate(sub_models_TA):\n print('Sub-sample I_{}'.format(i+1))\n print('--------------')\n print('over-dispersion: {:.0f}'.format(sm.s2))\n print('degrees of freedom: {:.0f}\\n'.format(sm.df_resid))\n \ns2_bar_TA = np.array([sm.s2 for sm in sub_models_TA]).dot(\n np.array([sm.df_resid for sm in sub_models_TA])\n)/np.sum([sm.df_resid for sm in sub_models_TA])\nprint('Weighted avg of over-dispersion: {:.0f}'.format(s2_bar_TA))", "Testing for common over-dispersion\nWe perform a Bartlett test for the hypothesis of common over-dispersion across sub-samples $H_{\\sigma^2}: \\sigma^2_\\ell = \\sigma^2$. 
This corresponds to testing a reduction from $M^{ODP}$ to $M^{ODP}_{\\sigma^2}$.", "bartlett_TA = apc.bartlett_test(sub_models_TA)\n\nfor key, value in bartlett_TA.items():\n print('{}: {:.2f}'.format(key, value))", "These results match those in the paper. The Bartlett test yields a p-value of 0.08.\nTesting for common linear predictors\nIf we are happy to impose common over-dispersion, we can test for common linear predictors across sub-samples. Then, this corresponds to a reduction from $M^{ODP}{\\sigma^2}$ to $M^{ODP}{\\mu, \\sigma^2}$.", "f_linpred_TA = apc.f_test(model_TA, sub_models_TA)\n\nfor key, value in f_linpred_TA.items():\n print('{}: {:.2f}'.format(key, value))", "Repeated testing\nIn the paper, we also suggest a procedure to repeat the tests for different sub-sample structures, using a Bonferroni correction for size-control.", "sub_models_TA_2 = [model_TA.sub_model(coh_from_to=(1,5), fit=True),\n model_TA.sub_model(coh_from_to=(6,10), fit=True)]\n\nsub_models_TA_3 = [model_TA.sub_model(per_from_to=(1,4), fit=True),\n model_TA.sub_model(per_from_to=(5,7), fit=True),\n model_TA.sub_model(per_from_to=(8,10), fit=True)]\n\nprint('Two sub-samples')\nprint('---------------')\nprint('Bartlett')\nprint('--------')\nfor key, value in apc.bartlett_test(sub_models_TA_2).items():\n print('{}: {:.2f}'.format(key, value))\n\nprint('\\nF-test')\nprint('------')\nfor key, value in apc.f_test(model_TA, sub_models_TA_2).items():\n print('{}: {:.2f}'.format(key, value))\n \nprint('\\nThree sub-samples')\nprint('-----------------')\nprint('Bartlett')\nprint('--------')\nfor key, value in apc.bartlett_test(sub_models_TA_3).items():\n print('{}: {:.2f}'.format(key, value))\n\nprint('\\nF-test')\nprint('------')\nfor key, value in apc.f_test(model_TA, sub_models_TA_3).items():\n print('{}: {:.2f}'.format(key, value))", "The test results match those in the paper. \nFor a quick refresher on the Bonferroni correction we turn to Wikipedia. The idea is to control the family wise error rate, the probability of rejecting at least one null hypothesis when the null is true.\nIn our scenario, we repeat testing three times. Each individual repetition is comprised of two sequential tests: a Bartlett and an $F$-test. \nUnder the null hypothesis (so the true model is $M_{\\mu, \\sigma^2}^{ODP}$), the two tests are independent so\n$$P(\\text{reject $F$-test } | \\text{ not-reject Bartlett test}) = P(\\text{reject $F$-test}).$$\nThus, if we test at level $\\alpha$, the probability to reject at least once within a repetition is not $\\alpha$ but $1-(1-\\alpha)^2 \\approx 2\\alpha$:\n$$ P(\\text{Reject Bartlett or F-test at level }\\alpha \\text{ for a given split}) \\approx 2 \\alpha .$$\nFor thrice repeated testing, we replace $\\alpha$ by $\\alpha/3$. Then, we bound the probability to reject when the null is true with\n$$ P\\left{\\cup_{i=1}^3\\left(\\text{Reject Bartlett or F-test at level } \\frac{\\alpha}{3} \\text{ for split }i\\right)\\right} \\leq 2\\alpha \\quad \\text{(approximately)} .$$\n5.3 Log-Normal (Extended) Chain-Ladder\nThis corresponds to Section 5.3 in the paper. The data are taken from Barnett and Zehnwirth (2000). These data are commonly modeled with a calendar effect. We consider misspecification tests both for a model without $M^{LN}$ and with $M^{LNe}$ a calendar effect $\\gamma$. 
The models are given by\n$$ M^{LN}{\\mu, \\sigma^2}: \\quad \\log(Y{ij}) \\stackrel{D}{=} N(\\alpha_i + \\beta_j + \\delta, \\sigma^2)$$\nand\n$$ M^{LNe}{\\mu, \\sigma^2}: \\quad \\log(Y{ij}) \\stackrel{D}{=} N(\\alpha_i + \\beta_j + \\gamma_k + \\delta, \\sigma^2). $$\nNo calendar effect\nWe set up and estimate the model $M^{LN}_{\\mu, \\sigma^2}$ on the full data set.", "model_BZ = apc.Model()\n\nmodel_BZ.data_from_df(apc.data.pre_formatted.loss_BZ(), time_adjust=1, data_format='CL')\n\nmodel_BZ.fit('log_normal_response', 'AC')\n\nprint('log-data variance full model: {:.4f}'.format(model_BZ.s2))\nprint('degrees of freedom full model: {:.0f}'.format(model_BZ.df_resid))", "Next, the models for the sub-samples.", "sub_models_BZ = [model_BZ.sub_model(per_from_to=(1977,1981), fit=True),\n model_BZ.sub_model(per_from_to=(1982,1984), fit=True), \n model_BZ.sub_model(per_from_to=(1985,1987), fit=True)]\n\nfor i, sm in enumerate(sub_models_BZ):\n print('Sub-sample I_{}'.format(i+1))\n print('--------------')\n print('over-dispersion: {:.4f}'.format(sm.s2))\n print('degrees of freedom: {:.0f}\\n'.format(sm.df_resid))\n \ns2_bar_BZ = np.array([sm.s2 for sm in sub_models_BZ]).dot(\n np.array([sm.df_resid for sm in sub_models_BZ])\n)/np.sum([sm.df_resid for sm in sub_models_BZ])\nprint('Weighted avg of over-dispersion: {:.4f}'.format(s2_bar_BZ))", "We move on the Bartlett test for the hypothesis of common log-data variances across sub-samples $H_{\\sigma^2}: \\sigma^2_\\ell = \\sigma^2$.", "bartlett_BZ = apc.bartlett_test(sub_models_BZ)\n\nfor key, value in bartlett_BZ.items():\n print('{}: {:.2f}'.format(key, value))", "The Bartlett test yields a p-value of 0.05 as in the paper.\nWe test for common linear predictors across sub-samples.", "f_linpred_BZ = apc.f_test(model_BZ, sub_models_BZ)\n\nfor key, value in f_linpred_BZ.items():\n print('{}: {:.2f}'.format(key, value))", "Calendar effect\nNow we redo the same for the model with calendar effect.", "model_BZe = apc.Model()\n\nmodel_BZe.data_from_df(apc.data.pre_formatted.loss_BZ(), time_adjust=1, data_format='CL')\n\nmodel_BZe.fit('log_normal_response', 'APC') # The only change is in this line. \n\nprint('log-data variance full model: {:.4f}'.format(model_BZe.s2))\nprint('degrees of freedom full model: {:.0f}'.format(model_BZe.df_resid))\n\nsub_models_BZe = [model_BZe.sub_model(per_from_to=(1977,1981), fit=True),\n model_BZe.sub_model(per_from_to=(1982,1984), fit=True), \n model_BZe.sub_model(per_from_to=(1985,1987), fit=True)]\n\nfor i, sm in enumerate(sub_models_BZe):\n print('Sub-sample I_{}'.format(i+1))\n print('--------------')\n print('over-dispersion: {:.4f}'.format(sm.s2))\n print('degrees of freedom: {:.0f}\\n'.format(sm.df_resid))\n \ns2_bar_BZe = np.array([sm.s2 for sm in sub_models_BZe]).dot(\n np.array([sm.df_resid for sm in sub_models_BZe])\n)/np.sum([sm.df_resid for sm in sub_models_BZe])\nprint('Weighted avg of log-data variances: {:.4f}'.format(s2_bar_BZe))\n\nbartlett_BZe = apc.bartlett_test(sub_models_BZe)\n\nprint('\\nBartlett test')\nprint('-------------')\n\nfor key, value in bartlett_BZe.items():\n print('{}: {:.2f}'.format(key, value))\n\nprint('\\nF-test')\nprint('------')\n \nf_linpred_BZe = apc.f_test(model_BZe, sub_models_BZe)\n\nfor key, value in f_linpred_BZe.items():\n print('{}: {:.2f}'.format(key, value))", "With this, we replicated Figure 4b.\nCloser look at the effect of dropping the calendar effect\nIn the paper, we move on to take a closer look at the effect of dropping the calendar effect. 
We do so in two ways starting with $$M^{LNe}{\\sigma^2}: \\stackrel{D}{=} N(\\alpha{i, \\ell} + \\beta_{j, \\ell} + \\gamma_{k, \\ell} + \\delta_\\ell, \\sigma^2).$$ \nWe want to test for a reduction to \n$$M^{LN}_{\\mu, \\sigma^2}: \\stackrel{D}{=} N(\\alpha_i + \\beta_j + \\delta, \\sigma^2).$$ \nIn the figure below, we illustrate two different testing procedures that would get us to there. \n<center>\n <img src=\"https://user-images.githubusercontent.com/25103918/41599423-27d94fec-73a1-11e8-9fe1-3f3a1a9e184a.png\" \n alt=\"Two ways to test for reduction to the same model\"\n width=\"400px\"/>\n</center>\n\nWe can move down, testing $H^{LNe}{\\sigma^2, \\mu}$, and then right, testing $H\\gamma: \\gamma_k = 0$\nWe can move right, testing $H_{\\gamma_{k, \\ell}}: \\gamma_{k, \\ell} = 0$, and then down, testing $H^{LN}_{\\sigma^2, \\mu}$\n\nLooking at the first way, we already saw that $H_{\\gamma_{k, \\ell}}: \\gamma_{k, \\ell} = 0$ cannot be rejected. To test for the absence of a calendar effect, we can do an (exact) $F$ test.", "model_BZe.fit_table(attach_to_self=False).loc[['AC']]", "We see that the p-value (P&gt;F) is close to zero.\nNext, we consider the second way. We first test $H_{\\gamma_{k, \\ell}}$. Since $\\sigma^2$ is common across the array from the outset, we can do this with a simple $F$-test:\n$$ \\frac{(RSS_.^{LN} - RSS_.^{LNe})/(df_.^{LN} - df_.^{LNe})}{RSS_.^{LNe}/df_.^{LNe}} \\stackrel{D}{=} F_{df_.^{LN} - df_.^{LNe}, df_.^{LNe}} $$", "rss_BZe_dot = np.sum([sub.rss for sub in sub_models_BZe])\nrss_BZ_dot = np.sum([sub.rss for sub in sub_models_BZ])\ndf_BZe_dot = np.sum([sub.df_resid for sub in sub_models_BZe])\ndf_BZ_dot = np.sum([sub.df_resid for sub in sub_models_BZ])\n\nF_BZ = ((rss_BZ_dot - rss_BZe_dot)/(df_BZ_dot - df_BZe_dot)) / (rss_BZe_dot/df_BZe_dot)\np_F_BZ = stats.f.sf(F_BZ, dfn=df_BZ_dot - df_BZe_dot, dfd=df_BZe_dot)\nprint('p-value of F-test: {:.2f}'.format(p_F_BZ))", "Thus this is not rejected. However, we already saw that a reduction from $M^{LN}{\\sigma^2}$ to $M^{LN}{\\mu, \\sigma^2}$ is rejected.\nRepeated testing\nJust as for the Taylor and Ashe (1983) data, we repeat testing for different splits.", "sub_models_BZe_2 = [model_BZe.sub_model(coh_from_to=(1977,1981), fit=True),\n model_BZe.sub_model(coh_from_to=(1982,1987), fit=True)]\n\nsub_models_BZe_4 = [model_BZe.sub_model(per_from_to=(1977,1981), fit=True), \n model_BZe.sub_model(coh_from_to=(1977,1982), age_from_to=(1,5),\n per_from_to=(1982,1987), fit=True),\n model_BZe.sub_model(age_from_to=(6,11), fit=True),\n model_BZe.sub_model(coh_from_to=(1983,19871), fit=True)]\n \nprint('Two sub-samples')\nprint('---------------')\nprint('Bartlett')\nprint('--------')\nfor key, value in apc.bartlett_test(sub_models_BZe_2).items():\n print('{}: {:.3f}'.format(key, value))\n\nprint('\\nF-test')\nprint('------')\nfor key, value in apc.f_test(model_BZe, sub_models_BZe_2).items():\n print('{}: {:.3f}'.format(key, value))\n \nprint('\\nFour sub-samples')\nprint('----------------')\nprint('Bartlett')\nprint('--------')\nfor key, value in apc.bartlett_test(sub_models_BZe_4).items():\n print('{}: {:.2f}'.format(key, value))\n\nprint('\\nF-test')\nprint('------')\nfor key, value in apc.f_test(model_BZe, sub_models_BZe_4).items():\n print('{}: {:.2f}'.format(key, value))", "Again, we replicated the results from the paper.\nReferences\n\nBarnett, G., & Zehnwirth, B. (2000). Best estimates for reserves. Proceedings of the Casualty Actuarial Society, 87(167), 245–321.\nHarnau, J. (2018). 
Misspecification Tests for Log-Normal and Over-Dispersed Poisson Chain-Ladder Models. Risks, 6(2), 25. \nKuang, D., Nielsen, B., & Nielsen, J. P. (2015). The geometric chain-ladder. Scandinavian Actuarial Journal, 2015(3), 278–300.\nTaylor, G. C., & Ashe, F. R. (1983). Second moments of estimates of outstanding claims. Journal of Econometrics, 23(1), 37–61. \nVerrall, R., Nielsen, J. P., & Jessen, A. H. (2010). Prediction of RBNS and IBNR claims using claim amounts and claim counts. ASTIN Bulletin, 40(2), 871–887." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
AaronCWong/phys202-2015-work
days/day20/MoviePy.ipynb
mit
[ "Making Animations using MoviePy\nThis notebook shows how to make animations using MoviePy and Matplotlib. Here are links to the MoviePy documentation and a short tutorial:\n\nhttp://zulko.github.io/moviepy/\nhttp://zulko.github.io/blog/2014/11/29/data-animations-with-python-and-moviepy/\n\nLet's start by importing everything we need:", "%matplotlib inline\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom moviepy.video.io.bindings import mplfig_to_npimage\nimport moviepy.editor as mpy", "To create an animation we need to do two things:\n\nCreate the initial visualization, with handles on the figure and axes object.\nWrite a function that will get called for each frame that updates the data and returns the next frame.", "duration = 10.0 # this is the total time\n\nN = 500\n\n# Make the initial plot outside the animation function\nfig_mpl, ax = plt.subplots(1,figsize=(5,3), facecolor='white')\nx = np.random.normal(0.0, 1.0, size=N)\ny = np.random.normal(0.0, 1.0, size=N)\nplt.sca(ax)\nplt.xlim(-3,3)\nplt.ylim(-3,3)\nscat = ax.scatter(x, y)\n\ndef make_frame_mpl(t):\n # t is the current time between [0,duration]\n newy = y*np.cos(4.0*t/duration)\n # Just update the data on each frame\n # set_offset takes a Nx2 dimensional array of positions\n scat.set_offsets(np.transpose(np.vstack([x, newy])))\n # The mplfig_to_npimage convert the matplotlib figure to an image that\n # moviepy can work with:\n return mplfig_to_npimage(fig_mpl)\n\nanimation = mpy.VideoClip(make_frame_mpl, duration=duration)", "Use the following call to generate and display the animation in the notebook:", "animation.ipython_display(fps=24)", "Use the following to save the animation to a file that can be uploaded you YouTube:", "animation.write_videofile(\"scatter_animation.mp4\", fps=20)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
LSSTC-DSFP/LSSTC-DSFP-Sessions
Sessions/Session05/Day5/MultiwavelengthPhotometry.ipynb
mit
[ "Matched wavelength photometry\nVersion 0.1\nFor today's problem, we will perform matched-aperture photometry in 3 bands on multiple galaxies within a rich galaxy cluster. Ultimately, we will be looking for trends in galaxy colors and other properties as a function of cluster radius.\nNote - we will use astropy for these tasks, though the use of Source Extractor is more standard within the galaxy community.\n\nBy M Alpaslan (NYU) & AA Miller (CIERA/Northwestern & Adler)\nProblem 0) Install photutils\nIf you have not already done so, install the photutils package from the astropy conda channel within your DSFP environment. You will also need the scikit-image package.\nconda install -c astropy photutils\nconda install scikit-image", "import numpy as np\nimport pandas as pd\nimport astropy\nfrom astropy.io import fits\nfrom photutils.aperture import CircularAperture, CircularAnnulus, EllipticalAperture, EllipticalAnnulus\nfrom photutils.segmentation import detect_sources, source_properties\nfrom photutils.detection import detect_threshold\nfrom photutils.centroids import centroid_com\nfrom photutils import aperture_photometry\nfrom photutils.utils import calc_total_error\nimport matplotlib.pyplot as plt\n\n%matplotlib notebook", "Problem 1) Download and Examine the Data\nThe images for this exercise can be downloaded from here: https://northwestern.box.com/s/x6nzuqtdys3jo1nufvswkx62o44ifa11. Be sure to place the images in the same directory as this notebook (but do not add them to your git repo!).\nBefore we dive in, here is some background information on the images we will be analyzing: the imaging data and the group information all come from the Galaxy And Mass Assembly (GAMA) survey; and more specifically, its panchromatic data release. \nMany of the difficult steps associated with multiband galaxy photometry have already been done for you: GAMA constructs large mosaics of co-added FITS images in 20 bands to measure photometry. The images we will use today are from the g, r, and i mosaics that I (MA) built $\\sim$7 years ago. They are built from SDSS observations in those bands, and have all been convolved to a seeing of approximately 2”, background subtracted, and renormalized to a common zeropoint of 30 magnitudes. The group catalogue was done by Aaron Robotham (see https://arxiv.org/abs/1106.1994).\nIn the downloaded directory there are g, r, and i images of 36 galaxies that all belong to the same cluster. These image cutouts have been centered on the galaxy position, are $\\sim$80.7\" on a side, and have a pixel scale of 0.339\"/pix.\nTo begin we will focus on a single galaxy, before eventually working on the entire cluster. \nProblem 1a\nDisplay the $r$-band image of the galaxy 85698. Use a asinh stretch.", "r_filename = \"galaxy_images/85698_sdss_r.fits\"\nr_data = fits.getdata( # complete\n\nplt.imshow( # complete\n\nplt.colorbar()\nplt.tight_layout()", "Problem 1b\nRoughly how many sources are present in the image?\nHint - an exact count is not required here.\nSolution 1b\nWrite your answer here\nProblem 2) Source Detection\nPrior to measuring any properties of sources in the image, we must first determine the number of sources present in the image. Source detection is challenging, and there are many different thresholding approaches. \nToday, we will streamline this step in order to spend more time focusing on the issues associated with matching photometric measurements across different images. 
We will use the detect_sources function in photutils to identify objects in our image.\nThe simplest model assumes that the background is constant over the entire image. Once the background is determined, it can be subtracted from the image to determine high significance \"peaks\" corresponding to sources. After this week, we have learned that the background isn't so simple, nevertheless we will use the detect_threshold convenience function to estimate a constant background for our images. detect_threshold produces a \"detection image\" that can be used to estimate the significance of the flux detected in any individual pixel.\nProblem 2a \nCreate a detection threshold image using the detect_threshold function, set the snr parameter to 3.", "threshold = detect_threshold( # complete", "Problem 2b\nDevelop better intuition for the detection image by plotting it side-by-side with the actual image of the field.\nDo you notice anything interesting about the threshold image?", "fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(7,4))\n\nax1.imshow( # complete\nax2.imshow( # complete\nfig.tight_layout()", "Following this measurement of the background, we can find sources using the detect_sources function. Briefly, this function uses image segmentation to define and assign pixels to sources, which are defined as objects with $N$ connected pixels that are $s$ times brighter than the background (we already set $s = 3$). Read the docs for further details.\nProblem 2c\nGenerate a segmentation image using detect_sources. Keep only sources with $N = 7$ pixels, which is keyword arg npixels in detect_sources.\nIf you have extra time Come back to this problem and see how changing $N$ affects your results.", "segm = detect_sources( # complete", "Problem 2d\nPlot the segmentation image side-by-side with the actual image of the field.\nAre you concerned or happy with the results?\nHint - no stretch should be applied to the segmentation image.", "fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(7,4))\n\nax1.imshow(# complete\nax2.imshow(# complete\nfig.tight_layout()", "Problem 3) Source Centroids and Shapes\nNow that we have defined all of the sources in the image, we must determine the centroid for each source (in order to ultimately make some form of photometric measurement). As Dora mentioned earlier in the week, there are many ways to determine the centroid of a given source (e.g., fitting a model, finding the max of the marginalized 1-d distribution, etc). 
Today we will use the centroid_com function, which calculates the \"center of mass\" of the 2d image moments to determine the source centroids.\nTo measure the centroid we want to isolate the source in question, thus we have generated a convenience function to return the extent of each source from its corresponding segmentation image.", "def get_source_extent(segm_data, source_num):\n \"\"\"\n Determine extent of sources for centroid measurements\n \n Parameters\n ----------\n segm_data : array-like\n Segementation image produced by photutils.segmentation.detect_sources\n \n source_num : int\n The source number from the segmentation image\n \n Returns\n -------\n source_extent : list\n The minimum y, maximum y, minimum x, and maximum x pixel values \n over which a source is detected\n \"\"\"\n source_pix = np.where(segm_data == source_num)\n source_extent = [np.min(source_pix[0]), np.max(source_pix[0]), \n np.min(source_pix[1]), np.max(source_pix[1])]\n\n return source_extent", "Problem 3a \nMeasure the centroid for each source detected in the image using the centroid_com function.\nHint - you'll want to start with a subset of pixels containing the source.\nHint 2 - centroids are measured relative to the provided data, you'll need to convert back to \"global\" pixel values.", "xcentroid = np.zeros_like(np.unique(segm.data)[1:], dtype=\"float\")\nycentroid = np.zeros_like(np.unique(segm.data)[1:], dtype=\"float\")\n\nfor source_num in np.unique(segm.data)[1:]:\n source_extent = get_source_extent( # complete\n xc, yc = centroid_com( # complete\n xcentroid[source_num-1], ycentroid[source_num-1] = # complete", "Problem 3b\nOverplot the derived centroids on the image data as a sanity check for your methodology.", "fig, ax1 = plt.subplots()\n\nax1.imshow( # complete\nax1.plot( # complete\nfig.tight_layout()", "With an estimate of the centroid of every source in hand, we now need to determine the ellipse that best describes the galaxies in order to measure their flux. Fortunately, this can be done using the source_properties function within photutils.morphology package.\nBriefly, source_properties takes both the data array, and the segmentation image as inputs, and then calculates properties for every source. The list of properties is long (see the attributes list), and for now we only care about the semi-major and semi-minor axes as well as the orientation of the source, all of which are needed to measure the flux in an elliptical aperture [this is a lot easier than trying to fit concentric ellipses, no?].\nProblem 3c\nUsing source_properties to determine $a$, $b$, and the orientation of each source.", "cat = source_properties( # complete\ntbl = cat.to_table(columns=['id', 'semimajor_axis_sigma','semiminor_axis_sigma', 'orientation'])", "Problem 4) Photometry\nWe now have all the necessary information to measure the flux in elliptical apertures. The EllipticalAperture function in photutils defines apertures on an image based on input centroids, $a$, $b$, and orientation values. \nProblem 4a\nDefine apertures for the sources that are detected in the image.\nNote - the semimajor_axis_sigma reported by source_properties() is the \"The 1-sigma standard deviation along the semimajor axis of the 2D Gaussian function that has the same second-order central moments as the source\" according to the docs. 
Thus, be sure to multiple $a$ and $b$ by a factor of 3 in order to capture $\\sim$3$\\sigma$ of the source flux.\nNote to the note - this isn't well motivated, but for the sake of argument assume that this adjustment creates a reasonable aperture.", "positions = # complete\n\napertures = [EllipticalAperture( # complete\n # complete\n # complete\n # complete", "Problem 4b \nOverplot your apertures on the sources that have been detected.\nHint - each aperture object has a plot() attribute that can be used to show the aperture for each source.", "fig, ax1 = plt.subplots()\n\nax1.imshow( # complete\n# complete\n# complete\nfig.tight_layout()", "With apertures now defined, we can finally measure the flux of each source. The aperture_photometry function returns the flux (actually counts) in an image for the provided apertures. It takes the image, apertures, and bakground image as arguments.\nNote - the background has already been subtracted from these images so we currently do not have an estimate of the full background for these sources.\nWe will create a background image that is approximately correct (we know this because we know the properties of the SDSS survey and detector). In this case what we are doing is not only incorrect, it's entirely made up and should not be repeated in your own work. Nevertheless, this (bad) approximation is necessary to produce uncertainty estimates.\nExecute the cell below to create an uncertainty image to use with the aperture_photometry function.", "bkg = np.random.normal(100, 35, r_data.shape)\nuncertainty_img = calc_total_error(r_data, bkg - np.mean(bkg), 1)", "Problem 4c\nMeasure the counts and uncertainty detected from each source within the apertures defined in 4a. \nHint - you will need to loop over each aperture as aperture_photometry does not take multiple apertures of different shapes as a single argument.", "source_cnts = # complete\nsource_cnts_unc = # complete\nfor source_num, ap in enumerate(apertures):\n phot = # complete\n source_cnts[source_num] = # complete\n source_cnts_unc[source_num] = # complete", "The images have been normalized to a zero point of 30. Thus, we can convert from counts to magnitudes via the following equation: \n$$m = 30 - 2.5 \\log (\\mathrm{counts}).$$\nRecall from Dora's talk that the uncertainty of the magnitude measurements can be calculated as: \n$$\\frac{2.5}{\\ln(10)} \\frac{\\sigma_\\mathrm{counts}}{\\mathrm{counts}}.$$\nProblem 4d\nCalculate the magnitude of each source in the image.", "source_mag = # complete\nsource_mag_unc = # complete\n\nfor source_num, (mag, mag_unc) in enumerate(zip(source_mag, source_mag_unc)):\n print(\"Source {:d} has m = {:.3f} +/- {:.3f} mag\".format( # complete", "That's it! You've measured the magnitude for every source in the image.\nAs previously noted, the images provided for this dataset are centered are galaxies within a cluster, and ultimately, these galaxies are all that we care about. For this first image, that means we care about the galaxy centered at $(x,y) \\approx (118, 118)$. \nProblem 4e\nWhat is the magnitude of the galaxy we care about for this image? [We will need this moving forward]", "# complete", "Problem 5) Multiwavelength Photometry\nUltimately we want to measure colors for these galaxies. We now know the $r$-band magnitude for galaxy 85698, we need to measure the $g$ and $i$ band magnitudes as well. \nProblem 5a Using the various pieces described above, write a function to measure the magnitude of the galaxy at the center of the image. 
You should create a new background image for every field. \nHint - creating an actual function is essential as we will eventually run this on every image. \nHint 2 - source_properties directly measures source centroids, use this it will be faster.", "def cluster_galaxy_photometry(data):\n '''\n Determine the magnitude of the galaxy at the center of the image\n \n Parameters\n ----------\n data : array-like\n Background subtracted 2D image centered on the galaxy\n of interest\n \n Returns\n -------\n mag : float\n Magnitude of the galaxy\n \n mag_unc : float\n Uncertainty of the magnitude measurement\n '''\n\n # complete\n # complete\n # complete\n # complete\n # complete\n # complete\n # complete\n # complete\n # complete\n # complete\n # complete\n # complete\n # complete\n \n \n return mag, mag_unc", "Problem 5b\nConfirm that the function calculates the same $r$-band mag that was calculated in Problem 4.", "# complete\n\nprint(\"\"\"Previously, we found m = {:.3f} mag. \nThis new function finds m = {:.3f} mag.\"\"\".format( # complete", "Problem 5c \nUse this new function to calculate the galaxy magnitude in the $g$ and the $i$ band, and determine the $g - r$ and $r - i$ colors of the galaxy.", "g_data = fits.getdata( # complete\ni_data = fits.getdata( # complete\n\n# complete\n# complete\n# complete\nprint(\"\"\"The g-r color = {:.3f} +/- {:.3f} mag.\nThe r-i color = {:.3f} +/- {:.3f} mag\"\"\".format(g_mag - r_mag, np.hypot(g_mag_unc, r_mag_unc), \n r_mag - i_mag, np.hypot(r_mag_unc, i_mag_unc)))", "But wait!\nProblem 5d\nWas this calculation \"fair\"?\nHint - this is a relatively red galaxy.\nSolution 5d\nThis calculation was not \"fair\" because identical apertures were not used in all 3 filters. \nProblem 5e \n[Assuming your calculation was not fair] Calculate the $g - r$ and $r - i$ colors of the galaxy in a consistent fashion.\nHint - split your initial function into two functions, one to determine an aperture and another to measure photometry. Use the $r$-band image (where the signal-to-noise ratio of the data is highest) to define the aperture for all 3 images.", "def cluster_galaxy_aperture(data):\n# complete\n# complete\n# complete\n# complete\n# complete\n# complete\n# complete\n# complete\n# complete\n# complete\n# complete\n return aperture\n\ndef cluster_galaxy_phot(data, aperture):\n# complete\n# complete\n# complete\n# complete\n# complete\n return mag, mag_unc\n\nr_ap = # complete\n\n# complete\n# complete\n# complete\n\nprint(\"\"\"The g-r color = {:.3f} +/- {:.3f} mag.\nThe r-i color = {:.3f} +/- {:.3f} mag\"\"\".format(g_mag - r_mag, np.hypot(g_mag_unc, r_mag_unc), \n r_mag - i_mag, np.hypot(r_mag_unc, i_mag_unc)))", "Challenge Problem) Colors as a Function of Radius\nEach of the provided FITS images corresponds to a single galaxy in the galaxy cluster. Measure the colors for each galaxy, and plot these colors as a function of cluster radius.\nHint - the file galsAngSep.txt has the galaxy names and separation from the center of the cluster." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
gregoryg/cdh-projects
notebooks/jupyter/datascience/K Means Cluster Visualization.ipynb
apache-2.0
[ "Visualizing clusters in Python\nI am wanting to see the results of clustering methods such as K-Means; this is my playground.\nInitial examples are taken from K Means Clustering in Python", "import matplotlib.pyplot as plt\nfrom sklearn import datasets\nfrom sklearn.cluster import KMeans\nimport sklearn.metrics as sm\n\nimport pandas as pd\nimport numpy as np\n\n# Only needed if you want to display your plots inline if using Notebook\n# change inline to auto if you have Spyder installed\n%matplotlib inline", "Using Iris Dataset", "# import some data to play with\niris = datasets.load_iris()\n# look at individual aspects by uncommenting the below\n#iris.data\n#iris.feature_names\n#iris.target\n#iris.target_names", "Original author converted the data to Pandas Dataframes. Note that we have separated out the inputs (x) and the outputs/labels (y).", "# Store the inputs as a Pandas Dataframe and set the column names\nx = pd.DataFrame(iris.data)\nx.columns = ['Sepal_Length','Sepal_Width','Petal_Length','Petal_Width']\n\ny = pd.DataFrame(iris.target)\ny.columns = ['Targets']", "Visualise the data\nIt is always important to have a look at the data. We will do this by plotting two scatter plots. One looking at the Sepal values and another looking at Petal. We will also set it to use some colours so it is clearer.", "# Set the size of the plot\nplt.figure(figsize=(14,7))\n\n# Create a colormap\ncolormap = np.array(['red', 'lime', 'black'])\n\n# Plot Sepal\nplt.subplot(1, 2, 1)\nplt.scatter(x.Sepal_Length, x.Sepal_Width, c=colormap[y.Targets], s=40)\nplt.title('Sepal')\n\nplt.subplot(1, 2, 2)\nplt.scatter(x.Petal_Length, x.Petal_Width, c=colormap[y.Targets], s=40)\nplt.title('Petal');", "Build the K Means Model - non-Spark example\nThis is the easy part, providing you have the data in the correct format (which we do). Here we only need two lines. First we create the model and specify the number of clusters the model should find (n_clusters=3) next we fit the model to the data.", "# K Means Cluster\nmodel = KMeans(n_clusters=3)\nmodel.fit(x)\n1\n2\n3\n# K Means Cluster\nmodel = KMeans(n_clusters=3)\nmodel.fit(x)\n\n# This is what KMeans thought\nmodel.labels_", "Visualise the classifier results\nLet's plot the actual classes against the predicted classes from the K Means model.\nHere we are plotting the Petal Length and Width, however each plot changes the colors of the points using either c=colormap[y.Targets] for the original class and c=colormap[model.labels_] for the predicted classess.", "# View the results\n# Set the size of the plot\nplt.figure(figsize=(14,7))\n\n# Create a colormap\ncolormap = np.array(['red', 'lime', 'black'])\n\n# Plot the Original Classifications\nplt.subplot(1, 2, 1)\nplt.scatter(x.Petal_Length, x.Petal_Width, c=colormap[y.Targets], s=40)\nplt.title('Real Classification')\n\n# Plot the Models Classifications\nplt.subplot(1, 2, 2)\nplt.scatter(x.Petal_Length, x.Petal_Width, c=colormap[model.labels_], s=40)\nplt.title('K Mean Classification');", "Fixing the coloring\nHere we are going to change the class labels, we are not changing the any of the classification groups we are simply giving each group the correct number. We need to do this for measuring the performance.\nUsing this code below we using the np.choose() to assign new values, basically we are changing the 1’s in the predicted values to 0’s and the 0’s to 1’s. Class 2 matched so we can leave. 
By running the two print functions you can see that all we have done is swap the values.\nNOTE: your results might be different to mine; if so, you will have to figure out which class matches which and adjust the order of the values in the np.choose() function.", "# The fix: we convert all the 1s to 0s and 0s to 1s.\npredY = np.choose(model.labels_, [1, 0, 2]).astype(np.int64)\nprint(model.labels_)\nprint(predY)", "Re-plot\nNow we can re-plot the data as before, but using predY instead of model.labels_.", "# View the results\n# Set the size of the plot\nplt.figure(figsize=(14,7))\n\n# Create a colormap\ncolormap = np.array(['red', 'lime', 'black'])\n\n# Plot Original\nplt.subplot(1, 2, 1)\nplt.scatter(x.Petal_Length, x.Petal_Width, c=colormap[y.Targets], s=40)\nplt.title('Real Classification')\n\n# Plot Predicted with corrected values\nplt.subplot(1, 2, 2)\nplt.scatter(x.Petal_Length, x.Petal_Width, c=colormap[predY], s=40)\nplt.title('K Mean Classification');" ]
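As an aside to the K Means record above (this sketch is not part of the original notebook), the cluster-to-class mapping that the author tunes by hand with np.choose() can also be derived automatically by giving each cluster the majority true label of its members; the [1, 0, 2]-style reorder and the accuracy check then follow directly instead of by trial and error.

```python
# Hypothetical companion sketch: fit KMeans on iris, derive the
# cluster -> class mapping automatically, then score the result.
import numpy as np
from sklearn import datasets
from sklearn.cluster import KMeans
import sklearn.metrics as sm

iris = datasets.load_iris()
model = KMeans(n_clusters=3, random_state=0).fit(iris.data)

# Map each cluster id to the most common true label inside that cluster.
mapping = np.array([np.bincount(iris.target[model.labels_ == k]).argmax()
                    for k in range(3)])
predY = mapping[model.labels_]

print(mapping)                                   # the reorder np.choose() hard-codes
print(sm.accuracy_score(iris.target, predY))
print(sm.confusion_matrix(iris.target, predY))
```

The exact mapping depends on the KMeans initialisation, which is why the notebook warns that your label order may differ from the author's.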
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
evanmiltenburg/python-for-text-analysis
Assignments/ASSIGNMENT-1.ipynb
apache-2.0
[ "Assignment 1: Calculation, Strings, Boolean Expressions and Conditions\nDeadline: Friday, September 9, 2021 before 3pm (submit via Canvas: Block I/Assignment 1) \nThis assignment is not graded, but it is mandatory to submit a version that shows you have given it a serious try. We will check your assignments to monitor the progress and get an impression of how the course is going so far. You will receive feedback and solutions on Monday September 13. Please note that the material taught in this block is essential for all subsequent blocks. \n\nPlease name your notebook with the following naming convention: ASSIGNMENT_1_FIRSTNAME_LASTNAME.ipynb\nIf you have questions about this topic, please contact [email protected].\n\nIn this block, we covered the following chapters:\n\nChapter 1 - Getting Started with Variables and Values\nChapter 2 - Basic Data Types (Integers and Floats)\nChapter 3 - Strings\nChapter 4 - Boolean Expressions and Conditions\n\nIn this assignment, you will be asked to show what you have learned from the topics above! \nLooking things up\nDon't worry - you do not have to know things by heart yet - feel free to have all Chapters of block 1 open while you work on this. This also goes for all future assignments. \nFinding solutions online\nVery often, you can find good solutions online. We encourage you to use online resources when you get stuck. However, please always try to understand the code you find and indicate that it is not your own. Use the following format to mark code written by someone else:\nTaken from [link] [date]\n[code]\n###\nPlease use a similar format to indicate that you have worked with a classmate (e.g. mention the name instead of the link). \nIndicating online resources and collaboration is mandatory! Please stick to this strategy for all course assignments.\nBeing stuck and getting help\nIt is very normal to get stuck every now and then. Sometimes you find what is wrong within minutes, sometimes this takes longer. If you get stuck, please apply the strategies described in the readme. If none of them work, please contact us ([email protected]). \nExercise 1: Calculation\n1a. Average\nDefine three variables var1, var2 and var3. Calculate the average of these variables and assign it to the variable average. Print the outcome of your program as follows (use casting or an f-string):\nThe average of [...], [...] and [...] is: [...]", "# average code", "1b. Book prices\nCalculate book prices for the following scenarios:\nSuppose the price of a book is 24.95 EUR, but if the book is bought by a bookstore, they get a 30 percent discount (as opposed to customers buying from an online stores). Shipping costs 3 EUR for the first copy and 75 cents for each additional copy. Shipping costs always apply (the books also have to be shipped to the bookstore). \nWrite a program that can calculate the total costs for any number of copies for both bookstores and other customers. Use variables with clear names for your calculations and print the result using a full sentence.\nThe program should use variables which indicate whether the customer is a bookstore or not and how many books are bought. You can simply assign values to the variables in you code or use the input function (both is accepted).\nTip\nStart small and add things in steps. For instance, start by calculating the price minus the discount. Then add the additional steps. Also, it helps to a start by simply assuming a specific number of books (start with 1 and make sure it works with any other number). 
Do not forget to test your program!", "# complete the code below\n\nn_books = \ncustomer_is_bookstore = \n\n# you bookprice calculations here", "1c. The modulus operator\nThere is one operator (like the ones for multiplication and subtraction) that we did not discuss yet, namely the modulus operator %. Could you figure out by yourself what it does when you place it between two numbers (e.g. 113 % 9)? (PS: Try to figure it out by yourself first, by trying multiple combinations of numbers. If you do not manage, it's OK to use online resources...) \nYou don't need this operator all that often, but when you do, it comes in really handy! Also, it is important to learn how to figure out what operators/methods/functions you have never seen before do by playing with code, googling or reading documentation.", "# try out the modulus operator!\n", "Help the cashier\nCan you use the modulus operator you just learned about to solve the following task? Imagine you want to help cashiers to return the change in a convenient way. This means you do not want to return hands full of small coins, but rather use bills and as few coins as possible. \nWrite code that classifies a given amount of money into smaller monetary units. Given a specific amout of dollars, your program should report the maximum number of dollar bills, quarters, dimes, nickels, and pennies. \nSet the amount variable to 11.67. You code should output a report listing the monetary equivalent in dollars, quarters, dimes, nickels, and pennies (one quarter is equivalent to 25 cents; one dime to 10 cents; one nickle to 5 cents and a pennie to 1 cent). Your program should report the maximum number of dollars, then the number of quarters, dimes, nickels, and pennies, in this order, to result in the minimum number of coins. Here are the steps in developing the program:\n\nConvert the amount (11.67) into cents (1167).\nFirst get the amount of cents that you would get after subtracting the maximum amount of dollars (100 cents) using the modulus operator (67 cents).\nThen subtract the remainder (67 cents) from the total amount of cents (1167 cents) and divide this by 100 to find the number of dollars.\nUse the modulus operator again to find out the remainder after subtracting the maximum amount of quarters (17 cents).\nSubtract this remainder (17 cents) from the previous remainder (67 cents) and divide this by 25 to find out the number of quarters.\nFollow the same steps for the dimes, nickels and pennies.\nDisplay the result for your cashier! (the amount of dollars, quarters, dimes, nickels and pennies that (s)he would have to give back)`", "# cashier code", "Exercise 2: Printing and user input\n2a. Difference between \",\" and \"+\"\nWhat is the difference between using + and , in a print statement? Illustrate by using both in each of the following:\n\ncalling the print() fuction with multiple strings\nprinting combinations of strings and integers\nconcatenating multiple strings and assign to one single variable\nconcatenating strings and integers and assign to one single variable\n\n2b. Small Talk\nWrite a program to have a little conversation with someone. First ask them for their name and their age, and then say something about your own age compared to theirs. Your code should result in a conversation following this example:\n\nHello there! What is your name?\n-- Emily.\nNice to meet you, Emily. How old are you?\n-- 23\nI'm 25 years old, so I'm 2 years older than you.\n\nAlso account for situations where the other person is older or the same age. 
You will need to use if-else-statements!", "name = input(\"Hello there! What is your name? \")\n# finish this code", "Exercise 3: String Art\n3a. Drawing figures\nWe start with some repetition of the theory about strings:\n| Topic | Explanation |\n|-----------|--------|\n| quotes | A string is delimited by single quotes ('...') or double quotes (\"...\") |\n| special characters | Certain special characters can be used, such as \"\\n\" (for newline) and \"\\t\" (for a tab) | \n| printing special characters | To print the special characters, they must be preceded by a backslash (\\) |\n| continue on next line | A backslash (\\) at the end of a line is used to continue a string on the next line |\n| multi-line strings | A multi-line print statement should be enclosed by three double or three single quotes (\"\"\"...\"\"\" of '''...''') | \nPlease run the code snippet below and observe what happens:", "print('hello\\n')\nprint('To print a newline use \\\\n')\nprint('She said: \\'hello\\'')\nprint('\\tThis is indented')\nprint('This is a very, very, very, very, very, very \\\nlong print statement')\nprint('''\nThis is a multi-line print statement\nFirst line\nSecond line\n''')", "Now write a Python script that prints the following figure using only one line of code! (so don't use triple quotes)\n | | |\n @ @\n u\n |\"\"\"|", "# your code here", "3b. Colors\nWe start again with some repetition of the theory:\n| Topic | Explanation |\n|-----------|--------|\n| a = b + c | if b and c are strings: concatenate b and c to form a new string a| \n| a = b * c | if b is an integer and c is a string: c is repeated b times to form a new string a |\n| a[0] | the first character of string a |\n| len(a) | the number of characters in string a |\n| min(a) | the smallest element in string a (alphabetically first) |\n| max(a) | the largest element in string a (alphabetically last) |\nPlease run the code snippet below and observe what happens:", "b = 'the'\nc = 'cat'\nd = ' is on the mat'\na = b + ' ' + c + d\nprint(a)\na = b * 5\nprint(a)\nprint('The first character of', c, 'is' , c[0])\nprint('The word c has,', len(c) ,'characters')", "Now write a program that asks users for their favorite color. Create the following output (assuming \"red\" is the chosen color). Use \"+\" and \"*\".\nIt should work with any color name though.\nxml\nred red red red red red red red red red \nred red\nred red\nred red red red red red red red red red", "color = input('what is your favorite color? ')\n\nprint(color)\nprint(color)\nprint(color)\nprint(color)", "Exercise 4: String methods\nRemember that you can see all methods of the class str by using dir(). You can ignore all methods that start with one or two underscores.", "dir(str)", "To see the explanation for a method of this class, you can use help(str.method). For example:", "help(str.upper)", "4a. Counting vowels\nCount how many of each vowel (a,e,i,o,u) there are in the text string in the next cell. Print the count for each vowel with a single formatted string. Remember that vowels can be both lower and uppercase.", "text = \"\"\"But I must explain to you how all this mistaken idea of denouncing pleasure and praising pain was born \nand I will give you a complete account of the system, and expound the actual teachings of the great explorer of the \ntruth, the master-builder of human happiness. 
No one rejects, dislikes, or avoids pleasure itself, because it is \npleasure, but because those who do not know how to pursue pleasure rationally encounter consequences that are \nextremely painful. Nor again is there anyone who loves or pursues or desires to obtain pain of itself, because it is \npain, but because occasionally circumstances occur in which toil and pain can procure him some great pleasure. \nTo take a trivial example, which of us ever undertakes laborious physical exercise, except to obtain some advantage \nfrom it? But who has any right to find fault with a man who chooses to enjoy a pleasure that has no annoying \nconsequences, or one who avoids a pain that produces no resultant pleasure? On the other hand, we denounce with \nrighteous indignation and dislike men who are so beguiled and demoralized by the charms of pleasure of the moment, \nso blinded by desire, that they cannot foresee the pain and trouble that are bound to ensue; and equal blame belongs \nto those who fail in their duty through weakness of will, which is the same as saying through shrinking from toil \nand pain.\"\"\"\n\n# your code here", "4b. Printing the lexicon\nHave a good look at the internal representation of the string below. Use a combination of string methods (you will need at least 3 different ones and some will have to be used multiple times) in the correct order to remove punctuation and redundant whitespaces, and print each word in lowercase characters on a new line. The result should look like:\nthe \nquick \nbrown \nfox \njumps \netc.", "text = \"\"\" The quick, brown fox jumps over a lazy dog.\\tDJs flock by when MTV ax quiz prog. \nJunk MTV quiz graced by fox whelps.\\tBawds jog, flick quartz, vex nymphs. \nWaltz, bad nymph, for quick jigs vex!\\tFox nymphs grab quick-jived waltz. \nBrick quiz whangs jumpy veldt fox. \"\"\"\n\nprint(text)\nprint()\nprint(repr(text))\n\ntext = # your code here\n\nprint(text)", "4c. Passwords\nWrite a program that asks a user for a password and checks some simple requirements of a password. If necessary, print out the following warnings (use if-statements):\n\nYour password should contain at least 6 characters.\nYour password should contain no more than 12 characters.\nYour password only contains alphabetic characters! Please also use digits and/or special characters.\nYour password only contains digits! Please also use alphabetic and/or special characters.\nYour password should contain at least one special character.\nYour password contains only lowercase letters! Please also use uppercase letters.\nYour password contains only uppercase letters! Please also use lowercase letters.", "# your code here", "Exercise 5: Boolean Logic and Conditions\n5a. Speeding\nWrite code to solve the following scenario:\nYou are driving a little too fast, and a police officer stops you. Write code to compute and print the result, encoded as a string: 'no ticket', 'small ticket', 'big ticket'. If speed is 60 or less, the result is 'no ticket'. If speed is between 61 and 80 inclusive, the result is 'small ticket'. If speed is 81 or more, the result is 'big ticket'. Unless it is your birthday -- on that day, your speed can be 5 higher in all cases.", "# your code here", "5b. Alarm clock\nWrite code to set you alarm clock! 
Given the day of the week and information about whether you are currently on vacation or not, your code should print the time you want to be woken up following these constraints: \nWeekdays, the alarm should be \"7:00\" and on the weekend it should be \"10:00\". Unless we are on vacation -- then on weekdays it should be \"10:00\" and weekends it should be \"off\".\nEncode the weeks days as ints in the following way: 0=Sun, 1=Mon, 2=Tue, ...6=Sat. Encode the vacation infromation as boolean. Your code should assign the correct time to a variable as a string (following this format: \"7:00\") and print it.\nNote: Encoding the days as an integer helps you with defining conditions. You can check whether the week day is in a certain interval (instead of writing code for every single day).", "# your code here", "5c. Parcel delivery\nThe required postage for an international parcel delivery service is calculated based on item weight and country of destination:\n| Tariff zone | 0 - 2 kg | 2 - 5 kg | 5 - 10 kg | 10 - 20 kg | 20 - 30 kg |\n|-------------|----------|----------|-----------|------------|------------|\n|EUR 1 | € 13.00 | € 19.50 | € 25.00 | € 34.00 | € 45.00 | \n|EUR 2 | € 18.50 | € 25.00 | € 31.00 | € 40.00 | € 55.00 | \n|World | € 24.30 | € 34.30 | € 58.30 | € 105.30 | - | \nAsk a user for the weight and zone. Use (nested) if-statements to find the required postage based on these variables. Assign the result to a variable postage and print the result using a full sentence:\nThe price of sending a [...] kg parcel to the [...] zone is € [...].", "# your code here" ]
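For the cashier exercise spelled out step by step in the assignment record above, one possible solution sketch looks like the following; the variable names and the use of divmod() are my own (divmod(a, b) simply bundles the // and % operations the exercise walks through), and the amount is assumed to be the 11.67 from the task description.

```python
# One possible sketch for the cashier exercise (amount assumed to be 11.67).
amount = 11.67
cents = round(amount * 100)              # 11.67 dollars -> 1167 cents

dollars, rest = divmod(cents, 100)
quarters, rest = divmod(rest, 25)
dimes, rest = divmod(rest, 10)
nickels, pennies = divmod(rest, 5)

print(f"{dollars} dollars, {quarters} quarters, {dimes} dimes, "
      f"{nickels} nickels and {pennies} pennies")
# prints: 11 dollars, 2 quarters, 1 dimes, 1 nickels and 2 pennies
```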
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
phoebe-project/phoebe2-docs
2.1/tutorials/spots.ipynb
gpl-3.0
[ "Binary with Spots\nSetup\nIMPORTANT NOTE: if using spots on contact systems or single stars, make sure to use 2.1.15 or later as the 2.1.15 release fixed a bug affecting spots in these systems.\nLet's first make sure we have the latest version of PHOEBE 2.1 installed. (You can comment out this line if you don't use pip for your installation or don't want to update to the latest release).", "!pip install -I \"phoebe>=2.1,<2.2\"", "As always, let's do imports and initialize a logger and a new bundle. See Building a System for more details.", "%matplotlib inline\n\nimport phoebe\nfrom phoebe import u # units\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nlogger = phoebe.logger()\n\nb = phoebe.default_binary()", "Adding Spots\nLet's add one spot to each of our stars in the binary.\nA spot is a feature, and needs to be attached directly to a component upon creation. Providing a tag for 'feature' is entirely optional - if one is not provided it will be created automatically.", "b.add_feature('spot', component='primary', feature='spot01')", "As a shortcut, we can also call add_spot directly.", "b.add_spot(component='secondary', feature='spot02')", "Relevant Parameters\nA spot is defined by the colatitude (where 0 is defined as the North (spin) Pole) and longitude (where 0 is defined as pointing towards the other star for a binary, or to the observer for a single star) of its center, its angular radius, and the ratio of temperature of the spot to the local intrinsic value.", "print b['spot01']\n\nb.set_value(qualifier='relteff', feature='spot01', value=0.9)\n\nb.set_value(qualifier='radius', feature='spot01', value=30)\n\nb.set_value(qualifier='colat', feature='spot01', value=45)\n\nb.set_value(qualifier='long', feature='spot01', value=90)", "To see the spot, add a mesh dataset and plot it.", "b.add_dataset('mesh', times=[0,0.25,0.5,0.75,1.0], columns=['teffs'])\n\nb.run_compute()\n\nafig, mplfig = b.filter(component='primary', time=0.75).plot(fc='teffs', show=True)", "Spot Corotation\nThe positions (colat, long) of a spot are defined at t0 (note: t0@system, not necessarily t0_perpass or t0_supconj). If the stars are not synchronous, then the spots will corotate with the star. To illustrate this, let's set the syncpar > 1 and plot the mesh at three different phases from above.", "b.set_value('syncpar@primary', 1.5)\n\nb.run_compute(irrad_method='none')", "At time=t0=0, we can see that the spot is where defined: 45 degrees south of the north pole and 90 degree longitude (where longitude of 0 is defined as pointing towards the companion star at t0).", "print \"t0 = {}\".format(b.get_value('t0', context='system'))\n\nafig, mplfig = b.plot(time=0, y='ws', fc='teffs', ec='None', show=True)", "At a later time, the spot is still technically at the same coordinates, but longitude of 0 no longer corresponds to pointing to the companion star. The coordinate system has rotated along with the asyncronous rotation of the star.", "afig, mplfig = b.plot(time=0.25, y='ws', fc='teffs', facecmap='YlOrRd', ec='None', show=True)\n\nafig, mplfig = b.plot(time=0.5, y='ws', fc='teffs', facecmap='YlOrRd', ec='None', show=True)\n\nax, artists = b.plot(time=0.75, y='ws', fc='teffs', facecmap='YlOrRd', ec='None', show=True)", "Since the syncpar was set to 1.5, one full orbit later the star (and the spot) has made an extra half-rotation.", "ax, artists = b.plot(time=1.0, y='ws', fc='teffs', facecmap='YlOrRd', ec='None', show=True)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
rohanisaac/spectra
notebooks/find_background.ipynb
gpl-3.0
[ "Intelligent background subtraction algorithm using wavelets.\nAlso contains implementations of other wavelets and peak-finding code\nRuns 2017/1/9\nFind and Subtract background", "%matplotlib inline\nfrom __future__ import division\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport sys\nfrom scipy import signal", "Find Peaks\nUse method in P. Du, W. A. Kibbe, S. M. Lin, Bioinformatics 2006, 22, 2059. Same as in scipy.signal.find_peaks_cwt() and baselineWavelet\nWavelet transform", "data1 = np.genfromtxt(os.path.join('..', 'tests', 'data', 'raman-785nm.txt'))\nx = data1[:, 0]\ny = data1[:, 1]\nplt.plot(x, y)", "Find ridge lines", "widths = np.arange(1,71)\ncwtmat = signal.cwt(y, signal.ricker, widths)\nplt.imshow(cwtmat, aspect='auto', cmap='PRGn')\n\n# Find local maxima\n# make a binary array containing local maximum of transform, with same shape\nlmax = np.zeros(cwtmat.shape)\nfor i in range(cwtmat.shape[0]):\n lmax[i, signal.argrelextrema(cwtmat[i, :], np.greater)] = 1\nfig, ax = plt.subplots(figsize=(15, 4))\nax.imshow(lmax, aspect='auto', cmap='gray_r')\n\n# allocate memory\n\n# intial location assigned to peak from the first row\npeak_pos_start = np.where(lmax[0,:]==1)[0]\n\n# current position of the ridge\npeak_ridge = np.copy(peak_pos_start) # full copy\n\nn_peaks = peak_pos_start.size\n\n# length of the ridge\npeak_len = np.ones(n_peaks)\n\n# use the max of the ridge line to find the width of the peaks\npeak_pos = np.zeros(n_peaks, dtype='int')\npeak_width = np.ones(n_peaks)\npeak_width_max = np.zeros(n_peaks)\n\n# Link local maxima (find ridges)\n\nw = 3\n\n# for each row starting at the second\nfor i in range(1, lmax.shape[0]):\n # for each peak\n for j in range(n_peaks):\n # assume it doesn't extend, and then check\n extends = False\n p = peak_ridge[j]\n if lmax[i, p] == 1:\n # if there is one below, it is part of the same ridge\n extends = True\n else:\n # if not search around peak \n for k in range(1, w):\n if lmax[i, p-k] == 1:\n extends = True\n peak_ridge[j] -= k\n break\n elif lmax[i, p+k] == 1:\n extends = True\n peak_ridge[j] += k\n break\n # if it extends\n if extends:\n # it it longer\n peak_len[j] += 1\n # find width by comparing max vs. 
previous\n if cwtmat[i, p] > peak_width_max[j]:\n peak_width_max[j] = cwtmat[i, p]\n peak_width[j] = i\n peak_pos[j] = p\n\nprint peak_pos[:20]\nprint peak_width[:20]\n\n# generate a simulated spectrum of sorts, with peak positions and the length of the ridge lines\nypeaks = np.zeros(y.shape)\nypeaks[peak_pos] = peak_len*peak_width\nfig, ax = plt.subplots(figsize=(15, 4))\nax.plot(x, ypeaks)\n\n# find peaks using the first ridge position, last ridge position as well using find_peaks\npeaks = signal.find_peaks_cwt(y, wavelet=signal.ricker, widths=widths)\npeaks_2 = peak_pos[np.all(((peak_width > 0), (peak_len > 5)), axis=0)]\n\n\nprint peaks, peaks_2", "For now use scipy.signal.find_peaks_cwt(), compare with my own implementation", "fig, ax = plt.subplots(24, figsize=(10,10))\nfor w in range(3):\n for l in range(2, 10):\n a = ax[w*8 + (l-2)]\n peaks = peak_pos[np.all(((peak_width > w), (peak_len > l)), axis=0)]\n a.plot(x,y)\n a.plot(x[peaks], y[peaks], 'rx', label='w%i, l%i' % (w,l))\n #a.legend()\n\n# find peaks using the first ridge position, last ridge position as well using find_peaks\npeaks = signal.find_peaks_cwt(y, wavelet=signal.ricker, widths=widths)\npeaks_2 = peak_pos[np.all(((peak_width > 1), (peak_len > 5)), axis=0)]\n\nfig, ax = plt.subplots(figsize=(15,5))\nax.semilogy(x,y)\nax.semilogy(x[peaks], y[peaks], 'kv', alpha=0.8)\nax.semilogy(x[peaks_2], y[peaks_2], 'rd', alpha=0.8, label='filterd width')\n#ax.plot(x[peaks_3], y[peaks_3], 'bx', label='filterd length')\nax.set_ylim(200000,600000)\nax.legend()\n\n# find peaks using the first ridge position, last ridge position as well using find_peaks\npeaks = signal.find_peaks_cwt(y, wavelet=signal.ricker, widths=widths)\npeaks_2 = peak_pos[np.all(((peak_width > 5), (peak_len > 20)), axis=0)]\n\nfig, ax = plt.subplots(figsize=(15,5))\nax.plot(x,y)\nax.plot(x[peaks], y[peaks], 'kv', alpha=0.8, label='scipy')\nax.plot(x[peaks_2], y[peaks_2], 'rd', alpha=0.8, label='filterd length and width')\n#ax.plot(x[peaks_3], y[peaks_3], 'bx', label='filterd length')\nax.set_ylim(200000,520000)\nax.legend()", "Estimate Peak widths\nProcedure from Zhang et al.\n\nPerform CWT with Haar wavelet w/ same scales as peak finding. Result M x N matrix\nTake abs of all values\nFor each peak in peak-detection there are two parameter: index and scale\n a. Row corresponding to scale is taken out\n b. Search for local minima to three times of peak scale or next peak index\nIf local minima do not exist:\n a. Peak start or end point is min(3 x peak scale, next peak index)\n else\n b. 
Peaks boundaries are minima and min(...)\nRepeat for all peaks", "# analyze the ricker wavelet to help build the ricker wavelet\npoints = 100\nfor a in range(2, 11, 2):\n wave = signal.ricker(points, a)\n plt.plot(wave)\n\n# note, all integrate to 0\n\n# make a haar mother wavelet\ndef haar2(points, a):\n \"\"\"\n Returns a haar wavelet mother wavelet\n \n 1 if 0 <= t < 1/2\n h(t) = -1 if 1/2 <= t < 1\n 0 otherwise`\n \n Numpy version, not accurate right now\n \"\"\"\n x = np.arange(0, points) - (points - 1.0) / 2\n wave = np.zeros(x.shape)\n amp = 2/a\n wave[np.where(np.logical_and(0 <= x, x < 0.5*a))[0]] = 1\n wave[np.where(np.logical_and(-0.5*a <= x, x < 1))[0]] = -1\n\n return wave*amp\n\n# make a haar mother wavelet\ndef haar(points, a):\n \"\"\"\n Returns a haar wavelet mother wavelet\n \n 1 if 0 <= t < 1/2\n h(t) = -1 if 1/2 <= t < 1\n 0 otherwise`\n \"\"\"\n vec = np.arange(0, points) - (points - 1.0) / 2\n wave = np.zeros(vec.shape)\n amp = 2/a\n for i, x in enumerate(vec):\n if 0 <= x < 0.5*a:\n wave[i] = 1\n elif -0.5*a <= x < 1:\n wave[i] = -1\n return wave*amp\n\npoints = 100\nfor a in range(2, 11, 2):\n wave = haar(points, a)\n plt.step(np.arange(points), wave)\n\nhw = signal.cwt(y, haar, widths=widths)\nplt.imshow(hw, aspect='auto', cmap='PRGn')\n\nahw = np.abs(hw)\nplt.imshow(ahw, aspect='auto', cmap='PRGn')", "Search for local minima in in the row corresponding to the peak's scale, within 3x peak scale or peak index", "for p in peak_pos:\n print p" ]
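The local-minima boundary search that the record above stops short of implementing could be sketched as follows. This assumes ahw (the absolute Haar CWT), peak_pos and peak_width exist exactly as built earlier in that notebook, and it treats the stored row index as the peak scale, so it is only an approximation of the Zhang et al. procedure rather than a faithful reimplementation.

```python
# Sketch of the boundary search: for each peak, look for local minima of
# |CWT| at the peak's scale within roughly 3x the scale on either side.
import numpy as np
from scipy.signal import argrelextrema

peak_bounds = []
for pos, scale in zip(peak_pos, peak_width.astype(int)):
    row = ahw[scale, :]
    window = 3 * max(scale, 1)
    lo, hi = max(pos - window, 0), min(pos + window, row.size - 1)

    left_minima = argrelextrema(row[lo:pos + 1], np.less)[0]
    right_minima = argrelextrema(row[pos:hi + 1], np.less)[0]

    left = lo + left_minima[-1] if left_minima.size else lo
    right = pos + right_minima[0] if right_minima.size else hi
    peak_bounds.append((left, right))

print(peak_bounds[:10])
```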
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
profxj/xastropy
xastropy/casbah/CASBAH_galaxy_database.ipynb
bsd-3-clause
[ "Building the CASBAH Galaxy Database (v1.0)\nSDSS\nTargeting\nI am unclear on how to sensibly extract targeting information from the\nSDSS. But this may well be an issue for various analyses.\nExtracting Galaxy data\nThe script build_sdss loops through the listed fields with SDSS\ncoverage and calls the grab_sdss_spectra script to grab photometric\nand spectral data. It is currently grabbing DR12.\nHere are some defaults:\n* Box of 2deg on a side\n* Photometry is Petrosian ugriz\n* Galaxies are cut down to 20Mpc separation (LCDM cosmology)\n* z > 500km/s to cut stars\nHere is the basic procedure:\n* Query photometry all objects in search box with spectra\n* Query list of spectra from SDSS in search box\n * This list often contain duplicates from multiple passes\n* Cut on 20Mpc using redshifts, RA+DEC of obj, and RA+DEC of QSO\n* Loop on sources to build table\n * Take BOSS data over SDSS\n* Generate a binary FITS table, including photometry, redshift and spectra", "# Example call\nfrom xastropy.casbah import galaxy_data as xcgd\nreload(xcgd)\nradec = (212.34957*u.deg,26.30585*u.deg)\ntab=xcgd.grab_sdss_spectra(radec, radius=1.*u.degree/12.) \nTable(tab[0:5])", "Open questions/issues\n\nShould we be recording other observing meta-data?\nHow about SFR, M*, etc.?\n\nDEIMOS\nTargeting\n\nPull mask target info from Mask files :: parse_deimos_mask_file\nPull other target info from SExtractor output\nRequires yaml file describing target criteria\nAnd the SExtractor output file\n\nSample output of MULTI_OBS file\nMULTI_OBJ file:\n| INSTR | MASK_NAME | MASK_RA | MASK_DEC | MASK_EPOCH | MASK_PA | DATE_OBS | DISPERSER | TEXP | CONDITIONS |\n| DEIMOS | PG1407_may_early | 14:09:34.10 | 26:18:45.1 | 2000.0 | -96.1 | 23-Jul-2015 | G600 | 3600.0 | POOR_SEEING,CLOUDS |\n| DEIMOS | PG1407_may_early | 14:09:34.10 | 26:18:45.1 | 2000.0 | -96.1 | 24-Jul-2015 | G600 | 3600.0 | CLEAR |", "#### Sample of target file\nfil='/Users/xavier/CASBAH/Galaxies/PG1407+265/PG1407+265_targets.fits'\ntarg = Table.read(fil)\n#\nmt = np.where(targ['MASK_NAME'] != 'N/A')[0]\ntarg[mt[0:5]]", "Testing", "fil='/Users/xavier/CASBAH/Galaxies/PG1407+265/PG1407+265_targets.fits'\ntmp = Table.read(fil,fill_values=[('N/A','0','MASK_NAME')],format='fits')" ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
tensorflow/docs-l10n
site/zh-cn/tutorials/load_data/text.ipynb
apache-2.0
[ "Copyright 2018 The TensorFlow Authors.", "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "使用 tf.data 加载文本数据\n<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://tensorflow.google.cn/tutorials/load_data/text\"><img src=\"https://tensorflow.google.cn/images/tf_logo_32px.png\" />在 TensorFlow.org 上查看</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/zh-cn/tutorials/load_data/text.ipynb\"><img src=\"https://tensorflow.google.cn/images/colab_logo_32px.png\" />在 Google Colab 上运行</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/docs-l10n/blob/master/site/zh-cn/tutorials/load_data/text.ipynb\"><img src=\"https://tensorflow.google.cn/images/GitHub-Mark-32px.png\" />查看 GitHub 上的资源</a>\n </td>\n <td>\n <a href=\"https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/zh-cn/tutorials/load_data/text.ipynb\"><img src=\"https://tensorflow.google.cn/images/download_logo_32px.png\" />下载 notebook</a>\n </td>\n</table>\n\nNote: 我们的 TensorFlow 社区翻译了这些文档。因为社区翻译是尽力而为, 所以无法保证它们是最准确的,并且反映了最新的\n官方英文文档。如果您有改进此翻译的建议, 请提交 pull request 到\ntensorflow/docs GitHub 仓库。要志愿地撰写或者审核译文,请加入\[email protected] Google Group。\n本教程为你提供了一个如何使用 tf.data.TextLineDataset 来加载文本文件的示例。TextLineDataset 通常被用来以文本文件构建数据集(原文件中的一行为一个样本) 。这适用于大多数的基于行的文本数据(例如,诗歌或错误日志) 。下面我们将使用相同作品(荷马的伊利亚特)三个不同版本的英文翻译,然后训练一个模型来通过单行文本确定译者。\n环境搭建", "import tensorflow as tf\n\nimport tensorflow_datasets as tfds\nimport os", "三个版本的翻译分别来自于:\n\n\nWilliam Cowper — text\n\n\nEdward, Earl of Derby — text\n\n\nSamuel Butler — text\n\n\n本教程中使用的文本文件已经进行过一些典型的预处理,主要包括删除了文档页眉和页脚,行号,章节标题。请下载这些已经被局部改动过的文件。", "DIRECTORY_URL = 'https://storage.googleapis.com/download.tensorflow.org/data/illiad/'\nFILE_NAMES = ['cowper.txt', 'derby.txt', 'butler.txt']\n\nfor name in FILE_NAMES:\n text_dir = tf.keras.utils.get_file(name, origin=DIRECTORY_URL+name)\n \nparent_dir = os.path.dirname(text_dir)\n\nparent_dir", "将文本加载到数据集中\n迭代整个文件,将整个文件加载到自己的数据集中。\n每个样本都需要单独标记,所以请使用 tf.data.Dataset.map 来为每个样本设定标签。这将迭代数据集中的每一个样本并且返回( example, label )对。", "def labeler(example, index):\n return example, tf.cast(index, tf.int64) \n\nlabeled_data_sets = []\n\nfor i, file_name in enumerate(FILE_NAMES):\n lines_dataset = tf.data.TextLineDataset(os.path.join(parent_dir, file_name))\n labeled_dataset = lines_dataset.map(lambda ex: labeler(ex, i))\n labeled_data_sets.append(labeled_dataset)", "将这些标记的数据集合并到一个数据集中,然后对其进行随机化操作。", "BUFFER_SIZE = 50000\nBATCH_SIZE = 64\nTAKE_SIZE = 5000\n\nall_labeled_data = labeled_data_sets[0]\nfor labeled_dataset in labeled_data_sets[1:]:\n all_labeled_data = all_labeled_data.concatenate(labeled_dataset)\n \nall_labeled_data = all_labeled_data.shuffle(\n BUFFER_SIZE, reshuffle_each_iteration=False)", "你可以使用 tf.data.Dataset.take 与 print 来查看 (example, label) 对的外观。numpy 属性显示每个 Tensor 的值。", "for ex in all_labeled_data.take(5):\n print(ex)", 
"将文本编码成数字\n机器学习基于的是数字而非文本,所以字符串需要被转化成数字列表。\n为了达到此目的,我们需要构建文本与整数的一一映射。\n建立词汇表\n首先,通过将文本标记为单独的单词集合来构建词汇表。在 TensorFlow 和 Python 中均有很多方法来达成这一目的。在本教程中:\n\n迭代每个样本的 numpy 值。\n使用 tfds.features.text.Tokenizer 来将其分割成 token。\n将这些 token 放入一个 Python 集合中,借此来清除重复项。\n获取该词汇表的大小以便于以后使用。", "tokenizer = tfds.features.text.Tokenizer()\n\nvocabulary_set = set()\nfor text_tensor, _ in all_labeled_data:\n some_tokens = tokenizer.tokenize(text_tensor.numpy())\n vocabulary_set.update(some_tokens)\n\nvocab_size = len(vocabulary_set)\nvocab_size", "样本编码\n通过传递 vocabulary_set 到 tfds.features.text.TokenTextEncoder 来构建一个编码器。编码器的 encode 方法传入一行文本,返回一个整数列表。", "encoder = tfds.features.text.TokenTextEncoder(vocabulary_set)", "你可以尝试运行这一行代码并查看输出的样式。", "example_text = next(iter(all_labeled_data))[0].numpy()\nprint(example_text)\n\nencoded_example = encoder.encode(example_text)\nprint(encoded_example)", "现在,在数据集上运行编码器(通过将编码器打包到 tf.py_function 并且传参至数据集的 map 方法的方式来运行)。", "def encode(text_tensor, label):\n encoded_text = encoder.encode(text_tensor.numpy())\n return encoded_text, label\n\ndef encode_map_fn(text, label):\n # py_func doesn't set the shape of the returned tensors.\n encoded_text, label = tf.py_function(encode, \n inp=[text, label], \n Tout=(tf.int64, tf.int64))\n\n # `tf.data.Datasets` work best if all components have a shape set\n # so set the shapes manually: \n encoded_text.set_shape([None])\n label.set_shape([])\n\n return encoded_text, label\n\n\nall_encoded_data = all_labeled_data.map(encode_map_fn)", "将数据集分割为测试集和训练集且进行分支\n使用 tf.data.Dataset.take 和 tf.data.Dataset.skip 来建立一个小一些的测试数据集和稍大一些的训练数据集。\n在数据集被传入模型之前,数据集需要被分批。最典型的是,每个分支中的样本大小与格式需要一致。但是数据集中样本并不全是相同大小的(每行文本字数并不相同)。因此,使用 tf.data.Dataset.padded_batch(而不是 batch )将样本填充到相同的大小。", "train_data = all_encoded_data.skip(TAKE_SIZE).shuffle(BUFFER_SIZE)\ntrain_data = train_data.padded_batch(BATCH_SIZE)\n\ntest_data = all_encoded_data.take(TAKE_SIZE)\ntest_data = test_data.padded_batch(BATCH_SIZE)", "现在,test_data 和 train_data 不是( example, label )对的集合,而是批次的集合。每个批次都是一对(多样本, 多标签 ),表示为数组。", "sample_text, sample_labels = next(iter(test_data))\n\nsample_text[0], sample_labels[0]", "由于我们引入了一个新的 token 来编码(填充零),因此词汇表大小增加了一个。", "vocab_size += 1", "建立模型", "model = tf.keras.Sequential()", "第一层将整数表示转换为密集矢量嵌入。更多内容请查阅 Word Embeddings 教程。", "model.add(tf.keras.layers.Embedding(vocab_size, 64))", "下一层是 LSTM 层,它允许模型利用上下文中理解单词含义。 LSTM 上的双向包装器有助于模型理解当前数据点与其之前和之后的数据点的关系。", "model.add(tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(64)))", "最后,我们将获得一个或多个紧密连接的层,其中最后一层是输出层。输出层输出样本属于各个标签的概率,最后具有最高概率的分类标签即为最终预测结果。", "# 一个或多个紧密连接的层\n# 编辑 `for` 行的列表去检测层的大小\nfor units in [64, 64]:\n model.add(tf.keras.layers.Dense(units, activation='relu'))\n\n# 输出层。第一个参数是标签个数。\nmodel.add(tf.keras.layers.Dense(3, activation='softmax'))", "最后,编译这个模型。对于一个 softmax 分类模型来说,通常使用 sparse_categorical_crossentropy 作为其损失函数。你可以尝试其他的优化器,但是 adam 是最常用的。", "model.compile(optimizer='adam',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])", "训练模型\n利用提供的数据训练出的模型有着不错的精度(大约 83% )。", "model.fit(train_data, epochs=3, validation_data=test_data)\n\neval_loss, eval_acc = model.evaluate(test_data)\n\nprint('\\nEval loss: {}, Eval accuracy: {}'.format(eval_loss, eval_acc))" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
fastai/course-v3
nbs/dl1/lesson3-planet.ipynb
apache-2.0
[ "Multi-label prediction with Planet Amazon dataset", "%reload_ext autoreload\n%autoreload 2\n%matplotlib inline\n\nfrom fastai.vision import *", "Getting the data\nThe planet dataset isn't available on the fastai dataset page due to copyright restrictions. You can download it from Kaggle however. Let's see how to do this by using the Kaggle API as it's going to be pretty useful to you if you want to join a competition or use other Kaggle datasets later on.\nFirst, install the Kaggle API by uncommenting the following line and executing it, or by executing it in your terminal (depending on your platform you may need to modify this slightly to either add source activate fastai or similar, or prefix pip with a path. Have a look at how conda install is called for your platform in the appropriate Returning to work section of https://course.fast.ai/. (Depending on your environment, you may also need to append \"--user\" to the command.)", "# ! {sys.executable} -m pip install kaggle --upgrade", "Then you need to upload your credentials from Kaggle on your instance. Login to kaggle and click on your profile picture on the top left corner, then 'My account'. Scroll down until you find a button named 'Create New API Token' and click on it. This will trigger the download of a file named 'kaggle.json'.\nUpload this file to the directory this notebook is running in, by clicking \"Upload\" on your main Jupyter page, then uncomment and execute the next two commands (or run them in a terminal). For Windows, uncomment the last two commands.", "# ! mkdir -p ~/.kaggle/\n# ! mv kaggle.json ~/.kaggle/\n\n# For Windows, uncomment these two commands\n# ! mkdir %userprofile%\\.kaggle\n# ! move kaggle.json %userprofile%\\.kaggle", "You're all set to download the data from planet competition. You first need to go to its main page and accept its rules, and run the two cells below (uncomment the shell commands to download and unzip the data). If you get a 403 forbidden error it means you haven't accepted the competition rules yet (you have to go to the competition page, click on Rules tab, and then scroll to the bottom to find the accept button).", "path = Config.data_path()/'planet'\npath.mkdir(parents=True, exist_ok=True)\npath\n\n# ! kaggle competitions download -c planet-understanding-the-amazon-from-space -f train-jpg.tar.7z -p {path} \n# ! kaggle competitions download -c planet-understanding-the-amazon-from-space -f train_v2.csv -p {path} \n# ! unzip -q -n {path}/train_v2.csv.zip -d {path}", "To extract the content of this file, we'll need 7zip, so uncomment the following line if you need to install it (or run sudo apt install p7zip-full in your terminal).", "# ! conda install --yes --prefix {sys.prefix} -c haasad eidl7zip", "And now we can unpack the data (uncomment to run - this might take a few minutes to complete).", "# ! 7za -bd -y -so x {path}/train-jpg.tar.7z | tar xf - -C {path.as_posix()}", "Multiclassification\nContrary to the pets dataset studied in last lesson, here each picture can have multiple labels. If we take a look at the csv file containing the labels (in 'train_v2.csv' here) we see that each 'image_name' is associated to several tags separated by spaces.", "df = pd.read_csv(path/'train_v2.csv')\ndf.head()", "To put this in a DataBunch while using the data block API, we then need to using ImageList (and not ImageDataBunch). 
This will make sure the model created has the proper loss function to deal with the multiple classes.", "tfms = get_transforms(flip_vert=True, max_lighting=0.1, max_zoom=1.05, max_warp=0.)", "We use parentheses around the data block pipeline below, so that we can use a multiline statement without needing to add '\\'.", "np.random.seed(42)\nsrc = (ImageList.from_csv(path, 'train_v2.csv', folder='train-jpg', suffix='.jpg')\n .split_by_rand_pct(0.2)\n .label_from_df(label_delim=' '))\n\ndata = (src.transform(tfms, size=128)\n .databunch().normalize(imagenet_stats))", "show_batch still works, and show us the different labels separated by ;.", "data.show_batch(rows=3, figsize=(12,9))", "To create a Learner we use the same function as in lesson 1. Our base architecture is resnet50 again, but the metrics are a little bit differeent: we use accuracy_thresh instead of accuracy. In lesson 1, we determined the predicition for a given class by picking the final activation that was the biggest, but here, each activation can be 0. or 1. accuracy_thresh selects the ones that are above a certain threshold (0.5 by default) and compares them to the ground truth.\nAs for Fbeta, it's the metric that was used by Kaggle on this competition. See here for more details.", "arch = models.resnet50\n\nacc_02 = partial(accuracy_thresh, thresh=0.2)\nf_score = partial(fbeta, thresh=0.2)\nlearn = cnn_learner(data, arch, metrics=[acc_02, f_score])", "We use the LR Finder to pick a good learning rate.", "learn.lr_find()\n\nlearn.recorder.plot()", "Then we can fit the head of our network.", "lr = 0.01\n\nlearn.fit_one_cycle(5, slice(lr))\n\nlearn.save('stage-1-rn50')", "...And fine-tune the whole model:", "learn.unfreeze()\n\nlearn.lr_find()\nlearn.recorder.plot()\n\nlearn.fit_one_cycle(5, slice(1e-5, lr/5))\n\nlearn.save('stage-2-rn50')\n\ndata = (src.transform(tfms, size=256)\n .databunch().normalize(imagenet_stats))\n\nlearn.data = data\ndata.train_ds[0][0].shape\n\nlearn.freeze()\n\nlearn.lr_find()\nlearn.recorder.plot()\n\nlr=1e-2/2\n\nlearn.fit_one_cycle(5, slice(lr))\n\nlearn.save('stage-1-256-rn50')\n\nlearn.unfreeze()\n\nlearn.fit_one_cycle(5, slice(1e-5, lr/5))\n\nlearn.recorder.plot_losses()\n\nlearn.save('stage-2-256-rn50')", "You won't really know how you're going until you submit to Kaggle, since the leaderboard isn't using the same subset as we have for training. But as a guide, 50th place (out of 938 teams) on the private leaderboard was a score of 0.930.", "learn.export()", "fin\n(This section will be covered in part 2 - please don't ask about it just yet! :) )", "#! kaggle competitions download -c planet-understanding-the-amazon-from-space -f test-jpg.tar.7z -p {path} \n#! 7za -bd -y -so x {path}/test-jpg.tar.7z | tar xf - -C {path}\n#! kaggle competitions download -c planet-understanding-the-amazon-from-space -f test-jpg-additional.tar.7z -p {path} \n#! 7za -bd -y -so x {path}/test-jpg-additional.tar.7z | tar xf - -C {path}\n\ntest = ImageList.from_folder(path/'test-jpg').add(ImageList.from_folder(path/'test-jpg-additional'))\nlen(test)\n\nlearn = load_learner(path, test=test)\npreds, _ = learn.get_preds(ds_type=DatasetType.Test)\n\nthresh = 0.2\nlabelled_preds = [' '.join([learn.data.classes[i] for i,p in enumerate(pred) if p > thresh]) for pred in preds]\n\nlabelled_preds[:5]\n\nfnames = [f.name[:-4] for f in learn.data.test_ds.items]\n\ndf = pd.DataFrame({'image_name':fnames, 'tags':labelled_preds}, columns=['image_name', 'tags'])\n\ndf.to_csv(path/'submission.csv', index=False)\n\n! 
kaggle competitions submit planet-understanding-the-amazon-from-space -f {path/'submission.csv'} -m \"My submission\"", "Private Leaderboard score: 0.9296 (around 80th)" ]
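The accuracy_thresh / fbeta discussion in the planet record above can be made concrete with a small numpy re-implementation. This is a simplified stand-in, not the fastai source: the threshold of 0.2 and the beta=2 choice mirror the notebook, but details such as fastai's sigmoid handling are omitted.

```python
# Simplified multi-label metrics: threshold the per-class probabilities,
# then compare element-wise against the 0/1 targets.
import numpy as np

def accuracy_thresh(probs, targets, thresh=0.2):
    preds = (probs > thresh).astype(float)
    return (preds == targets).mean()

def fbeta(probs, targets, thresh=0.2, beta=2, eps=1e-9):
    preds = (probs > thresh).astype(float)
    tp = (preds * targets).sum(axis=1)
    precision = tp / (preds.sum(axis=1) + eps)
    recall = tp / (targets.sum(axis=1) + eps)
    return ((1 + beta**2) * precision * recall /
            (beta**2 * precision + recall + eps)).mean()

probs = np.array([[0.9, 0.1, 0.4], [0.3, 0.8, 0.05]])
targets = np.array([[1., 0., 1.], [0., 1., 0.]])
print(accuracy_thresh(probs, targets), fbeta(probs, targets))
```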
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
ernestyalumni/servetheloop
packetDef/podCommands.ipynb
mit
[ "server/udp/podCommands.js - from node.js /JavaScript to Python (object)", "# find out where we are on the file directory\nimport os, sys\n\nprint( os.getcwd())\nprint( os.listdir(os.getcwd()))", "The reactGS folder \"mimics\" the actual react-groundstation github repository, only copying the file directory structure, but the source code itself (which is a lot) isn't completely copied over. I wanted to keep these scripts/notebooks/files built on top of that github repository to be separate from the actual working code.", "wherepodCommandsis = os.getcwd()+'/reactGS/server/udp/' \nprint(wherepodCommandsis)", "node.js/(JavaScript) to json; i.e.\nnode.js/(JavaScript) $\\to$ json\nMake a copy of server/udp/podCommands.js. \nIn this copy, comment out var chalk = require('chalk') (this is the only thing you have to do manually). \nRun this in the directory containing your copy of podCommands.js:\nnode traverse_podCommands.js \nThis should generate a json file podCmds_lst.json\nAvailable podCommands as a Python list; json to Python list, i.e. json $\\to$ Python list", "import json\n\nf_podCmds_json = open(wherepodCommandsis+'podCmds_lst.json','rb')\nrawjson_podCmds = f_podCmds_json.read()\nf_podCmds_json.close()\n\nprint(type(rawjson_podCmds))\npodCmds_lst=json.loads(rawjson_podCmds)\nprint(type(podCmds_lst))\nprint(len(podCmds_lst)) # there are 104 available commands for the pod!\n\nfor cmd in podCmds_lst: \n print cmd", "Dirty parsing of podCommands.js and the flight control parameters", "f_podCmds = open(wherepodCommandsis+'podCommands.js','rb')\nraw_podCmds = f_podCmds.read()\nf_podCmds.close()\nprint(type(raw_podCmds))\nprint(len(raw_podCmds))\n\n# get the name of the functions\ncmdnameslst = [func[:func.find(\"(\")].strip() for func in raw_podCmds.split(\"function \")]\n\nfuncparamslst = [func[func.find(\"(\")+1:func.find(\")\")] \n if func[func.find(\"(\")+1:func.find(\")\")] is not '' else None for func in raw_podCmds.split(\"function \")]\n#raw_podCmds.split(\"function \")[3][ raw_podCmds.split(\"function \")[3].find(\"(\")+1:raw_podCmds.split(\"function \")[3].find(\")\")] \n\n# more parsing of this list of strings \nfuncparamslst_cleaned = []\nfor param in funcparamslst:\n if param is None:\n funcparamslst_cleaned.append(None)\n else:\n funcparamslst_cleaned.append( param.strip().split(',') )\n\nprint(len(raw_podCmds.split(\"function \")) ) # 106 commands\n\n# get the index value (e.g. 
starts at position 22) of where \"udp.tx.transmitPodCommand\" starts, treating it as a string\n#whereisudptransmit = [func.find(\"udp.tx.transmitPodCommand(\") for func in raw_podCmds.split(\"function \")]\n\nwhereisudptransmit = []\nfor func in raw_podCmds.split(\"function \"):\n val = func.find(\"udp.tx.transmitPodCommand(\")\n if val is not -1:\n if func.find(\"// \",val-4) is not -1 or func.find(\"// udp\",val-4) is not -1:\n whereisudptransmit.append(None)\n else:\n whereisudptransmit.append(val)\n else: \n whereisudptransmit.append(None)\n\n#whereisudptransmit = [func.find(\"udp.tx.transmitPodCommand(\") for func in raw_podCmds.split(\"function \")]\n\n\n# remove -1 values\n#whereisudptransmit = filter(lambda x : x != -1, whereisudptransmit)\n\nrawParams=[funcstr[ funcstr.find(\"(\",val)+1:funcstr.find(\")\",val)] if val is not None else None for funcstr, val in zip(raw_podCmds.split(\"function \"), whereisudptransmit)]\n\nfuncparamslst_cleaned[:10]\n\nraw_podCmds.split(\"function \")[4].find(\"// \",116-4);\n\n# more parsing of this list of strings\ncleaningParams = []\nfor rawparam in rawParams: \n if rawparam is None:\n cleaningParams.append(None)\n else:\n cleanParam = []\n cleanParam.append( rawparam.split(',')[0].strip(\"'\") )\n for strval in rawparam.split(',')[1:]:\n strval2 = strval.strip()\n try: \n strval2 = int(strval2,16)\n strval2 = hex(strval2)\n except ValueError:\n strval2\n cleanParam.append(strval2)\n cleaningParams.append(cleanParam)\n\n\ncleaningParams[:10]\n\n# get the name of the functions\n\n#[func[:func.find(\"(\")] \n# if func.find(\"()\") is not -1 else None for func in raw_podCmds.split(\"function \")];\ncmdnameslst = [func[:func.find(\"(\")].strip() for func in raw_podCmds.split(\"function \")]\n\n# each node js function has its arguments; do that first\npodfunclst = zip(cmdnameslst, funcparamslst_cleaned)\nprint(len(podfunclst))\n\npodfunclst[:10];\n\n# each node js function has its arguments; do that first\npodCommandparams = zip(podfunclst, cleaningParams)\nprint(len(podCommandparams))\n\npodCommandparams[-2]", "So the structure of our result is as follows:\nPython tuples (each of size 2 for each of the tuples)\n\"\"\" \n ( (Name of pod command as a string, None if there are no function parameters or Python list of function arguments),\n Python list [ Subsystem name as a string, paramter1 as a hex value, paramter2 as a hex value, paramter3 as a hex value, paramter4 as a hex value] )\n\"\"\" \nNotice that in the original code, there's some TO DO's still left (eek!) 
so that those udp.tx.transmitPodCommand is commented out or left as TODO, and some are dependent upon arguments in the function (and thus will change, the parameter is a variable).", "podCommandparams[:10]\n\ntry:\n import CPickle as pickle\nexcept ImportError:\n import pickle\n\npodCommandparamsfile = open(\"podCommandparams.pkl\",'wb')\npickle.dump( podCommandparams , podCommandparamsfile )\npodCommandparamsfile.close()\n\n# open up a pickle file like so:\npodCommandparamsfile_recover = open(\"podCommandparams.pkl\",'rb')\npodCommandparams_recover = pickle.load(podCommandparamsfile_recover)\npodCommandparamsfile_recover.close()\n\npodCommandparams_recover[:10]", "Going to .csv\n@nuttwerx and @ernestyalumni decided upon separating the multiple entries in a field by the semicolon \";\":", "tocsv = []\nfor cmd in podCommandparams_recover:\n name = cmd[0][0]\n funcparam = cmd[0][1]\n if funcparam is None:\n fparam = None\n else:\n fparam = \";\".join(funcparam)\n udpparam = cmd[1]\n if udpparam is None:\n uname = None\n uparam = None\n else:\n uname = udpparam[0]\n uparam = \";\".join( udpparam[1:] )\n tocsv.append([name,fparam,uname,uparam])", "Add the headers in manually: \n1 = Command name; 2 = Function args; 3 = Pod Node; 4 = Command Args", "header = [\"Command name\",\"Function args\", \"Pod Node\", \"Command Args\"]\ntocsv.insert(0,header)", "The csv fields format is as follows: \n(function name) , (function arguments (None is there are none)) , (UDP transmit name (None is there are no udp transmit command)), (UDP transmit parameters, 4 of them, separated by semicolon, or None if there are no udp transmit command )", "import csv\n\nf_podCommands_tocsv = open(\"podCommands.csv\",'w')\ntocsv_writer = csv.writer( f_podCommands_tocsv )\ntocsv_writer.writerows(tocsv)\nf_podCommands_tocsv.close()\n\n#tocsv.insert(0,header) no need\n\n#tocsv[:10] no need" ]
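Since the record above fixes the convention of joining multi-valued fields with a semicolon, a short sketch of reading podCommands.csv back into Python lists (assuming the file written by that notebook) could look like this.

```python
# Read the semicolon-joined CSV fields back into lists; None values were
# written as empty fields and come back as empty strings.
import csv

with open("podCommands.csv") as f:
    for row in csv.DictReader(f):
        func_args = row["Function args"].split(";") if row["Function args"] else []
        cmd_args = row["Command Args"].split(";") if row["Command Args"] else []
        print(row["Command name"], func_args, row["Pod Node"], cmd_args)
```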
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
mne-tools/mne-tools.github.io
0.21/_downloads/6035dcef33422511928bd2247a3d092d/plot_source_power_spectrum_opm.ipynb
bsd-3-clause
[ "%matplotlib inline", "Compute source power spectral density (PSD) of VectorView and OPM data\nHere we compute the resting state from raw for data recorded using\na Neuromag VectorView system and a custom OPM system.\nThe pipeline is meant to mostly follow the Brainstorm [1]\nOMEGA resting tutorial pipeline &lt;bst_omega_&gt;.\nThe steps we use are:\n\nFiltering: downsample heavily.\nArtifact detection: use SSP for EOG and ECG.\nSource localization: dSPM, depth weighting, cortically constrained.\nFrequency: power spectral density (Welch), 4 sec window, 50% overlap.\nStandardize: normalize by relative power for each source.\n :depth: 1\n\nPreprocessing", "# Authors: Denis Engemann <[email protected]>\n# Luke Bloy <[email protected]>\n# Eric Larson <[email protected]>\n#\n# License: BSD (3-clause)\n\nimport os.path as op\n\nfrom mne.filter import next_fast_len\n\nimport mne\n\n\nprint(__doc__)\n\ndata_path = mne.datasets.opm.data_path()\nsubject = 'OPM_sample'\n\nsubjects_dir = op.join(data_path, 'subjects')\nbem_dir = op.join(subjects_dir, subject, 'bem')\nbem_fname = op.join(subjects_dir, subject, 'bem',\n subject + '-5120-5120-5120-bem-sol.fif')\nsrc_fname = op.join(bem_dir, '%s-oct6-src.fif' % subject)\nvv_fname = data_path + '/MEG/SQUID/SQUID_resting_state.fif'\nvv_erm_fname = data_path + '/MEG/SQUID/SQUID_empty_room.fif'\nvv_trans_fname = data_path + '/MEG/SQUID/SQUID-trans.fif'\nopm_fname = data_path + '/MEG/OPM/OPM_resting_state_raw.fif'\nopm_erm_fname = data_path + '/MEG/OPM/OPM_empty_room_raw.fif'\nopm_trans_fname = None\nopm_coil_def_fname = op.join(data_path, 'MEG', 'OPM', 'coil_def.dat')", "Load data, resample. We will store the raw objects in dicts with entries\n\"vv\" and \"opm\" to simplify housekeeping and simplify looping later.", "raws = dict()\nraw_erms = dict()\nnew_sfreq = 90. 
# Nyquist frequency (45 Hz) < line noise freq (50 Hz)\nraws['vv'] = mne.io.read_raw_fif(vv_fname, verbose='error') # ignore naming\nraws['vv'].load_data().resample(new_sfreq)\nraws['vv'].info['bads'] = ['MEG2233', 'MEG1842']\nraw_erms['vv'] = mne.io.read_raw_fif(vv_erm_fname, verbose='error')\nraw_erms['vv'].load_data().resample(new_sfreq)\nraw_erms['vv'].info['bads'] = ['MEG2233', 'MEG1842']\n\nraws['opm'] = mne.io.read_raw_fif(opm_fname)\nraws['opm'].load_data().resample(new_sfreq)\nraw_erms['opm'] = mne.io.read_raw_fif(opm_erm_fname)\nraw_erms['opm'].load_data().resample(new_sfreq)\n# Make sure our assumptions later hold\nassert raws['opm'].info['sfreq'] == raws['vv'].info['sfreq']", "Do some minimal artifact rejection just for VectorView data", "titles = dict(vv='VectorView', opm='OPM')\nssp_ecg, _ = mne.preprocessing.compute_proj_ecg(\n raws['vv'], tmin=-0.1, tmax=0.1, n_grad=1, n_mag=1)\nraws['vv'].add_proj(ssp_ecg, remove_existing=True)\n# due to how compute_proj_eog works, it keeps the old projectors, so\n# the output contains both projector types (and also the original empty-room\n# projectors)\nssp_ecg_eog, _ = mne.preprocessing.compute_proj_eog(\n raws['vv'], n_grad=1, n_mag=1, ch_name='MEG0112')\nraws['vv'].add_proj(ssp_ecg_eog, remove_existing=True)\nraw_erms['vv'].add_proj(ssp_ecg_eog)\nfig = mne.viz.plot_projs_topomap(raws['vv'].info['projs'][-4:],\n info=raws['vv'].info)\nfig.suptitle(titles['vv'])\nfig.subplots_adjust(0.05, 0.05, 0.95, 0.85)", "Explore data", "kinds = ('vv', 'opm')\nn_fft = next_fast_len(int(round(4 * new_sfreq)))\nprint('Using n_fft=%d (%0.1f sec)' % (n_fft, n_fft / raws['vv'].info['sfreq']))\nfor kind in kinds:\n fig = raws[kind].plot_psd(n_fft=n_fft, proj=True)\n fig.suptitle(titles[kind])\n fig.subplots_adjust(0.1, 0.1, 0.95, 0.85)", "Alignment and forward", "# Here we use a reduced size source space (oct5) just for speed\nsrc = mne.setup_source_space(\n subject, 'oct5', add_dist=False, subjects_dir=subjects_dir)\n# This line removes source-to-source distances that we will not need.\n# We only do it here to save a bit of memory, in general this is not required.\ndel src[0]['dist'], src[1]['dist']\nbem = mne.read_bem_solution(bem_fname)\nfwd = dict()\ntrans = dict(vv=vv_trans_fname, opm=opm_trans_fname)\n# check alignment and generate forward\nwith mne.use_coil_def(opm_coil_def_fname):\n for kind in kinds:\n dig = True if kind == 'vv' else False\n fig = mne.viz.plot_alignment(\n raws[kind].info, trans=trans[kind], subject=subject,\n subjects_dir=subjects_dir, dig=dig, coord_frame='mri',\n surfaces=('head', 'white'))\n mne.viz.set_3d_view(figure=fig, azimuth=0, elevation=90,\n distance=0.6, focalpoint=(0., 0., 0.))\n fwd[kind] = mne.make_forward_solution(\n raws[kind].info, trans[kind], src, bem, eeg=False, verbose=True)\ndel trans, src, bem", "Compute and apply inverse to PSD estimated using multitaper + Welch.\nGroup into frequency bands, then normalize each source point and sensor\nindependently. This makes the value of each sensor point and source location\nin each frequency band the percentage of the PSD accounted for by that band.", "freq_bands = dict(\n delta=(2, 4), theta=(5, 7), alpha=(8, 12), beta=(15, 29), gamma=(30, 45))\ntopos = dict(vv=dict(), opm=dict())\nstcs = dict(vv=dict(), opm=dict())\n\nsnr = 3.\nlambda2 = 1. 
/ snr ** 2\nfor kind in kinds:\n noise_cov = mne.compute_raw_covariance(raw_erms[kind])\n inverse_operator = mne.minimum_norm.make_inverse_operator(\n raws[kind].info, forward=fwd[kind], noise_cov=noise_cov, verbose=True)\n stc_psd, sensor_psd = mne.minimum_norm.compute_source_psd(\n raws[kind], inverse_operator, lambda2=lambda2,\n n_fft=n_fft, dB=False, return_sensor=True, verbose=True)\n topo_norm = sensor_psd.data.sum(axis=1, keepdims=True)\n stc_norm = stc_psd.sum() # same operation on MNE object, sum across freqs\n # Normalize each source point by the total power across freqs\n for band, limits in freq_bands.items():\n data = sensor_psd.copy().crop(*limits).data.sum(axis=1, keepdims=True)\n topos[kind][band] = mne.EvokedArray(\n 100 * data / topo_norm, sensor_psd.info)\n stcs[kind][band] = \\\n 100 * stc_psd.copy().crop(*limits).sum() / stc_norm.data\n del inverse_operator\ndel fwd, raws, raw_erms", "Now we can make some plots of each frequency band. Note that the OPM head\ncoverage is only over right motor cortex, so only localization\nof beta is likely to be worthwhile.\nTheta", "def plot_band(kind, band):\n \"\"\"Plot activity within a frequency band on the subject's brain.\"\"\"\n title = \"%s %s\\n(%d-%d Hz)\" % ((titles[kind], band,) + freq_bands[band])\n topos[kind][band].plot_topomap(\n times=0., scalings=1., cbar_fmt='%0.1f', vmin=0, cmap='inferno',\n time_format=title)\n brain = stcs[kind][band].plot(\n subject=subject, subjects_dir=subjects_dir, views='cau', hemi='both',\n time_label=title, title=title, colormap='inferno',\n clim=dict(kind='percent', lims=(70, 85, 99)), smoothing_steps=10)\n brain.show_view(dict(azimuth=0, elevation=0), roll=0)\n return fig, brain\n\n\nfig_theta, brain_theta = plot_band('vv', 'theta')", "Alpha", "fig_alpha, brain_alpha = plot_band('vv', 'alpha')", "Beta\nHere we also show OPM data, which shows a profile similar to the VectorView\ndata beneath the sensors.", "fig_beta, brain_beta = plot_band('vv', 'beta')\nfig_beta_opm, brain_beta_opm = plot_band('opm', 'beta')", "Gamma", "fig_gamma, brain_gamma = plot_band('vv', 'gamma')", "References\n.. [1] Tadel F, Baillet S, Mosher JC, Pantazis D, Leahy RM.\n Brainstorm: A User-Friendly Application for MEG/EEG Analysis.\n Computational Intelligence and Neuroscience, vol. 2011, Article ID\n 879716, 13 pages, 2011. doi:10.1155/2011/879716" ]
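The band normalisation described in the OPM record above (each band expressed as a percentage of the total PSD at a given sensor or source location) is easy to prototype on a toy array before running it on real MNE objects; the random data below is purely illustrative and stands in for the sensor or source PSD.

```python
# Toy version of the relative band power computation.
import numpy as np

rng = np.random.default_rng(0)
freqs = np.linspace(1., 45., 200)
psd = rng.random((5, freqs.size))            # 5 locations x n_freqs

freq_bands = dict(delta=(2, 4), theta=(5, 7), alpha=(8, 12),
                  beta=(15, 29), gamma=(30, 45))
total = psd.sum(axis=1)                      # total power per location

rel_power = {band: 100 * psd[:, (freqs >= lo) & (freqs <= hi)].sum(axis=1) / total
             for band, (lo, hi) in freq_bands.items()}
for band, vals in rel_power.items():
    print(band, vals.round(1))
```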
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
napjon/ds-nd
p1-statistics/project.ipynb
mit
[ "Overview\nIn a Stroop task, participants are presented with a list of words, with each word displayed in a color of ink. The participant’s task is to say out loud the color of the ink in which the word is printed. The task has two conditions: a congruent words condition, and an incongruent words condition. In the congruent words condition, the words being displayed are color words whose names match the colors in which they are printed: for example RED, BLUE. In the incongruent words condition, the words displayed are color words whose names do not match the colors in which they are printed: for example PURPLE, ORANGE. In each case, we measure the time it takes to name the ink colors in equally-sized lists. Each participant will go through and record a time from each condition.", "import pandas as pd\n%matplotlib inline\n\ndf = pd.read_csv('stroopdata.csv')\ndf['diff'] = df['Incongruent'] - df['Congruent']\ndf", "The experiment takes participants with two test, congruent task and incongruent task. Congruent task is word with agreeing text and font color, while incongruent is a different text and its font color. Both of the task require the participants to say it out loud the word that are being display, and press 'Finish' button to see which time do they take. The control group is the congruent task, while experiment group is ingconruent task.\nThe independent variables is which makes differ between congruent task and incongruent task. That is words that are being displayed. Participants are requested to say the font color of the words, which is the same for both control and experiment group. But while text displayed agree with color in congruent, incongruent is the other way around.\nThe dependent variables is time participants take to complete the task. The time is depend on whether the text agree with the font color being displayed. We can see that from the data, on average, the time participants took for incongruent task is different than when they solve congruent task. We will use statistical test to test whether the time is significantly different.\nSo what kind of paired data should we be asking? We know that in general Incongruent task take longer than Congruent task. So in Confidence Interval, we could be asking the interval in which Ingrouent takes more second than congruent, and in hypothesis we could be asking is whether the incongruent task results in significantly different than congruent task.\nOur sample size is less than 30, and that would means that our sampling distribution won't be normal. We're faced with two conditions, using t-test or bootstrapping. In this case, We will be using t-test. And since this is an experiment (assumed random assignment), we can draw causation. \nIn the instructions, it doesn't stated anywhere how the participants are collected. There might be a convenience bias(only participants that know the experiment), location bias(city/country where the experiment performed ), or voluntarily bias. Assumed participants randomly sampled without any bias at all. The result of this experiment can be generalized to world population.\nWe design the hypothesis test as follows:\nH0: $ \\mu_\\mathbf{congruent} = \\mu_\\mathbf{incongruent}$ The time took for population to solve both congruent task and incongruent task is the same, on average\nHA:$\\mu_\\mathbf{congruent} \\neq \\mu_\\mathbf{incongruent}$ The time took for population to solve both congruent task and incongruent task is different, on average\nWe're going to use two-sided t-statistics. 
This is an experiment where we have limited data and samples, and we want to test our hypothesis to the population parameters.", "df.describe()", "The measure of tendency that will be used in this situation is mean, and measure of variability is standard deviation.", "df.plot.scatter(x='Congruent',y='Incongruent');", "The plot shown a moderaly weak correlation between congruent variable and incongruent variable.", "(df.Incongruent - df.Congruent).plot.hist();", "We can see that is the difference is right skewed distribution. This makes sense, since congruent task is easier, there shouldn't be any participants that solve incongruent task shorter tha congruent task. And it should be the longer time it took for the participants at solving incongruent task, the less should be for the number of participants. \nHypothesis Testing", "%%R\n\nn = 24\nmu = 7.964792\ns = 4.864827\nCL = 0.95\nn = 24\n# z = round(qnorm((1-CL)/2, lower.tail=F),digits=2)\nSE = s/sqrt(n)\nt = mu/SE\nt_crit = round(qt((1-CL)/2,df=n-1),digits=3)\nc(t,c(-t_crit,t_crit))", "Since our t-statistics, 8.02 is higher than the t critical values, we can conclude that the data provides convincing evidence that the time participants took for incongruent task is significantly different than when they took congruent task.\nConfidence Interval", "%%R\n\nME = t*SE\nc(mu+ME,mu-ME)", "So we are 95% confident that participants on average, took incongruent task 5.91 to 10.02 seconds longer than congruent task. Since this is an experimental design, we can draw causation intead of correlation.\n\nREFERENCES:\n* http://napitupulu-jon.appspot.com/posts/paired-data-coursera-statistics.html\n* http://napitupulu-jon.appspot.com/posts/inference-means-anova.html\n* https://www.udacity.com/course/viewer#!/c-ud134-nd/l-4578095863/e-147019342/m-147300501" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
zlpure/CS231n
assignment1/features.ipynb
mit
[ "Image features exercise\nComplete and hand in this completed worksheet (including its outputs and any supporting code outside of the worksheet) with your assignment submission. For more details see the assignments page on the course website.\nWe have seen that we can achieve reasonable performance on an image classification task by training a linear classifier on the pixels of the input image. In this exercise we will show that we can improve our classification performance by training linear classifiers not on raw pixels but on features that are computed from the raw pixels.\nAll of your work for this exercise will be done in this notebook.", "import random\nimport numpy as np\nfrom cs231n.data_utils import load_CIFAR10\nimport matplotlib.pyplot as plt\n%matplotlib inline\nplt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots\nplt.rcParams['image.interpolation'] = 'nearest'\nplt.rcParams['image.cmap'] = 'gray'\n\n# for auto-reloading extenrnal modules\n# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython\n%load_ext autoreload\n%autoreload 2", "Load data\nSimilar to previous exercises, we will load CIFAR-10 data from disk.", "from cs231n.features import color_histogram_hsv, hog_feature\n\ndef get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=1000):\n # Load the raw CIFAR-10 data\n cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'\n X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)\n \n # Subsample the data\n mask = range(num_training, num_training + num_validation)\n X_val = X_train[mask]\n y_val = y_train[mask]\n mask = range(num_training)\n X_train = X_train[mask]\n y_train = y_train[mask]\n mask = range(num_test)\n X_test = X_test[mask]\n y_test = y_test[mask]\n\n return X_train, y_train, X_val, y_val, X_test, y_test\n\nX_train, y_train, X_val, y_val, X_test, y_test = get_CIFAR10_data()", "Extract Features\nFor each image we will compute a Histogram of Oriented\nGradients (HOG) as well as a color histogram using the hue channel in HSV\ncolor space. We form our final feature vector for each image by concatenating\nthe HOG and color histogram feature vectors.\nRoughly speaking, HOG should capture the texture of the image while ignoring\ncolor information, and the color histogram represents the color of the input\nimage while ignoring texture. As a result, we expect that using both together\nought to work better than using either alone. Verifying this assumption would\nbe a good thing to try for the bonus section.\nThe hog_feature and color_histogram_hsv functions both operate on a single\nimage and return a feature vector for that image. The extract_features\nfunction takes a set of images and a list of feature functions and evaluates\neach feature function on each image, storing the results in a matrix where\neach column is the concatenation of all feature vectors for a single image.", "from cs231n.features import *\n\nnum_color_bins = 10 # Number of bins in the color histogram\nfeature_fns = [hog_feature, lambda img: color_histogram_hsv(img, nbin=num_color_bins)]\nX_train_feats = extract_features(X_train, feature_fns, verbose=True)\nX_val_feats = extract_features(X_val, feature_fns)\nX_test_feats = extract_features(X_test, feature_fns)\n\n# Preprocessing: Subtract the mean feature\nmean_feat = np.mean(X_train_feats, axis=0, keepdims=True)\nX_train_feats -= mean_feat\nX_val_feats -= mean_feat\nX_test_feats -= mean_feat\n\n# Preprocessing: Divide by standard deviation. 
This ensures that each feature\n# has roughly the same scale.\nstd_feat = np.std(X_train_feats, axis=0, keepdims=True)\nX_train_feats /= std_feat\nX_val_feats /= std_feat\nX_test_feats /= std_feat\n\n# Preprocessing: Add a bias dimension\nX_train_feats = np.hstack([X_train_feats, np.ones((X_train_feats.shape[0], 1))])\nX_val_feats = np.hstack([X_val_feats, np.ones((X_val_feats.shape[0], 1))])\nX_test_feats = np.hstack([X_test_feats, np.ones((X_test_feats.shape[0], 1))])", "Train SVM on features\nUsing the multiclass SVM code developed earlier in the assignment, train SVMs on top of the features extracted above; this should achieve better results than training SVMs directly on top of raw pixels.", "# Use the validation set to tune the learning rate and regularization strength\n\nfrom cs231n.classifiers.linear_classifier import LinearSVM\n\nlearning_rates = [1e-9, 1e-8, 1e-7]\nregularization_strengths = [1e5, 1e6, 1e7]\n\nresults = {}\nbest_val = -1\nbest_svm = None\n\npass\n################################################################################\n# TODO: #\n# Use the validation set to set the learning rate and regularization strength. #\n# This should be identical to the validation that you did for the SVM; save #\n# the best trained classifer in best_svm. You might also want to play #\n# with different numbers of bins in the color histogram. If you are careful #\n# you should be able to get accuracy of near 0.44 on the validation set. #\n################################################################################\nfor i in learning_rates:\n for j in regularization_strengths:\n svm=LinearSVM()\n svm.train(X_train_feats, y_train, learning_rate=i, reg=j,num_iters=5000, verbose=False)\n \n y_pred=svm.predict(X_train_feats)\n y_val_pred=svm.predict(X_val_feats)\n \n train_accuracy=np.mean(y_pred==y_train)\n val_accuracy=np.mean(y_val_pred==y_val)\n print train_accuracy, val_accuracy\n \n results[(i,j)]=(train_accuracy,val_accuracy)\n \n if val_accuracy>best_val:\n best_val=val_accuracy\n best_svm=svm\n################################################################################\n# END OF YOUR CODE #\n################################################################################\n\n# Print out results.\nfor lr, reg in sorted(results):\n train_accuracy, val_accuracy = results[(lr, reg)]\n print 'lr %e reg %e train accuracy: %f val accuracy: %f' % (\n lr, reg, train_accuracy, val_accuracy)\n \nprint 'best validation accuracy achieved during cross-validation: %f' % best_val\n\n# Evaluate your trained SVM on the test set\ny_test_pred = best_svm.predict(X_test_feats)\ntest_accuracy = np.mean(y_test == y_test_pred)\nprint test_accuracy\n\n# An important way to gain intuition about how an algorithm works is to\n# visualize the mistakes that it makes. In this visualization, we show examples\n# of images that are misclassified by our current system. 
The first column\n# shows images that our system labeled as \"plane\" but whose true label is\n# something other than \"plane\".\n\nexamples_per_class = 8\nclasses = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']\nfor cls, cls_name in enumerate(classes):\n idxs = np.where((y_test != cls) & (y_test_pred == cls))[0]\n idxs = np.random.choice(idxs, examples_per_class, replace=False)\n for i, idx in enumerate(idxs):\n plt.subplot(examples_per_class, len(classes), i * len(classes) + cls + 1)\n plt.imshow(X_test[idx].astype('uint8'))\n plt.axis('off')\n if i == 0:\n plt.title(cls_name)\nplt.show()", "Inline question 1:\nDescribe the misclassification results that you see. Do they make sense?\nNeural Network on image features\nEarlier in this assigment we saw that training a two-layer neural network on raw pixels achieved better classification performance than linear classifiers on raw pixels. In this notebook we have seen that linear classifiers on image features outperform linear classifiers on raw pixels. \nFor completeness, we should also try training a neural network on image features. This approach should outperform all previous approaches: you should easily be able to achieve over 55% classification accuracy on the test set; our best model achieves about 60% classification accuracy.", "print X_train_feats.shape\n\nfrom cs231n.classifiers.neural_net import TwoLayerNet\n\ninput_dim = X_train_feats.shape[1]\nhidden_dim = 500\nnum_classes = 10\n\nnet = TwoLayerNet(input_dim, hidden_dim, num_classes)\nbest_net = None\n\n################################################################################\n# TODO: Train a two-layer neural network on image features. You may want to #\n# cross-validate various parameters as in previous sections. Store your best #\n# model in the best_net variable. #\n################################################################################\nmaxn=20\nbest_val=0\nfor i in xrange(maxn):\n net_exp=net.train(X_train_feats, y_train, X_val_feats, y_val,\n learning_rate=1e-2, learning_rate_decay=0.95,\n reg=1e-5, num_iters=1000,\n batch_size=200, verbose=False)\n acc_val=np.mean(net.predict(X_test_feats)==y_test)\n print acc_val\n if acc_val>best_val:\n best_val=acc_val\n best_net=net\n################################################################################\n# END OF YOUR CODE #\n################################################################################\n\n# Run your neural net classifier on the test set. You should be able to\n# get more than 55% accuracy.\n\ntest_acc = (best_net.predict(X_test_feats) == y_test).mean()\nprint test_acc", "Bonus: Design your own features!\nYou have seen that simple image features can improve classification performance. So far we have tried HOG and color histograms, but other types of features may be able to achieve even better classification performance.\nFor bonus points, design and implement a new type of feature and use it for image classification on CIFAR-10. Explain how your feature works and why you expect it to be useful for image classification. Implement it in this notebook, cross-validate any hyperparameters, and compare its performance to the HOG + Color histogram baseline.\nBonus: Do something extra!\nUse the material and code we have presented in this assignment to do something interesting. Was there another question we should have asked? Did any cool ideas pop into your head as you were working on the assignment? This is your chance to show off!" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
kecnry/autofig
docs/tutorials/3d.ipynb
gpl-3.0
[ "Plotting in 3D with autofig", "import autofig\nimport numpy as np\n\n#autofig.inline()\n\nt = np.linspace(0,10,31)\nx = np.random.rand(31)\ny = np.random.rand(31)\nz = np.random.rand(31)", "By default, autofig uses the z dimension just to assign z-order (so that positive z appears \"on top\")", "autofig.reset()\nautofig.plot(x, y, z, i=t,\n xlabel='x', ylabel='y', zlabel='z')\nmplfig = autofig.draw()", "To instead plot using a projected 3d axes, simply pass projection='3d'", "autofig.reset()\nautofig.plot(x, y, z, i=t, \n xlabel='x', ylabel='y', zlabel='z',\n projection='3d')\nmplfig = autofig.draw()", "If the projection is set to 3d, you can also set the elevation ('elev') and azimuth ('azim') of the viewing angle. These are provided in degrees and can be either a float (fixed) or a list (changes as a function of the current value of i).", "autofig.reset()\nautofig.plot(x, y, z, i=t, \n xlabel='x', ylabel='y', zlabel='z',\n projection='3d', elev=0, azim=0)\nmplfig = autofig.draw()", "When provided as an array, the set viewing angle is determined as follows:\n\nif no i is passed, the median values of 'elev' and 'azim' are used\nif i is passed, then linear interpolation is used across the i dimension of all calls attached to that axes\n\nTherefore, passing an array (or list or tuple) with two items will simply set the lower and upper bounds. If you want the axes to rotate more than once, simply provide angles above 360.", "autofig.reset()\nautofig.plot(x, y, z, i=t, \n xlabel='x', ylabel='y', zlabel='z',\n projection='3d', elev=0, azim=[0, 180])\nmplfig = autofig.draw(i=3)\n\nanim = autofig.animate(i=t, \n save='3d_azim_2.gif', save_kwargs={'writer': 'imagemagick'})", "We can then achieve an \"accelerating\" rotation by passing finer detail on the azimuth as a function of 'i'.", "autofig.reset()\nautofig.plot(x, y, z, i=t, \n xlabel='x', ylabel='y', zlabel='z',\n projection='3d', elev=0, azim=[0, 20, 30, 50, 150, 180])\nanim = autofig.animate(i=t, \n save='3d_azim_6.gif', save_kwargs={'writer': 'imagemagick'})", "" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
goddoe/CADL
session-1/session-1.ipynb
apache-2.0
[ "Session 1 - Introduction to Tensorflow\n<p class=\"lead\">\nAssignment: Creating a Dataset/Computing with Tensorflow\n</p>\n\n<p class=\"lead\">\nParag K. Mital<br />\n<a href=\"https://www.kadenze.com/courses/creative-applications-of-deep-learning-with-tensorflow/info\">Creative Applications of Deep Learning w/ Tensorflow</a><br />\n<a href=\"https://www.kadenze.com/partners/kadenze-academy\">Kadenze Academy</a><br />\n<a href=\"https://twitter.com/hashtag/CADL\">#CADL</a>\n</p>\n\nThis work is licensed under a <a rel=\"license\" href=\"http://creativecommons.org/licenses/by-nc-sa/4.0/\">Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License</a>.\nLearning Goals\n\nLearn how to normalize a dataset by calculating the mean/std. deviation\nLearn how to use convolution\nExplore what representations exist in your dataset\n\nOutline\n<!-- MarkdownTOC autolink=true autoanchor=true bracket=round -->\n\n\nAssignment Synopsis\nPart One - Create a Small Dataset\nInstructions\nCode\nPart Two - Compute the Mean\nInstructions\nCode\nPart Three - Compute the Standard Deviation\nInstructions\nCode\nPart Four - Normalize the Dataset\nInstructions\nCode\nPart Five - Convolve the Dataset\nInstructions\nCode\nPart Six - Sort the Dataset\nInstructions\nCode\nAssignment Submission\n\n<!-- /MarkdownTOC -->\n\n<h1>Notebook</h1>\n\nEverything you will need to do will be inside of this notebook, and I've marked which cells you will need to edit by saying <b><font color='red'>\"TODO! COMPLETE THIS SECTION!\"</font></b>. For you to work with this notebook, you'll either download the zip file from the resources section on Kadenze or clone the github repo (whichever you are more comfortable with), and then run notebook inside the same directory as wherever this file is located using the command line \"jupyter notebook\" or \"ipython notebook\" (using Terminal on Unix/Linux/OSX, or Command Line/Shell/Powershell on Windows). If you are unfamiliar with jupyter notebook, please look at Installation Preliminaries and Session 0 before starting!\nOnce you have launched notebook, this will launch a web browser with the contents of the zip files listed. Click the file \"session-1.ipynb\" and this document will open in an interactive notebook, allowing you to \"run\" the cells, computing them using python, and edit the text inside the cells.\n<a name=\"assignment-synopsis\"></a>\nAssignment Synopsis\nThis first homework assignment will guide you through working with a small dataset of images. For Part 1, you'll need to find 100 images and use the function I've provided to create a montage of your images, saving it to the file \"dataset.png\" (template code provided below). You can load an existing dataset of images, find your own images, or perhaps create your own images using a creative process such as painting, photography, or something along those lines. Each image will be reshaped to 100 x 100 pixels. There needs to be at least 100 images. For Parts 2 and 3, you'll then calculate the mean and deviation of it using a tensorflow session. In Part 4, you'll normalize your dataset using the mean and deviation. Then in Part 5, you will convolve your normalized dataset. For Part 6, you'll need to sort the entire convolved dataset. Finally, the last part will package everything for you in a zip file which you can upload to Kadenze to get assessed (only if you are a Kadenze Premium member, $10 p/m, free for the first month). 
Remember to complete the additional excercises online, including the Gallery participation and the Forum post. If you have any questions, be sure to enroll in the course and ask your peers in the #CADL community or me on the forums!\nhttps://www.kadenze.com/courses/creative-applications-of-deep-learning-with-tensorflow/info\nThe following assignment breakdown gives more detailed instructions and includes template code for you to fill out. Good luck!\n<a name=\"part-one---create-a-small-dataset\"></a>\nPart One - Create a Small Dataset\n<a name=\"instructions\"></a>\nInstructions\nUse Python, Numpy, and Matplotlib to load a dataset of 100 images and create a montage of the dataset as a 10 x 10 image using the function below. You'll need to make sure you call the function using a 4-d array of N x H x W x C dimensions, meaning every image will need to be the same size! You can load an existing dataset of images, find your own images, or perhaps create your own images using a creative process such as painting, photography, or something along those lines.\nWhen you are creating your dataset, I want you to think about what representations might exist in the limited amount of data that you are organizing. It is only 100 images after all, not a whole lot for a computer to reason about and learn something meaningful. So <b>think about creating a dataset of images that could possibly reveal something fundamental about what is contained in the images</b>. Try to think about creating a set of images that represents something. For instance, this might be images of yourself over time. Or it might be every picture you've ever taken of your cat. Or perhaps the view from your room at different times of the day. Consider making the changes within each image as significant as possible. As \"representative\" of the thing you want to capture as possible. Hopefully by the end of this lesson, you'll understand a little better the difference between what a computer thinks is significant and what you yourself thought was significant.\nThe code below will show you how to resize and/or crop your images so that they are 100 pixels x 100 pixels in height and width. Once you have 100 images loaded, we'll use a montage function to draw and save your dataset to the file <b>dataset.png</b>.\n<a name=\"code\"></a>\nCode\nThis next section will just make sure you have the right version of python and the libraries that we'll be using. Don't change the code here but make sure you \"run\" it (use \"shift+enter\")!", "# First check the Python version\nimport sys\nif sys.version_info < (3,4):\n print('You are running an older version of Python!\\n\\n' \\\n 'You should consider updating to Python 3.4.0 or ' \\\n 'higher as the libraries built for this course ' \\\n 'have only been tested in Python 3.4 and higher.\\n')\n print('Try installing the Python 3.5 version of anaconda '\n 'and then restart `jupyter notebook`:\\n' \\\n 'https://www.continuum.io/downloads\\n\\n')\n\n# Now get necessary libraries\ntry:\n import os\n import numpy as np\n import matplotlib.pyplot as plt\n from skimage.transform import resize\nexcept ImportError:\n print('You are missing some packages! 
' \\\n 'We will try installing them before continuing!')\n !pip install \"numpy>=1.11.0\" \"matplotlib>=1.5.1\" \"scikit-image>=0.11.3\" \"scikit-learn>=0.17\"\n import os\n import numpy as np\n import matplotlib.pyplot as plt\n from skimage.transform import resize\n print('Done!')\n\n# Import Tensorflow\ntry:\n import tensorflow as tf\nexcept ImportError:\n print(\"You do not have tensorflow installed!\")\n print(\"Follow the instructions on the following link\")\n print(\"to install tensorflow before continuing:\")\n print(\"\")\n print(\"https://github.com/pkmital/CADL#installation-preliminaries\")\n\n# This cell includes the provided libraries from the zip file\ntry:\n from libs import utils\nexcept ImportError:\n print(\"Make sure you have started notebook in the same directory\" +\n \" as the provided zip file which includes the 'libs' folder\" +\n \" and the file 'utils.py' inside of it. You will NOT be able\"\n \" to complete this assignment unless you restart jupyter\"\n \" notebook inside the directory created by extracting\"\n \" the zip file or cloning the github repo.\")\n\n# We'll tell matplotlib to inline any drawn figures like so:\n%matplotlib inline\nplt.style.use('ggplot')\n\n# Bit of formatting because inline code is not styled very good by default:\nfrom IPython.core.display import HTML\nHTML(\"\"\"<style> .rendered_html code { \n padding: 2px 4px;\n color: #c7254e;\n background-color: #f9f2f4;\n border-radius: 4px;\n} </style>\"\"\")", "Places your images in a folder such as dirname = '/Users/Someone/Desktop/ImagesFromTheInternet'. We'll then use the os package to load them and crop/resize them to a standard size of 100 x 100 pixels.\n<h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>", "# You need to find 100 images from the web/create them yourself\n# or find a dataset that interests you (e.g. I used celeb faces\n# in the course lecture...)\n# then store them all in a single directory.\n# With all the images in a single directory, you can then\n# perform the following steps to create a 4-d array of:\n# N x H x W x C dimensions as 100 x 100 x 100 x 3.\n\ndirname = ...\n\n# Load every image file in the provided directory\nfilenames = [os.path.join(dirname, fname)\n for fname in os.listdir(dirname)]\n\n# Make sure we have exactly 100 image files!\nfilenames = filenames[:100]\nassert(len(filenames) == 100)\n\n# Read every filename as an RGB image\nimgs = [plt.imread(fname)[..., :3] for fname in filenames]\n\n# Crop every image to a square\nimgs = [utils.imcrop_tosquare(img_i) for img_i in imgs]\n\n# Then resize the square image to 100 x 100 pixels\nimgs = [resize(img_i, (100, 100)) for img_i in imgs]\n\n# Finally make our list of 3-D images a 4-D array with the first dimension the number of images:\nimgs = np.array(imgs).astype(np.float32)\n\n# Plot the resulting dataset:\n# Make sure you \"run\" this cell after you create your `imgs` variable as a 4-D array!\n# Make sure we have a 100 x 100 x 100 x 3 dimension array\nassert(imgs.shape == (100, 100, 100, 3))\nplt.figure(figsize=(10, 10))\nplt.imshow(utils.montage(imgs, saveto='dataset.png'))", "<a name=\"part-two---compute-the-mean\"></a>\nPart Two - Compute the Mean\n<a name=\"instructions-1\"></a>\nInstructions\nFirst use Tensorflow to define a session. Then use Tensorflow to create an operation which takes your 4-d array and calculates the mean color image (100 x 100 x 3) using the function tf.reduce_mean. 
Have a look at the documentation for this function to see how it works in order to get the mean of every pixel and get an image of (100 x 100 x 3) as a result. You'll then calculate the mean image by running the operation you create with your session (e.g. <code>sess.run(...)</code>). Finally, plot the mean image, save it, and then include this image in your zip file as <b>mean.png</b>.\n<a name=\"code-1\"></a>\nCode\n<h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>", "# First create a tensorflow session\nsess = ...\n\n# Now create an operation that will calculate the mean of your images\nmean_img_op = ...\n\n# And then run that operation using your session\nmean_img = sess.run(mean_img_op)\n\n# Then plot the resulting mean image:\n# Make sure the mean image is the right size!\nassert(mean_img.shape == (100, 100, 3))\nplt.figure(figsize=(10, 10))\nplt.imshow(mean_img)\nplt.imsave(arr=mean_img, fname='mean.png')", "Once you have seen the mean image of your dataset, how does it relate to your own expectations of the dataset? Did you expect something different? Was there something more \"regular\" or \"predictable\" about your dataset that the mean image did or did not reveal? If your mean image looks a lot like something recognizable, it's a good sign that there is a lot of predictability in your dataset. If your mean image looks like nothing at all, a gray blob where not much seems to stand out, then it's pretty likely that there isn't very much in common between your images. Neither is a bad scenario. Though, it is more likely that having some predictability in your mean image, e.g. something recognizable, that there are representations worth exploring with deeper networks capable of representing them. However, we're only using 100 images so it's a very small dataset to begin with.\n<a name=\"part-three---compute-the-standard-deviation\"></a>\nPart Three - Compute the Standard Deviation\n<a name=\"instructions-2\"></a>\nInstructions\nNow use tensorflow to calculate the standard deviation and upload the standard deviation image averaged across color channels as a \"jet\" heatmap of the 100 images. This will be a little more involved as there is no operation in tensorflow to do this for you. However, you can do this by calculating the mean image of your dataset as a 4-D array. To do this, you could write e.g. mean_img_4d = tf.reduce_mean(imgs, axis=0, keep_dims=True) to give you a 1 x H x W x C dimension array calculated on the N x H x W x C images variable. The axis parameter is saying to calculate the mean over the 0th dimension, meaning for every possible H, W, C, or for every pixel, you will have a mean composed over the N possible values it could have had, or what that pixel was for every possible image. This way, you can write images - mean_img_4d to give you a N x H x W x C dimension variable, with every image in your images array having been subtracted by the mean_img_4d. If you calculate the square root of the expected squared differences of this resulting operation, you have your standard deviation!\nIn summary, you'll need to write something like: subtraction = imgs - tf.reduce_mean(imgs, axis=0, keep_dims=True), then reduce this operation using tf.sqrt(tf.reduce_mean(subtraction * subtraction, axis=0)) to get your standard deviation then include this image in your zip file as <b>std.png</b>\n<a name=\"code-2\"></a>\nCode\n<h3><font color='red'>TODO! 
COMPLETE THIS SECTION!</font></h3>", "# Create a tensorflow operation to give you the standard deviation\n\n# First compute the difference of every image with a\n# 4 dimensional mean image shaped 1 x H x W x C\nmean_img_4d = ...\n\nsubtraction = imgs - mean_img_4d\n\n# Now compute the standard deviation by calculating the\n# square root of the expected squared differences\nstd_img_op = tf.sqrt(tf.reduce_mean(subtraction * subtraction, axis=0))\n\n# Now calculate the standard deviation using your session\nstd_img = sess.run(std_img_op)\n\n# Then plot the resulting standard deviation image:\n# Make sure the std image is the right size!\nassert(std_img.shape == (100, 100) or std_img.shape == (100, 100, 3))\nplt.figure(figsize=(10, 10))\nstd_img_show = std_img / np.max(std_img)\nplt.imshow(std_img_show)\nplt.imsave(arr=std_img_show, fname='std.png')", "Once you have plotted your dataset's standard deviation per pixel, what does it reveal about your dataset? Like with the mean image, you should consider what is predictable and not predictable about this image.\n<a name=\"part-four---normalize-the-dataset\"></a>\nPart Four - Normalize the Dataset\n<a name=\"instructions-3\"></a>\nInstructions\nUsing tensorflow, we'll attempt to normalize your dataset using the mean and standard deviation. \n<a name=\"code-3\"></a>\nCode\n<h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>", "norm_imgs_op = ...\n\nnorm_imgs = sess.run(norm_imgs_op)\nprint(np.min(norm_imgs), np.max(norm_imgs))\nprint(imgs.dtype)\n\n# Then plot the resulting normalized dataset montage:\n# Make sure we have a 100 x 100 x 100 x 3 dimension array\nassert(norm_imgs.shape == (100, 100, 100, 3))\nplt.figure(figsize=(10, 10))\nplt.imshow(utils.montage(norm_imgs, 'normalized.png'))", "We apply another type of normalization to 0-1 just for the purposes of plotting the image. If we didn't do this, the range of our values would be somewhere between -1 and 1, and matplotlib would not be able to interpret the entire range of values. By rescaling our -1 to 1 valued images to 0-1, we can visualize it better.", "norm_imgs_show = (norm_imgs - np.min(norm_imgs)) / (np.max(norm_imgs) - np.min(norm_imgs))\nplt.figure(figsize=(10, 10))\nplt.imshow(utils.montage(norm_imgs_show, 'normalized.png'))", "<a name=\"part-five---convolve-the-dataset\"></a>\nPart Five - Convolve the Dataset\n<a name=\"instructions-4\"></a>\nInstructions\nUsing tensorflow, we'll attempt to convolve your dataset with one of the kernels we created during the lesson, and then in the next part, we'll take the sum of the convolved output to use for sorting. You should use the function utils.gabor to create an edge detector. You can also explore with the utils.gauss2d kernel. What you must figure out is how to reshape your kernel to be 4-dimensional: K_H, K_W, C_I, and C_O, corresponding to the kernel's height and width (e.g. 16), the number of input channels (RGB = 3 input channels), and the number of output channels, (1).\n<a name=\"code-4\"></a>\nCode\n<h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>", "# First build 3 kernels for each input color channel\nksize = ...\nkernel = np.concatenate([utils.gabor(ksize)[:, :, np.newaxis] for i in range(3)], axis=2)\n \n# Now make the kernels into the shape: [ksize, ksize, 3, 1]:\nkernel_4d = ...\nassert(kernel_4d.shape == (ksize, ksize, 3, 1))", "We'll Perform the convolution with the 4d tensor in kernel_4d. 
This is a ksize x ksize x 3 x 1 tensor, where each input color channel corresponds to one filter with 1 output. Each filter looks like:", "plt.figure(figsize=(5, 5))\nplt.imshow(kernel_4d[:, :, 0, 0], cmap='gray')\nplt.imsave(arr=kernel_4d[:, :, 0, 0], fname='kernel.png', cmap='gray')", "Perform the convolution with the 4d tensors:\n<h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>", "convolved = utils.convolve(...\n\nconvolved_show = (convolved - np.min(convolved)) / (np.max(convolved) - np.min(convolved))\nprint(convolved_show.shape)\nplt.figure(figsize=(10, 10))\nplt.imshow(utils.montage(convolved_show[..., 0], 'convolved.png'), cmap='gray')", "What we've just done is build a \"hand-crafted\" feature detector: the Gabor Kernel. This kernel is built to respond to particular orientation: horizontal edges, and a particular scale. It also responds equally to R, G, and B color channels, as that is how we have told the convolve operation to work: use the same kernel for every input color channel. When we work with deep networks, we'll see how we can learn the convolution kernels for every color channel, and learn many more of them, in the order of 100s per color channel. That is really where the power of deep networks will start to become obvious. For now, we've seen just how difficult it is to get at any higher order features of the dataset. We've really only picked out some edges!\n<a name=\"part-six---sort-the-dataset\"></a>\nPart Six - Sort the Dataset\n<a name=\"instructions-5\"></a>\nInstructions\nUsing tensorflow, we'll attempt to organize your dataset. We'll try sorting based on the mean value of each convolved image's output to use for sorting. To do this, we could calculate either the sum value (tf.reduce_sum) or the mean value (tf.reduce_mean) of each image in your dataset and then use those values, e.g. stored inside a variable values to sort your images using something like tf.nn.top_k and sorted_imgs = np.array([imgs[idx_i] for idx_i in idxs]) prior to creating the montage image, m = montage(sorted_imgs, \"sorted.png\") and then include this image in your zip file as <b>sorted.png</b>\n<a name=\"code-5\"></a>\nCode\n<h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>", "# Create a set of operations using tensorflow which could\n# provide you for instance the sum or mean value of every\n# image in your dataset:\n\n# First flatten our convolved images so instead of many 3d images,\n# we have many 1d vectors.\n# This should convert our 4d representation of N x H x W x C to a\n# 2d representation of N x (H*W*C)\nflattened = tf.reshape(convolved...\nassert(flattened.get_shape().as_list() == [100, 10000])\n\n# Now calculate some statistics about each of our images\nvalues = tf.reduce_sum(flattened, axis=1)\n\n# Then create another operation which sorts those values\n# and then calculate the result:\nidxs_op = tf.nn.top_k(values, k=100)[1]\nidxs = sess.run(idxs_op)\n\n# Then finally use the sorted indices to sort your images:\nsorted_imgs = np.array([imgs[idx_i] for idx_i in idxs])\n\n# Then plot the resulting sorted dataset montage:\n# Make sure we have a 100 x 100 x 100 x 3 dimension array\nassert(sorted_imgs.shape == (100, 100, 100, 3))\nplt.figure(figsize=(10, 10))\nplt.imshow(utils.montage(sorted_imgs, 'sorted.png'))", "What does your sorting reveal? Could you imagine the same sorting over many more images reveal the thing your dataset sought to represent? 
It is likely that the representations that you wanted to find hidden within \"higher layers\", i.e., \"deeper features\" of the image, and that these \"low level\" features, edges essentially, are not very good at describing the really interesting aspects of your dataset. In later sessions, we'll see how we can combine the outputs of many more convolution kernels that have been assembled in a way that accentuate something very particular about each image, and build a sorting that is much more intelligent than this one!\n<a name=\"assignment-submission\"></a>\nAssignment Submission\nNow that you've completed all 6 parts, we'll create a zip file of the current directory using the code below. This code will make sure you have included this completed ipython notebook and the following files named exactly as:\n<pre>\n session-1/\n session-1.ipynb\n dataset.png\n mean.png\n std.png\n normalized.png\n kernel.png\n convolved.png\n sorted.png\n libs/\n utils.py\n</pre>\n\nYou'll then submit this zip file for your first assignment on Kadenze for \"Assignment 1: Datasets/Computing with Tensorflow\"! If you have any questions, remember to reach out on the forums and connect with your peers or with me.\n<b>To get assessed, you'll need to be a premium student.</b> If you aren't already enrolled as a student, register now at http://www.kadenze.com/ and join the #CADL community to see what your peers are doing! https://www.kadenze.com/courses/creative-applications-of-deep-learning-with-tensorflow/info\nThen remember to complete the remaining parts of Assignemnt 1 on Kadenze!:\n* Comment on 1 student's open-ended arrangement (Part 6) in the course gallery titled \"Creating a Dataset/ Computing with Tensorflow\". Think about what images they've used in their dataset and how the arrangement reflects what could be represented by that data.\n* Finally make a forum post in the forum for this assignment \"Creating a Dataset/ Computing with Tensorflow\".\n - Including a link to an artist making use of machine learning to organize data or finding representations within large datasets\n - Tell a little about their work (min 20 words).\n - Comment on at least 2 other student's forum posts (min 20 words)\nMake sure your notebook is named \"session-1\" or else replace it with the correct name in the list of files below:", "utils.build_submission('session-1.zip',\n ('dataset.png',\n 'mean.png',\n 'std.png',\n 'normalized.png',\n 'kernel.png',\n 'convolved.png',\n 'sorted.png',\n 'session-1.ipynb'))" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
quantumlib/Cirq
docs/tutorials/shor.ipynb
apache-2.0
[ "Copyright 2020 The Cirq Developers", "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "Shor's algorithm\n<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://quantumai.google/cirq/tutorials/shor\"><img src=\"https://quantumai.google/site-assets/images/buttons/quantumai_logo_1x.png\" />View on QuantumAI</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/quantumlib/Cirq/blob/master/docs/tutorials/shor.ipynb\"><img src=\"https://quantumai.google/site-assets/images/buttons/colab_logo_1x.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/quantumlib/Cirq/blob/master/docs/tutorials/shor.ipynb\"><img src=\"https://quantumai.google/site-assets/images/buttons/github_logo_1x.png\" />View source on GitHub</a>\n </td>\n <td>\n <a href=\"https://storage.googleapis.com/tensorflow_docs/Cirq/docs/tutorials/shor.ipynb\"><img src=\"https://quantumai.google/site-assets/images/buttons/download_icon_1x.png\" />Download notebook</a>\n </td>\n</table>\n\nThis tutorial presents a pedagogical demonstration of Shor's algorithm. It is a modified and expanded version of this Cirq example.", "\"\"\"Install Cirq.\"\"\"\ntry:\n import cirq\nexcept ImportError:\n print(\"installing cirq...\")\n !pip install --quiet cirq\n print(\"installed cirq.\")\n\n\"\"\"Imports for the notebook.\"\"\"\nimport fractions\nimport math\nimport random\n\nimport numpy as np\nimport sympy\nfrom typing import Callable, List, Optional, Sequence, Union\n\nimport cirq", "Order finding\nFactoring an integer $n$ can be reduced to finding the period of the <i>modular exponential function</i> (to be defined). Finding this period can be accomplished (with high probability) by finding the <i>order</i> of a randomly chosen element of the multiplicative group modulo $n$.\nLet $n$ be a positive integer and \n$$ \\mathbb{Z}n := {x \\in \\mathbb{Z}+ : x < n \\text{ and } \\text{gcd}(x, n) = 1} $$\nbe the multiplicative group modulo $n$.\nGiven $x \\in \\mathbb{Z}_n$, compute the smallest positive integer $r$ such that $x^r \\text{ mod } n = 1$.\nIt can be shown from group/number theory that:\n(1) Such an integer $r$ exists. (Note that $g^{|G|} = 1_G$ for any group $G$ with cardinality $|G|$ and element $g \\in G$, but it's possible that $r < |G|$.)\n(2) If $n = pq$ for primes $p$ and $q$, then $|\\mathbb{Z}_n| = \\phi(n) = (p - 1) (q - 1)$. (The function $\\phi$ is called Euler's totient function.)\n(3) The modular exponential function\n$$ f_x(z) := x^z \\mod n $$\nis periodic with period $r$ (the order of the element $x \\in \\mathbb{Z}_n$). That is, $f_x(z + r) = f_x(z)$. 
\n(4) If we know the period of the modular exponential function, we can (with high probability) figure out $p$ and $q$ -- that is, factor $n$.\nAs a refresher, we can visualize the elements of some multiplicative groups $\\mathbb{Z}_n$ for integers $n$ via the following simple function.", "\"\"\"Function to compute the elements of Z_n.\"\"\"\ndef multiplicative_group(n: int) -> List[int]:\n \"\"\"Returns the multiplicative group modulo n.\n \n Args:\n n: Modulus of the multiplicative group.\n \"\"\"\n assert n > 1\n group = [1]\n for x in range(2, n):\n if math.gcd(x, n) == 1:\n group.append(x)\n return group", "For example, the multiplicative group modulo $n = 15$ is shown below.", "\"\"\"Example of a multiplicative group.\"\"\"\nn = 15\nprint(f\"The multiplicative group modulo n = {n} is:\")\nprint(multiplicative_group(n))", "One can check that this set of elements indeed forms a group (under ordinary multiplication).\nClassical order finding\nA function for classically computing the order $r$ of an element $x \\in \\mathbb{Z}_n$ is provided below. This function simply computes the sequence \n$$ x^2 \\text{ mod } n $$\n$$ x^3 \\text{ mod } n $$\n$$ x^4 \\text{ mod } n $$\n$$ \\vdots $$\nuntil an integer $r$ is found such that $x^r = 1 \\text{ mod } n$. Since $|\\mathbb{Z}_n| = \\phi(n)$, this algorithm for order finding has time complexity $O(\\phi(n))$ which is inefficient. (Roughly $O(2^{L / 2})$ where $L$ is the number of bits in $n$.)", "\"\"\"Function for classically computing the order of an element of Z_n.\"\"\"\ndef classical_order_finder(x: int, n: int) -> Optional[int]:\n \"\"\"Computes smallest positive r such that x**r mod n == 1.\n\n Args:\n x: Integer whose order is to be computed, must be greater than one\n and belong to the multiplicative group of integers modulo n (which\n consists of positive integers relatively prime to n),\n n: Modulus of the multiplicative group.\n\n Returns:\n Smallest positive integer r such that x**r == 1 mod n.\n Always succeeds (and hence never returns None).\n\n Raises:\n ValueError when x is 1 or not an element of the multiplicative\n group of integers modulo n.\n \"\"\"\n # Make sure x is both valid and in Z_n.\n if x < 2 or x >= n or math.gcd(x, n) > 1:\n raise ValueError(f\"Invalid x={x} for modulus n={n}.\")\n \n # Determine the order.\n r, y = 1, x\n while y != 1:\n y = (x * y) % n\n r += 1\n return r", "An example of computing $r$ for a given $x \\in \\mathbb{Z}_n$ and given $n$ is shown in the code block below.", "\"\"\"Example of (classically) computing the order of an element.\"\"\"\nn = 15 # The multiplicative group is [1, 2, 4, 7, 8, 11, 13, 14].\nx = 8\nr = classical_order_finder(x, n)\n\n# Check that the order is indeed correct.\nprint(f\"x^r mod n = {x}^{r} mod {n} = {x**r % n}\")", "The quantum part of Shor's algorithm is order finding, but done via a quantum circuit, which we'll discuss below.\nQuantum order finding\nQuantum order finding is essentially quantum phase estimation with unitary $U$ that computes the modular exponential function $f_x(z)$ for some randomly chosen $x \\in \\mathbb{Z}_n$. The full details of how $U$ is computed in terms of elementary gates can be complex to unravel, especially on a first reading. 
In this tutorial, we'll use arithmetic operations in Cirq which can implement such a unitary $U$ without fully delving into the details of elementary gates.\nBelow we first show an example of a simple arithmetic operation in Cirq (addition) then discuss the operation we care about (modular exponentiation).\nQuantum arithmetic operations in Cirq\nHere we discuss an example of defining an arithmetic operation in Cirq, namely modular addition. This operation adds the value of the input register into the target register. More specifically, this operation acts on two qubit registers as\n$$ |a\\rangle_i |b\\rangle_t \\mapsto |a\\rangle_i |a + b \\text{ mod } N_t \\rangle_t . $$\nHere, the subscripts $i$ and $t$ denote <i>i</i>nput and <i>t</i>arget register, respectively, and $N_t$ is the dimension of the target register.\nTo define this operation, called Adder, we inherit from cirq.ArithmeticOperation and override the four methods shown below. The main method is the apply method which defines the arithmetic. Here, we simply state the expression as $a + b$ instead of the more accurate $a + b \\text{ mod } N_t$ above -- the cirq.ArithmeticOperation class is able to deduce what we mean by simply $a + b$ since the operation must be reversible.", "\"\"\"Example of defining an arithmetic (quantum) operation in Cirq.\"\"\"\nclass Adder(cirq.ArithmeticOperation):\n \"\"\"Quantum addition.\"\"\"\n def __init__(self, target_register, input_register):\n self.input_register = input_register\n self.target_register = target_register\n \n def registers(self):\n return self.target_register, self.input_register\n \n def with_registers(self, *new_registers):\n return Adder(*new_registers)\n \n def apply(self, target_value, input_value):\n return target_value + input_value", "Now that we have the operation defined, we can use it in a circuit. The cell below creates two qubit registers, then sets the first register to be $|10\\rangle$ (in binary) and the second register to be $|01\\rangle$ (in binary) via $X$ gates. Then, we use the Adder operation, then measure all the qubits.\nSince $10 + 01 = 11$ (in binary), we expect to measure $|11\\rangle$ in the target register every time. Additionally, since we do not alter the input register, we expect to measure $|10\\rangle$ in the input register every time. In short, the only bitstring we expect to measure is $1011$.", "\"\"\"Example of using an Adder in a circuit.\"\"\"\n# Two qubit registers.\nqreg1 = cirq.LineQubit.range(2)\nqreg2 = cirq.LineQubit.range(2, 4)\n\n# Define the circuit.\ncirc = cirq.Circuit(\n cirq.ops.X.on(qreg1[0]),\n cirq.ops.X.on(qreg2[1]),\n Adder(input_register=qreg1, target_register=qreg2),\n cirq.measure_each(*qreg1),\n cirq.measure_each(*qreg2)\n)\n\n# Display it.\nprint(\"Circuit:\\n\")\nprint(circ)\n\n# Print the measurement outcomes.\nprint(\"\\n\\nMeasurement outcomes:\\n\")\nprint(cirq.sample(circ, repetitions=5).data)", "In the output of this code block, we first see the circuit which shows the initial $X$ gates, the Adder operation, then the final measurements. Next, we see the measurement outcomes which are all the bitstring $1011$ as expected.\nIt is also possible to see the unitary of the adder operation, which we do below. Here, we set the target register to be two qubits in the zero state, i.e. $|00\\rangle$. 
We specify the input register as the integer one which corresponds to the qubit register $|01\\rangle$.", "\"\"\"Example of the unitary of an Adder operation.\"\"\"\ncirq.unitary(\n Adder(target_register=cirq.LineQubit.range(2),\n input_register=1)\n).real", "We can understand this unitary as follows. The $i$th column of the unitary is the state $|i + 1 \\text{ mod } 4\\rangle$. For example, if we look at the $0$th column of the unitary, we see the state $|i + 1 \\text{ mod } 4\\rangle = |0 + 1 \\text{ mod } 4\\rangle = |1\\rangle$. If we look at the $1$st column of the unitary, we see the state $|i + 1 \\text{ mod } 4\\rangle = |1 + 1 \\text{ mod } 4\\rangle = |2\\rangle$. Similarly for the last two columns.\nModular exponential arithmetic operation\nWe can define the modular exponential arithmetic operation in a similar way to the simple addition arithmetic operation, shown below. For the purposes of understanding Shor's algorithm, the most important part of the following code block is the apply method which defines the arithmetic operation.", "\"\"\"Defines the modular exponential operation used in Shor's algorithm.\"\"\"\nclass ModularExp(cirq.ArithmeticOperation):\n \"\"\"Quantum modular exponentiation.\n\n This class represents the unitary which multiplies base raised to exponent\n into the target modulo the given modulus. More precisely, it represents the\n unitary V which computes modular exponentiation x**e mod n:\n\n V|y⟩|e⟩ = |y * x**e mod n⟩ |e⟩ 0 <= y < n\n V|y⟩|e⟩ = |y⟩ |e⟩ n <= y\n\n where y is the target register, e is the exponent register, x is the base\n and n is the modulus. Consequently,\n\n V|y⟩|e⟩ = (U**e|y)|e⟩\n\n where U is the unitary defined as\n\n U|y⟩ = |y * x mod n⟩ 0 <= y < n\n U|y⟩ = |y⟩ n <= y\n \"\"\"\n def __init__(\n self, \n target: Sequence[cirq.Qid],\n exponent: Union[int, Sequence[cirq.Qid]], \n base: int,\n modulus: int\n ) -> None:\n if len(target) < modulus.bit_length():\n raise ValueError(f'Register with {len(target)} qubits is too small '\n f'for modulus {modulus}')\n self.target = target\n self.exponent = exponent\n self.base = base\n self.modulus = modulus\n\n def registers(self) -> Sequence[Union[int, Sequence[cirq.Qid]]]:\n return self.target, self.exponent, self.base, self.modulus\n\n def with_registers(\n self,\n *new_registers: Union[int, Sequence['cirq.Qid']],\n ) -> cirq.ArithmeticOperation:\n if len(new_registers) != 4:\n raise ValueError(f'Expected 4 registers (target, exponent, base, '\n f'modulus), but got {len(new_registers)}')\n target, exponent, base, modulus = new_registers\n if not isinstance(target, Sequence):\n raise ValueError(\n f'Target must be a qubit register, got {type(target)}')\n if not isinstance(base, int):\n raise ValueError(\n f'Base must be a classical constant, got {type(base)}')\n if not isinstance(modulus, int):\n raise ValueError(\n f'Modulus must be a classical constant, got {type(modulus)}')\n return ModularExp(target, exponent, base, modulus)\n\n def apply(self, *register_values: int) -> int:\n assert len(register_values) == 4\n target, exponent, base, modulus = register_values\n if target >= modulus:\n return target\n return (target * base**exponent) % modulus\n\n def _circuit_diagram_info_(\n self,\n args: cirq.CircuitDiagramInfoArgs,\n ) -> cirq.CircuitDiagramInfo:\n assert args.known_qubits is not None\n wire_symbols: List[str] = []\n t, e = 0, 0\n for qubit in args.known_qubits:\n if qubit in self.target:\n if t == 0:\n if isinstance(self.exponent, Sequence):\n e_str = 'e'\n else:\n e_str = 
str(self.exponent)\n wire_symbols.append(\n f'ModularExp(t*{self.base}**{e_str} % {self.modulus})')\n else:\n wire_symbols.append('t' + str(t))\n t += 1\n if isinstance(self.exponent, Sequence) and qubit in self.exponent:\n wire_symbols.append('e' + str(e))\n e += 1\n return cirq.CircuitDiagramInfo(wire_symbols=tuple(wire_symbols))", "In the apply method, we see that we evaluate (target * base**exponent) % modulus. The target and the exponent depend on the values of the respective qubit registers, and the base and modulus are constant -- namely, the modulus is $n$ and the base is some $x \\in \\mathbb{Z}_n$. \nThe total number of qubits we will use is $3 (L + 1)$ where $L$ is the number of bits needed to store the integer $n$ to factor. The size of the unitary which implements the modular exponential is thus $4^{3(L + 1)}$. For a modest $n = 15$, the unitary requires storing $2^{30}$ floating point numbers in memory which is out of reach of most current standard laptops.", "\"\"\"Create the target and exponent registers for phase estimation,\nand see the number of qubits needed for Shor's algorithm.\n\"\"\"\nn = 15\nL = n.bit_length()\n\n# The target register has L qubits.\ntarget = cirq.LineQubit.range(L)\n\n# The exponent register has 2L + 3 qubits.\nexponent = cirq.LineQubit.range(L, 3 * L + 3)\n\n# Display the total number of qubits to factor this n.\nprint(f\"To factor n = {n} which has L = {L} bits, we need 3L + 3 = {3 * L + 3} qubits.\")", "As with the simple adder operation, this modular exponential operation has a unitary which we can display (memory permitting) as follows.", "\"\"\"See (part of) the unitary for a modular exponential operation.\"\"\"\n# Pick some element of the multiplicative group modulo n.\nx = 5\n\n# Display (part of) the unitary. Uncomment if n is small enough.\n# cirq.unitary(ModularExp(target, exponent, x, n))", "Using the modular exponentional operation in a circuit\nThe quantum part of Shor's algorithm is just phase estimation with the unitary $U$ corresponding to the modular exponential operation. The following cell defines a function which creates the circuit for Shor's algorithm using the ModularExp operation we defined above.", "\"\"\"Function to make the quantum circuit for order finding.\"\"\"\ndef make_order_finding_circuit(x: int, n: int) -> cirq.Circuit:\n \"\"\"Returns quantum circuit which computes the order of x modulo n.\n\n The circuit uses Quantum Phase Estimation to compute an eigenvalue of\n the unitary\n\n U|y⟩ = |y * x mod n⟩ 0 <= y < n\n U|y⟩ = |y⟩ n <= y\n\n Args:\n x: positive integer whose order modulo n is to be found\n n: modulus relative to which the order of x is to be found\n\n Returns:\n Quantum circuit for finding the order of x modulo n\n \"\"\"\n L = n.bit_length()\n target = cirq.LineQubit.range(L)\n exponent = cirq.LineQubit.range(L, 3 * L + 3)\n return cirq.Circuit(\n cirq.X(target[L - 1]),\n cirq.H.on_each(*exponent),\n ModularExp(target, exponent, x, n),\n cirq.qft(*exponent, inverse=True),\n cirq.measure(*exponent, key='exponent'),\n )", "Using this function, we can visualize the circuit for a given $x$ and $n$ as follows.", "\"\"\"Example of the quantum circuit for period finding.\"\"\"\nn = 15\nx = 7\ncircuit = make_order_finding_circuit(x, n)\nprint(circuit)", "As previously described, we put the exponent register into an equal superposition via Hadamard gates. The $X$ gate on the last qubit in the target register is used for phase kickback. 
The modular exponential operation performs the sequence of controlled unitaries in phase estimation, then we apply the inverse quantum Fourier transform to the exponent register and measure to read out the result.\nTo illustrate the measurement results, we can sample from a smaller circuit. (Note that in practice we would never run Shor's algorithm with $n = 6$ because it is even. This is just an example to illustrate the measurement outcomes.)", "\"\"\"Measuring Shor's period finding circuit.\"\"\"\ncircuit = make_order_finding_circuit(x=5, n=6)\nres = cirq.sample(circuit, repetitions=8)\n\nprint(\"Raw measurements:\")\nprint(res)\n\nprint(\"\\nInteger in exponent register:\")\nprint(res.data)", "We interpret each measured bitstring as an integer, but what do these integers tell us? In the next section we look at how to classically post-process to interpret them.\nClassical post-processing\nThe integer we measure is close to $s / r$ where $r$ is the order of $x \\in \\mathbb{Z}_n$ and $0 \\le s < r$ is an integer. We use the continued fractions algorithm to determine $r$ from $s / r$ then return it if the order finding circuit succeeded, else we return None.", "def process_measurement(result: cirq.Result, x: int, n: int) -> Optional[int]:\n \"\"\"Interprets the output of the order finding circuit.\n\n Specifically, it determines s/r such that exp(2πis/r) is an eigenvalue\n of the unitary\n\n U|y⟩ = |xy mod n⟩ 0 <= y < n\n U|y⟩ = |y⟩ n <= y\n \n then computes r (by continued fractions) if possible, and returns it.\n\n Args:\n result: result obtained by sampling the output of the\n circuit built by make_order_finding_circuit\n\n Returns:\n r, the order of x modulo n or None.\n \"\"\"\n # Read the output integer of the exponent register.\n exponent_as_integer = result.data[\"exponent\"][0]\n exponent_num_bits = result.measurements[\"exponent\"].shape[1]\n eigenphase = float(exponent_as_integer / 2**exponent_num_bits)\n\n # Run the continued fractions algorithm to determine f = s / r.\n f = fractions.Fraction.from_float(eigenphase).limit_denominator(n)\n \n # If the numerator is zero, the order finder failed.\n if f.numerator == 0:\n return None\n \n # Else, return the denominator if it is valid.\n r = f.denominator\n if x**r % n != 1:\n return None\n return r", "The next code block shows an example of creating an order finding circuit, executing it, then using the classical postprocessing function to determine the order. Recall that the quantum part of the algorithm succeeds with some probability. If the order is None, try re-running the cell a few times.", "\"\"\"Example of the classical post-processing.\"\"\"\n# Set n and x here\nn = 6\nx = 5\n\nprint(f\"Finding the order of x = {x} modulo n = {n}\\n\")\nmeasurement = cirq.sample(circuit, repetitions=1)\nprint(\"Raw measurements:\")\nprint(measurement)\n\nprint(\"\\nInteger in exponent register:\")\nprint(measurement.data)\n\nr = process_measurement(measurement, x, n)\nprint(\"\\nOrder r =\", r)\nif r is not None:\n print(f\"x^r mod n = {x}^{r} mod {n} = {x**r % n}\")", "You should see that the order of $x = 5$ in $\\mathbb{Z}_6$ is $r = 2$. Indeed, $5^2 \\text{ mod } 6 = 25 \\text{ mod } 6 = 1$. \nQuantum order finder\nWe can now define a streamlined function for the quantum version of order finding using the functions we have previously written. 
The quantum order finder below creates the circuit, executes it, and processes the measurement result.", "def quantum_order_finder(x: int, n: int) -> Optional[int]:\n \"\"\"Computes smallest positive r such that x**r mod n == 1.\n \n Args:\n x: integer whose order is to be computed, must be greater than one\n and belong to the multiplicative group of integers modulo n (which\n consists of positive integers relatively prime to n),\n n: modulus of the multiplicative group.\n \"\"\"\n # Check that the integer x is a valid element of the multiplicative group\n # modulo n.\n if x < 2 or n <= x or math.gcd(x, n) > 1:\n raise ValueError(f'Invalid x={x} for modulus n={n}.')\n\n # Create the order finding circuit.\n circuit = make_order_finding_circuit(x, n)\n \n # Sample from the order finding circuit.\n measurement = cirq.sample(circuit)\n \n # Return the processed measurement result.\n return process_measurement(measurement, x, n)", "This completes our quantum implementation of an order finder, and the quantum part of Shor's algorithm.\nThe complete factoring algorithm\nWe can use this quantum order finder (or the classical order finder) to complete Shor's algorithm. In the following code block, we add a few pre-processing steps which:\n(1) Check if $n$ is even,\n(2) Check if $n$ is prime,\n(3) Check if $n$ is a prime power,\nall of which can be done efficiently with a classical computer. Additionally, we add the last necessary post-processing step which uses the order $r$ to compute a non-trivial factor $p$ of $n$. This is achieved by computing $y = x^{r / 2} \\text{ mod } n$ (assuming $r$ is even), then computing $p = \\text{gcd}(y - 1, n)$.", "\"\"\"Functions for factoring from start to finish.\"\"\"\ndef find_factor_of_prime_power(n: int) -> Optional[int]:\n \"\"\"Returns non-trivial factor of n if n is a prime power, else None.\"\"\"\n for k in range(2, math.floor(math.log2(n)) + 1):\n c = math.pow(n, 1 / k)\n c1 = math.floor(c)\n if c1**k == n:\n return c1\n c2 = math.ceil(c)\n if c2**k == n:\n return c2\n return None\n\n\ndef find_factor(\n n: int,\n order_finder: Callable[[int, int], Optional[int]] = quantum_order_finder,\n max_attempts: int = 30\n) -> Optional[int]:\n \"\"\"Returns a non-trivial factor of composite integer n.\n\n Args:\n n: Integer to factor.\n order_finder: Function for finding the order of elements of the\n multiplicative group of integers modulo n.\n max_attempts: number of random x's to try, also an upper limit\n on the number of order_finder invocations.\n\n Returns:\n Non-trivial factor of n or None if no such factor was found.\n Factor k of n is trivial if it is 1 or n.\n \"\"\"\n # If the number is prime, there are no non-trivial factors.\n if sympy.isprime(n):\n print(\"n is prime!\")\n return None\n \n # If the number is even, two is a non-trivial factor.\n if n % 2 == 0:\n return 2\n \n # If n is a prime power, we can find a non-trivial factor efficiently.\n c = find_factor_of_prime_power(n)\n if c is not None:\n return c\n \n for _ in range(max_attempts):\n # Choose a random number between 2 and n - 1.\n x = random.randint(2, n - 1)\n \n # Most likely x and n will be relatively prime.\n c = math.gcd(x, n)\n \n # If x and n are not relatively prime, we got lucky and found\n # a non-trivial factor.\n if 1 < c < n:\n return c\n \n # Compute the order r of x modulo n using the order finder.\n r = order_finder(x, n)\n \n # If the order finder failed, try again.\n if r is None:\n continue\n \n # If the order r is even, try again.\n if r % 2 != 0:\n continue\n \n # 
Compute the non-trivial factor.\n y = x**(r // 2) % n\n assert 1 < y < n\n c = math.gcd(y - 1, n)\n if 1 < c < n:\n return c\n\n print(f\"Failed to find a non-trivial factor in {max_attempts} attempts.\")\n return None", "The function find_factor uses the quantum_order_finder by default, in which case it is executing Shor's algorithm. As previously mentioned, due to the large memory requirements for classically simulating this circuit, we cannot run Shor's algorithm for $n \\ge 15$. However, we can use the classical order finder as a substitute.", "\"\"\"Example of factoring via Shor's algorithm (order finding).\"\"\"\n# Number to factor\nn = 184573\n\n# Attempt to find a factor\np = find_factor(n, order_finder=classical_order_finder)\nq = n // p\n\nprint(\"Factoring n = pq =\", n)\nprint(\"p =\", p)\nprint(\"q =\", q)\n\n\"\"\"Check the answer is correct.\"\"\"\np * q == n" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
amueller/scipy-2017-sklearn
notebooks/21.Unsupervised_learning-Non-linear_dimensionality_reduction.ipynb
cc0-1.0
[ "%matplotlib inline\nimport matplotlib.pyplot as plt\nimport numpy as np", "Manifold Learning\nOne weakness of PCA is that it cannot detect non-linear features. A set\nof algorithms known as Manifold Learning have been developed to address\nthis deficiency. A canonical dataset used in Manifold learning is the\nS-curve:", "from sklearn.datasets import make_s_curve\nX, y = make_s_curve(n_samples=1000)\n\nfrom mpl_toolkits.mplot3d import Axes3D\nax = plt.axes(projection='3d')\n\nax.scatter3D(X[:, 0], X[:, 1], X[:, 2], c=y)\nax.view_init(10, -60);", "This is a 2-dimensional dataset embedded in three dimensions, but it is embedded\nin such a way that PCA cannot discover the underlying data orientation:", "from sklearn.decomposition import PCA\nX_pca = PCA(n_components=2).fit_transform(X)\nplt.scatter(X_pca[:, 0], X_pca[:, 1], c=y);", "Manifold learning algorithms, however, available in the sklearn.manifold\nsubmodule, are able to recover the underlying 2-dimensional manifold:", "from sklearn.manifold import Isomap\n\niso = Isomap(n_neighbors=15, n_components=2)\nX_iso = iso.fit_transform(X)\nplt.scatter(X_iso[:, 0], X_iso[:, 1], c=y);", "Manifold learning on the digits data\nWe can apply manifold learning techniques to much higher dimensional datasets, for example the digits data that we saw before:", "from sklearn.datasets import load_digits\ndigits = load_digits()\n\nfig, axes = plt.subplots(2, 5, figsize=(10, 5),\n subplot_kw={'xticks':(), 'yticks': ()})\nfor ax, img in zip(axes.ravel(), digits.images):\n ax.imshow(img, interpolation=\"none\", cmap=\"gray\")", "We can visualize the dataset using a linear technique, such as PCA. We saw this already provides some intuition about the data:", "# build a PCA model\npca = PCA(n_components=2)\npca.fit(digits.data)\n# transform the digits data onto the first two principal components\ndigits_pca = pca.transform(digits.data)\ncolors = [\"#476A2A\", \"#7851B8\", \"#BD3430\", \"#4A2D4E\", \"#875525\",\n \"#A83683\", \"#4E655E\", \"#853541\", \"#3A3120\",\"#535D8E\"]\nplt.figure(figsize=(10, 10))\nplt.xlim(digits_pca[:, 0].min(), digits_pca[:, 0].max() + 1)\nplt.ylim(digits_pca[:, 1].min(), digits_pca[:, 1].max() + 1)\nfor i in range(len(digits.data)):\n # actually plot the digits as text instead of using scatter\n plt.text(digits_pca[i, 0], digits_pca[i, 1], str(digits.target[i]),\n color = colors[digits.target[i]],\n fontdict={'weight': 'bold', 'size': 9})\nplt.xlabel(\"first principal component\")\nplt.ylabel(\"second principal component\");", "Using a more powerful, nonlinear techinque can provide much better visualizations, though.\nHere, we are using the t-SNE manifold learning method:", "from sklearn.manifold import TSNE\ntsne = TSNE(random_state=42)\n# use fit_transform instead of fit, as TSNE has no transform method:\ndigits_tsne = tsne.fit_transform(digits.data)\n\nplt.figure(figsize=(10, 10))\nplt.xlim(digits_tsne[:, 0].min(), digits_tsne[:, 0].max() + 1)\nplt.ylim(digits_tsne[:, 1].min(), digits_tsne[:, 1].max() + 1)\nfor i in range(len(digits.data)):\n # actually plot the digits as text instead of using scatter\n plt.text(digits_tsne[i, 0], digits_tsne[i, 1], str(digits.target[i]),\n color = colors[digits.target[i]],\n fontdict={'weight': 'bold', 'size': 9})", "t-SNE has a somewhat longer runtime that other manifold learning algorithms, but the result is quite striking. Keep in mind that this algorithm is purely unsupervised, and does not know about the class labels. 
Still it is able to separate the classes very well (though the classes four, one and nine have been split into multiple groups).\n<div class=\"alert alert-success\">\n <b>EXERCISE</b>:\n <ul>\n <li>\n Compare the results of applying isomap to the digits dataset to the results of PCA and t-SNE. Which result do you think looks best?\n </li>\n <li>\n Given how well t-SNE separated the classes, one might be tempted to use this processing for classification. Try training a K-nearest neighbor classifier on digits data transformed with t-SNE, and compare to the accuracy on using the dataset without any transformation.\n </li>\n </ul>\n</div>", "# %load solutions/21A_isomap_digits.py\n\n# %load solutions/21B_tsne_classification.py" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
ShubhamDebnath/Coursera-Machine-Learning
Course 5/Dinosaurus Island Character level language model final v3.ipynb
mit
[ "Character level language model - Dinosaurus land\nWelcome to Dinosaurus Island! 65 million years ago, dinosaurs existed, and in this assignment they are back. You are in charge of a special task. Leading biology researchers are creating new breeds of dinosaurs and bringing them to life on earth, and your job is to give names to these dinosaurs. If a dinosaur does not like its name, it might go beserk, so choose wisely! \n<table>\n<td>\n<img src=\"images/dino.jpg\" style=\"width:250;height:300px;\">\n\n</td>\n\n</table>\n\nLuckily you have learned some deep learning and you will use it to save the day. Your assistant has collected a list of all the dinosaur names they could find, and compiled them into this dataset. (Feel free to take a look by clicking the previous link.) To create new dinosaur names, you will build a character level language model to generate new names. Your algorithm will learn the different name patterns, and randomly generate new names. Hopefully this algorithm will keep you and your team safe from the dinosaurs' wrath! \nBy completing this assignment you will learn:\n\nHow to store text data for processing using an RNN \nHow to synthesize data, by sampling predictions at each time step and passing it to the next RNN-cell unit\nHow to build a character-level text generation recurrent neural network\nWhy clipping the gradients is important\n\nWe will begin by loading in some functions that we have provided for you in rnn_utils. Specifically, you have access to functions such as rnn_forward and rnn_backward which are equivalent to those you've implemented in the previous assignment.", "import numpy as np\nfrom utils import *\nimport random", "1 - Problem Statement\n1.1 - Dataset and Preprocessing\nRun the following cell to read the dataset of dinosaur names, create a list of unique characters (such as a-z), and compute the dataset and vocabulary size.", "data = open('dinos.txt', 'r').read()\ndata= data.lower()\nchars = list(set(data))\ndata_size, vocab_size = len(data), len(chars)\nprint('There are %d total characters and %d unique characters in your data.' % (data_size, vocab_size))", "The characters are a-z (26 characters) plus the \"\\n\" (or newline character), which in this assignment plays a role similar to the &lt;EOS&gt; (or \"End of sentence\") token we had discussed in lecture, only here it indicates the end of the dinosaur name rather than the end of a sentence. In the cell below, we create a python dictionary (i.e., a hash table) to map each character to an index from 0-26. We also create a second python dictionary that maps each index back to the corresponding character character. This will help you figure out what index corresponds to what character in the probability distribution output of the softmax layer. 
Below, char_to_ix and ix_to_char are the python dictionaries.", "char_to_ix = { ch:i for i,ch in enumerate(sorted(chars)) }\nix_to_char = { i:ch for i,ch in enumerate(sorted(chars)) }\nprint(ix_to_char)", "1.2 - Overview of the model\nYour model will have the following structure: \n\nInitialize parameters \nRun the optimization loop\nForward propagation to compute the loss function\nBackward propagation to compute the gradients with respect to the loss function\nClip the gradients to avoid exploding gradients\nUsing the gradients, update your parameter with the gradient descent update rule.\n\n\nReturn the learned parameters \n\n<img src=\"images/rnn.png\" style=\"width:450;height:300px;\">\n<caption><center> Figure 1: Recurrent Neural Network, similar to what you had built in the previous notebook \"Building a RNN - Step by Step\". </center></caption>\nAt each time-step, the RNN tries to predict what is the next character given the previous characters. The dataset $X = (x^{\\langle 1 \\rangle}, x^{\\langle 2 \\rangle}, ..., x^{\\langle T_x \\rangle})$ is a list of characters in the training set, while $Y = (y^{\\langle 1 \\rangle}, y^{\\langle 2 \\rangle}, ..., y^{\\langle T_x \\rangle})$ is such that at every time-step $t$, we have $y^{\\langle t \\rangle} = x^{\\langle t+1 \\rangle}$. \n2 - Building blocks of the model\nIn this part, you will build two important blocks of the overall model:\n- Gradient clipping: to avoid exploding gradients\n- Sampling: a technique used to generate characters\nYou will then apply these two functions to build the model.\n2.1 - Clipping the gradients in the optimization loop\nIn this section you will implement the clip function that you will call inside of your optimization loop. Recall that your overall loop structure usually consists of a forward pass, a cost computation, a backward pass, and a parameter update. Before updating the parameters, you will perform gradient clipping when needed to make sure that your gradients are not \"exploding,\" meaning taking on overly large values. \nIn the exercise below, you will implement a function clip that takes in a dictionary of gradients and returns a clipped version of gradients if needed. There are different ways to clip gradients; we will use a simple element-wise clipping procedure, in which every element of the gradient vector is clipped to lie between some range [-N, N]. More generally, you will provide a maxValue (say 10). In this example, if any component of the gradient vector is greater than 10, it would be set to 10; and if any component of the gradient vector is less than -10, it would be set to -10. If it is between -10 and 10, it is left alone. \n<img src=\"images/clip.png\" style=\"width:400;height:150px;\">\n<caption><center> Figure 2: Visualization of gradient descent with and without gradient clipping, in a case where the network is running into slight \"exploding gradient\" problems. </center></caption>\nExercise: Implement the function below to return the clipped gradients of your dictionary gradients. Your function takes in a maximum threshold and returns the clipped versions of your gradients. You can check out this hint for examples of how to clip in numpy. 
You will need to use the argument out = ....", "### GRADED FUNCTION: clip\n\ndef clip(gradients, maxValue):\n '''\n Clips the gradients' values between minimum and maximum.\n \n Arguments:\n gradients -- a dictionary containing the gradients \"dWaa\", \"dWax\", \"dWya\", \"db\", \"dby\"\n maxValue -- everything above this number is set to this number, and everything less than -maxValue is set to -maxValue\n \n Returns: \n gradients -- a dictionary with the clipped gradients.\n '''\n \n dWaa, dWax, dWya, db, dby = gradients['dWaa'], gradients['dWax'], gradients['dWya'], gradients['db'], gradients['dby']\n \n ### START CODE HERE ###\n # clip to mitigate exploding gradients, loop over [dWax, dWaa, dWya, db, dby]. (≈2 lines)\n for gradient in [dWax, dWaa, dWya, db, dby]:\n np.clip(gradient, -maxValue, maxValue, out = gradient)\n ### END CODE HERE ###\n \n gradients = {\"dWaa\": dWaa, \"dWax\": dWax, \"dWya\": dWya, \"db\": db, \"dby\": dby}\n \n return gradients\n\nnp.random.seed(3)\ndWax = np.random.randn(5,3)*10\ndWaa = np.random.randn(5,5)*10\ndWya = np.random.randn(2,5)*10\ndb = np.random.randn(5,1)*10\ndby = np.random.randn(2,1)*10\ngradients = {\"dWax\": dWax, \"dWaa\": dWaa, \"dWya\": dWya, \"db\": db, \"dby\": dby}\ngradients = clip(gradients, 10)\nprint(\"gradients[\\\"dWaa\\\"][1][2] =\", gradients[\"dWaa\"][1][2])\nprint(\"gradients[\\\"dWax\\\"][3][1] =\", gradients[\"dWax\"][3][1])\nprint(\"gradients[\\\"dWya\\\"][1][2] =\", gradients[\"dWya\"][1][2])\nprint(\"gradients[\\\"db\\\"][4] =\", gradients[\"db\"][4])\nprint(\"gradients[\\\"dby\\\"][1] =\", gradients[\"dby\"][1])", "Expected output:\n<table>\n<tr>\n <td> \n **gradients[\"dWaa\"][1][2] **\n </td>\n <td> \n 10.0\n </td>\n</tr>\n\n<tr>\n <td> \n **gradients[\"dWax\"][3][1]**\n </td>\n <td> \n -10.0\n </td>\n </td>\n</tr>\n<tr>\n <td> \n **gradients[\"dWya\"][1][2]**\n </td>\n <td> \n0.29713815361\n </td>\n</tr>\n<tr>\n <td> \n **gradients[\"db\"][4]**\n </td>\n <td> \n[ 10.]\n </td>\n</tr>\n<tr>\n <td> \n **gradients[\"dby\"][1]**\n </td>\n <td> \n[ 8.45833407]\n </td>\n</tr>\n\n</table>\n\n2.2 - Sampling\nNow assume that your model is trained. You would like to generate new text (characters). The process of generation is explained in the picture below:\n<img src=\"images/dinos3.png\" style=\"width:500;height:300px;\">\n<caption><center> Figure 3: In this picture, we assume the model is already trained. We pass in $x^{\\langle 1\\rangle} = \\vec{0}$ at the first time step, and have the network then sample one character at a time. </center></caption>\nExercise: Implement the sample function below to sample characters. You need to carry out 4 steps:\n\n\nStep 1: Pass the network the first \"dummy\" input $x^{\\langle 1 \\rangle} = \\vec{0}$ (the vector of zeros). This is the default input before we've generated any characters. We also set $a^{\\langle 0 \\rangle} = \\vec{0}$\n\n\nStep 2: Run one step of forward propagation to get $a^{\\langle 1 \\rangle}$ and $\\hat{y}^{\\langle 1 \\rangle}$. Here are the equations:\n\n\n$$ a^{\\langle t+1 \\rangle} = \\tanh(W_{ax} x^{\\langle t \\rangle } + W_{aa} a^{\\langle t \\rangle } + b)\\tag{1}$$\n$$ z^{\\langle t + 1 \\rangle } = W_{ya} a^{\\langle t + 1 \\rangle } + b_y \\tag{2}$$\n$$ \\hat{y}^{\\langle t+1 \\rangle } = softmax(z^{\\langle t + 1 \\rangle })\\tag{3}$$\nNote that $\\hat{y}^{\\langle t+1 \\rangle }$ is a (softmax) probability vector (its entries are between 0 and 1 and sum to 1). 
$\\hat{y}^{\\langle t+1 \\rangle}_i$ represents the probability that the character indexed by \"i\" is the next character. We have provided a softmax() function that you can use.\n\nStep 3: Carry out sampling: Pick the next character's index according to the probability distribution specified by $\\hat{y}^{\\langle t+1 \\rangle }$. This means that if $\\hat{y}^{\\langle t+1 \\rangle }_i = 0.16$, you will pick the index \"i\" with 16% probability. To implement it, you can use np.random.choice.\n\nHere is an example of how to use np.random.choice():\npython\nnp.random.seed(0)\np = np.array([0.1, 0.0, 0.7, 0.2])\nindex = np.random.choice([0, 1, 2, 3], p = p.ravel())\nThis means that you will pick the index according to the distribution: \n$P(index = 0) = 0.1, P(index = 1) = 0.0, P(index = 2) = 0.7, P(index = 3) = 0.2$.\n\nStep 4: The last step to implement in sample() is to overwrite the variable x, which currently stores $x^{\\langle t \\rangle }$, with the value of $x^{\\langle t + 1 \\rangle }$. You will represent $x^{\\langle t + 1 \\rangle }$ by creating a one-hot vector corresponding to the character you've chosen as your prediction. You will then forward propagate $x^{\\langle t + 1 \\rangle }$ in Step 1 and keep repeating the process until you get a \"\\n\" character, indicating you've reached the end of the dinosaur name.", "# GRADED FUNCTION: sample\n\ndef sample(parameters, char_to_ix, seed):\n \"\"\"\n Sample a sequence of characters according to a sequence of probability distributions output of the RNN\n\n Arguments:\n parameters -- python dictionary containing the parameters Waa, Wax, Wya, by, and b. \n char_to_ix -- python dictionary mapping each character to an index.\n seed -- used for grading purposes. Do not worry about it.\n\n Returns:\n indices -- a list of length n containing the indices of the sampled characters.\n \"\"\"\n \n # Retrieve parameters and relevant shapes from \"parameters\" dictionary\n Waa, Wax, Wya, by, b = parameters['Waa'], parameters['Wax'], parameters['Wya'], parameters['by'], parameters['b']\n vocab_size = by.shape[0]\n n_a = Waa.shape[1]\n \n ### START CODE HERE ###\n # Step 1: Create the one-hot vector x for the first character (initializing the sequence generation). (≈1 line)\n x = np.zeros((vocab_size, 1))\n # Step 1': Initialize a_prev as zeros (≈1 line)\n a_prev = np.zeros((n_a, 1))\n \n # Create an empty list of indices, this is the list which will contain the list of indices of the characters to generate (≈1 line)\n indices = []\n \n # Idx is a flag to detect a newline character, we initialize it to -1\n idx = -1 \n \n # Loop over time-steps t. At each time-step, sample a character from a probability distribution and append \n # its index to \"indices\". We'll stop if we reach 50 characters (which should be very unlikely with a well \n # trained model), which helps debugging and prevents entering an infinite loop. 
\n counter = 0\n newline_character = char_to_ix['\\n']\n \n while (idx != newline_character and counter != 50):\n \n # Step 2: Forward propagate x using the equations (1), (2) and (3)\n a = np.tanh(np.dot(Waa, a_prev) + np.dot(Wax, x) + b)\n z = np.dot(Wya, a) + by\n y = softmax(z)\n \n # for grading purposes\n np.random.seed(counter+seed) \n \n # Step 3: Sample the index of a character within the vocabulary from the probability distribution y\n idx = np.random.choice([i for i in range(vocab_size)], p = y.ravel())\n\n # Append the index to \"indices\"\n indices.append(idx)\n \n # Step 4: Overwrite the input character as the one corresponding to the sampled index.\n x = np.zeros((vocab_size, 1))\n x[idx] = 1\n \n # Update \"a_prev\" to be \"a\"\n a_prev = a\n \n # for grading purposes\n seed += 1\n counter +=1\n \n ### END CODE HERE ###\n\n if (counter == 50):\n indices.append(char_to_ix['\\n'])\n \n return indices\n\nnp.random.seed(2)\n_, n_a = 20, 100\nWax, Waa, Wya = np.random.randn(n_a, vocab_size), np.random.randn(n_a, n_a), np.random.randn(vocab_size, n_a)\nb, by = np.random.randn(n_a, 1), np.random.randn(vocab_size, 1)\nparameters = {\"Wax\": Wax, \"Waa\": Waa, \"Wya\": Wya, \"b\": b, \"by\": by}\n\n\nindices = sample(parameters, char_to_ix, 0)\nprint(\"Sampling:\")\nprint(\"list of sampled indices:\", indices)\nprint(\"list of sampled characters:\", [ix_to_char[i] for i in indices])", "Expected output:\n<table>\n<tr>\n <td> \n **list of sampled indices:**\n </td>\n <td> \n [12, 17, 24, 14, 13, 9, 10, 22, 24, 6, 13, 11, 12, 6, 21, 15, 21, 14, 3, 2, 1, 21, 18, 24, <br>\n 7, 25, 6, 25, 18, 10, 16, 2, 3, 8, 15, 12, 11, 7, 1, 12, 10, 2, 7, 7, 11, 5, 6, 12, 25, 0, 0]\n </td>\n </tr><tr>\n <td> \n **list of sampled characters:**\n </td>\n <td> \n ['l', 'q', 'x', 'n', 'm', 'i', 'j', 'v', 'x', 'f', 'm', 'k', 'l', 'f', 'u', 'o', <br>\n 'u', 'n', 'c', 'b', 'a', 'u', 'r', 'x', 'g', 'y', 'f', 'y', 'r', 'j', 'p', 'b', 'c', 'h', 'o', <br>\n 'l', 'k', 'g', 'a', 'l', 'j', 'b', 'g', 'g', 'k', 'e', 'f', 'l', 'y', '\\n', '\\n']\n </td>\n\n\n\n</tr>\n</table>\n\n3 - Building the language model\nIt is time to build the character-level language model for text generation. \n3.1 - Gradient descent\nIn this section you will implement a function performing one step of stochastic gradient descent (with clipped gradients). You will go through the training examples one at a time, so the optimization algorithm will be stochastic gradient descent. As a reminder, here are the steps of a common optimization loop for an RNN:\n\nForward propagate through the RNN to compute the loss\nBackward propagate through time to compute the gradients of the loss with respect to the parameters\nClip the gradients if necessary \nUpdate your parameters using gradient descent \n\nExercise: Implement this optimization process (one step of stochastic gradient descent). \nWe provide you with the following functions: \n```python\ndef rnn_forward(X, Y, a_prev, parameters):\n \"\"\" Performs the forward propagation through the RNN and computes the cross-entropy loss.\n It returns the loss' value as well as a \"cache\" storing values to be used in the backpropagation.\"\"\"\n ....\n return loss, cache\ndef rnn_backward(X, Y, parameters, cache):\n \"\"\" Performs the backward propagation through time to compute the gradients of the loss with respect\n to the parameters. 
It returns also all the hidden states.\"\"\"\n ...\n return gradients, a\ndef update_parameters(parameters, gradients, learning_rate):\n \"\"\" Updates parameters using the Gradient Descent Update Rule.\"\"\"\n ...\n return parameters\n```", "# GRADED FUNCTION: optimize\n\ndef optimize(X, Y, a_prev, parameters, learning_rate = 0.01):\n \"\"\"\n Execute one step of the optimization to train the model.\n \n Arguments:\n X -- list of integers, where each integer is a number that maps to a character in the vocabulary.\n Y -- list of integers, exactly the same as X but shifted one index to the left.\n a_prev -- previous hidden state.\n parameters -- python dictionary containing:\n Wax -- Weight matrix multiplying the input, numpy array of shape (n_a, n_x)\n Waa -- Weight matrix multiplying the hidden state, numpy array of shape (n_a, n_a)\n Wya -- Weight matrix relating the hidden-state to the output, numpy array of shape (n_y, n_a)\n b -- Bias, numpy array of shape (n_a, 1)\n by -- Bias relating the hidden-state to the output, numpy array of shape (n_y, 1)\n learning_rate -- learning rate for the model.\n \n Returns:\n loss -- value of the loss function (cross-entropy)\n gradients -- python dictionary containing:\n dWax -- Gradients of input-to-hidden weights, of shape (n_a, n_x)\n dWaa -- Gradients of hidden-to-hidden weights, of shape (n_a, n_a)\n dWya -- Gradients of hidden-to-output weights, of shape (n_y, n_a)\n db -- Gradients of bias vector, of shape (n_a, 1)\n dby -- Gradients of output bias vector, of shape (n_y, 1)\n a[len(X)-1] -- the last hidden state, of shape (n_a, 1)\n \"\"\"\n \n ### START CODE HERE ###\n \n # Forward propagate through time (≈1 line)\n loss, cache = rnn_forward(X, Y, a_prev, parameters)\n \n # Backpropagate through time (≈1 line)\n gradients, a = rnn_backward(X, Y, parameters, cache)\n \n # Clip your gradients between -5 (min) and 5 (max) (≈1 line)\n gradients = clip(gradients, 5)\n \n # Update parameters (≈1 line)\n parameters = update_parameters(parameters, gradients, learning_rate)\n \n ### END CODE HERE ###\n \n return loss, gradients, a[len(X)-1]\n\nnp.random.seed(1)\nvocab_size, n_a = 27, 100\na_prev = np.random.randn(n_a, 1)\nWax, Waa, Wya = np.random.randn(n_a, vocab_size), np.random.randn(n_a, n_a), np.random.randn(vocab_size, n_a)\nb, by = np.random.randn(n_a, 1), np.random.randn(vocab_size, 1)\nparameters = {\"Wax\": Wax, \"Waa\": Waa, \"Wya\": Wya, \"b\": b, \"by\": by}\nX = [12,3,5,11,22,3]\nY = [4,14,11,22,25, 26]\n\nloss, gradients, a_last = optimize(X, Y, a_prev, parameters, learning_rate = 0.01)\nprint(\"Loss =\", loss)\nprint(\"gradients[\\\"dWaa\\\"][1][2] =\", gradients[\"dWaa\"][1][2])\nprint(\"np.argmax(gradients[\\\"dWax\\\"]) =\", np.argmax(gradients[\"dWax\"]))\nprint(\"gradients[\\\"dWya\\\"][1][2] =\", gradients[\"dWya\"][1][2])\nprint(\"gradients[\\\"db\\\"][4] =\", gradients[\"db\"][4])\nprint(\"gradients[\\\"dby\\\"][1] =\", gradients[\"dby\"][1])\nprint(\"a_last[4] =\", a_last[4])", "Expected output:\n<table>\n\n\n<tr>\n <td> \n **Loss **\n </td>\n <td> \n 126.503975722\n </td>\n</tr>\n<tr>\n <td> \n **gradients[\"dWaa\"][1][2]**\n </td>\n <td> \n 0.194709315347\n </td>\n<tr>\n <td> \n **np.argmax(gradients[\"dWax\"])**\n </td>\n <td> 93\n </td>\n</tr>\n<tr>\n <td> \n **gradients[\"dWya\"][1][2]**\n </td>\n <td> -0.007773876032\n </td>\n</tr>\n<tr>\n <td> \n **gradients[\"db\"][4]**\n </td>\n <td> [-0.06809825]\n </td>\n</tr>\n<tr>\n <td> \n **gradients[\"dby\"][1]**\n </td>\n <td>[ 0.01538192]\n </td>\n</tr>\n<tr>\n <td> \n 
**a_last[4]**\n </td>\n <td> [-1.]\n </td>\n</tr>\n\n</table>\n\n3.2 - Training the model\nGiven the dataset of dinosaur names, we use each line of the dataset (one name) as one training example. Every 100 steps of stochastic gradient descent, you will sample 10 randomly chosen names to see how the algorithm is doing. Remember to shuffle the dataset, so that stochastic gradient descent visits the examples in random order. \nExercise: Follow the instructions and implement model(). When examples[index] contains one dinosaur name (string), to create an example (X, Y), you can use this:\npython\n index = j % len(examples)\n X = [None] + [char_to_ix[ch] for ch in examples[index]] \n Y = X[1:] + [char_to_ix[\"\\n\"]]\nNote that we use: index= j % len(examples), where j = 1....num_iterations, to make sure that examples[index] is always a valid statement (index is smaller than len(examples)).\nThe first entry of X being None will be interpreted by rnn_forward() as setting $x^{\\langle 0 \\rangle} = \\vec{0}$. Further, this ensures that Y is equal to X but shifted one step to the left, and with an additional \"\\n\" appended to signify the end of the dinosaur name.", "# GRADED FUNCTION: model\n\ndef model(data, ix_to_char, char_to_ix, num_iterations = 35000, n_a = 50, dino_names = 7, vocab_size = 27):\n \"\"\"\n Trains the model and generates dinosaur names. \n \n Arguments:\n data -- text corpus\n ix_to_char -- dictionary that maps the index to a character\n char_to_ix -- dictionary that maps a character to an index\n num_iterations -- number of iterations to train the model for\n n_a -- number of units of the RNN cell\n dino_names -- number of dinosaur names you want to sample at each iteration. \n vocab_size -- number of unique characters found in the text, size of the vocabulary\n \n Returns:\n parameters -- learned parameters\n \"\"\"\n \n # Retrieve n_x and n_y from vocab_size\n n_x, n_y = vocab_size, vocab_size\n \n # Initialize parameters\n parameters = initialize_parameters(n_a, n_x, n_y)\n \n # Initialize loss (this is required because we want to smooth our loss, don't worry about it)\n loss = get_initial_loss(vocab_size, dino_names)\n \n # Build list of all dinosaur names (training examples).\n with open(\"dinos.txt\") as f:\n examples = f.readlines()\n examples = [x.lower().strip() for x in examples]\n \n # Shuffle list of all dinosaur names\n np.random.seed(0)\n np.random.shuffle(examples)\n \n # Initialize the hidden state of your LSTM\n a_prev = np.zeros((n_a, 1))\n \n # Optimization loop\n for j in range(num_iterations):\n \n ### START CODE HERE ###\n \n # Use the hint above to define one training example (X,Y) (≈ 2 lines)\n index = j % len(examples)\n X = [None] + [char_to_ix[ch] for ch in examples[index]]\n Y = X[1:] + [char_to_ix['\\n']]\n \n # Perform one optimization step: Forward-prop -> Backward-prop -> Clip -> Update parameters\n # Choose a learning rate of 0.01\n curr_loss, gradients, a_prev = optimize(X, Y, a_prev, parameters)\n \n ### END CODE HERE ###\n \n # Use a latency trick to keep the loss smooth. 
It happens here to accelerate the training.\n loss = smooth(loss, curr_loss)\n\n # Every 2000 Iteration, generate \"n\" characters thanks to sample() to check if the model is learning properly\n if j % 2000 == 0:\n \n print('Iteration: %d, Loss: %f' % (j, loss) + '\\n')\n \n # The number of dinosaur names to print\n seed = 0\n for name in range(dino_names):\n \n # Sample indices and print them\n sampled_indices = sample(parameters, char_to_ix, seed)\n print_sample(sampled_indices, ix_to_char)\n \n seed += 1 # To get the same result for grading purposed, increment the seed by one. \n \n print('\\n')\n \n return parameters", "Run the following cell, you should observe your model outputting random-looking characters at the first iteration. After a few thousand iterations, your model should learn to generate reasonable-looking names.", "parameters = model(data, ix_to_char, char_to_ix)", "Conclusion\nYou can see that your algorithm has started to generate plausible dinosaur names towards the end of the training. At first, it was generating random characters, but towards the end you could see dinosaur names with cool endings. Feel free to run the algorithm even longer and play with hyperparameters to see if you can get even better results. Our implemetation generated some really cool names like maconucon, marloralus and macingsersaurus. Your model hopefully also learned that dinosaur names tend to end in saurus, don, aura, tor, etc.\nIf your model generates some non-cool names, don't blame the model entirely--not all actual dinosaur names sound cool. (For example, dromaeosauroides is an actual dinosaur name and is in the training set.) But this model should give you a set of candidates from which you can pick the coolest! \nThis assignment had used a relatively small dataset, so that you could train an RNN quickly on a CPU. Training a model of the english language requires a much bigger dataset, and usually needs much more computation, and could run for many hours on GPUs. We ran our dinosaur name for quite some time, and so far our favoriate name is the great, undefeatable, and fierce: Mangosaurus!\n<img src=\"images/mangosaurus.jpeg\" style=\"width:250;height:300px;\">\n4 - Writing like Shakespeare\nThe rest of this notebook is optional and is not graded, but we hope you'll do it anyway since it's quite fun and informative. \nA similar (but more complicated) task is to generate Shakespeare poems. Instead of learning from a dataset of Dinosaur names you can use a collection of Shakespearian poems. Using LSTM cells, you can learn longer term dependencies that span many characters in the text--e.g., where a character appearing somewhere a sequence can influence what should be a different character much much later in ths sequence. These long term dependencies were less important with dinosaur names, since the names were quite short. \n<img src=\"images/shakespeare.jpg\" style=\"width:500;height:400px;\">\n<caption><center> Let's become poets! </center></caption>\nWe have implemented a Shakespeare poem generator with Keras. Run the following cell to load the required packages and models. 
This may take a few minutes.", "from __future__ import print_function\nfrom keras.callbacks import LambdaCallback\nfrom keras.models import Model, load_model, Sequential\nfrom keras.layers import Dense, Activation, Dropout, Input, Masking\nfrom keras.layers import LSTM\nfrom keras.utils.data_utils import get_file\nfrom keras.preprocessing.sequence import pad_sequences\nfrom shakespeare_utils import *\nimport sys\nimport io", "To save you some time, we have already trained a model for ~1000 epochs on a collection of Shakespearian poems called \"The Sonnets\". \nLet's train the model for one more epoch. When it finishes training for an epoch---this will also take a few minutes---you can run generate_output, which will prompt asking you for an input (&lt;40 characters). The poem will start with your sentence, and our RNN-Shakespeare will complete the rest of the poem for you! For example, try \"Forsooth this maketh no sense \" (don't enter the quotation marks). Depending on whether you include the space at the end, your results might also differ--try it both ways, and try other inputs as well.", "print_callback = LambdaCallback(on_epoch_end=on_epoch_end)\n\nmodel.fit(x, y, batch_size=128, epochs=1, callbacks=[print_callback])\n\n# Run this cell to try with different inputs without having to re-train the model \ngenerate_output()", "The RNN-Shakespeare model is very similar to the one you have built for dinosaur names. The only major differences are:\n- LSTMs instead of the basic RNN to capture longer-range dependencies\n- The model is a deeper, stacked LSTM model (2 layer)\n- Using Keras instead of python to simplify the code \nIf you want to learn more, you can also check out the Keras Team's text generation implementation on GitHub: https://github.com/keras-team/keras/blob/master/examples/lstm_text_generation.py.\nCongratulations on finishing this notebook! \nReferences:\n- This exercise took inspiration from Andrej Karpathy's implementation: https://gist.github.com/karpathy/d4dee566867f8291f086. To learn more about text generation, also check out Karpathy's blog post.\n- For the Shakespearian poem generator, our implementation was based on the implementation of an LSTM text generator by the Keras team: https://github.com/keras-team/keras/blob/master/examples/lstm_text_generation.py" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
keras-team/keras-io
guides/ipynb/keras_tuner/custom_tuner.ipynb
apache-2.0
[ "Tune hyperparameters in your custom training loop\nAuthors: Tom O'Malley, Haifeng Jin<br>\nDate created: 2019/10/28<br>\nLast modified: 2022/01/12<br>\nDescription: Use HyperModel.fit() to tune training hyperparameters (such as batch size).", "!pip install keras-tuner -q", "Introduction\nThe HyperModel class in KerasTuner provides a convenient way to define your\nsearch space in a reusable object. You can override HyperModel.build() to\ndefine and hypertune the model itself. To hypertune the training process (e.g.\nby selecting the proper batch size, number of training epochs, or data\naugmentation setup), you can override HyperModel.fit(), where you can access:\n\nThe hp object, which is an instance of keras_tuner.HyperParameters\nThe model built by HyperModel.build()\n\nA basic example is shown in the \"tune model training\" section of\nGetting Started with KerasTuner.\nTuning the custom training loop\nIn this guide, we will subclass the HyperModel class and write a custom\ntraining loop by overriding HyperModel.fit(). For how to write a custom\ntraining loop with Keras, you can refer to the guide\nWriting a training loop from scratch.\nFirst, we import the libraries we need, and we create datasets for training and\nvalidation. Here, we just use some random data for demonstration purposes.", "import keras_tuner\nimport tensorflow as tf\nfrom tensorflow import keras\nimport numpy as np\n\n\nx_train = np.random.rand(1000, 28, 28, 1)\ny_train = np.random.randint(0, 10, (1000, 1))\nx_val = np.random.rand(1000, 28, 28, 1)\ny_val = np.random.randint(0, 10, (1000, 1))", "Then, we subclass the HyperModel class as MyHyperModel. In\nMyHyperModel.build(), we build a simple Keras model to do image\nclassification for 10 different classes. MyHyperModel.fit() accepts several\narguments. Its signature is shown below:\npython\ndef fit(self, hp, model, x, y, validation_data, callbacks=None, **kwargs):\n\nThe hp argument is for defining the hyperparameters.\nThe model argument is the model returned by MyHyperModel.build().\nx, y, and validation_data are all custom-defined arguments. We will\npass our data to them by calling tuner.search(x=x, y=y,\nvalidation_data=(x_val, y_val)) later. You can define any number of them and\ngive custom names.\nThe callbacks argument was intended to be used with model.fit().\nKerasTuner put some helpful Keras callbacks in it, for example, the callback\nfor checkpointing the model at its best epoch.\n\nWe will manually call the callbacks in the custom training loop. Before we\ncan call them, we need to assign our model to them with the following code so\nthat they have access to the model for checkpointing.\npy\nfor callback in callbacks:\n callback.model = model\nIn this example, we only called the on_epoch_end() method of the callbacks\nto help us checkpoint the model. You may also call other callback methods\nif needed. If you don't need to save the model, you don't need to use the\ncallbacks.\nIn the custom training loop, we tune the batch size of the dataset as we wrap\nthe NumPy data into a tf.data.Dataset. Note that you can tune any\npreprocessing steps here as well. We also tune the learning rate of the\noptimizer.\nWe will use the validation loss as the evaluation metric for the model. To\ncompute the mean validation loss, we will use keras.metrics.Mean(), which\naverages the validation loss across the batches. 
We need to return the\nvalidation loss for the tuner to make a record.", "\nclass MyHyperModel(keras_tuner.HyperModel):\n def build(self, hp):\n \"\"\"Builds a convolutional model.\"\"\"\n inputs = keras.Input(shape=(28, 28, 1))\n x = keras.layers.Flatten()(inputs)\n x = keras.layers.Dense(\n units=hp.Choice(\"units\", [32, 64, 128]), activation=\"relu\"\n )(x)\n outputs = keras.layers.Dense(10)(x)\n return keras.Model(inputs=inputs, outputs=outputs)\n\n def fit(self, hp, model, x, y, validation_data, callbacks=None, **kwargs):\n # Convert the datasets to tf.data.Dataset.\n batch_size = hp.Int(\"batch_size\", 32, 128, step=32, default=64)\n train_ds = tf.data.Dataset.from_tensor_slices((x_train, y_train)).batch(\n batch_size\n )\n validation_data = tf.data.Dataset.from_tensor_slices(validation_data).batch(\n batch_size\n )\n\n # Define the optimizer.\n optimizer = keras.optimizers.Adam(\n hp.Float(\"learning_rate\", 1e-4, 1e-2, sampling=\"log\", default=1e-3)\n )\n loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n\n # The metric to track validation loss.\n epoch_loss_metric = keras.metrics.Mean()\n\n # Function to run the train step.\n @tf.function\n def run_train_step(images, labels):\n with tf.GradientTape() as tape:\n logits = model(images)\n loss = loss_fn(labels, logits)\n # Add any regularization losses.\n if model.losses:\n loss += tf.math.add_n(model.losses)\n gradients = tape.gradient(loss, model.trainable_variables)\n optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n\n # Function to run the validation step.\n @tf.function\n def run_val_step(images, labels):\n logits = model(images)\n loss = loss_fn(labels, logits)\n # Update the metric.\n epoch_loss_metric.update_state(loss)\n\n # Assign the model to the callbacks.\n for callback in callbacks:\n callback.model = model\n\n # Record the best validation loss value\n best_epoch_loss = float(\"inf\")\n\n # The custom training loop.\n for epoch in range(2):\n print(f\"Epoch: {epoch}\")\n\n # Iterate the training data to run the training step.\n for images, labels in train_ds:\n run_train_step(images, labels)\n\n # Iterate the validation data to run the validation step.\n for images, labels in validation_data:\n run_val_step(images, labels)\n\n # Calling the callbacks after epoch.\n epoch_loss = float(epoch_loss_metric.result().numpy())\n for callback in callbacks:\n # The \"my_metric\" is the objective passed to the tuner.\n callback.on_epoch_end(epoch, logs={\"my_metric\": epoch_loss})\n epoch_loss_metric.reset_states()\n\n print(f\"Epoch loss: {epoch_loss}\")\n best_epoch_loss = min(best_epoch_loss, epoch_loss)\n\n # Return the evaluation metric value.\n return best_epoch_loss\n", "Now, we can initialize the tuner. Here, we use Objective(\"my_metric\", \"min\")\nas our metric to be minimized. The objective name should be consistent with the\none you use as the key in the logs passed to the 'on_epoch_end()' method of\nthe callbacks. 
The callbacks need to use this value in the logs to find the\nbest epoch to checkpoint the model.", "tuner = keras_tuner.RandomSearch(\n objective=keras_tuner.Objective(\"my_metric\", \"min\"),\n max_trials=2,\n hypermodel=MyHyperModel(),\n directory=\"results\",\n project_name=\"custom_training\",\n overwrite=True,\n)\n", "We start the search by passing the arguments we defined in the signature of\nMyHyperModel.fit() to tuner.search().", "tuner.search(x=x_train, y=y_train, validation_data=(x_val, y_val))", "Finally, we can retrieve the results.", "best_hps = tuner.get_best_hyperparameters()[0]\nprint(best_hps.values)\n\nbest_model = tuner.get_best_models()[0]\nbest_model.summary()", "In summary, to tune the hyperparameters in your custom training loop, you just\noverride HyperModel.fit() to train the model and return the evaluation\nresults. With the provided callbacks, you can easily save the trained models at\ntheir best epochs and load the best models later.\nTo find out more about the basics of KerasTuner, please see\nGetting Started with KerasTuner." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
NGSchool2016/ngschool2016-materials
jupyter/ndolgikh/.ipynb_checkpoints/NGSchool_python-checkpoint.ipynb
gpl-3.0
[ "Set the matplotlib magic to notebook enable inline plots", "%pylab inline", "Calculate the Nonredundant Read Fraction (NRF)\nSAM format example:\nSRR585264.8766235 0 1 4 15 35M * 0 0 CTTAAACAATTATTCCCCCTGCAAACATTTTCAAT GGGGGGGGGGGGGGGGGGGGGGFGGGGGGGGGGGG XT:A:U NM:i:1 X0:i:1 X1:i:6 XM:i:1 XO:i:0 XG:i:0 MD:Z:8T26\nImport the required modules", "import subprocess\nimport matplotlib.pyplot as plt\nimport random\nimport numpy as np", "Make figures prettier and biger", "plt.style.use('ggplot')\nfigsize(10,5)", "Parse the SAM file and extract the unique start coordinates.\nFirst store the file name in the variable", "file = \"/ngschool/chip_seq/bwa/input.sorted.bam\"", "Next we read the file using samtools. From each read we need to store the flag, chromosome name and start coordinate.", "p = subprocess.Popen([\"samtools\", \"view\", \"-q10\", \"-F260\", file],\n stdout=subprocess.PIPE)\ncoords = []\nfor line in p.stdout:\n flag, ref, start = line.decode('utf-8').split()[1:4]\n coords.append([flag, ref, start])\n\ncoords[:3]", "What is the total number of our unique reads?", "len(coords)", "Randomly sample the coordinates to get 1M for NRF calculations", "random.seed(1234)\nsample = random.sample(coords, 1000000)\n\nlen(sample)", "How many of those coordinates are unique? (We will use the set python object which only the unique items.)", "uniqueStarts = {'watson': set(), 'crick': set()}\nfor coord in sample:\n flag, ref, start = coord\n if int(flag) & 16:\n uniqueStarts['crick'].add((ref, start))\n else:\n uniqueStarts['watson'].add((ref, start))", "How many on the Watson strand?", "len(uniqueStarts['watson'])", "And on the Crick?", "len(uniqueStarts['crick'])", "Calculate the NRF", "NRF_input = (len(uniqueStarts['watson']) + len(uniqueStarts['crick']))*1.0/len(sample)\nprint(NRF_input)", "Lets create a function from what we did above and apply it to all of our files!\nTo use our function on the real sequencing datasets (not only on a small subset) we need to optimize our method a bit- we will use python module called numpy.", "def calculateNRF(filePath, pickSample=True, sampleSize=10000000, seed=1234):\n p = subprocess.Popen(['samtools', 'view', '-q10', '-F260', filePath],\n stdout=subprocess.PIPE)\n coordType = np.dtype({'names': ['flag', 'ref', 'start'],\n 'formats': ['uint16', 'U10', 'uint32']})\n coordArray = np.empty(10000000, dtype=coordType)\n i = 0\n for line in p.stdout:\n if i >= len(coordArray):\n coordArray = np.append(coordArray, np.empty(1000000, dtype=coordType), axis=0)\n fg, rf, st = line.decode('utf-8').split()[1:4]\n coordArray[i] = np.array((fg, rf, st), dtype=coordType)\n i += 1\n coordArray = coordArray[:i]\n sample = coordArray\n if pickSample and len(coordArray) > sampleSize:\n np.random.seed(seed)\n sample = np.random.choice(coordArray, sampleSize, replace=False)\n uniqueStarts = {'watson': set(), 'crick': set()}\n for read in sample:\n flag, ref, start = read\n if flag & 16:\n uniqueStarts['crick'].add((ref, start))\n else:\n uniqueStarts['watson'].add((ref, start))\n NRF = (len(uniqueStarts['watson']) + len(uniqueStarts['crick']))*1.0/len(sample)\n return NRF", "Calculate the NRF for the chip-seq sample", "NRF_chip = calculateNRF(\"/ngschool/chip_seq/bwa/sox2_chip.sorted.bam\", sampleSize=1000000)\nprint(NRF_chip)", "Plot the NRF!", "plt.bar([0,2],[NRF_input, NRF_chip], width=1)\nplt.xlim([-0.5,3.5]), plt.xticks([0.5, 2.5], ['Input', 'ChIP'])\nplt.xlabel('Sample')\nplt.ylabel('NRF')\nplt.ylim([0, 1.25]), plt.yticks(np.arange(0, 1.2, 0.2))\nplt.plot((-0.5,3.5), 
(0.8,0.8), 'red', linestyle='dashed')\nplt.show()", "Calculate the Signal Extraction Scaling\nLoad the results from the coverage calculations", "countList = []\nwith open('/ngschool/chip_seq/bedtools/input_coverage.bed', 'r') as covFile:\n for line in covFile:\n countList.append(int(line.strip('\\n').split('\\t')[3]))\ncountList[0:6]\n\ncountList[-15:]", "Lets see where do our reads align to the genome. Plot the distribution of tags along the genome.", "plt.plot(range(len(countList)), countList)\nplt.xlabel('Bin number')\nplt.ylabel('Bin coverage')\nplt.xlim([0, len(countList)])\nplt.show()", "Now sort the list- order the windows based on the tag count", "countList.sort()\n\ncountList[0:6]", "Sum all the aligned tags", "countSum = sum(countList)\ncountSum", "Calculate the summaric fraction of tags along the ordered windows.", "countFraction = []\nfor i, count in enumerate(countList):\n if i == 0:\n countFraction.append(count*1.0 / countSum)\n else:\n countFraction.append((count*1.0 / countSum) + countFraction[i-1])", "Look at the last five items of the list:", "countFraction[-5:]", "Calculate the number of windows.", "winNumber = len(countFraction)\nwinNumber", "Calculate what fraction of a whole is the position of each window.", "winFraction = []\nfor i in range(winNumber):\n winFraction.append(i*1.0 / winNumber)", "Look at the last five items of our new list:", "winFraction[-5:]", "Now prepare the function!", "def calculateSES(filePath):\n countList = []\n with open(filePath, 'r') as covFile:\n for line in covFile:\n countList.append(int(line.strip('\\n').split('\\t')[3]))\n plt.plot(range(len(countList)), countList)\n plt.xlabel('Bin number')\n plt.ylabel('Bin coverage')\n plt.xlim([0, len(countList)])\n plt.show()\n countList.sort()\n countSum = sum(countList)\n countFraction = []\n for i, count in enumerate(countList):\n if i == 0:\n countFraction.append(count*1.0 / countSum)\n else:\n countFraction.append((count*1.0 / countSum) + countFraction[i-1])\n winNumber = len(countFraction)\n winFraction = []\n for i in range(winNumber):\n winFraction.append(i*1.0 / winNumber)\n return [winFraction, countFraction]", "Use our function to calculate the signal extraction scaling for the Sox2 ChIP sample:", "chipSes = calculateSES(\"/ngschool/chip_seq/bedtools/sox2_chip_coverage.bed\")", "Now we can plot the calculated fractions for both the input and ChIP sample:", "plt.plot(winFraction, countFraction, label='input')\nplt.plot(chipSes[0], chipSes[1], label='Sox2 ChIP')\nplt.ylim([0,1])\nplt.xlabel('Ordered window franction')\nplt.ylabel('Genome coverage fraction')\nplt.legend(loc='best')\nplt.show()" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
dtrimarco/blog
posts/product_data_071317.ipynb
mit
[ "Pandas for Product Analysis Part 1: Apply and Transform\nPython's pandas package is one of the most powerful tools for data analysis in the Python ecosystem. Built on top of NumPy, it makes working with tabular data quite effective and adds an astounding amount of functionality to your toolkit. Despite its strengths, there are some very useful functions that are challenging to grasp based on the pandas docs. apply and transform are two such examples. \nOne quick note before we dive in: this series assumes basic working knowledge of pandas. There are several resources like Dataquest, Data Camp and pandas cheat sheets to get you up to speed if this is hard to follow.\nWhat are apply and transform?\nIn short, these two functions are used to operate on data structures, similarly to Python's built in map function. We will get into the differences, but typically they are used in combination with groupby to perform aggregate functions on various groups of a dataset. This a direct analogy to GROUP BY in SQL and I am going to assume familiarity with how it works (if you aren't, here is a decent intro). The major difference is that we can leverage the flexibility of Python and pandas DataFrames to do basically whatever we want.\nData\nTo keep things practical, let's start with event data from a hypothetical mobile game. I created some randomly generated, but logical data for us to analyze.", "import pandas as pd\n\ndata = pd.read_csv('test_user_data.csv')\nprint(data.head(10))", "The data contains one event per row and has 5 variables:\n\nuser_id: Identifier for each user.\nevent_timestamp: The time each event happened.\nlat: The latitude of the user when the event occurred.\nlon: The longitude of the user when the event occurred.\nevent_type: The type of event that occurred: login, level, buy_coins and megapack.\n\nBasic differences between apply and transform\nSuppose we wanted to count the number of events for each user. Both functions can do this, but in different ways. Let's try it first with apply.", "apply_ex = data.groupby('user_id').apply(len)\nprint(apply_ex.head())", "The output here is a pandas Series with each user_id as the index and the count of the number of events as values. Now to try the same thing with transform.", "transform_ex = data.groupby('user_id').transform(len)\nprint(transform_ex.head())", "What the heck happened here? This odd DataFrame highlights a key difference: apply by default returns an object with one element per group and transform returns an object of the exact same size as the input object. Unless specified, it operates column by column in order.\nHow about we clean this up a bit and create a new column in our original DataFrame that contains the total event count for each group in it.", "data['event_count'] = data.groupby('user_id')['user_id'].transform(len)\nprint(data.head(7))", "Much better. All we had to do was assign to the new event_count column and then specify the ['user_id'] column after the groupby statement. Whether you would prefer to have this additional column of repeating values depends on what you intend to do with the data afterwards. Let's assume this is acceptable. Now for something a bit more involved.\nCustom Functions\nSay we didn't have Google Analytics or Mixpanel implemented into our app and wanted to assign a monetary value to each event. Of course, we could loop through the entire DataFrame, but this can be very inefficient with a lot of data. 
Let's try it using a custom function.", "def add_value(x):\n    if x == 'buy_coins':\n        y = 1.00\n    elif x == 'megapack':\n        y = 10.00\n    else:\n        y = 0.0\n    return y", "Here we've defined a very simple custom function that assigns a value to each of the four event types. Now to apply it to our data.", "data['event_value'] = data['event_type'].apply(add_value)\nprint(data.head(7))", "That worked out nicely. Since we didn't care about event_value per user, groupby wasn't necessary. If we were to run this using transform, we'd get an error: since it runs column by column, there isn't a practical way to reference other columns like there is with apply.\nIn the next post of the series, we'll continue using pandas to answer more interesting product questions like:\n\nHow much time does it take our users to purchase after downloading the app?\nHow many logins does it take before our users make an in-app purchase?\nWhat is the lifetime value of our users?" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
authman/DAT210x
Module5/Module5 - Lab5.ipynb
mit
[ "DAT210x - Programming with Python for DS\nModule5- Lab5", "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib\n\nmatplotlib.style.use('ggplot') # Look Pretty", "A Convenience Function", "def plotDecisionBoundary(model, X, y):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n padding = 0.6\n resolution = 0.0025\n colors = ['royalblue','forestgreen','ghostwhite']\n\n # Calculate the boundaris\n x_min, x_max = X[:, 0].min(), X[:, 0].max()\n y_min, y_max = X[:, 1].min(), X[:, 1].max()\n x_range = x_max - x_min\n y_range = y_max - y_min\n x_min -= x_range * padding\n y_min -= y_range * padding\n x_max += x_range * padding\n y_max += y_range * padding\n\n # Create a 2D Grid Matrix. The values stored in the matrix\n # are the predictions of the class at at said location\n xx, yy = np.meshgrid(np.arange(x_min, x_max, resolution),\n np.arange(y_min, y_max, resolution))\n\n # What class does the classifier say?\n Z = model.predict(np.c_[xx.ravel(), yy.ravel()])\n Z = Z.reshape(xx.shape)\n\n # Plot the contour map\n cs = plt.contourf(xx, yy, Z, cmap=plt.cm.terrain)\n\n # Plot the test original points as well...\n for label in range(len(np.unique(y))):\n indices = np.where(y == label)\n plt.scatter(X[indices, 0], X[indices, 1], c=colors[label], label=str(label), alpha=0.8)\n\n p = model.get_params()\n plt.axis('tight')\n plt.title('K = ' + str(p['n_neighbors']))", "The Assignment\nLoad up the dataset into a variable called X. Check .head and dtypes to make sure you're loading your data properly--don't fail on the 1st step!", "# .. your code here ..", "Copy the wheat_type series slice out of X, and into a series called y. Then drop the original wheat_type column from the X:", "# .. your code here ..", "Do a quick, \"ordinal\" conversion of y. In actuality our classification isn't ordinal, but just as an experiment...", "# .. your code here ..", "Do some basic nan munging. Fill each row's nans with the mean of the feature:", "# .. your code here ..", "Split X into training and testing data sets using train_test_split(). Use 0.33 test size, and use random_state=1. This is important so that your answers are verifiable. In the real world, you wouldn't specify a random_state:", "# .. your code here ..", "Create an instance of SKLearn's Normalizer class and then train it using its .fit() method against your training data. The reason you only fit against your training data is because in a real-world situation, you'll only have your training data to train with! In this lab setting, you have both train+test data; but in the wild, you'll only have your training data, and then unlabeled data you want to apply your models to.", "# .. your code here ..", "With your trained pre-processor, transform both your training AND testing data. Any testing data has to be transformed with your preprocessor that has ben fit against your training data, so that it exist in the same feature-space as the original data used to train your models.", "# .. your code here ..", "Just like your preprocessing transformation, create a PCA transformation as well. Fit it against your training data, and then project your training and testing features into PCA space using the PCA model's .transform() method. This has to be done because the only way to visualize the decision boundary in 2D would be if your KNN algo ran in 2D as well:", "# .. your code here ..", "Create and train a KNeighborsClassifier. Start with K=9 neighbors. 
Be sure to train your classifier against the pre-processed, PCA-transformed training data above! You do not, of course, need to transform your labels.", "# .. your code here ..\n\n# I hope your KNeighbors classifier model from earlier was named 'knn'\n# If not, adjust the following line:\nplotDecisionBoundary(knn, X_train, y_train)", "Display the accuracy score of your test data/labels, computed by your KNeighbors model. You do NOT have to run .predict before calling .score, since .score will take care of running your predictions for you automatically.", "# .. your code here ..", "Bonus\nInstead of the ordinal conversion, try and get this assignment working with a proper Pandas get_dummies for feature encoding. You might have to update some of the plotDecisionBoundary() code.", "plt.show()" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
NREL/bifacial_radiance
docs/tutorials/4 - Medium Level Example - Debugging your Scene with Custom Objects (Fixed Tilt 2-up with Torque Tube + CLEAN Routine + CustomObject).ipynb
bsd-3-clause
[ "4 - Medium Level Example - Debugging your Scene with Custom Objects\nFixed Tilt 2-up with Torque Tube + CLEAN Routine + CustomObject\nThis journal has examples of various things, some which hav ebeen covered before and some in more depth:\n<ul>\n <li> Running a fixed_tilt simulation beginning to end. </li>\n <li> Creating a 2-up module with torque-tube, and detailed geometry of spacings in xgap, ygap and zgap. </li>\n <li> Calculating the tracker angle for a specific time, in case you want to use that value to model a fixed_tilt setup. </li>\n <li> Loading and cleaning results, particularly important when using setups with torquetubes / ygaps. </li>\n <li> Adding a \"Custom Object\" or **marker** at the Origin of the Scene, to do a visual sanity-check of the geometry. </li>\n</ul>\n\nIt will look something like this (without the marker in this visualization):\n\nSTEPS:\n<ol type='1'>\n <li> <a href='#step1'> Specify Working Folder and Import Program </a></li>\n <li> <a href='#step2'> Specify all variables </a></li>\n <li> <a href='#step3'> Create the Radiance Object and generate the Sky </a></li>\n <li> <a href='#step4'> Calculating tracker angle/geometry for a specific timestamp </a></li>\n <li> <a href='#step5'> Making the Module & the Scene, Visualize and run Analysis </a></li>\n <li> <a href='#step6'> Calculate Bifacial Ratio (clean results) </a></li>\n <li> <a href='#step7'> Add Custom Elements to your Scene Example: Marker at 0,0 position </a></li>\n</ol>\n\n<a id='step1'></a>\n1. Specify Working Folder and Import Program", "import os\nfrom pathlib import Path\n\ntestfolder = Path().resolve().parent.parent / 'bifacial_radiance' / 'TEMP' / 'Tutorial_04'\n\n# Another option using relative address; for some operative systems you might need '/' instead of '\\'\n# testfolder = os.path.abspath(r'..\\..\\bifacial_radiance\\TEMP') \n\nprint (\"Your simulation will be stored in %s\" % testfolder)\n\nif not os.path.exists(testfolder):\n os.makedirs(testfolder)\n\nimport bifacial_radiance\nimport numpy as np\nimport pandas as pd", "<a id='step2'></a>\n2. Specify all variables for the module and scene\nBelow find a list of all of the possible parameters for makeModule. \nscene and simulation parameters are also organized below. \nThis simulation will be a complete simulation in terms of parameters that you can modify.\nThe below routine creates a HEXAGONAL torque tube, for a 2-UP configuration of a specific module size. Parameters for the module, the torque tube, and the scene are below.\nThis is being run with gendaylit, for one specific timestamp", "simulationname = 'tutorial_4'\n\n## SceneDict Parameters\ngcr = 0.33 # ground cover ratio, = module_height / pitch\nalbedo = 0.28 #'concrete' # ground albedo\nhub_height = 2.35 # we could also pass clearance_height. \nazimuth_ang = 90 # Modules will be facing East.\nlat = 37.5\nlon = -77.6\nnMods = 4 # doing a smaller array for better visualization on this example.\nnRows = 2 \n\n# MakeModule Parameters\nmodule_type='test-module'\nx = 1.996 # landscape, sinze x > y. Remember that orientation has been deprecated.\ny = 0.991\ntilt = 10\nnumpanels = 2 # doing a 2-up system!\n\n\n# Gaps:\nxgap = 0.05 # distance between modules in the row.\nygap = 0.15 # distance between the 2 modules along the collector slope.\nzgap = 0.175 # if there is a torquetube, this is the distance between the torquetube and the modules.\n# If there is not a module, zgap is the distance between the module and the axis of rotation (relevant for \n# tracking systems. 
\n\n# TorqueTube Parameters\ntubetype = 'Hex'\ndiameter = 0.15\nmaterial = 'Metal_Grey' # IT's NOT GRAY, IT's GREY.\n", "<a id='step3'></a>\n3. Create the Radiance Object and generate the Sky", "demo = bifacial_radiance.RadianceObj(simulationname, path=str(testfolder)) # Create a RadianceObj 'object'\ndemo.setGround(albedo) # input albedo number or material name like 'concrete'. To see options, run this without any input.\nepwfile = demo.getEPW(lat,lon) # pull TMY data for any global lat/lon\nmetdata = demo.readWeatherFile(epwfile, coerce_year=2001) # read in the EPW weather data from above\n\ntimestamp = metdata.datetime.index(pd.to_datetime('2001-06-17 13:0:0 -5')) # Make this timezone aware, use -5 for EST.\ndemo.gendaylit(timestamp) # Mid-day, June 17th", "<a id='step4'></a>\n4. Calculating tracker angle/geometry for a specific timestamp\nThis trick is useful if you are trying to use the fixed-tilt steps in bifacial_radiance to model a tracker for one specific point in time (if you take a picture of a tracker, it looks fixed, right? Well then). \nWe assigned a 10 degree tilt at the beginning, but if we were to model a tracker as a fixed-tilt element because we are interested in only one point in time, this routine will tell us what tilt to use. Please note that to model a tracker as fixed tilt, we suggest passing a hub_height, otherwise you will have to calculate the clearance_height manually.\n<div class=\"alert alert-warning\">\nDetails: you might have noticed this in the previous tutorial when looking at the tracker dictionary; this is the way that bifacial_radiance handles tracking: if the tracker axis azimuth is N-S, the surface azimuth of the modules will always be set to 90, with a tilt that is either positive (for the early morning, facing East) or negative (for the afternoon, facing West).\n</div>", "# Some tracking parameters that won't be needed after getting this angle:\naxis_azimuth = 180\naxis_tilt = 0\nlimit_angle = 60\nbacktrack = True\ntilt = demo.getSingleTimestampTrackerAngle(metdata, timestamp, gcr, axis_azimuth, axis_tilt, limit_angle, backtrack)\n\nprint (\"\\n NEW Calculated Tilt: %s \" % tilt)", "<a id='step5'></a>\n5. Making the Module & the Scene, Visualize and run Analysis", "# Making the module with all the variables\nmodule = demo.makeModule(name=module_type, x=x, y=y, bifi=1, \n                         zgap=zgap, ygap=ygap, xgap=xgap, numpanels=numpanels)\nmodule.addTorquetube(diameter=diameter, material=material, tubetype=tubetype,\n                     visible=True, axisofrotation=True)\n\n# Create a scene with all the variables. \n# Specifying the pitch automatically with the collector width (sceney) returned by the module object.\n# Height has been deprecated as an input. Pass clearance_height or hub_height in the sceneDict.\n\nsceneDict = {'tilt':tilt, 'pitch': np.round(module.sceney / gcr,3),\n             'hub_height':hub_height, 'azimuth':azimuth_ang, \n             'module_type':module_type, 'nMods': nMods, 'nRows': nRows} \n\nscene = demo.makeScene(module=module, sceneDict=sceneDict) # makeScene creates a .rad file of the Scene\n\noctfile = demo.makeOct(demo.getfilelist()) # makeOct combines all of the ground, sky and object files into a .oct file.", "At this point you should be able to go into a command window (cmd.exe) and check the geometry. It should look like the image at the beginning of the journal.
Example:\nrvu -vf views\\front.vp -e .01 -pe 0.02 -vp -2 -12 14.5 tutorial_4.oct", "\n## Comment the line below to run rvu from the Jupyter notebook instead of your terminal.\n## Simulation will stop until you close the rvu window\n\n#!rvu -vf views\\front.vp -e .01 tutorial_4.oct\n", "And then proceed happily with your analysis:", "analysis = bifacial_radiance.AnalysisObj(octfile, demo.name) # return an analysis object including the scan dimensions for back irradiance\n\nsensorsy = 200 # setting this very high to see a detailed profile of the irradiance, including\n# the shadow of the torque tube on the rear side of the module.\nfrontscan, backscan = analysis.moduleAnalysis(scene, modWanted = 2, rowWanted = 1, sensorsy = 200)\nfrontDict, backDict = analysis.analysis(octfile, demo.name, frontscan, backscan) # compare the back vs front irradiance \n\n# print('\"Annual\" bifacial ratio average: %0.3f' %( sum(analysis.Wm2Back) / sum(analysis.Wm2Front) ) )\n# See the comment below for why this line is commented out.", "<a id='step6'></a>\n6. Calculate Bifacial Ratio (clean results)\nAlthough we could calculate a bifacial ratio average at this point, this value would be misleading: because the scene has a torquetube and a ygap, some of the generated sensors will fall on the torque tube, the sky, and/or the ground. To calculate the real bifacial ratio average, we must use the clean routines.", "resultFile='results/irr_tutorial_4.csv'\nresults_loaded = bifacial_radiance.load.read1Result(resultFile)\nprint(\"Printing the dataframe containing the results just calculated in %s: \" % resultFile)\nresults_loaded\n\nprint(\"Looking at only 1 sensor in the middle -- position 100 out of the 200 sensors sampled:\")\nresults_loaded.loc[100]", "As an example, we can see above that sensor 100 falls on the hextube and on the sky. We need to remove these sensors to calculate the real bifacial_gain from the irradiance falling on the modules. To do this we use cleanResult from the load.py module in bifacial_radiance. This finds the invalid materials and sets the irradiance values for those materials to NaN.\nThis might take some time in the current version.", "# Cleaning Results:\n# remove invalid materials and set the irradiance values to NaN\nclean_results = bifacial_radiance.load.cleanResult(results_loaded) \n\nprint(\"Sampling the same location as before to see what the results are now:\")\nclean_results.loc[100]\n\nprint('CORRECT Annual bifacial ratio average: %0.3f' %( clean_results['Wm2Back'].sum() / clean_results['Wm2Front'].sum() ))\n\nprint (\"\\n(If we had not done the cleaning routine, the bifacial ratio would have been \", \\\n       \"calculated to %0.3f <-- THIS VALUE IS WRONG)\" %( sum(analysis.Wm2Back) / sum(analysis.Wm2Front) )) \n", "<a id='step7'></a>\n7. Add Custom Elements to your Scene Example: Marker at 0,0 position\nThis shows how to add a custom element, in this case a box, that will be placed in the center of your already created scene to mark the 0,0 location. \nThis can be added at any point after makeScene has been run once. Notice that if this extra element is in the scene and the analysis sensors fall on this element, they will measure irradiance at this element and not at the modules.\nWe are going to create a \"MyMarker.rad\" file in the objects folder, right after we make the Module.
\nThis is a prism (so we use 'genbox'), colored with the 'black' material from the ground.rad list of materials.\nWe are naming it 'CenterMarker'.\nIts base is going to be 0.1 x 0.1 m and it will be 4 m tall,\nand we are going to leave its bottom surface coincident with the plane z=0, but center it on X and Y.", "name='MyMarker'\ntext='! genbox black CenterMarker 0.1 0.1 4 | xform -t -0.05 -0.05 0'\ncustomObject = demo.makeCustomObject(name,text)\n", "This should have created a MyMarker.rad object in your objects folder.\nBut creating the object does not automatically add it to the scene. So let's now add the customObject to the Scene. We are not going to translate it or anything because we want it at the center, but you can pass translation, rotation, and any other XFORM command from Radiance.\nI am passing a rotation of 0 because xform has to have some argument (I think), otherwise it gets confused.", "demo.appendtoScene(scene.radfiles, customObject, '!xform -rz 0')\n# makeOct combines all of the ground, sky and object files into a .oct file.\noctfile = demo.makeOct(demo.getfilelist()) ", "appendtoScene appends the name of the custom object we created, along with the xform transformation we included as text, to the Scene .rad file. makeOct then merges this new scene with the ground and sky files.\nAt this point you should be able to go into a command window (cmd.exe) and check the geometry, and the marker should be there. Example:\n#### rvu -vf views\\front.vp -e .01 tutorial_4.oct", "\n## Comment the line below to run rvu from the Jupyter notebook instead of your terminal.\n## Simulation will stop until you close the rvu window\n\n#!rvu -vf views\\front.vp -e .01 tutorial_4.oct\n", "If you ran the getTrackerAngle detour and appended the marker, it should look like this:\n\nIf you do an analysis and any of the sensors hit the Box object we just created, the list of materials in the result.csv file should say something with \"CenterMarker\" on it. \nSee more examples of the use of makeCustomObject and appendtoScene in the Bifacial Carport/Canopies Tutorial." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
thehackerwithin/berkeley
code_examples/python_mayavi/mayavi_intermediate.ipynb
bsd-3-clause
[ "%matplotlib qt\nimport numpy as np\nfrom mayavi import mlab\n\nfrom scipy.integrate import odeint", "Lorenz Attractor - 3D line and point plotting demo\nLorenz attractor is a 3D differential equation that we will use to demonstrate mayavi's 3D plotting ability. We will look at some ways to make plotting lots of data more efficient.", "# setup parameters for Lorenz equations\nsigma=10\nbeta=8/3.\nrho=28\n\ndef lorenz(x, t, ):\n dx = np.zeros(3)\n dx[0] = -sigma*x[0] + sigma*x[1]\n dx[1] = rho*x[0] - x[1] - x[0]*x[2]\n dx[2] = -beta*x[2] + x[0]*x[1]\n return dx\n\n# solve for a specific particle\n# initial condition\ny0 = np.ones(3) + .01\n\n# time steps to compute location\nn_time = 20000\nt = np.linspace(0,200,n_time)\n\n# solve the ODE \ny = odeint( lorenz, y0, t )\n\ny.shape", "Rendering Points and Lines\nMayavi has several ways to render 3D line and point data. The default is to use surfaces, which uses more resources. There are kwargs that can be changed to make it render with 2-D lines and points that make plotting large amounts of data more efficient.\nLinePlot", "# plot the data as a line\n# change the tube radius to see the difference\nmlab.figure('Line')\nmlab.clf()\nmlab.plot3d(y[:,0], y[:,1], y[:,2], tube_radius=.1)\nmlab.colorbar()\n\n# plot the data as a line, with color representing the time evolution\nmlab.figure('Line')\nmlab.clf()\nmlab.plot3d(y[:,0], y[:,1], y[:,2], t, tube_radius=None, )\nmlab.colorbar()", "Point Plot", "# plot the data as a line, with color representing the time evolution\nmlab.figure()\n\n# By default, mayavi will plot points as spheres, so each point will \n# be represented by a surface. \n# Using mode='2dvertex' is needed for plotting large numbers of points.\nmlab.figure('Points')\nmlab.clf()\nmlab.points3d(y[:,0], y[:,1], y[:,2], t, mode='2dvertex')\nmlab.colorbar( title='time')\nmlab.axes()", "Line + Point Plot", "# plot the data as a line, with color representing the time evolution\nmlab.figure('Line and Points')\nmlab.clf()\n\n# plot the data as a line, with color representing the time evolution\nmlab.plot3d(y[:,0], y[:,1], y[:,2], t, tube_radius=None, line_width=1 )\nmlab.colorbar()\n\n# By default, mayavi will plot points as spheres, so each point will \n# be represented by a surface. \n# Using mode='2dvertex' is needed for plotting large numbers of points.\nmlab.points3d(y[:,0], y[:,1], y[:,2], t, scale_factor=.3, scale_mode='none')\n #mode='2dvertex')\nmlab.colorbar( title='time')", "Contour Plot\nLet's see how long the particle spends in each location", "h3d = np.histogramdd(y, bins=50)\n\n# generate the midpoint coordinates\nxg,yg,zg = h3d[1]\nxm = xg[1:] - .5*(xg[1]-xg[0])\nym = yg[1:] - .5*(yg[1]-yg[0])\nzm = zg[1:] - .5*(zg[1]-zg[0])\nxg, yg, zg = np.meshgrid(xm, ym, zm)\n\nmlab.figure('contour')\nmlab.clf()\nmlab.contour3d( h3d[0], opacity=.5, contours=25 )", "Animation\nAnimation can be accomplished with a mlab.animate decorator. You must define a function that yields to the animate decorator. 
The yield defines when mayavi will rerender the image.", "# plot the data as a line\nmlab.figure('Animate')\nmlab.clf()\n# mlab.plot3d(y[:,0], y[:,1], y[:,2], tube_radius=None)\n# mlab.colorbar()\n\na = mlab.points3d(y0[0], y0[1], y0[2], mode='2dvertex')\n\n# number of points to plot\n# n_plot = n_time\nn_plot = 1000\n\[email protected](delay=10, ui=True )\ndef anim():\n for i in range(n_time):\n # a.mlab_source.set(x=y[i,0],y=y[i,1],z=y[i,2], color=(1,0,0))\n mlab.points3d(y[i,0],y[i,1],y[i,2], mode='2dvertex', reset_zoom=False)\n yield\n \nanim()" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
johnnyliu27/openmc
examples/jupyter/mgxs-part-iii.ipynb
mit
[ "This IPython Notebook illustrates the use of the openmc.mgxs.Library class. The Library class is designed to automate the calculation of multi-group cross sections for use cases with one or more domains, cross section types, and/or nuclides. In particular, this Notebook illustrates the following features:\n\nCalculation of multi-group cross sections for a fuel assembly\nAutomated creation, manipulation and storage of MGXS with openmc.mgxs.Library\nValidation of multi-group cross sections with OpenMOC\nSteady-state pin-by-pin fission rates comparison between OpenMC and OpenMOC\n\nNote: This Notebook was created using OpenMOC to verify the multi-group cross-sections generated by OpenMC. You must install OpenMOC on your system to run this Notebook in its entirety. In addition, this Notebook illustrates the use of Pandas DataFrames to containerize multi-group cross section data.\nGenerate Input Files", "import math\nimport pickle\n\nfrom IPython.display import Image\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport openmc\nimport openmc.mgxs\nfrom openmc.openmoc_compatible import get_openmoc_geometry\nimport openmoc\nimport openmoc.process\nfrom openmoc.materialize import load_openmc_mgxs_lib\n\n%matplotlib inline", "First we need to define materials that will be used in the problem. We'll create three materials for the fuel, water, and cladding of the fuel pins.", "# 1.6 enriched fuel\nfuel = openmc.Material(name='1.6% Fuel')\nfuel.set_density('g/cm3', 10.31341)\nfuel.add_nuclide('U235', 3.7503e-4)\nfuel.add_nuclide('U238', 2.2625e-2)\nfuel.add_nuclide('O16', 4.6007e-2)\n\n# borated water\nwater = openmc.Material(name='Borated Water')\nwater.set_density('g/cm3', 0.740582)\nwater.add_nuclide('H1', 4.9457e-2)\nwater.add_nuclide('O16', 2.4732e-2)\nwater.add_nuclide('B10', 8.0042e-6)\n\n# zircaloy\nzircaloy = openmc.Material(name='Zircaloy')\nzircaloy.set_density('g/cm3', 6.55)\nzircaloy.add_nuclide('Zr90', 7.2758e-3)", "With our three materials, we can now create a Materials object that can be exported to an actual XML file.", "# Instantiate a Materials object\nmaterials_file = openmc.Materials([fuel, water, zircaloy])\n\n# Export to \"materials.xml\"\nmaterials_file.export_to_xml()", "Now let's move on to the geometry. This problem will be a square array of fuel pins and control rod guide tubes for which we can use OpenMC's lattice/universe feature. The basic universe will have three regions for the fuel, the clad, and the surrounding coolant. 
The first step is to create the bounding surfaces for fuel and clad, as well as the outer bounding surfaces of the problem.", "# Create cylinders for the fuel and clad\nfuel_outer_radius = openmc.ZCylinder(x0=0.0, y0=0.0, R=0.39218)\nclad_outer_radius = openmc.ZCylinder(x0=0.0, y0=0.0, R=0.45720)\n\n# Create boundary planes to surround the geometry\nmin_x = openmc.XPlane(x0=-10.71, boundary_type='reflective')\nmax_x = openmc.XPlane(x0=+10.71, boundary_type='reflective')\nmin_y = openmc.YPlane(y0=-10.71, boundary_type='reflective')\nmax_y = openmc.YPlane(y0=+10.71, boundary_type='reflective')\nmin_z = openmc.ZPlane(z0=-10., boundary_type='reflective')\nmax_z = openmc.ZPlane(z0=+10., boundary_type='reflective')", "With the surfaces defined, we can now construct a fuel pin cell from cells that are defined by intersections of half-spaces created by the surfaces.", "# Create a Universe to encapsulate a fuel pin\nfuel_pin_universe = openmc.Universe(name='1.6% Fuel Pin')\n\n# Create fuel Cell\nfuel_cell = openmc.Cell(name='1.6% Fuel')\nfuel_cell.fill = fuel\nfuel_cell.region = -fuel_outer_radius\nfuel_pin_universe.add_cell(fuel_cell)\n\n# Create a clad Cell\nclad_cell = openmc.Cell(name='1.6% Clad')\nclad_cell.fill = zircaloy\nclad_cell.region = +fuel_outer_radius & -clad_outer_radius\nfuel_pin_universe.add_cell(clad_cell)\n\n# Create a moderator Cell\nmoderator_cell = openmc.Cell(name='1.6% Moderator')\nmoderator_cell.fill = water\nmoderator_cell.region = +clad_outer_radius\nfuel_pin_universe.add_cell(moderator_cell)", "Likewise, we can construct a control rod guide tube with the same surfaces.", "# Create a Universe to encapsulate a control rod guide tube\nguide_tube_universe = openmc.Universe(name='Guide Tube')\n\n# Create guide tube Cell\nguide_tube_cell = openmc.Cell(name='Guide Tube Water')\nguide_tube_cell.fill = water\nguide_tube_cell.region = -fuel_outer_radius\nguide_tube_universe.add_cell(guide_tube_cell)\n\n# Create a clad Cell\nclad_cell = openmc.Cell(name='Guide Clad')\nclad_cell.fill = zircaloy\nclad_cell.region = +fuel_outer_radius & -clad_outer_radius\nguide_tube_universe.add_cell(clad_cell)\n\n# Create a moderator Cell\nmoderator_cell = openmc.Cell(name='Guide Tube Moderator')\nmoderator_cell.fill = water\nmoderator_cell.region = +clad_outer_radius\nguide_tube_universe.add_cell(moderator_cell)", "Using the pin cell universe, we can construct a 17x17 rectangular lattice with a 1.26 cm pitch.", "# Create fuel assembly Lattice\nassembly = openmc.RectLattice(name='1.6% Fuel Assembly')\nassembly.pitch = (1.26, 1.26)\nassembly.lower_left = [-1.26 * 17. / 2.0] * 2", "Next, we create a NumPy array of fuel pin and guide tube universes for the lattice.", "# Create array indices for guide tube locations in lattice\ntemplate_x = np.array([5, 8, 11, 3, 13, 2, 5, 8, 11, 14, 2, 5, 8,\n 11, 14, 2, 5, 8, 11, 14, 3, 13, 5, 8, 11])\ntemplate_y = np.array([2, 2, 2, 3, 3, 5, 5, 5, 5, 5, 8, 8, 8, 8,\n 8, 11, 11, 11, 11, 11, 13, 13, 14, 14, 14])\n\n# Initialize an empty 17x17 array of the lattice universes\nuniverses = np.empty((17, 17), dtype=openmc.Universe)\n\n# Fill the array with the fuel pin and guide tube universes\nuniverses[:,:] = fuel_pin_universe\nuniverses[template_x, template_y] = guide_tube_universe\n\n# Store the array of universes in the lattice\nassembly.universes = universes", "OpenMC requires that there is a \"root\" universe. 
Let us create a root cell that is filled by the assembly and then assign it to the root universe.", "# Create root Cell\nroot_cell = openmc.Cell(name='root cell')\nroot_cell.fill = assembly\n\n# Add boundary planes\nroot_cell.region = +min_x & -max_x & +min_y & -max_y & +min_z & -max_z\n\n# Create root Universe\nroot_universe = openmc.Universe(universe_id=0, name='root universe')\nroot_universe.add_cell(root_cell)", "We now must create a geometry that is assigned a root universe and export it to XML.", "# Create Geometry and set root Universe\ngeometry = openmc.Geometry(root_universe)\n\n# Export to \"geometry.xml\"\ngeometry.export_to_xml()", "With the geometry and materials finished, we now just need to define simulation parameters. In this case, we will use 10 inactive batches and 40 active batches each with 2500 particles.", "# OpenMC simulation parameters\nbatches = 50\ninactive = 10\nparticles = 10000\n\n# Instantiate a Settings object\nsettings_file = openmc.Settings()\nsettings_file.batches = batches\nsettings_file.inactive = inactive\nsettings_file.particles = particles\nsettings_file.output = {'tallies': False}\n\n# Create an initial uniform spatial source distribution over fissionable zones\nbounds = [-10.71, -10.71, -10, 10.71, 10.71, 10.]\nuniform_dist = openmc.stats.Box(bounds[:3], bounds[3:], only_fissionable=True)\nsettings_file.source = openmc.source.Source(space=uniform_dist)\n\n# Export to \"settings.xml\"\nsettings_file.export_to_xml()", "Let us also create a Plots file that we can use to verify that our fuel assembly geometry was created successfully.", "# Instantiate a Plot\nplot = openmc.Plot(plot_id=1)\nplot.filename = 'materials-xy'\nplot.origin = [0, 0, 0]\nplot.pixels = [250, 250]\nplot.width = [-10.71*2, -10.71*2]\nplot.color_by = 'material'\n\n# Instantiate a Plots object, add Plot, and export to \"plots.xml\"\nplot_file = openmc.Plots([plot])\nplot_file.export_to_xml()", "With the plots.xml file, we can now generate and view the plot. OpenMC outputs plots in .ppm format, which can be converted into a compressed format like .png with the convert utility.", "# Run openmc in plotting mode\nopenmc.plot_geometry(output=False)\n\n# Convert OpenMC's funky ppm to png\n!convert materials-xy.ppm materials-xy.png\n\n# Display the materials plot inline\nImage(filename='materials-xy.png')", "As we can see from the plot, we have a nice array of fuel and guide tube pin cells with fuel, cladding, and water!\nCreate an MGXS Library\nNow we are ready to generate multi-group cross sections! First, let's define a 2-group structure using the built-in EnergyGroups class.", "# Instantiate a 2-group EnergyGroups object\ngroups = openmc.mgxs.EnergyGroups()\ngroups.group_edges = np.array([0., 0.625, 20.0e6])", "Next, we will instantiate an openmc.mgxs.Library for the energy groups with the fuel assembly geometry.", "# Initialize a 2-group MGXS Library for OpenMOC\nmgxs_lib = openmc.mgxs.Library(geometry)\nmgxs_lib.energy_groups = groups", "Now, we must specify to the Library which types of cross sections to compute. 
In particular, the following are the multi-group cross section MGXS subclasses that are mapped to string codes accepted by the Library class:\n\nTotalXS (\"total\")\nTransportXS (\"transport\" or \"nu-transport\" with nu set to True)\nAbsorptionXS (\"absorption\")\nCaptureXS (\"capture\")\nFissionXS (\"fission\" or \"nu-fission\" with nu set to True)\nKappaFissionXS (\"kappa-fission\")\nScatterXS (\"scatter\" or \"nu-scatter\" with nu set to True)\nScatterMatrixXS (\"scatter matrix\" or \"nu-scatter matrix\" with nu set to True)\nChi (\"chi\")\nChiPrompt (\"chi prompt\")\nInverseVelocity (\"inverse-velocity\")\nPromptNuFissionXS (\"prompt-nu-fission\")\nDelayedNuFissionXS (\"delayed-nu-fission\")\nChiDelayed (\"chi-delayed\")\nBeta (\"beta\")\n\nIn this case, let's create the multi-group cross sections needed to run an OpenMOC simulation to verify the accuracy of our cross sections. In particular, we will define \"nu-transport\", \"nu-fission\", \"fission\", \"nu-scatter matrix\" and \"chi\" cross sections for our Library.\nNote: A variety of different approximate transport-corrected total multi-group cross sections (and corresponding scattering matrices) can be found in the literature. At the present time, the openmc.mgxs module only supports the \"P0\" transport correction. This correction can be turned on and off through the Library.correction property, which may take values of \"P0\" (default) or None.", "# Specify multi-group cross section types to compute\nmgxs_lib.mgxs_types = ['nu-transport', 'nu-fission', 'fission', 'nu-scatter matrix', 'chi']", "Now we must specify the type of domain over which we would like the Library to compute multi-group cross sections. The domain type corresponds to the type of tally filter to be used in the tallies created to compute multi-group cross sections. At the present time, the Library supports \"material\", \"cell\", \"universe\", and \"mesh\" domain types. We will use a \"cell\" domain type here to compute cross sections in each of the cells in the fuel assembly geometry.\nNote: By default, the Library class will instantiate MGXS objects for each and every domain (material, cell or universe) in the geometry of interest. However, one may specify a subset of these domains to the Library.domains property. In our case, we wish to compute multi-group cross sections in each and every cell since they will be needed in our downstream OpenMOC calculation on the identical combinatorial geometry mesh.", "# Specify a \"cell\" domain type for the cross section tally filters\nmgxs_lib.domain_type = 'cell'\n\n# Specify the cell domains over which to compute multi-group cross sections\nmgxs_lib.domains = geometry.get_all_material_cells().values()", "We can easily instruct the Library to compute multi-group cross sections on a nuclide-by-nuclide basis with the boolean Library.by_nuclide property. By default, by_nuclide is set to False, but we will set it to True here.", "# Compute cross sections on a nuclide-by-nuclide basis\nmgxs_lib.by_nuclide = True", "Lastly, we use the Library to construct the tallies needed to compute all of the requested multi-group cross sections in each domain and nuclide.", "# Construct all tallies needed for the multi-group cross section library\nmgxs_lib.build_library()", "The tallies can now be exported to a \"tallies.xml\" input file for OpenMC. \nNOTE: At this point the Library has constructed nearly 100 distinct Tally objects.
The overhead to tally in OpenMC scales as $O(N)$ for $N$ tallies, which can become a bottleneck for large tally datasets. To compensate for this, the Python API's Tally, Filter and Tallies classes allow for the smart merging of tallies when possible. The Library class supports this runtime optimization with the use of the optional merge parameter (False by default) for the Library.add_to_tallies_file(...) method, as shown below.", "# Create a \"tallies.xml\" file for the MGXS Library\ntallies_file = openmc.Tallies()\nmgxs_lib.add_to_tallies_file(tallies_file, merge=True)", "In addition, we instantiate a fission rate mesh tally to compare with OpenMOC.", "# Instantiate a tally Mesh\nmesh = openmc.Mesh(mesh_id=1)\nmesh.type = 'regular'\nmesh.dimension = [17, 17]\nmesh.lower_left = [-10.71, -10.71]\nmesh.upper_right = [+10.71, +10.71]\n\n# Instantiate tally Filter\nmesh_filter = openmc.MeshFilter(mesh)\n\n# Instantiate the Tally\ntally = openmc.Tally(name='mesh tally')\ntally.filters = [mesh_filter]\ntally.scores = ['fission', 'nu-fission']\n\n# Add tally to collection\ntallies_file.append(tally)\n\n# Export all tallies to a \"tallies.xml\" file\ntallies_file.export_to_xml()\n\n# Run OpenMC\nopenmc.run()", "Tally Data Processing\nOur simulation ran successfully and created statepoint and summary output files. We begin our analysis by instantiating a StatePoint object.", "# Load the last statepoint file\nsp = openmc.StatePoint('statepoint.50.h5')", "The statepoint is now ready to be analyzed by the Library. We simply have to load the tallies from the statepoint into the Library and our MGXS objects will compute the cross sections for us under-the-hood.", "# Initialize MGXS Library with OpenMC statepoint data\nmgxs_lib.load_from_statepoint(sp)", "Voila! Our multi-group cross sections are now ready to rock 'n roll!\nExtracting and Storing MGXS Data\nThe Library supports a rich API to automate a variety of tasks, including multi-group cross section data retrieval and storage. We will highlight a few of these features here. First, the Library.get_mgxs(...) method allows one to extract an MGXS object from the Library for a particular domain and cross section type. The following cell illustrates how one may extract the NuFissionXS object for the fuel cell.\nNote: The MGXS.get_mgxs(...) method will accept either the domain or the integer domain ID of interest.", "# Retrieve the NuFissionXS object for the fuel cell from the library\nfuel_mgxs = mgxs_lib.get_mgxs(fuel_cell, 'nu-fission')", "The NuFissionXS object supports all of the methods described previously in the openmc.mgxs tutorials, such as Pandas DataFrames:\nNote that since so few histories were simulated, we should expect a few divide-by-zero errors as some tallies have not yet scored any results.", "df = fuel_mgxs.get_pandas_dataframe()\ndf", "Similarly, we can use the MGXS.print_xs(...) method to view a string representation of the multi-group cross section data.", "fuel_mgxs.print_xs()", "One can export the entire Library to HDF5 with the Library.build_hdf5_store(...) method as follows:", "# Store the cross section data in an \"mgxs/mgxs.h5\" HDF5 binary file\nmgxs_lib.build_hdf5_store(filename='mgxs.h5', directory='mgxs')", "The HDF5 store will contain the numerical multi-group cross section data indexed by domain, nuclide and cross section type. Some data workflows may be optimized by storing and retrieving binary representations of the MGXS objects in the Library. This feature is supported through the Library.dump_to_file(...)
and Library.load_from_file(...) routines which use Python's pickle module. This is illustrated as follows.", "# Store a Library and its MGXS objects in a pickled binary file \"mgxs/mgxs.pkl\"\nmgxs_lib.dump_to_file(filename='mgxs', directory='mgxs')\n\n# Instantiate a new MGXS Library from the pickled binary file \"mgxs/mgxs.pkl\"\nmgxs_lib = openmc.mgxs.Library.load_from_file(filename='mgxs', directory='mgxs')", "The Library class may be used to leverage the energy condensation features supported by the MGXS class. In particular, one can use the Library.get_condensed_library(...) with a coarse group structure which is a subset of the original \"fine\" group structure as shown below.", "# Create a 1-group structure\ncoarse_groups = openmc.mgxs.EnergyGroups(group_edges=[0., 20.0e6])\n\n# Create a new MGXS Library on the coarse 1-group structure\ncoarse_mgxs_lib = mgxs_lib.get_condensed_library(coarse_groups)\n\n# Retrieve the NuFissionXS object for the fuel cell from the 1-group library\ncoarse_fuel_mgxs = coarse_mgxs_lib.get_mgxs(fuel_cell, 'nu-fission')\n\n# Show the Pandas DataFrame for the 1-group MGXS\ncoarse_fuel_mgxs.get_pandas_dataframe()", "Verification with OpenMOC\nOf course it is always a good idea to verify that one's cross sections are accurate. We can easily do so here with the deterministic transport code OpenMOC. We first construct an equivalent OpenMOC geometry.", "# Create an OpenMOC Geometry from the OpenMC Geometry\nopenmoc_geometry = get_openmoc_geometry(mgxs_lib.geometry)", "Now, we can inject the multi-group cross sections into the equivalent fuel assembly OpenMOC geometry. The openmoc.materialize module supports the loading of Library objects from OpenMC as illustrated below.", "# Load the library into the OpenMOC geometry\nmaterials = load_openmc_mgxs_lib(mgxs_lib, openmoc_geometry)", "We are now ready to run OpenMOC to verify our cross-sections from OpenMC.", "# Generate tracks for OpenMOC\ntrack_generator = openmoc.TrackGenerator(openmoc_geometry, num_azim=32, azim_spacing=0.1)\ntrack_generator.generateTracks()\n\n# Run OpenMOC\nsolver = openmoc.CPUSolver(track_generator)\nsolver.computeEigenvalue()", "We report the eigenvalues computed by OpenMC and OpenMOC here together to summarize our results.", "# Print report of keff and bias with OpenMC\nopenmoc_keff = solver.getKeff()\nopenmc_keff = sp.k_combined.nominal_value\nbias = (openmoc_keff - openmc_keff) * 1e5\n\nprint('openmc keff = {0:1.6f}'.format(openmc_keff))\nprint('openmoc keff = {0:1.6f}'.format(openmoc_keff))\nprint('bias [pcm]: {0:1.1f}'.format(bias))", "There is a non-trivial bias between the eigenvalues computed by OpenMC and OpenMOC. One can show that these biases do not converge to <100 pcm with more particle histories. For heterogeneous geometries, additional measures must be taken to address the following three sources of bias:\n\nAppropriate transport-corrected cross sections\nSpatial discretization of OpenMOC's mesh\nConstant-in-angle multi-group cross sections\n\nFlux and Pin Power Visualizations\nWe will conclude this tutorial by illustrating how to visualize the fission rates computed by OpenMOC and OpenMC. 
First, we extract volume-integrated fission rates from OpenMC's mesh fission rate tally for each pin cell in the fuel assembly.", "# Get the OpenMC fission rate mesh tally data\nmesh_tally = sp.get_tally(name='mesh tally')\nopenmc_fission_rates = mesh_tally.get_values(scores=['nu-fission'])\n\n# Reshape array to 2D for plotting\nopenmc_fission_rates.shape = (17,17)\n\n# Normalize to the average pin power\nopenmc_fission_rates /= np.mean(openmc_fission_rates[openmc_fission_rates > 0.])", "Next, we extract OpenMOC's volume-averaged fission rates into a 2D 17x17 NumPy array.", "# Create OpenMOC Mesh on which to tally fission rates\nopenmoc_mesh = openmoc.process.Mesh()\nopenmoc_mesh.dimension = np.array(mesh.dimension)\nopenmoc_mesh.lower_left = np.array(mesh.lower_left)\nopenmoc_mesh.upper_right = np.array(mesh.upper_right)\nopenmoc_mesh.width = openmoc_mesh.upper_right - openmoc_mesh.lower_left\nopenmoc_mesh.width /= openmoc_mesh.dimension\n\n# Tally OpenMOC fission rates on the Mesh\nopenmoc_fission_rates = openmoc_mesh.tally_fission_rates(solver)\nopenmoc_fission_rates = np.squeeze(openmoc_fission_rates)\nopenmoc_fission_rates = np.fliplr(openmoc_fission_rates)\n\n# Normalize to the average pin fission rate\nopenmoc_fission_rates /= np.mean(openmoc_fission_rates[openmoc_fission_rates > 0.])", "Now we can easily use Matplotlib to visualize the fission rates from OpenMC and OpenMOC side-by-side.", "# Ignore zero fission rates in guide tubes with Matplotlib color scheme\nopenmc_fission_rates[openmc_fission_rates == 0] = np.nan\nopenmoc_fission_rates[openmoc_fission_rates == 0] = np.nan\n\n# Plot OpenMC's fission rates in the left subplot\nfig = plt.subplot(121)\nplt.imshow(openmc_fission_rates, interpolation='none', cmap='jet')\nplt.title('OpenMC Fission Rates')\n\n# Plot OpenMOC's fission rates in the right subplot\nfig2 = plt.subplot(122)\nplt.imshow(openmoc_fission_rates, interpolation='none', cmap='jet')\nplt.title('OpenMOC Fission Rates')" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
mssalvador/Fifa2018
Teknisk Tirsdag Tutorial (Supervised Learning).ipynb
apache-2.0
[ "Teknisk Tirsdag: Supervised Learning\nI denne opgave skal vi bruge Logistisk Regression til at forudsige hvilke danske fodboldspillere der egentlig kunne spille for en storklub.", "# Run the datacleaning notebook to get all the variables\n%run 'Teknisk Tirsdag - Data Cleaning.ipynb'", "Efter at have hentet vores rensede data, hvor vi minder os selv om at vi har: <br>\n* dansker_set\n* topklub_set\n* ikke_topklub_set\n* overall_set \nDet første, vi gerne vil kigge lidt på, er, om vi var grundige nok i vores foranalyse. Derfor laver vi et heatmap, der skal fortælle os hvor stor sammenhængen er (korrelation) mellem kolonnerne i forhold til hinanden.", "corr = overall_set.corr()\n\nfig = plt.figure(figsize=(20, 16))\nax = sb.heatmap(corr, xticklabels=corr.columns.values,\n yticklabels=corr.columns.values,\n linewidths=0.25, vmax=1.0, square=True,\n linecolor='black', annot=False\n )\nplt.show()", "Hvad vi ser her, er en korrelationsmatrix. Jo mørkere farver, des højere korrelation, rød for positiv- og blå for negativ-korrelation. <br>\nVi ser altså at der er høj korrelation, i vores nedre højre hjørne; Dette er spilpositionerne. Vi ser også et stort blåt kryds, som er målmandsdata. Disse har meget negativ korrelation med resten af vores datasæt. (Dobbeltklik evt. på plottet, hvis det er meget svært at læse teksten)<br>\nDerudover kan vi se, at ID kolonnen slet ikke korrelere. Man kan derfor vælge at tage den ud.\nVi tilføjer nu vores \"kendte\" labels til vores data. (Hvis man spiller for en af vores topklubber, får man et 1-tal, og ellers får man et 0) <br>\nVi deler også vores træningssæt op i en X matrix med alle vores numeriske features, og en y vektor med alle vores labels.", "overall_set['label'] = overall_set['Club'].isin(topklub_set.Club).astype(int)\ny = overall_set['label']\nX = overall_set.iloc[:,0:-1].select_dtypes(include=['float64', 'int64'])", "Vi kan kigge lidt overordnet på tallene mellem de 2 klasser.", "overall_set.groupby('label').mean()", "Observationer\n\nAlderen siger ikke rigtig noget om, hvorvidt du spiller for en topklub eller ej\nTopklubsspillere er i gennemsnittet en faktor 10 mere værd end ikke-topklub spillere\nTopklubsspillere er i gennemsnittet generelt ca. 10+ på alt i forhold til ikke-topklub spillere\n\nVi er nu klar til at gå i gang med vores første Machine Learning algoritme.\nPå forhånd ved vi, at der i vores træningssæt er {{y.where(y==1).count()}} som spiller i topklubber, og {{y.where(y==0).count()}} der ikke gør. <br>\nDer er en 50/50 chance for at ramme rigtigt, hvis man bare gætte tilfældigt. Vi håber derfor, at algoritmen kan slå den 50% svarrate.\nLogistisk regression", "# hent nødvendige pakker fra Scikit Learn biblioteket (generelt super hvis man vil lave data science)\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split", "Vi fitter nu en logistic regression classifier til vores data, og fitter en model, så den kan genkende om man spiller for en topklub eller ej, og evaluere resultatet:", "model = LogisticRegression()\nmodel = model.fit(X,y)\n\nmodel.score(X,y)", "Altså har vores model ret i \n{{'{:.0f}'.format(100*model.score(X, y))}}% af tiden i træningssættet. <br>\nPretty good!! Den har altså fundet nogle mønstre der kan mappe data til labels, og gætter ikke bare.\nMen vi kan ikke vide, om den har overfittet, og derved har tilpasset sig for godt til sit kendte data, så nyt data vil blive fejlmappet. <br>\nHvad vi kan prøve, er at splitte vores træningssæt op i et trænings- og testsæt. 
På den måde kan vi først fitte og derefter evaluere på \"nyt\" kendt data, om den stadig performer som forventet.", "X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2)\nprint('Træningsæt størrelse: {} - Testsæt størrelse: {}'.format(len(X_train), len(X_test)))", "Og vi er nu klar til at prøve igen!\nLogistisk regression 2.0\nIgen fitter vi en logistisk regression til vores træningsdata, og danner en model, men denne gang uden at bruge testdatasættet.", "model2 = LogisticRegression()\nmodel2 = model2.fit(X_train, y_train)\nmodel2.score(X_train, y_train)", "Modellen matcher nu \n{{'{:.0f}'.format(100*model2.score(X, y))}}% af tiden i træningssættet. <br> \nMen har den overfittet?\nEvaluering af modellen\nVi genererer derfor vores y forudsigelse og også sandsynlighederne for vores testsæt, da disse bruges til at evaluere modellen.", "y_pred = model2.predict(X_test)\ny_probs = model2.predict_proba(X_test)\n\n# Evalueringsmålinger\nfrom sklearn import metrics\nprint('Nøjagtigheden af vores logistiske regressions models prediction på testsættet er {:.0f}'.format(100*metrics.accuracy_score(y_test, y_pred))+'%', '\\n')\nprint('Arealet under vores ROC AUC kurve er {:.0f}'.format(100*metrics.roc_auc_score(y_test, y_probs[:, 1]))+'%')", "Det ser jo ret fornuftigt ud.<br>\nFor at sige noget om vores nye model, kan vi også lave en \"confusion_matrix\"\n<img src='http://revolution-computing.typepad.com/.a/6a010534b1db25970b01bb08c97955970d-pi',\n align=\"center\"\n width=\"40%\"\n alt=\"confusion matrix\">\nT og F står for henholdsvist True og False<br>\nP og N står for henholdsvist Positive og Negative", "confusion_matrix = metrics.confusion_matrix(y_test, y_pred)\nprint(confusion_matrix)", "Resultatet fortæller os, at vi har {{confusion_matrix[0,0]}}+{{confusion_matrix[1,1]}} = {{confusion_matrix[0,0]+confusion_matrix[1,1]}} korrekte forudsigelser og {{confusion_matrix[0,1]}}+{{confusion_matrix[1,0]}} = {{confusion_matrix[0,1]+confusion_matrix[1,0]}} ukorrekte\nMan kan også bede classifieren om en rapport:", "print(metrics.classification_report(y_test, y_pred))", "Logistisk regression med krydsvalidering\nVi er egentlig meget tilfredse med vores model, men ofte kan det være en god idé at teste på flere små testsæt, og holde dem op mod hinanden. <br>\nHer laver vi en 10-folds krydsvalidering og får altså 10 scorer ud:", "# 10-folds cross-validation\nfrom sklearn.model_selection import cross_val_score\nscores = cross_val_score(LogisticRegression(), X, y, scoring='accuracy', cv=10)\nprint(scores,'\\n')\nprint(scores.mean())", "Her preformer modellen altså i gennemsnit \n{{'{:.0f}'.format(100*scores.mean())}}%. \nDet lyder meget lovende, men vi holder os til vores model2 og kan nu prøve modellen af på det rigtige datasæt\nDanskersættet\nVi skal nu prøve vores model på vores danske spillere<br>\nOpgave:\nVi skal lave prediction og probability på vores danske spillere, ligesom vi gjorde tidligere for testsættet. (Lige under Evaluering af modellen)<br>\nHusk din dataframe kun må indeholder numeriske værdier, når vi bruger modellen.<br>\nFx. \"df.select_dtypes(include=['float64', 'int64'])\"", "dansker_pred = None ### Fjern NONE og UDFYLD MIG ###\ndansker_probs = None ### Fjern NONE og UDFYLD MIG ###", "Modellen har fundet {{np.bincount(dansker_pred)[0]}} nuller og {{np.bincount(dansker_pred)[1]}} ét-taller\nHvis du satte top_klub_ratio til 75 i Opgave 1 i Data Cleaning, skulle der være omkring 27-28 ét-taller. 
<br>\ntop_klub_ratio blev sat til: {{top_klub_ratio}}\nVi tilføjer disse kolonner til vores dataframe.", "dansker_set_df = dansker_set.copy()\ndansker_set_df[['prob1','prob2']] = pd.DataFrame(dansker_probs, index=dansker_set.index)\ndansker_set_df['Probabilities [0,1]'] = dansker_set_df[['prob1','prob2']].values.tolist()\ndansker_set_df['Prediction'] = pd.Series(dansker_pred, index=dansker_set.index)\ndel dansker_set_df['prob1'], dansker_set_df['prob2'] \n# dansker_set_df.head()", "Og sortere listen, så de bedste danske spillere står øvers, og tilføjer et index, så vi kan få et bedre overblik", "dansker_set_df.loc[:,'pred=1'] = dansker_set_df['Probabilities [0,1]'].map(lambda x: x[1]).sort_values(ascending=False)\ndansker_sorted = dansker_set_df.sort_values('pred=1', ascending=False)\ndansker_sorted = dansker_sorted[['Name', 'Club', 'Overall', 'Potential', 'Probabilities [0,1]', 'Prediction']]\ndansker_sorted.loc[:,'in'] = np.arange(1, len(dansker_set_df)+1)\ndansker_sorted.set_index('in')", "Efter flot hattrick mod Irland, kan man vidst ikke være i tvivl om Kong Christian tager pladsen på tronen\n<img src='kongen.png',\n align=\"center\"\n width=\"40%\"\n alt=\"kongen\">\nMen hvilke danske spillere spiller egentlig for topklubber, og hvordan er de rangeret i forhold til vores model?", "dansker_sorted[dansker_sorted['Club'].isin(top_clubs)].set_index('in')", "Man kan undre sig over hvad Jacob Larsen laver hos stopklubben Borussia Dortmund, men en hurtig googling viser, at han simpelthen blev headhuntet til klubben som 16-årig.\nOg så er der jo nok nogen, der vil spørger - Hvad med Bendtner?\nSå han skal da også lige have en plads i vores analyse:", "dansker_sorted.loc[dansker_sorted.Name == 'N. Bendtner'].set_index('in')", "Opgave:\nVi kan også kigge på ham i det store billede. Prøv evt. at lege lidt rundt med forskellige spillere eller andre features.<br>\nEr der noget specielt, der kunne være sjovt at kigge på?", "df.loc[df.Name == 'N. Bendtner']", "Ekstra lege/analyse opgaver\nDanske Rezan Corlu som ellers ligger ret lavt selv på potentiale har alligevel sikret sig en plads hos A.S. Roma i en alder af 20 år.\nMen hvordan var det egentlig med de topklub spillere? Hvor langt ned kan man gå i potentiale, og stadig spille for en topklub?", "top_df = df[df.Club.isin(top_clubs)]\ntop_df[top_df.Overall < 70].sort_values('Overall', ascending=True)", "Vi kan altså se, at der bliver satset på ungdommen, hvor deres kommende potentiale nok taler for deres plads i en storklub.<br>\nMen hvad så med ikke-topklubsspillere og deres performance?", "bund_df = df[~df.Club.isin(top_clubs)]\nbund_df[bund_df.Overall > 70]", "Måske er de 22 klubber, vi har udvalgt ikke helt nok til at beskrive topklubber", "top_clubs", "Du kan evt. gå tilbage til Data Cleaning notebooken, og prøve at ændre tallet for top_klub_ratio" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
amkatrutsa/MIPT-Opt
Spring2021/intro_gd.ipynb
mit
[ "Введение в численные методы оптимизации (Ю. Е. Нестеров Введение в выпуклую оптимизацию, гл. 1 $\\S$ 1.1)\n\nОбзор материала весеннего семестра\nПостановка задачи\nОбщая схема решения\nСравнение методов оптимизации\nМетоды одномерной минимизации\n\nОбзор материала весеннего семестра\nТакже на странице курса.\n\nМетоды решения задач безусловной оптимизации\nОдномерная минимизация (уже сегодня!)\nГрадиентный спуск и способы его ускорения\nМетод Ньютона\nКвазиньютоновские методы\nМетод сопряжённых градиентов \nРешение задачи наименьших квадратов\n\n\nМетоды решения задач условной оптимизации\nМетоды проекции градиента и условного градиента\nПроксимальные методы\nМетоды штрафных и барьерных функций\nМетод модифицированой функции Лагранжа\n\n\n\nПостановка задачи\n\\begin{equation}\n\\begin{split}\n& \\min_{x \\in S} f_0(x)\\\n\\text{s.t. } & f_j(x) = 0, \\; j = 1,\\ldots,m\\\n& g_k(x) \\leq 0, \\; k = 1,\\ldots,p\n\\end{split}\n\\end{equation}\nгде $S \\subseteq \\mathbb{R}^n$, $f_j: S \\rightarrow \\mathbb{R}, \\; j = 0,\\ldots,m$, $g_k: S \\rightarrow \\mathbb{R}, \\; k=1,\\ldots,p$\nВсе функции как минимум непрерывны. \nВажный факт</span>: задачи нелинейной оптимизации \nв их самой общей форме являются численно неразрешимыми!\nАналитические результаты\n\nНеобходимое условие первого порядка: \n\nесли $x^*$ точка локального минимума дифференцируемой функции $f(x)$, тогда \n$$\nf'(x^*) = 0\n$$\n\nНеобходимое условие второго порядка \n\nесли $x^*$ точка локального минимума дважды дифференцируемой функции $f(x)$, тогда \n$$\nf'(x^) = 0 \\quad \\text{и} \\quad f''(x^) \\succeq 0\n$$\n\nДостаточное условие: \n\nпусть $f(x)$ дважды дифференцируемая функция, и пусть точка $x^*$ удовлетворяет условиям\n$$\nf'(x^) = 0 \\quad f''(x^) \\succ 0,\n$$\nтогда $x^*$ является точкой строго локального минимума функции $f(x)$.\nЗамечание: убедитесь, что Вы понимаете, как доказывать эти\nрезультаты!\nОсобенности численного решения\n\nТочно решить задачу принципиально невозможно из-за погрешности машинной арифметики\nНеобходимо задать критерий обнаружения решения\nНеобходимо определить, какую информацию о задаче использовать\n\nОбщая итеративная схема\nДано: начальное приближение $x$, требуемая точность $\\varepsilon$.\n```python\ndef GeneralScheme(x, epsilon):\nwhile StopCriterion(x) &gt; epsilon:\n\n OracleResponse = RequestOracle(x)\n\n UpdateInformation(I, x, OracleResponse)\n\n x = NextPoint(I, x)\n\nreturn x\n\n```\nВопросы\n\nКакие критерии остановки могут быть?\nЧто такое оракул и зачем он нужен?\nЧто такое информационная модель?\nКак вычисляется новая точка?\n\nКритерии остановки\n\nСходимость по аргументу: \n$$\n\\| x_k - x^* \\|_2 < \\varepsilon\n$$ \nСходимость по функции: \n$$\n\\| f_k - f^* \\|_2 < \\varepsilon\n$$ \nВыполнение необходимого условия \n$$\n\\| f'(x_k) \\|_2 < \\varepsilon\n$$\n\nНо ведь $x^*$ неизвестна!\nТогда\n\\begin{align}\n& \\|x_{k+1} - x_k \\| = \\|x_{k+1} - x_k + x^ - x^ \\| \\leq \\\n& \\|x_{k+1} - x^ \\| + \\| x_k - x^ \\| \\leq 2\\varepsilon\n\\end{align}\nАналогично для сходимости по функции, \nоднако иногда можно оценить $f^*$! \nЗамечание: лучше использовать относительные изменения \nэтих величин! 
\nНапример $\\dfrac{\\|x_{k+1} - x_k \\|_2}{\\| x_k \\|_2}$\nЧто такое оракул?\nОпределение: оракулом называют некоторое абстрактное \nустройство, которое отвечает на последовательные вопросы \nметода\nАналогия из ООП: \n\nоракул - это виртуальный метод базового класса\nкаждая задача - производный класс\nоракул определяется для каждой задачи отдельно согласно общему определению в базовом классе\n\nКонцепция чёрного ящика\n1. Единственной информацией, получаемой в ходе работы итеративного метода, являются ответы оракула\n2. Ответы оракула являются локальными\nИнформация о задаче\n\nКаждый ответ оракула даёт локальную информацию о поведении функции в точке\nАгрегируя все полученные ответы оракула, обновляем информацию о глобальном виде целевой функции:\nкривизна\nнаправление убывания\netc\n\n\n\nВычисление следующей точки\n$$\nx_{k+1} = x_{k} + \\alpha_k h_k\n$$\n\nЛинейный поиск: фиксируется направление $h_k$ и производится поиск по этому направлению \"оптимального\" значения $\\alpha_k$\n\nМетод доверительных областей: фиксируется допустимый размер области по некоторой норме $\\| \\cdot \\| \\leq \\alpha$ и модель целевой функции, которая хорошо её аппроксимирует в выбранной области. \nДалее производится поиск направления $h_k$, минимизирующего модель целевой функции и не выводящего точку $x_k + h_k$ за пределы доверительной области\n\n\nВопросы\n\nКак выбрать $\\alpha_k$?\nКак выбрать $h_k$?\nКак выбрать модель?\nКак выбрать область?\nКак выбрать размер области? \n\n<span style=\"color:red\">\n В курсе рассматривается только линейный поиск!</span> \nОднако несколько раз копцепция метода доверительных областей \nбудет использована.\nКак сравнивать методы оптимизации?\nДля заданного класса задач сравнивают следующие величины:\n1. Сложность\n - аналитическая: число обращений к оракулу для решения задачи с точностью $\\varepsilon$\n - арифметическая: общее число всех вычислений, необходимых для решения задачи с точностью $\\varepsilon$\n2. Скорость сходимости\n3. Эксперименты\nСкорости сходимости\n1. Сублинейная\n$$\n\\| x_{k+1} - x^* \\|_2 \\leq C k^{\\alpha},\n$$\nгде $\\alpha < 0$ и $ 0 < C < \\infty$\n2. Линейная (геометрическая прогрессия)\n$$\n\\| x_{k+1} - x^* \\|_2 \\leq Cq^k, \n$$\nгде $q \\in (0, 1)$ и $ 0 < C < \\infty$\n3. Сверхлинейная \n$$\n\\| x_{k+1} - x^* \\|_2 \\leq Cq^{k^p}, \n$$\nгде $q \\in (0, 1)$, $ 0 < C < \\infty$ и $p > 1$\n4. 
Квадратичная\n$$\n\\| x_{k+1} - x^ \\|_2 \\leq C\\| x_k - x^ \\|^2_2, \\qquad \\text{или} \\qquad \\| x_{k+1} - x^* \\|_2 \\leq C q^{2^k}\n$$\nгде $q \\in (0, 1)$ и $ 0 < C < \\infty$", "%matplotlib inline\nimport matplotlib.pyplot as plt\n\nUSE_COLAB = False\nif not USE_COLAB:\n plt.rc(\"text\", usetex=True)\n\nimport numpy as np\nC = 10\nalpha = -0.5\nq = 0.9\nnum_iter = 50\nsublinear = np.array([C * k**alpha for k in range(1, num_iter + 1)])\nlinear = np.array([C * q**k for k in range(1, num_iter + 1)])\nsuperlinear = np.array([C * q**(k**2) for k in range(1, num_iter + 1)])\nquadratic = np.array([C * q**(2**k) for k in range(1, num_iter + 1)])\nplt.figure(figsize=(12,8))\nplt.semilogy(np.arange(1, num_iter+1), sublinear, \n label=r\"Sublinear, $\\alpha = -0.5$\", linewidth=5)\n# plt.semilogy(np.arange(1, num_iter+1), superlinear, linewidth=5,\n# label=r\"Superlinear, $q = 0.5, p=2$\")\nplt.semilogy(np.arange(1, num_iter+1), linear, \n label=r\"Linear, $q = 0.5$\", linewidth=5)\n# plt.semilogy(np.arange(1, num_iter+1), quadratic, \n# label=r\"Quadratic, $q = 0.5$\", linewidth=5)\nplt.xlabel(\"Number of iterations, $k$\", fontsize=28)\nplt.ylabel(\"Error rate upper bound\", fontsize=28)\nplt.legend(loc=\"best\", fontsize=26)\nplt.xticks(fontsize = 28)\n_ = plt.yticks(fontsize = 28)", "Значение теорем сходимости (Б.Т. Поляк Введение в оптимизацию, гл. 1, $\\S$ 6)\n\n\nЧто дают теоремы сходимости\n\nкласс задач, для которых можно рассчитывать на применимость метода (важно не завышать условия!)\nвыпуклость\nгладкость\n\n\nкачественное поведение метода\nсущественно ли начальное приближение\nпо какому функционалу есть сходимость\n\n\nоценку скорости сходимости\nтеоретическая оценка поведения метода без проведения экспериментов\nопределение факторов, которые влияют на сходимость (обусловленность, размерность, etc)\nиногда заранее можно выбрать число итераций для достижения заданной точности \n\n\n\n\n\nЧто НЕ дают теоремы сходимости\n\nсходимость метода ничего не говорит о целесообразности его применения\nоценки сходимости зависят от неизвестных констант - неконструктивный характер\nучёт ошибок округления и точности решения вспомогательных задач\n\n\n\nМораль: нужно проявлять разумную осторожность \nи здравый смысл!\nКлассификация задач\n\nБезусловная оптимизация\nцелевая функция липшицева\nградиент целевой функции липшицев\n\n\nУсловная оптимизация\nмногогранник\nмножество простой структуры\nобщего вида\n\n\n\nКлассификация методов\nКакой размер истории нужно хранить для обновления?\n\nОдношаговые методы \n\n$$\nx_{k+1} = \\Phi(x_k)\n$$\n\nМногошаговые методы\n\n$$\nx_{k+1} = \\Phi(x_k, x_{k-1}, ...)\n$$\nКакой порядок поизводных нужно вычислить?\n\n\nМетоды нулевого порядка: оракул возвращает только значение функции $f(x)$\n\n\nМетоды первого порядка: оракул возвращает значение функции $f(x)$ и её градиент $f'(x)$\n\n\nМетоды второго порядка: оракул возвращает значение функции $f(x)$, её градиент $f'(x)$ и гессиан $f''(x)$.\n\n\nQ: существуют ли методы более высокого порядка?\nА: Implementable tensor methods in unconstrained convex optimization by Y. Nesterov, 2019 \nОдномерная минимизация\nОпределение. 
Функция $f(x)$ называется унимодальной на $[a, b]$, если существует такая точка $x^ \\in [a, b]$, что \n- $f(x_1) > f(x_2)$ для любых $a \\leq x_1 < x_2 < x^$, \nи \n- $f(x_1) < f(x_2)$ для любых $x^* < x_1 < x_2 \\leq b$.\nВопрос: какая геометрия унимодальных функций?\nМетод дихотомии\nИдея из информатики первого семестра: \nделим отрезок $[a,b]$ на две равные части \nпока не найдём минимум унимодальной функции.\n\n$N$ - число вычислений функции $f$\n$K = \\frac{N - 1}{2}$ - число итераций\n\nТогда\n$$\n|x_{K+1} - x^*| \\leq \\frac{b_{K+1} - a_{K+1}}{2} = \\left( \\frac{1}{2} \\right)^{\\frac{N-1}{2}} (b - a) \\approx 0.5^{K} (b - a) \n$$", "def binary_search(f, a, b, epsilon, callback=None):\n c = (a + b) / 2.0\n while abs(b - a) > epsilon:\n# Check left subsegment\n y = (a + c) / 2.0\n if f(y) <= f(c):\n b = c\n c = y\n else:\n# Check right subsegment\n z = (b + c) / 2.0\n if f(c) <= f(z):\n a = y\n b = z\n else:\n a = c\n c = z\n if callback is not None:\n callback(a, b)\n return c\n\ndef my_callback(a, b, left_bound, right_bound, approximation):\n left_bound.append(a)\n right_bound.append(b)\n approximation.append((a + b) / 2.0)\n\nimport numpy as np\n\nleft_boud_bs = []\nright_bound_bs = []\napproximation_bs = []\n\ncallback_bs = lambda a, b: my_callback(a, b, \n left_boud_bs, right_bound_bs, approximation_bs)\n\n# Target unimodal function on given segment\nf = lambda x: (x - 2) * x * (x + 2)**2 # np.power(x+2, 2)\n# f = lambda x: -np.sin(x)\nx_true = -2\n# x_true = np.pi / 2.0\na = -3\nb = -1.5\nepsilon = 1e-8\nx_opt = binary_search(f, a, b, epsilon, callback_bs)\nprint(np.abs(x_opt - x_true))\nplt.figure(figsize=(10,6))\nplt.plot(np.linspace(a,b), f(np.linspace(a,b)))\nplt.title(\"Objective function\", fontsize=28)\nplt.xticks(fontsize = 28)\n_ = plt.yticks(fontsize = 28)", "Метод золотого сечения\nИдея: \nделить отрезок $[a,b]$ не на две равные насти, \nа в пропорции \"золотого сечения\".\nОценим скорость сходимости аналогично методу дихотомии:\n$$\n|x_{K+1} - x^*| \\leq b_{K+1} - a_{K+1} = \\left( \\frac{1}{\\tau} \\right)^{N-1} (b - a) \\approx 0.618^K(b-a),\n$$\nгде $\\tau = \\frac{\\sqrt{5} + 1}{2}$.\n\nКонстанта геометрической прогрессии больше, чем у метода дихотомии\nКоличество вызовов функции меньше, чем у метода дихотомии", "def golden_search(f, a, b, tol=1e-5, callback=None):\n tau = (np.sqrt(5) + 1) / 2.0\n y = a + (b - a) / tau**2\n z = a + (b - a) / tau\n while b - a > tol:\n if f(y) <= f(z):\n b = z\n z = y\n y = a + (b - a) / tau**2\n else:\n a = y\n y = z\n z = a + (b - a) / tau\n if callback is not None:\n callback(a, b)\n return (a + b) / 2.0\n\nleft_boud_gs = []\nright_bound_gs = []\napproximation_gs = []\n\ncb_gs = lambda a, b: my_callback(a, b, left_boud_gs, right_bound_gs, approximation_gs)\nx_gs = golden_search(f, a, b, epsilon, cb_gs)\n\nprint(f(x_opt))\nprint(f(x_gs))\nprint(np.abs(x_opt - x_true))", "Сравнение методов одномерной минимизации", "plt.figure(figsize=(10,6))\nplt.semilogy(np.arange(1, len(approximation_bs) + 1), np.abs(x_true - np.array(approximation_bs, dtype=np.float64)), label=\"Binary search\")\nplt.semilogy(np.arange(1, len(approximation_gs) + 1), np.abs(x_true - np.array(approximation_gs, dtype=np.float64)), label=\"Golden search\")\nplt.xlabel(r\"Number of iterations, $k$\", fontsize=26)\nplt.ylabel(\"Error rate upper bound\", fontsize=26)\nplt.legend(loc=\"best\", fontsize=26)\nplt.xticks(fontsize = 26)\n_ = plt.yticks(fontsize = 26)\n\n%timeit binary_search(f, a, b, epsilon)\n%timeit golden_search(f, a, b, epsilon)", "Пример иного 
поведения методов\n$$\nf(x) = \\sin(\\sin(\\sin(\\sqrt{x}))), \\; x \\in [2, 60]\n$$", "f = lambda x: np.sin(np.sin(np.sin(np.sqrt(x))))\nx_true = (3 * np.pi / 2)**2\na = 2\nb = 60\nepsilon = 1e-8\nplt.plot(np.linspace(a,b), f(np.linspace(a,b)))\nplt.xticks(fontsize = 28)\n_ = plt.yticks(fontsize = 28)", "Сравнение скорости сходимости и времени работы методов\nМетод дихотомии", "left_boud_bs = []\nright_bound_bs = []\napproximation_bs = []\n\ncallback_bs = lambda a, b: my_callback(a, b, \n left_boud_bs, right_bound_bs, approximation_bs)\n\nx_opt = binary_search(f, a, b, epsilon, callback_bs)\nprint(np.abs(x_opt - x_true))", "Метод золотого сечения", "left_boud_gs = []\nright_bound_gs = []\napproximation_gs = []\n\ncb_gs = lambda a, b: my_callback(a, b, left_boud_gs, right_bound_gs, approximation_gs)\nx_gs = golden_search(f, a, b, epsilon, cb_gs)\n\nprint(np.abs(x_opt - x_true))", "Сходимость", "plt.figure(figsize=(8,6))\nplt.semilogy(np.abs(x_true - np.array(approximation_bs, dtype=np.float64)), label=\"Binary\")\nplt.semilogy(np.abs(x_true - np.array(approximation_gs, dtype=np.float64)), label=\"Golden\")\nplt.legend(fontsize=28)\nplt.xticks(fontsize=28)\n_ = plt.yticks(fontsize=28)\nplt.xlabel(r\"Number of iterations, $k$\", fontsize=26)\nplt.ylabel(\"Error rate upper bound\", fontsize=26)", "Время работы", "%timeit binary_search(f, a, b, epsilon)\n%timeit golden_search(f, a, b, epsilon)", "Резюме\n\nВведение в численные методы оптимизации\nОбщая схема работы метода\nСпособы сравнения методов оптимизации\nЗоопарк задач и методов\nОдномерная минимизация\n\nМетоды спуска. Градиентный спуск и его ускоренные модификации\nЧто такое методы спуска?\nПоследовательность $x_k$ генерируется по правилу\n$$\nx_{k+1} = x_k + \\alpha_k h_k\n$$\nтак что\n$$\nf(x_{k+1}) < f(x_k)\n$$\nНаправление $h_k$ называется направлением убывания.\nЗамечание: существуют методы, которые не требуют монотонного убывания функции от итерации к итерации.\n```python\ndef DescentMethod(f, x0, epsilon, **kwargs):\nx = x0\n\nwhile StopCriterion(x, f, **kwargs) &gt; epsilon:\n\n h = ComputeDescentDirection(x, f, **kwargs)\n\n alpha = SelectStepSize(x, h, f, **kwargs)\n\n x = x + alpha * h\n\nreturn x\n\n```\nСпособ 1: направление убывания\nРассмотрим линейную аппроксимацию дифференцируемой функции $f$ вдоль некоторого направления убывания $h, \\|h\\|_2 = 1$:\n$$\nf(x + \\alpha h) = f(x) + \\alpha \\langle f'(x), h \\rangle + o(\\alpha)\n$$\nИз условия убывания\n$$\nf(x) + \\alpha \\langle f'(x), h \\rangle + o(\\alpha) < f(x)\n$$\nи переходя к пределу при $\\alpha \\rightarrow 0$:\n$$\n\\langle f'(x), h \\rangle \\leq 0\n$$\nТакже из неравенства Коши-Буняковского-Шварца\n$$\n\\langle f'(x), h \\rangle \\geq -\\| f'(x) \\|_2 \\| h \\|_2 = -\\| f'(x) \\|_2\n$$\nТаким образом, направление антиградиента \n$$\nh = -\\dfrac{f'(x)}{\\|f'(x)\\|_2}\n$$\nдаёт направление наискорейшего локального убывания функции$~f$.\nВ итоге метод имеет вид\n$$\nx_{k+1} = x_k - \\alpha f'(x_k)\n$$\nСпособ 2: схема Эйлера решения ОДУ\nРассмотрим обыкновенное диференциальное уравнение вида:\n$$\n\\frac{dx}{dt} = -f'(x(t))\n$$\nи дискретизуем его на равномерной сетке с шагом $\\alpha$:\n$$\n\\frac{x_{k+1} - x_k}{\\alpha} = -f'(x_k),\n$$\nгде $x_k \\equiv x(t_k)$ и $\\alpha = t_{k+1} - t_k$ - шаг сетки.\nОтсюда получаем выражение для $x_{k+1}$\n$$\nx_{k+1} = x_k - \\alpha f'(x_k),\n$$\nкоторое в точности совпадает с выражением для градиентного спуска.\nТакая схема называется явной или прямой схемой Эйлера.\nQ: какая схема называется неявной или 
обратной?\nСпособ 3: минимизация квадратичной оценки сверху\n(А. В. Гасников \"Метод универсального градиентного спуска\" https://arxiv.org/abs/1711.00394)\nГлобальная оценка сверху на функцию $f$ в точке $x_k$:\n$$\nf(y) \\leq f(x_k) + \\langle f'(x_k), y - x_k \\rangle + \\frac{L}{2} \\|y - x_k \\|_2^2 = g(y), \n$$\nгде $\\lambda_{\\max}(f''(x)) \\leq L$ для всех допустимых $x$.\nСправа &mdash; квадратичная форма, точка минимума которой имеет аналитическое выражение:\n\\begin{align}\n& g'(y^) = 0 \\\n& f'(x_k) + L (y^ - x_k) = 0 \\\n& y^ = x_k - \\frac{1}{L}f'(x_k) = x_{k+1}\n\\end{align*}\nЭтот способ позволяет оценить значение шага как $\\frac{1}{L}$. Однако часто константа $L$ неизвестна.\nИтого: метод градиентного спуска &mdash; дёшево и сердито\n```python\ndef GradientDescentMethod(f, x0, epsilon, **kwargs):\nx = x0\n\nwhile StopCriterion(x, f, **kwargs) &gt; epsilon:\n\n h = ComputeGradient(x, f, **kwargs)\n\n alpha = SelectStepSize(x, h, f, **kwargs)\n\n x = x - alpha * h\n\nreturn x\n\n```\nКак выбрать шаг $\\alpha_k$? (J. Nocedal, S. Wright Numerical Optimization, $\\S$ 3.1.)\nСписок подходов:\n- Постоянный шаг \n$$\n\\alpha_k = \\overline{\\alpha}\n$$\n\nАприорно заданная последовательность, например\n\n$$\n\\alpha_k = \\dfrac{\\overline{\\alpha}}{\\sqrt{k+1}}\n$$\n\nНаискорейший спуск\n\n$$\n\\alpha_k = \\arg\\min_{\\alpha \\geq 0} f(x_k - \\alpha f'(x_k))\n$$\n\n\nТребование достаточного убывания, требование существенного убывания и условие кривизны: для некоторых $\\beta_1, \\beta_2$, таких что $0 < \\beta_1 < \\beta_2 < 1$ найти $x_{k+1}$ такую что\n\nДостаточное убывание: $f(x_{k+1}) \\leq f(x_k) + \\beta_1 \\alpha_k \\langle f'(x_k), h_k \\rangle$ или\n$ f(x_k) - f(x_{k+1}) \\geq \\beta_1 \\alpha_k \\langle f'(x_k), h_k \\rangle\n$\nСущественное убывание: $f(x_{k+1}) \\geq f(x_k) + \\beta_2 \\alpha_k \\langle f'(x_k), h_k \\rangle$ или\n$\nf(x_k) - f(x_{k+1}) \\leq \\beta_2 \\alpha_k \\langle f'(x_k), h_k \\rangle\n$\nУсловие кривизны: $\\langle f'(x_{k+1}), h_k \\rangle \\geq \\beta_2 \\langle f'(x_k), h_k \\rangle$\n\n\n\nОбычно коэффициенты выбирают так: $\\beta_1 \\in (0, 0.3)$, а $\\beta_2 \\in (0.9, 1)$\nАнализ и мотивация подходов к выбору шага $\\alpha_k$\n\nПостоянный шаг: самое простое и неэффективное решение\nАприорно заданная последовательность: немногим лучше постоянного шага\nНаискорейший спуск: самое лучшее решение, но применимо только если вспомогательная задача решается аналитически или ооооооочень быстро. <br></br>\nТо есть почти всегда неприменимо :)\nТребование достаточного убывания, требование существенного убывания и условие кривизны:\nтребование достаточного убывания гарантирует, что функция в точке $x_{k+1}$ не превосходит линейной аппроксимации с коэффициентом наклона $\\beta_1$\nтребование существенного убывания гарантирует, что функция в точке $x_{k+1}$ убывает не меньше, чем линейная аппроксимация c коэффициентом наклона $\\beta_2$\nусловие кривизны гарантирует, что угол наклона касательной в точке $x_{k+1}$ не меньше, чем угол наклона касательной в точке $x_k$, <br></br>\nумноженный на $\\beta_2$ \n\n\n\nТребование существенного убывания и условие кривизны обеспечивают убывание функции по выбранному направлению $h_k$. 
Обычно выбирают одно из них.\nАльтернативные названия\n\nТребование достаточного убывания $\\equiv$ правило Армихо\nТребование достаточного убывания + условие кривизны $\\equiv$ правило Вольфа\nТребование достаточного убывания + требование существенного убывания $\\equiv$ правило Гольдштейна\n\nЗачем нужно условие существенного убывания?", "%matplotlib notebook\nimport matplotlib.pyplot as plt\nplt.rc(\"text\", usetex=True)\nimport ipywidgets as ipywidg\nimport numpy as np\nimport liboptpy.unconstr_solvers as methods\nimport liboptpy.step_size as ss\nfrom tqdm import tqdm\n\nf = lambda x: np.power(x, 2)\ngradf = lambda x: 2 * x\n\nfig = plt.figure()\nax = fig.add_subplot(1, 1, 1)\n\n\n\ndef update(x0, step):\n gd = methods.fo.GradientDescent(f, gradf, ss.ConstantStepSize(step))\n _ = gd.solve(np.array([x0]), max_iter=10)\n x_hist = gd.get_convergence()\n x = np.linspace(-5, 5)\n ax.clear()\n ax.plot(x, f(x), color=\"r\", label=\"$f(x) = x^2$\")\n y_hist = np.array([f(x) for x in x_hist])\n x_hist = np.array(x_hist)\n plt.quiver(x_hist[:-1], y_hist[:-1], x_hist[1:]-x_hist[:-1], y_hist[1:]-y_hist[:-1], \n scale_units='xy', angles='xy', scale=1, width=0.005, color=\"green\", label=\"Descent path\")\n ax.legend()\n fig.canvas.draw()\n\nstep_slider = ipywidg.FloatSlider(value=0.8, min=0, max=1.2, step=0.1, description=\"Step\")\nx0_slider = ipywidg.FloatSlider(value=1.5, min=-4, max=4, step=0.1, description=\"Initial point\")\n_ = ipywidg.interact(update, x0=x0_slider, step=step_slider)\n\ndef plot_alpha(f, grad, x, h, alphas, beta1, beta2):\n df = np.zeros_like(alphas)\n for i, alpha in enumerate(alphas):\n df[i] = f(x + alpha * h)\n upper_bound = f(x) + beta1 * alphas * grad(x) * h\n lower_bound = f(x) + beta2 * alphas * grad(x) * h\n plt.plot(alphas, df, label=r\"$f(x + \\alpha h)$\")\n plt.plot(alphas, upper_bound, label=\"Upper bound\")\n plt.plot(alphas, lower_bound, label=\"Lower bound\")\n plt.xlabel(r\"$\\alpha$\", fontsize=18)\n plt.legend(loc=\"best\", fontsize=18)\n\nf = lambda x: x**2\ngrad = lambda x: 2 * x\nbeta1 = 0.1\nbeta2 = 0.9\nx0 = 0.5\nplot_alpha(f, grad, x0, -grad(x0), np.linspace(1e-3, 1.01, 10), beta1, beta2)", "$f(x) = x\\log x$", "x_range = np.linspace(1e-10, 4)\nplt.plot(x_range, x_range * np.log(x_range))\n\nx0 = 1\nf = lambda x: x * np.log(x)\ngrad = lambda x: np.log(x) + 1\nbeta1 = 0.3\nbeta2 = 0.7\nplot_alpha(f, grad, x0, -grad(x0), np.linspace(1e-3, 0.9, 10), beta1, beta2)", "Backtracking\n```python\ndef SelectStepSize(x, f, h, rho, alpha0, beta1, beta2):\n# 0 &lt; rho &lt; 1\n\n# alpha0 - initial guess of step size\n\n# beta1 and beta2 - constants from conditions\n\nalpha = alpha0\n\n# Check violating sufficient decrease and curvature conditions\n\nwhile (f(x - alpha * h) &gt;= f(x) + beta1 * alpha grad_f(x_k).dot(h)) and\n\n (grad_f(x - alpha * h).dot(h) &lt;= beta2 * grad_f(x_k).dot(h)):\n\n alpha *= rho\n\nreturn alpha\n\n```\nТеоремы сходимости (Б.Т. Поляк Введение в оптимизацию, гл. 1, $\\S$ 4; гл. 3, $\\S$ 1; Ю.Е. Нестеров Введение в выпуклую оптимизацию, $\\S$ 2.2)\nОт общего к частному:\nТеорема 1. \nПусть \n\n$f(x)$ дифференцируема на $\\mathbb{R}^n$, \nградиент $f(x)$ удовлетворяет условию Липшица с константой $L$\n$f(x)$ ограничена снизу\n$\\alpha = const$ и $0 < \\alpha < \\frac{2}{L}$\n\nТогда для градиентного метода выполнено:\n$$\n\\lim\\limits_{k \\to \\infty} f'(x_k) = 0,\n$$\nа функция монотонно убывает $f(x_{k+1}) < f(x_k)$.\nТеорема 2. 
Пусть\n- $f(x)$ дифференцируема на $\\mathbb{R}^n$\n- $f(x)$ выпукла \n- $f'(x)$ удовлетворяет условию Липшица с константой $L$\n- $\\alpha = \\dfrac{1}{L}$\nТогда \n$$\nf(x_k) - f^ \\leq \\dfrac{2L \\| x_0 - x^\\|^2_2}{k+4}\n$$\nТеорема 3.\nПусть \n- $f(x)$ дважды дифференцируема и $\\mu\\mathbf{I} \\preceq f''(x) \\preceq L\\mathbf{I}$ для всех $x$\n- $\\alpha = const$ и $0 < \\alpha < \\frac{2}{L}$\nТогда \n$$\n\\| x_k - x^\\|_2 \\leq \\|x_0 - x^\\|_2 q^k, \\qquad q = \\max(|1 - \\alpha l|, |1 - \\alpha L|) < 1\n$$\nи минимальное $q^ = \\dfrac{L - \\mu}{L + \\mu}$ при $\\alpha^ = \\dfrac{2}{L + \\mu}$\nОт чего зависит $q^*$ и как это использовать?\nИз Теоремы 3 имеем \n$$\nq^* = \\dfrac{L - \\mu}{L + \\mu} = \\dfrac{L/\\mu - 1}{L/\\mu + 1} = \\dfrac{M - 1}{M + 1},\n$$\nгде $M$ - оценка числа обусловленности $f''(x)$.\nВопрос: что такое число обусловленности матрицы?\n\nПри $M \\gg 1$, $q^ \\to 1 \\Rightarrow$ оооочень медленная сходимости градиентного метода. Например при $M = 100$: $q^ \\approx 0.98 $\nПри $M \\simeq 1$, $q^ \\to 0 \\Rightarrow$ ускорение сходимости градиентного метода. Например при $M = 4$: $q^ = 0.6 $\n\nВопрос: какая геометрия у этого требования?\nМораль: необходимо сделать оценку $M$ как можно ближе к 1!\nО том, как это сделать, Вам будет предложено подумать в домашнем задании :)\nВычислительный аспект и эксперименты\n\nДля каждого шага метода нужно хранить только текущую точку и вектор градиента: $O(n)$ памяти\n\nПоиск $\\alpha_k$:\n\nдан априори\nищется из аналитического решения задачи наискорейшего спуска\nзаканчивается за конечное число шагов\nДля каждого шага метода нужно вычислять линейную комбинацию векторов: $O(n)$ вычислений + высокопроизводительные реализации\n\n\n\nPеализация градиентного спуска", "def GradientDescent(f, gradf, x0, epsilon, num_iter, line_search, \n disp=False, callback=None, **kwargs):\n x = x0.copy()\n iteration = 0\n opt_arg = {\"f\": f, \"grad_f\": gradf}\n for key in kwargs:\n opt_arg[key] = kwargs[key]\n while True:\n gradient = gradf(x)\n alpha = line_search(x, -gradient, **opt_arg)\n x = x - alpha * gradient\n if callback is not None:\n callback(x)\n iteration += 1\n if disp:\n print(\"Current function val =\", f(x))\n print(\"Current gradient norm = \", np.linalg.norm(gradf(x)))\n if np.linalg.norm(gradf(x)) < epsilon:\n break\n if iteration >= num_iter:\n break\n res = {\"x\": x, \"num_iter\": iteration, \"tol\": np.linalg.norm(gradf(x))}\n return res", "Выбор шага\nРеализации различных способов выбора шага приведены тут\nЗависимость от обусловленности матрицы $f''(x)$\nРассмотрим задачу \n$$\n\\min f(x),\n$$ \nгде\n$$ f(x) = x^{\\top}Ax, \\; A = \\begin{bmatrix} 1 & 0\\ 0 & \\gamma \\end{bmatrix} $$\n$$\nf'(x) = 2Ax\n$$", "def my_f(x, A):\n return 0.5 * x.dot(A.dot(x))\n\ndef my_gradf(x, A):\n return A.dot(x)\n\nplt.rc(\"text\", usetex=True)\n\ngammas = [0.1, 0.5, 1, 2, 3, 4, 5, 10, 20, 50, 100, 1000, 5000, 10000]\n# gammas = [1]\nnum_iter_converg = []\nfor g in gammas:\n A = np.array([[1, 0], \n [0, g]], dtype=np.float64)\n f = lambda x: my_f(x, A)\n gradf = lambda x: my_gradf(x, A)\n# x0 = np.random.rand(A.shape[0])\n# x0 = np.sort(x0)\n# x0 = x0[::-1]\n x0 = np.array([g, 1], dtype=np.float64)\n# print x0[1] / x0[0]\n gd = methods.fo.GradientDescent(f, gradf, ss.ExactLineSearch4Quad(A))\n x = gd.solve(x0, tol=1e-7, max_iter=100)\n num_iter_converg.append(len(gd.get_convergence()))\n\nplt.figure(figsize=(8, 6))\nplt.loglog(gammas, num_iter_converg)\nplt.xticks(fontsize = 20)\nplt.yticks(fontsize = 
20)\nplt.xlabel(r\"$\\gamma$\", fontsize=20)\nplt.ylabel(r\"Number of iterations with $\\varepsilon = 10^{-7}$\", fontsize=20)", "При неудачном начальном приближении сходимость для плохо обусловенной задачи очень медленная\nПри случайном начальном приближении сходимость может быть гораздо быстрее теоретических оценок\n\nЭксперимент на многомерной задаче\nПусть $A \\in \\mathbb{R}^{m \\times n}$. Рассмотрим систему линейных неравенств: $Ax \\leq 1$ при условии $|x_i| \\leq 1$ для всех $i$.\nОпределение. Аналитическим центром системы неравенств $Ax \\leq 1$ при условии $|x_i| \\leq 1$ является решение задачи\n$$\nf(x) = - \\sum_{i=1}^m \\log(1 - a_i^{\\top}x) - \\sum_{i = 1}^n \\log (1 - x^2_i) \\to \\min_x\n$$\n$$\nf'(x) - ?\n$$\nТочное решение с помощью CVXPy", "import numpy as np\n\nn = 1000\nm = 2000\nA = np.random.rand(n, m)\n\nx = cvx.Variable(n)\n\nobj = cvx.Minimize(cvx.sum(-cvx.log(1 - A.T * x)) - \n cvx.sum(cvx.log(1 - cvx.square(x))))\nprob = cvx.Problem(obj)\nprob.solve(solver=\"SCS\", verbose=True)\nx = x.value\nprint(\"Optimal value =\", prob.value)", "Решение с помощью градиентного спуска", "import cvxpy as cvx\nprint(cvx.installed_solvers())\n\n# !pip install jax \n# !pip install jaxlib\n\nimport jax.numpy as jnp\nimport jax\n\n# from jax.config import config\n# config.update(\"jax_enable_x64\", True)\n\nA = jnp.array(A)\nprint(A.dtype)\nx0 = jnp.zeros(n)\nf = lambda x: -jnp.sum(jnp.log(1 - A.T@x)) - jnp.sum(jnp.log(1 - x*x))\ngrad_f = lambda x: jnp.sum(A @ (jnp.diagflat(1 / (1 - A.T @ x))), \\\n axis=1) + 2 * x / (1 - jnp.power(x, 2))\ngrad_f_jax = jax.grad(f)\nprint(jnp.linalg.norm(grad_f(x0) - grad_f_jax(x0)))", "Подробнее про jax, его возможности и особенности можно посмотреть например тут", "gd = methods.fo.GradientDescent(f, grad_f_jax, ss.Backtracking(\"Armijo\", rho=0.5, beta=0.1, init_alpha=1.))\nx = gd.solve(x0, tol=1e-5, max_iter=100, disp=True)\n\nx_conv = gd.get_convergence()\ngrad_conv = [jnp.linalg.norm(grad_f_jax(x)) for x in x_conv]\nplt.figure(figsize=(8,6))\nplt.semilogy(grad_conv, label=r\"$\\| f'(x_k) \\|_2$\")\nplt.semilogy([np.linalg.norm(x - np.array(x_k)) for x_k in x_conv], label=r\"$\\|x_k - x^*\\|_2$\")\nplt.semilogy([np.linalg.norm(prob.value - f(np.array(x_k))) for x_k in x_conv], label=r\"$\\|f(x_k) - f^*\\|_2$\")\nplt.semilogy([np.linalg.norm(np.array(x_conv[i]) - np.array(x_conv[i+1])) for i in range(len(x_conv) - 1)], label=r\"$\\|x_k - x_{k+1}\\|_2$\")\nplt.semilogy([np.linalg.norm(f(np.array(x_conv[i])) - f(np.array(x_conv[i+1]))) for i in range(len(x_conv) - 1)], label=r\"$\\|f(x_k) - f(x_{k+1})\\|_2$\")\nplt.xlabel(r\"Number of iteration, $k$\", fontsize=20)\nplt.ylabel(r\"Convergence rate\", fontsize=20)\nplt.xticks(fontsize = 20)\nplt.yticks(fontsize = 20)\nplt.legend(loc=\"best\", fontsize=20)\nplt.tight_layout()", "Pro & Contra\nPro\n- легко реализовать\n- сходимость как минимум к стационарной точке\n- параметры при выборе шага влияют на сходимость не столь сильно\n- имеет многочисленные вариации\nContra\n- линейная сходимость для сильно выпуклых функций\n- очень сильно зависит от числа обусловленности $f''(x)$, выбор начального приближения может помочь\n- не является оптимальным для выпуклых функций с липшицевым градиентом и сильновыпуклых функций (см. ускорение Нестерова)\nРезюме\n\nМетоды спуска\nНаправление убывания\nМетод градиентного спуска\nПравила выбора шага\nТеоремы сходимости\nЭксперименты" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
LedaLima/incubator-spot
spot-oa/oa/proxy/ipynb_templates/Advanced_Mode_master.ipynb
apache-2.0
[ "Apache Spot's Ipython Advanced Mode\nProxy\nThis guide provides examples about how to request data, show data with some cool libraries like pandas and more.\nImport Libraries\nThe next cell will import the necessary libraries to execute the functions. Do not remove", "import datetime\nimport pandas as pd\nimport numpy as np\nimport linecache, bisect\nimport os\n\nspath = os.getcwd()\npath = spath.split(\"/\")\ndate = path[len(path)-1]", "Request Data\nIn order to request data we are using Graphql (a query language for APIs, more info at: http://graphql.org/).\nWe provide the function to make a data request, all you need is a query and variables", "def makeGraphqlRequest(query, variables):\n return GraphQLClient.request(query, variables)", "Now that we have a function, we can run a query like this:\n*Note: There's no need to manually set the date for the query, by default the code will read the date from the current path", "suspicious_query = \"\"\"query($date:SpotDateType) {\n proxy {\n suspicious(date:$date)\n { clientIp\n clientToServerBytes\n datetime\n duration\n host\n networkContext\n referer\n requestMethod\n responseCode\n responseCodeLabel\n responseContentType\n score\n serverIp\n serverToClientBytes\n uri\n uriPath\n uriPort\n uriQuery\n uriRep\n userAgent\n username\n webCategory \n }\n }\n }\"\"\"\n\n##If you want to use a different date for your query, switch the \n##commented/uncommented following lines\n\nvariables={\n 'date': datetime.datetime.strptime(date, '%Y%m%d').strftime('%Y-%m-%d')\n# 'date': \"2016-10-08\"\n }\n \nsuspicious_request = makeGraphqlRequest(suspicious_query,variables)\n\n##The variable suspicious_request will contain the resulting data from the query.\nresults = suspicious_request['data']['proxy']['suspicious']\n", "Pandas Dataframes\nThe following cell loads the results into a pandas dataframe\nFor more information on how to use pandas, you can learn more here: https://pandas.pydata.org/pandas-docs/stable/10min.html", "df = pd.read_json(json.dumps(results))\n##Printing only the selected column list from the dataframe\n##Unless specified otherwise, \nprint df[['clientIp','uriQuery','datetime','clientToServerBytes','serverToClientBytes', 'host']]\n", "Additional operations\nAdditional operations can be performed on the dataframe like sorting the data, filtering it and grouping it\nFiltering the data", "##Filter results where the destination port = 3389\n##The resulting data will be stored in df2 \n\ndf2 = df[df['clientIp'].isin(['10.173.202.136'])]\nprint df2[['clientIp','uriQuery','datetime','host']]", "Ordering the data", "srtd = df.sort_values(by=\"host\")\nprint srtd[['host','clientIp','uriQuery','datetime']]", "Grouping the data", "## This command will group the results by pairs of source-destination IP\n## summarizing all other columns \ngrpd = df.groupby(['clientIp','host']).sum()\n## This will print the resulting dataframe displaying the input and output bytes columnns\nprint grpd[[\"clientToServerBytes\",\"serverToClientBytes\"]]", "Reset Scored Connections\nUncomment and execute the following cell to reset all scored connections for this day", "# reset_scores = \"\"\"mutation($date:SpotDateType!) 
{\n# proxy{\n# resetScoredConnections(date:$date){\n# success\n# }\n# }\n# }\"\"\"\n\n\n# variables={\n# 'date': datetime.datetime.strptime(date, '%Y%m%d').strftime('%Y-%m-%d')\n# }\n \n# request = makeGraphqlRequest(reset_scores,variables)\n\n\n# print request['data']['proxy']['resetScoredConnections']['success']", "Sandbox\nAt this point you can perform your own analysis using the previously provided functions as a guide.\nHappy threat hunting!", "#Your code here" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
alfkjartan/nvgimu
notebooks/.ipynb_checkpoints/Get started-checkpoint.ipynb
gpl-3.0
[ "Getting started with the analysis of nvg data\nThis notebook assumes that data exists in a database in the hdf5 format. For instructions how to set up the database with data see [../readme.md].\nImport modules", "import numpy as np\nimport matplotlib.pyplot as plt\nimport nvg.ximu.ximudata as ximudata\n%matplotlib notebook", "Load the database", "reload(ximudata)\ndbfilename = \"/home/kjartan/Dropbox/Public/nvg201209.hdf5\"\ndb = ximudata.NVGData(dbfilename);", "Explore contents of the database file", "dbfile = db.hdfFile;\nprint \"Subjects: \", dbfile.keys()\nprint \"Trials: \", dbfile['S5'].keys()\nprint \"IMUs: \", dbfile['S5/B'].keys()\nprint \"Attributes of example trial\", dbfile['S5/B'].attrs.keys()\nprint \"Shape of example IMU data entry\", dbfile['S5/B/N'].shape\n", "The content of the raw IMU file\nThe columns of the IMU data contain: \n0: Packet number, \n1: Gyroscope X (deg/s),\n2: Gyroscope Y (deg/s),\n3: Gyroscope Z (deg/s),\n4: Accelerometer X (g),\n5: Accelerometer Y (g),\n6: Accelerometer Z (g),\n7: Magnetometer X (G),\n8: Magnetometer Y (G),\n9: Magnetometer Z (G)\nPlot example data", "db.plot_imu_data()", "Implemented analysis methods", "print [s for s in dir(db) if s.startswith(\"get\")]" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
fionapigott/Data-Science-45min-Intros
count-min-101/CountMinSketch.ipynb
unlicense
[ "Basic Idea of Count Min sketch\nWe map the input value to multiple points in a relatively small output space. Therefore, the count associated with a given input will be applied to multiple counts in the output space. Even though collisions will occur, the minimum count associated with a given input will have some desirable properties, including the ability to be used to estimate the largest N counts.\n<img src=\"files/count_min_2.png\">\nhttp://debasishg.blogspot.com/2014/01/count-min-sketch-data-structure-for.html\nParameters of the sketch:\n\nepsilon\ndelta\n\nThese parameters are inversely and exponentially (respectively) related to the sketch size parameters, d and w. \nImplementation of the CM sketch", "import sys\nimport random\nimport numpy as np\nimport heapq\nimport json\nimport time\n\nBIG_PRIME = 9223372036854775783\n\ndef random_parameter():\n return random.randrange(0, BIG_PRIME - 1)\n\n\nclass Sketch:\n def __init__(self, delta, epsilon, k):\n \"\"\"\n Setup a new count-min sketch with parameters delta, epsilon and k\n\n The parameters delta and epsilon control the accuracy of the\n estimates of the sketch\n\n Cormode and Muthukrishnan prove that for an item i with count a_i, the\n estimate from the sketch a_i_hat will satisfy the relation\n\n a_hat_i <= a_i + epsilon * ||a||_1\n\n with probability at least 1 - delta, where a is the the vector of all\n all counts and ||x||_1 is the L1 norm of a vector x\n\n Parameters\n ----------\n delta : float\n A value in the unit interval that sets the precision of the sketch\n epsilon : float\n A value in the unit interval that sets the precision of the sketch\n k : int\n A positive integer that sets the number of top items counted\n\n Examples\n --------\n >>> s = Sketch(10**-7, 0.005, 40)\n\n Raises\n ------\n ValueError\n If delta or epsilon are not in the unit interval, or if k is\n not a positive integer\n\n \"\"\"\n if delta <= 0 or delta >= 1:\n raise ValueError(\"delta must be between 0 and 1, exclusive\")\n if epsilon <= 0 or epsilon >= 1:\n raise ValueError(\"epsilon must be between 0 and 1, exclusive\")\n if k < 1:\n raise ValueError(\"k must be a positive integer\")\n\n self.w = int(np.ceil(np.exp(1) / epsilon))\n self.d = int(np.ceil(np.log(1 / delta)))\n self.k = k\n self.hash_functions = [self.__generate_hash_function() for i in range(self.d)]\n self.count = np.zeros((self.d, self.w), dtype='int32')\n self.heap, self.top_k = [], {} # top_k => [estimate, key] pairs\n\n def update(self, key, increment):\n \"\"\"\n Updates the sketch for the item with name of key by the amount\n specified in increment\n\n Parameters\n ----------\n key : string\n The item to update the value of in the sketch\n increment : integer\n The amount to update the sketch by for the given key\n\n Examples\n --------\n >>> s = Sketch(10**-7, 0.005, 40)\n >>> s.update('http://www.cnn.com/', 1)\n\n \"\"\"\n for row, hash_function in enumerate(self.hash_functions):\n column = hash_function(abs(hash(key)))\n self.count[row, column] += increment\n\n self.update_heap(key)\n\n def update_heap(self, key):\n \"\"\"\n Updates the class's heap that keeps track of the top k items for a\n given key\n\n For the given key, it checks whether the key is present in the heap,\n updating accordingly if so, and adding it to the heap if it is\n absent\n\n Parameters\n ----------\n key : string\n The item to check against the heap\n\n \"\"\"\n estimate = self.get(key)\n\n if not self.heap or estimate >= self.heap[0][0]:\n if key in self.top_k:\n old_pair = 
self.top_k.get(key)\n old_pair[0] = estimate\n heapq.heapify(self.heap)\n else:\n if len(self.top_k) < self.k:\n heapq.heappush(self.heap, [estimate, key])\n self.top_k[key] = [estimate, key]\n else:\n new_pair = [estimate, key]\n old_pair = heapq.heappushpop(self.heap, new_pair)\n if new_pair[1] != old_pair[1]:\n del self.top_k[old_pair[1]]\n self.top_k[key] = new_pair\n self.top_k[key] = new_pair\n\n def get(self, key):\n \"\"\"\n Fetches the sketch estimate for the given key\n\n Parameters\n ----------\n key : string\n The item to produce an estimate for\n\n Returns\n -------\n estimate : int\n The best estimate of the count for the given key based on the\n sketch\n\n Examples\n --------\n >>> s = Sketch(10**-7, 0.005, 40)\n >>> s.update('http://www.cnn.com/', 1)\n >>> s.get('http://www.cnn.com/')\n 1\n\n \"\"\"\n value = sys.maxint\n for row, hash_function in enumerate(self.hash_functions):\n column = hash_function(abs(hash(key)))\n value = min(self.count[row, column], value)\n\n return value\n\n def __generate_hash_function(self):\n \"\"\"\n Returns a hash function from a family of pairwise-independent hash\n functions\n\n \"\"\"\n a, b = random_parameter(), random_parameter()\n return lambda x: (a * x + b) % BIG_PRIME % self.w\n \n\n# define a function to return a list of the exact top users, sorted by count\ndef exact_top_users(f, top_n = 10):\n import operator\n counts = {}\n for user in f:\n user = user.rstrip('\\n')\n try:\n if user not in counts:\n counts[user] = 1\n else:\n counts[user] += 1\n except ValueError:\n pass\n except KeyError:\n pass\n counter = 0\n results = []\n for user,count in reversed(sorted(counts.iteritems(), key=operator.itemgetter(1))):\n if counter >= top_n:\n break\n results.append('{},{}'.format(user,str(count)))\n counter += 1\n return results\n# note that the output format is '[user] [count]'\n\nf = open('CM_small.txt')\nresults_exact = sorted(exact_top_users(f))\nprint(results_exact)\n\n# define a function to return a list of the estimated top users, sorted by count\ndef CM_top_users(f, s, top_n = 10):\n for user_name in f:\n s.update(user_name.rstrip('\\n'),1)\n \n results = []\n counter = 0\n for value in reversed(sorted(s.top_k.values())):\n if counter >= top_n:\n break\n results.append('{1},{0}'.format(str(value[0]),str(value[1])))\n counter += 1\n return results\n# note that the output format is '[user] [count]'\n\n# instantiate a Sketch object\ns = Sketch(10**-3, 0.1, 10)\n\nf = open('CM_small.txt')\nresults_CM = sorted(CM_top_users(f,s))\nprint(results_CM)\n\nfor item in zip(results_exact,results_CM):\n print(item)", "Is it possible to make the sketch so coarse that its estimates are wrong even for this data set?", "s = Sketch(0.9, 0.9, 10)\nf = open('CM_small.txt')\nresults_coarse_CM = CM_top_users(f,s)\nprint(results_coarse_CM)", "Yes! (if you try enough) Why? \n\nThe 'w' parameter goes like ceiling(exp(1)/epsilon), which is always >=~ 3.\nThe 'd' parameter goes like ceiling(log(1/delta), which is always >= 1.\n\nSo, you're dealing with a space with minimum size 3 x 1. With 10 records, it's possible that all 4 users map their counts to the point. 
So it's possible to see an estimate as high as 10 in this case.\nNow for a larger data set.", "f = open('CM_large.txt')\n%time results_exact = exact_top_users(f)\nprint(results_exact)\n\n# this could take a few minutes\n\nf = open('CM_large.txt')\ns = Sketch(10**-4, 0.001, 10)\n%time results_CM = CM_top_users(f,s)\nprint(results_CM)", "For this precision and dataset size, the CM algorithm takes much longer than the exact solution. In fact, the crossover point at which the CM sketch can achieve reasonable accuracy in the same time as the exact solution is a very large number of entries.", "for item in zip(results_exact,results_CM):\n print(item)\n\n# the CM sketch gets the top entry (an outlier) correct but doesn't do well estimating the order of the more degenerate counts\n\n# let's decrease the precision via both the epsilon and delta parameters, and see whether it still gets the \"heavy-hitter\" correct\nf = open('CM_large.txt')\ns = Sketch(10**-3, 0.01, 10)\n%time results_CM = CM_top_users(f,s)\nprint(results_CM)\n\n# nope...sketch is too coarse, too many collisions, and the prominence of user 'Euph0r1a__ 129' is obscured\nfor item in zip(results_exact,results_CM):\n print(item)", "The most common use of the CM sketch is analysis of streaming data. Why?\n\nBecause the data are arriving in real time, the hashing of the inputs is not a bottleneck as it is when the data are already collected.\nSimilarly, the recalculation of the top-k list (implemented as a heap, in this case) is done on insert. No need to sort the entire list.\nThe sketches are associative, meaning that the operation can be easily parallelized, and the results combined in the end.\n\nTakeaway: use the CM sketch to estimate the top-k most frequent elements in a streaming environment." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
tensorflow/docs-l10n
site/zh-cn/lattice/tutorials/shape_constraints_for_ethics.ipynb
apache-2.0
[ "Copyright 2020 The TensorFlow Authors.", "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "使用 Tensorflow Lattice 实现道德形状约束\n<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td><a target=\"_blank\" href=\"https://tensorflow.google.cn/lattice/tutorials/shape_constraints_for_ethics\"><img src=\"https://tensorflow.google.cn/images/tf_logo_32px.png\">在 TensorFlow.org 上查看 </a></td>\n <td><a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/zh-cn/lattice/tutorials/shape_constraints_for_ethics.ipynb\"><img src=\"https://tensorflow.google.cn/images/colab_logo_32px.png\">在 Google Colab 中运行 </a></td>\n <td><a target=\"_blank\" href=\"https://github.com/tensorflow/docs-l10n/blob/master/site/zh-cn/lattice/tutorials/shape_constraints_for_ethics.ipynb\"><img src=\"https://tensorflow.google.cn/images/GitHub-Mark-32px.png\">在 GitHub 中查看源代码</a></td>\n <td><a href=\"https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/zh-cn/lattice/tutorials/shape_constraints_for_ethics.ipynb\"><img src=\"https://tensorflow.google.cn/images/download_logo_32px.png\"> 下载笔记本</a></td>\n</table>\n\n概述\n本教程演示了如何使用 TensorFlow Lattice (TFL) 库训练对行为负责,并且不违反特定的道德或公平假设的模型。特别是,我们将侧重于使用单调性约束来避免对某些特性的不公平惩罚。本教程包括 Serena Wang 和 Maya Gupta 在 AISTATS 2020 上发表的论文 Deontological Ethics By Monotonicity Shape Constraints 中的实验演示。\n我们将在公共数据集上使用 TFL Canned Estimator,但请注意,本教程中的所有内容也可以使用通过 TFL Keras 层构造的模型来完成。\n在继续之前,请确保您的运行时已安装所有必需的软件包(如下方代码单元中导入的软件包)。\n设置\n安装 TF Lattice 软件包:", "#@test {\"skip\": true}\n!pip install tensorflow-lattice seaborn", "导入所需的软件包:", "import tensorflow as tf\n\nimport logging\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport pandas as pd\nimport seaborn as sns\nfrom sklearn.model_selection import train_test_split\nimport sys\nimport tensorflow_lattice as tfl\nlogging.disable(sys.maxsize)", "本教程中使用的默认值:", "# List of learning rate hyperparameters to try.\n# For a longer list of reasonable hyperparameters, try [0.001, 0.01, 0.1].\nLEARNING_RATES = [0.01]\n# Default number of training epochs and batch sizes.\nNUM_EPOCHS = 1000\nBATCH_SIZE = 1000\n# Directory containing dataset files.\nDATA_DIR = 'https://raw.githubusercontent.com/serenalwang/shape_constraints_for_ethics/master'", "案例研究 1:法学院入学\n在本教程的第一部分中,我们将考虑一个使用法学院招生委员会 (LSAC) 的 Law School Admissions 数据集的案例研究。我们将训练分类器利用以下两个特征来预测学生是否会通过考试:学生的 LSAT 分数和本科生的 GPA。\n假设分类器的分数用于指导法学院的招生或奖学金评定。根据基于成绩的社会规范,我们预期具有更高 GPA 和更高 LSAT 分数的学生应当从分类器中获得更高的分数。但是,我们会观察到,模型很容易违反这些直观的规范,有时会惩罚 GPA 或 LSAT 分数较高的人员。\n为了解决这种不公平的惩罚问题,我们可以施加单调性约束,这样在其他条件相同的情况下,模型永远不会惩罚更高的 GPA 或更高的 LSAT 分数。在本教程中,我们将展示如何使用 TFL 施加这些单调性约束。\n加载法学院数据", "# Load data file.\nlaw_file_name = 'lsac.csv'\nlaw_file_path = os.path.join(DATA_DIR, law_file_name)\nraw_law_df = pd.read_csv(law_file_path, delimiter=',')", "预处理数据集:", "# Define label column name.\nLAW_LABEL = 'pass_bar'\n\ndef preprocess_law_data(input_df):\n # Drop rows with where the label or features of interest are missing.\n output_df = input_df[~input_df[LAW_LABEL].isna() & 
~input_df['ugpa'].isna() &\n (input_df['ugpa'] > 0) & ~input_df['lsat'].isna()]\n return output_df\n\n\nlaw_df = preprocess_law_data(raw_law_df)", "将数据划分为训练/验证/测试集", "def split_dataset(input_df, random_state=888):\n \"\"\"Splits an input dataset into train, val, and test sets.\"\"\"\n train_df, test_val_df = train_test_split(\n input_df, test_size=0.3, random_state=random_state)\n val_df, test_df = train_test_split(\n test_val_df, test_size=0.66, random_state=random_state)\n return train_df, val_df, test_df\n\n\nlaw_train_df, law_val_df, law_test_df = split_dataset(law_df)", "可视化数据分布\n首先,我们可视化数据的分布。我们将为所有通过考试的学生以及所有未通过考试的学生绘制 GPA 和 LSAT 分数。", "def plot_dataset_contour(input_df, title):\n plt.rcParams['font.family'] = ['serif']\n g = sns.jointplot(\n x='ugpa',\n y='lsat',\n data=input_df,\n kind='kde',\n xlim=[1.4, 4],\n ylim=[0, 50])\n g.plot_joint(plt.scatter, c='b', s=10, linewidth=1, marker='+')\n g.ax_joint.collections[0].set_alpha(0)\n g.set_axis_labels('Undergraduate GPA', 'LSAT score', fontsize=14)\n g.fig.suptitle(title, fontsize=14)\n # Adust plot so that the title fits.\n plt.subplots_adjust(top=0.9)\n plt.show()\n\nlaw_df_pos = law_df[law_df[LAW_LABEL] == 1]\nplot_dataset_contour(\n law_df_pos, title='Distribution of students that passed the bar')\n\nlaw_df_neg = law_df[law_df[LAW_LABEL] == 0]\nplot_dataset_contour(\n law_df_neg, title='Distribution of students that failed the bar')", "训练校准线性模型以预测考试的通过情况\n接下来,我们将通过 TFL 训练校准线性模型,以预测学生是否会通过考试。两个输入特征分别是 LSAT 分数和本科 GPA,而训练标签将是学生是否通过了考试。\n我们首先在没有任何约束的情况下训练校准线性模型。然后,我们在具有单调性约束的情况下训练校准线性模型,并观察模型输出和准确率的差异。\n用于训练 TFL 校准线性 Estimator 的辅助函数\n下面这些函数将用于此法学院案例研究以及下面的信用违约案例研究。", "def train_tfl_estimator(train_df, monotonicity, learning_rate, num_epochs,\n batch_size, get_input_fn,\n get_feature_columns_and_configs):\n \"\"\"Trains a TFL calibrated linear estimator.\n\n Args:\n train_df: pandas dataframe containing training data.\n monotonicity: if 0, then no monotonicity constraints. If 1, then all\n features are constrained to be monotonically increasing.\n learning_rate: learning rate of Adam optimizer for gradient descent.\n num_epochs: number of training epochs.\n batch_size: batch size for each epoch. None means the batch size is the full\n dataset size.\n get_input_fn: function that returns the input_fn for a TF estimator.\n get_feature_columns_and_configs: function that returns TFL feature columns\n and configs.\n\n Returns:\n estimator: a trained TFL calibrated linear estimator.\n\n \"\"\"\n feature_columns, feature_configs = get_feature_columns_and_configs(\n monotonicity)\n\n model_config = tfl.configs.CalibratedLinearConfig(\n feature_configs=feature_configs, use_bias=False)\n\n estimator = tfl.estimators.CannedClassifier(\n feature_columns=feature_columns,\n model_config=model_config,\n feature_analysis_input_fn=get_input_fn(input_df=train_df, num_epochs=1),\n optimizer=tf.keras.optimizers.Adam(learning_rate))\n\n estimator.train(\n input_fn=get_input_fn(\n input_df=train_df, num_epochs=num_epochs, batch_size=batch_size))\n return estimator\n\n\ndef optimize_learning_rates(\n train_df,\n val_df,\n test_df,\n monotonicity,\n learning_rates,\n num_epochs,\n batch_size,\n get_input_fn,\n get_feature_columns_and_configs,\n):\n \"\"\"Optimizes learning rates for TFL estimators.\n\n Args:\n train_df: pandas dataframe containing training data.\n val_df: pandas dataframe containing validation data.\n test_df: pandas dataframe containing test data.\n monotonicity: if 0, then no monotonicity constraints. 
If 1, then all\n features are constrained to be monotonically increasing.\n learning_rates: list of learning rates to try.\n num_epochs: number of training epochs.\n batch_size: batch size for each epoch. None means the batch size is the full\n dataset size.\n get_input_fn: function that returns the input_fn for a TF estimator.\n get_feature_columns_and_configs: function that returns TFL feature columns\n and configs.\n\n Returns:\n A single TFL estimator that achieved the best validation accuracy.\n \"\"\"\n estimators = []\n train_accuracies = []\n val_accuracies = []\n test_accuracies = []\n for lr in learning_rates:\n estimator = train_tfl_estimator(\n train_df=train_df,\n monotonicity=monotonicity,\n learning_rate=lr,\n num_epochs=num_epochs,\n batch_size=batch_size,\n get_input_fn=get_input_fn,\n get_feature_columns_and_configs=get_feature_columns_and_configs)\n estimators.append(estimator)\n train_acc = estimator.evaluate(\n input_fn=get_input_fn(train_df, num_epochs=1))['accuracy']\n val_acc = estimator.evaluate(\n input_fn=get_input_fn(val_df, num_epochs=1))['accuracy']\n test_acc = estimator.evaluate(\n input_fn=get_input_fn(test_df, num_epochs=1))['accuracy']\n print('accuracies for learning rate %f: train: %f, val: %f, test: %f' %\n (lr, train_acc, val_acc, test_acc))\n train_accuracies.append(train_acc)\n val_accuracies.append(val_acc)\n test_accuracies.append(test_acc)\n max_index = val_accuracies.index(max(val_accuracies))\n return estimators[max_index]", "用于配置法学院数据集特征的辅助函数\n下面这些辅助函数专用于法学院案例研究。", "def get_input_fn_law(input_df, num_epochs, batch_size=None):\n \"\"\"Gets TF input_fn for law school models.\"\"\"\n return tf.compat.v1.estimator.inputs.pandas_input_fn(\n x=input_df[['ugpa', 'lsat']],\n y=input_df['pass_bar'],\n num_epochs=num_epochs,\n batch_size=batch_size or len(input_df),\n shuffle=False)\n\n\ndef get_feature_columns_and_configs_law(monotonicity):\n \"\"\"Gets TFL feature configs for law school models.\"\"\"\n feature_columns = [\n tf.feature_column.numeric_column('ugpa'),\n tf.feature_column.numeric_column('lsat'),\n ]\n feature_configs = [\n tfl.configs.FeatureConfig(\n name='ugpa',\n lattice_size=2,\n pwl_calibration_num_keypoints=20,\n monotonicity=monotonicity,\n pwl_calibration_always_monotonic=False),\n tfl.configs.FeatureConfig(\n name='lsat',\n lattice_size=2,\n pwl_calibration_num_keypoints=20,\n monotonicity=monotonicity,\n pwl_calibration_always_monotonic=False),\n ]\n return feature_columns, feature_configs", "用于可视化训练的模型输出的辅助函数", "def get_predicted_probabilities(estimator, input_df, get_input_fn):\n predictions = estimator.predict(\n input_fn=get_input_fn(input_df=input_df, num_epochs=1))\n return [prediction['probabilities'][1] for prediction in predictions]\n\n\ndef plot_model_contour(estimator, input_df, num_keypoints=20):\n x = np.linspace(min(input_df['ugpa']), max(input_df['ugpa']), num_keypoints)\n y = np.linspace(min(input_df['lsat']), max(input_df['lsat']), num_keypoints)\n\n x_grid, y_grid = np.meshgrid(x, y)\n\n positions = np.vstack([x_grid.ravel(), y_grid.ravel()])\n plot_df = pd.DataFrame(positions.T, columns=['ugpa', 'lsat'])\n plot_df[LAW_LABEL] = np.ones(len(plot_df))\n predictions = get_predicted_probabilities(\n estimator=estimator, input_df=plot_df, get_input_fn=get_input_fn_law)\n grid_predictions = np.reshape(predictions, x_grid.shape)\n\n plt.rcParams['font.family'] = ['serif']\n plt.contour(\n x_grid,\n y_grid,\n grid_predictions,\n colors=('k',),\n levels=np.linspace(0, 1, 11))\n plt.contourf(\n x_grid,\n y_grid,\n 
grid_predictions,\n cmap=plt.cm.bone,\n levels=np.linspace(0, 1, 11)) # levels=np.linspace(0,1,8));\n plt.xticks(fontsize=20)\n plt.yticks(fontsize=20)\n\n cbar = plt.colorbar()\n cbar.ax.set_ylabel('Model score', fontsize=20)\n cbar.ax.tick_params(labelsize=20)\n\n plt.xlabel('Undergraduate GPA', fontsize=20)\n plt.ylabel('LSAT score', fontsize=20)", "训练无约束(非单调)的校准线性模型", "nomon_linear_estimator = optimize_learning_rates(\n train_df=law_train_df,\n val_df=law_val_df,\n test_df=law_test_df,\n monotonicity=0,\n learning_rates=LEARNING_RATES,\n batch_size=BATCH_SIZE,\n num_epochs=NUM_EPOCHS,\n get_input_fn=get_input_fn_law,\n get_feature_columns_and_configs=get_feature_columns_and_configs_law)\n\nplot_model_contour(nomon_linear_estimator, input_df=law_df)", "训练单调的校准线性模型", "mon_linear_estimator = optimize_learning_rates(\n train_df=law_train_df,\n val_df=law_val_df,\n test_df=law_test_df,\n monotonicity=1,\n learning_rates=LEARNING_RATES,\n batch_size=BATCH_SIZE,\n num_epochs=NUM_EPOCHS,\n get_input_fn=get_input_fn_law,\n get_feature_columns_and_configs=get_feature_columns_and_configs_law)\n\nplot_model_contour(mon_linear_estimator, input_df=law_df)", "训练其他无约束的模型\n我们演示了可以将 TFL 校准线性模型训练成在 LSAT 分数和 GPA 上均单调,而不会牺牲过多的准确率。\n但是,与其他类型的模型(如深度神经网络 (DNN) 或梯度提升树 (GBT))相比,校准线性模型表现如何?DNN 和 GBT 看起来会有公平合理的输出吗?为了解决这一问题,我们接下来将训练无约束的 DNN 和 GBT。实际上,我们将观察到 DNN 和 GBT 都很容易违反 LSAT 分数和本科生 GPA 中的单调性。\n训练无约束的深度神经网络 (DNN) 模型\n之前已对此架构进行了优化,可以实现较高的验证准确率。", "feature_names = ['ugpa', 'lsat']\n\ndnn_estimator = tf.estimator.DNNClassifier(\n feature_columns=[\n tf.feature_column.numeric_column(feature) for feature in feature_names\n ],\n hidden_units=[100, 100],\n optimizer=tf.keras.optimizers.Adam(learning_rate=0.008),\n activation_fn=tf.nn.relu)\n\ndnn_estimator.train(\n input_fn=get_input_fn_law(\n law_train_df, batch_size=BATCH_SIZE, num_epochs=NUM_EPOCHS))\ndnn_train_acc = dnn_estimator.evaluate(\n input_fn=get_input_fn_law(law_train_df, num_epochs=1))['accuracy']\ndnn_val_acc = dnn_estimator.evaluate(\n input_fn=get_input_fn_law(law_val_df, num_epochs=1))['accuracy']\ndnn_test_acc = dnn_estimator.evaluate(\n input_fn=get_input_fn_law(law_test_df, num_epochs=1))['accuracy']\nprint('accuracies for DNN: train: %f, val: %f, test: %f' %\n (dnn_train_acc, dnn_val_acc, dnn_test_acc))\n\nplot_model_contour(dnn_estimator, input_df=law_df)", "训练无约束的梯度提升树 (GBT) 模型\n之前已对此树形结构进行了优化,可以实现较高的验证准确率。", "tree_estimator = tf.estimator.BoostedTreesClassifier(\n feature_columns=[\n tf.feature_column.numeric_column(feature) for feature in feature_names\n ],\n n_batches_per_layer=2,\n n_trees=20,\n max_depth=4)\n\ntree_estimator.train(\n input_fn=get_input_fn_law(\n law_train_df, num_epochs=NUM_EPOCHS, batch_size=BATCH_SIZE))\ntree_train_acc = tree_estimator.evaluate(\n input_fn=get_input_fn_law(law_train_df, num_epochs=1))['accuracy']\ntree_val_acc = tree_estimator.evaluate(\n input_fn=get_input_fn_law(law_val_df, num_epochs=1))['accuracy']\ntree_test_acc = tree_estimator.evaluate(\n input_fn=get_input_fn_law(law_test_df, num_epochs=1))['accuracy']\nprint('accuracies for GBT: train: %f, val: %f, test: %f' %\n (tree_train_acc, tree_val_acc, tree_test_acc))\n\nplot_model_contour(tree_estimator, input_df=law_df)", "案例研究 2:信用违约\n我们将在本教程中考虑的第二个案例研究是预测个人的信用违约概率。我们将使用 UCI 存储库中的 Default of Credit Card Clients 数据集。这些数据收集自 30,000 名中国台湾信用卡用户,并包含一个二元标签,用于标识用户是否在时间窗口内拖欠了付款。特征包括婚姻状况、性别、教育程度以及在 2005 年 4-9 
月的每个月中,用户拖欠现有账单的时间有多长。\n正如我们在第一个案例研究中所做的那样,我们再次阐明了使用单调性约束来避免不公平的惩罚:使用该模型来确定用户的信用评分时,在其他条件都相同的情况下,如果许多人因较早支付账单而受到惩罚,那么这对他们来说是不公平的。因此,我们应用了单调性约束,使模型不会惩罚提前付款。\n加载信用违约数据", "# Load data file.\ncredit_file_name = 'credit_default.csv'\ncredit_file_path = os.path.join(DATA_DIR, credit_file_name)\ncredit_df = pd.read_csv(credit_file_path, delimiter=',')\n\n# Define label column name.\nCREDIT_LABEL = 'default'", "将数据划分为训练/验证/测试集", "credit_train_df, credit_val_df, credit_test_df = split_dataset(credit_df)", "可视化数据分布\n首先,我们可视化数据的分布。我们将为婚姻状况和还款状况不同的人绘制观察到的违约率的平均值和标准误差。还款状态表示一个人已偿还贷款的月数(截至 2005 年 4 月)。", "def get_agg_data(df, x_col, y_col, bins=11):\n xbins = pd.cut(df[x_col], bins=bins)\n data = df[[x_col, y_col]].groupby(xbins).agg(['mean', 'sem'])\n return data\n\n\ndef plot_2d_means_credit(input_df, x_col, y_col, x_label, y_label):\n plt.rcParams['font.family'] = ['serif']\n _, ax = plt.subplots(nrows=1, ncols=1)\n plt.setp(ax.spines.values(), color='black', linewidth=1)\n ax.tick_params(\n direction='in', length=6, width=1, top=False, right=False, labelsize=18)\n df_single = get_agg_data(input_df[input_df['MARRIAGE'] == 1], x_col, y_col)\n df_married = get_agg_data(input_df[input_df['MARRIAGE'] == 2], x_col, y_col)\n ax.errorbar(\n df_single[(x_col, 'mean')],\n df_single[(y_col, 'mean')],\n xerr=df_single[(x_col, 'sem')],\n yerr=df_single[(y_col, 'sem')],\n color='orange',\n marker='s',\n capsize=3,\n capthick=1,\n label='Single',\n markersize=10,\n linestyle='')\n ax.errorbar(\n df_married[(x_col, 'mean')],\n df_married[(y_col, 'mean')],\n xerr=df_married[(x_col, 'sem')],\n yerr=df_married[(y_col, 'sem')],\n color='b',\n marker='^',\n capsize=3,\n capthick=1,\n label='Married',\n markersize=10,\n linestyle='')\n leg = ax.legend(loc='upper left', fontsize=18, frameon=True, numpoints=1)\n ax.set_xlabel(x_label, fontsize=18)\n ax.set_ylabel(y_label, fontsize=18)\n ax.set_ylim(0, 1.1)\n ax.set_xlim(-2, 8.5)\n ax.patch.set_facecolor('white')\n leg.get_frame().set_edgecolor('black')\n leg.get_frame().set_facecolor('white')\n leg.get_frame().set_linewidth(1)\n plt.show()\n\nplot_2d_means_credit(credit_train_df, 'PAY_0', 'default',\n 'Repayment Status (April)', 'Observed default rate')", "训练校准线性模型以预测信用违约率\n接下来,我们将通过 TFL 训练校准线性模型,以预测某人是否会拖欠贷款。两个输入特征将是该人的婚姻状况以及该人截至 4 月已偿还贷款的月数(还款状态)。训练标签将是该人是否拖欠过贷款。\n我们首先在没有任何约束的情况下训练校准线性模型。然后,我们在具有单调性约束的情况下训练校准线性模型,并观察模型输出和准确率的差异。\n用于配置信用违约数据集特征的辅助函数\n下面这些辅助函数专用于信用违约案例研究。", "def get_input_fn_credit(input_df, num_epochs, batch_size=None):\n \"\"\"Gets TF input_fn for credit default models.\"\"\"\n return tf.compat.v1.estimator.inputs.pandas_input_fn(\n x=input_df[['MARRIAGE', 'PAY_0']],\n y=input_df['default'],\n num_epochs=num_epochs,\n batch_size=batch_size or len(input_df),\n shuffle=False)\n\n\ndef get_feature_columns_and_configs_credit(monotonicity):\n \"\"\"Gets TFL feature configs for credit default models.\"\"\"\n feature_columns = [\n tf.feature_column.numeric_column('MARRIAGE'),\n tf.feature_column.numeric_column('PAY_0'),\n ]\n feature_configs = [\n tfl.configs.FeatureConfig(\n name='MARRIAGE',\n lattice_size=2,\n pwl_calibration_num_keypoints=3,\n monotonicity=monotonicity,\n pwl_calibration_always_monotonic=False),\n tfl.configs.FeatureConfig(\n name='PAY_0',\n lattice_size=2,\n pwl_calibration_num_keypoints=10,\n monotonicity=monotonicity,\n pwl_calibration_always_monotonic=False),\n ]\n return feature_columns, feature_configs", "用于可视化训练的模型输出的辅助函数", "def plot_predictions_credit(input_df,\n estimator,\n x_col,\n x_label='Repayment Status (April)',\n 
y_label='Predicted default probability'):\n  predictions = get_predicted_probabilities(\n      estimator=estimator, input_df=input_df, get_input_fn=get_input_fn_credit)\n  new_df = input_df.copy()\n  new_df.loc[:, 'predictions'] = predictions\n  plot_2d_means_credit(new_df, x_col, 'predictions', x_label, y_label)", "Train unconstrained (non-monotonic) calibrated linear model", "nomon_linear_estimator = optimize_learning_rates(\n    train_df=credit_train_df,\n    val_df=credit_val_df,\n    test_df=credit_test_df,\n    monotonicity=0,\n    learning_rates=LEARNING_RATES,\n    batch_size=BATCH_SIZE,\n    num_epochs=NUM_EPOCHS,\n    get_input_fn=get_input_fn_credit,\n    get_feature_columns_and_configs=get_feature_columns_and_configs_credit)\n\nplot_predictions_credit(credit_train_df, nomon_linear_estimator, 'PAY_0')", "Train monotonic calibrated linear model", "mon_linear_estimator = optimize_learning_rates(\n    train_df=credit_train_df,\n    val_df=credit_val_df,\n    test_df=credit_test_df,\n    monotonicity=1,\n    learning_rates=LEARNING_RATES,\n    batch_size=BATCH_SIZE,\n    num_epochs=NUM_EPOCHS,\n    get_input_fn=get_input_fn_credit,\n    get_feature_columns_and_configs=get_feature_columns_and_configs_credit)\n\nplot_predictions_credit(credit_train_df, mon_linear_estimator, 'PAY_0')" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
Jackie789/JupyterNotebooks
CorrectingForAssumptions.ipynb
gpl-3.0
[ "Engineering Existing Data to Follow Multivariate Linear Regression Assumptions\nJackie Zuker\nAssumptions:\n\nLinear relationship - Features should have a linear relationship with the outcome\nMultivariate normality - The error from the model should be normally distributed\nHomoscedasticity - The distribution of error should be consistent for all predicted values\nLow multicollinearity - correlations between features should be low or non-existent\n\nThe model in use has problems with heterscedasticity and multivariate non-normality.", "import math\nimport warnings\n\nfrom IPython.display import display\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom sklearn import linear_model\n#import statsmodels.formula.api as smf\n#import statsmodels as smf\n\n# Display preferences.\n%matplotlib inline\npd.options.display.float_format = '{:.3f}'.format\n\n# Suppress annoying harmless error.\nwarnings.filterwarnings(\n action=\"ignore\",\n module=\"scipy\",\n message=\"^internal gelsd\"\n)\n\n# Acquire, load, and preview the data.\ndata = pd.read_csv(\n 'http://www-bcf.usc.edu/~gareth/ISL/Advertising.csv',\n index_col=0\n)\ndisplay(data.head())\n\n# Instantiate and fit our model.\nregr = linear_model.LinearRegression()\nY = data['Sales'].values.reshape(-1, 1)\nX = data[['TV','Radio','Newspaper']]\nregr.fit(X, Y)\n\n# Inspect the results.\nprint('\\nCoefficients: \\n', regr.coef_)\nprint('\\nIntercept: \\n', regr.intercept_)\nprint('\\nR-squared:')\nprint(regr.score(X, Y))\n\n# Extract predicted values.\npredicted = regr.predict(X).ravel()\nactual = data['Sales']\n\n# Calculate the error, also called the residual.\nresidual = actual - predicted\n\nsns.set_style(\"whitegrid\")\n\n# This looks a bit concerning.\nplt.hist(residual)\nplt.title('Residual counts (skewed to the right)')\nplt.xlabel('Residual')\nplt.ylabel('Count')\nplt.show()\n\nplt.scatter(predicted, residual)\nplt.xlabel('Predicted')\nplt.ylabel('Residual')\nplt.axhline(y=0)\nplt.title('Residual vs. Predicted( Heteroscedasticity present)')\nplt.show()", "As shown above, the error from the model is not normally distributed. The error is skewed to the right, similar to the raw data itself. \nAdditionally, the distribution of error terms is not consistent, it is heteroscadastic. \nInspect the Data and Transform\nThe data is skewed to the right. The data is transformed by taking its square root to see if we can obtain a more normal distribution.", "plt.hist(actual)\nplt.show()\n\nsqrt_actual = np.sqrt(actual)\n\nplt.hist(sqrt_actual)\nplt.show()", "That's a little better. Has this helped the multivariate normality? Yes.", "# Extract predicted values.\npredicted = regr.predict(X).ravel()\nactual = data['Sales']\n\n# Calculate the error, also called the residual.\ncorr_residual = sqrt_actual - predicted\n\nplt.hist(corr_residual)\nplt.title('Residual counts')\nplt.xlabel('Residual')\nplt.ylabel('Count')\nplt.show()", "Transforming the data into the sqrt of the data lessened the skewness to the right, and allowed the error from the model to be more normally-distributed. Let's see if our transformation helped the problem with heteroscedasticity. \nHomoscedasticity", "plt.scatter(predicted, corr_residual)\nplt.xlabel('Predicted')\nplt.ylabel('Residual')\nplt.axhline(y=0)\nplt.title('Residual vs. Predicted')\nplt.show()\n", "There was a big improvement here as well. Success!" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
pycroscopy/pycroscopy
jupyter_notebooks/image_registration.ipynb
mit
[ "<font size = \"5\"> Image Registration </font>\n<hr style=\"height:1px;border-top:4px solid #FF8200\" />\n\nby \nGerd Duscher and Matthew. F. Chisholm\nMaterials Science & Engineering<br>\nJoint Institute of Advanced Materials<br>\nThe University of Tennessee, Knoxville\nRegistration of a Stack of Images\nWe us this notebook only for a stack of images.\nPrerequesites\nInstall pycroscopy", "import sys\nfrom pkg_resources import get_distribution, DistributionNotFound\n\ndef test_package(package_name):\n \"\"\"Test if package exists and returns version or -1\"\"\"\n try:\n version = (get_distribution(package_name).version)\n except (DistributionNotFound, ImportError) as err:\n version = '-1'\n return version\n\n# Colab setup ------------------\nif 'google.colab' in sys.modules:\n !pip install git+https://github.com/pycroscopy/pyTEMlib/ -q\n\n# pyTEMlib setup ------------------\nelse:\n if test_package('sidpy') < '0.0.4':\n print('installing sidpy')\n !{sys.executable} -m pip install --upgrade sidpy -q \n if test_package('pyNSID') < '0.0.2':\n print('installing pyNSID')\n !{sys.executable} -m pip install --upgrade pyNSID -q \n if test_package('pycroscopy') < '0':\n print('installing pyTEMlib')\n !{sys.executable} -m pip install --upgrade pyTEMlib -q\n# ------------------------------\nprint('done')", "Import the usual libraries\nYou can load that library with the code cell above:", "# import matplotlib and numpy\n# use \"inline\" instead of \"notebook\" for non-interactive\n# use widget for jupyterlab needs ipympl to be installed\nimport sys\nif 'google.colab' in sys.modules:\n %pylab --no-import-all notebook\nelse: \n %pylab --no-import-all widget\n\nfrom sidpy.io.interface_utils import open_file_dialog\n\nfrom SciFiReaders import DM3Reader\nimport SciFiReaders\n\n%load_ext autoreload\n%autoreload 2\n\nsys.path.insert(0, '../')\nimport pycroscopy as px\n\n__notebook__ = 'Image_Registration'\n__notebook_version__ = '2021_10_04'", "Load an image stack :\nPlease, load an image stack. <br>\nA stack of images is used to reduce noise, but for an added image the images have to be aligned to compensate for drift and other microscope instabilities.\nYou select here (with the open_file_dialog parameter), whether an open file dialog apears in the code cell below the next one or whether you want to get a list of files (Nion has a weird way of dealing with file names).", "if 'google.colab' in sys.modules:\n from google.colab import drive\n drive.mount(\"/content/drive\")\n drive_directory = 'drive/MyDrive/'\nelse:\n drive_directory = '.'\n\nfile_widget = open_file_dialog(drive_directory)\nfile_widget", "Plot Image Stack\nEither we load the selected file in hte widget above above or a file dialog window appears.\nThis is the point the notebook can be repeated with a new file. 
Either select a file above again (without running the code cell above) or open a file dialog here\nNote that the open file dialog might not apear in the foreground!", "try: \n main_dataset.h5_dataset.file.close()\nexcept:\n pass\ndm3_reader = DM3Reader(file_widget.selected)\nmain_dataset = dm3_reader.read()\n\nif main_dataset.data_type.name != 'IMAGE_STACK':\n print(f\"Please load an image stack for this notebook, this is an {main_dataset.data_type}\")\nprint(main_dataset) \nmain_dataset.dim_0.dimension_type = 'spatial'\nmain_dataset.dim_1.dimension_type = 'spatial'\nmain_dataset.z.dimension_type = 'temporal'\nmain_dataset.plot() # note this needs a view reference for interaction\n\nmain_dataset._axes\nframe_dim = []\nspatial_dim = []\nfor i, axis in main_dataset._axes.items():\n if axis.dimension_type.name == 'SPATIAL':\n spatial_dim.append(i)\n else:\n frame_dim.append(i)\n \nif len(spatial_dim) != 2:\n print('need two spatial dimensions')\nif len(frame_dim) != 1:\n print('need one frame dimensions')\n", "Complete Registration\nTakes a while, depending on your computer between 1 and 10 minutes.", "## Do all of registration\nnotebook_tags ={'notebook': __notebook__, 'notebook_version': __notebook_version__}\n\nnon_rigid_registered, rigid_registered_dataset = px.image.complete_registration(main_dataset)\n \nnon_rigid_registered.plot()\nnon_rigid_registered", "Check Drift", "scale_x = (rigid_registered_dataset.x[1]-rigid_registered_dataset.x[0])*1.\ndrift = rigid_registered_dataset.metadata['drift']\nx = np.linspace(0,drift.shape[0]-1,drift.shape[0])\n\npolynom_degree = 2 # 1 is linear fit, 2 is parabolic fit, ...\nline_fit_x = np.polyfit(x, drift[:,0], polynom_degree)\npoly_x = np.poly1d(line_fit_x)\nline_fit_y = np.polyfit(x, drift[:,1], polynom_degree)\npoly_y = np.poly1d(line_fit_y)\n\n\nplt.figure()\n# plot drift and fit of drift\nplt.axhline(color = 'gray')\nplt.plot(x, drift[:,0], label = 'drift x')\nplt.plot(x, drift[:,1], label = 'drift y')\nplt.plot(x, poly_x(x), label = 'fit_drift_x')\nplt.plot(x, poly_y(x), label = 'fit_drift_y')\nplt.legend();\n\n# set second axis in pico meter\nax_pixels = plt.gca()\nax_pixels.step(1, 1)\nax_pm = ax_pixels.twinx()\nx_1, x_2 = ax_pixels.get_ylim()\nax_pm.set_ylim(x_1*scale_x, x_2*scale_x)\n\n# add labels\nax_pixels.set_ylabel('drift [pixels]')\nax_pm.set_ylabel('drift [nm]')\nax_pixels.set_xlabel('image number');\nplt.tight_layout()\n", "Appendix\nDemon Registration\nHere we use the Diffeomorphic Demon Non-Rigid Registration as provided by simpleITK. \nPlease Cite: \n* simpleITK\nand\n\n\nT. Vercauteren, X. Pennec, A. Perchant and N. Ayache Diffeomorphic Demons Using ITK\\'s Finite Difference Solver Hierarchy The Insight Journal, 2007\n\nThis Non-Rigid Registration consists of the following steps:\n\n\ndetermine reference image\n\nFor this we use the average of the rigid registered stack\nthis averaged stack is then smeared with a Gaussian of sigma 2 pixel to reduce noise\nunder the assumption that high frequency scan distortions cancel out over several images, we, therefore, obtained the center of mass of the atoms. 
\n\n\n\nperform the demon registration filter to determine a distortion matrix\n\neach single image of a stack is first smeared with a Gaussian of sigma of 2pixels\nthen the deformation matrix is determined for these images\nthe deformation matrix is a matrix where each pixel has a vector ( x, and y value) for the relative shift of this pixel.\n\n\n\nThis deformation matrix is used to transform the image\n\nThe transformation is performed on the original image.\nImportant, here, is to set the interpolator method, (the image needs to be interpolated because the new pixels are not on an integer grid.)\n\n\n\nLet's see what the different interpolators do.\n|Method | RMS contrast | Standard | Mean |\n|-------|:--------------|:-------------|:-------|\n|original |0.1965806 |0.07764114 |0.3949583\n|Linear |0.20159315 |0.079470366 |0.39421165\n|BSpline |0.20162606 |0.0794831 |0.39421043\n|Gaussian |0.14310582 |0.056414302 |0.39421389\n|Hamming |0.20163293 |0.07948672 |0.39421496\nThe Gaussian interpolator is the only one seems to smear the signal.\nWe will use the Bspline method a fast and simple method that does not introduce spurious features and does not smear the signal.\nFull Code of Demon registration", "import simpleITK as sitk\n\ndef DemonReg(cube, verbose = False):\n \"\"\"\n Diffeomorphic Demon Non-Rigid Registration \n Usage:\n \n DemReg = DemonReg(cube, verbose = False)\n\n Input:\n cube: stack of image after rigid registration and cropping\n Output:\n DemReg: stack of images with non-rigid registration\n\n Dempends on:\n simpleITK and numpy\n \n Please Cite: http://www.simpleitk.org/SimpleITK/project/parti.html\n and T. Vercauteren, X. Pennec, A. Perchant and N. Ayache\n Diffeomorphic Demons Using ITK\\'s Finite Difference Solver Hierarchy\n The Insight Journal, http://hdl.handle.net/1926/510 2007\n \"\"\"\n \n DemReg = np.zeros_like(cube)\n nimages = cube.shape[0]\n print(nimages)\n # create fixed image by summing over rigid registration\n\n fixed_np = np.average(current_dataset, axis=0)\n\n fixed = sitk.GetImageFromArray(fixed_np)\n fixed = sitk.DiscreteGaussian(fixed, 2.0)\n\n #demons = sitk.SymmetricForcesDemonsRegistrationFilter()\n demons = sitk.DiffeomorphicDemonsRegistrationFilter()\n\n demons.SetNumberOfIterations(200)\n demons.SetStandardDeviations(1.0)\n\n resampler = sitk.ResampleImageFilter()\n resampler.SetReferenceImage(fixed);\n resampler.SetInterpolator(sitk.sitkBspline)\n resampler.SetDefaultPixelValue(0)\n\n done = 0\n \n for i in range(nimages):\n if done < int((i+1)/nimages*50):\n done = int((i+1)/nimages*50)\n sys.stdout.write('\\r')\n # progress output :\n sys.stdout.write(\"[%-50s] %d%%\" % ('*'*done, 2*done))\n sys.stdout.flush()\n \n moving = sitk.GetImageFromArray(cube[i])\n movingf = sitk.DiscreteGaussian(moving, 2.0)\n displacementField = demons.Execute(fixed,movingf)\n outTx = sitk.DisplacementFieldTransform( displacementField )\n resampler.SetTransform(outTx)\n out = resampler.Execute(moving)\n DemReg[i,:,:] = sitk.GetArrayFromImage(out)\n #print('image ', i)\n \n \n print(':-)')\n print('You have succesfully completed Diffeomorphic Demons Registration')\n \n return DemReg\n" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
ImAlexisSaez/deep-learning-specialization-coursera
course_1/week_3/assignment_1/planar_data_classification_with_one_hidden_layer_v1.ipynb
mit
[ "Planar data classification with one hidden layer\nWelcome to your week 3 programming assignment. It's time to build your first neural network, which will have a hidden layer. You will see a big difference between this model and the one you implemented using logistic regression. \nYou will learn how to:\n- Implement a 2-class classification neural network with a single hidden layer\n- Use units with a non-linear activation function, such as tanh \n- Compute the cross entropy loss \n- Implement forward and backward propagation\n1 - Packages\nLet's first import all the packages that you will need during this assignment.\n- numpy is the fundamental package for scientific computing with Python.\n- sklearn provides simple and efficient tools for data mining and data analysis. \n- matplotlib is a library for plotting graphs in Python.\n- testCases provides some test examples to assess the correctness of your functions\n- planar_utils provide various useful functions used in this assignment", "# Package imports\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom testCases import *\nimport sklearn\nimport sklearn.datasets\nimport sklearn.linear_model\nfrom planar_utils import plot_decision_boundary, sigmoid, load_planar_dataset, load_extra_datasets\n\n%matplotlib inline\n\nnp.random.seed(1) # set a seed so that the results are consistent", "2 - Dataset\nFirst, let's get the dataset you will work on. The following code will load a \"flower\" 2-class dataset into variables X and Y.", "X, Y = load_planar_dataset()", "Visualize the dataset using matplotlib. The data looks like a \"flower\" with some red (label y=0) and some blue (y=1) points. Your goal is to build a model to fit this data.", "# Visualize the data:\nplt.scatter(X[0, :], X[1, :], c=Y, s=40, cmap=plt.cm.Spectral);", "You have:\n - a numpy-array (matrix) X that contains your features (x1, x2)\n - a numpy-array (vector) Y that contains your labels (red:0, blue:1).\nLets first get a better sense of what our data is like. \nExercise: How many training examples do you have? In addition, what is the shape of the variables X and Y? \nHint: How do you get the shape of a numpy array? (help)", "### START CODE HERE ### (≈ 3 lines of code)\nshape_X = X.shape\nshape_Y = Y.shape\nm = Y.flatten().shape # training set size\n### END CODE HERE ###\n\nprint ('The shape of X is: ' + str(shape_X))\nprint ('The shape of Y is: ' + str(shape_Y))\nprint ('I have m = %d training examples!' % (m))", "Expected Output:\n<table style=\"width:20%\">\n\n <tr>\n <td>**shape of X**</td>\n <td> (2, 400) </td> \n </tr>\n\n <tr>\n <td>**shape of Y**</td>\n <td>(1, 400) </td> \n </tr>\n\n <tr>\n <td>**m**</td>\n <td> 400 </td> \n </tr>\n\n</table>\n\n3 - Simple Logistic Regression\nBefore building a full neural network, lets first see how logistic regression performs on this problem. You can use sklearn's built-in functions to do that. Run the code below to train a logistic regression classifier on the dataset.", "# Train the logistic regression classifier\nclf = sklearn.linear_model.LogisticRegressionCV();\nclf.fit(X.T, Y.T);", "You can now plot the decision boundary of these models. 
Run the code below.", "# Plot the decision boundary for logistic regression\nplot_decision_boundary(lambda x: clf.predict(x), X, Y)\nplt.title(\"Logistic Regression\")\n\n# Print accuracy\nLR_predictions = clf.predict(X.T)\nprint ('Accuracy of logistic regression: %d ' % float((np.dot(Y,LR_predictions) + np.dot(1-Y,1-LR_predictions))/float(Y.size)*100) +\n '% ' + \"(percentage of correctly labelled datapoints)\")", "Expected Output:\n<table style=\"width:20%\">\n <tr>\n <td>**Accuracy**</td>\n <td> 47% </td> \n </tr>\n\n</table>\n\nInterpretation: The dataset is not linearly separable, so logistic regression doesn't perform well. Hopefully a neural network will do better. Let's try this now! \n4 - Neural Network model\nLogistic regression did not work well on the \"flower dataset\". You are going to train a Neural Network with a single hidden layer.\nHere is our model:\n<img src=\"images/classification_kiank.png\" style=\"width:600px;height:300px;\">\nMathematically:\nFor one example $x^{(i)}$:\n$$z^{[1] (i)} = W^{[1]} x^{(i)} + b^{[1] (i)}\\tag{1}$$ \n$$a^{[1] (i)} = \\tanh(z^{[1] (i)})\\tag{2}$$\n$$z^{[2] (i)} = W^{[2]} a^{[1] (i)} + b^{[2] (i)}\\tag{3}$$\n$$\\hat{y}^{(i)} = a^{[2] (i)} = \\sigma(z^{ [2] (i)})\\tag{4}$$\n$$y^{(i)}_{prediction} = \\begin{cases} 1 & \\mbox{if } a^{2} > 0.5 \\ 0 & \\mbox{otherwise } \\end{cases}\\tag{5}$$\nGiven the predictions on all the examples, you can also compute the cost $J$ as follows: \n$$J = - \\frac{1}{m} \\sum\\limits_{i = 0}^{m} \\large\\left(\\small y^{(i)}\\log\\left(a^{[2] (i)}\\right) + (1-y^{(i)})\\log\\left(1- a^{[2] (i)}\\right) \\large \\right) \\small \\tag{6}$$\nReminder: The general methodology to build a Neural Network is to:\n 1. Define the neural network structure ( # of input units, # of hidden units, etc). \n 2. Initialize the model's parameters\n 3. Loop:\n - Implement forward propagation\n - Compute loss\n - Implement backward propagation to get the gradients\n - Update parameters (gradient descent)\nYou often build helper functions to compute steps 1-3 and then merge them into one function we call nn_model(). Once you've built nn_model() and learnt the right parameters, you can make predictions on new data.\n4.1 - Defining the neural network structure\nExercise: Define three variables:\n - n_x: the size of the input layer\n - n_h: the size of the hidden layer (set this to 4) \n - n_y: the size of the output layer\nHint: Use shapes of X and Y to find n_x and n_y. 
Also, hard code the hidden layer size to be 4.", "# GRADED FUNCTION: layer_sizes\n\ndef layer_sizes(X, Y):\n \"\"\"\n Arguments:\n X -- input dataset of shape (input size, number of examples)\n Y -- labels of shape (output size, number of examples)\n \n Returns:\n n_x -- the size of the input layer\n n_h -- the size of the hidden layer\n n_y -- the size of the output layer\n \"\"\"\n ### START CODE HERE ### (≈ 3 lines of code)\n n_x = X.shape[0] # size of input layer\n n_h = 4\n n_y = Y.shape[0] # size of output layer\n ### END CODE HERE ###\n return (n_x, n_h, n_y)\n\nX_assess, Y_assess = layer_sizes_test_case()\n(n_x, n_h, n_y) = layer_sizes(X_assess, Y_assess)\nprint(\"The size of the input layer is: n_x = \" + str(n_x))\nprint(\"The size of the hidden layer is: n_h = \" + str(n_h))\nprint(\"The size of the output layer is: n_y = \" + str(n_y))", "Expected Output (these are not the sizes you will use for your network, they are just used to assess the function you've just coded).\n<table style=\"width:20%\">\n <tr>\n <td>**n_x**</td>\n <td> 5 </td> \n </tr>\n\n <tr>\n <td>**n_h**</td>\n <td> 4 </td> \n </tr>\n\n <tr>\n <td>**n_y**</td>\n <td> 2 </td> \n </tr>\n\n</table>\n\n4.2 - Initialize the model's parameters\nExercise: Implement the function initialize_parameters().\nInstructions:\n- Make sure your parameters' sizes are right. Refer to the neural network figure above if needed.\n- You will initialize the weights matrices with random values. \n - Use: np.random.randn(a,b) * 0.01 to randomly initialize a matrix of shape (a,b).\n- You will initialize the bias vectors as zeros. \n - Use: np.zeros((a,b)) to initialize a matrix of shape (a,b) with zeros.", "# GRADED FUNCTION: initialize_parameters\n\ndef initialize_parameters(n_x, n_h, n_y):\n \"\"\"\n Argument:\n n_x -- size of the input layer\n n_h -- size of the hidden layer\n n_y -- size of the output layer\n \n Returns:\n params -- python dictionary containing your parameters:\n W1 -- weight matrix of shape (n_h, n_x)\n b1 -- bias vector of shape (n_h, 1)\n W2 -- weight matrix of shape (n_y, n_h)\n b2 -- bias vector of shape (n_y, 1)\n \"\"\"\n \n np.random.seed(2) # we set up a seed so that your output matches ours although the initialization is random.\n \n ### START CODE HERE ### (≈ 4 lines of code)\n W1 = np.random.randn(n_h, n_x) * 0.01\n b1 = np.zeros((n_h, 1))\n W2 = np.random.randn(n_y, n_h) * 0.01\n b2 = np.zeros((n_y, 1))\n ### END CODE HERE ###\n \n assert (W1.shape == (n_h, n_x))\n assert (b1.shape == (n_h, 1))\n assert (W2.shape == (n_y, n_h))\n assert (b2.shape == (n_y, 1))\n \n parameters = {\"W1\": W1,\n \"b1\": b1,\n \"W2\": W2,\n \"b2\": b2}\n \n return parameters\n\nn_x, n_h, n_y = initialize_parameters_test_case()\n\nparameters = initialize_parameters(n_x, n_h, n_y)\nprint(\"W1 = \" + str(parameters[\"W1\"]))\nprint(\"b1 = \" + str(parameters[\"b1\"]))\nprint(\"W2 = \" + str(parameters[\"W2\"]))\nprint(\"b2 = \" + str(parameters[\"b2\"]))", "Expected Output:\n<table style=\"width:90%\">\n <tr>\n <td>**W1**</td>\n <td> [[-0.00416758 -0.00056267]\n [-0.02136196 0.01640271]\n [-0.01793436 -0.00841747]\n [ 0.00502881 -0.01245288]] </td> \n </tr>\n\n <tr>\n <td>**b1**</td>\n <td> [[ 0.]\n [ 0.]\n [ 0.]\n [ 0.]] </td> \n </tr>\n\n <tr>\n <td>**W2**</td>\n <td> [[-0.01057952 -0.00909008 0.00551454 0.02292208]]</td> \n </tr>\n\n\n <tr>\n <td>**b2**</td>\n <td> [[ 0.]] </td> \n </tr>\n\n</table>\n\n4.3 - The Loop\nQuestion: Implement forward_propagation().\nInstructions:\n- Look above at the mathematical representation of 
your classifier.\n- You can use the function sigmoid(). It is built-in (imported) in the notebook.\n- You can use the function np.tanh(). It is part of the numpy library.\n- The steps you have to implement are:\n 1. Retrieve each parameter from the dictionary \"parameters\" (which is the output of initialize_parameters()) by using parameters[\"..\"].\n 2. Implement Forward Propagation. Compute $Z^{[1]}, A^{[1]}, Z^{[2]}$ and $A^{[2]}$ (the vector of all your predictions on all the examples in the training set).\n- Values needed in the backpropagation are stored in \"cache\". The cache will be given as an input to the backpropagation function.", "# GRADED FUNCTION: forward_propagation\n\ndef forward_propagation(X, parameters):\n \"\"\"\n Argument:\n X -- input data of size (n_x, m)\n parameters -- python dictionary containing your parameters (output of initialization function)\n \n Returns:\n A2 -- The sigmoid output of the second activation\n cache -- a dictionary containing \"Z1\", \"A1\", \"Z2\" and \"A2\"\n \"\"\"\n # Retrieve each parameter from the dictionary \"parameters\"\n ### START CODE HERE ### (≈ 4 lines of code)\n W1 = parameters[\"W1\"]\n b1 = parameters[\"b1\"]\n W2 = parameters[\"W2\"]\n b2 = parameters[\"b2\"]\n ### END CODE HERE ###\n \n # Implement Forward Propagation to calculate A2 (probabilities)\n ### START CODE HERE ### (≈ 4 lines of code)\n Z1 = np.dot(W1, X) + b1\n A1 = np.tanh(Z1)\n Z2 = np.dot(W2, A1) + b2\n A2 = sigmoid(Z2)\n ### END CODE HERE ###\n \n assert(A2.shape == (1, X.shape[1]))\n \n cache = {\"Z1\": Z1,\n \"A1\": A1,\n \"Z2\": Z2,\n \"A2\": A2}\n \n return A2, cache\n\nX_assess, parameters = forward_propagation_test_case()\n\nA2, cache = forward_propagation(X_assess, parameters)\n\n# Note: we use the mean here just to make sure that your output matches ours. \nprint(np.mean(cache['Z1']) ,np.mean(cache['A1']),np.mean(cache['Z2']),np.mean(cache['A2']))", "Expected Output:\n<table style=\"width:55%\">\n <tr>\n <td> -0.000499755777742 -0.000496963353232 0.000438187450959 0.500109546852 </td> \n </tr>\n</table>\n\nNow that you have computed $A^{[2]}$ (in the Python variable \"A2\"), which contains $a^{2}$ for every example, you can compute the cost function as follows:\n$$J = - \\frac{1}{m} \\sum\\limits_{i = 0}^{m} \\large{(} \\small y^{(i)}\\log\\left(a^{[2] (i)}\\right) + (1-y^{(i)})\\log\\left(1- a^{[2] (i)}\\right) \\large{)} \\small\\tag{13}$$\nExercise: Implement compute_cost() to compute the value of the cost $J$.\nInstructions:\n- There are many ways to implement the cross-entropy loss. 
To help you, we give you how we would have implemented\n$- \\sum\\limits_{i=0}^{m} y^{(i)}\\log(a^{2})$:\npython\nlogprobs = np.multiply(np.log(A2),Y)\ncost = - np.sum(logprobs) # no need to use a for loop!\n(you can use either np.multiply() and then np.sum() or directly np.dot()).", "# GRADED FUNCTION: compute_cost\n\ndef compute_cost(A2, Y, parameters):\n \"\"\"\n Computes the cross-entropy cost given in equation (13)\n \n Arguments:\n A2 -- The sigmoid output of the second activation, of shape (1, number of examples)\n Y -- \"true\" labels vector of shape (1, number of examples)\n parameters -- python dictionary containing your parameters W1, b1, W2 and b2\n \n Returns:\n cost -- cross-entropy cost given equation (13)\n \"\"\"\n \n m = Y.shape[1] # number of example\n \n # Retrieve W1 and W2 from parameters\n ### START CODE HERE ### (≈ 2 lines of code)\n W1 = parameters[\"W1\"]\n W2 = parameters[\"W2\"]\n ### END CODE HERE ###\n \n # Compute the cross-entropy cost\n ### START CODE HERE ### (≈ 2 lines of code)\n logprobs = np.multiply(Y, np.log(A2)) + np.multiply(np.log(1 - A2), 1 - Y)\n cost = - 1 / m * np.sum(logprobs)\n ### END CODE HERE ###\n \n cost = np.squeeze(cost) # makes sure cost is the dimension we expect. \n # E.g., turns [[17]] into 17 \n assert(isinstance(cost, float))\n \n return cost\n\nA2, Y_assess, parameters = compute_cost_test_case()\n\nprint(\"cost = \" + str(compute_cost(A2, Y_assess, parameters)))", "Expected Output:\n<table style=\"width:20%\">\n <tr>\n <td>**cost**</td>\n <td> 0.692919893776 </td> \n </tr>\n\n</table>\n\nUsing the cache computed during forward propagation, you can now implement backward propagation.\nQuestion: Implement the function backward_propagation().\nInstructions:\nBackpropagation is usually the hardest (most mathematical) part in deep learning. To help you, here again is the slide from the lecture on backpropagation. You'll want to use the six equations on the right of this slide, since you are building a vectorized implementation. \n<img src=\"images/grad_summary.png\" style=\"width:600px;height:300px;\">\n<!--\n$\\frac{\\partial \\mathcal{J} }{ \\partial z_{2}^{(i)} } = \\frac{1}{m} (a^{[2](i)} - y^{(i)})$\n\n$\\frac{\\partial \\mathcal{J} }{ \\partial W_2 } = \\frac{\\partial \\mathcal{J} }{ \\partial z_{2}^{(i)} } a^{[1] (i) T} $\n\n$\\frac{\\partial \\mathcal{J} }{ \\partial b_2 } = \\sum_i{\\frac{\\partial \\mathcal{J} }{ \\partial z_{2}^{(i)}}}$\n\n$\\frac{\\partial \\mathcal{J} }{ \\partial z_{1}^{(i)} } = W_2^T \\frac{\\partial \\mathcal{J} }{ \\partial z_{2}^{(i)} } * ( 1 - a^{[1] (i) 2}) $\n\n$\\frac{\\partial \\mathcal{J} }{ \\partial W_1 } = \\frac{\\partial \\mathcal{J} }{ \\partial z_{1}^{(i)} } X^T $\n\n$\\frac{\\partial \\mathcal{J} _i }{ \\partial b_1 } = \\sum_i{\\frac{\\partial \\mathcal{J} }{ \\partial z_{1}^{(i)}}}$\n\n- Note that $*$ denotes elementwise multiplication.\n- The notation you will use is common in deep learning coding:\n - dW1 = $\\frac{\\partial \\mathcal{J} }{ \\partial W_1 }$\n - db1 = $\\frac{\\partial \\mathcal{J} }{ \\partial b_1 }$\n - dW2 = $\\frac{\\partial \\mathcal{J} }{ \\partial W_2 }$\n - db2 = $\\frac{\\partial \\mathcal{J} }{ \\partial b_2 }$\n\n!-->\n\n\nTips:\nTo compute dZ1 you'll need to compute $g^{[1]'}(Z^{[1]})$. Since $g^{[1]}(.)$ is the tanh activation function, if $a = g^{[1]}(z)$ then $g^{[1]'}(z) = 1-a^2$. 
So you can compute \n$g^{[1]'}(Z^{[1]})$ using (1 - np.power(A1, 2)).", "# GRADED FUNCTION: backward_propagation\n\ndef backward_propagation(parameters, cache, X, Y):\n \"\"\"\n Implement the backward propagation using the instructions above.\n \n Arguments:\n parameters -- python dictionary containing our parameters \n cache -- a dictionary containing \"Z1\", \"A1\", \"Z2\" and \"A2\".\n X -- input data of shape (2, number of examples)\n Y -- \"true\" labels vector of shape (1, number of examples)\n \n Returns:\n grads -- python dictionary containing your gradients with respect to different parameters\n \"\"\"\n m = X.shape[1]\n \n # First, retrieve W1 and W2 from the dictionary \"parameters\".\n ### START CODE HERE ### (≈ 2 lines of code)\n W1 = parameters[\"W1\"]\n W2 = parameters[\"W2\"]\n ### END CODE HERE ###\n \n # Retrieve also A1 and A2 from dictionary \"cache\".\n ### START CODE HERE ### (≈ 2 lines of code)\n A1 = cache[\"A1\"]\n A2 = cache[\"A2\"]\n ### END CODE HERE ###\n \n # Backward propagation: calculate dW1, db1, dW2, db2. \n ### START CODE HERE ### (≈ 6 lines of code, corresponding to 6 equations on slide above)\n dZ2 = A2 - Y\n dW2 = 1 / m * np.dot(dZ2, A1.T)\n db2 = 1 / m * np.sum(dZ2, axis=1, keepdims=True)\n dZ1 = np.dot(W2.T, dZ2) * (1 - np.power(A1, 2))\n dW1 = 1 / m * np.dot(dZ1, X.T)\n db1 = 1 / m * np.sum(dZ1, axis=1, keepdims=True)\n ### END CODE HERE ###\n \n grads = {\"dW1\": dW1,\n \"db1\": db1,\n \"dW2\": dW2,\n \"db2\": db2}\n \n return grads\n\nparameters, cache, X_assess, Y_assess = backward_propagation_test_case()\n\ngrads = backward_propagation(parameters, cache, X_assess, Y_assess)\nprint (\"dW1 = \"+ str(grads[\"dW1\"]))\nprint (\"db1 = \"+ str(grads[\"db1\"]))\nprint (\"dW2 = \"+ str(grads[\"dW2\"]))\nprint (\"db2 = \"+ str(grads[\"db2\"]))", "Expected output:\n<table style=\"width:80%\">\n <tr>\n <td>**dW1**</td>\n <td> [[ 0.01018708 -0.00708701]\n [ 0.00873447 -0.0060768 ]\n [-0.00530847 0.00369379]\n [-0.02206365 0.01535126]] </td> \n </tr>\n\n <tr>\n <td>**db1**</td>\n <td> [[-0.00069728]\n [-0.00060606]\n [ 0.000364 ]\n [ 0.00151207]] </td> \n </tr>\n\n <tr>\n <td>**dW2**</td>\n <td> [[ 0.00363613 0.03153604 0.01162914 -0.01318316]] </td> \n </tr>\n\n\n <tr>\n <td>**db2**</td>\n <td> [[ 0.06589489]] </td> \n </tr>\n\n</table>\n\nQuestion: Implement the update rule. Use gradient descent. You have to use (dW1, db1, dW2, db2) in order to update (W1, b1, W2, b2).\nGeneral gradient descent rule: $ \\theta = \\theta - \\alpha \\frac{\\partial J }{ \\partial \\theta }$ where $\\alpha$ is the learning rate and $\\theta$ represents a parameter.\nIllustration: The gradient descent algorithm with a good learning rate (converging) and a bad learning rate (diverging). 
Images courtesy of Adam Harley.\n<img src=\"images/sgd.gif\" style=\"width:400;height:400;\"> <img src=\"images/sgd_bad.gif\" style=\"width:400;height:400;\">", "# GRADED FUNCTION: update_parameters\n\ndef update_parameters(parameters, grads, learning_rate = 1.2):\n \"\"\"\n Updates parameters using the gradient descent update rule given above\n \n Arguments:\n parameters -- python dictionary containing your parameters \n grads -- python dictionary containing your gradients \n \n Returns:\n parameters -- python dictionary containing your updated parameters \n \"\"\"\n # Retrieve each parameter from the dictionary \"parameters\"\n ### START CODE HERE ### (≈ 4 lines of code)\n W1 = parameters[\"W1\"]\n b1 = parameters[\"b1\"]\n W2 = parameters[\"W2\"]\n b2 = parameters[\"b2\"]\n ### END CODE HERE ###\n \n # Retrieve each gradient from the dictionary \"grads\"\n ### START CODE HERE ### (≈ 4 lines of code)\n dW1 = grads[\"dW1\"]\n db1 = grads[\"db1\"]\n dW2 = grads[\"dW2\"]\n db2 = grads[\"db2\"]\n ## END CODE HERE ###\n \n # Update rule for each parameter\n ### START CODE HERE ### (≈ 4 lines of code)\n W1 = W1 - learning_rate * dW1\n b1 = b1 - learning_rate * db1\n W2 = W2 - learning_rate * dW2\n b2 = b2 - learning_rate * db2\n ### END CODE HERE ###\n \n parameters = {\"W1\": W1,\n \"b1\": b1,\n \"W2\": W2,\n \"b2\": b2}\n \n return parameters\n\nparameters, grads = update_parameters_test_case()\nparameters = update_parameters(parameters, grads)\n\nprint(\"W1 = \" + str(parameters[\"W1\"]))\nprint(\"b1 = \" + str(parameters[\"b1\"]))\nprint(\"W2 = \" + str(parameters[\"W2\"]))\nprint(\"b2 = \" + str(parameters[\"b2\"]))", "Expected Output:\n<table style=\"width:80%\">\n <tr>\n <td>**W1**</td>\n <td> [[-0.00643025 0.01936718]\n [-0.02410458 0.03978052]\n [-0.01653973 -0.02096177]\n [ 0.01046864 -0.05990141]]</td> \n </tr>\n\n <tr>\n <td>**b1**</td>\n <td> [[ -1.02420756e-06]\n [ 1.27373948e-05]\n [ 8.32996807e-07]\n [ -3.20136836e-06]]</td> \n </tr>\n\n <tr>\n <td>**W2**</td>\n <td> [[-0.01041081 -0.04463285 0.01758031 0.04747113]] </td> \n </tr>\n\n\n <tr>\n <td>**b2**</td>\n <td> [[ 0.00010457]] </td> \n </tr>\n\n</table>\n\n4.4 - Integrate parts 4.1, 4.2 and 4.3 in nn_model()\nQuestion: Build your neural network model in nn_model().\nInstructions: The neural network model has to use the previous functions in the right order.", "# GRADED FUNCTION: nn_model\n\ndef nn_model(X, Y, n_h, num_iterations = 10000, print_cost=False):\n \"\"\"\n Arguments:\n X -- dataset of shape (2, number of examples)\n Y -- labels of shape (1, number of examples)\n n_h -- size of the hidden layer\n num_iterations -- Number of iterations in gradient descent loop\n print_cost -- if True, print the cost every 1000 iterations\n \n Returns:\n parameters -- parameters learnt by the model. They can then be used to predict.\n \"\"\"\n \n np.random.seed(3)\n n_x = layer_sizes(X, Y)[0]\n n_y = layer_sizes(X, Y)[2]\n \n # Initialize parameters, then retrieve W1, b1, W2, b2. Inputs: \"n_x, n_h, n_y\". Outputs = \"W1, b1, W2, b2, parameters\".\n ### START CODE HERE ### (≈ 5 lines of code)\n parameters = initialize_parameters(n_x, n_h, n_y)\n W1 = parameters[\"W1\"]\n b1 = parameters[\"b1\"]\n W2 = parameters[\"W2\"]\n b2 = parameters[\"b2\"]\n ### END CODE HERE ###\n \n # Loop (gradient descent)\n\n for i in range(0, num_iterations):\n \n ### START CODE HERE ### (≈ 4 lines of code)\n # Forward propagation. Inputs: \"X, parameters\". Outputs: \"A2, cache\".\n A2, cache = forward_propagation(X, parameters)\n \n # Cost function. 
Inputs: \"A2, Y, parameters\". Outputs: \"cost\".\n cost = compute_cost(A2, Y, parameters)\n \n # Backpropagation. Inputs: \"parameters, cache, X, Y\". Outputs: \"grads\".\n grads = backward_propagation(parameters, cache, X, Y)\n \n # Gradient descent parameter update. Inputs: \"parameters, grads\". Outputs: \"parameters\".\n parameters = update_parameters(parameters, grads)\n \n ### END CODE HERE ###\n \n # Print the cost every 1000 iterations\n if print_cost and i % 1000 == 0:\n print (\"Cost after iteration %i: %f\" %(i, cost))\n\n return parameters\n\nX_assess, Y_assess = nn_model_test_case()\n\nparameters = nn_model(X_assess, Y_assess, 4, num_iterations=10000, print_cost=False)\nprint(\"W1 = \" + str(parameters[\"W1\"]))\nprint(\"b1 = \" + str(parameters[\"b1\"]))\nprint(\"W2 = \" + str(parameters[\"W2\"]))\nprint(\"b2 = \" + str(parameters[\"b2\"]))", "Expected Output:\n<table style=\"width:90%\">\n <tr>\n <td>**W1**</td>\n <td> [[-4.18494056 5.33220609]\n [-7.52989382 1.24306181]\n [-4.1929459 5.32632331]\n [ 7.52983719 -1.24309422]]</td> \n </tr>\n\n <tr>\n <td>**b1**</td>\n <td> [[ 2.32926819]\n [ 3.79458998]\n [ 2.33002577]\n [-3.79468846]]</td> \n </tr>\n\n <tr>\n <td>**W2**</td>\n <td> [[-6033.83672146 -6008.12980822 -6033.10095287 6008.06637269]] </td> \n </tr>\n\n\n <tr>\n <td>**b2**</td>\n <td> [[-52.66607724]] </td> \n </tr>\n\n</table>\n\n4.5 Predictions\nQuestion: Use your model to predict by building predict().\nUse forward propagation to predict results.\nReminder: predictions = $y_{prediction} = \\mathbb 1 \\text{{activation > 0.5}} = \\begin{cases}\n 1 & \\text{if}\\ activation > 0.5 \\\n 0 & \\text{otherwise}\n \\end{cases}$ \nAs an example, if you would like to set the entries of a matrix X to 0 and 1 based on a threshold you would do: X_new = (X &gt; threshold)", "# GRADED FUNCTION: predict\n\ndef predict(parameters, X):\n \"\"\"\n Using the learned parameters, predicts a class for each example in X\n \n Arguments:\n parameters -- python dictionary containing your parameters \n X -- input data of size (n_x, m)\n \n Returns\n predictions -- vector of predictions of our model (red: 0 / blue: 1)\n \"\"\"\n \n # Computes probabilities using forward propagation, and classifies to 0/1 using 0.5 as the threshold.\n ### START CODE HERE ### (≈ 2 lines of code)\n A2, cache = forward_propagation(X, parameters)\n predictions = (A2 > 0.5)\n ### END CODE HERE ###\n \n return predictions\n\nparameters, X_assess = predict_test_case()\n\npredictions = predict(parameters, X_assess)\nprint(\"predictions mean = \" + str(np.mean(predictions)))", "Expected Output: \n<table style=\"width:40%\">\n <tr>\n <td>**predictions mean**</td>\n <td> 0.666666666667 </td> \n </tr>\n\n</table>\n\nIt is time to run the model and see how it performs on a planar dataset. 
Run the following code to test your model with a single hidden layer of $n_h$ hidden units.", "# Build a model with a n_h-dimensional hidden layer\nparameters = nn_model(X, Y, n_h = 4, num_iterations = 10000, print_cost=True)\n\n# Plot the decision boundary\nplot_decision_boundary(lambda x: predict(parameters, x.T), X, Y)\nplt.title(\"Decision Boundary for hidden layer size \" + str(4))", "Expected Output:\n<table style=\"width:40%\">\n <tr>\n <td>**Cost after iteration 9000**</td>\n <td> 0.218607 </td> \n </tr>\n\n</table>", "# Print accuracy\npredictions = predict(parameters, X)\nprint ('Accuracy: %d' % float((np.dot(Y,predictions.T) + np.dot(1-Y,1-predictions.T))/float(Y.size)*100) + '%')", "Expected Output: \n<table style=\"width:15%\">\n <tr>\n <td>**Accuracy**</td>\n <td> 90% </td> \n </tr>\n</table>\n\nAccuracy is really high compared to Logistic Regression. The model has learnt the leaf patterns of the flower! Neural networks are able to learn even highly non-linear decision boundaries, unlike logistic regression. \nNow, let's try out several hidden layer sizes.\n4.6 - Tuning hidden layer size (optional/ungraded exercise)\nRun the following code. It may take 1-2 minutes. You will observe different behaviors of the model for various hidden layer sizes.", "# This may take about 2 minutes to run\n\nplt.figure(figsize=(16, 32))\nhidden_layer_sizes = [1, 2, 3, 4, 5, 20, 50]\nfor i, n_h in enumerate(hidden_layer_sizes):\n plt.subplot(5, 2, i+1)\n plt.title('Hidden Layer of size %d' % n_h)\n parameters = nn_model(X, Y, n_h, num_iterations = 5000)\n plot_decision_boundary(lambda x: predict(parameters, x.T), X, Y)\n predictions = predict(parameters, X)\n accuracy = float((np.dot(Y,predictions.T) + np.dot(1-Y,1-predictions.T))/float(Y.size)*100)\n print (\"Accuracy for {} hidden units: {} %\".format(n_h, accuracy))", "Interpretation:\n- The larger models (with more hidden units) are able to fit the training set better, until eventually the largest models overfit the data. \n- The best hidden layer size seems to be around n_h = 5. Indeed, a value around here seems to fits the data well without also incurring noticable overfitting.\n- You will also learn later about regularization, which lets you use very large models (such as n_h = 50) without much overfitting. \nOptional questions:\nNote: Remember to submit the assignment but clicking the blue \"Submit Assignment\" button at the upper-right. \nSome optional/ungraded questions that you can explore if you wish: \n- What happens when you change the tanh activation for a sigmoid activation or a ReLU activation?\n- Play with the learning_rate. What happens?\n- What if we change the dataset? (See part 5 below!)\n<font color='blue'>\nYou've learnt to:\n- Build a complete neural network with a hidden layer\n- Make a good use of a non-linear unit\n- Implemented forward propagation and backpropagation, and trained a neural network\n- See the impact of varying the hidden layer size, including overfitting.\nNice work! 
\n5) Performance on other datasets\nIf you want, you can rerun the whole notebook (minus the dataset part) for each of the following datasets.", "# Datasets\nnoisy_circles, noisy_moons, blobs, gaussian_quantiles, no_structure = load_extra_datasets()\n\ndatasets = {\"noisy_circles\": noisy_circles,\n \"noisy_moons\": noisy_moons,\n \"blobs\": blobs,\n \"gaussian_quantiles\": gaussian_quantiles}\n\n### START CODE HERE ### (choose your dataset)\ndataset = \"gaussian_quantiles\"\n### END CODE HERE ###\n\nX, Y = datasets[dataset]\nX, Y = X.T, Y.reshape(1, Y.shape[0])\n\n# make blobs binary\nif dataset == \"blobs\":\n Y = Y%2\n\n# Visualize the data\nplt.scatter(X[0, :], X[1, :], c=Y, s=40, cmap=plt.cm.Spectral);", "Congrats on finishing this Programming Assignment!\nReference:\n- http://scs.ryerson.ca/~aharley/neural-networks/\n- http://cs231n.github.io/neural-networks-case-study/" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
dtamayo/rebound
ipython_examples/HyperbolicOrbits.ipynb
gpl-3.0
[ "Loading Hyperbolic Orbits into REBOUND\nImagine we have a table of orbital elements for comets (kindly provided by Toni Engelhardt).", "from io import StringIO\nimport numpy as np\nimport rebound\nepoch_of_elements = 53371.0 # [MJD, days]\nc = StringIO(u\"\"\"\n# id e q[AU] i[deg] Omega[deg] argperi[deg] t_peri[MJD, days] epoch_of_observation[MJD, days]\n168026 12.181214 15.346358 136.782470 37.581438 268.412314 54776.806093 55516.41727\n21170 2.662235 2.013923 140.646538 23.029490 46.292039 54336.126288 53673.44043 \n189298 15.503013 11.550314 20.042232 203.240743 150.855761 55761.641176 55718.447145 \n72278 34.638392 24.742323 157.984412 126.431540 178.612758 54382.158401 54347.240445\n109766 8.832472 9.900228 144.857801 243.102255 271.345342 55627.501618 54748.37722\n\"\"\")\ncomets = np.loadtxt(c) # load the table into a numpy array", "We want to add these comits to a REBOUND simulation(s). The first thing to do is set the units, which have to be consistent throughout. Here we have a table in AU and days, so we'll use the gaussian gravitational constant (AU, days, solar masses).", "sim = rebound.Simulation()\nk = 0.01720209895 # Gaussian constant\nsim.G = k**2", "We also set the simulation time to the epoch at which the elements are valid:", "sim.t = epoch_of_elements", "We then add the giant planets in our Solar System to the simulation. You could for example query JPL HORIZONS for the states of the planets at each comet's corresponding epoch of observation (see Horizons.ipynb). Here we set up toy masses and orbits for Jupiter & Saturn:", "sim.add(m=1.) # Sun\nsim.add(m=1.e-3, a=5.) # Jupiter\nsim.add(m=3.e-4, a=10.) # Saturn", "Let's write a function that takes a comet from the table and adds it to our simulation:", "def addOrbit(sim, comet_elem):\n tracklet_id, e, q, inc, Omega, argperi, t_peri, epoch_of_observation = comet_elem\n sim.add(primary=sim.particles[0], \n a = q/(1.-e),\n e = e,\n inc = inc*np.pi/180., # have to convert to radians\n Omega = Omega*np.pi/180.,\n omega = argperi*np.pi/180.,\n T = t_peri # time of pericenter passage\n )", "By default, REBOUND adds and outputs particles in Jacobi orbital elements. Typically orbital elements for comets are heliocentric. Mixing the two will give you relative errors in elements, positions etc. of order the mass ratio of Jupiter to the Sun ($\\sim 0.001$) which is why we pass the additional primary=sim.particles[0] argument to the add() function. If this level of accuracy doesn't matters to you, you can ignore the primary argument.\nWe can now set up the first comet and quickly plot to see what the system looks like:", "addOrbit(sim, comets[0])\n%matplotlib inline\nfig = rebound.OrbitPlot(sim, trails=True)", "Now we just integrate until whatever final time we’re interested in. Here it's the epoch at which we observe the comet, which is the last column in our table:", "tfinal = comets[0][-1]\nsim.integrate(tfinal)\nfig = rebound.OrbitPlot(sim, trails=True)", "REBOUND automatically find out if you want to integrate forward or backward in time.\nFor fun, let's add all the coments to a simulation:", "sim = rebound.Simulation()\nsim.G = k**2\nsim.t = epoch_of_elements \nsim.add(m=1.) # Sun\nsim.add(m=1.e-3, a=5.) # Jupiter\nsim.add(m=3.e-4, a=10.) # Saturn\nfor comet in comets:\n addOrbit(sim, comet)\nfig = rebound.OrbitPlot(sim, trails=True)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
getsmarter/bda
module_4/M4_NB3_NetworkClustering.ipynb
mit
[ "<div align=\"right\">Python 3.6 Jupyter Notebook</div>\n\nFinding connected components using clustering\n<br><div class=\"alert alert-warning\">\n<b>Note that this notebook contains advanced exercises applicable only to students who wish to deepen their understanding and qualify for bonus marks on the course.</b> You will be able to achieve 100% for this notebook by only completing Exercise 1. Optional, additional exercises can be completed to qualify for bonus marks.\n</div>\nYour completion of the notebook exercises will be graded based on your ability to do the following:\n\nUnderstand: Do your pseudo-code and comments show evidence that you recall and understand technical concepts?\nApply: Are you able to execute code (using the supplied examples) that performs the required functionality on supplied or generated data sets? \nAnalyze: Are you able to pick the relevant method or library to resolve specific stated questions?\nEvaluate: Are you able to interpret the results and justify your interpretation based on the observed data?\n\nNotebook objectives\nBy the end of this notebook, you will be expected to:\n\n\nFind connected components in networks (using the techniques of hierarchical clustering, modularity maximization, and spectral graph partitioning); and\nInterpret clustering results.\n\n\nList of exercises\n\n\nExercise 1: Understanding hierarchical clustering.\nExercise 2 [Advanced]: Interpreting the results of hierarchical clustering.\nExercise 3 [Advanced]: Summarizing clustering results based on modularity maximization and spectral graph partitioning.\n\n\nNotebook introduction\nCommunity detection is an important task in social network analysis. The idea behind it is to identify groups of people that share a common interest, based on the assumption that these people tend to link to each other more than to the rest of the network. Specifically, real-world networks exhibit clustering behavior that can be observed in the graph representation of these networks by the formation of clusters or partitions. These groups of nodes on a graph (clusters) correspond to communities that share common properties, or have a common role in the system under study.\nIntuitively, it is expected that such clusters are associated with a high concentration of nodes. In the following examples, you will explore the identification of these clusters using the following approaches, as discussed in the video content:\n\nHierarchical clustering (using a distance matrix)\nThe Louvain Algorithm (using modularity maximization)\nSpectral graph partitioning\n\nImport required modules", "import networkx as nx\nimport pandas as pd\nimport numpy as np\n\n%matplotlib inline\nimport matplotlib.pylab as plt\nfrom networkx.drawing.nx_agraph import graphviz_layout\n\nfrom collections import defaultdict, Counter\nimport operator\n\n## For hierarchical clustering.\nfrom scipy.cluster import hierarchy\nfrom scipy.spatial import distance\n\n## For spectral graph partitioning.\nfrom sklearn.cluster import spectral_clustering as spc\n\n## For Community Detection (Louvain Method).\nimport community\n\n\nimport sys\nsys.path.append('..')\nfrom utils import draw_partitioned_graph\nfrom utils import fancy_dendrogram\n\nplt.rcParams['figure.figsize'] = (15, 9)\nplt.rcParams['axes.titlesize'] = 'large'", "1. 
Data preparation\nYou are going to read the graph from an adjacency list saved in earlier exercises.", "call_adjmatrix = pd.read_csv('./call.adjmatrix', index_col=0)\ncall_graph = nx.from_numpy_matrix(call_adjmatrix.as_matrix())\n\n# Display call graph object.\nplt.figure(figsize=(10,10))\nplt.axis('off')\n\npos = graphviz_layout(call_graph, prog='dot')\nnx.draw_networkx(call_graph, pos=pos, node_color='#11DD11', with_labels=False)\n_ = plt.axis('off')", "2. Hierarchical clustering\nThis notebook makes use of a hierarchical clustering algorithm, as implemented in Scipy. The following example uses the average distance measure. Since the graph is weighted, you can also use the single linkage inter-cluster distance measure (see exercises).", "def create_hc(G, linkage='average'):\n \"\"\"\n Creates hierarchical cluster of graph G from distance matrix\n \"\"\" \n \n path_length=nx.all_pairs_shortest_path_length(G)\n distances=np.zeros((G.order(),G.order())) \n \n for u,p in dict(path_length).items():\n for v,d in p.items():\n distances[list(G.nodes)[u]][list(G.nodes)[v]] = d\n distances[list(G.nodes)[v]][list(G.nodes)[u]] = d\n if u==v: \n distances[list(G.nodes)[u]][list(G.nodes)[u]]=0\n # Create hierarchical cluster (HC).\n Y=distance.squareform(distances)\n if linkage == 'max':\n # Creates HC using farthest point linkage.\n Z=hierarchy.complete(Y) \n if linkage == 'single':\n # Creates HC using closest point linkage.\n Z=hierarchy.single(Y) \n if linkage == 'average':\n # Creates HC using average point linkage.\n Z=hierarchy.average(Y)\n \n return Z\n\ndef get_cluster_membership(Z, maxclust):\n '''\n Assigns cluster membership by specifying cluster size.\n '''\n hc_out=list(hierarchy.fcluster(Z,maxclust, criterion='maxclust'))\n \n # Map cluster values to a dictionary variable.\n cluster_membership = {}\n i = 0\n for i in range(len(hc_out)):\n cluster_membership[i]=hc_out[i]\n \n return cluster_membership", "Below is a demonstration of hierarchical clustering when applied to the call graph.", "# Perform hierarchical clustering using 'average' linkage. \nZ = create_hc(call_graph, linkage='average') ", "The dendrogram corresponding to the partitioned graph is obtained as follows:", "hierarchy.dendrogram(Z)\nplt.show()", "You will notice that the full dendrogram is unwieldy, and difficult to use or read. Fortunately, the dendrogram method has a feature that allows one to only show the lastp merged clusters, where $p$ is the desired number of last p merged clusters.", "plt.title('Hierarchical Clustering Dendrogram (pruned)')\nplt.xlabel('sample index (or leaf size)')\nplt.ylabel('distance')\nhierarchy.dendrogram(\n Z,\n truncate_mode='lastp', # show only the last p merged clusters\n p=10, # show only the last p merged clusters\n show_leaf_counts=True, # numbers in brackets are counts for each leaf\n leaf_rotation=90,\n leaf_font_size=12)\nplt.show()", "This dendrogram can help explain what happens as a result of the agglomerative method of hierarchical clustering. Starting at the bottom-most level, each node is assigned its own cluster. The closest pair of nodes (according to a distance function) are then merged into a new cluster. The distance matrix is recomputed, treating the merged cluster as an individual node. This process is repeated until the entire network has been merged into a single, large cluster, which the top level in the dendrogram above represents. 
You can now understand why this method is agglomerative.\nThe linkage function is used to determine the distance between a cluster and a node, or between two clusters, using the following possibilities:\n\nSingle: Merge two clusters with the smallest minimum pairwise distance.\nAverage: Merge two clusters with the smallest average pairwise distance.\nMaximum or complete: Merge the two clusters with the smallest maximum pairwise distance.\n\nNow, you can finally retrieve the clusters, based on the analysis of the dendrogram. In this post-processing, there are different ways of determining $k$, the number of clusters to partition the data into. Scipy's hierarchical flat clustering function - \"hierarchy.fcluster()\" - is used to assign cluster membership by specifying a distance threshold, or the number of clusters required. In the function definition (above), you have been provided with a utility function, \"get_cluster_membership()\", which does the latter.\nSelecting the number of clusters $k$ is, in general, an ill-posed problem. Different interpretations are possible, depending on the nature of the problem, the scale of the distribution of points in a data set, and the required clustering resolution. In agglomerative clustering, as used in the example above, you can get zero error for the objective function by considering each data point as its own cluster. Hence, the selection of $k$ invariably involves a trade-off maximum compression of the data (using a single cluster), and maximum accuracy by assigning each data point to its own cluster. The selection of an optimal $k$ can be done using automated techniques or manually.\nHere, identification of an appropriate cluster is ideally done manually as this has the advantages of gaining some insights into your data as well as providing an opportunity to perform sanity checks. To select the cluster size, look for a large shift in the distance metric. In our example with dendrograms plots shown above, say a case has been made for an ideal cutoff of 3.5. The number of clusters is then simply the number of intersections of a horizontal line (with height of 3.5) with the vertical lines of the dendrogram. Therefore, 3 clusters would be obtained in this case as shown below.", "fancy_dendrogram( Z, truncate_mode='lastp', p=12, leaf_rotation=90.,\n leaf_font_size=12.0,\n show_contracted=False,\n annotate_above=10,\n max_d=3.5)\nplt.show()\n\nopt_clust = 3\nopt_clust", "You can now assign the data to these \"opt_clust\" clusters.", "cluster_assignments = get_cluster_membership(Z, maxclust=opt_clust)", "The partitioned graph, corresponding to the dendrogram above, can now be visualized.", "clust = list(set(cluster_assignments.values()))\nclust\n\ncluster_centers = sorted(set(cluster_assignments.values()))\nfreq = [list(cluster_assignments.values()).count(x) for x in cluster_centers]\n\n# Creata a DataFrame object containing list of cluster centers and number of objects in each cluster\ndf = pd.DataFrame({'cluster_centres':cluster_centers, 'number_of_objects':freq})\ndf.head(10)", "<br>\n<div class=\"alert alert-info\">\n<b>Exercise 1 Start.</b>\n</div>\n\nInstructions\n\n\nHow many clusters are obtained after the final step of a generic agglomerative clustering algorithm (before post-processing)?\nNote: Post-processing involves determining the optimal clusters for the problem at hand.\n\n\nBased on your answer above, would you consider agglomerative clustering a top-down approach, or a bottom-up approach?\nWhich of the three linkage functions (i.e. 
single, average, or maximum or complete) do you think is likely to be most sensitive to outliers? \nHint: Look at this single-link and complete-link clustering resource.\n\n\n\n\nYour markdown answer here.\n\n\n\n\n\n<br>\n<div class=\"alert alert-info\">\n<b>Exercise 1 End.</b>\n</div>\n\n\nExercise complete:\n\n<br>\n<div class=\"alert alert-info\">\n<b>Exercise 2 [Advanced] Start.</b>\n</div>\n\nInstructions\n\nIn this exercise, you will investigate the structural properties of the clusters generated from above.\n\nAssign the values from your \"cluster_assignments\" to a Pandas DataFrame named \"df1\", with the column name \"cluster_label\".\nHint: The variable \"cluster_assignments\" is of type dict. You will need to get the values component of this dict, not the keys.\n\n\nAdd a field called \"participantID\" to \"df1\", and assign to this the index values from the previously-loaded \"call_adjmatrix\" DataFrame.\nLoad the DataFrame containing the centrality measures that you saved in Notebook 1 of this module, into \"df2\".\nPerform an inner join by merging \"df1\" and \"df2\" on the field \"participantID\". Assign the result of this join to variable \"df3\".\nPerform a groupby on \"df3\" (using \"cluster_label\" field), and then evaluate the mean of the four centrality measures (using the \"agg()\" method). Assign the aggregation result to \"df4\".\nReview \"df4\", and plot its barplot. \nMerge clusters which share the same mean values for a centrality measure into a single cluster. Assign the smallest value of the labels in the set to the merged cluster.\nNote:<br>\nCombine clusters such that, given a cluster with centrality measures $[x1, x2, x3, x4]$, and another cluster with centrality measures $[z1, z2, z3, z4]$, the following holds true:<br>\n$x1 = z1$ <br> $x2 = z2$ <br> $x3 = z3$ <br> $x4 = z4$<br>\n\n\nPrint the size of each cluster, in descending order, after performing the cluster merging in the preceding step.", "# Your code here.\n", "<br>\n<div class=\"alert alert-info\">\n<b>Exercise 2 [Advanced] End.</b>\n</div>\n\n\nExercise complete: \nThis is a good time to \"Save and Checkpoint\".\n\n3. Community detection\nCommunity detection is an important component in the analysis of large and complex networks. Identifying these subgraph structures helps in understanding organizational and functional characteristics of the underlying physical networks. In this section, you will study a few approaches that are widely used in community detection using graph representations.\n3.1 The Louvain modularity-maximization approach\nThe Louvain method is one of the most widely-used methods for detecting communities in large networks. It was developed by a team of researchers at the Université catholique de Louvain. The method can unveil hierarchies of communities, and allows you to zoom within communities in order to discover sub-communities, sub-sub-communities, and so forth. 
The modularity $Q_c$ quantifies how good a \"community\" or partition is, and is defined as follows:\n$$Q_c =\\frac{1}{2m}\\sum_{ij} \\left[ A_{ij}-\\frac{k_ik_j}{2m} \\right] \\delta(c_i, c_j)$$\nThe higher the $Q_c$ of a community is, the better the partition is.\nThe Louvain method is a greedy optimization method that attempts to optimize the \"modularity\" of a partition of the network via two steps:\n\nLocally optimize the modularity to identify \"small\" communities.\nAggregate nodes belonging to the same community, and create a new network with aggregated nodes as individual nodes.\n\nSteps 1 and 2 are then repeated until a maximum of modularity is reached, and the process produces a hierarchy of communities.\n3.2 Spectral graph partitioning\nSpectral graph partitioning and clustering is based on the spectrum — the eigenvalues and associated eigenvectors — of the Laplacian matrix that corresponds to a given graph. The approach is mathematically complex, but involves performing a $k$-means clustering on a spectral projection of the graph, with $k=2$ (using an adjacency matrix as the affinity). A schematic illustration of the process is depicted in the figure below.\nOptional: You can read more about spectral graph processing.\n\nNow, apply spectral graph partitioning to your call graph, and visualize the resulting community structure. You can read more about Scikit-Learn, and the Spectral Clustering function utilized in this section. Spectral graph partitioning needs input in the form of the number of clusters sought (default setting is 8). There are various approaches one can take to optimize the final number of clusters, depending on problem domain knowledge. Below you will use a value of $k=9$.", "# Create the spectral partition using the spectral clustering function from Scikit-Learn.\nspectral_partition = spc(call_adjmatrix.as_matrix(), 9, assign_labels='discretize')\n\npos = graphviz_layout(call_graph, prog='dot')\nnx.draw_networkx_nodes(call_graph, pos, cmap=plt.cm.RdYlBu, node_color=spectral_partition)\nnx.draw_networkx_edges(call_graph, pos, alpha=0.5)\nplt.axis('off')\nplt.show()\n\nprint(spectral_partition)", "<br>\n<div class=\"alert alert-info\">\n<b>Exercise 3 [Advanced] Start.</b>\n</div>\n\nInstructions\nCompute the size of each of the clusters obtained using the spectral graph partitioning method.", "# Your code here.", "<br>\n<div class=\"alert alert-info\">\n<b>Exercise 3 [Advanced] End.</b>\n</div>\n\n\nExercise complete:\nThis is a good time to \"Save and Checkpoint\".\n\n4. Submit your notebook\nPlease make sure that you:\n- Perform a final \"Save and Checkpoint\";\n- Download a copy of the notebook in \".ipynb\" format to your local machine using \"File\", \"Download as\", and \"IPython Notebook (.ipynb)\"; and\n- Submit a copy of this file to the Online Campus." ]
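Section 3.1 above describes the Louvain method but the notebook itself contains no code for it. As a hedged sketch only: if the third-party python-louvain package is available (an assumption; it is not imported anywhere in this notebook), the method could be applied to the call graph along these lines:

```python
# Minimal sketch of Louvain community detection on the call graph, assuming the
# third-party python-louvain package is installed (pip install python-louvain).
import community  # python-louvain
from collections import Counter

# best_partition() greedily maximises modularity and returns {node: community_id}.
louvain_partition = community.best_partition(call_graph)

# Modularity Q_c of the partition that was found.
print('Louvain modularity: %.3f' % community.modularity(louvain_partition, call_graph))

# Community sizes.
print(Counter(louvain_partition.values()))
```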
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
mjbommar/cscs-530-w2016
samples/cscs530-w2015-midterm-sample1.ipynb
bsd-2-clause
[ "Midterm\nGoal\nI will explore whether a network-theory driven approach shown to improve the efficiency of an agricultural extension program is sensitive to the models and parameters originally used.\nJustification\nSocial networks have been shown to be important vehicles for the transmission of new agricultural methods or 'technologies' (Bandiera and Rasul 2006, Conley and Udry 2010). These types of dynamics and time-varying agent behavior are best captured with through network modeling.\nMy project is based off a recent paper which used network modeling in conjunction with a large-scale field experiment (Beaman et al 2014). I wish to test the robustness of the findings of their model and so will employ a similar network modeling method.\nBackground on base paper\nBeaman and co-authors aimed to improve the rollout of an agricultural extension program using predictions from network theory to optimally select 'seed farmers'. 'Seed farmers' are the select farmers in a village that the agricultural extension program trains. Because it is costly to train farmers in this way, it is most efficient to pick seed farmers such that their adoption of the agricultural technology will lead to the greatest spread of the technology throughout the village. \nBeaman and coauthors first elicit the social networks of various rural villages. Then under the condition that the extension program only trains two farmers in each village, they take every possible combination of two nodes in a village network and simulate an information diffusion process for 4 periods. They take a measure of information diffusion at the end of each simulation and the pair of nodes which gives the greatest diffusion is their optimal seeding pair. \nTheir findings are then used in a field experiment where a random half of total villages are seeded according to their simulated optimal seeds while the other half is seeded according to the extension program's default procedure, usually based off of a field officer's own knowledge of the village and its influential farmers. They find evidence that network-theory informed seeding leads to increased technological adoption over baseline seeding procedures. \nMy extensions and measures of interest\nI wish to recreate and expand upon their simulations in the following ways:\n- I will compare optimal seeds found with their method against optimal seeds found with an extended process of information diffusion. The extended process will include the possibility that households can reject a new technology even after being exposed to it by multiple connections. The original process assumes that a household will automatically adopt a technology after the number of connections who have adopted the technology passes a certain threshold\n- I will also sweep across the number of periods simulated and the alpha which the adoption threshold is normally distributed around to see if this produces alternate optimal seeds.\nOutline\nThe original paper looks at rural village in Malawi. I do not have access to their network data but I have a dataset of social graphs from 74 villages in South India. Though there may be differences in network structure between villages in these two locations, I will assume they are reasonably comparable.\nFirst, I will recreate results from Beaman et al by selecting all combinations of node pairs in a subset of 25 villages. For each pair, I will run them through a information diffusion simulation for {3,4,5,6} steps. I will also sweep through values {1,2,3} for a alpha parameter. 
Each household has an adoption threshold, T, which determines whether they adopt the new technology or not. If X number of connections have adopted the technology and X=>T, then the household will adopt the new technology in the next period. Each household independently drawns a threshold from a normal distribution N(alpha, 0.5) bounded positive, so sweeping through alpha parameters will push up and down the distribution of household thresholds T. \nTo mitigate stochasticity, I will repeat 2000 times, and take an average measure of information diffusion (given by percent of households adopted at last step). The pair of nodes which give the greatest information difussion are my theory-driven seed farmers equivalent to those found in Beaman et al. I will examine whether the determination of these optimal seed farmers depends on the number of steps run and the alpha parameter used. Then, I will run the same simulations except using the extended information diffusion process described above. I want to see whether seed farmers selected through this method are different than those selected by Beaman's process. For the midterm, I will concentrate on coding the re-creation of method from Beaman et al.\nI. Space\nI will model space with an undirected social network. Each node represents a rural household and each edge represents a social connection. \nII. Actors\nEach node in my network is a household. They are modeled simply and have only a few properties:\n- id: household id\n- adopted: whether they have adopted the new technology or not\n- threshold: the threshold above which they will adopt the new technology in the next period. This threshold will be drawn from a normal distribution with mean alpha and standard deviation 0.5 which is bounded to be positive.\nIn each step, each unadopted household will count the number of connections who have adopted the new technology. If this count exceeds a household's adoption threshold, it will also adopt the technology in the next period.\nIII. Model Wrapper\nI will wrap my model in a function which loops through each village, and in each village, loops through every possible pair of nodes. Then, I will sweep through my parameters, number of steps and alpha. I will repeat this under the alternate information diffusion process. I will also determine and collect optimal seeds here.\nIV. Initial Conditions\nEach model will start with a list of adopted households. In the first step, only seed households will be in this list which will be read in through the wrapper.\nV. Model Parameters\nMy model will have the following parameters:\n- network: adjacency matrix that is read in from wrapper\n- alpha: parameter determining distribution of adoption threshold\n- HH_adopted: list of adopted households, in first step these are seed households given by wrapper\n- HH_not_adopted: list of all not adopted households", "#Imports\n\n%matplotlib inline\n\n# Standard imports\nimport copy\nimport itertools\n\n# Scientific computing imports\nimport numpy\nimport matplotlib.pyplot as plt\nimport networkx\nimport pandas\nimport seaborn; seaborn.set()\nimport scipy.stats as stats\n\n\n# Import widget methods\nfrom IPython.html.widgets import *", "Household class\nBelow is a rough draft of the household class. It only has one component:\n\nconstructor: class constructor, which \"initializes\" or \"creates\" the household when we call Household(). 
This is in the init method.", "class Household(object):\n \"\"\"\n Household class, which encapsulates the entire behavior of a household.\n \"\"\"\n \n def __init__(self, model, household_id, adopted=False, threshold=1):\n \"\"\"\n Constructor for HH class. By default,\n * not adopted\n * threshold = 1\n \n Must \"link\" the Household to their \"parent\" Model object.\n \"\"\"\n # Set model link and ID\n self.model = model\n self.household_id = household_id\n \n # Set HH parameters.\n self.adopted = adopted\n self.threshold = threshold\n\n def __repr__(self):\n '''\n Return string representation.\n '''\n skip_none = True\n repr_string = type(self).__name__ + \" [\"\n except_list = \"model\"\n\n elements = [e for e in dir(self) if str(e) not in except_list]\n for e in elements:\n # Make sure we only display \"public\" fields; skip anything private (_*), that is a method/function, or that is a module.\n if not e.startswith(\"_\") and eval('type(self.{0}).__name__'.format(e)) not in ['DataFrame', 'function', 'method', 'builtin_function_or_method', 'module', 'instancemethod']:\n value = eval(\"self.\" + e)\n if value != None and skip_none == True:\n repr_string += \"{0}={1}, \".format(e, value)\n\n # Clean up trailing space and comma.\n return repr_string.strip(\" \").strip(\",\") + \"]\"", "Model class\nBelow, we will define our model class. This can be broken up as follows:\n- constructor: class constructor, which \"initializes\" or \"creates\" the model when we call Model(). This is in the init method.\n- setup_network: sets up graph\n- setup_households: sets up households\n- get_neighborhood: defines a function to get a list of connected nodes\n- step_adopt_decision: method to step through household decision\n- step: main step method", "class Model(object):\n \"\"\"\n Model class, which encapsulates the entire behavior of a single \"run\" in network model.\n \"\"\"\n \n def __init__(self, network, alpha, HH_adopted, HH_not_adopted):\n \"\"\"\n Class constructor.\n \"\"\"\n # Set our model parameters\n self.network = network\n self.alpha = alpha\n self.HH_adopted = HH_adopted\n self.HH_not_adopted = HH_not_adopted\n \n # Set our state variables\n self.t = 0\n self.households = []\n \n # Setup our history variables.\n self.history_adopted = []\n self.history_not_adopted = []\n self.percent_adopted = 0\n \n # Call our setup methods\n self.setup_network()\n self.setup_household()\n \n def setup_network(self):\n \"\"\"\n Method to setup network.\n \"\"\"\n ## need to flesh this out. will network be an input given from wrapper? 
\n ## what do I need to do to set up network?\n g = network\n \n def setup_households(self):\n \"\"\"\n Method to setup households.\n \"\"\"\n num_households = nx.nodes(g)\n # Create all households.\n for i in xrange(self.num_households):\n self.households.append(Household(model=self,\n household_id=i,\n adopted=False,\n threshold=stats.truncnorm.rvs((0 - alpha) / 0.5, (alpha) / 0.5, loc=alpha, scale=0.5,size=1) \n\n \n \n \n def get_neighborhood(self, x):\n \"\"\"\n Get a list of connected nodes.\n \"\"\"\n neighbors = []\n for i in g.neighbors(x):\n neighbors.append(i)\n return neighbors\n \n def step_adopt_decision(self):\n \n \"\"\"\n Model a household evaluating their connections and making an adopt/not adopt decision\n \"\"\"\n will_adopt = []\n for i in HH_not_adopted:\n adopt_count = 0\n for j in get_neighborhood(i):\n if j.adopted:\n adopt_count+=1\n if adopt_count >= i.threshold:\n will_adopt.append(i) \n \n \n \n def step(self):\n \"\"\"\n Model step function.\n \"\"\"\n \n # Adoption decision\n self.step_adopt_decision()\n \n # Increment steps and track history.\n self.t += 1\n self.HH_adopted.append(will_adopt)\n self.HH_not_adopted.remove(will_adopt)\n self.history_adopted.append(self.HH_adopted)\n self.history_not_adopted.append(self.HH_not_adopted)\n self.percent_adopted = len(HH_adopted)/len(households)\n\n\n def __repr__(self):\n '''\n Return string representation.\n '''\n skip_none = True\n repr_string = type(self).__name__ + \" [\"\n\n elements = dir(self)\n for e in elements:\n # Make sure we only display \"public\" fields; skip anything private (_*), that is a method/function, or that is a module.\n e_type = eval('type(self.{0}).__name__'.format(e))\n if not e.startswith(\"_\") and e_type not in ['DataFrame', 'function', 'method', 'builtin_function_or_method', 'module', 'instancemethod']:\n value = eval(\"self.\" + e)\n if value != None and skip_none == True:\n if e_type in ['list', 'set', 'tuple']:\n repr_string += \"\\n\\n\\t{0}={1},\\n\\n\".format(e, value)\n elif e_type in ['ndarray']:\n repr_string += \"\\n\\n\\t{0}=\\t\\n{1},\\n\\n\".format(e, value)\n else:\n repr_string += \"{0}={1}, \".format(e, value)\n\n # Clean up trailing space and comma.\n return repr_string.strip(\" \").strip(\",\") + \"]\"", "Wrapper with parameter sweep\nBelow is the code which wrappers around the model. 
It does the following:\n- Loops through all villages we wish to examine\n - Pulls network data from a csv and puts in the appropriate format\n- Loops through all possible pairs of nodes within each village\n- Sweeps through alpha and number of steps parameters\n- Runs 2000 samples", "\n## cycle through villages:\n## (need to create village list where each item points to a different csv file)\nnum_samples = 2000\n\nfor fn in village_list:\n village = np.genfromtxt(fn, delimiter=\",\")\n network = from_numpy_matrix(village)\n for HH_adopted in itertools.combinations(nx.nodes(network),2):\n HH_not_adopted = [node for node in nx.nodes(network) if node not in HH_adopted]\n for alpha in [1,2,3]:\n for num_steps in [3,4,5,6]:\n for n in xrange(num_samples):\n m = Model(network, alpha, HH_adopted, HH_not_adopted)\n for t in xrange(num_steps):\n m.step() \n## I need to collect adoption rate at each final step and average over all samples\n## I am not sure where to fit this in\n\n\n## I also need to write a function which determines optimal seed pairing\n\n#######\n#######", "Results\nI hope to present charts which list optimal seed pairings at each parameter level and information diffusion process. This means optimal pairing will be given for alpha = {1,2,3} for each of num_steps = {3,4,5,6} and this will be done for both the original information diffusion process and the extended process.\nHypothetical results\nI expect that the optimal seeding is dependent upon the alpha parameter though I suspect it may not be as dependent upon the number of steps parameter.\nI am more interested in whether optimal seeds given by the extended information diffusion process are different than those given by the original process. I am actually not sure but I suspect that they will be different. If so, they could provide improved predictions to test in the field which hopefully may lead to even more efficient seed farmer targeting." ]
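Because scipy's stats.truncnorm expresses its truncation bounds a and b in standard-deviation units relative to loc and scale, a draw from N(alpha, 0.5) that is bounded below at zero only (the behaviour described in section II above) could be set up as in the following minimal sketch; the specific values used here are illustrative assumptions, not values taken from the draft model:

```python
# Minimal sketch of drawing adoption thresholds from N(alpha, 0.5) truncated to
# positive values only. truncnorm's bounds are given in standard deviations
# relative to loc/scale: a = (lower - loc) / scale, b = (upper - loc) / scale.
import numpy as np
import scipy.stats as stats

alpha = 2.0                  # example value from the sweep {1, 2, 3}
scale = 0.5
lower, upper = 0.0, np.inf   # "bounded positive", no upper bound
a, b = (lower - alpha) / scale, (upper - alpha) / scale

thresholds = stats.truncnorm.rvs(a, b, loc=alpha, scale=scale, size=10)
print(thresholds)            # all draws are strictly positive
```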
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
OceanPARCELS/parcels
parcels/examples/tutorial_sampling.ipynb
mit
[ "Field sampling tutorial\nThe particle trajectories allow us to study fields like temperature, plastic concentration or chlorophyll from a Lagrangian perspective. \nIn this tutorial we will go through how particles can sample Fields, using temperature as an example. Along the way we will get to know the parcels class Variable (see here for the documentation) and some of its methods. This tutorial covers several applications of a sampling setup:\n* Basic along trajectory sampling\n* Sampling initial conditions\n* Sampling initial and along-trajectory values with repeated release\nBasic sampling\nWe import the Variable class as well as the standard modules needed to set up a simulation.", "# Modules needed for the Parcels simulation\nfrom parcels import Variable, FieldSet, ParticleSet, JITParticle, AdvectionRK4\nimport numpy as np\nfrom datetime import timedelta as delta\n\n# To open and look at the temperature data\nimport xarray as xr \nimport matplotlib as mpl\nimport matplotlib.pyplot as plt", "Suppose we want to study the environmental temperature for plankton drifting around a peninsula. We have a dataset with surface ocean velocities and the corresponding sea surface temperature stored in netcdf files in the folder \"Peninsula_data\". Besides the velocity fields, we load the temperature field using extra_fields={'T': 'T'}. The particles are released on the left hand side of the domain.", "# Velocity and temperature fields\nfieldset = FieldSet.from_parcels(\"Peninsula_data/peninsula\", extra_fields={'T': 'T'}, allow_time_extrapolation=True)\n\n# Particle locations and initial time\nnpart = 10 # number of particles to be released\nlon = 3e3 * np.ones(npart)\nlat = np.linspace(3e3 , 45e3, npart, dtype=np.float32)\ntime = np.arange(0, npart) * delta(hours=2).total_seconds() # release each particle two hours later\n\n# Plot temperature field and initial particle locations\nT_data = xr.open_dataset(\"Peninsula_data/peninsulaT.nc\")\nplt.figure()\nax = plt.axes()\nT_contour = ax.contourf(T_data.x.values, T_data.y.values, T_data.T.values[0,0], cmap=plt.cm.inferno)\nax.scatter(lon, lat, c='w')\nplt.colorbar(T_contour, label='T [$^{\\circ} C$]')\nplt.show()", "To sample the temperature field, we need to create a new class of particles where temperature is a Variable. As an argument for the Variable class, we need to provide the initial values for the particles. The easiest option is to access fieldset.T, but this option has some drawbacks.", "class SampleParticle(JITParticle): # Define a new particle class\n temperature = Variable('temperature', initial=fieldset.T) # Variable 'temperature' initialised by sampling the temperature\n\npset = ParticleSet(fieldset=fieldset, pclass=SampleParticle, lon=lon, lat=lat, time=time)", "Using fieldset.T leads to the WARNING displayed above because Variable accesses the fieldset in the slower SciPy mode. 
Another problem can occur when using the repeatdt argument instead of time:\n<a id='repeatdt_error'></a>", "repeatdt = delta(hours=3)\n\npset = ParticleSet(fieldset=fieldset, pclass=SampleParticle, lon=lon, lat=lat, repeatdt=repeatdt)", "Since the initial time is not defined, the Variable class does not know at what time to access the temperature field.\nThe solution to this initialisation problem is to leave the initial value zero and sample the initial condition in JIT mode with the sampling Kernel:", "class SampleParticleInitZero(JITParticle): # Define a new particle class\n temperature = Variable('temperature', initial=0) # Variable 'temperature' initially zero\n\npset = ParticleSet(fieldset=fieldset, pclass=SampleParticleInitZero, lon=lon, lat=lat, time=time)\n\ndef SampleT(particle, fieldset, time):\n particle.temperature = fieldset.T[time, particle.depth, particle.lat, particle.lon]\nsample_kernel = pset.Kernel(SampleT) # Casting the SampleT function to a kernel.", "To sample the initial values we can execute the Sample kernel over the entire particleset with dt = 0 so that time does not increase", "pset.execute(sample_kernel, dt=0) # by only executing the sample kernel we record the initial temperature of the particles\n\noutput_file = pset.ParticleFile(name=\"InitZero.nc\", outputdt=delta(hours=1))\npset.execute(AdvectionRK4 + sample_kernel, runtime=delta(hours=30), dt=delta(minutes=5),\n output_file=output_file)\noutput_file.export() # export the trajectory data to a netcdf file\noutput_file.close()", "The particle dataset now contains the particle trajectories and the corresponding environmental temperature", "Particle_data = xr.open_dataset(\"InitZero.nc\")\n\nplt.figure()\nax = plt.axes()\nax.set_ylabel('Y')\nax.set_xlabel('X')\nax.set_ylim(1000, 49000)\nax.set_xlim(1000, 99000)\nax.plot(Particle_data.lon.transpose(), Particle_data.lat.transpose(), c='k', zorder=1)\nT_scatter = ax.scatter(Particle_data.lon, Particle_data.lat, c=Particle_data.temperature, \n cmap=plt.cm.inferno, norm=mpl.colors.Normalize(vmin=0., vmax=20.), \n edgecolor='k', zorder=2)\nplt.colorbar(T_scatter, label='T [$^{\\circ} C$]')\nplt.show()", "Sampling initial values\nIn some simulations only the particles initial value within the field is of interest: the variable does not need to be known along the entire trajectory. To reduce computing we can specify the to_write argument to the temperature Variable. This argument can have three values: True, False or 'once'. It determines whether to write the Variable to the output file. 
If we want to know only the initial value, we can enter 'once' and only the first value will be written to the output file.", "class SampleParticleOnce(JITParticle): # Define a new particle class\n temperature = Variable('temperature', initial=0, to_write='once') # Variable 'temperature'\n \npset = ParticleSet(fieldset=fieldset, pclass=SampleParticleOnce, lon=lon, lat=lat, time=time)\n\npset.execute(sample_kernel, dt=0) # by only executing the sample kernel we record the initial temperature of the particles\n\noutput_file = pset.ParticleFile(name=\"WriteOnce.nc\", outputdt=delta(hours=1))\npset.execute(AdvectionRK4, runtime=delta(hours=24), dt=delta(minutes=5),\n output_file=output_file)\noutput_file.close()", "Since all the particles are released at the same x-position and the temperature field is invariant in the y-direction, all particles have an initial temperature of 0.4$^\\circ$C", "Particle_data = xr.open_dataset(\"WriteOnce.nc\")\n\nplt.figure()\nax = plt.axes()\nax.set_ylabel('Y')\nax.set_xlabel('X')\nax.set_ylim(1000, 49000)\nax.set_xlim(1000, 99000)\nax.plot(Particle_data.lon.transpose(), Particle_data.lat.transpose(), c='k', zorder=1)\nT_scatter = ax.scatter(Particle_data.lon, Particle_data.lat, \n c=np.tile(Particle_data.temperature, (Particle_data.lon.shape[1], 1)).T,\n cmap=plt.cm.inferno, norm=mpl.colors.Normalize(vmin=0., vmax=1.), \n edgecolor='k', zorder=2)\nplt.colorbar(T_scatter, label='Initial T [$^{\\circ} C$]')\nplt.show()", "Sampling with repeatdt\nSome experiments require large sets of particles to be released repeatedly on the same locations. The particleset object has the option repeatdt for this, but when you want to sample the initial values this introduces some problems as we have seen here. For more advanced control over the repeated release of particles, you can manually write a for-loop using the function particleset.add(). Note that this for-loop is very similar to the one that repeatdt would execute under the hood in particleset.execute().\nAdding particles to the particleset during the simulation reduces the memory used compared to specifying the delayed particle release times upfront, which improves the computational speed. In the loop, we want to initialise new particles and sample their initial temperature. If we want to write both the initialised particles with the sampled temperature and the older particles that have already been advected, we have to make sure both sets of particles find themselves at the same moment in time. The initial conditions must be written to the output file before advecting them, because during advection the particle.time will increase.\nWe do not specify the outputdt argument for the output_file and instead write the data with output_file.write(pset, time) on each iteration. A new particleset is initialised whenever time is a multiple of repeatdt. 
Because the particles are advected after being written, the last displacement must be written once more after the loop.", "outputdt = delta(hours=1).total_seconds() # write the particle data every hour\nrepeatdt = delta(hours=6).total_seconds() # release each set of particles six hours later\nruntime = delta(hours=24).total_seconds() \n\npset = ParticleSet(fieldset=fieldset, pclass=SampleParticleInitZero, lon=[], lat=[], time=[]) # Using SampleParticleInitZero\nkernels = AdvectionRK4 + sample_kernel\n\noutput_file = pset.ParticleFile(name=\"RepeatLoop.nc\") # Do not specify the outputdt yet, so we can manually write the output\n\nfor time in np.arange(0, runtime, outputdt):\n if np.isclose(np.fmod(time, repeatdt), 0): # time is a multiple of repeatdt\n pset_init = ParticleSet(fieldset=fieldset, pclass=SampleParticleInitZero, lon=lon, lat=lat, time=time)\n pset_init.execute(sample_kernel, dt=0) # record the initial temperature of the particles\n pset.add(pset_init) # add the newly released particles to the total particleset\n \n output_file.write(pset,time) # write the initialised particles and the advected particles\n\n pset.execute(kernels, runtime=outputdt, dt=delta(minutes=5))\n print('Length of pset at time %d: %d' % (time, len(pset)))\n \noutput_file.write(pset, time+outputdt) \n\noutput_file.close()", "In each iteration of the loop, spanning six hours, we have added ten particles.", "Particle_data = xr.open_dataset(\"RepeatLoop.nc\")\nprint(Particle_data.time[:,0].values / np.timedelta64(1, 'h')) # The initial hour at which each particle is released\nassert np.allclose(Particle_data.time[:,0].values / np.timedelta64(1, 'h'), [int(k/10)*6 for k in range(40)])", "Let's check if the initial temperatures were sampled correctly for all particles", "print(Particle_data.temperature[:,0].values)\nassert np.allclose(Particle_data.temperature[:,0].values, Particle_data.temperature[:,0].values[0])", "And see if the sampling of the temperature field is done correctly along the trajectories", "Release0 = Particle_data.where(Particle_data.time[:,0]==np.timedelta64(0, 's')) # the particles released at t = 0\n\nplt.figure()\nax = plt.axes()\nax.set_ylabel('Y')\nax.set_xlabel('X')\nax.set_ylim(1000, 49000)\nax.set_xlim(1000, 99000)\nax.plot(Release0.lon.transpose(), Release0.lat.transpose(), c='k', zorder=1)\nT_scatter = ax.scatter(Release0.lon, Release0.lat, c=Release0.temperature, \n cmap=plt.cm.inferno, norm=mpl.colors.Normalize(vmin=0., vmax=20.), \n edgecolor='k', zorder=2)\nplt.colorbar(T_scatter, label='T [$^{\\circ} C$]')\nplt.show()" ]
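As a quick sanity check on the along-trajectory sampling, the sampled temperature can also be plotted against elapsed time for every trajectory. This is a minimal sketch, assuming the (trajectory, observation) output layout already used in the cells above, where Particle_data.time and Particle_data.temperature are 2-D arrays:

```python
# Sampled temperature along every trajectory versus elapsed time. Observations
# recorded before a particle's release are NaT/NaN and simply leave gaps.
plt.figure()
elapsed_hours = Particle_data.time.values / np.timedelta64(1, 'h')
for traj in range(Particle_data.temperature.shape[0]):
    plt.plot(elapsed_hours[traj], Particle_data.temperature.values[traj], alpha=0.5)
plt.xlabel('time since start of simulation [hours]')
plt.ylabel('sampled T [$^{\circ} C$]')
plt.show()
```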
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
Luke035/dlnd-lessons
gan_mnist/Intro_to_GANs_Solution.ipynb
mit
[ "Generative Adversarial Network\nIn this notebook, we'll be building a generative adversarial network (GAN) trained on the MNIST dataset. From this, we'll be able to generate new handwritten digits!\nGANs were first reported on in 2014 from Ian Goodfellow and others in Yoshua Bengio's lab. Since then, GANs have exploded in popularity. Here are a few examples to check out:\n\nPix2Pix \nCycleGAN\nA whole list\n\nThe idea behind GANs is that you have two networks, a generator $G$ and a discriminator $D$, competing against each other. The generator makes fake data to pass to the discriminator. The discriminator also sees real data and predicts if the data it's received is real or fake. The generator is trained to fool the discriminator, it wants to output data that looks as close as possible to real data. And the discriminator is trained to figure out which data is real and which is fake. What ends up happening is that the generator learns to make data that is indistiguishable from real data to the discriminator.\n\nThe general structure of a GAN is shown in the diagram above, using MNIST images as data. The latent sample is a random vector the generator uses to contruct it's fake images. As the generator learns through training, it figures out how to map these random vectors to recognizable images that can foold the discriminator.\nThe output of the discriminator is a sigmoid function, where 0 indicates a fake image and 1 indicates an real image. If you're interested only in generating new images, you can throw out the discriminator after training. Now, let's see how we build this thing in TensorFlow.", "%matplotlib inline\n\nimport pickle as pkl\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n\ntf.__version__\n\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets('MNIST_data')", "Model Inputs\nFirst we need to create the inputs for our graph. We need two inputs, one for the discriminator and one for the generator. Here we'll call the discriminator input inputs_real and the generator input inputs_z. We'll assign them the appropriate sizes for each of the networks.", "def model_inputs(real_dim, z_dim):\n inputs_real = tf.placeholder(tf.float32, (None, real_dim), name='input_real') \n inputs_z = tf.placeholder(tf.float32, (None, z_dim), name='input_z')\n \n return inputs_real, inputs_z", "Generator network\n\nHere we'll build the generator network. To make this network a universal function approximator, we'll need at least one hidden layer. We should use a leaky ReLU to allow gradients to flow backwards through the layer unimpeded. A leaky ReLU is like a normal ReLU, except that there is a small non-zero output for negative input values.\nVariable Scope\nHere we need to use tf.variable_scope for two reasons. Firstly, we're going to make sure all the variable names start with generator. Similarly, we'll prepend discriminator to the discriminator variables. This will help out later when we're training the separate networks.\nWe could just use tf.name_scope to set the names, but we also want to reuse these networks with different inputs. For the generator, we're going to train it, but also sample from it as we're training and after training. The discriminator will need to share variables between the fake and real input images. 
So, we can use the reuse keyword for tf.variable_scope to tell TensorFlow to reuse the variables instead of creating new ones if we build the graph again.\nTo use tf.variable_scope, you use a with statement:\npython\nwith tf.variable_scope('scope_name', reuse=False):\n # code here\nHere's more from the TensorFlow documentation to get another look at using tf.variable_scope.\nLeaky ReLU\nTensorFlow doesn't provide an operation for leaky ReLUs, so we'll need to make one . For this you can use take the outputs from a linear fully connected layer and pass them to tf.maximum. Typically, a parameter alpha sets the magnitude of the output for negative values. So, the output for negative input (x) values is alpha*x, and the output for positive x is x:\n$$\nf(x) = max(\\alpha * x, x)\n$$\nTanh Output\nThe generator has been found to perform the best with $tanh$ for the generator output. This means that we'll have to rescale the MNIST images to be between -1 and 1, instead of 0 and 1.", "def generator(z, out_dim, n_units=128, reuse=False, alpha=0.01):\n with tf.variable_scope('generator', reuse=reuse):\n # Hidden layer\n h1 = tf.layers.dense(z, n_units, activation=None)\n # Leaky ReLU\n h1 = tf.maximum(alpha * h1, h1)\n \n # Logits and tanh output\n logits = tf.layers.dense(h1, out_dim, activation=None)\n out = tf.tanh(logits)\n \n return out", "Discriminator\nThe discriminator network is almost exactly the same as the generator network, except that we're using a sigmoid output layer.", "def discriminator(x, n_units=128, reuse=False, alpha=0.01):\n with tf.variable_scope('discriminator', reuse=reuse):\n # Hidden layer\n h1 = tf.layers.dense(x, n_units, activation=None)\n # Leaky ReLU\n h1 = tf.maximum(alpha * h1, h1)\n \n logits = tf.layers.dense(h1, 1, activation=None)\n out = tf.sigmoid(logits)\n \n return out, logits", "Hyperparameters", "# Size of input image to discriminator\ninput_size = 784\n# Size of latent vector to generator\nz_size = 100\n# Sizes of hidden layers in generator and discriminator\ng_hidden_size = 128\nd_hidden_size = 128\n# Leak factor for leaky ReLU\nalpha = 0.01\n# Smoothing \nsmooth = 0.1", "Build network\nNow we're building the network from the functions defined above.\nFirst is to get our inputs, input_real, input_z from model_inputs using the sizes of the input and z.\nThen, we'll create the generator, generator(input_z, input_size). This builds the generator with the appropriate input and output sizes.\nThen the discriminators. We'll build two of them, one for real data and one for fake data. Since we want the weights to be the same for both real and fake data, we need to reuse the variables. For the fake data, we're getting it from the generator as g_model. So the real data discriminator is discriminator(input_real) while the fake discriminator is discriminator(g_model, reuse=True).", "tf.reset_default_graph()\n# Create our input placeholders\ninput_real, input_z = model_inputs(input_size, z_size)\n\n# Build the model\ng_model = generator(input_z, input_size, n_units=g_hidden_size, alpha=alpha)\n# g_model is the generator output\n\nd_model_real, d_logits_real = discriminator(input_real, n_units=d_hidden_size, alpha=alpha)\nd_model_fake, d_logits_fake = discriminator(g_model, reuse=True, n_units=d_hidden_size, alpha=alpha)", "Discriminator and Generator Losses\nNow we need to calculate the losses, which is a little tricky. For the discriminator, the total loss is the sum of the losses for real and fake images, d_loss = d_loss_real + d_loss_fake. 
The losses will be sigmoid cross-entropies, which we can get with tf.nn.sigmoid_cross_entropy_with_logits. We'll also wrap that in tf.reduce_mean to get the mean for all the images in the batch. So the losses will look something like \npython\ntf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))\nFor the real image logits, we'll use d_logits_real which we got from the discriminator in the cell above. For the labels, we want them to be all ones, since these are all real images. To help the discriminator generalize better, the labels are reduced a bit from 1.0 to 0.9, for example, using the parameter smooth. This is known as label smoothing, typically used with classifiers to improve performance. In TensorFlow, it looks something like labels = tf.ones_like(tensor) * (1 - smooth)\nThe discriminator loss for the fake data is similar. The logits are d_logits_fake, which we got from passing the generator output to the discriminator. These fake logits are used with labels of all zeros. Remember that we want the discriminator to output 1 for real images and 0 for fake images, so we need to set up the losses to reflect that.\nFinally, the generator loss also uses d_logits_fake, the fake image logits. But, now the labels are all ones. The generator is trying to fool the discriminator, so it wants the discriminator to output ones for fake images.", "# Calculate losses\nd_loss_real = tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_real, \n labels=tf.ones_like(d_logits_real) * (1 - smooth)))\nd_loss_fake = tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake, \n labels=tf.zeros_like(d_logits_real)))\nd_loss = d_loss_real + d_loss_fake\n\ng_loss = tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake,\n labels=tf.ones_like(d_logits_fake)))", "Optimizers\nWe want to update the generator and discriminator variables separately. So we need to get the variables for each part and build optimizers for the two parts. To get all the trainable variables, we use tf.trainable_variables(). This creates a list of all the variables we've defined in our graph.\nFor the generator optimizer, we only want the generator variables. Our past selves were nice and used a variable scope to start all of our generator variable names with generator. So, we just need to iterate through the list from tf.trainable_variables() and keep the variables that start with generator. Each variable object has an attribute name which holds the name of the variable as a string (var.name == 'weights_0' for instance). \nWe can do something similar with the discriminator. All the variables in the discriminator start with discriminator.\nThen, in the optimizer we pass the variable lists to var_list in the minimize method. This tells the optimizer to only update the listed variables.
Something like tf.train.AdamOptimizer().minimize(loss, var_list=var_list) will only train the variables in var_list.", "# Optimizers\nlearning_rate = 0.002\n\n# Get the trainable_variables, split into G and D parts\nt_vars = tf.trainable_variables()\ng_vars = [var for var in t_vars if var.name.startswith('generator')]\nd_vars = [var for var in t_vars if var.name.startswith('discriminator')]\n\nd_train_opt = tf.train.AdamOptimizer(learning_rate).minimize(d_loss, var_list=d_vars)\ng_train_opt = tf.train.AdamOptimizer(learning_rate).minimize(g_loss, var_list=g_vars)", "Training", "batch_size = 100\nepochs = 100\nsamples = []\nlosses = []\n# Only save generator variables\nsaver = tf.train.Saver(var_list=g_vars)\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n for e in range(epochs):\n for ii in range(mnist.train.num_examples//batch_size):\n batch = mnist.train.next_batch(batch_size)\n \n # Get images, reshape and rescale to pass to D\n batch_images = batch[0].reshape((batch_size, 784))\n batch_images = batch_images*2 - 1\n \n # Sample random noise for G\n batch_z = np.random.uniform(-1, 1, size=(batch_size, z_size))\n \n # Run optimizers\n _ = sess.run(d_train_opt, feed_dict={input_real: batch_images, input_z: batch_z})\n _ = sess.run(g_train_opt, feed_dict={input_z: batch_z})\n \n # At the end of each epoch, get the losses and print them out\n train_loss_d = sess.run(d_loss, {input_z: batch_z, input_real: batch_images})\n train_loss_g = g_loss.eval({input_z: batch_z})\n \n print(\"Epoch {}/{}...\".format(e+1, epochs),\n \"Discriminator Loss: {:.4f}...\".format(train_loss_d),\n \"Generator Loss: {:.4f}\".format(train_loss_g)) \n # Save losses to view after training\n losses.append((train_loss_d, train_loss_g))\n \n # Sample from generator as we're training for viewing afterwards\n sample_z = np.random.uniform(-1, 1, size=(16, z_size))\n gen_samples = sess.run(\n generator(input_z, input_size, n_units=g_hidden_size, reuse=True, alpha=alpha),\n feed_dict={input_z: sample_z})\n samples.append(gen_samples)\n saver.save(sess, './checkpoints/generator.ckpt')\n\n# Save training generator samples\nwith open('train_samples.pkl', 'wb') as f:\n pkl.dump(samples, f)", "Training loss\nHere we'll check out the training losses for the generator and discriminator.", "fig, ax = plt.subplots()\nlosses = np.array(losses)\nplt.plot(losses.T[0], label='Discriminator')\nplt.plot(losses.T[1], label='Generator')\nplt.title(\"Training Losses\")\nplt.legend()", "Generator samples from training\nHere we can view samples of images from the generator. First we'll look at images taken while training.", "def view_samples(epoch, samples):\n fig, axes = plt.subplots(figsize=(7,7), nrows=4, ncols=4, sharey=True, sharex=True)\n for ax, img in zip(axes.flatten(), samples[epoch]):\n ax.xaxis.set_visible(False)\n ax.yaxis.set_visible(False)\n im = ax.imshow(img.reshape((28,28)), cmap='Greys_r')\n \n return fig, axes\n\n# Load samples from generator taken while training\nwith open('train_samples.pkl', 'rb') as f:\n samples = pkl.load(f)", "These are samples from the final training epoch. You can see the generator is able to reproduce numbers like 1, 7, 3, 2. Since this is just a sample, it isn't representative of the full range of images this generator can make.", "_ = view_samples(-1, samples)", "Below I'm showing the generated images as the network was training, every 10 epochs. 
With bonus optical illusion!", "rows, cols = 10, 6\nfig, axes = plt.subplots(figsize=(7,12), nrows=rows, ncols=cols, sharex=True, sharey=True)\n\nfor sample, ax_row in zip(samples[::int(len(samples)/rows)], axes):\n for img, ax in zip(sample[::int(len(sample)/cols)], ax_row):\n ax.imshow(img.reshape((28,28)), cmap='Greys_r')\n ax.xaxis.set_visible(False)\n ax.yaxis.set_visible(False)", "It starts out as all noise. Then it learns to make only the center white and the rest black. You can start to see some number like structures appear out of the noise like 1s and 9s.\nSampling from the generator\nWe can also get completely new images from the generator by using the checkpoint we saved after training. We just need to pass in a new latent vector $z$ and we'll get new samples!", "saver = tf.train.Saver(var_list=g_vars)\nwith tf.Session() as sess:\n saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))\n sample_z = np.random.uniform(-1, 1, size=(16, z_size))\n gen_samples = sess.run(\n generator(input_z, input_size, n_units=g_hidden_size, reuse=True, alpha=alpha),\n feed_dict={input_z: sample_z})\n_ = view_samples(0, [gen_samples])" ]
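Beyond drawing independent random latent vectors, a common way to inspect a trained generator is to interpolate between two points in latent space and look at how the generated digits morph. The following is a minimal sketch (not part of the original notebook) that reuses the checkpoint, the graph tensors, and the view_samples helper defined above:

```python
# Linearly interpolate between two latent vectors and pass the 16 mixtures
# through the trained generator, restoring the saved generator variables.
import numpy as np
import tensorflow as tf

z_a = np.random.uniform(-1, 1, size=(1, z_size))
z_b = np.random.uniform(-1, 1, size=(1, z_size))
weights = np.linspace(0, 1, 16).reshape(-1, 1)
z_interp = (1 - weights) * z_a + weights * z_b   # 16 points between z_a and z_b

saver = tf.train.Saver(var_list=g_vars)
with tf.Session() as sess:
    saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
    interp_samples = sess.run(
        generator(input_z, input_size, n_units=g_hidden_size, reuse=True, alpha=alpha),
        feed_dict={input_z: z_interp})

_ = view_samples(0, [interp_samples])
```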
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
opengridcc/opengrid
notebooks/Multi-variable Linear Regression Demo.ipynb
apache-2.0
[ "Multi-variable linear regression\nThe multivariable linear regression analysis is used to create a model of a single variable, typically an energy consumption. We call this the dependent variable. The model is constructed as a linear combination of explanatory variables, like weather measurements or occupation. More information can be found on <a href=\"https://en.wikipedia.org/wiki/Linear_regression\" target=\"_blank\">wikipedia</a>.\nThe model is static. This means that the data set should not contain dynamic effects. For buildings, dynamic effects are mostly neglegible on a weekly basis unless the building has a very high thermal inertia.\nTypical use of this analysis is to create a model of eg. the gas consumption of a building, and then use this model to detect and quantify changes in the gas consumption. For example, the savings resulting from a new gas boiler can be computed as the difference between the consumption predicted by the model and the actual consumption. \nImports and loading data", "import opengrid as og\nimport pandas as pd\nplt = og.plot_style()\n\ndf = og.datasets.get('gas_2016_hour')\n# for this demo, we only compute a model for the sensor 313b\ndf = df[['313b']]\n# load weather\ndfw = og.datasets.get('weather_2016_hour')", "Compute degree-days\nFirst we compute heating degree-days for different base temperatures. More information on the computation of degree-days can be found in this demo.", "%matplotlib inline\n\n# resample weather data to daily values and compute degree-days\ndfw = dfw.resample('D').mean()\ndfw_HDD = og.library.weather.compute_degree_days(ts=dfw['temperature'],\n heating_base_temperatures=range(8, 18, 2),\n cooling_base_temperatures=range(16, 26, 2)).bfill()\n\n# resample the gas consumption to daily values and add the weather data and the degree-days\ndf_day = df.resample('D').sum()/1000. # kWh/day\ndf_day = pd.concat([df_day, dfw, dfw_HDD], axis=1).loc['2016']", "Create a monthly model for the gas consumption", "# resample to monthly data and plot \ndf_month = df_day.resample('MS').sum()\n\n# create the model\nmvlr = og.MultiVarLinReg(df_month, endog='313b')\n\nprint(mvlr.fit.summary())\nmvlr.plot()" ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
woobe/h2o_tutorials
introduction_to_machine_learning/py_04b_classification_ensembles.ipynb
mit
[ "Machine Learning with H2O - Tutorial 4b: Classification Models (Ensembles)\n<hr>\n\nObjective:\n\nThis tutorial explains how to create stacked ensembles of classification models for better out-of-bag performance.\n\n<hr>\n\nTitanic Dataset:\n\nSource: https://www.kaggle.com/c/titanic/data\n\n<hr>\n\nSteps:\n\nBuild GBM models using random grid search and extract the best one.\nBuild DRF models using random grid search and extract the best one. \nUse model stacking to combining different models.\n\n<hr>\n\nFull Technical Reference:\n\nhttp://docs.h2o.ai/h2o/latest-stable/h2o-py/docs/modeling.html\nhttp://docs.h2o.ai/h2o/latest-stable/h2o-docs/data-science/stacked-ensembles.html\n\n<br>", "# Import all required modules\nimport h2o\nfrom h2o.estimators.gbm import H2OGradientBoostingEstimator\nfrom h2o.estimators.random_forest import H2ORandomForestEstimator\nfrom h2o.estimators.deeplearning import H2ODeepLearningEstimator\nfrom h2o.estimators.stackedensemble import H2OStackedEnsembleEstimator\nfrom h2o.grid.grid_search import H2OGridSearch\n\n# Start and connect to a local H2O cluster\nh2o.init(nthreads = -1)", "<br>", "# Import Titanic data (local CSV)\ntitanic = h2o.import_file(\"kaggle_titanic.csv\")\ntitanic.head(5)\n\n# Convert 'Survived' and 'Pclass' to categorical values\ntitanic['Survived'] = titanic['Survived'].asfactor()\ntitanic['Pclass'] = titanic['Pclass'].asfactor()\n\n# Define features (or predictors) manually\nfeatures = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']\n\n# Split the H2O data frame into training/test sets\n# so we can evaluate out-of-bag performance\ntitanic_split = titanic.split_frame(ratios = [0.8], seed = 1234)\n\ntitanic_train = titanic_split[0] # using 80% for training\ntitanic_test = titanic_split[1] # using the rest 20% for out-of-bag evaluation\n\ntitanic_train.shape\n\ntitanic_test.shape", "<br>\nDefine Search Criteria for Random Grid Search", "# define the criteria for random grid search\nsearch_criteria = {'strategy': \"RandomDiscrete\", \n 'max_models': 9,\n 'seed': 1234}", "<br>\nStep 1: Build GBM Models using Random Grid Search and Extract the Best Model", "# define the range of hyper-parameters for GBM grid search\n# 27 combinations in total\nhyper_params = {'sample_rate': [0.7, 0.8, 0.9],\n 'col_sample_rate': [0.7, 0.8, 0.9],\n 'max_depth': [3, 5, 7]}\n\n# Set up GBM grid search\n# Add a seed for reproducibility\ngbm_rand_grid = H2OGridSearch(\n H2OGradientBoostingEstimator(\n model_id = 'gbm_rand_grid', \n seed = 1234,\n ntrees = 10000, \n nfolds = 5,\n fold_assignment = \"Modulo\", # needed for stacked ensembles\n keep_cross_validation_predictions = True, # needed for stacked ensembles\n stopping_metric = 'mse', \n stopping_rounds = 15, \n score_tree_interval = 1),\n search_criteria = search_criteria, # full grid search\n hyper_params = hyper_params)\n\n# Use .train() to start the grid search\ngbm_rand_grid.train(x = features, \n y = 'Survived', \n training_frame = titanic_train)\n\n# Sort and show the grid search results\ngbm_rand_grid_sorted = gbm_rand_grid.get_grid(sort_by='auc', decreasing=True)\nprint(gbm_rand_grid_sorted)\n\n# Extract the best model from random grid search\nbest_gbm_model_id = gbm_rand_grid_sorted.model_ids[0]\nbest_gbm_from_rand_grid = h2o.get_model(best_gbm_model_id)\nbest_gbm_from_rand_grid.summary()", "<br>\nStep 2: Build DRF Models using Random Grid Search and Extract the Best Model", "# define the range of hyper-parameters for DRF grid search\n# 27 combinations in total\nhyper_params = 
{'sample_rate': [0.5, 0.6, 0.7],\n 'col_sample_rate_per_tree': [0.7, 0.8, 0.9],\n 'max_depth': [3, 5, 7]}\n\n# Set up DRF grid search\n# Add a seed for reproducibility\ndrf_rand_grid = H2OGridSearch(\n H2ORandomForestEstimator(\n model_id = 'drf_rand_grid', \n seed = 1234,\n ntrees = 200, \n nfolds = 5,\n fold_assignment = \"Modulo\", # needed for stacked ensembles\n keep_cross_validation_predictions = True), # needed for stacked ensembles\n search_criteria = search_criteria, # full grid search\n hyper_params = hyper_params)\n\n# Use .train() to start the grid search\ndrf_rand_grid.train(x = features, \n y = 'Survived', \n training_frame = titanic_train)\n\n# Sort and show the grid search results\ndrf_rand_grid_sorted = drf_rand_grid.get_grid(sort_by='auc', decreasing=True)\nprint(drf_rand_grid_sorted)\n\n# Extract the best model from random grid search\nbest_drf_model_id = drf_rand_grid_sorted.model_ids[0]\nbest_drf_from_rand_grid = h2o.get_model(best_drf_model_id)\nbest_drf_from_rand_grid.summary()", "<br>\nModel Stacking", "# Define a list of models to be stacked\n# i.e. best model from each grid\nall_ids = [best_gbm_model_id, best_drf_model_id]\n\n# Set up Stacked Ensemble\nensemble = H2OStackedEnsembleEstimator(model_id = \"my_ensemble\",\n base_models = all_ids)\n\n# use .train to start model stacking\n# GLM as the default metalearner\nensemble.train(x = features, \n y = 'Survived', \n training_frame = titanic_train)", "<br>\nComparison of Model Performance on Test Data", "print('Best GBM model from Grid (AUC) : ', best_gbm_from_rand_grid.model_performance(titanic_test).auc())\nprint('Best DRF model from Grid (AUC) : ', best_drf_from_rand_grid.model_performance(titanic_test).auc())\nprint('Stacked Ensembles (AUC) : ', ensemble.model_performance(titanic_test).auc())", "<br>\n<br>" ]
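As a final step, one might want to score the stacked ensemble on the held-out test rows and persist the model to disk. A minimal sketch using standard H2O calls (the output path below is only an illustrative assumption):

```python
# Score the ensemble on the test frame and save it for later reuse.
pred = ensemble.predict(titanic_test)
pred.head(5)

model_path = h2o.save_model(model=ensemble, path="./h2o_models", force=True)
print(model_path)
```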
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
GoogleCloudPlatform/asl-ml-immersion
notebooks/feature_engineering/solutions/5_tftransform_taxifare.ipynb
apache-2.0
[ "TfTransform #\nLearning Objectives\n1. Preproccess data and engineer new features using TfTransform \n1. Create and deploy Apache Beam pipeline \n1. Use processed data to train taxifare model locally then serve a prediction\nOverview\nWhile Pandas is fine for experimenting, for operationalization of your workflow it is better to do preprocessing in Apache Beam. This will also help if you need to preprocess data in flight, since Apache Beam allows for streaming. In this lab we will pull data from BigQuery then use Apache Beam TfTransform to process the data. \nOnly specific combinations of TensorFlow/Beam are supported by tf.transform so make sure to get a combo that works. In this lab we will be using: \n* TFT 0.15.0\n* TF 2.0 \n* Apache Beam [GCP] 2.16.0", "!pip install --user apache-beam[gcp]==2.16.0\n!pip install --user tensorflow-transform==0.15.0", "NOTE: You may ignore specific incompatibility errors and warnings. These components and issues do not impact your ability to complete the lab.\nDownload .whl file for tensorflow-transform. We will pass this file to Beam Pipeline Options so it is installed on the DataFlow workers", "!pip download tensorflow-transform==0.15.0 --no-deps", "<b>Restart the kernel</b> (click on the reload button above).", "%%bash\npip freeze | grep -e 'flow\\|beam'\n\nimport shutil\n\nimport tensorflow as tf\nimport tensorflow_transform as tft\n\nprint(tf.__version__)\n\nimport os\n\nPROJECT = !gcloud config get-value project\nPROJECT = PROJECT[0]\nBUCKET = PROJECT\nREGION = \"us-central1\"\n\nos.environ[\"PROJECT\"] = PROJECT\nos.environ[\"BUCKET\"] = BUCKET\nos.environ[\"REGION\"] = REGION\n\n%%bash\ngcloud config set project $PROJECT\ngcloud config set compute/region $REGION\n\n%%bash\nif ! gsutil ls | grep -q gs://${BUCKET}/; then\n gsutil mb -l ${REGION} gs://${BUCKET}\nfi", "Input source: BigQuery\nGet data from BigQuery but defer the majority of filtering etc. 
to Beam.\nNote that the dayofweek column is now strings.", "from google.cloud import bigquery\n\n\ndef create_query(phase, EVERY_N):\n \"\"\"Creates a query with the proper splits.\n\n Args:\n phase: int, 1=train, 2=valid.\n EVERY_N: int, take an example EVERY_N rows.\n\n Returns:\n Query string with the proper splits.\n \"\"\"\n base_query = \"\"\"\n WITH daynames AS\n (SELECT ['Sun', 'Mon', 'Tues', 'Wed', 'Thurs', 'Fri', 'Sat'] AS daysofweek)\n SELECT\n (tolls_amount + fare_amount) AS fare_amount,\n daysofweek[ORDINAL(EXTRACT(DAYOFWEEK FROM pickup_datetime))] AS dayofweek,\n EXTRACT(HOUR FROM pickup_datetime) AS hourofday,\n pickup_longitude AS pickuplon,\n pickup_latitude AS pickuplat,\n dropoff_longitude AS dropofflon,\n dropoff_latitude AS dropofflat,\n passenger_count AS passengers,\n 'notneeded' AS key\n FROM\n `nyc-tlc.yellow.trips`, daynames\n WHERE\n trip_distance > 0 AND fare_amount > 0\n \"\"\"\n if EVERY_N is None:\n if phase < 2:\n # training\n query = \"\"\"{} AND ABS(MOD(FARM_FINGERPRINT(CAST\n (pickup_datetime AS STRING), 4)) < 2\"\"\".format(\n base_query\n )\n else:\n query = \"\"\"{} AND ABS(MOD(FARM_FINGERPRINT(CAST(\n pickup_datetime AS STRING), 4)) = {}\"\"\".format(\n base_query, phase\n )\n else:\n query = \"\"\"{} AND ABS(MOD(FARM_FINGERPRINT(CAST(\n pickup_datetime AS STRING)), {})) = {}\"\"\".format(\n base_query, EVERY_N, phase\n )\n\n return query\n\n\nquery = create_query(2, 100000)", "Let's pull this query down into a Pandas DataFrame and take a look at some of the statistics.", "df_valid = bigquery.Client().query(query).to_dataframe()\ndisplay(df_valid.head())\ndf_valid.describe()", "Create ML dataset using tf.transform and Dataflow\nLet's use Cloud Dataflow to read in the BigQuery data and write it out as TFRecord files. Along the way, let's use tf.transform to do scaling and transforming. 
Using tf.transform allows us to save the metadata to ensure that the appropriate transformations get carried out during prediction as well.\ntransformed_data is type pcollection.", "import datetime\n\nimport apache_beam as beam\nimport tensorflow as tf\nimport tensorflow_metadata as tfmd\nimport tensorflow_transform as tft\nfrom tensorflow_transform.beam import impl as beam_impl\n\n\ndef is_valid(inputs):\n \"\"\"Check to make sure the inputs are valid.\n\n Args:\n inputs: dict, dictionary of TableRow data from BigQuery.\n\n Returns:\n True if the inputs are valid and False if they are not.\n \"\"\"\n try:\n pickup_longitude = inputs[\"pickuplon\"]\n dropoff_longitude = inputs[\"dropofflon\"]\n pickup_latitude = inputs[\"pickuplat\"]\n dropoff_latitude = inputs[\"dropofflat\"]\n hourofday = inputs[\"hourofday\"]\n dayofweek = inputs[\"dayofweek\"]\n passenger_count = inputs[\"passengers\"]\n fare_amount = inputs[\"fare_amount\"]\n return (\n fare_amount >= 2.5\n and pickup_longitude > -78\n and pickup_longitude < -70\n and dropoff_longitude > -78\n and dropoff_longitude < -70\n and pickup_latitude > 37\n and pickup_latitude < 45\n and dropoff_latitude > 37\n and dropoff_latitude < 45\n and passenger_count > 0\n )\n except:\n return False\n\n\ndef preprocess_tft(inputs):\n \"\"\"Preproccess the features and add engineered features with tf transform.\n\n Args:\n dict, dictionary of TableRow data from BigQuery.\n\n Returns:\n Dictionary of preprocessed data after scaling and feature engineering.\n \"\"\"\n import datetime\n\n print(inputs)\n result = {}\n result[\"fare_amount\"] = tf.identity(inputs[\"fare_amount\"])\n\n # build a vocabulary\n result[\"dayofweek\"] = tft.string_to_int(inputs[\"dayofweek\"])\n result[\"hourofday\"] = tf.identity(inputs[\"hourofday\"]) # pass through\n\n # scaling numeric values\n result[\"pickuplon\"] = tft.scale_to_0_1(inputs[\"pickuplon\"])\n result[\"pickuplat\"] = tft.scale_to_0_1(inputs[\"pickuplat\"])\n result[\"dropofflon\"] = tft.scale_to_0_1(inputs[\"dropofflon\"])\n result[\"dropofflat\"] = tft.scale_to_0_1(inputs[\"dropofflat\"])\n result[\"passengers\"] = tf.cast(inputs[\"passengers\"], tf.float32) # a cast\n\n # arbitrary TF func\n result[\"key\"] = tf.as_string(tf.ones_like(inputs[\"passengers\"]))\n\n # engineered features\n latdiff = inputs[\"pickuplat\"] - inputs[\"dropofflat\"]\n londiff = inputs[\"pickuplon\"] - inputs[\"dropofflon\"]\n\n # Scale our engineered features latdiff and londiff between 0 and 1\n result[\"latdiff\"] = tft.scale_to_0_1(latdiff)\n result[\"londiff\"] = tft.scale_to_0_1(londiff)\n dist = tf.sqrt(latdiff * latdiff + londiff * londiff)\n result[\"euclidean\"] = tft.scale_to_0_1(dist)\n return result\n\n\ndef preprocess(in_test_mode):\n \"\"\"Sets up preprocess pipeline.\n\n Args:\n in_test_mode: bool, False to launch DataFlow job, True to run locally.\n \"\"\"\n import os\n import os.path\n import tempfile\n\n from apache_beam.io import tfrecordio\n from tensorflow_transform.beam import tft_beam_io\n from tensorflow_transform.beam.tft_beam_io import transform_fn_io\n from tensorflow_transform.coders import example_proto_coder\n from tensorflow_transform.tf_metadata import (\n dataset_metadata,\n dataset_schema,\n )\n\n job_name = \"preprocess-taxi-features\" + \"-\"\n job_name += datetime.datetime.now().strftime(\"%y%m%d-%H%M%S\")\n if in_test_mode:\n import shutil\n\n print(\"Launching local job ... 
hang on\")\n OUTPUT_DIR = \"./preproc_tft\"\n shutil.rmtree(OUTPUT_DIR, ignore_errors=True)\n EVERY_N = 100000\n else:\n print(f\"Launching Dataflow job {job_name} ... hang on\")\n OUTPUT_DIR = f\"gs://{BUCKET}/taxifare/preproc_tft/\"\n import subprocess\n\n subprocess.call(f\"gsutil rm -r {OUTPUT_DIR}\".split())\n EVERY_N = 10000\n\n options = {\n \"staging_location\": os.path.join(OUTPUT_DIR, \"tmp\", \"staging\"),\n \"temp_location\": os.path.join(OUTPUT_DIR, \"tmp\"),\n \"job_name\": job_name,\n \"project\": PROJECT,\n \"num_workers\": 1,\n \"max_num_workers\": 1,\n \"teardown_policy\": \"TEARDOWN_ALWAYS\",\n \"no_save_main_session\": True,\n \"direct_num_workers\": 1,\n \"extra_packages\": [\"tensorflow-transform-0.15.0.tar.gz\"],\n }\n\n opts = beam.pipeline.PipelineOptions(flags=[], **options)\n if in_test_mode:\n RUNNER = \"DirectRunner\"\n else:\n RUNNER = \"DataflowRunner\"\n\n # Set up raw data metadata\n raw_data_schema = {\n colname: dataset_schema.ColumnSchema(\n tf.string, [], dataset_schema.FixedColumnRepresentation()\n )\n for colname in \"dayofweek,key\".split(\",\")\n }\n\n raw_data_schema.update(\n {\n colname: dataset_schema.ColumnSchema(\n tf.float32, [], dataset_schema.FixedColumnRepresentation()\n )\n for colname in \"fare_amount,pickuplon,pickuplat,dropofflon,dropofflat\".split(\n \",\"\n )\n }\n )\n\n raw_data_schema.update(\n {\n colname: dataset_schema.ColumnSchema(\n tf.int64, [], dataset_schema.FixedColumnRepresentation()\n )\n for colname in \"hourofday,passengers\".split(\",\")\n }\n )\n\n raw_data_metadata = dataset_metadata.DatasetMetadata(\n dataset_schema.Schema(raw_data_schema)\n )\n\n # Run Beam\n with beam.Pipeline(RUNNER, options=opts) as p:\n with beam_impl.Context(temp_dir=os.path.join(OUTPUT_DIR, \"tmp\")):\n # Save the raw data metadata\n (\n raw_data_metadata\n | \"WriteInputMetadata\"\n >> tft_beam_io.WriteMetadata(\n os.path.join(OUTPUT_DIR, \"metadata/rawdata_metadata\"),\n pipeline=p,\n )\n )\n\n # Read training data from bigquery and filter rows\n raw_data = (\n p\n | \"train_read\"\n >> beam.io.Read(\n beam.io.BigQuerySource(\n query=create_query(1, EVERY_N), use_standard_sql=True\n )\n )\n | \"train_filter\" >> beam.Filter(is_valid)\n )\n\n raw_dataset = (raw_data, raw_data_metadata)\n\n # Analyze and transform training data\n (\n transformed_dataset,\n transform_fn,\n ) = raw_dataset | beam_impl.AnalyzeAndTransformDataset(\n preprocess_tft\n )\n transformed_data, transformed_metadata = transformed_dataset\n\n # Save transformed train data to disk in efficient tfrecord format\n transformed_data | \"WriteTrainData\" >> tfrecordio.WriteToTFRecord(\n os.path.join(OUTPUT_DIR, \"train\"),\n file_name_suffix=\".gz\",\n coder=example_proto_coder.ExampleProtoCoder(\n transformed_metadata.schema\n ),\n )\n\n # Read eval data from bigquery and filter rows\n raw_test_data = (\n p\n | \"eval_read\"\n >> beam.io.Read(\n beam.io.BigQuerySource(\n query=create_query(2, EVERY_N), use_standard_sql=True\n )\n )\n | \"eval_filter\" >> beam.Filter(is_valid)\n )\n\n raw_test_dataset = (raw_test_data, raw_data_metadata)\n\n # Transform eval data\n transformed_test_dataset = (\n raw_test_dataset,\n transform_fn,\n ) | beam_impl.TransformDataset()\n transformed_test_data, _ = transformed_test_dataset\n\n # Save transformed train data to disk in efficient tfrecord format\n (\n transformed_test_data\n | \"WriteTestData\"\n >> tfrecordio.WriteToTFRecord(\n os.path.join(OUTPUT_DIR, \"eval\"),\n file_name_suffix=\".gz\",\n 
coder=example_proto_coder.ExampleProtoCoder(\n transformed_metadata.schema\n ),\n )\n )\n\n # Save transformation function to disk for use at serving time\n (\n transform_fn\n | \"WriteTransformFn\"\n >> transform_fn_io.WriteTransformFn(\n os.path.join(OUTPUT_DIR, \"metadata\")\n )\n )\n\n\n# Change to True to run locally\npreprocess(in_test_mode=False)", "This will take 10-15 minutes. You cannot go on in this lab until your DataFlow job has succesfully completed.", "%%bash\n# ls preproc_tft\ngsutil ls gs://${BUCKET}/taxifare/preproc_tft/", "Train off preprocessed data\nNow that we have our data ready and verified it is in the correct location we can train our taxifare model locally.", "%%bash\nrm -r ./taxi_trained\nexport PYTHONPATH=${PYTHONPATH}:$PWD\npython3 -m tft_trainer.task \\\n --train_data_path=\"gs://${BUCKET}/taxifare/preproc_tft/train*\" \\\n --eval_data_path=\"gs://${BUCKET}/taxifare/preproc_tft/eval*\" \\\n --output_dir=./taxi_trained \\\n\n!ls $PWD/taxi_trained/export/exporter", "Now let's create fake data in JSON format and use it to serve a prediction with gcloud ai-platform local predict", "%%writefile /tmp/test.json\n{\"dayofweek\":0, \"hourofday\":17, \"pickuplon\": -73.885262, \"pickuplat\": 40.773008, \"dropofflon\": -73.987232, \"dropofflat\": 40.732403, \"passengers\": 2.0}\n\n%%bash\nsudo find \"/usr/lib/google-cloud-sdk/lib/googlecloudsdk/command_lib/ml_engine\" -name '*.pyc' -delete\n\n%%bash\nmodel_dir=$(ls $PWD/taxi_trained/export/exporter/)\ngcloud ai-platform local predict \\\n --model-dir=./taxi_trained/export/exporter/${model_dir} \\\n --json-instances=/tmp/test.json", "Copyright 2021 Google Inc. Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
BoasWhip/Black
Notebook/M269 Unit 4 Notes -- Search.ipynb
mit
[ "4. Searching\n4.1 Searching Lists\nAlgorithm: Selection\nFinding the median of a collection of numbers is selection problem with $k=(n+1)/2$ if $n$ is odd (and, if $n$ is even, the median is the mean of the $k$th and $(k+1)$th smallest items, where $k=n/2$).\nInitial Insight\nChoose a value from $S$, to be used as <b>pivotValue</b>. Then divide the list into two partitions, <b>leftPart</b> (containing the list items that are smaller than <b>pivotValue</b>) and <b>rightPart</b> (containing the list items that are greater than <b>pivotValue</b>).\nIf the $k$th smallest item has been found, stop. Otherwise, select the partition that must contain the $k$th smallest item, and do the whole thing again with this partition.\nSpecification\n<table>\n <tr>\n <th>Name:</th>\n <td><b>Selection</b></td>\n </tr>\n <tr>\n <th>Inputs:</th>\n <td>A sequence of integers $S = \\{s_1, s_2, s_3, ..., s_n\\}$<br/>An integer $k$</td>\n </tr>\n <tr>\n <th>Outputs:</th>\n <td>An integer $x$</td>\n </tr>\n <tr>\n <th>Preconditions:</th>\n <td>Length of $S>0$ and $k>0$ and $k\\le n$</td>\n </tr>\n <tr>\n <th>Postcondition:</th>\n <td>$x$ is the $k$th smallest item in $S$</td>\n </tr>\n</table>\n\nCode", "def quickSelect(k, aList):\n\n if len(aList) == 1: \n return aList[0] # Base case\n \n pivotValue = aList[0]\n leftPart = []\n rightPart = []\n \n for item in aList[1:]:\n if item < pivotValue: \n leftPart.append(item)\n else: \n rightPart.append(item)\n \n if len(leftPart) >= k: \n return quickSelect(k, leftPart)\n elif len(leftPart) == k - 1: \n return pivotValue\n else: \n return quickSelect(k - len(leftPart) -1, rightPart) \n\n\n\nprint(\"Median:\", quickSelect(6, [2, 36, 5, 21, 8, 13, 11, 20, 4, 1]))\n\ndef quickSelect(k, aList):\n\n if len(aList) == 1: return aList[0]\n \n pivotValue = aList[0]\n leftPart = [x for x in aList[1:] if x < pivotValue]\n rightPart = [x for x in aList[1:] if not x < pivotValue]\n\n if len(leftPart) >= k: return quickSelect(k, leftPart)\n elif len(leftPart) == k - 1: return pivotValue\n else: return quickSelect(k - len(leftPart) -1, rightPart) \n\n\n\nprint(\"Median:\", quickSelect(6, [2, 36, 5, 21, 8, 13, 11, 20, 4, 1]))", "Remarks\nThe crucial step (<i>cf.</i> <b>Quick Sort</b>) that determines whether we have best case or worst case performance is the choice of the pivot – if we are really lucky we will get a value that cuts down the list the algorithm needs to search very substantially at each step.<br/><br/>\nThe algorithm is divide-and-conquer and each iteration makes the sub-problem substantially smaller. In <b>Quick Sort</b>, both partitions are sorted recursively and provided that the pivot, at each stage, divides the list up into equal parts, we achieve $O(n $log$ n)$ complexity.<br/><br/>\nHowever, in the <b>Selection</b> algorithm we know which partition to search, so we only deal with one of them on each recursive call and as a result it is even more efficient. Hence, it can be shown that its complexity is $O(n)$.\n4.2 Searching for patterns\nIt often happens that we need to search through a string of characters to find an occurrence (if there is one) of a given pattern, e.g. 
genetics and DNA searches, keyword searches.\nBasic string search\nAlgorithm: StringMatch\nWe are representing the sequence to be searched simply as a string of characters, referred to as the search string $S$, a shorter sequence is the target string $T$ and we are trying to find where the first occurrence of $T$ is, if it is present in $S$.\nInitial Insight\nRepeatedly shift $T$ one place along $S$ and then compare the characters of $T$ with those of $S$. Do this until a match of $T$ in $S$ is found, or the end of $S$ is reached.\nSpecification\n<table>\n <tr>\n <th>Name:</th>\n <td><b>StringMatch</b></td>\n </tr>\n <tr>\n <th>Inputs:</th>\n <td>A search string $S = (s_1, s_2, s_3, ..., s_n)$<br/>A target string $T = (t_1, t_2, t_3, ..., t_m)$</td>\n </tr>\n <tr>\n <th>Outputs:</th>\n <td>An integer $x$</td>\n </tr>\n <tr>\n <th>Preconditions:</th>\n <td>$m\\le n$, $m>0$ and $n>0$</td>\n </tr>\n <tr>\n <th>Postcondition:</th>\n <td>If there is an occurrence of $T$ in $S$, $x$ is the start position of the first occurrence of $T$ in $S$; otherwise $x = -1$</td>\n </tr>\n</table>\n\nCode", "def basicStringSearch(searchString, target):\n\n searchIndex = 0\n \n lenT = len(target) \n lenS = len(searchString) \n \n while searchIndex + lenT <= lenS:\n\n targetIndex = 0\n\n while targetIndex < lenT and target[targetIndex] == searchString[ targetIndex + searchIndex]:\n targetIndex += 1\n\n if targetIndex == lenT:\n return searchIndex\n\n searchIndex += 1\n\n return -1\n\n# Test Code\nfor target, index in [('per', 0), ('lta', 14), ('ad', 10), ('astra', -1)]:\n print(basicStringSearch('per ardua ad alta', target)==index)", "Remarks\nIt becomes immediately apparent when implement that this algorithm would consist of two nested loops leading to complexity $O(mn) > O(m^2)$.<br/><br/>\nWe know that if the character in $S$ following the failed comparison with $T$ is not in $T$ then there is no need to slide along one place to do another comparison. We should slide to the next point beyond it. This gives us the basis for an improved algorithm.\nQuick search\nInitial Insight\nFor each character in $T$ calculate the number of positions to shift $T$ if a comparison fails, according to where (if at all) that character appears in $T$.<br/><br/>\nRepeatedly compare the characters of $T$ with those of $S$. If a comparison fails, examine the next character along in $S$ and shift $T$ by the calculated shift distance for that character.<br/><br/>\nDo this until an occurrence of $T$ in $S$ is found, or the end of $S$ is reached.\nRemarks\nAn important point to note first of all is that the part of the algorithm calculating the shifts depends entirely on an analysis of the target string $T$ – there is no need to examine the search string $S$ at all because for any character in $S$ that is not in $T$, the shift is a fixed distance.<br/><br/>\nThe database is called a <b>shift table</b> and it stores a <b>shift distance</b> for each character in the domain of $S$ – e.g. for each character of the alphabet, or say, all upper and lower case plus punctuation.<br/><br/>\nThe <b>shift distance</b> is calculated according to the following rules:\n<ol>\n <li>If the character does not appear in T, the shift distance is one more than the length of T.</li>\n <li>If the character does appear in T, the shift distance is the first position at which it appears, counting from right to left and starting at 1. (Hence when a character appears more than once in $T$ keeps the lowest position.)</li>\n</ol>\n\nSuppose $S = $'GGGGGAGGCGGCGGT'. 
Then for target string $T = $'TCCACC', we have:\n<table>\n <tr>\n <th>G</th>\n <th>A</th>\n <th>C</th>\n <th>T</th>\n </tr>\n <tr>\n <td>7</td>\n <td>3</td>\n <td>1</td>\n <td>6</td>\n </tr>\n</table>\nand if $T = $'TGGCG', we have:\n<table>\n <tr>\n <th>G</th>\n <th>A</th>\n <th>C</th>\n <th>T</th>\n </tr>\n <tr>\n <td>1</td>\n <td>6</td>\n <td>2</td>\n <td>5</td>\n </tr>\n</table>\n\n<br/>\nOnce the shift table has been computed, the search part of the quick search algorithm is similar to the basic string search algorithm, except that at the end of each failed attempt we look at the next character along in $S$ that is beyond $T$ and use this to look up in the shift table how many steps to slide $T$.<br/>\nWe implement the <b>shift table</b> as a dictionary in Python:\nCode", "def buildShiftTable(target, alphabet):\n\n shiftTable = {}\n\n for character in alphabet:\n shiftTable[character] = len(target) + 1\n\n for i in range(len(target)):\n char = target[i]\n shift = len(target) - i\n shiftTable[char] = shift\n\n return shiftTable\n\ndef quickSearch (searchString, target, alphabet):\n\n shiftTable = buildShiftTable(target, alphabet)\n searchIndex = 0\n\n while searchIndex + len(target) <= len(searchString):\n \n targetIndex = 0\n\n # Compares the strings \n while targetIndex < len(target) and target[targetIndex] == searchString[searchIndex + targetIndex]:\n targetIndex = targetIndex + 1\n\n # Return index if target found\n if targetIndex == len(target): return searchIndex\n\n # Continue search with new shivt value or exit\n if searchIndex + len(target) < len(searchString):\n next = searchString[searchIndex + len(target)]\n shift = shiftTable[next]\n searchIndex = searchIndex + shift\n else:\n return -1\n\n return -1", "Tests", "theAlphabet = {'G', 'A', 'C', 'T'}\nstringToSearch = 'ATGAATACCCACCTTACAGAAACCTGGGAAAAGGCAATAAATATTATAAAAGGTGAACTTACAGAAGTAA'\n\nfor thetarget in ['ACAG', 'AAGTAA', 'CCCC']:\n print(quickSearch(stringToSearch, thetarget, theAlphabet))", "Remarks\nThe basic brute-force algorithm we wrote first will work fine with relatively short search strings but, as with all algorithms, inputs of huge size may overwhelm it. For example, DNA strings can be billions of bases long, so algorithmic efficiency can be vital. We noted already that the complexity of the basic string search can be as bad as O(nm) in the worst case.<br/><br/>\nAs for the quick search algorithm, research has shown that its average-case performance is good but, unfortunately, its worst case behaviour is still O(mn).<br/><br/>\nKnuth–Morris–Pratt (KMP)\nBetter algorithms have been developed. One of the best-known efficient search algorithms is the <b>Knuth–Morris–Pratt (KMP)</b> algorithm. A full description of the precise details of the KMP algorithm is beyond the scope of this text.\nAlgorithm: Knuth–Morris–Pratt (KMP)\nThe <b>KMP</b> algorithm is in two parts:\n<ol>\n <li>Build a table of the lengths of prefix matches up to every character in the target string, $T$.</li>\n <li>Move along the search string, $S$, using the information in the table to do the shifting and compare.</li>\n</ol>\n\nOnce the prefix table has been built, the actual search in the second step proceeds like the other string-searching algorithms above, but when a mismatch is detected the algorithm uses the prefix table to decide how to shift $T$. 
The problem is to know if these prefix matches exist and – if they do – how long the matching substrings are.</br>\nThe prefix will then be aligned as shown in Figure 4.17 and comparison can continue at the next character in S.\nIf you want to take the trouble, you can verify that the final table will be:", "prefixTable = [0, 1, 0, 0, 0, 1, 2, 3, 4, 0, 0, 0, 1, 2]", "Code", "# Helper function for kmpSearch()\n\ndef buildPrefixTable(target): \n\n #The first line of code just builds a list that has len(target)\n #items all of which are given the default value 0\n\n prefixTable = [0] * len(target)\n q = 0\n\n for p in range(1, len(target)):\n\n while q > 0 and target[q] != target[p]:\n q = prefixTable[q - 1]\n\n if target[q] == target[p]:\n q = q + 1\n \n prefixTable[p] = q\n\n return prefixTable\n\ndef kmpSearch(searchString, target):\n\n n = len(searchString)\n m = len(target)\n prefixTable = buildPrefixTable(target)\n q = 0\n\n for i in range(n):\n\n while q > 0 and target[q] != searchString[i]:\n q = prefixTable[q - 1]\n\n if target[q] == searchString[i]:\n q = q + 1\n \n if q == m:\n return i - m + 1\n\n return -1", "Tests", "stringToSearch = 'ATGAATACCCACCTTACAGAAACCTGGGAAAAGGCAATAAATATTATAAAAGGTGAACTTACAGAAGTAA'\n\nfor thetarget in ['ACAG', 'AAGTAA', 'CCCC']:\n print(kmpSearch(stringToSearch, thetarget))", "Remarks\nWhat about the complexity of the KMP algorithm? Computing the prefix table takes significant effort but in fact there is an efficient algorithm for doing it. Overall, the KMP algorithm has complexity $O(m + n)$. Since $n$ is usually enormously larger than $m$ (think of searching a DNA string of billions of bases), $m$ is usually dominated by $n$, so this means that KMP has effective complexity $O(n)$.\nOther Algorithms\nString search is an immensely important application in modern computing, and at least 30 efficient algorithms have been developed for the task. Many of these depend on the principle embodied in the quick search and KMP algorithms – shifting the target string an appropriate distance along the search string at each step, based on information in a table. The <b>Boyer–Moore</b> algorithm, for example, combines elements of both these two algorithms. This algorithm is widely used in practical applications.\nThere are also string-search algorithms that work in entirely different ways from the examples we have looked at. Generally, these are beyond the scope of this text, but some are based on hashing functions, which we now move on to discuss next.\n4.3 Hashing and Hash Tables\nHashing\nWe have seen how we are able to make improvements in search algorithms by taking advantage of information about where items are stored in the collection with respect to one another. For example, by knowing that a list was ordered, we could search in logarithmic time using a binary search. In this section we will attempt to go one step further by building a data structure that can be searched in $O(1)$ time. This concept is referred to as <b>hashing</b>.\nIn order to do this, we will need to know even more about where the items might be when we go to look for them in the collection. If every item is where it should be, then the search can use a single comparison to discover the presence of an item.\nA hash table is a collection of items which are stored in such a way as to make it easy to find them later. 
Each position of the hash table, often called a slot, can hold an item and is named by an integer value starting at 0.\nBelow is a hash table of size $m=11$ implemented in Python as a list with empty slots intialized with a default <b>None</b> value:\n<img src=\"http://interactivepython.org/courselib/static/pythonds/_images/hashtable.png\">\nThe mapping between an item and the slot where that item belongs in the hash table is called the <b>hash function</b>. The hash function will take any item in the collection and return an integer in the range of slot names, between $0$ and $m-1$.\nOur first hash function, sometimes referred to as the <b>remainder method</b>, simply takes an item and divides it by the table size, returning the remainder as its hash value:", "set_of_integers = [54, 26, 93, 17, 77, 31]\nhash_function = lambda x: [y % 11 for y in x]\nhash_vals = hash_function(set_of_integers)\nhash_vals", "Once the hash values have been computed, we can insert each item into the hash table at the designated position:\n<img src=\"http://interactivepython.org/courselib/static/pythonds/_images/hashtable2.png\">\nNow when we want to search for an item, we simply use the hash function to compute the slot name for the item and then check the hash table to see if it is present. This searching operation is $O(1)$, since a constant amount of time is required to compute the hash value and then index the hash table at that location. If everything is where it should be, we have found a constant time search algorithm.\nIt immediately becomes apparent that this technique is going to work only if each item maps to a unique location in the hash table. When two or more items would need to be in the same slot. This is referred to as a <b>collision</b> (it may also be called a “clash”). Clearly, collisions create a problem for the hashing technique. We will discuss them in detail later.\nHash Functions\nGiven a collection of items, a hash function that maps each item into a unique slot is referred to as a <b>perfect hash function</b>.\nIf we know the items and the collection will never change, then it is possible to construct a perfect hash function (refer to the exercises for more about perfect hash functions). Unfortunately, given an arbitrary collection of items, there is no systematic way to construct a perfect hash function. Luckily, we do not need the hash function to be perfect to still gain performance efficiency.\nOne way to always have a perfect hash function is to increase the size of the hash table so that each possible value in the item range can be accommodated. This guarantees that each item will have a unique slot. Although this is practical for small numbers of items, it is not feasible when the number of possible items is large. For example, if the items were nine-digit Social Security numbers, this method would require almost one billion slots. If we only want to store data for a class of 25 students, we will be wasting an enormous amount of memory.\nOur goal is to create a hash function that minimizes the number of collisions, is easy to compute, and evenly distributes the items in the hash table. There are a number of common ways to extend the simple remainder method. We will consider a few of them here.\nThe <b>folding method</b> for constructing hash functions begins by dividing the item into equal-size pieces (the last piece may not be of equal size). 
These pieces are then added together to give the resulting hash value.\nFor example, if our item was the phone number $436-555-4601$, we would take the digits and divide them into groups of $2$ and sum them; that is $43+65+55+46+01=210$. If we assume our hash table has $11$ slots, then we need to perform the extra step of dividing by $11$ and keeping the remainder. In this case $210 \\% 11 = 1$, so the phone number $436-555-4601$ hashes to slot $1$. (Some folding methods go one step further and reverse every other piece before the addition. For the above example, we get $43+56+55+64+01=219$ which gives $219 \\% 11 = 10$.)", "word = 4365554601\nword = str(word)\nstep = 2\nslots = 11\nfolds = [int(word[n: n+2]) for n in range(0, len(word), step)]\n\nprint(folds)\nprint(sum(folds))\nprint(sum(folds)%slots)", "Another numerical technique for constructing a hash function is called the <b>mid-square method</b>. We first square the item, and then extract <i>some portion</i> of the resulting digits. For example, if the item were $44$, we would first compute $44^2=1,936$. By extracting the middle two digits, $93$, and performing the remainder step, we get a remainder of $5$ on division by $11$.", "set_of_integers = [54, 26, 93, 17, 77, 31]\nhash_function = lambda x: [int(str(y**2)[1:-1])%11 for y in x]\nhash_vals = hash_function(set_of_integers)\nhash_vals", "We can also create hash functions for character-based items such as strings. The word “cat” can be thought of as a sequence of ordinal values. Summing these ordinal (Unicode) values and then taking the remainder on division by $11$:", "word = 'cat'\nsum([ord(l) for l in word]) % 11", "To avoid collisions between anagrams, we could use positional weights:", "sum([(ord(word[x]) * (x + 1)) for x in range(len(word))]) % 11", "You may be able to think of a number of additional ways to compute hash values for items in a collection. The important thing to remember is that the hash function has to be efficient so that it does not become the dominant part of the storage and search process. If the hash function is too complex, then it becomes more work to compute the slot name than it would be to simply do a basic sequential or binary search as described earlier. This would quickly defeat the purpose of hashing.\nCollision Resolution\nIf the hash function is perfect, collisions never occur. However, this is often not possible, so when two items hash to the same slot, we must have a systematic method for placing the second item in the hash table. This process is called <b>collision resolution</b>.\nOne method for resolving collisions looks into the hash table and tries to find another open slot to hold the item that caused the collision. A simple way to do this is to start at the original hash value position and then move in a sequential manner through the slots until we encounter the first slot that is empty.\nNote that we may need to go back to the first slot (circularly) to cover the entire hash table. This collision resolution process is referred to as <b>open addressing</b> in that it tries to find the next open slot or address in the hash table. By systematically visiting each slot one at a time, we are performing an open addressing technique called <b>linear probing</b>.
Using the hash values from the remainder method example, when add $44$ and $55$ say:\n<img src=\"http://interactivepython.org/courselib/static/pythonds/_images/clustering.png\">\nOnce we have built a hash table using open addressing and linear probing, it is essential that we utilize the same methods to search for items. we are henced forced to do sequential search to find $44$ and $55$.\nSo, a disadvantage to linear probing is the tendency for <b>clustering</b>; items become clustered in the table. This means that if many collisions occur at the same hash value, a number of surrounding slots will be filled by the linear probing resolution. This will have an impact on other items that are being inserted, as we saw when we tried to add the item 20 above. A cluster of values hashing to 0 had to be skipped to finally find an open position.\nOne way to deal with clustering is to extend the linear probing technique so that instead of looking sequentially for the next open slot, we skip slots, thereby more evenly distributing the items that have caused collisions. This will potentially reduce the clustering that occurs, e.g. with a “plus 3” probe. This means that once a collision occurs, we will look at every third slot until we find one that is empty.\nThe general name for this process of looking for another slot after a collision is <b>rehashing</b>. With simple linear probing, in general, $rehash(pos)=(pos+skip)$%$sizeoftable$. It is important to note that the size of the “skip” must be such that all the slots in the table will eventually be visited. Otherwise, part of the table will be unused. To ensure this, it is often suggested that the table size be a prime number. This is the reason we have been using $11$ in our examples.\nA variation of the linear probing idea is called <b>quadratic probing</b>. Instead of using a constant “skip” value, we use a rehash function that increments the hash value by 1, 3, 5, 7, 9, and so on. This means that if the first hash value is $h$, the successive values are $h+1$, $h+4$, $h+9$, $h+16$, and so on. In other words, quadratic probing uses a skip consisting of successive <i>perfect squares</i>:\n<img src=\"http://interactivepython.org/courselib/static/pythonds/_images/linearprobing2.png\">\nAn alternative method for handling the collision problem is to allow each slot to hold a reference to a collection (or chain) of items. <b>Chaining</b> allows many items to exist at the same location in the hash table. When collisions happen, the item is still placed in the proper slot of the hash table. As more and more items hash to the same location, the difficulty of searching for the item in the collection increases:\n<img src=\"http://interactivepython.org/courselib/static/pythonds/_images/chaining.png\">\nWhen we want to search for an item, we use the hash function to generate the slot where it should reside. Since each slot holds a collection, we use a searching technique to decide whether the item is present. The advantage is that on the average there are likely to be many fewer items in each slot, so the search is perhaps more efficient.", "set_of_integers = [123456, 431941, 789012, 60375]\nprint(set_of_integers)\nset_of_integers = [((int(str(x)[0:2]) + int(str(x)[2:4]) + int(str(x)[4:])) % 80) -1 for x in set_of_integers]\nprint(set_of_integers)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
CartoDB/cartoframes
docs/examples/data_visualization/layers/add_multiple_layers.ipynb
bsd-3-clause
[ "Add Multiple Layers\nIn this example, three Layers are added to a Map. Notice the draw order and default symbology for each.\nFor more information, run help(Layer)", "from cartoframes.auth import set_default_credentials\nfrom cartoframes.viz import Map, Layer\n\nset_default_credentials('cartoframes')\n\nMap([\n Layer('countries'),\n Layer('global_power_plants'),\n Layer('world_rivers')\n])", "Using default legends", "from cartoframes.viz import default_legend\n\nMap([\n Layer('countries', legends=default_legend('Countries')),\n Layer('global_power_plants', legends=default_legend('Global Power Plants')),\n Layer('world_rivers', legends=default_legend('World Rivers'))\n])", "Adding a Layer Selector", "from cartoframes.viz import default_legend\n\nMap([\n Layer('countries', title='Countries', legends=default_legend()),\n Layer('global_power_plants', title='Global Power Plants', legends=default_legend()),\n Layer('world_rivers', title='World Rivers', legends=default_legend())\n], layer_selector=True)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
kenjisato/intro-macro
doc/python/Optimal Growth (Euler).ipynb
mit
[ "Computing the Optimal Growth Model by the Euler Equation", "%matplotlib inline\nimport numpy as np\nimport matplotlib.pyplot as plt", "Model\nLet's consider the optimal growth model,\n\\begin{align}\n &\\max\\int_{0}^{\\infty}e^{-\\rho t}u(c(t))dt \\\n &\\text{subject to} \\\n &\\qquad\\dot{k}(t)=f(k(t))-\\delta k(t)-c(t),\\\n &\\qquad k(0):\\text{ given.} \\\n\\end{align}\nWe will assume the following specific function forms when necessary \n\\begin{align}\n u(c) &= \\frac{c^{1-\\theta}}{1-\\theta}, \\quad \\theta > 0, \\\n f(k) &= A k^\\alpha, \\quad 0 < \\alpha < 1, \\quad A > 0\n\\end{align}\nBy using the Hamiltonian method, we have obtained the first-order dynamics of the economy\n\\begin{align}\n \\dot{c} &= \\theta^{-1} c [f'(k) - \\delta - \\rho] & \\text{(EE)} \\\n \\dot{k} &= f(k) - \\delta k - c. & \\text{(CA)}\n\\end{align}\n(EE) is the Euler equation and (CA) the capital accumulation equation. \nLet's draw the phase diagram on your computer.\n$\\dot c = 0$ locus (EE)\n$\\dot c = 0$ is equivalent to \n\\begin{align}\n f'(k) = \\delta + \\rho\n\\end{align}\nThus, the locus is a vertical line which goes through $(k^*, 0)$, where $k^*$ is the unique value that satisfies $f'(k^*) = \\delta + \\rho$. Under the assumption that $f(k) = Ak^\\alpha$, \n\\begin{align}\n k^* = \\left(\\frac{\\delta + \\rho}{A \\alpha}\\right)^\\frac{1}{\\alpha - 1}\n\\end{align}\n$\\dot k = 0$ locus (CA)\n$\\dot k = 0$ is equivalent to \n\\begin{align}\n c = f(k) - \\delta k.\n\\end{align}\nCode for the loci", "alpha = 0.3\ndelta = 0.05\nrho = 0.1\ntheta = 1\n\nA = 1\ndef f(x):\n return A * x**alpha\n\nkgrid = np.linspace(0.0, 7.5, 300)\n\nfig, ax = plt.subplots(1,1)\n\n# Locus obtained from (EE)\nkstar = ((delta + rho) / (A * alpha)) ** (1/(alpha - 1))\nax.axvline(kstar)\nax.text(kstar*1.01, 0.1, '$\\dot c = 0$', fontsize=16)\n\n# Locus obtained from (CA)\nax.plot(kgrid, f(kgrid) - delta * kgrid)\nax.text(4, 1.06*(f(4) - delta * 4), '$\\dot k = 0$', fontsize=16)\n\n# axis labels\nax.set_xlabel('$k$', fontsize=16)\nax.set_ylabel('$c$', fontsize=16)\nax.set_ylim([0.0, 1.8 * np.max(f(kgrid) - delta*kgrid)])\n\nplt.show()", "What we want to do is to draw paths on this phase space. It is convenient to have a function that returns this kind of figure.", "def phase_space(kmax, gridnum, yamp=1.8, colors=['black', 'black'], labels_on=False):\n\n kgrid = np.linspace(0.0, kmax, gridnum)\n\n fig, ax = plt.subplots(1,1)\n\n # CA locus: c = f(k) - delta * k\n ax.plot(kgrid, f(kgrid) - delta * kgrid, color=colors[0])\n if labels_on:\n ax.text(4, f(4) - delta * 4, '$\\dot k = 0$', fontsize=16)\n \n\n # EE locus: vertical line at k*\n kstar = ((delta + rho) / (A * alpha)) ** (1/(alpha - 1))\n ax.axvline(kstar, color=colors[1])\n if labels_on:\n ax.text(kstar*1.01, 0.1, '$\\dot c = 0$', fontsize=16)\n\n # axis labels\n ax.set_xlabel('$k$', fontsize=16)\n ax.set_ylabel('$c$', fontsize=16)\n \n ax.set_ylim([0.0, yamp * np.max(f(kgrid) - delta*kgrid)])\n\n return fig, ax", "You can draw the loci by calling the function as in the following.", "fig, ax = phase_space(kmax=7, gridnum=300)", "The dynamics\nDiscretize \n\\begin{align}\n \\dot{c} &= \\theta^{-1} c [f'(k) - \\delta - \\rho] & \\text{(EE)} \\\n \\dot{k} &= f(k) - \\delta k - c. & \\text{(CA)}\n\\end{align}\nto get the discretized dynamic equations:\n\\begin{align}\n c(t+\\Delta t) &= c(t){1 + \\theta^{-1} [f'(k(t)) - \\delta - \\rho] \\Delta t}& \\text{(D-EE)} \\\n k(t+\\Delta t) &= k(t) + {f(k(t)) - \\delta k(t) - c(t)} \\Delta t.
& \\text{(D-CA)}\n\\end{align}", "dt = 0.001\n\ndef f_deriv(k):\n \"\"\"derivative of f\"\"\"\n return A * alpha * k ** (alpha - 1)\n\ndef update(k, c):\n cnew = c * (1 + (f_deriv(k) - delta - rho) * dt / theta) # D-EE\n knew = k + (f(k) - delta * k - c) * dt\n return knew, cnew\n\n\nk_initial, c_guess = 0.4, 0.2\n\n# Find a first-order path from the initial condition k0 and guess of c0\nk0, c0 = k_initial, c_guess\n\nk, c = [k0], [c0]\n\nfor i in range(10000):\n knew, cnew = update(k[-1], c[-1])\n k.append(knew)\n c.append(cnew)\n \nkgrid = np.linspace(0.0, 10., 300)\n\nfig, ax = phase_space(10., 300)\nax.plot(k, c)", "The blue curve shows the dynamic path of the system of differential equation. The solution moves from left to right in this case. This path doesn't seem to satisfy the transversality condition and so it's not the optimal path.\nWhat we do next is to find $c(0)$ that converges to the steady state. I will show you how to do this by “brute force.”\nMake many guesses about $c(0)$ and find the solution. We need to make a function to create a path that starts from $(k(0), c(0))$ and verify whether or not it's approaching to the steady state.", "def compute_path(k0, c_guess, steps, ax=None, output=True):\n \"\"\"compute a path starting from (k0, c_guess) that satisfies EE and CA\"\"\"\n \n k, c = [k0], [c_guess]\n for i in range(steps):\n knew, cnew = update(k[-1], c[-1])\n \n # stop if the new values violate nonnegativity constraints\n if knew < 0:\n break\n if cnew < 0:\n break\n \n k.append(knew)\n c.append(cnew)\n \n # plot the path if ax is given\n if ax is not None:\n ax.plot(k, c)\n \n # You may want to suppress the output when you give ax.\n if output:\n return k, c", "Typical usage:", "k_init = 0.4\nsteps = 30000\n\nfig, ax = phase_space(40, 3000)\n\nfor c_init in [0.1, 0.2, 0.3, 0.4, 0.5]:\n compute_path(k_init, c_init, steps, ax, output=False)", "Let's find the optimal path. The following code makes a plot that relates a guess of $c(0)$ to the final $c(t)$ and $k(t)$ for large $t$.", "k_init = 0.4\nsteps = 30000\n\n# set of guesses about c(0)\nc_guess = np.linspace(0.40, 0.50, 1000)\n\nk_final = []\nc_final = []\nfor c0 in c_guess:\n k, c = compute_path(k_init, c0, steps, output=True)\n \n # Final values\n k_final.append(k[-1])\n c_final.append(c[-1])\n \nplt.plot(c_guess, k_final, label='lim k')\nplt.plot(c_guess, c_final, label='lim c')\nplt.legend()", "As you can clearly see, there is a critical value around 0.41. To know the exact value of the threshold, execute the following code.", "cdiff = [c1 - c0 for c0, c1 in zip(c_final[:-1], c_final[1:])]\nc_optimal = c_guess[cdiff.index(max(cdiff))]\nc_optimal\n\nfig, ax = phase_space(7.5, 300)\ncompute_path(k_init, c_optimal, steps=15000, ax=ax, output=False)", "It still doesn't converge, which you can see if you make step size larger. \nAs a matter of fact this is the limitation of the method presented here. Moving along the optimal path is like walking on a narrow ridge of a mountain. A simulation of the optimal path as above is like rolling a pebble along the ridge. Even if you made the best shot, it would certainly fall down to the lower ground because of a small misstep or of bumpy surface; in computer simulation such errors are inevitable due to the internal representation (approximation) of real numbers.\nYou don't want to use it in practice.\nInstead, we usually employ the dynamic programing approach for which stability of the algorithm is known. That is beyond the scope of this course." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
ccasotto/rmtk
rmtk/vulnerability/derivation_fragility/equivalent_linearization/lin_miranda_2008/lin_miranda_2008.ipynb
agpl-3.0
[ "Lin and Miranda (2008)\nThis method, described in Lin and Miranda (2008), estimates the maximum inelastic displacement of an existing structure based on the maximum elastic displacement response of its equivalent linear system without the need of iterations, based on the strength ratio. The equivalent linear system has a longer period of vibration and a higher viscous damping than the original system. The estimation of these parameters is based on the strength ratio $R$.\nNote: To run the code in a cell:\n\nClick on the cell to select it.\nPress SHIFT+ENTER on your keyboard or press the play button (<button class='fa fa-play icon-play btn btn-xs btn-default'></button>) in the toolbar above.", "import lin_miranda_2008\nfrom rmtk.vulnerability.common import utils\n%matplotlib inline ", "Load capacity curves\nIn order to use this methodology, it is necessary to provide one (or a group) of capacity curves, defined according to the format described in the RMTK manual.\nPlease provide the location of the file containing the capacity curves using the parameter capacity_curves_file.", "capacity_curves_file = \"../../../../../../rmtk_data/capacity_curves_Sa-Sd.csv\"\n\ncapacity_curves = utils.read_capacity_curves(capacity_curves_file)\nutils.plot_capacity_curves(capacity_curves)", "Load ground motion records\nPlease indicate the path to the folder containing the ground motion records to be used in the analysis through the parameter gmrs_folder.\nNote: Each accelerogram needs to be in a separate CSV file as described in the RMTK manual.\nThe parameters minT and maxT are used to define the period bounds when plotting the spectra for the provided ground motion fields.", "gmrs_folder = \"../../../../../../rmtk_data/accelerograms\"\ngmrs = utils.read_gmrs(gmrs_folder)\nminT, maxT = 0.1, 2.0\nutils.plot_response_spectra(gmrs, minT, maxT)", "Load damage state thresholds\nPlease provide the path to your damage model file using the parameter damage_model_file in the cell below.\nThe damage types currently supported are: capacity curve dependent, spectral displacement and interstorey drift. If the damage model type is interstorey drift the user can provide the pushover curve in terms of Vb-dfloor to be able to convert interstorey drift limit states to roof displacements and spectral displacements, otherwise a linear relationship is assumed.", "damage_model_file = \"../../../../../../rmtk_data/damage_model.csv\"\n\ndamage_model = utils.read_damage_model(damage_model_file)", "Obtain the damage probability matrix", "PDM, Sds = lin_miranda_2008.calculate_fragility(capacity_curves, gmrs, damage_model)", "Fit lognormal CDF fragility curves\nThe following parameters need to be defined in the cell below in order to fit lognormal CDF fragility curves to the damage probability matrix obtained above:\n1. IMT: This parameter specifies the intensity measure type to be used. Currently supported options are \"PGA\", \"Sd\" and \"Sa\".\n2. period: This parameter defines the time period of the fundamental mode of vibration of the structure.\n3. damping_ratio: This parameter defines the damping ratio for the structure.\n4. regression_method: This parameter defines the regression method to be used for estimating the parameters of the fragility functions. 
The valid options are \"least squares\" and \"max likelihood\".", "IMT = \"Sd\"\nperiod = 2.0\ndamping_ratio = 0.05\nregression_method = \"max likelihood\"\n\nfragility_model = utils.calculate_mean_fragility(gmrs, PDM, period, damping_ratio, \n IMT, damage_model, regression_method)", "Plot fragility functions\nThe following parameters need to be defined in the cell below in order to plot the lognormal CDF fragility curves obtained above:\n* minIML and maxIML: These parameters define the limits of the intensity measure level for plotting the functions", "minIML, maxIML = 0.01, 2.00\n\nutils.plot_fragility_model(fragility_model, minIML, maxIML)", "Save fragility functions\nThe derived parametric fragility functions can be saved to a file in either CSV format or in the NRML format that is used by all OpenQuake input models. The following parameters need to be defined in the cell below in order to save the lognormal CDF fragility curves obtained above:\n1. taxonomy: This parameter specifies a taxonomy string for the the fragility functions.\n2. minIML and maxIML: These parameters define the bounds of applicability of the functions.\n3. output_type: This parameter specifies the file format to be used for saving the functions. Currently, the formats supported are \"csv\" and \"nrml\".", "taxonomy = \"RC\"\nminIML, maxIML = 0.01, 2.00\noutput_type = \"nrml\"\noutput_path = \"../../../../../../rmtk_data/output/\"\n\nutils.save_mean_fragility(taxonomy, fragility_model, minIML, maxIML, output_type, output_path)", "Obtain vulnerability function\nA vulnerability model can be derived by combining the set of fragility functions obtained above with a consequence model. In this process, the fractions of buildings in each damage state are multiplied by the associated damage ratio from the consequence model, in order to obtain a distribution of loss ratio for each intensity measure level. \nThe following parameters need to be defined in the cell below in order to calculate vulnerability functions using the above derived fragility functions:\n1. cons_model_file: This parameter specifies the path of the consequence model file.\n2. imls: This parameter specifies a list of intensity measure levels in increasing order at which the distribution of loss ratios are required to be calculated.\n3. distribution_type: This parameter specifies the type of distribution to be used for calculating the vulnerability function. The distribution types currently supported are \"lognormal\", \"beta\", and \"PMF\".", "cons_model_file = \"../../../../../../rmtk_data/cons_model.csv\"\nimls = [0.05, 0.10, 0.15, 0.20, 0.25, 0.30, 0.35, 0.40, 0.45, 0.50, \n 0.60, 0.70, 0.80, 0.90, 1.00, 1.20, 1.40, 1.60, 1.80, 2.00]\ndistribution_type = \"lognormal\"\n\ncons_model = utils.read_consequence_model(cons_model_file)\nvulnerability_model = utils.convert_fragility_vulnerability(fragility_model, cons_model, \n imls, distribution_type)", "Plot vulnerability function", "utils.plot_vulnerability_model(vulnerability_model)", "Save vulnerability function\nThe derived parametric or nonparametric vulnerability function can be saved to a file in either CSV format or in the NRML format that is used by all OpenQuake input models. The following parameters need to be defined in the cell below in order to save the lognormal CDF fragility curves obtained above:\n1. taxonomy: This parameter specifies a taxonomy string for the the fragility functions.\n3. output_type: This parameter specifies the file format to be used for saving the functions. 
Currently, the formats supported are \"csv\" and \"nrml\".", "taxonomy = \"RC\"\noutput_type = \"nrml\"\noutput_path = \"../../../../../../rmtk_data/output/\"\n\nutils.save_vulnerability(taxonomy, vulnerability_model, output_type, output_path)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
GoogleCloudPlatform/asl-ml-immersion
notebooks/launching_into_ml/solutions/1_explore_data.ipynb
apache-2.0
[ "Explore and create ML datasets\nLearning Objectives\n* Access and explore a public BigQuery dataset on NYC Taxi Cab rides\n* Visualize your dataset using the Seaborn library\n* Inspect and clean-up the dataset for future ML model training\n* Create a benchmark to judge future ML model performance off of\nOverview\nIn this notebook, we will explore data corresponding to taxi rides in New York City to build a Machine Learning model in support of a fare-estimation tool. The idea is to suggest a likely fare to taxi riders so that they are not surprised, and so that they can protest if the charge is much higher than expected.", "import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom google.cloud import bigquery", "<h3> Extract sample data from BigQuery </h3>\n\nThe dataset that we will use is <a href=\"https://bigquery.cloud.google.com/table/nyc-tlc:yellow.trips\">a BigQuery public dataset</a>. Click on the link, and look at the column names. Switch to the Details tab to verify that the number of records is one billion, and then switch to the Preview tab to look at a few rows.\nLet's write a SQL query to pick up interesting fields from the dataset. It's a good idea to get the timestamp in a predictable format.", "%%bigquery\nSELECT\n FORMAT_TIMESTAMP( \"%Y-%m-%d %H:%M:%S %Z\", pickup_datetime) AS pickup_datetime,\n pickup_longitude,\n pickup_latitude,\n dropoff_longitude,\n dropoff_latitude,\n passenger_count,\n trip_distance,\n tolls_amount,\n fare_amount,\n total_amount\nFROM\n `nyc-tlc.yellow.trips`\nLIMIT\n 10", "Let's increase the number of records so that we can do some neat graphs. There is no guarantee about the order in which records are returned, and so no guarantee about which records get returned if we simply increase the LIMIT. To properly sample the dataset, let's use the HASH of the pickup time and return 1 in 100,000 records -- because there are 1 billion records in the data, we should get back approximately 10,000 records if we do this.\nWe will also store the BigQuery result in a Pandas dataframe named \"trips\"", "%%bigquery trips\nSELECT\n FORMAT_TIMESTAMP( \"%Y-%m-%d %H:%M:%S %Z\", pickup_datetime) AS pickup_datetime,\n pickup_longitude,\n pickup_latitude,\n dropoff_longitude,\n dropoff_latitude,\n passenger_count,\n trip_distance,\n tolls_amount,\n fare_amount,\n total_amount\nFROM\n `nyc-tlc.yellow.trips`\nWHERE\n ABS(MOD(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING)), 100000)) = 1\n\nprint(len(trips))\n\n# We can slice Pandas dataframes as if they were arrays\ntrips[:10]", "<h3> Exploring data </h3>\n\nLet's explore this dataset and clean it up as necessary. We'll use the Python Seaborn package to visualize graphs and Pandas to do the slicing and filtering.", "ax = sns.regplot(\n x=\"trip_distance\",\n y=\"fare_amount\",\n fit_reg=False,\n ci=None,\n truncate=True,\n data=trips,\n)\nax.figure.set_size_inches(10, 8)", "Hmm ... do you see something wrong with the data that needs addressing?\nIt appears that we have a lot of invalid data that is being coded as zero distance and some fare amounts that are definitely illegitimate. Let's remove them from our analysis. 
We can do this by modifying the BigQuery query to keep only trips longer than zero miles and fare amounts that are at least the minimum cab fare ($2.50).\nNote the extra WHERE clauses.", "%%bigquery trips\nSELECT\n FORMAT_TIMESTAMP( \"%Y-%m-%d %H:%M:%S %Z\", pickup_datetime) AS pickup_datetime,\n pickup_longitude,\n pickup_latitude,\n dropoff_longitude,\n dropoff_latitude,\n passenger_count,\n trip_distance,\n tolls_amount,\n fare_amount,\n total_amount\nFROM\n `nyc-tlc.yellow.trips`\nWHERE\n ABS(MOD(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING)), 100000)) = 1\n AND trip_distance > 0\n AND fare_amount >= 2.5\n\nprint(len(trips))\n\nax = sns.regplot(\n x=\"trip_distance\",\n y=\"fare_amount\",\n fit_reg=False,\n ci=None,\n truncate=True,\n data=trips,\n)\nax.figure.set_size_inches(10, 8)", "What's up with the streaks around 45 dollars and 50 dollars? Those are fixed-amount rides from JFK and La Guardia airports into anywhere in Manhattan, i.e. to be expected. Let's list the data to make sure the values look reasonable.\nLet's also examine whether the toll amount is captured in the total amount.", "tollrides = trips[trips[\"tolls_amount\"] > 0]\ntollrides[tollrides[\"pickup_datetime\"] == \"2012-02-27 09:19:10 UTC\"]\n\nnotollrides = trips[trips[\"tolls_amount\"] == 0]\nnotollrides[notollrides[\"pickup_datetime\"] == \"2012-02-27 09:19:10 UTC\"]", "Looking at a few samples above, it should be clear that the total amount reflects fare amount, toll and tip somewhat arbitrarily -- this is because when customers pay cash, the tip is not known. So, we'll use the sum of fare_amount + tolls_amount as what needs to be predicted. Tips are discretionary and do not have to be included in our fare estimation tool.\nLet's also look at the distribution of values within the columns.", "trips.describe()", "Hmm ... The min, max of longitude look strange.\nFinally, let's actually look at the start and end of a few of the trips.", "def showrides(df, numlines):\n lats = []\n lons = []\n for iter, row in df[:numlines].iterrows():\n lons.append(row[\"pickup_longitude\"])\n lons.append(row[\"dropoff_longitude\"])\n lons.append(None)\n lats.append(row[\"pickup_latitude\"])\n lats.append(row[\"dropoff_latitude\"])\n lats.append(None)\n\n sns.set_style(\"darkgrid\")\n plt.figure(figsize=(10, 8))\n plt.plot(lons, lats)\n\nshowrides(notollrides, 10)\n\nshowrides(tollrides, 10)", "As you'd expect, rides that involve a toll are longer than the typical ride.\n<h3> Quality control and other preprocessing </h3>\n\nWe need to do some clean-up of the data:\n<ol>\n<li>New York city longitudes are around -74 and latitudes are around 41.</li>\n<li>We shouldn't have zero passengers.</li>\n<li>Clean up the total_amount column to reflect only fare_amount and tolls_amount, and then remove those two columns.</li>\n<li>Before the ride starts, we'll know the pickup and dropoff locations, but not the trip distance (that depends on the route taken), so remove it from the ML dataset</li>\n<li>Discard the timestamp</li>\n</ol>\n\nWe could do preprocessing in BigQuery, similar to how we removed the zero-distance rides, but just to show you another option, let's do this in Python. In production, we'll have to carry out the same preprocessing on the real-time input data. 
\nThis sort of preprocessing of input data is quite common in ML, especially if the quality-control is dynamic.", "def preprocess(trips_in):\n trips = trips_in.copy(deep=True)\n trips.fare_amount = trips.fare_amount + trips.tolls_amount\n del trips[\"tolls_amount\"]\n del trips[\"total_amount\"]\n del trips[\"trip_distance\"] # we won't know this in advance!\n\n qc = np.all(\n [\n trips[\"pickup_longitude\"] > -78,\n trips[\"pickup_longitude\"] < -70,\n trips[\"dropoff_longitude\"] > -78,\n trips[\"dropoff_longitude\"] < -70,\n trips[\"pickup_latitude\"] > 37,\n trips[\"pickup_latitude\"] < 45,\n trips[\"dropoff_latitude\"] > 37,\n trips[\"dropoff_latitude\"] < 45,\n trips[\"passenger_count\"] > 0,\n ],\n axis=0,\n )\n\n return trips[qc]\n\n\ntripsqc = preprocess(trips)\ntripsqc.describe()", "The quality control has removed about 300 rows (11400 - 11101) or about 3% of the data. This seems reasonable.\nLet's move on to creating the ML datasets.\n<h3> Create ML datasets </h3>\n\nLet's split the QCed data randomly into training, validation and test sets.\nNote that this is not the entire data. We have 1 billion taxicab rides. This is just splitting the 10,000 rides to show you how it's done on smaller datasets. In reality, we'll have to do it on all 1 billion rides and this won't scale.", "shuffled = tripsqc.sample(frac=1)\ntrainsize = int(len(shuffled[\"fare_amount\"]) * 0.70)\nvalidsize = int(len(shuffled[\"fare_amount\"]) * 0.15)\n\ndf_train = shuffled.iloc[:trainsize, :]\ndf_valid = shuffled.iloc[trainsize : (trainsize + validsize), :] # noqa: E203\ndf_test = shuffled.iloc[(trainsize + validsize) :, :] # noqa: E203\n\ndf_train.head(n=1)\n\ndf_train.describe()\n\ndf_valid.describe()\n\ndf_test.describe()", "Let's write out the three dataframes to appropriately named csv files. We can use these csv files for local training (recall that these files represent only 1/100,000 of the full dataset) just to verify our code works, before we run it on all the data.", "def to_csv(df, filename):\n outdf = df.copy(deep=False)\n outdf.loc[:, \"key\"] = np.arange(0, len(outdf)) # rownumber as key\n # Reorder columns so that target is first column\n cols = outdf.columns.tolist()\n cols.remove(\"fare_amount\")\n cols.insert(0, \"fare_amount\")\n print(cols) # new order of columns\n outdf = outdf[cols]\n outdf.to_csv(filename, header=False, index_label=False, index=False)\n\n\nto_csv(df_train, \"taxi-train.csv\")\nto_csv(df_valid, \"taxi-valid.csv\")\nto_csv(df_test, \"taxi-test.csv\")\n\n!head -10 taxi-valid.csv", "<h3> Verify that datasets exist </h3>", "!ls -l *.csv", "We have 3 .csv files corresponding to train, valid, test. The ratio of file-sizes correspond to our split of the data.", "%%bash\nhead taxi-train.csv", "Looks good! We now have our ML datasets and are ready to train ML models, validate them and evaluate them.\n<h3> Benchmark </h3>\n\nBefore we start building complex ML models, it is a good idea to come up with a very simple model and use that as a benchmark.\nMy model is going to be to simply divide the mean fare_amount by the mean trip_distance to come up with a rate and use that to predict. 
Let's compute the RMSE of such a model.", "def distance_between(lat1, lon1, lat2, lon2):\n # Haversine formula to compute distance \"as the crow flies\".\n lat1_r = np.radians(lat1)\n lat2_r = np.radians(lat2)\n lon_diff_r = np.radians(lon2 - lon1)\n sin_prod = np.sin(lat1_r) * np.sin(lat2_r)\n cos_prod = np.cos(lat1_r) * np.cos(lat2_r) * np.cos(lon_diff_r)\n minimum = np.minimum(1, sin_prod + cos_prod)\n dist = np.degrees(np.arccos(minimum)) * 60 * 1.515 * 1.609344\n\n return dist\n\n\ndef estimate_distance(df):\n return distance_between(\n df[\"pickuplat\"], df[\"pickuplon\"], df[\"dropofflat\"], df[\"dropofflon\"]\n )\n\n\ndef compute_rmse(actual, predicted):\n return np.sqrt(np.mean((actual - predicted) ** 2))\n\n\ndef print_rmse(df, rate, name):\n print(\n \"{1} RMSE = {0}\".format(\n compute_rmse(df[\"fare_amount\"], rate * estimate_distance(df)), name\n )\n )\n\n\nFEATURES = [\"pickuplon\", \"pickuplat\", \"dropofflon\", \"dropofflat\", \"passengers\"]\nTARGET = \"fare_amount\"\ncolumns = list([TARGET])\ncolumns.append(\"pickup_datetime\")\ncolumns.extend(FEATURES) # in CSV, target is first column, after the features\ncolumns.append(\"key\")\ndf_train = pd.read_csv(\"taxi-train.csv\", header=None, names=columns)\ndf_valid = pd.read_csv(\"taxi-valid.csv\", header=None, names=columns)\ndf_test = pd.read_csv(\"taxi-test.csv\", header=None, names=columns)\nrate = df_train[\"fare_amount\"].mean() / estimate_distance(df_train).mean()\nprint(f\"Rate = ${rate}/km\")\nprint_rmse(df_train, rate, \"Train\")\nprint_rmse(df_valid, rate, \"Valid\")\nprint_rmse(df_test, rate, \"Test\")", "<h2>Benchmark on same dataset</h2>\n\nThe RMSE depends on the dataset, and for comparison, we have to evaluate on the same dataset each time. We'll use this query in later labs:", "validation_query = \"\"\"\nSELECT\n (tolls_amount + fare_amount) AS fare_amount,\n pickup_datetime,\n pickup_longitude AS pickuplon,\n pickup_latitude AS pickuplat,\n dropoff_longitude AS dropofflon,\n dropoff_latitude AS dropofflat,\n passenger_count*1.0 AS passengers,\n \"unused\" AS key\nFROM\n `nyc-tlc.yellow.trips`\nWHERE\n ABS(MOD(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING)), 10000)) = 2\n AND trip_distance > 0\n AND fare_amount >= 2.5\n AND pickup_longitude > -78\n AND pickup_longitude < -70\n AND dropoff_longitude > -78\n AND dropoff_longitude < -70\n AND pickup_latitude > 37\n AND pickup_latitude < 45\n AND dropoff_latitude > 37\n AND dropoff_latitude < 45\n AND passenger_count > 0\n\"\"\"\n\nclient = bigquery.Client()\ndf_valid = client.query(validation_query).to_dataframe()\nprint_rmse(df_valid, 2.59988, \"Final Validation Set\")", "The simple distance-based rule gives us a RMSE of <b>$8.14</b>. We have to beat this, of course, but you will find that simple rules of thumb like this can be surprisingly difficult to beat.\nLet's be ambitious, though, and make our goal to build ML models that have a RMSE of less than $6 on the test set.\nCopyright 2020 Google Inc.\nLicensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at\nhttp://www.apache.org/licenses/LICENSE-2.0\nUnless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
fangohr/oommf-python
new/notebooks/fmr_standard_problem.ipynb
bsd-2-clause
[ "FMR standard problem\nAuthor: Marijan Beg\nDate: 11 May 2016\nProblem specification\nWe choose a cuboidal thin film permalloy sample measuring $120 \\times 120 \\times 10 \\,\\text{nm}^{3}$. The choice of a cuboid is important as it ensures that the finite difference method employed by OOMMF does not introduce errors due to irregular boundaries that cannot be discretized well. We choose the thin film geometry to be thin enough so that the variation of magnetization dynamics along the out-of-film direction can be neglected. Material parameters based on permalloy are:\n\nexchange energy constant $A = 1.3 \\times 10^{-11} \\,\\text{J/m}$,\nmagnetisation saturation $M_\\text{s} = 8 \\times 10^{5} \\,\\text{A/m}$,\nGilbert damping $\\alpha = 0.008$.\n\nAn external magnetic bias field with magnitude $80 \\,\\text{kA/m}$ is applied along the direction $e = (1, 0.715, 0)$. We choose the external magnetic field direction slightly off the sample diagonal in order to break the system’s symmetry and thus avoid degenerate eigenmodes. First, we initialize the system with a uniform out-of-plane magnetization $m_{0} = (0, 0, 1)$. The system is allowed to relax for $5 \\,\\text{ns}$, which was found to be sufficient time to obtain a well-converged equilibrium magnetization configuration. We refer to this stage of simulation as the relaxation stage, and its final relaxed magnetization configuration is saved to serve as the initial configuration for the next dynamic stage. Because we want to use a well defined method that is supported by all simulation tools, we minimize the system’s energy by integrating the LLG equation with a large, quasistatic Gilbert damping $\\alpha = 1$ for $5 \\,\\text{ns}$. In the next step (dynamic stage), a simulation is started using the equilibrium magnetisation configuration from the relaxation stage as the initial configuration. Now, the direction of an external magnetic field is altered to $e = (1, 0.7, 0)$. This simulation stage runs for $T = 20 \\,\\text{ns}$ while the (average and spatially resolved) magnetization $M(t)$ is recorded every $\\Delta t = 5 \\,\\text{ps}$. The Gilbert damping in this dynamic simulation stage is $\\alpha = 0.008$.\nDetails of this standard problem specification can be found in Ref. 
1.", "!rm -rf fmr_standard_problem\n\nimport numpy as np\n\nL = 120e-9 # x and y dimensions of the sample(m)\nthickness = 10e-9 # sample thickness (m)\ndx = dy = dz = 5e-9 # discretisation in x, y, and z directions (m)\n\n# Minimum sample coordinate.\ncmin = (0, 0, 0)\n# Maximum sample coordinate.\ncmax = (L, L, thickness)\n# Discretisation.\nd = (dx, dy, dz)\n\nMs = 8e5 # saturation magnetisation (A/m)\nA = 1.3e-11 # exchange energy constant (J/m)\nH = 8e4 * np.array([0.81345856316858023, 0.58162287266553481, 0.0])", "Relaxation stage\nFirstly, all required modules are imported.", "import sys\n\nsys.path.append('../')\n\nfrom sim import Sim\nfrom atlases import BoxAtlas\nfrom meshes import RectangularMesh\nfrom energies.exchange import UniformExchange\nfrom energies.demag import Demag\nfrom energies.zeeman import FixedZeeman", "Now, the simulation object can be created and exchange, demagnetisation, and Zeeman energies are added.", "# Create a BoxAtlas object.\natlas = BoxAtlas(cmin, cmax)\n\n# Create a mesh object.\nmesh = RectangularMesh(atlas, d)\n\n# Create a simulation object.\nsim = Sim(mesh, Ms, name='fmr_standard_problem')\n\n# Add exchange energy.\nsim.add(UniformExchange(A))\n\n# Add demagnetisation energy.\nsim.add(Demag())\n\n# Add Zeeman energy.\nsim.add(FixedZeeman(H))", "At this point, the system is initialised in the out-of-plane direction. As an example, we use a python function. This initialisation can also be achieved using the tuple or list object.", "# Python function for initialising the system's magnetisation.\ndef m_init(pos):\n return (0, 0, 1)\n\n# Initialise the magnetisation.\nsim.set_m(m_init)\n\n# The same initialisation can be achieved using:\n# sim.set_m((0, 0, 1))\n# sim.set_m([0, 0, 1])\n# sim.set_m(np.array([0, 0, 1]))", "Finally, the system is relaxed for $5 \\,\\text{ns}$.", "sim.run_until(5e-9)", "We can now load the relaxed state to the Field object and plot the $z$ slice of magnetisation.", "%matplotlib inline\nsim.m.plot_slice('z', 5e-9)", "Dynamic stage\nIn the dynamic stage, we use the relaxed state from the relaxation stage.", "# Change external magnetic field.\nH = 8e4 * np.array([0.81923192051904048, 0.57346234436332832, 0.0])\nsim.set_H(H)", "In this stage, the Gilbert damping is reduced.", "sim.alpha = 0.008", "Finally, we run the multiple stage simulation.", "total_time = 20e-9\nstages = 4000\n\nsim.run_until(total_time, stages)", "Postprocessing\nFrom the obtained vector field samples, we can compute the average of magnetisation $y$ component and plot its time evolution.", "import glob\nimport matplotlib.pyplot as plt\nfrom field import load_oommf_file\n\n# Compute the <my>\nt_list = []\nmyav = []\nfor i in range(stages):\n omf_filename = glob.glob('fmr_standard_problem/fmr_standard_problem-Oxs_TimeDriver-Spin-%09d-*.omf' % i)[0]\n m_field = load_oommf_file(omf_filename)\n t_list.append(i*total_time/stages)\n myav.append(m_field.average()[1])\n \nt_array = np.array(t_list)\nmyav = np.array(myav)\n\n# Plot <my> time evolution.\nplt.plot(t_array/1e-9, myav)\nplt.xlabel('t (ns)')\nplt.ylabel('my average')\nplt.grid()", "From the $<m_{y}>$ time evolution, we can compute and plot its Fourier transform.", "import scipy.fftpack\n\npsd = np.log10(np.abs(scipy.fftpack.fft(myav))**2)\nf_axis = scipy.fftpack.fftfreq(stages, d=total_time/stages)\n\nplt.plot(f_axis/1e9, psd)\nplt.xlim([0, 12])\nplt.ylim([-4.5, 2])\nplt.xlabel('f (GHz)')\nplt.ylabel('Psa (a.u.)')\nplt.grid()", "References\n[1] A. Baker, M. Beg, G. Ashton, M. Albert, D. Chernyshenko, W. 
Wang, S. Zhang, M.-A. Bisotti, M. Franchin, C.L. Hu, R. Stamps, T. Hesjedal, and H. Fangohr, arXiv 1603.0541 (2016)." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
gangadhara691/gangadhara691.github.io
P3 wrangle_data/DataWrangling_ganga.ipynb
mit
[ "P3 - Data Wrangling with MongoDB\nOpenStreetMap Project Data Wrangling with MongoDB\nGangadhara Naga Sai<a name=\"top\"></a>\nData used -<a href=https://mapzen.com/metro-extracts/> MapZen Weekly OpenStreetMaps Metro Extracts</a>\nMap Areas:\n These two maps are selected since ,right now i am living at Hoodi,Bengaluru. And my dream is to do my masters in japan in robotics,so i had selected locality of University of tokyo, Bunkyo.I really wanted to explore differences between the regions.\n\n<a href=https://mapzen.com/data/metro-extracts/your-extracts/fdd7c4ef0518> Bonkyu,Tokyo,Japan. </a> \n<a href=https://mapzen.com/data/metro-extracts/your-extracts/c1f2842408ac> Hoodi,Bengaluru,india </a>\n\n<hr>\n\n\nProblems Encountered in the Map\nFiltering Different Language names\nOver-­abbreviated Names\nMerging both cities\nData Overview\nAdditional Data Exploration using MongoDB\nConclusion\n\n<hr>\n\n<h2><a name=\"problems\"></a> **1. Problems Encountered**</h2>\n\n\nSome of names were in different Languages so ,i had to filter out them and select english names for both maps Hoodi and Bunkyo \nStreet names with different types of abbreviations. (i.e. 'Clark Ave SE' or 'Eubank Northeast Ste E-18') \nTwo cities have to be accessed from one database\n\nNames in Different Language<a name=\"Language\"></a>\nDifferent regions have different languages ,and we find that someof names were in different language which are filltered to get only english names.\nWhich would check weather the charecters belong to ascii or not", "def isEnglish(string):\n try:\n string.encode('ascii')\n except UnicodeEncodeError:\n return False\n else:\n return True", "<hr>\n\nOver-­abbreviated Names<a name=\"abbr\"></a>\nSince the most of data being manually uploaded, there are lot of abbreviations in street names,locality names.\nWhere they are filtered and replaced with full names.", "#the city below can be hoodi or bunkyo\nfor st_type, ways in city_types.iteritems():\n for name in ways:\n better_name = update_name(name, mapping)\n if name != better_name:\n print name, \"=>\", better_name\n\n#few examples \nBunkyo:\nMeidai Jr. High Sch. => Meidai Junior High School\nSt. Mary's Cathedral => Saint Mary's Cathedral\nShinryukei brdg. E. => Shinryukei Bridge East\nIidabashi Sta. E. => Iidabashi Station East\n...\n\nHoodi:\nSt. Thomas School => Saint Thomas School\nOpp. Jagrithi Apartment => Opposite Jagrithi Apartment\n...", "<hr>\n\nMerging Both cities<a name=\"combine_cities\"></a>\nThese two maps are selected since ,right now i am living at Hoodi,Bengaluru. And one day i want do my masters in japan in robotics,so i had selected locality of University of tokyo, Bunkyo.I really wanted to explore differences between the regions. \nI need to add a tag named \"city\" so i can differentiate them from the database.\n<hr>\n\n2. 
Data Overview<a name=\"data_overview\"></a>\nThis section contains basic statistics about the dataset and the MongoDB queries used to gather them.\nFile Sizes", "bangalore.osm -40MB\nbangalore.osm.json-51MB\ntokyo1.osm- 82MB\ntokyo1.osm.json-102.351MB", "Number of documents", "print \"Bunkyo:\",mongo_db.cities.find({'city':'bunkyo'}).count()\nprint \"Hoodi:\",mongo_db.cities.find({'city':'hoodi'}).count()", "Bunkyo: 1268292\nHoodi: 667842\n\nNumber of node nodes.", "print \"Bunkyo:\",mongo_db.cities.find({\"type\":\"node\",\n 'city':'bunkyo'}).count()\nprint \"Hoodi:\",mongo_db.cities.find({\"type\":\"node\",\n 'city':'hoodi'}).count()\n\nBunkyo: 1051170\nHoodi: 548862", "Number of way nodes.", "print \"Bunkyo:\",mongo_db.cities.find({'type':'way',\n 'city':'bunkyo'}).count()\nprint \"Hoodi:\",mongo_db.cities.find({'type':'way',\n 'city':'hoodi'}).count()\n\nBunkyo: 217122\nHoodi: 118980", "Total Number of contributor.", "print \"Constributors:\", len(mongo_db.cities.distinct(\"created.user\"))\n\nContributors: 858", "<hr>\n\n3. Additional Data Exploration using MongoDB<a name=\"exploration\"></a>\nI am going to use the pipeline function to retrive data from the database", "def pipeline(city):\n p= [{\"$match\":{\"created.user\":{\"$exists\":1},\n \"city\":city}},\n {\"$group\": {\"_id\": {\"City\":\"$city\",\n \"User\":\"$created.user\"},\n \"contribution\": {\"$sum\": 1}}}, \n {\"$project\": {'_id':0,\n \"City\":\"$_id.City\",\n \"User_Name\":\"$_id.User\",\n \"Total_contribution\":\"$contribution\"}},\n {\"$sort\": {\"Total_contribution\": -1}},\n {\"$limit\" : 5 }]\n return p\nresult1 =mongo_db[\"cities\"].aggregate(pipeline('bunkyo'))\nfor each in result1: \n print(each)\nprint(\"\\n\")\nresult2 =mongo_db[\"cities\"].aggregate(pipeline('hoodi'))\nfor each in result2: \n print(each)\n", "The top contributors for hoodi are no where near since bunkyo being a more compact region than hoodi ,there are more places to contribute.\n<hr>\n\nTo get the top Amenities in Hoodi and Bunkyo\nI will be showing the pipeline that will go in the above mentioned \"Pipleline\" function", "pipeline=[{\"$match\":{\"Additional Information.amenity\":{\"$exists\":1},\n \"city\":city}},\n {\"$group\": {\"_id\": {\"City\":\"$city\",\n \"Amenity\":\"$Additional Information.amenity\"},\n \"count\": {\"$sum\": 1}}},\n {\"$project\": {'_id':0,\n \"City\":\"$_id.City\",\n \"Amenity\":\"$_id.Amenity\",\n \"Count\":\"$count\"}},\n {\"$sort\": {\"Count\": -1}},\n {\"$limit\" : 10 }]", "As compared to hoodi ,bunkyo have few atms,And parking can be commonly found in bunkyo locality\n<hr>\n\npopular places of worship", " p = [{\"$match\":{\"Additional Information.amenity\":{\"$exists\":1},\n \"Additional Information.amenity\":\"place_of_worship\",\n \"city\":city}},\n {\"$group\":{\"_id\": {\"City\":\"$city\",\n \"Religion\":\"$Additional Information.religion\"},\n \"count\":{\"$sum\":1}}},\n {\"$project\":{\"_id\":0,\n \"City\":\"$_id.City\",\n \"Religion\":\"$_id.Religion\",\n \"Count\":\"$count\"}},\n {\"$sort\":{\"Count\":-1}},\n {\"$limit\":6}]", "As expected japan is popular with buddism,\nbut india being a secular country it will be having most of the reglious places of worship,where hinduism being majority\n<hr>\n\npopular restaurants", "p = [{\"$match\":{\"Additional Information.amenity\":{\"$exists\":1},\n \"Additional Information.amenity\":\"restaurant\",\n \"city\":city}},\n {\"$group\":{\"_id\":{\"City\":\"$city\",\n \"Food\":\"$Additional Information.cuisine\"},\n \"count\":{\"$sum\":1}}},\n 
{\"$project\":{\"_id\":0,\n \"City\":\"$_id.City\",\n \"Food\":\"$_id.Food\",\n \"Count\":\"$count\"}},\n {\"$sort\":{\"Count\":-1}}, \n {\"$limit\":6}]", "{u'Count': 582, u'City': u'bunkyo'}\n{u'Food': u'japanese', u'City': u'bunkyo', u'Count': 192}\n{u'Food': u'chinese', u'City': u'bunkyo', u'Count': 126}\n{u'Food': u'italian', u'City': u'bunkyo', u'Count': 69}\n{u'Food': u'indian', u'City': u'bunkyo', u'Count': 63}\n{u'Food': u'sushi', u'City': u'bunkyo', u'Count': 63}\n{u'Count': 213, u'City': u'hoodi'}\n{u'Food': u'regional', u'City': u'hoodi', u'Count': 75}\n{u'Food': u'indian', u'City': u'hoodi', u'Count': 69}\n{u'Food': u'chinese', u'City': u'hoodi', u'Count': 36}\n{u'Food': u'international', u'City': u'hoodi', u'Count': 24}\n{u'Food': u'Andhra', u'City': u'hoodi', u'Count': 21}\nIndian style cusine in Bunkyo seems famous, Which will be better if i go to japan and do my higher studies there.\n<hr>\n\npopular fast food joints", " p = [{\"$match\":{\"Additional Information.amenity\":{\"$exists\":1},\n \"Additional Information.amenity\":\"fast_food\",\n \"city\":city}},\n {\"$group\":{\"_id\":{\"City\":\"$city\",\n \"Food\":\"$Additional Information.cuisine\"},\n \"count\":{\"$sum\":1}}},\n {\"$project\":{\"_id\":0,\n \"City\":\"$_id.City\",\n \"Food\":\"$_id.Food\",\n \"Count\":\"$count\"}},\n {\"$sort\":{\"Count\":-1}}, \n {\"$limit\":6}]", "Burger seems very popular among japanese in fast foods,i was expecting ramen to be more popular\n, but in hoodi pizza is really common,being a metropolitan city.\n<hr>\n\nATM's near locality", " p = [{\"$match\":{\"Additional Information.amenity\":{\"$exists\":1},\n \"Additional Information.amenity\":\"atm\",\n \"city\":city}},\n {\"$group\":{\"_id\":{\"City\":\"$city\",\n \"Name\":\"$Additional Information.name:en\"},\n \"count\":{\"$sum\":1}}},\n {\"$project\":{\"_id\":0,\n \"City\":\"$_id.City\",\n \"Name\":\"$_id.Name\",\n \"Count\":\"$count\"}},\n {\"$sort\":{\"Count\":-1}}, \n {\"$limit\":4}]", "There are quite a few ATM in Bunkyo as compared to hoodi\n<hr>\n\nMartial arts or Dojo Center near locality", "## Martial arts or Dojo Center near locality\nimport re\n\npat = re.compile(r'dojo', re.I)\nd=mongo_db.cities.aggregate([{\"$match\":{ \"$or\": [ { \"Additional Information.name\": {'$regex': pat}}\n ,{\"Additional Information.amenity\": {'$regex': pat}}]}}\n ,{\"$group\":{\"_id\":{\"City\":\"$city\"\n , \"Sport\":\"$Additional Information.name\"}}}])\nfor each in d: \n print(each)\n\nbunkyo: \n{u'_id': {u'City': u'bunkyo', u'Sport': u'Aikikai Hombu Dojo'}}\n{u'_id': {u'City': u'bunkyo', u'Sport': u'Kodokan Dojo'}}\n\nhoodi:\n{u'_id': {u'City': u'hoodi', u'Sport': u\"M S Gurukkal's Kalari Academy\"}}\n", "I wanted to learn martial arts , \nIn japan is known for its akido and other ninjistsu martial arts , where i can find some in bunkyo\nWhere as in hoodi,india Kalaripayattu Martial Arts are one of the ancient arts that ever existed.\n<hr>\n\nmost popular shops.", " p = [{\"$match\":{\"Additional Information.shop\":{\"$exists\":1},\n \"city\":city}},\n {\"$group\":{\"_id\":{\"City\":\"$city\",\n \"Shop\":\"$Additional Information.shop\"},\n \"count\":{\"$sum\":1}}},\n {\"$project\": {'_id':0,\n \"City\":\"$_id.City\",\n \"Shop\":\"$_id.Shop\",\n \"Count\":\"$count\"}},\n {\"$sort\":{\"Count\":-1}},\n {\"$limit\":10}]\n\n{u'Shop': u'convenience', u'City': u'bunkyo', u'Count': 1035}\n{u'Shop': u'clothes', u'City': u'bunkyo', u'Count': 282}\n{u'Shop': u'books', u'City': u'bunkyo', u'Count': 225}\n{u'Shop': u'mobile_phone', u'City': u'bunkyo', 
u'Count': 186}\n{u'Shop': u'confectionery', u'City': u'bunkyo', u'Count': 156}\n{u'Shop': u'supermarket', u'City': u'bunkyo', u'Count': 150}\n{u'Shop': u'computer', u'City': u'bunkyo', u'Count': 126}\n{u'Shop': u'hairdresser', u'City': u'bunkyo', u'Count': 90}\n{u'Shop': u'electronics', u'City': u'bunkyo', u'Count': 90}\n{u'Shop': u'anime', u'City': u'bunkyo', u'Count': 90}\n\n\n{u'Shop': u'clothes', u'City': u'hoodi', u'Count': 342}\n{u'Shop': u'supermarket', u'City': u'hoodi', u'Count': 129}\n{u'Shop': u'bakery', u'City': u'hoodi', u'Count': 120}\n{u'Shop': u'shoes', u'City': u'hoodi', u'Count': 72}\n{u'Shop': u'furniture', u'City': u'hoodi', u'Count': 72}\n{u'Shop': u'sports', u'City': u'hoodi', u'Count': 66}\n{u'Shop': u'electronics', u'City': u'hoodi', u'Count': 60}\n{u'Shop': u'beauty', u'City': u'hoodi', u'Count': 54}\n{u'Shop': u'car', u'City': u'hoodi', u'Count': 36}\n{u'Shop': u'convenience', u'City': u'hoodi', u'Count': 36}\n\nThe general stores are quite common in both the places", "most popular supermarkets", " p = [{\"$match\":{\"Additional Information.shop\":{\"$exists\":1},\n \"city\":city,\n \"Additional Information.shop\":\"supermarket\"}},\n {\"$group\":{\"_id\":{\"City\":\"$city\",\n \"Supermarket\":\"$Additional Information.name\"},\n \"count\":{\"$sum\":1}}},\n {\"$project\": {'_id':0,\n \"City\":\"$_id.City\",\n \"Supermarket\":\"$_id.Supermarket\",\n \"Count\":\"$count\"}},\n {\"$sort\":{\"Count\":-1}},\n {\"$limit\":5}]\n\n{u'Count': 120, u'City': u'bunkyo'}\n{u'Count': 9, u'City': u'bunkyo', u'Supermarket': u'Maruetsu'}\n{u'Count': 3, u'City': u'bunkyo', u'Supermarket': u\"Y's Mart\"}\n{u'Count': 3, u'City': u'bunkyo', u'Supermarket': u'SainE'}\n{u'Count': 3, u'City': u'bunkyo', u'Supermarket': u'DAIMARU Peacock'}\n\n\n{u'Count': 9, u'City': u'hoodi', u'Supermarket': u'Reliance Fresh'}\n{u'Count': 9, u'City': u'hoodi'}\n{u'Count': 6, u'City': u'hoodi', u'Supermarket': u\"Nilgiri's\"}\n{u'Count': 3, u'City': u'hoodi', u'Supermarket': u'Royal Mart Supermarket'}\n{u'Count': 3, u'City': u'hoodi', u'Supermarket': u'Safal'}\n\n", "These are few common supermarket brands in both the cities\nAnd Nilgiris is like 500 meters away from my home.\n<hr>\n\n4. Conclusion<a name=\"conclusion\"></a>\nAfter such a investigation on this data i think i have become familiar with bunkyo region.\nI was expecting a difficulty in merging both the cities data into a single database ,but seem a simple key like city could differentiate them.\nThere might be even robust cleaning algorithms to a better and clean database,as most of the data is from gps that goes into OpenStreetMap.org. Which needed to be regularly cleaned.\nFrom the comparision of both the cities these are qiute similar and bunkyo region interests me even more to pursue higher studies.\n<hr>" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
taliamo/Final_Project
organ_pitch/Scripts/.ipynb_checkpoints/upload_env_data-checkpoint.ipynb
mit
[ "T. Martz-Oberlander, 2015-11-12, CO2 and Speed of Sound\nFormatting ENVIRONMENTAL CONDITIONS pipe organ data for Python operations\nNOTE: Here, pitch and frequency are used interchangeably to signify the speed of sound from organ pipes.\nThe entire script looks for mathematical relationships between CO2 concentration changes and pitch changes from a pipe organ. This script uploads, cleans data and organizes new dataframes, creates figures, and performs statistical tests on the relationships between variable CO2 and frequency of sound from a note played on a pipe organ.\nThis uploader script:\n1) Uploads CO2, temp, and RH data files;\n2) Munges it (creates a Date Time column for the time stamps), establishes column contents as floats;\n3) Calculates expected frequency, as per Cramer's equation;\n4) Imports output from pitch_data.py script, the dataframe with measured frequency;\n5) Plots expected frequency curve, CO2 (ppm) curve, and measured pitch points in a figure.\n[ Here I pursue data analysis route 1 (as mentionted in my organ_pitch/notebook.md file), which involves comparing one pitch dataframe with one dataframe of environmental characteristics taken at one sensor location. Both dataframes are compared by the time of data recorded. ]", "# I import useful libraries (with functions) so I can visualize my data\n# I use Pandas because this dataset has word/string column titles and I like the readability features of commands and finish visual products that Pandas offers\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport re\nimport numpy as np\n\n%matplotlib inline\n\n#I want to be able to easily scroll through this notebook so I limit the length of the appearance of my dataframes \nfrom pandas import set_option\nset_option('display.max_rows', 10)", "Uploaded RH and temp data into Python¶\nFirst I upload my data set(s). I am working with environmental data from different locations in the church at differnet dates. Files include: environmental characteristics (CO2, temperature (deg C), and relative humidity (RH) (%) measurements). \nI can discard the CO2_2 column values since they are false measurements logged from an empty input jack in the CO2 HOBOWare ^(r) device.", "#I import a temp and RH data file\nenv=pd.read_table('../Data/CO2May.csv', sep=',')\n\n#assigning columns names\nenv.columns=[['test', 'time','temp C', 'RH %', 'CO2_1', 'CO2_2']]\n\n#I display my dataframe\nenv\n\n#change data time variable to actual values of time. \nenv['time']= pd.to_datetime(env['time'])\n\n#print the new table and the type of data. \nprint(env)\n\nenv.dtypes", "Next\n1. Create a function for expected pitch (frequency of sound waves) from CO2 data\n2. Add expected_frequency to dataframe\nCalculated pitch from CO2 levels\nHere I use Cramer's equation for frequency of sound from CO2 concentration (1992). \nfreq = a0 + a1(T) + ... + (a9 +...) +... + a14(xc^2)\nwhere xc is the mole fraction of CO2 and T is temperature. Full derivation of these equations can be found in the \"Doc\" directory.\nI will later plot measured pitch (frequency) data points from my \"pitch\" data frame on top of these calculated frequency values for comparison.", "#Here I am trying to create a function for the above equation. \n#I want to plug in each CO2_ave value for a time stamp (row) from the \"env\" data frame above. 
\n\n#define coefficients (Cramer, 1992)\na0 = 331.5024\n#a1 = 0.603055\n#a2 = -0.000528\na9 = -(-85.20931) #need to account for negative values\n#a10 = -0.228525\na14 = 29.179762\n\n#xc = CO2 values from dataframe\n\n\n#test function\ndef test_cramer():\n assert a0 + ((a9)*400)/100 + a14*((400/1000000)**2) == 672.33964466, 'Equation failure'\n return()\n\ntest_cramer()\n\n#This function also converts ppm to mole fraction (just quantity as a proportion of total)\ndef cramer(data):\n '''Calculate pitch from CO2_1 concentration'''\n \n calc_freq = a0 + ((a9)*data)/100 + a14*((data/1000000)**2)\n \n return(calc_freq)\n\n#run the cramer values for the calculated frequency \n#calc_freq = cramer(env['calc_freq'])\n\n#define the new column as the output of the cramer function\n#env['calc_freq'] = calc_freq\n\n#Run the function for the input column (CO2 values)\nenv['calc_freq'] = cramer(env['CO2_1'])\n\ncramer(env['CO2_1'])\n\n#check the dataframe\n#calculated frequency values seem reasonable based on changes in CO2\nenv\n\n#Now I call in my measured pitch data, \n#to be able to visually compare calculated and measured\n\n#Import the measured pitch values--the output of pitch_data.py script\nmeasured_freq = pd.read_table('../Data/pitches.csv', sep=',')\n\n#change data time variable to actual values of time. \nenv['time']= pd.to_datetime(env['time'])\n\n#I test to make sure I'm importing the correct data\nmeasured_freq", "Visualizing the expected pitch values by time\n1. Plot calculated frequency, CO2 (ppm), and measured frequency values", "print(calc_freq)\n\n#define variables from dataframe columns\nCO2_1 = env[['CO2_1']]\n\ncalc_freq=env[['calc_freq']]\n\n#measured_pitch = output_from_'pitch_data.py'\n\n\n#want to set x-axis as date_time\n#how do I format the ax2 y axis scale\n\ndef make_plot(variable_1, variable_2):\n '''Make a three variable plot with two axes'''\n\n#plot title\n plt.title('CO2 and Calculated Pitch', fontsize='14')\n\n#twinx layering\n ax1=plt.subplot()\n ax2=ax1.twinx()\n #ax3=ax1.twinx()\n\n#call data for the plot\n ax1.plot(CO2_1, color='g', linewidth=1)\n ax2.plot(calc_freq, color= 'm', linewidth=1) \n #ax3.plot(measured_freq, color = 'b', marker= 'x')\n\n#axis labeling\n ax1.yaxis.set_tick_params(labelcolor='grey')\n ax1.set_xlabel('Sample Number')\n ax1.set_ylabel('CO2 (ppm)', fontsize=12, color = 'g')\n ax2.set_ylabel('Calculated Pitch (Hz)', fontsize=12, color='m') \n #ax3.set_ylabel('Measured Pitch')\n\n#axis limits\n ax1.set_ylim([400,1300])\n ax2.set_ylim([600, 1500])\n\n #plt.savefig('../Figures/fig1.pdf')\n\n#Close function\n return()#'../Figures/fig1.pdf')\n\n\n#Call my function to test it \nmake_plot(CO2_1, calc_freq)\n\n\nmeasured_freq.head()\n\nenv.head()\n\nFreq vs. 
CO2\n\nplt.plot(env.CO2_1, measured_freq.time, color='g', linewidth=1)\n\n\n#def make_fig(datasets, variable_1, variable_2, savename):\n\n#twinx layering\nax1=plt.subplot()\nax2=ax1.twinx()\n\n#plot 2 variables in predertermined plot above\nax1.plot(dataset.index, variable_1, 'k-', linewidth=2)\nax2.plot(dataset.index, variable_2, )\n\n#moving plots lines\nvariable_2_spine=ax2.spines['right']\nvariable_2_spine.set_position(('axes', 1.2))\n\nax1.yaxi.set_tick_params(labelcolor='k')\nax1.set_ylabel(variable_1.name, fontsize=13, colour = 'k')\nax2.sey_ylabel(variable_2.name + '($^o$C)', fontsize=13, color='grey')\n\n#plt.savefig(savename)\nreturn(savename)\n\n\nfig = plt.figure(figsize=(11,14))\nplt.suptitle('')\n\nax1.plot(colum1, colum2, 'k-', linewidth=2)\n\" \"\n\nax1.set_ylim([0,1])\nax2.set_ylim([0,1])\n\nax1.set_xlabel('name', fontsize=14, y=0)\nax1.set_ylabel\nax2.set_ylabel\n\n#convert 'object' (CO2_1) to float \nnew = pd.Series([env.CO2_1], name = 'CO2_1')\n\nCO2_1 = new.tolist()\n\nCO2_array = np.array(CO2_1)\n\n#Test type of data in \"CO2_1\" column\nenv.CO2_1.dtypes\n\n\n\n#How can I format it so it's not an object?\n\ncramer(CO2_array)\n\n#'float' object not callable--the data in \"CO2_1\" are objects and cannot be called into the equation\n#cramer(env.CO2_ave) \n\nenv.dtypes\n\nenv.CO2_1.dtypes\n\nnew = pd.Series([env.CO2_1], name = 'CO2_1')\n\nCO2_1 = new.tolist()\n\nCO2_array = np.array(CO2_1)\n\n#Test type of data in \"CO2_1\" column\nenv.CO2_1.dtypes\n\ncramer(CO2_array)\n\ntype(CO2_array)\n\n# To choose which CO2 value to use, I first visualize which seems normal \n\n#Create CO2-only dataframs\nCO2 = env[['CO2_1', 'CO2_2']]\n\n#Make a plot\nCO2_fig = plt.plot(CO2)\n\nplt.ylabel('CO2 (ppm)')\nplt.xlabel('Sample number')\nplt.title('Two CO2 sensors, same time and place')\n\n#plt.savefig('CO2_fig.pdf')\n\ninput_file = env\n\n\n\n#Upload environmental data file\nenv = pd.read_table('', sep=',')\n\n\n\n#assigning columns names\nenv.columns=[['test', 'date_time','temp C', 'RH %', 'CO2_1', 'CO2_2']]\n\n#change data time variable to actual values of time.\nenv['date_time']= pd.to_datetime(env['date_time'])\n\n#test function\n #def test_cramer():\n #assert a0 + ((a9)*400)/100 + a14*((400/1000000)**2) == 672.339644669, 'Equation failure, math-mess-up'\n #return()\n\n#Call the test function\n #test_cramer()\n\n#pitch calculator function from Cramer equation\ndef cramer(data):\n '''Calculate pitch from CO2_1 concentration'''\n calc_freq = a0 + ((a9*data)/100) + a14*((data)**2)\n return(calc_freq)\n\n#Run the function for the input column (CO2 values) to get a new column of calculated_frequency\nenv['calc_freq'] = cramer(env['CO2_1'])\n\n#Import the measured pitch values--the output of pitch_data.py script\nmeasured_freq = pd.read_table('../organ_pitch/Data/munged_pitch.csv', sep=',')\n\n#change data time variable to actual values of time.\nenv['time']= pd.to_datetime(env['time'])\n\n#Function to make and save a plot\n\n\n\n" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
eds-uga/csci1360e-su16
lectures/L9.ipynb
mit
[ "Lecture 9: Introduction to Functions\nCSCI 1360E: Foundations for Informatics and Analytics\nOverview and Objectives\nIn this lecture, we'll introduce the concept of functions, critical abstractions in nearly every modern programming language. Functions are important for abstracting and categorizing large codebases into smaller, logical, and human-digestable components. By the end of this lecture, you should be able to:\n\nDefine a function that performs a specific task\nSet function arguments and return values\n\nPart 1: Defining Functions\nA function in Python is not very different from a function as you've probably learned since algebra.\n\"Let $f$ be a function of $x$\"...sound familiar? We're basically doing the same thing here.\nA function ($f$) will [usually] take something as input ($x$), perform some kind of operation on it, and then [usually] return a result ($y$). Which is why we usually see $f(x) = y$. A function, then, is composed of three main components:\n\nThe function itself. A [good] function will have one very specific task it performs. This task is usually reflected in its name. Take the examples of print, or sqrt, or exp, or log; all these names are very clear about what the function does.\nArguments (if any). Arguments (or parameters) are the input to the function. It's possible a function may not take any arguments at all, but often at least one is required. For example, print has 1 argument: a string.\nReturn values (if any). Return values are the output of the function. It's possible a function may not return anything; technically, print does not return anything. But common math functions like sqrt or log have clear return values: the output of that math operation.\n\nPhilosophy\nA core tenet in writing functions is that functions should do one thing, and do it well (with apologies to the Unix Philosophy).\nWriting good functions makes code much easier to troubleshoot and debug, as the code is already logically separated into components that perform very specific tasks. Thus, if your application is breaking, you usually have a good idea where to start looking.\nIt's very easy to get caught up writing \"god functions\": one or two massive functions that essentially do everything you need your program to do. But if something breaks, this design is very difficult to debug.\nFunctions vs Methods\nYou've probably heard the term \"method\" before, in this class. Quite often, these two terms are used interchangeably, and for our purposes they are pretty much the same.\nBUT. These terms ultimately identify different constructs, so it's important to keep that in mind. Specifically:\n\n\nMethods are functions inside classes. We won't be going into classes in this course, hence the reason why the distinction isn't useful for our purposes. We may make use of some classes in this course, but we won't build any. Object-oriented programming (OOP) is whole 'nother can of worms we won't touch here.\n\n\nFunctions are not inside classes. In some sense, they're \"free\" (though they may be found inside specific modules; however, since a module != a class, they're still called functions).\n\n\nOtherwise, functions and methods work identically.\nOk, how do we write functions? At this point in the course, you've probably already seen how this works, but we'll go through it step by step regardless.\nFirst, we define the function header. 
This is the portion of the function that defines the name of the function, the arguments, and uses the Python keyword def to make everything official:", "def our_function():\n pass", "That's everything we need for a working function! Let's walk through it:\n\ndef keyword: required before writing any function, to tell Python \"hey! this is a function!\"\nFunction name: one word (can \"fake\" spaces with underscores), which is the name of the function and how we'll refer to it later\nArguments: a comma-separated list of arguments the function takes to perform its task. If no arguments are needed (as above), then just open-paren-close-paren.\nColon: the colon indicates the end of the function header and the start of the actual function's code.\npass: since Python is sensitive to whitespace, we can't leave a function body blank; luckily, there's the pass keyword that does pretty much what it sounds like--no operation at all, just a placeholder.\n\nAdmittedly, our function doesn't really do anything interesting. It takes no parameters, and the function body consists exclusively of a placeholder keyword that also does nothing. Still, it's a perfectly valid function!\nOther notes on functions\n\n\nYou can define functions (as we did just before) almost anywhere in your code. As we'll see when we get to functional programming, you can literally define functions in the middle of a line of code. Still, good coding practices behooves you to generally group your function definitions together, e.g. at the top of your module.\n\n\nInvoking or activating a function is referred to as calling the function. When you call a function, you type its name, an open parenthesis, any arguments you're sending to the function, and a closing parenthesis. If there are no arguments, then calling the function is as simple as typing the function name and an open-close pair of parentheses.\n\n\nFunctions can be part of modules. You've already seen some of these in action: the numpy.array() functionality is indeed a function. When a function is in a module, to call it you need to prepend the name of the module (and any submodules), add a dot \".\" between the module names, and then call the function as you normally would.\n\n\nThough not recommended, it's possible to import only select functions from a module, so you no longer have to specify the module name in front of the function name when calling the function. This uses the from keyword during import:", "from numpy import array", "Now the array() method can be called directly without prepending the package name numpy in front. USE THIS CAUTIOUSLY: if you accidentally name a variable array later in your code, you will get some very strange errors!\nPart 2: Function Arguments\nArguments (or parameters), as stated before, are the function's input; the \"$x$\" to our \"$f$\", as it were.\nYou can specify as many arguments as want, separating them by commas:", "def one_arg(arg1):\n pass\n\ndef two_args(arg1, arg2):\n pass\n\ndef three_args(arg1, arg2, arg3):\n pass\n\n# And so on...", "Like functions, you can name the arguments anything you want, though also like functions you'll probably want to give them more meaningful names besides arg1, arg2, and arg3. 
When these become just three functions among hundreds in a massive codebase written by dozens of different people, it's helpful when the code itself gives you hints as to what it does.\nWhen you call a function, you'll need to provide the same number of arguments in the function call as appear in the function header, otherwise Python will yell at you.", "try:\n one_arg(\"some arg\")\nexcept Exception as e:\n print(\"one_arg FAILED: {}\".format(e))\nelse:\n print(\"one_arg SUCCEEDED\")\n\ntry:\n two_args(\"only1arg\")\nexcept Exception as e:\n print(\"two_args FAILED: {}\".format(e))\nelse:\n print(\"two_args SUCCEEDED\")", "To be fair, it's a pretty easy error to diagnose, but still something to keep in mind--especially as we move beyond basic \"positional\" arguments (as they are so called in the previous error message) into optional arguments.\nDefault arguments\n\"Positional\" arguments--the only kind we've seen so far--are required. If the function header specifies a positional argument, then every single call to that functions needs to have that argument specified.\nThere are cases, however, where it can be helpful to have optional, or default, arguments. In this case, when the function is called, the programmer can decide whether or not they want to override the default values.\nYou can specify default arguments in the function header:", "def func_with_default_arg(positional, default = 10):\n print(\"'{}' with default arg {}\".format(positional, default))\n\nfunc_with_default_arg(\"Input string\")\nfunc_with_default_arg(\"Input string\", default = 999)", "If you look through the NumPy online documentation, you'll find most of its functions have entire books' worth of default arguments.\nThe numpy.array function we've been using has quite a few; the only positional (required) argument for that function is some kind of list/array structure to wrap a NumPy array around. Everything else it tries to figure out on its own, unless the programmer explicitly specifies otherwise.", "import numpy as np\nx = np.array([1, 2, 3])\ny = np.array([1, 2, 3], dtype = float) # Specifying the data type of the array, using \"dtype\"\n\nprint(x)\nprint(y)", "Notice the decimal points that follow the values in the second array! This is NumPy's way of showing that these numbers are floats, not integers!\nIn this example, NumPy detected that our initial list contained integers, and we see in the first example that it left the integer type alone. But, in the second example, we override its default behavior in determining the data type of the elements of the resulting NumPy array. This is a very powerful mechanism for occasionally tweaking the behavior of functions without having to write entirely new ones.\nLet's do one more small example before moving on to return values. Let's build a method which prints out a list of video games in someone's Steam library.", "def games_in_library(username, library):\n print(\"User '{}' owns: \".format(username))\n for game in library:\n print(game)\n print()\n\ngames_in_library('fps123', ['DOTA 2', 'Left 4 Dead', 'Doom', 'Counterstrike', 'Team Fortress 2'])\ngames_in_library('rts456', ['Civilization V', 'Cities: Skylines', 'Sins of a Solar Empire'])\ngames_in_library('smrt789', ['Binding of Isaac', 'Monaco'])", "In this example, our function games_in_library has two positional arguments: username, which is the Steam username of the person, and library, which is a list of video game titles. 
The function simply prints out the username and the titles they own.\nPart 3: Return Values\nJust as functions [can] take input, they also [can] return output for the programmer to decide what to do with.\nAlmost any function you will ever write will most likely have a return value of some kind. If not, your function may not be \"well-behaved\", aka sticking to the general guideline of doing one thing very well.\nThere are certainly some cases where functions won't return anything--functions that just print things, functions that run forever (yep, they exist!), functions designed specifically to test other functions--but these are highly specialized cases we are not likely to encounter in this course. Keep this in mind as a \"rule of thumb.\"\nTo return a value from a function, just use the return keyword:", "def identity_function(in_arg):\n return in_arg\n\nx = \"this is the function input\"\nreturn_value = identity_function(x)\nprint(return_value)", "This is pretty basic: the function returns back to the programmer as output whatever was passed into the function as input. Hence, \"identity function.\"\nAnything you can pass in as function parameters, you can return as function output, including lists:", "def explode_string(some_string):\n list_of_characters = []\n for index in range(len(some_string)):\n list_of_characters.append(some_string[index])\n return list_of_characters\n\nwords = \"Blahblahblah\"\noutput = explode_string(words)\nprint(output)", "This function takes a string as input, uses a loop to \"explode\" the string, and returns a list of individual characters.\n(it should be noted this entire function can be replaced by one line: output = list(words), but it serves well as an illustration that you can pass in to and return from functions any data types you'd like)\nReview Questions\nSome questions to discuss and consider:\n1: You're a software engineer for a prestigious web company named after a South American rain forest. You've been tasked with rewriting their web-based shopping cart functionality for users who purchase items through the site. Without going into too much detail, quickly list out a handful of functions you'd want to write with their basic arguments. Again, no need for excessive detail; just consider the workflow of navigating an online store and purchasing items with a shopping cart, and identify some of the key bits of functionality you'd want to write standalone functions for, as well as the inputs and outputs of those functions.\n2: From where do you think the term \"positional argument\" gets its name?\n3: In NumPy you have a lot of math-oriented utility functions, like numpy.log, numpy.exp, numpy.cos, and so on. Describe in words (in terms of functions, their inputs, and their return values) how the code in this line works: x = numpy.log(numpy.exp(numpy.cos(100.0)))\n4: Go back to the explode_string example in Cell 9 above. Rewrite that loop in the form of a list comprehension (throwback review question! hashtag \"trq\").\nCourse Administrivia\n\n\nPlease let me know if you found the review session helpful, what was/wasn't helpful, or whatever other thoughts you may have on the course so far.\n\n\nA4 is due tomorrow, and A5 comes out tomorrow. It is the final assignment before the midterm; A6 won't come out until July 7.\n\n\nPlease ask me for help if you're struggling, or if something isn't clear, or if you have a question you think is stupid! No stupid questions, especially when it's an introductory course.\n\n\nAdditional Resources\n\nMatthes, Eric. 
Python Crash Course. 2016. ISBN-13: 978-1593276034" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
ES-DOC/esdoc-jupyterhub
notebooks/test-institute-3/cmip6/models/sandbox-1/land.ipynb
gpl-3.0
[ "ES-DOC CMIP6 Model Properties - Land\nMIP Era: CMIP6\nInstitute: TEST-INSTITUTE-3\nSource ID: SANDBOX-1\nTopic: Land\nSub-Topics: Soil, Snow, Vegetation, Energy Balance, Carbon Cycle, Nitrogen Cycle, River Routing, Lakes. \nProperties: 154 (96 required)\nModel descriptions: Model description details\nInitialized From: -- \nNotebook Help: Goto notebook help page\nNotebook Initialised: 2018-02-15 16:54:46\nDocument Setup\nIMPORTANT: to be executed each time you run the notebook", "# DO NOT EDIT ! \nfrom pyesdoc.ipython.model_topic import NotebookOutput \n\n# DO NOT EDIT ! \nDOC = NotebookOutput('cmip6', 'test-institute-3', 'sandbox-1', 'land')", "Document Authors\nSet document authors", "# Set as follows: DOC.set_author(\"name\", \"email\") \n# TODO - please enter value(s)", "Document Contributors\nSpecify document contributors", "# Set as follows: DOC.set_contributor(\"name\", \"email\") \n# TODO - please enter value(s)", "Document Publication\nSpecify document publication status", "# Set publication status: \n# 0=do not publish, 1=publish. \nDOC.set_publication_status(0)", "Document Table of Contents\n1. Key Properties\n2. Key Properties --&gt; Conservation Properties\n3. Key Properties --&gt; Timestepping Framework\n4. Key Properties --&gt; Software Properties\n5. Grid\n6. Grid --&gt; Horizontal\n7. Grid --&gt; Vertical\n8. Soil\n9. Soil --&gt; Soil Map\n10. Soil --&gt; Snow Free Albedo\n11. Soil --&gt; Hydrology\n12. Soil --&gt; Hydrology --&gt; Freezing\n13. Soil --&gt; Hydrology --&gt; Drainage\n14. Soil --&gt; Heat Treatment\n15. Snow\n16. Snow --&gt; Snow Albedo\n17. Vegetation\n18. Energy Balance\n19. Carbon Cycle\n20. Carbon Cycle --&gt; Vegetation\n21. Carbon Cycle --&gt; Vegetation --&gt; Photosynthesis\n22. Carbon Cycle --&gt; Vegetation --&gt; Autotrophic Respiration\n23. Carbon Cycle --&gt; Vegetation --&gt; Allocation\n24. Carbon Cycle --&gt; Vegetation --&gt; Phenology\n25. Carbon Cycle --&gt; Vegetation --&gt; Mortality\n26. Carbon Cycle --&gt; Litter\n27. Carbon Cycle --&gt; Soil\n28. Carbon Cycle --&gt; Permafrost Carbon\n29. Nitrogen Cycle\n30. River Routing\n31. River Routing --&gt; Oceanic Discharge\n32. Lakes\n33. Lakes --&gt; Method\n34. Lakes --&gt; Wetlands \n1. Key Properties\nLand surface key properties\n1.1. Model Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of land surface model.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.key_properties.model_overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.2. Model Name\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nName of land surface model code (e.g. MOSES2.2)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.key_properties.model_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.3. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nGeneral description of the processes modelled (e.g. dymanic vegation, prognostic albedo, etc.)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.key_properties.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.4. 
Land Atmosphere Flux Exchanges\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nFluxes exchanged with the atmopshere.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.key_properties.land_atmosphere_flux_exchanges') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"water\" \n# \"energy\" \n# \"carbon\" \n# \"nitrogen\" \n# \"phospherous\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "1.5. Atmospheric Coupling Treatment\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the treatment of land surface coupling with the Atmosphere model component, which may be different for different quantities (e.g. dust: semi-implicit, water vapour: explicit)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.key_properties.atmospheric_coupling_treatment') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.6. Land Cover\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nTypes of land cover defined in the land surface model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.key_properties.land_cover') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"bare soil\" \n# \"urban\" \n# \"lake\" \n# \"land ice\" \n# \"lake ice\" \n# \"vegetated\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "1.7. Land Cover Change\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe how land cover change is managed (e.g. the use of net or gross transitions)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.key_properties.land_cover_change') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.8. Tiling\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the general tiling procedure used in the land surface (if any). Include treatment of physiography, land/sea, (dynamic) vegetation coverage and orography/roughness", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.key_properties.tiling') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "2. Key Properties --&gt; Conservation Properties\nTODO\n2.1. Energy\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe if/how energy is conserved globally and to what level (e.g. within X [units]/year)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.key_properties.conservation_properties.energy') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "2.2. Water\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe if/how water is conserved globally and to what level (e.g. within X [units]/year)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.key_properties.conservation_properties.water') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "2.3. Carbon\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe if/how carbon is conserved globally and to what level (e.g. within X [units]/year)", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.land.key_properties.conservation_properties.carbon') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "3. Key Properties --&gt; Timestepping Framework\nTODO\n3.1. Timestep Dependent On Atmosphere\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs a time step dependent on the frequency of atmosphere coupling?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.key_properties.timestepping_framework.timestep_dependent_on_atmosphere') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "3.2. Time Step\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverall timestep of land surface model (i.e. time between calls)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.key_properties.timestepping_framework.time_step') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "3.3. Timestepping Method\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nGeneral description of time stepping method and associated time step(s)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.key_properties.timestepping_framework.timestepping_method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4. Key Properties --&gt; Software Properties\nSoftware properties of land surface code\n4.1. Repository\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nLocation of code for this component.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.key_properties.software_properties.repository') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4.2. Code Version\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nCode version identifier.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.key_properties.software_properties.code_version') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4.3. Code Languages\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nCode language(s).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.key_properties.software_properties.code_languages') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "5. Grid\nLand surface grid\n5.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of the grid in the land surface", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.grid.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6. Grid --&gt; Horizontal\nThe horizontal grid in the land surface\n6.1. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the general structure of the horizontal grid (not including any tiling)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.grid.horizontal.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6.2. 
Matches Atmosphere Grid\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDoes the horizontal grid match the atmosphere?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.grid.horizontal.matches_atmosphere_grid') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "7. Grid --&gt; Vertical\nThe vertical grid in the soil\n7.1. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the general structure of the vertical grid in the soil (not including any tiling)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.grid.vertical.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7.2. Total Depth\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nThe total depth of the soil (in metres)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.grid.vertical.total_depth') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "8. Soil\nLand surface soil\n8.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of soil in the land surface", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.2. Heat Water Coupling\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the coupling between heat and water in the soil", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.heat_water_coupling') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.3. Number Of Soil layers\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nThe number of soil layers", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.number_of_soil layers') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "8.4. Prognostic Variables\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nList the prognostic variables of the soil scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.prognostic_variables') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9. Soil --&gt; Soil Map\nKey properties of the land surface soil map\n9.1. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nGeneral description of soil map", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.soil_map.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9.2. Structure\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the soil structure map", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.soil_map.structure') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9.3. Texture\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the soil texture map", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.land.soil.soil_map.texture') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9.4. Organic Matter\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the soil organic matter map", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.soil_map.organic_matter') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9.5. Albedo\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the soil albedo map", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.soil_map.albedo') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9.6. Water Table\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the soil water table map, if any", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.soil_map.water_table') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9.7. Continuously Varying Soil Depth\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDoes the soil properties vary continuously with depth?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.soil_map.continuously_varying_soil_depth') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "9.8. Soil Depth\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the soil depth map", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.soil_map.soil_depth') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "10. Soil --&gt; Snow Free Albedo\nTODO\n10.1. Prognostic\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs snow free albedo prognostic?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.snow_free_albedo.prognostic') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "10.2. Functions\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nIf prognostic, describe the dependancies on snow free albedo calculations", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.snow_free_albedo.functions') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"vegetation type\" \n# \"soil humidity\" \n# \"vegetation state\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "10.3. Direct Diffuse\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf prognostic, describe the distinction between direct and diffuse albedo", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.snow_free_albedo.direct_diffuse') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"distinction between direct and diffuse albedo\" \n# \"no distinction between direct and diffuse albedo\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "10.4. 
Number Of Wavelength Bands\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf prognostic, enter the number of wavelength bands used", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.snow_free_albedo.number_of_wavelength_bands') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "11. Soil --&gt; Hydrology\nKey properties of the land surface soil hydrology\n11.1. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nGeneral description of the soil hydrological model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.hydrology.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "11.2. Time Step\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTime step of river soil hydrology in seconds", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.hydrology.time_step') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "11.3. Tiling\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the soil hydrology tiling, if any.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.hydrology.tiling') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "11.4. Vertical Discretisation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the typical vertical discretisation", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.hydrology.vertical_discretisation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "11.5. Number Of Ground Water Layers\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nThe number of soil layers that may contain water", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.hydrology.number_of_ground_water_layers') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "11.6. Lateral Connectivity\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nDescribe the lateral connectivity between tiles", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.hydrology.lateral_connectivity') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"perfect connectivity\" \n# \"Darcian flow\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "11.7. Method\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nThe hydrological dynamics scheme in the land surface model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.hydrology.method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Bucket\" \n# \"Force-restore\" \n# \"Choisnel\" \n# \"Explicit diffusion\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "12. Soil --&gt; Hydrology --&gt; Freezing\nTODO\n12.1. Number Of Ground Ice Layers\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHow many soil layers may contain ground ice", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.land.soil.hydrology.freezing.number_of_ground_ice_layers') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "12.2. Ice Storage Method\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the method of ice storage", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.hydrology.freezing.ice_storage_method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "12.3. Permafrost\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the treatment of permafrost, if any, within the land surface scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.hydrology.freezing.permafrost') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "13. Soil --&gt; Hydrology --&gt; Drainage\nTODO\n13.1. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nGeneral describe how drainage is included in the land surface scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.hydrology.drainage.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "13.2. Types\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nDifferent types of runoff represented by the land surface model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.hydrology.drainage.types') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Gravity drainage\" \n# \"Horton mechanism\" \n# \"topmodel-based\" \n# \"Dunne mechanism\" \n# \"Lateral subsurface flow\" \n# \"Baseflow from groundwater\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "14. Soil --&gt; Heat Treatment\nTODO\n14.1. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nGeneral description of how heat treatment properties are defined", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.heat_treatment.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "14.2. Time Step\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTime step of soil heat scheme in seconds", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.heat_treatment.time_step') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "14.3. Tiling\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the soil heat treatment tiling, if any.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.heat_treatment.tiling') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "14.4. Vertical Discretisation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the typical vertical discretisation", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.heat_treatment.vertical_discretisation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "14.5. 
Heat Storage\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nSpecify the method of heat storage", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.heat_treatment.heat_storage') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Force-restore\" \n# \"Explicit diffusion\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "14.6. Processes\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nDescribe processes included in the treatment of soil heat", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.heat_treatment.processes') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"soil moisture freeze-thaw\" \n# \"coupling with snow temperature\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "15. Snow\nLand surface snow\n15.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of snow in the land surface", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.snow.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "15.2. Tiling\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the snow tiling, if any.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.snow.tiling') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "15.3. Number Of Snow Layers\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nThe number of snow levels used in the land surface scheme/model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.snow.number_of_snow_layers') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "15.4. Density\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescription of the treatment of snow density", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.snow.density') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"prognostic\" \n# \"constant\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "15.5. Water Equivalent\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescription of the treatment of the snow water equivalent", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.snow.water_equivalent') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"prognostic\" \n# \"diagnostic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "15.6. Heat Content\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescription of the treatment of the heat content of snow", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.snow.heat_content') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"prognostic\" \n# \"diagnostic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "15.7. Temperature\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescription of the treatment of snow temperature", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.land.snow.temperature') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"prognostic\" \n# \"diagnostic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "15.8. Liquid Water Content\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescription of the treatment of snow liquid water", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.snow.liquid_water_content') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"prognostic\" \n# \"diagnostic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "15.9. Snow Cover Fractions\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nSpecify cover fractions used in the surface snow scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.snow.snow_cover_fractions') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"ground snow fraction\" \n# \"vegetation snow fraction\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "15.10. Processes\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nSnow related processes in the land surface scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.snow.processes') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"snow interception\" \n# \"snow melting\" \n# \"snow freezing\" \n# \"blowing snow\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "15.11. Prognostic Variables\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nList the prognostic variables of the snow scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.snow.prognostic_variables') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "16. Snow --&gt; Snow Albedo\nTODO\n16.1. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the treatment of snow-covered land albedo", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.snow.snow_albedo.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"prognostic\" \n# \"prescribed\" \n# \"constant\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "16.2. Functions\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\n*If prognostic, *", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.snow.snow_albedo.functions') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"vegetation type\" \n# \"snow age\" \n# \"snow density\" \n# \"snow grain type\" \n# \"aerosol deposition\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "17. Vegetation\nLand surface vegetation\n17.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of vegetation in the land surface", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "17.2. 
Time Step\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTime step of vegetation scheme in seconds", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.time_step') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "17.3. Dynamic Vegetation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs there dynamic evolution of vegetation?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.dynamic_vegetation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "17.4. Tiling\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the vegetation tiling, if any.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.tiling') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "17.5. Vegetation Representation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nVegetation classification used", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.vegetation_representation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"vegetation types\" \n# \"biome types\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "17.6. Vegetation Types\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nList of vegetation types in the classification, if any", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.vegetation_types') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"broadleaf tree\" \n# \"needleleaf tree\" \n# \"C3 grass\" \n# \"C4 grass\" \n# \"vegetated\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "17.7. Biome Types\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nList of biome types in the classification, if any", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.biome_types') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"evergreen needleleaf forest\" \n# \"evergreen broadleaf forest\" \n# \"deciduous needleleaf forest\" \n# \"deciduous broadleaf forest\" \n# \"mixed forest\" \n# \"woodland\" \n# \"wooded grassland\" \n# \"closed shrubland\" \n# \"opne shrubland\" \n# \"grassland\" \n# \"cropland\" \n# \"wetlands\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "17.8. Vegetation Time Variation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHow the vegetation fractions in each tile are varying with time", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.vegetation_time_variation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"fixed (not varying)\" \n# \"prescribed (varying from files)\" \n# \"dynamical (varying from simulation)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "17.9. 
Vegetation Map\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf vegetation fractions are not dynamically updated , describe the vegetation map used (common name and reference, if possible)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.vegetation_map') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "17.10. Interception\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs vegetation interception of rainwater represented?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.interception') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "17.11. Phenology\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTreatment of vegetation phenology", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.phenology') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"prognostic\" \n# \"diagnostic (vegetation map)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "17.12. Phenology Description\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nGeneral description of the treatment of vegetation phenology", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.phenology_description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "17.13. Leaf Area Index\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTreatment of vegetation leaf area index", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.leaf_area_index') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"prescribed\" \n# \"prognostic\" \n# \"diagnostic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "17.14. Leaf Area Index Description\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nGeneral description of the treatment of leaf area index", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.leaf_area_index_description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "17.15. Biomass\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\n*Treatment of vegetation biomass *", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.biomass') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"prognostic\" \n# \"diagnostic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "17.16. Biomass Description\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nGeneral description of the treatment of vegetation biomass", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.biomass_description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "17.17. Biogeography\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTreatment of vegetation biogeography", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.land.vegetation.biogeography') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"prognostic\" \n# \"diagnostic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "17.18. Biogeography Description\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nGeneral description of the treatment of vegetation biogeography", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.biogeography_description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "17.19. Stomatal Resistance\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nSpecify what the vegetation stomatal resistance depends on", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.stomatal_resistance') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"light\" \n# \"temperature\" \n# \"water availability\" \n# \"CO2\" \n# \"O3\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "17.20. Stomatal Resistance Description\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nGeneral description of the treatment of vegetation stomatal resistance", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.stomatal_resistance_description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "17.21. Prognostic Variables\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nList the prognostic variables of the vegetation scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.prognostic_variables') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "18. Energy Balance\nLand surface energy balance\n18.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of energy balance in land surface", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.energy_balance.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "18.2. Tiling\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the energy balance tiling, if any.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.energy_balance.tiling') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "18.3. Number Of Surface Temperatures\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nThe maximum number of distinct surface temperatures in a grid cell (for example, each subgrid tile may have its own temperature)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.energy_balance.number_of_surface_temperatures') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "18.4. Evaporation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nSpecify the formulation method for land surface evaporation, from soil and vegetation", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.land.energy_balance.evaporation') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"alpha\" \n# \"beta\" \n# \"combined\" \n# \"Monteith potential evaporation\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "18.5. Processes\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nDescribe which processes are included in the energy balance scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.energy_balance.processes') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"transpiration\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "19. Carbon Cycle\nLand surface carbon cycle\n19.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of carbon cycle in land surface", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "19.2. Tiling\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the carbon cycle tiling, if any.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.tiling') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "19.3. Time Step\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTime step of carbon cycle in seconds", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.time_step') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "19.4. Anthropogenic Carbon\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nDescribe the treament of the anthropogenic carbon pool", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.anthropogenic_carbon') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"grand slam protocol\" \n# \"residence time\" \n# \"decay time\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "19.5. Prognostic Variables\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nList the prognostic variables of the carbon scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.prognostic_variables') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "20. Carbon Cycle --&gt; Vegetation\nTODO\n20.1. Number Of Carbon Pools\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nEnter the number of carbon pools used", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.vegetation.number_of_carbon_pools') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "20.2. Carbon Pools\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList the carbon pools used", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.vegetation.carbon_pools') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "20.3. 
Forest Stand Dynamics\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the treatment of forest stand dyanmics", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.vegetation.forest_stand_dynamics') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "21. Carbon Cycle --&gt; Vegetation --&gt; Photosynthesis\nTODO\n21.1. Method\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the general method used for photosynthesis (e.g. type of photosynthesis, distinction between C3 and C4 grasses, Nitrogen depencence, etc.)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.vegetation.photosynthesis.method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "22. Carbon Cycle --&gt; Vegetation --&gt; Autotrophic Respiration\nTODO\n22.1. Maintainance Respiration\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the general method used for maintainence respiration", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.maintainance_respiration') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "22.2. Growth Respiration\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the general method used for growth respiration", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.growth_respiration') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "23. Carbon Cycle --&gt; Vegetation --&gt; Allocation\nTODO\n23.1. Method\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the general principle behind the allocation scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "23.2. Allocation Bins\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nSpecify distinct carbon bins used in allocation", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_bins') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"leaves + stems + roots\" \n# \"leaves + stems + roots (leafy + woody)\" \n# \"leaves + fine roots + coarse roots + stems\" \n# \"whole plant (no distinction)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "23.3. Allocation Fractions\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe how the fractions of allocation are calculated", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_fractions') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"fixed\" \n# \"function of vegetation type\" \n# \"function of plant allometry\" \n# \"explicitly calculated\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "24. Carbon Cycle --&gt; Vegetation --&gt; Phenology\nTODO\n24.1. 
Method\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the general principle behind the phenology scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.vegetation.phenology.method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "25. Carbon Cycle --&gt; Vegetation --&gt; Mortality\nTODO\n25.1. Method\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the general principle behind the mortality scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.vegetation.mortality.method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "26. Carbon Cycle --&gt; Litter\nTODO\n26.1. Number Of Carbon Pools\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nEnter the number of carbon pools used", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.litter.number_of_carbon_pools') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "26.2. Carbon Pools\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList the carbon pools used", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.litter.carbon_pools') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "26.3. Decomposition\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList the decomposition methods used", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.litter.decomposition') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "26.4. Method\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList the general method used", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.litter.method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "27. Carbon Cycle --&gt; Soil\nTODO\n27.1. Number Of Carbon Pools\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nEnter the number of carbon pools used", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.soil.number_of_carbon_pools') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "27.2. Carbon Pools\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList the carbon pools used", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.soil.carbon_pools') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "27.3. Decomposition\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList the decomposition methods used", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.soil.decomposition') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "27.4. Method\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList the general method used", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.land.carbon_cycle.soil.method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "28. Carbon Cycle --&gt; Permafrost Carbon\nTODO\n28.1. Is Permafrost Included\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs permafrost included?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.is_permafrost_included') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "28.2. Emitted Greenhouse Gases\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList the GHGs emitted", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.emitted_greenhouse_gases') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "28.3. Decomposition\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList the decomposition methods used", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.decomposition') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "28.4. Impact On Soil Properties\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the impact of permafrost on soil properties", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.impact_on_soil_properties') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "29. Nitrogen Cycle\nLand surface nitrogen cycle\n29.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of the nitrogen cycle in the land surface", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.nitrogen_cycle.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "29.2. Tiling\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the notrogen cycle tiling, if any.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.nitrogen_cycle.tiling') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "29.3. Time Step\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTime step of nitrogen cycle in seconds", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.nitrogen_cycle.time_step') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "29.4. Prognostic Variables\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nList the prognostic variables of the nitrogen scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.nitrogen_cycle.prognostic_variables') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "30. River Routing\nLand surface river routing\n30.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of river routing in the land surface", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.land.river_routing.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "30.2. Tiling\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the river routing, if any.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.river_routing.tiling') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "30.3. Time Step\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTime step of river routing scheme in seconds", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.river_routing.time_step') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "30.4. Grid Inherited From Land Surface\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs the grid inherited from land surface?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.river_routing.grid_inherited_from_land_surface') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "30.5. Grid Description\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nGeneral description of grid, if not inherited from land surface", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.river_routing.grid_description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "30.6. Number Of Reservoirs\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nEnter the number of reservoirs", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.river_routing.number_of_reservoirs') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "30.7. Water Re Evaporation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nTODO", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.river_routing.water_re_evaporation') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"flood plains\" \n# \"irrigation\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "30.8. Coupled To Atmosphere\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIs river routing coupled to the atmosphere model component?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.river_routing.coupled_to_atmosphere') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "30.9. Coupled To Land\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the coupling between land and rivers", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.river_routing.coupled_to_land') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "30.10. Quantities Exchanged With Atmosphere\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nIf couple to atmosphere, which quantities are exchanged between river routing and the atmosphere model components?", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.land.river_routing.quantities_exchanged_with_atmosphere') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"heat\" \n# \"water\" \n# \"tracers\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "30.11. Basin Flow Direction Map\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat type of basin flow direction map is being used?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.river_routing.basin_flow_direction_map') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"present day\" \n# \"adapted for other periods\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "30.12. Flooding\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the representation of flooding, if any", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.river_routing.flooding') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "30.13. Prognostic Variables\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nList the prognostic variables of the river routing", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.river_routing.prognostic_variables') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "31. River Routing --&gt; Oceanic Discharge\nTODO\n31.1. Discharge Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nSpecify how rivers are discharged to the ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.river_routing.oceanic_discharge.discharge_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"direct (large rivers)\" \n# \"diffuse\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "31.2. Quantities Transported\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nQuantities that are exchanged from river-routing to the ocean model component", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.river_routing.oceanic_discharge.quantities_transported') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"heat\" \n# \"water\" \n# \"tracers\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "32. Lakes\nLand surface lakes\n32.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of lakes in the land surface", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.lakes.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "32.2. Coupling With Rivers\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nAre lakes coupled to the river routing model component?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.lakes.coupling_with_rivers') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "32.3. Time Step\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTime step of lake scheme in seconds", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.land.lakes.time_step') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "32.4. Quantities Exchanged With Rivers\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nIf coupling with rivers, which quantities are exchanged between the lakes and rivers", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.lakes.quantities_exchanged_with_rivers') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"heat\" \n# \"water\" \n# \"tracers\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "32.5. Vertical Grid\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the vertical grid of lakes", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.lakes.vertical_grid') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "32.6. Prognostic Variables\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nList the prognostic variables of the lake scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.lakes.prognostic_variables') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "33. Lakes --&gt; Method\nTODO\n33.1. Ice Treatment\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs lake ice included?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.lakes.method.ice_treatment') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "33.2. Albedo\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the treatment of lake albedo", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.lakes.method.albedo') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"prognostic\" \n# \"diagnostic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "33.3. Dynamics\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nWhich dynamics of lakes are treated? horizontal, vertical, etc.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.lakes.method.dynamics') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"No lake dynamics\" \n# \"vertical\" \n# \"horizontal\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "33.4. Dynamic Lake Extent\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs a dynamic lake extent scheme included?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.lakes.method.dynamic_lake_extent') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "33.5. Endorheic Basins\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nBasins not flowing to ocean included?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.lakes.method.endorheic_basins') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "34. Lakes --&gt; Wetlands\nTODO\n34.1. 
Description\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the treatment of wetlands, if any", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.lakes.wetlands.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "©2017 ES-DOC" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
CodeNeuro/notebooks
worker/notebooks/thunder/tutorials/factorization.ipynb
mit
[ "Factorization\nFactorization algorithms are useful for data-driven decomposition of spatial and temporal data, for example, to recover spatial patterns with similar temporal profiles. Here, we show how to use some of the factorization algorithms in Thunder and visualize the results.\nSetup plotting", "%matplotlib inline\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set_context('notebook')\nsns.set_style('darkgrid')\nfrom thunder import Colorize\nimage = Colorize.image", "PCA\nWe start by performing PCA (principal component analysis), which finds patterns that capture most of the variance in the data. First load toy example data, and cache it to speed up repeated queries.", "rawdata = tsc.loadExample('fish-series')\ndata = rawdata.toTimeSeries().normalize()\ndata.cache()\ndata.dims;", "Run PCA with two components", "from thunder import PCA\nmodel = PCA(k=2).fit(data)", "Fitting PCA adds two attributes to model: comps, which are the principal components, and scores, which are the data represented in principal component space. In this case, the input data were space-by-time, so the components are temporal basis functions, and the scores are spatial basis functions. Look at the results first by plotting the components, the temporal basis functions.", "plt.plot(model.comps.T);", "The scores are spatial basis functions. We can pack them into a local array and look at them as images one by one.", "imgs = model.scores.pack()\nimgs.shape\n\nimage(imgs[0,:,:,0], clim=(-0.05,0.05))\n\nimage(imgs[1,:,:,0], clim=(-0.05,0.05))", "Clearly there is some spatial structure to each component, but looking at them one by one can be difficult. A useful trick is to look at two components at once via a color code that converts the scores into polar coordinates. The color (hue) shows the relative amount of the two components, and the brightness shows the total amplitude.", "maps = Colorize(cmap='polar', scale=4).transform(imgs)\n\nfrom numpy import amax\nimage(amax(maps,2))", "To get more intuition for these colors, we can get the scores from a random subset of pixels. This will return two numbers per pixel, the projection onto the first and second principal component, and we threshold based on the norm so we are sure to retrieve pixels with at least some structure. Then we make a scatter plot of the two quantities against one another, using the same color conversion as used to generate the map.", "pts = model.scores.subset(500, thresh=0.01, stat='norm')\n\nfrom numpy import newaxis, squeeze\nclrs = Colorize(cmap='polar', scale=4).transform([pts[:,0][:,newaxis], pts[:,1][:,newaxis]]).squeeze()\nplt.scatter(pts[:,0],pts[:,1], c=clrs, s=75, alpha=0.7);", "Recall that each of these points represents a single pixel. Another way to better understand the PCA space is to plot the time series corresponding to each of these pixels, reconstructed using the first two principal components.", "from numpy import asarray\nrecon = asarray(map(lambda x: (x[0] * model.comps[0, :] + x[1] * model.comps[1, :]).tolist(), pts))\nplt.gca().set_color_cycle(clrs)\nplt.plot(recon.T);", "NMF\nNon-negative matrix factorization is an alternative decomposition. It is meant to be applied to data that are strictly positive, which is often approximately true of neural responses. Like PCA, it also returns a set of temporal and spatial basis functions, but unlike PCA, it tends to return basis functions that do not mix responses from different regions together. 
We can run NMF on the same data and look at the basis functions it recovers.", "from thunder import NMF\nmodel = NMF(k=3, maxIter=20).fit(data)", "After fitting, model will have two attributes, h and w. For these data, h contains the temporal basis functions, and w contains the spatial basis functions. Let's look at both.", "plt.plot(model.h.T);\n\nimgs = model.w.pack()\n\nimage(imgs[0][:,:,0])\n\nimage(imgs[1][:,:,0])\n\nimage(imgs[2][:,:,0])", "For NMF, a useful way to look at the basis functions is to encode each one as a separate color channel. We can do that using colorization with an rgb conversion, which simply maps the spatial basis functions directly to red, green, and blue values, and applies a global scaling factor which controls overall brightness.", "maps = Colorize(cmap='rgb', scale=1.0).transform(imgs)\nimage(maps[:,:,0,:])", "One problem with this way to look at NMF components is that the scale of the different components can cause some to dominate others. We also might like more control over color assignments. The indexed colorization option lets you specify one color per channel, and automatically normalizes the amplitude of each one.", "maps = Colorize(cmap='indexed', colors=[ \"hotpink\", \"cornflowerblue\", \"mediumseagreen\"], scale=1).transform(imgs)\nimage(maps[:,:,0,:])", "With these plots, it can be useful to add in a background image (for example, the mean). In this case, we also show how to select and colorize just two of the three map components against a background.", "ref = rawdata.seriesMean().pack()\n\nmaps = Colorize(cmap='indexed', colors=['red', 'blue'], scale=1).transform(imgs[[0,2]], background=ref, mixing=0.5)\nimage(maps[:,:,0,:])", "ICA\nIndependent component analysis is a final factorization approach. Unlike NMF, it does not require non-negative signals, but whereas PCA finds basis functions that maximize explained variance, ICA finds basis functions that maximize the non-Gaussianity of the recovered signals, and in practice, they tend to be both more distinct as well as spatially sparse.", "from thunder import ICA\nmodel = ICA(k=10,c=3).fit(data)\n\nsns.set_style('darkgrid')\nplt.plot(model.a);", "Some signals will be positive and others negative. This is expected because sign is arbitrary in ICA. It is useful to look at absolute value when making maps.", "imgs = model.sigs.pack()\n\nmaps = Colorize(cmap='indexed', colors=['red','green', 'blue'], scale=3).transform(abs(imgs))\nimage(maps[:,:,0,:])" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
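Since the factorization record above inspects the NMF factors only visually, a quick numerical sanity check can help. Here is a rough sketch done locally with NumPy, assuming the model, imgs, and data objects from the NMF cells, that model.h is laid out as (components x time), and that data.pack() returns the series with time on the last axis; the shapes are assumptions inferred from how the notebook indexes these objects, not guarantees.

import numpy as np

# Rank-k reconstruction from the NMF factors (sketch; see shape assumptions above).
H = np.asarray(model.h)                        # temporal factors, assumed (k, t)
W = np.asarray(imgs).reshape(H.shape[0], -1)   # spatial factors flattened to (k, pixels)

approx = W.T.dot(H)                            # (pixels, t) approximation of the series
original = np.asarray(data.pack()).reshape(-1, H.shape[1])

rel_err = np.linalg.norm(original - approx) / np.linalg.norm(original)
print('relative reconstruction error: %.3f' % rel_err)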
wheeler-microfluidics/teensy-minimal-rpc
teensy_minimal_rpc/notebooks/dma-examples/Example - [BROKEN] Periodic multi-channel ADC multiple samples using DMA and PIT.ipynb
gpl-3.0
[ "NB Cannot use PIT to trigger periodic DMA due to hardware bug\nSee here.\nTry using PDB instead??", "import pandas as pd\n\ndef get_pdb_divide_params(frequency, F_BUS=int(48e6)):\n mult_factor = np.array([1, 10, 20, 40])\n prescaler = np.arange(8)\n\n clock_divide = (pd.DataFrame([[i, m, p, m * (1 << p)]\n for i, m in enumerate(mult_factor) for p in prescaler],\n columns=['mult_', 'mult_factor', 'prescaler', 'combined'])\n .drop_duplicates(subset=['combined'])\n .sort_values('combined', ascending=True))\n clock_divide['clock_mod'] = (F_BUS / frequency / clock_divide.combined).astype(int)\n return clock_divide.loc[clock_divide.clock_mod <= 0xffff]\n\n\nPDB0_IDLY = 0x4003600C # Interrupt Delay Register\nPDB0_SC = 0x40036000 # Status and Control Register\nPDB0_MOD = 0x40036004 # Modulus Register\n\nPDB_SC_PDBEIE = 0x00020000 # Sequence Error Interrupt Enable\nPDB_SC_SWTRIG = 0x00010000 # Software Trigger\nPDB_SC_DMAEN = 0x00008000 # DMA Enable\nPDB_SC_PDBEN = 0x00000080 # PDB Enable\nPDB_SC_PDBIF = 0x00000040 # PDB Interrupt Flag\nPDB_SC_PDBIE = 0x00000020 # PDB Interrupt Enable.\nPDB_SC_CONT = 0x00000002 # Continuous Mode Enable\nPDB_SC_LDOK = 0x00000001 # Load OK\n\n\ndef PDB_SC_TRGSEL(n): return (((n) & 15) << 8) # Trigger Input Source Select\ndef PDB_SC_PRESCALER(n): return (((n) & 7) << 12) # Prescaler Divider Select\ndef PDB_SC_MULT(n): return (((n) & 3) << 2) # Multiplication Factor\ndef PDB_SC_LDMOD(n): return (((n) & 3) << 18) # Load Mode Select\n\n\n# PDB0_IDLY = 1; // the pdb interrupt happens when IDLY is equal to CNT+1\nproxy.mem_cpy_host_to_device(PDB0_IDLY, np.uint32(1).tostring())\n\n# software trigger enable PDB continuous\nPDB_CONFIG = (PDB_SC_TRGSEL(15) | PDB_SC_PDBEN | PDB_SC_CONT | PDB_SC_LDMOD(0))\n\nPDB0_SC_ = (PDB_CONFIG | PDB_SC_PRESCALER(clock_divide.prescaler) |\n PDB_SC_MULT(clock_divide.mult_) | \n PDB_SC_DMAEN | PDB_SC_LDOK) # load all new values\nproxy.mem_cpy_host_to_device(PDB0_SC, np.uint32(PDB0_SC_).tostring())\n\nclock_divide = get_pdb_divide_params(25).iloc[0]\n# PDB0_MOD = (uint16_t)(mod-1);\nproxy.mem_cpy_host_to_device(PDB0_MOD, np.uint32(clock_divide.clock_mod).tostring())\n\nPDB0_SC_ = (PDB_CONFIG | PDB_SC_PRESCALER(clock_divide.prescaler) |\n PDB_SC_DMAEN | PDB_SC_MULT(clock_divide.mult_) |\n PDB_SC_SWTRIG) # start the counter!\nproxy.mem_cpy_host_to_device(PDB0_SC, np.uint32(PDB0_SC_).tostring())\n\nPDB0_SC_ = 0\nproxy.mem_cpy_host_to_device(PDB0_SC, np.uint32(PDB0_SC_).tostring())", "Overview\nUse linked DMA channels to perform \"scan\" across multiple ADC input channels.\nAfter each scan, use DMA scatter chain to write the converted ADC values to a\nseparate output array for each ADC channel. The length of the output array to\nallocate for each ADC channel is determined by the sample_count in the\nexample below.\nSee diagram below.\nChannel configuration ##\n\nDMA channel $i$ copies conesecutive SC1A configurations to the ADC SC1A\n register. 
Each SC1A configuration selects an analog input channel.\nChannel $i$ is initially triggered by software trigger\n (i.e., DMA_SSRT = i), starting the ADC conversion for the first ADC\n channel configuration.\nLoading of subsequent ADC channel configurations is triggered through\n minor loop linking of DMA channel $ii$ to DMA channel $i$.\n\n\nDMA channel $ii$ is triggered by ADC conversion complete (i.e., COCO), and\n copies the output result of the ADC to consecutive locations in the result\n array.\nChannel $ii$ has minor loop link set to channel $i$, which triggers the\n loading of the next channel SC1A configuration to be loaded immediately\n after the current ADC result has been copied to the result array.\n\n\nAfter $n$ triggers of channel $i$, the result array contains $n$ ADC results,\n one result per channel in the SC1A table.\nN.B., Only the trigger for the first ADC channel is an explicit\n software trigger. All remaining triggers occur through minor-loop DMA\n channel linking from channel $ii$ to channel $i$.\n\n\nAfter each scan through all ADC channels is complete, the ADC readings are\n scattered using the selected \"scatter\" DMA channel through a major-loop link\n between DMA channel $ii$ and the \"scatter\" channel.\n\n<img src=\"multi-channel_ADC_multi-samples_using_DMA.jpg\" style=\"max-height: 600px\" />\nDevice\nConnect to device", "import arduino_helpers.hardware.teensy as teensy\nfrom arduino_rpc.protobuf import resolve_field_values\nfrom teensy_minimal_rpc import SerialProxy\nimport teensy_minimal_rpc.DMA as DMA\nimport teensy_minimal_rpc.ADC as ADC\nimport teensy_minimal_rpc.SIM as SIM\nimport teensy_minimal_rpc.PIT as PIT\n\n\n# Disconnect from existing proxy (if available)\ntry:\n del proxy\nexcept NameError:\n pass\n\nproxy = SerialProxy()\nproxy.pin_mode(teensy.LED_BUILTIN, 1)\n\nfrom IPython.display import display\n\nproxy.update_sim_SCGC6(SIM.R_SCGC6(PDB=True))\nsim_scgc6 = SIM.R_SCGC6.FromString(proxy.read_sim_SCGC6().tostring())\ndisplay(resolve_field_values(sim_scgc6)[['full_name', 'value']].T)\n\n# proxy.update_pit_registers(PIT.Registers(MCR=PIT.R_MCR(MDIS=False)))\n# pit_registers = PIT.Registers.FromString(proxy.read_pit_registers().tostring())\n# display(resolve_field_values(pit_registers)[['full_name', 'value']].T)\n\nimport numpy as np\n\n# CORE_PIN13_PORTSET = CORE_PIN13_BITMASK;\n# CORE_PIN13_PORTCLEAR = CORE_PIN13_BITMASK;\n\n#define CORE_PIN13_PORTCLEAR\tGPIOC_PCOR\n#define CORE_PIN13_PORTSET\tGPIOC_PSOR\n#define GPIOC_PCOR\t\t(*(volatile uint32_t *)0x400FF088) // Port Clear Output Register\n#define GPIOC_PSOR\t\t(*(volatile uint32_t *)0x400FF084) // Port Set Output Register\n\nCORE_PIN13_BIT = 5\nGPIOC_PCOR = 0x400FF088 # Port Clear Output Register\nGPIOC_PSOR = 0x400FF084 # Port Set Output Register\n\nproxy.mem_cpy_host_to_device(GPIOC_PSOR, np.uint32(1 << CORE_PIN13_BIT).tostring())\n\nproxy.update_dma_mux_chcfg(0, DMA.MUX_CHCFG(ENBL=1, TRIG=0, SOURCE=48))\nproxy.update_dma_registers(DMA.Registers(SERQ=0))\n\nproxy.update_dma_registers(DMA.Registers(CERQ=0))\n\nresolve_field_values(DMA.MUX_CHCFG.FromString(proxy.read_dma_mux_chcfg(0).tostring()))[['full_name', 'value']]\n\nprint proxy.update_pit_timer_config(0, PIT.TimerConfig(LDVAL=int(48e6)))\nprint proxy.update_pit_timer_config(0, PIT.TimerConfig(TCTRL=PIT.R_TCTRL(TEN=True)))\n\npit0 = PIT.TimerConfig.FromString(proxy.read_pit_timer_config(0).tostring())\ndisplay(resolve_field_values(pit0)[['full_name', 'value']].T)\n\nPIT_LDVAL0 = 0x40037100 # Timer Load Value Register\nPIT_CVAL0 = 
0x40037104 # Current Timer Value Register\nPIT_TCTRL0 = 0x40037108 # Timer Control Register\nproxy.mem_cpy_host_to_device(PIT_TCTRL0, np.uint32(1).tostring())\nproxy.mem_cpy_device_to_host(PIT_TCTRL0, 4).view('uint32')[0]\n\nproxy.digital_write(teensy.LED_BUILTIN, 0)\n\nproxy.update_dma_registers(DMA.Registers(SSRT=0))\n\nproxy.free_all()\n\ntoggle_pin_addr = proxy.mem_alloc(4)\nproxy.mem_cpy_host_to_device(toggle_pin_addr, np.uint32(1 << CORE_PIN13_BIT).tostring())\n\ntcds_addr = proxy.mem_aligned_alloc(32, 2 * 32)\nhw_tcds_addr = 0x40009000\ntcd_addrs = [tcds_addr + 32 * i for i in xrange(2)]\n\n# Create Transfer Control Descriptor configuration for first chunk, encoded\n# as a Protocol Buffer message.\ntcd0_msg = DMA.TCD(CITER_ELINKNO=DMA.R_TCD_ITER_ELINKNO(ITER=1),\n BITER_ELINKNO=DMA.R_TCD_ITER_ELINKNO(ITER=1),\n ATTR=DMA.R_TCD_ATTR(SSIZE=DMA.R_TCD_ATTR._32_BIT,\n DSIZE=DMA.R_TCD_ATTR._32_BIT),\n NBYTES_MLNO=4,\n SADDR=int(toggle_pin_addr),\n SOFF=0,\n SLAST=0,\n DADDR=int(GPIOC_PSOR),\n DOFF=0,\n# DLASTSGA=0,\n# CSR=DMA.R_TCD_CSR(START=0, DONE=False, ESG=False))\n# proxy.update_dma_TCD(0, tcd0_msg)\n DLASTSGA=int(tcd_addrs[1]),\n CSR=DMA.R_TCD_CSR(START=0, DONE=False, ESG=True))\n\n# # Convert Protocol Buffer encoded TCD to bytes structure.\ntcd0 = proxy.tcd_msg_to_struct(tcd0_msg)\n\n# Create binary TCD struct for each TCD protobuf message and copy to device\n# memory.\nfor i in xrange(2):\n tcd_i = tcd0.copy()\n tcd_i['DADDR'] = [GPIOC_PSOR, GPIOC_PCOR][i]\n tcd_i['DLASTSGA'] = tcd_addrs[(i + 1) % len(tcd_addrs)]\n tcd_i['CSR'] |= (1 << 4)\n proxy.mem_cpy_host_to_device(tcd_addrs[i], tcd_i.tostring())\n\n# Load initial TCD in scatter chain to DMA channel chosen to handle scattering.\nproxy.mem_cpy_host_to_device(hw_tcds_addr, tcd0.tostring())\n\nproxy.update_dma_registers(DMA.Registers(SSRT=0))\n\ndma_channel_scatter = 0\ndma_channel_i = 1\ndma_channel_ii = 2", "Configure ADC sample rate, etc.", "\n# Set ADC parameters\nproxy.setAveraging(16, teensy.ADC_0)\nproxy.setResolution(16, teensy.ADC_0)\nproxy.setConversionSpeed(teensy.ADC_MED_SPEED, teensy.ADC_0)\nproxy.setSamplingSpeed(teensy.ADC_MED_SPEED, teensy.ADC_0)\nproxy.update_adc_registers(\n teensy.ADC_0,\n ADC.Registers(CFG2=ADC.R_CFG2(MUXSEL=ADC.R_CFG2.B)))", "Pseudo-code to set DMA channel $i$ to be triggered by ADC0 conversion complete.\nDMAMUX0_CFGi[SOURCE] = DMAMUX_SOURCE_ADC0 // Route ADC0 as DMA channel source.\nDMAMUX0_CFGi[TRIG] = 0 // Disable periodic trigger.\nDMAMUX0_CFGi[ENBL] = 1 // Enable the DMAMUX configuration for channel.\n\nDMA_ERQ[i] = 1 // DMA request input signals and this enable request flag\n // must be asserted before a channel’s hardware service\n // request is accepted (21.3.3/394).\nDMA_SERQ = i // Can use memory mapped convenience register to set instead.\n\nSet DMA mux source for channel 0 to ADC0", "DMAMUX_SOURCE_ADC0 = 40 # from `kinetis.h`\nDMAMUX_SOURCE_ADC1 = 41 # from `kinetis.h`\n\n# DMAMUX0_CFGi[SOURCE] = DMAMUX_SOURCE_ADC0 // Route ADC0 as DMA channel source.\n# DMAMUX0_CFGi[TRIG] = 0 // Disable periodic trigger.\n# DMAMUX0_CFGi[ENBL] = 1 // Enable the DMAMUX configuration for channel.\nproxy.update_dma_mux_chcfg(dma_channel_ii,\n DMA.MUX_CHCFG(SOURCE=DMAMUX_SOURCE_ADC0,\n TRIG=False,\n ENBL=True))\n\n# DMA request input signals and this enable request flag\n# must be asserted before a channel’s hardware service\n# request is accepted (21.3.3/394).\n# DMA_SERQ = 
i\nproxy.update_dma_registers(DMA.Registers(SERQ=dma_channel_ii))\nproxy.enableDMA(teensy.ADC_0)\n\nproxy.DMA_registers().loc['']\n\ndmamux = DMA.MUX_CHCFG.FromString(proxy.read_dma_mux_chcfg(dma_channel_ii).tostring())\nresolve_field_values(dmamux)[['full_name', 'value']]\n\nadc0 = ADC.Registers.FromString(proxy.read_adc_registers(teensy.ADC_0).tostring())\nresolve_field_values(adc0)[['full_name', 'value']].loc[['CFG2', 'SC1A', 'SC3']]", "Analog channel list\n\nList of channels to sample.\nMap channels from Teensy references (e.g., A0, A1, etc.) to the Kinetis analog\n pin numbers using the adc.CHANNEL_TO_SC1A_ADC0 mapping.", "import re\n\nimport numpy as np\nimport pandas as pd\nimport arduino_helpers.hardware.teensy.adc as adc\n\n# The number of samples to record for each ADC channel.\nsample_count = 10\n\nteensy_analog_channels = ['A0', 'A1', 'A0', 'A3', 'A0']\nsc1a_pins = pd.Series(dict([(v, adc.CHANNEL_TO_SC1A_ADC0[getattr(teensy, v)])\n for v in dir(teensy) if re.search(r'^A\\d+', v)]))\nchannel_sc1as = np.array(sc1a_pins[teensy_analog_channels].tolist(), dtype='uint32')", "Allocate and initialize device arrays\n\nSD1A register configuration for each ADC channel in the channel_sc1as list.\nCopy channel_sc1as list to device.\n\n\nADC result array\nInitialize to zero.", "proxy.free_all()\n\nN = np.dtype('uint16').itemsize * channel_sc1as.size\n\n# Allocate source array\nadc_result_addr = proxy.mem_alloc(N)\n\n# Fill result array with zeros\nproxy.mem_fill_uint8(adc_result_addr, 0, N)\n\n# Copy channel SC1A configurations to device memory\nadc_sda1s_addr = proxy.mem_aligned_alloc_and_set(4, channel_sc1as.view('uint8'))\n\n# Allocate source array\nsamples_addr = proxy.mem_alloc(sample_count * N)\n\ntcds_addr = proxy.mem_aligned_alloc(32, sample_count * 32)\nhw_tcds_addr = 0x40009000\ntcd_addrs = [tcds_addr + 32 * i for i in xrange(sample_count)]\nhw_tcd_addrs = [hw_tcds_addr + 32 * i for i in xrange(sample_count)]\n\n# Fill result array with zeros\nproxy.mem_fill_uint8(samples_addr, 0, sample_count * N)\n\n# Create Transfer Control Descriptor configuration for first chunk, encoded\n# as a Protocol Buffer message.\ntcd0_msg = DMA.TCD(CITER_ELINKNO=DMA.R_TCD_ITER_ELINKNO(ITER=1),\n BITER_ELINKNO=DMA.R_TCD_ITER_ELINKNO(ITER=1),\n ATTR=DMA.R_TCD_ATTR(SSIZE=DMA.R_TCD_ATTR._16_BIT,\n DSIZE=DMA.R_TCD_ATTR._16_BIT),\n NBYTES_MLNO=channel_sc1as.size * 2,\n SADDR=int(adc_result_addr),\n SOFF=2,\n SLAST=-channel_sc1as.size * 2,\n DADDR=int(samples_addr),\n DOFF=2 * sample_count,\n DLASTSGA=int(tcd_addrs[1]),\n CSR=DMA.R_TCD_CSR(START=0, DONE=False, ESG=True))\n\n# Convert Protocol Buffer encoded TCD to bytes structure.\ntcd0 = proxy.tcd_msg_to_struct(tcd0_msg)\n\n# Create binary TCD struct for each TCD protobuf message and copy to device\n# memory.\nfor i in xrange(sample_count):\n tcd_i = tcd0.copy()\n tcd_i['SADDR'] = adc_result_addr\n tcd_i['DADDR'] = samples_addr + 2 * i\n tcd_i['DLASTSGA'] = tcd_addrs[(i + 1) % len(tcd_addrs)]\n tcd_i['CSR'] |= (1 << 4)\n proxy.mem_cpy_host_to_device(tcd_addrs[i], tcd_i.tostring())\n\n# Load initial TCD in scatter chain to DMA channel chosen to handle scattering.\nproxy.mem_cpy_host_to_device(hw_tcd_addrs[dma_channel_scatter],\n tcd0.tostring())\n\nprint 'ADC results:', proxy.mem_cpy_device_to_host(adc_result_addr, N).view('uint16')\nprint 'Analog pins:', proxy.mem_cpy_device_to_host(adc_sda1s_addr, len(channel_sc1as) *\n channel_sc1as.dtype.itemsize).view('uint32')", "Configure DMA channel $i$", "ADC0_SC1A = 0x4003B000 # ADC status and control registers 
1\n\nsda1_tcd_msg = DMA.TCD(CITER_ELINKNO=DMA.R_TCD_ITER_ELINKNO(ELINK=False, ITER=channel_sc1as.size),\n BITER_ELINKNO=DMA.R_TCD_ITER_ELINKNO(ELINK=False, ITER=channel_sc1as.size),\n ATTR=DMA.R_TCD_ATTR(SSIZE=DMA.R_TCD_ATTR._32_BIT,\n DSIZE=DMA.R_TCD_ATTR._32_BIT),\n NBYTES_MLNO=4,\n SADDR=int(adc_sda1s_addr),\n SOFF=4,\n SLAST=-channel_sc1as.size * 4,\n DADDR=int(ADC0_SC1A),\n DOFF=0,\n DLASTSGA=0,\n CSR=DMA.R_TCD_CSR(START=0, DONE=False))\n\nproxy.update_dma_TCD(dma_channel_i, sda1_tcd_msg)", "Configure DMA channel $ii$", "ADC0_RA = 0x4003B010 # ADC data result register\nADC0_RB = 0x4003B014 # ADC data result register\n\n\ntcd_msg = DMA.TCD(CITER_ELINKYES=DMA.R_TCD_ITER_ELINKYES(ELINK=True, LINKCH=1, ITER=channel_sc1as.size),\n BITER_ELINKYES=DMA.R_TCD_ITER_ELINKYES(ELINK=True, LINKCH=1, ITER=channel_sc1as.size),\n ATTR=DMA.R_TCD_ATTR(SSIZE=DMA.R_TCD_ATTR._16_BIT,\n DSIZE=DMA.R_TCD_ATTR._16_BIT),\n NBYTES_MLNO=2,\n SADDR=ADC0_RA,\n SOFF=0,\n SLAST=0,\n DADDR=int(adc_result_addr),\n DOFF=2,\n DLASTSGA=-channel_sc1as.size * 2,\n CSR=DMA.R_TCD_CSR(START=0, DONE=False,\n MAJORELINK=True,\n MAJORLINKCH=dma_channel_scatter))\n\nproxy.update_dma_TCD(dma_channel_ii, tcd_msg)", "Trigger sample scan across selected ADC channels", "# Clear output array to zero.\nproxy.mem_fill_uint8(adc_result_addr, 0, N)\nproxy.mem_fill_uint8(samples_addr, 0, sample_count * N)\n\n# Software trigger channel $i$ to copy *first* SC1A configuration, which\n# starts ADC conversion for the first channel.\n#\n# Conversions for subsequent ADC channels are triggered through minor-loop\n# linking from DMA channel $ii$ to DMA channel $i$ (*not* through explicit\n# software trigger).\nprint 'ADC results:'\nfor i in xrange(sample_count):\n proxy.update_dma_registers(DMA.Registers(SSRT=dma_channel_i))\n\n # Display converted ADC values (one value per channel in `channel_sd1as` list).\n print ' Iteration %s:' % i, proxy.mem_cpy_device_to_host(adc_result_addr, N).view('uint16')\n\nprint ''\nprint 'Samples by channel:'\n# Trigger once per chunk\n# for i in xrange(sample_count):\n# proxy.update_dma_registers(DMA.Registers(SSRT=0))\ndevice_dst_data = proxy.mem_cpy_device_to_host(samples_addr, sample_count * N)\npd.DataFrame(device_dst_data.view('uint16').reshape(-1, sample_count).T,\n columns=teensy_analog_channels)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
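The get_pdb_divide_params helper defined in the record above returns a whole table of candidate PDB dividers, but the notebook only ever takes .iloc[0]. A short usage sketch, assuming the same 48 MHz bus clock as the helper's default, showing which divider is selected for the 25 Hz trigger used above and the rate it actually produces:

# Usage sketch for get_pdb_divide_params (defined in the record above).
# Mirrors the notebook's .iloc[0]: the smallest combined divide factor whose
# 16-bit modulus still fits, then reports the achieved trigger frequency.
F_BUS = int(48e6)   # assumed bus clock, matching the helper's default
params = get_pdb_divide_params(25, F_BUS=F_BUS).iloc[0]

achieved_hz = float(F_BUS) / (params.combined * params.clock_mod)
print('prescaler=%d, mult_factor=%d, clock_mod=%d -> %.3f Hz'
      % (params.prescaler, params.mult_factor, params.clock_mod, achieved_hz))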