Column schema (field · dtype · observed range):

hexsha: stringlengths (40 to 40)
size: int64 (6 to 14.9M)
ext: stringclasses (1 value)
lang: stringclasses (1 value)
max_stars_repo_path: stringlengths (6 to 260)
max_stars_repo_name: stringlengths (6 to 119)
max_stars_repo_head_hexsha: stringlengths (40 to 41)
max_stars_repo_licenses: list
max_stars_count: int64 (1 to 191k)
max_stars_repo_stars_event_min_datetime: stringlengths (24 to 24)
max_stars_repo_stars_event_max_datetime: stringlengths (24 to 24)
max_issues_repo_path: stringlengths (6 to 260)
max_issues_repo_name: stringlengths (6 to 119)
max_issues_repo_head_hexsha: stringlengths (40 to 41)
max_issues_repo_licenses: list
max_issues_count: int64 (1 to 67k)
max_issues_repo_issues_event_min_datetime: stringlengths (24 to 24)
max_issues_repo_issues_event_max_datetime: stringlengths (24 to 24)
max_forks_repo_path: stringlengths (6 to 260)
max_forks_repo_name: stringlengths (6 to 119)
max_forks_repo_head_hexsha: stringlengths (40 to 41)
max_forks_repo_licenses: list
max_forks_count: int64 (1 to 105k)
max_forks_repo_forks_event_min_datetime: stringlengths (24 to 24)
max_forks_repo_forks_event_max_datetime: stringlengths (24 to 24)
avg_line_length: float64 (2 to 1.04M)
max_line_length: int64 (2 to 11.2M)
alphanum_fraction: float64 (0 to 1)
cells: list
cell_types: list
cell_type_groups: list
cb9e27114a65630276820b2fcd2a80ee3649227c
2,543
ipynb
Jupyter Notebook
walker.ipynb
dr-kinder/jupyter-notebooks
621c70e7e428bf8d71132a962ceab2192e8427c5
[ "BSD-3-Clause" ]
1
2021-12-04T14:04:53.000Z
2021-12-04T14:04:53.000Z
walker.ipynb
dr-kinder/jupyter-notebooks
621c70e7e428bf8d71132a962ceab2192e8427c5
[ "BSD-3-Clause" ]
null
null
null
walker.ipynb
dr-kinder/jupyter-notebooks
621c70e7e428bf8d71132a962ceab2192e8427c5
[ "BSD-3-Clause" ]
null
null
null
36.855072
89
0.528116
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
cb9e2e75f4ebdb1d3a241670f56eee44b7532b0f
18,758
ipynb
Jupyter Notebook
workflow-3b/mnist-example-for-experts-gpu.ipynb
surfaceowl/docker-for-data-scientist-examples
d32bf8180fed8388ab8391042b9ad34e1f0d6771
[ "BSD-3-Clause" ]
null
null
null
workflow-3b/mnist-example-for-experts-gpu.ipynb
surfaceowl/docker-for-data-scientist-examples
d32bf8180fed8388ab8391042b9ad34e1f0d6771
[ "BSD-3-Clause" ]
null
null
null
workflow-3b/mnist-example-for-experts-gpu.ipynb
surfaceowl/docker-for-data-scientist-examples
d32bf8180fed8388ab8391042b9ad34e1f0d6771
[ "BSD-3-Clause" ]
null
null
null
31.847199
249
0.592654
[ [ [ "**Deep MNIST for Experts**\n\nThis is an example taken from one of the TensorFlow tutorials: https://www.tensorflow.org/versions/r1.1/get_started/mnist/pros\n\nTo run this as a docker container on my HP200, I used:\n\n```\n nvidia-docker run -it -p 8888:8888 tensorflow/tensorflow:1.5.0-gpu-py3\n ```\n \n If you have a newer CPU, you can use the image tensorflow/tensorflow:lastest-gpu-py3.", "_____no_output_____" ] ], [ [ "# Load the data\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets('MNIST_data', one_hot=True)\n", "/usr/local/lib/python3.5/dist-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n from ._conv import register_converters as _register_converters\n" ], [ "import tensorflow as tf\n# Create our session. We enable the logging of which devices (GPU or CPU)\n# TensorFlow is using. This gets log to the console running the Notebook,\n# not in the notebook\nsess = tf.InteractiveSession(config=tf.ConfigProto(log_device_placement=True))", "_____no_output_____" ], [ "# Placeholders\nx = tf.placeholder(tf.float32, shape=[None, 784])\ny_ = tf.placeholder(tf.float32, shape=[None, 10])", "_____no_output_____" ], [ "# Variables\nW = tf.Variable(tf.zeros([784,10]))\nb = tf.Variable(tf.zeros([10]))", "_____no_output_____" ], [ "sess.run(tf.global_variables_initializer())", "_____no_output_____" ], [ "# The regression model\ny = tf.matmul(x,W) + b", "_____no_output_____" ], [ "# Loss function\ncross_entropy = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))", "WARNING:tensorflow:From <ipython-input-7-9f9d928cdbef>:3: softmax_cross_entropy_with_logits (from tensorflow.python.ops.nn_ops) is deprecated and will be removed in a future version.\nInstructions for updating:\n\nFuture major versions of TensorFlow will allow gradients to flow\ninto the labels input on backprop by default.\n\nSee tf.nn.softmax_cross_entropy_with_logits_v2.\n\n" ], [ "# Train the model - define a training step\ntrain_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)", "_____no_output_____" ], [ "# Run the step 1000 times\nfor _ in range(1000):\n batch = mnist.train.next_batch(100)\n train_step.run(feed_dict={x: batch[0], y_: batch[1]})\n", "_____no_output_____" ], [ "# Evaluate the model\ncorrect_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\nprint(accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels}))", "0.9168\n" ] ], [ [ "**Bulding a multilayer convolutional Network**", "_____no_output_____" ] ], [ [ "# Weight initialization\ndef weight_variable(shape):\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial)\n\ndef bias_variable(shape):\n initial = tf.constant(0.1, shape=shape)\n return tf.Variable(initial)", "_____no_output_____" ], [ "# Convolution and pooling\ndef conv2d(x, W):\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')\n\ndef max_pool_2x2(x):\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1], padding='SAME')", "_____no_output_____" ], [ "# first convolutional layer\nW_conv1 = weight_variable([5, 5, 1, 32])\nb_conv1 = bias_variable([32])\nx_image = tf.reshape(x, [-1,28,28,1])\nh_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)\nh_pool1 = max_pool_2x2(h_conv1)", "_____no_output_____" ], [ "# 
Second convolutional layer\nW_conv2 = weight_variable([5, 5, 32, 64])\nb_conv2 = bias_variable([64])\n\nh_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)\nh_pool2 = max_pool_2x2(h_conv2)", "_____no_output_____" ], [ "# Densely connected layer\nW_fc1 = weight_variable([7 * 7 * 64, 1024])\nb_fc1 = bias_variable([1024])\n\nh_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])\nh_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)", "_____no_output_____" ], [ "# Dropout\nkeep_prob = tf.placeholder(tf.float32)\nh_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)", "_____no_output_____" ], [ "# Readout layer\nW_fc2 = weight_variable([1024, 10])\nb_fc2 = bias_variable([10])\n\ny_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2", "_____no_output_____" ], [ "import time\ncross_entropy = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))\ntrain_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)\ncorrect_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\nsess.run(tf.global_variables_initializer())\nt1 = time.time()\nfor i in range(20000):\n batch = mnist.train.next_batch(50)\n if i%100 == 0:\n train_accuracy = accuracy.eval(feed_dict={\n x:batch[0], y_: batch[1], keep_prob: 1.0})\n print(\"step %d, training accuracy %g\"%(i, train_accuracy))\n train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})\n\nprint(\"test accuracy %g\"%accuracy.eval(feed_dict={\n x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))\nt2 = time.time()\nprint(\"Total time was %.0f seconds\" % (t2-t1))", "step 0, training accuracy 0.16\nstep 100, training accuracy 0.76\nstep 200, training accuracy 0.92\nstep 300, training accuracy 0.92\nstep 400, training accuracy 0.94\nstep 500, training accuracy 0.94\nstep 600, training accuracy 0.94\nstep 700, training accuracy 0.94\nstep 800, training accuracy 0.94\nstep 900, training accuracy 0.96\nstep 1000, training accuracy 0.94\nstep 1100, training accuracy 0.96\nstep 1200, training accuracy 1\nstep 1300, training accuracy 0.96\nstep 1400, training accuracy 0.98\nstep 1500, training accuracy 1\nstep 1600, training accuracy 0.98\nstep 1700, training accuracy 1\nstep 1800, training accuracy 0.96\nstep 1900, training accuracy 0.98\nstep 2000, training accuracy 0.98\nstep 2100, training accuracy 1\nstep 2200, training accuracy 0.98\nstep 2300, training accuracy 0.94\nstep 2400, training accuracy 1\nstep 2500, training accuracy 0.98\nstep 2600, training accuracy 0.96\nstep 2700, training accuracy 0.98\nstep 2800, training accuracy 1\nstep 2900, training accuracy 0.98\nstep 3000, training accuracy 0.98\nstep 3100, training accuracy 0.98\nstep 3200, training accuracy 0.98\nstep 3300, training accuracy 0.98\nstep 3400, training accuracy 1\nstep 3500, training accuracy 0.98\nstep 3600, training accuracy 1\nstep 3700, training accuracy 1\nstep 3800, training accuracy 1\nstep 3900, training accuracy 1\nstep 4000, training accuracy 0.98\nstep 4100, training accuracy 1\nstep 4200, training accuracy 0.98\nstep 4300, training accuracy 1\nstep 4400, training accuracy 1\nstep 4500, training accuracy 1\nstep 4600, training accuracy 0.98\nstep 4700, training accuracy 0.98\nstep 4800, training accuracy 1\nstep 4900, training accuracy 1\nstep 5000, training accuracy 1\nstep 5100, training accuracy 1\nstep 5200, training accuracy 0.98\nstep 5300, training accuracy 1\nstep 5400, training accuracy 0.98\nstep 5500, training accuracy 0.98\nstep 5600, training accuracy 
0.98\nstep 5700, training accuracy 0.98\nstep 5800, training accuracy 0.98\nstep 5900, training accuracy 1\nstep 6000, training accuracy 0.98\nstep 6100, training accuracy 1\nstep 6200, training accuracy 1\nstep 6300, training accuracy 1\nstep 6400, training accuracy 1\nstep 6500, training accuracy 1\nstep 6600, training accuracy 1\nstep 6700, training accuracy 1\nstep 6800, training accuracy 1\nstep 6900, training accuracy 1\nstep 7000, training accuracy 1\nstep 7100, training accuracy 1\nstep 7200, training accuracy 1\nstep 7300, training accuracy 1\nstep 7400, training accuracy 0.96\nstep 7500, training accuracy 1\nstep 7600, training accuracy 1\nstep 7700, training accuracy 1\nstep 7800, training accuracy 0.98\nstep 7900, training accuracy 1\nstep 8000, training accuracy 1\nstep 8100, training accuracy 1\nstep 8200, training accuracy 1\nstep 8300, training accuracy 1\nstep 8400, training accuracy 1\nstep 8500, training accuracy 0.98\nstep 8600, training accuracy 1\nstep 8700, training accuracy 0.98\nstep 8800, training accuracy 1\nstep 8900, training accuracy 1\nstep 9000, training accuracy 1\nstep 9100, training accuracy 1\nstep 9200, training accuracy 1\nstep 9300, training accuracy 1\nstep 9400, training accuracy 1\nstep 9500, training accuracy 1\nstep 9600, training accuracy 1\nstep 9700, training accuracy 1\nstep 9800, training accuracy 1\nstep 9900, training accuracy 1\nstep 10000, training accuracy 1\nstep 10100, training accuracy 1\nstep 10200, training accuracy 1\nstep 10300, training accuracy 1\nstep 10400, training accuracy 1\nstep 10500, training accuracy 1\nstep 10600, training accuracy 1\nstep 10700, training accuracy 0.98\nstep 10800, training accuracy 1\nstep 10900, training accuracy 1\nstep 11000, training accuracy 1\nstep 11100, training accuracy 1\nstep 11200, training accuracy 1\nstep 11300, training accuracy 0.98\nstep 11400, training accuracy 0.98\nstep 11500, training accuracy 1\nstep 11600, training accuracy 1\nstep 11700, training accuracy 1\nstep 11800, training accuracy 0.98\nstep 11900, training accuracy 1\nstep 12000, training accuracy 1\nstep 12100, training accuracy 1\nstep 12200, training accuracy 1\nstep 12300, training accuracy 1\nstep 12400, training accuracy 1\nstep 12500, training accuracy 1\nstep 12600, training accuracy 0.98\nstep 12700, training accuracy 1\nstep 12800, training accuracy 1\nstep 12900, training accuracy 1\nstep 13000, training accuracy 1\nstep 13100, training accuracy 1\nstep 13200, training accuracy 1\nstep 13300, training accuracy 1\nstep 13400, training accuracy 1\nstep 13500, training accuracy 1\nstep 13600, training accuracy 1\nstep 13700, training accuracy 0.98\nstep 13800, training accuracy 1\nstep 13900, training accuracy 1\nstep 14000, training accuracy 1\nstep 14100, training accuracy 1\nstep 14200, training accuracy 1\nstep 14300, training accuracy 1\nstep 14400, training accuracy 1\nstep 14500, training accuracy 1\nstep 14600, training accuracy 1\nstep 14700, training accuracy 1\nstep 14800, training accuracy 1\nstep 14900, training accuracy 1\nstep 15000, training accuracy 1\nstep 15100, training accuracy 1\nstep 15200, training accuracy 1\nstep 15300, training accuracy 1\nstep 15400, training accuracy 0.98\nstep 15500, training accuracy 1\nstep 15600, training accuracy 1\nstep 15700, training accuracy 1\nstep 15800, training accuracy 1\nstep 15900, training accuracy 1\nstep 16000, training accuracy 1\nstep 16100, training accuracy 1\nstep 16200, training accuracy 1\nstep 16300, training accuracy 1\nstep 16400, 
training accuracy 1\nstep 16500, training accuracy 1\nstep 16600, training accuracy 1\nstep 16700, training accuracy 1\nstep 16800, training accuracy 1\nstep 16900, training accuracy 1\nstep 17000, training accuracy 1\nstep 17100, training accuracy 1\nstep 17200, training accuracy 0.98\nstep 17300, training accuracy 1\nstep 17400, training accuracy 1\nstep 17500, training accuracy 1\nstep 17600, training accuracy 1\nstep 17700, training accuracy 1\nstep 17800, training accuracy 1\nstep 17900, training accuracy 1\nstep 18000, training accuracy 1\nstep 18100, training accuracy 1\nstep 18200, training accuracy 1\nstep 18300, training accuracy 1\nstep 18400, training accuracy 0.98\nstep 18500, training accuracy 1\nstep 18600, training accuracy 1\nstep 18700, training accuracy 1\nstep 18800, training accuracy 1\nstep 18900, training accuracy 1\nstep 19000, training accuracy 1\nstep 19100, training accuracy 1\nstep 19200, training accuracy 1\nstep 19300, training accuracy 1\nstep 19400, training accuracy 1\nstep 19500, training accuracy 1\nstep 19600, training accuracy 1\nstep 19700, training accuracy 1\nstep 19800, training accuracy 1\nstep 19900, training accuracy 1\ntest accuracy 0.9918\nTotal time was 136 seconds\n" ], [ "# The time with an NVIDIA GTX1060 was 136 seconds\n# The time on a macbook pro laptop (CPU-only, no GPU) was 1683 seconds.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb9e2ef4d6e65b89e16df99166b079f0a7296488
50,135
ipynb
Jupyter Notebook
problem_gen/quadrotor/Numerical Norm Bounds.ipynb
locuslab/robust-nn-control
666fb1540f20555aa04bccde12603e67a1c0b913
[ "Apache-2.0" ]
34
2020-11-20T05:19:42.000Z
2022-03-14T13:03:37.000Z
problem_gen/quadrotor/Numerical Norm Bounds.ipynb
locuslab/robust-nn-control
666fb1540f20555aa04bccde12603e67a1c0b913
[ "Apache-2.0" ]
null
null
null
problem_gen/quadrotor/Numerical Norm Bounds.ipynb
locuslab/robust-nn-control
666fb1540f20555aa04bccde12603e67a1c0b913
[ "Apache-2.0" ]
6
2020-11-22T16:03:14.000Z
2021-04-24T09:45:02.000Z
42.272344
1,892
0.497497
[ [ [ "# Numerical norm bounds for quadrotor ", "_____no_output_____" ], [ "For a quadrotor system with state $x = \\begin{bmatrix}p_x & p_z & \\phi & v_x & v_z & \\dot{\\phi} \\end{bmatrix}^T$ we have \n\n\\begin{equation}\n\\dot{x} = \\begin{bmatrix} \nv_x \\cos\\phi - v_z\\sin\\phi \\\\\nv_x \\sin\\phi + v_z\\cos\\phi \\\\\n\\dot{\\phi} \\\\\nv_z\\dot{\\phi} - g\\sin{\\phi} \\\\\n-v_x\\dot{\\phi} - g\\cos{\\phi} + g \\\\\n0\n\\end{bmatrix}.\n\\end{equation}\n\nEvaluating the corresponding Jacobian at 0 yields:\n\\begin{equation}\n\\nabla f(0)x = \\begin{bmatrix} v_x & v_z & \\dot{\\phi} & -g\\phi & 0 & 0 \\end{bmatrix}^T\n\\end{equation}\n\nWe want to find an NLDI of the form\n\\begin{equation}\n\\dot{x} = \\nabla f(0) x + I p, \\;\\; \\|p\\| \\leq \\|Cx\\|\n\\end{equation}\n\nTo find $C$, we determine an entry-wise norm bound. That is, for $i=1,\\ldots,6$, we want to find $C_i$ such that for all $x$ such that $x_{\\text{min}} \\leq x \\leq x_{\\text{max}}$:\n\\begin{equation}\n(\\nabla f_i(0)x - \\dot{x}_i)^2 \\leq x^T C_i x\n\\end{equation}\nand then write\n\\begin{equation}\n\\|\\dot{x} - \\nabla f(0)x\\|_2 \\leq \\|\\begin{bmatrix} C_1^{1/2} \\\\ C_2^{1/2} \\\\ C_3^{1/2} \\\\ C_4^{1/2} \\\\ C_5^{1/2} \\\\ C_6^{1/2} \\end{bmatrix} x \\|\n\\end{equation}\n", "_____no_output_____" ] ], [ [ "import numpy as np\nimport cvxpy as cp\nimport scipy.linalg as sla", "_____no_output_____" ], [ "g = 9.81", "_____no_output_____" ] ], [ [ "## Define max and min values ", "_____no_output_____" ] ], [ [ "# State is: x = [px, pz, phi, vx, vz, phidot]^T\nx_max = np.array([1.1, 1.1, 0.06, 0.5, 1.0, 0.8])\nx_min = np.array([-1.1, -1.1, -0.06, -0.5, -1.0, -0.8])\n\npx_max, pz_max, phi_max, vx_max, vz_max, phidot_max = x_max\npx_min, pz_min, phi_min, vx_min, vz_min, phidot_min = x_min\n\nn = 6\npx_idx, pz_idx, phi_idx, vx_idx, vz_idx, phidot_idx = range(n)", "_____no_output_____" ] ], [ [ "## Find element-wise bounds ", "_____no_output_____" ], [ "### $f_1$", "_____no_output_____" ] ], [ [ "gridnum = 50\nvx = np.linspace(vx_min, vx_max, gridnum)\nvz = np.linspace(vz_min, vz_max, gridnum)\nphi = np.linspace(phi_min, phi_max, gridnum)\n\nVx, Vz, Phi = np.meshgrid(vx, vz, phi)\n\nv1 = np.ravel(( Vx - (Vx*np.cos(Phi) - Vz*np.sin(Phi)) )**2)\nU1 = np.array([np.ravel(Vx*Vx), \n np.ravel(Vz*Vz), \n np.ravel(Phi*Phi),\n 2*np.ravel(Vx*Vz), \n 2*np.ravel(Vx*Phi), \n 2*np.ravel(Vz*Phi)]).T", "_____no_output_____" ], [ "c1 = cp.Variable(6)\ncp.Problem(cp.Minimize(cp.max(U1@c1 - v1)), [U1@c1 >= v1, c1[:3]>=0]).solve(verbose=True, solver=cp.MOSEK)", "\n\nProblem\n Name : \n Objective sense : min \n Type : LO (linear optimization problem)\n Constraints : 250003 \n Cones : 0 \n Scalar variables : 7 \n Matrix variables : 0 \n Integer variables : 0 \n\nOptimizer started.\nProblem\n Name : \n Objective sense : min \n Type : LO (linear optimization problem)\n Constraints : 250003 \n Cones : 0 \n Scalar variables : 7 \n Matrix variables : 0 \n Integer variables : 0 \n\nOptimizer - threads : 2 \nOptimizer - solved problem : the dual \nOptimizer - Constraints : 7\nOptimizer - Cones : 0\nOptimizer - Scalar variables : 122936 conic : 0 \nOptimizer - Semi-definite variables: 0 scalarized : 0 \nFactor - setup time : 0.09 dense det. time : 0.00 \nFactor - ML order time : 0.00 GP order time : 0.00 \nFactor - nonzeros before factor : 28 after factor : 28 \nFactor - dense dim. 
: 0 flops : 6.04e+06 \nITE PFEAS DFEAS GFEAS PRSTATUS POBJ DOBJ MU TIME \n0 1.4e+00 8.9e+04 8.4e+00 0.00e+00 0.000000000e+00 6.943931180e+00 2.0e+00 0.76 \n1 1.1e+00 7.1e+04 6.7e+00 7.00e+00 3.953354935e-02 5.860630276e-01 1.6e+00 0.93 \n2 1.0e-01 8.4e+03 8.1e-01 7.37e+01 3.659181128e-03 5.356544162e-03 1.9e-01 1.01 \n3 5.6e-02 4.5e+03 4.3e-01 2.48e+00 2.424790474e-03 3.021057726e-03 9.9e-02 1.07 \n4 3.4e-02 2.7e+03 2.7e-01 1.54e+00 2.117503697e-03 2.432026790e-03 6.1e-02 1.14 \n5 8.0e-03 6.4e+02 6.2e-02 1.47e+00 1.887132372e-03 1.954297191e-03 1.4e-02 1.20 \n6 2.7e-03 2.1e+02 2.1e-02 1.06e+00 1.857914225e-03 1.879094799e-03 4.7e-03 1.25 \n7 2.8e-04 2.2e+01 2.2e-03 1.02e+00 1.854298909e-03 1.856346285e-03 4.9e-04 1.30 \n8 2.6e-05 2.1e+00 2.0e-04 1.00e+00 1.852565456e-03 1.852743840e-03 4.6e-05 1.34 \n9 4.5e-06 3.6e-01 3.5e-05 1.00e+00 1.852200060e-03 1.852230917e-03 8.0e-06 1.40 \n10 2.3e-07 1.8e-02 1.8e-06 1.00e+00 1.852167145e-03 1.852168639e-03 4.1e-07 1.45 \n11 4.6e-08 3.7e-03 3.7e-07 9.98e-01 1.852167020e-03 1.852167333e-03 8.3e-08 1.50 \n12 3.1e-09 2.5e-04 2.5e-08 1.00e+00 1.852166918e-03 1.852166939e-03 5.6e-09 1.55 \n13 2.5e-11 2.0e-06 2.0e-10 1.00e+00 1.852166914e-03 1.852166914e-03 4.4e-11 1.60 \n14 2.3e-13 2.1e-09 4.6e-13 1.00e+00 1.852166914e-03 1.852166914e-03 4.4e-15 1.70 \nBasis identification started.\nPrimal basis identification phase started.\nPrimal basis identification phase terminated. Time: 0.01\nDual basis identification phase started.\nDual basis identification phase terminated. Time: 0.01\nBasis identification terminated. Time: 0.11\nOptimizer terminated. Time: 2.05 \n\n\nInterior-point solution summary\n Problem status : PRIMAL_AND_DUAL_FEASIBLE\n Solution status : OPTIMAL\n Primal. obj: 1.8521669140e-03 nrm: 5e-01 Viol. con: 4e-18 var: 0e+00 \n Dual. obj: 1.8521669140e-03 nrm: 3e-01 Viol. con: 0e+00 var: 9e-13 \n\nBasic solution summary\n Problem status : PRIMAL_AND_DUAL_FEASIBLE\n Solution status : OPTIMAL\n Primal. obj: 1.8521669140e-03 nrm: 5e-01 Viol. con: 2e-18 var: 0e+00 \n Dual. obj: 1.8521669140e-03 nrm: 5e-01 Viol. 
con: 0e+00 var: 9e-17 \n" ], [ "c1 = c1.value\nc1", "_____no_output_____" ], [ "C1 = np.zeros((n,n))\nC1[vx_idx, vx_idx] = c1[0]/2\nC1[vz_idx, vz_idx] = c1[1]/2\nC1[phi_idx, phi_idx] = c1[2]/2\nC1[vx_idx, vz_idx] = c1[3]\nC1[vx_idx, phi_idx] = c1[4]\nC1[vz_idx, phi_idx] = c1[5]\nC1 += C1.T", "_____no_output_____" ], [ "gam1 = np.real(sla.sqrtm(C1))\ngam1", "_____no_output_____" ] ], [ [ "### $f_2$ ", "_____no_output_____" ] ], [ [ "gridnum = 50\nvx = np.linspace(vx_min, vx_max, gridnum)\nvz = np.linspace(vz_min, vz_max, gridnum)\nphi = np.linspace(phi_min, phi_max, gridnum)\n\nVx, Vz, Phi = np.meshgrid(vx, vz, phi)\n\nv2 = np.ravel(( Vz - (Vx*np.sin(Phi) + Vz*np.cos(Phi)) )**2)\nU2 = np.array([np.ravel(Vx*Vx), \n np.ravel(Vz*Vz), \n np.ravel(Phi*Phi),\n 2*np.ravel(Vx*Vz), \n 2*np.ravel(Vx*Phi), \n 2*np.ravel(Vz*Phi)]).T", "_____no_output_____" ], [ "c2 = cp.Variable(6)\ncp.Problem(cp.Minimize(cp.max(U2@c2 - v2)), [U2@c2 >= v2, c2[:3]>=0]).solve(verbose=True, solver=cp.MOSEK)", "\n\nProblem\n Name : \n Objective sense : min \n Type : LO (linear optimization problem)\n Constraints : 250003 \n Cones : 0 \n Scalar variables : 7 \n Matrix variables : 0 \n Integer variables : 0 \n\nOptimizer started.\nProblem\n Name : \n Objective sense : min \n Type : LO (linear optimization problem)\n Constraints : 250003 \n Cones : 0 \n Scalar variables : 7 \n Matrix variables : 0 \n Integer variables : 0 \n\nOptimizer - threads : 2 \nOptimizer - solved problem : the dual \nOptimizer - Constraints : 7\nOptimizer - Cones : 0\nOptimizer - Scalar variables : 122936 conic : 0 \nOptimizer - Semi-definite variables: 0 scalarized : 0 \nFactor - setup time : 0.08 dense det. time : 0.00 \nFactor - ML order time : 0.00 GP order time : 0.00 \nFactor - nonzeros before factor : 28 after factor : 28 \nFactor - dense dim. 
: 0 flops : 6.04e+06 \nITE PFEAS DFEAS GFEAS PRSTATUS POBJ DOBJ MU TIME \n0 1.4e+00 8.8e+04 2.7e+00 0.00e+00 0.000000000e+00 2.888310662e+00 2.0e+00 0.54 \n1 1.0e+00 6.3e+04 1.9e+00 3.72e+00 1.497409374e-01 5.521039135e-01 1.4e+00 0.71 \n2 3.4e-01 2.1e+04 6.5e-01 1.00e+03 6.294967125e-04 8.688822193e-04 4.9e-01 0.77 \n3 3.4e-01 2.1e+04 6.4e-01 1.72e+00 6.290829369e-04 8.657975588e-04 4.8e-01 0.83 \n4 3.4e-01 2.1e+04 6.4e-01 1.67e+00 6.279565952e-04 8.625949961e-04 4.8e-01 0.89 \n5 3.3e-01 2.1e+04 6.3e-01 1.72e+00 6.270677659e-04 8.570302389e-04 4.7e-01 0.96 \n6 3.2e-01 2.0e+04 6.1e-01 1.68e+00 6.252407083e-04 8.448957970e-04 4.6e-01 1.02 \n7 3.1e-01 2.0e+04 5.9e-01 1.67e+00 6.175296247e-04 8.261454799e-04 4.4e-01 1.08 \n8 2.5e-01 1.6e+04 4.8e-01 1.70e+00 5.824034882e-04 7.371791165e-04 3.6e-01 1.14 \n9 1.3e-01 8.0e+03 2.4e-01 1.73e+00 5.050833503e-04 5.683288332e-04 1.8e-01 1.20 \n10 7.7e-02 4.8e+03 1.4e-01 1.32e+00 5.035677722e-04 5.402474962e-04 1.1e-01 1.26 \n11 5.5e-02 3.4e+03 1.0e-01 1.14e+00 5.038167063e-04 5.297276962e-04 7.7e-02 1.31 \n12 1.8e-02 1.1e+03 3.4e-02 1.10e+00 5.050538889e-04 5.133700192e-04 2.5e-02 1.37 \n13 6.7e-03 4.2e+02 1.3e-02 1.03e+00 5.057673127e-04 5.088207948e-04 9.4e-03 1.41 \n14 1.9e-03 1.2e+02 3.5e-03 1.01e+00 5.069034788e-04 5.077525583e-04 2.6e-03 1.46 \n15 3.7e-05 2.3e+00 6.9e-05 1.00e+00 5.070399020e-04 5.070565883e-04 5.2e-05 1.51 \n16 2.6e-07 1.6e-02 5.0e-07 1.00e+00 5.070405638e-04 5.070406835e-04 3.7e-07 1.56 \n17 4.6e-08 2.9e-03 9.4e-08 9.98e-01 5.070396539e-04 5.070396768e-04 6.5e-08 1.62 \n18 2.8e-09 1.8e-04 8.5e-09 9.91e-01 5.070396708e-04 5.070396729e-04 3.8e-09 1.66 \n19 3.1e-13 2.0e-08 5.7e-13 1.00e+00 5.070396715e-04 5.070396715e-04 4.3e-13 1.69 \nBasis identification started.\nPrimal basis identification phase started.\nPrimal basis identification phase terminated. Time: 0.01\nDual basis identification phase started.\nDual basis identification phase terminated. Time: 0.00\nBasis identification terminated. Time: 0.06\nOptimizer terminated. Time: 1.92 \n\n\nInterior-point solution summary\n Problem status : PRIMAL_AND_DUAL_FEASIBLE\n Solution status : OPTIMAL\n Primal. obj: 5.0703967149e-04 nrm: 1e-01 Viol. con: 8e-17 var: 0e+00 \n Dual. obj: 5.0703967149e-04 nrm: 5e-01 Viol. con: 0e+00 var: 6e-14 \n\nBasic solution summary\n Problem status : PRIMAL_AND_DUAL_FEASIBLE\n Solution status : OPTIMAL\n Primal. obj: 5.0703967149e-04 nrm: 1e-01 Viol. con: 1e-16 var: 0e+00 \n Dual. obj: 5.0703967149e-04 nrm: 5e-01 Viol. 
con: 4e-17 var: 6e-17 \n" ], [ "c2 = c2.value\nc2", "_____no_output_____" ], [ "C2 = np.zeros((n,n))\nC2[vx_idx, vx_idx] = c2[0]/2\nC2[vz_idx, vz_idx] = c2[1]/2\nC2[phi_idx, phi_idx] = c2[2]/2\nC2[vx_idx, vz_idx] = c2[3]\nC2[vx_idx, phi_idx] = c2[4]\nC2[vz_idx, phi_idx] = c2[5]\nC2 += C2.T", "_____no_output_____" ], [ "gam2 = np.real(sla.sqrtm(C2))\ngam2", "_____no_output_____" ] ], [ [ "### $f_3$ ", "_____no_output_____" ], [ "No error -- linearization is the same as original", "_____no_output_____" ], [ "### $f_4$", "_____no_output_____" ] ], [ [ "gridnum = 50\nvz = np.linspace(vz_min, vz_max, gridnum)\nphi = np.linspace(phi_min, phi_max, gridnum)\nphidot = np.linspace(phidot_min, phidot_max, gridnum)\n\nVz, Phi, Phidot = np.meshgrid(vz, phi, phidot)\n\nv4 = np.ravel(( -g*Phi - (Vz*Phidot - g*np.sin(Phi)) )**2)\nU4 = np.array([np.ravel(Vz*Vz), \n np.ravel(Phi*Phi), \n np.ravel(Phidot*Phidot),\n 2*np.ravel(Vz*Phi), \n 2*np.ravel(Vz*Phidot), \n 2*np.ravel(Phi*Phidot)]).T", "_____no_output_____" ], [ "c4 = cp.Variable(6)\ncp.Problem(cp.Minimize(cp.max(U4@c4 - v4)), [U4@c4 >= v4, c4[:3]>=0]).solve(verbose=True, solver=cp.MOSEK)", "\n\nProblem\n Name : \n Objective sense : min \n Type : LO (linear optimization problem)\n Constraints : 250003 \n Cones : 0 \n Scalar variables : 7 \n Matrix variables : 0 \n Integer variables : 0 \n\nOptimizer started.\nProblem\n Name : \n Objective sense : min \n Type : LO (linear optimization problem)\n Constraints : 250003 \n Cones : 0 \n Scalar variables : 7 \n Matrix variables : 0 \n Integer variables : 0 \n\nOptimizer - threads : 2 \nOptimizer - solved problem : the dual \nOptimizer - Constraints : 7\nOptimizer - Cones : 0\nOptimizer - Scalar variables : 122149 conic : 0 \nOptimizer - Semi-definite variables: 0 scalarized : 0 \nFactor - setup time : 0.09 dense det. time : 0.00 \nFactor - ML order time : 0.00 GP order time : 0.00 \nFactor - nonzeros before factor : 28 after factor : 28 \nFactor - dense dim. : 0 flops : 6.01e+06 \nITE PFEAS DFEAS GFEAS PRSTATUS POBJ DOBJ MU TIME \n0 3.0e+00 1.1e+05 1.6e+03 0.00e+00 0.000000000e+00 9.091183346e+02 3.3e+00 0.66 \n1 3.0e-01 1.1e+04 1.7e+02 4.24e+00 6.598123515e-01 4.094768408e+01 3.3e-01 0.76 \n2 1.0e-03 3.9e+01 5.6e-01 1.12e+00 6.900962442e-01 8.139248935e-01 1.1e-03 0.80 \n3 2.5e-04 9.7e+00 1.4e-01 3.16e+00 3.450137611e-01 3.609226013e-01 2.8e-04 0.85 \n4 1.0e-05 1.4e+00 3.9e-02 1.25e+00 3.269622800e-01 3.311384313e-01 4.0e-05 0.90 \n5 1.8e-06 2.4e-01 6.5e-03 1.05e+00 3.202955012e-01 3.209802656e-01 6.7e-06 0.94 \n6 1.7e-07 2.4e-02 6.4e-04 1.00e+00 3.202073769e-01 3.202747693e-01 6.6e-07 0.99 \n7 3.6e-08 5.0e-03 1.3e-04 1.00e+00 3.201669028e-01 3.201810768e-01 1.4e-07 1.04 \n8 3.0e-09 4.0e-04 1.1e-05 1.00e+00 3.201613757e-01 3.201625250e-01 1.1e-08 1.10 \n9 8.1e-11 1.1e-05 3.0e-07 1.00e+00 3.201607921e-01 3.201608235e-01 3.1e-10 1.15 \n10 1.1e-14 1.3e-09 3.6e-11 1.00e+00 3.201607844e-01 3.201607844e-01 3.8e-14 1.19 \nBasis identification started.\nPrimal basis identification phase started.\nPrimal basis identification phase terminated. Time: 0.01\nDual basis identification phase started.\nDual basis identification phase terminated. Time: 0.00\nBasis identification terminated. Time: 0.08\nOptimizer terminated. Time: 1.43 \n\n\nInterior-point solution summary\n Problem status : PRIMAL_AND_DUAL_FEASIBLE\n Solution status : OPTIMAL\n Primal. obj: 3.2016078437e-01 nrm: 6e-01 Viol. con: 1e-15 var: 0e+00 \n Dual. obj: 3.2016078437e-01 nrm: 1e-01 Viol. 
con: 0e+00 var: 3e-16 \n\nBasic solution summary\n Problem status : PRIMAL_AND_DUAL_FEASIBLE\n Solution status : OPTIMAL\n Primal. obj: 3.2016078437e-01 nrm: 6e-01 Viol. con: 1e-12 var: 0e+00 \n Dual. obj: 3.2016078437e-01 nrm: 5e-01 Viol. con: 0e+00 var: 6e-17 \n" ], [ "c4 = c4.value\nc4", "_____no_output_____" ], [ "C4 = np.zeros((n,n))\nC4[vz_idx, vz_idx] = c4[0]/2\nC4[phi_idx, phi_idx] = c4[1]/2\nC4[phidot_idx, phidot_idx] = c4[2]/2\nC4[vz_idx, phi_idx] = c4[3]\nC4[vz_idx, phidot_idx] = c4[4]\nC4[phi_idx, phidot_idx] = c4[5]\nC4 += C4.T", "_____no_output_____" ], [ "gam4 = np.real(sla.sqrtm(C4))\ngam4", "_____no_output_____" ] ], [ [ "### $f_5$ ", "_____no_output_____" ] ], [ [ "gridnum = 50\nvx = np.linspace(vx_min, vx_max, gridnum)\nphi = np.linspace(phi_min, phi_max, gridnum)\nphidot = np.linspace(phidot_min, phidot_max, gridnum)\n\nVx, Phi, Phidot = np.meshgrid(vx, phi, phidot)\n\nv5 = np.ravel(( 0 - (-Vx*Phidot - g*np.cos(Phi) + g) )**2)\nU5 = np.array([np.ravel(Vx*Vx), \n np.ravel(Phi*Phi), \n np.ravel(Phidot*Phidot),\n 2*np.ravel(Vx*Phi), \n 2*np.ravel(Vx*Phidot), \n 2*np.ravel(Phi*Phidot)]).T", "_____no_output_____" ], [ "c5 = cp.Variable(6)\ncp.Problem(cp.Minimize(cp.max(U5@c5 - v5)), [U5@c5 >= v5, c5[:3]>=0]).solve(verbose=True, solver=cp.MOSEK)", "\n\nProblem\n Name : \n Objective sense : min \n Type : LO (linear optimization problem)\n Constraints : 250003 \n Cones : 0 \n Scalar variables : 7 \n Matrix variables : 0 \n Integer variables : 0 \n\nOptimizer started.\nProblem\n Name : \n Objective sense : min \n Type : LO (linear optimization problem)\n Constraints : 250003 \n Cones : 0 \n Scalar variables : 7 \n Matrix variables : 0 \n Integer variables : 0 \n\nOptimizer - threads : 2 \nOptimizer - solved problem : the dual \nOptimizer - Constraints : 7\nOptimizer - Cones : 0\nOptimizer - Scalar variables : 122149 conic : 0 \nOptimizer - Semi-definite variables: 0 scalarized : 0 \nFactor - setup time : 0.07 dense det. time : 0.00 \nFactor - ML order time : 0.00 GP order time : 0.00 \nFactor - nonzeros before factor : 28 after factor : 28 \nFactor - dense dim. : 0 flops : 6.01e+06 \nITE PFEAS DFEAS GFEAS PRSTATUS POBJ DOBJ MU TIME \n0 1.8e+00 9.6e+04 4.8e+02 0.00e+00 0.000000000e+00 3.154225613e+02 2.3e+00 0.58 \n1 3.5e-01 1.9e+04 9.4e+01 1.51e+01 1.708530797e-01 1.361746703e+01 4.6e-01 0.70 \n2 3.7e-03 2.0e+02 9.9e-01 1.19e+00 1.903514832e-01 2.645236663e-01 4.8e-03 0.78 \n3 1.7e-03 9.1e+01 4.6e-01 2.77e+00 1.048209688e-01 1.242537439e-01 2.2e-03 0.85 \n4 1.2e-03 6.2e+01 3.1e-01 1.54e+00 9.565712271e-02 1.078761643e-01 1.5e-03 0.91 \n5 1.6e-04 8.3e+00 4.2e-02 1.27e+00 8.441387864e-02 8.587319924e-02 2.0e-04 0.97 \n6 2.4e-05 1.3e+00 6.4e-03 1.03e+00 8.399782488e-02 8.422094583e-02 3.1e-05 1.03 \n7 2.6e-06 1.4e-01 7.1e-04 1.01e+00 8.370345427e-02 8.372791389e-02 3.5e-06 1.09 \n8 1.8e-06 9.6e-02 4.8e-04 1.00e+00 8.368982668e-02 8.370656710e-02 2.4e-06 1.14 \n9 1.5e-06 8.0e-02 4.0e-04 1.00e+00 8.368789658e-02 8.370180736e-02 2.0e-06 1.20 \n10 2.0e-07 1.1e-02 5.5e-05 1.00e+00 8.367557566e-02 8.367745889e-02 2.7e-07 1.29 \n11 2.3e-09 1.2e-04 6.0e-07 1.00e+00 8.367462981e-02 8.367465059e-02 2.9e-09 1.36 \n12 2.4e-13 1.2e-08 5.8e-11 1.00e+00 8.367461885e-02 8.367461885e-02 3.0e-13 1.42 \nBasis identification started.\nPrimal basis identification phase started.\nPrimal basis identification phase terminated. Time: 0.01\nDual basis identification phase started.\nDual basis identification phase terminated. Time: 0.00\nBasis identification terminated. Time: 0.08\nOptimizer terminated. 
Time: 1.66 \n\n\nInterior-point solution summary\n Problem status : PRIMAL_AND_DUAL_FEASIBLE\n Solution status : OPTIMAL\n Primal. obj: 8.3674618850e-02 nrm: 3e-01 Viol. con: 8e-15 var: 0e+00 \n Dual. obj: 8.3674618852e-02 nrm: 2e-01 Viol. con: 0e+00 var: 0e+00 \n\nBasic solution summary\n Problem status : PRIMAL_AND_DUAL_FEASIBLE\n Solution status : OPTIMAL\n Primal. obj: 8.3674618850e-02 nrm: 3e-01 Viol. con: 5e-12 var: 0e+00 \n Dual. obj: 8.3674618849e-02 nrm: 5e-01 Viol. con: 1e-17 var: 3e-17 \n" ], [ "c5 = c5.value\nc5", "_____no_output_____" ], [ "C5 = np.zeros((n,n))\nC5[vx_idx, vx_idx] = c5[0]/2\nC5[phi_idx, phi_idx] = c5[1]/2\nC5[phidot_idx, phidot_idx] = c5[2]/2\nC5[vx_idx, phi_idx] = c5[3]\nC5[vx_idx, phidot_idx] = c5[4]\nC5[phi_idx, phidot_idx] = c5[5]\nC5 += C5.T", "_____no_output_____" ], [ "np.linalg.eig(C5)[0]", "_____no_output_____" ], [ "gam5 = np.real(sla.sqrtm(C5))\ngam5", "_____no_output_____" ] ], [ [ "### $f_6$ ", "_____no_output_____" ], [ "No error -- linearization is the same as original", "_____no_output_____" ], [ "## Final system ", "_____no_output_____" ] ], [ [ "from linearize_dynamics import *", "_____no_output_____" ], [ "A = quadrotor_jacobian(np.zeros(n))\nG = np.eye(n)\nC = np.vstack([gam1, gam2, gam4, gam5])", "_____no_output_____" ] ], [ [ "### Check correctness ", "_____no_output_____" ] ], [ [ "prop = np.random.random((1000000, n))\nrand_xs = x_max*prop + x_min*(1-prop)\nfx = xdot_uncontrolled(torch.Tensor(rand_xs))\n# print(np.linalg.norm((fx - [email protected])@np.linalg.inv(G).T, axis=1) <= np.linalg.norm([email protected], axis=1))\nprint((np.linalg.norm((fx - [email protected])@np.linalg.inv(G).T, axis=1) <= np.linalg.norm([email protected], axis=1)).all())\n\nratio = np.linalg.norm([email protected], axis=1)/np.linalg.norm((fx - [email protected])@np.linalg.inv(G).T, axis=1)\nprint(ratio.max())\nprint(ratio.mean())\nprint(np.median(ratio))", "True\n2541.4533619098297\n3.9740415003721594\n2.3853982860700276\n" ] ], [ [ "### Save ", "_____no_output_____" ] ], [ [ "np.save('A.npy', A)\nnp.save('G.npy', G)\nnp.save('C.npy', C)", "_____no_output_____" ] ], [ [ "## Check if robust LQR solves ", "_____no_output_____" ] ], [ [ "import scipy.linalg as la", "_____no_output_____" ], [ "mass = 1\nmoment_arm = 0.01\ninertia_roll = 15.67e-3\n\nB = np.array([\n [0, 0],\n [0, 0],\n [0, 0],\n [0, 0],\n [1/mass, 1/mass],\n [moment_arm/inertia_roll, -moment_arm/inertia_roll]\n ])\nm = B.shape[1]\n\nD = np.zeros((C.shape[0], m))\n\nQ = np.random.randn(n, n)\nQ = Q.T @ Q\n# Q = np.eye(n)\n\nR = np.random.randn(m, m)\nR = R.T @ R\n# R = np.eye(m)", "_____no_output_____" ], [ "alpha = 0.0001\n\nn, m = B.shape\nwq = C.shape[0]\n\nS = cp.Variable((n, n), symmetric=True)\nY = cp.Variable((m, n))\nmu = cp.Variable()\n\nR_sqrt = la.sqrtm(R)\nf = cp.trace(S @ Q) + cp.matrix_frac(Y.T @ R_sqrt, S)\n\ncons_mat = cp.bmat((\n (A @ S + S @ A.T + cp.multiply(mu, G @ G.T) + B @ Y + Y.T @ B.T + alpha * S, S @ C.T + Y.T @ D.T),\n (C @ S + D @ Y, -cp.multiply(mu, np.eye(wq)))\n))\ncons = [S >> 0, mu >= 1e-2] + [cons_mat << 0]\n\ncp.Problem(cp.Minimize(f), cons).solve(solver=cp.MOSEK, verbose=True)\nK = np.linalg.solve(S.value, Y.value.T).T", "\n\nProblem\n Name : \n Objective sense : min \n Type : CONIC (conic optimization problem)\n Constraints : 1053 \n Cones : 0 \n Scalar variables : 73 \n Matrix variables : 3 \n Integer variables : 0 \n\nOptimizer started.\nProblem\n Name : \n Objective sense : min \n Type : CONIC (conic optimization problem)\n Constraints : 1053 \n Cones : 0 \n 
Scalar variables : 73 \n Matrix variables : 3 \n Integer variables : 0 \n\nOptimizer - threads : 2 \nOptimizer - solved problem : the primal \nOptimizer - Constraints : 1000\nOptimizer - Cones : 1\nOptimizer - Scalar variables : 38 conic : 37 \nOptimizer - Semi-definite variables: 3 scalarized : 522 \nFactor - setup time : 0.07 dense det. time : 0.00 \nFactor - ML order time : 0.03 GP order time : 0.00 \nFactor - nonzeros before factor : 4.10e+05 after factor : 4.17e+05 \nFactor - dense dim. : 2 flops : 2.47e+08 \nITE PFEAS DFEAS GFEAS PRSTATUS POBJ DOBJ MU TIME \n0 1.0e+00 1.1e+01 1.0e+00 0.00e+00 0.000000000e+00 0.000000000e+00 1.0e+00 0.08 \n1 1.4e-01 1.6e+00 1.5e-01 -6.58e-01 -7.556113101e-01 8.482969734e-03 1.4e-01 0.29 \n2 1.8e-02 2.1e-01 3.7e-03 1.15e+00 7.698474493e-03 1.035442180e-02 1.8e-02 0.37 \n3 3.1e-03 3.5e-02 2.7e-04 9.91e-01 4.107903092e-02 4.167030081e-02 3.0e-03 0.43 \n4 2.4e-03 2.7e-02 2.1e-04 4.75e-01 1.029691871e-01 1.046801745e-01 2.4e-03 0.46 \n5 4.4e-04 5.0e-03 3.5e-05 3.09e-01 6.513034372e-01 6.545254792e-01 4.3e-04 0.52 \n6 6.7e-05 7.7e-04 8.6e-06 -2.31e-01 1.961658160e+00 1.976127074e+00 6.7e-05 0.57 \n7 2.9e-05 3.3e-04 5.3e-06 -7.05e-01 3.212686900e+00 3.246044006e+00 2.8e-05 0.61 \n8 1.1e-05 1.3e-04 3.3e-06 -5.89e-01 5.139878515e+00 5.224481321e+00 1.1e-05 0.64 \n9 1.6e-06 1.8e-05 5.7e-07 -4.19e-01 1.399493512e+01 1.413132313e+01 1.5e-06 0.70 \n10 7.7e-07 8.7e-06 3.0e-07 -4.05e-01 1.833317623e+01 1.849011638e+01 7.6e-07 0.73 \n11 1.3e-07 1.5e-06 6.2e-08 -4.69e-01 3.308109028e+01 3.331037089e+01 1.3e-07 0.77 \n12 1.0e-07 1.1e-06 5.1e-08 -1.68e-01 3.518751196e+01 3.544905916e+01 1.0e-07 0.80 \n13 5.2e-08 5.8e-07 1.9e-08 4.94e-01 4.418820034e+01 4.432351869e+01 4.4e-08 0.83 \n14 2.3e-08 2.5e-07 7.3e-09 4.11e-01 4.815214005e+01 4.826061217e+01 1.9e-08 0.88 \n15 5.6e-09 6.4e-08 9.7e-10 8.13e-01 5.180890088e+01 5.183968032e+01 4.8e-09 0.91 \n16 4.4e-09 5.0e-08 6.9e-10 9.32e-01 5.207384623e+01 5.209863371e+01 3.8e-09 0.94 \n17 4.2e-09 4.5e-08 5.9e-10 9.46e-01 5.218143720e+01 5.220378235e+01 3.4e-09 0.98 \n18 4.2e-09 4.5e-08 5.9e-10 9.49e-01 5.218143720e+01 5.220378235e+01 3.4e-09 1.02 \n19 4.2e-09 4.5e-08 5.9e-10 9.50e-01 5.218143720e+01 5.220378235e+01 3.4e-09 1.05 \nOptimizer terminated. Time: 1.12 \n\n\nInterior-point solution summary\n Problem status : UNKNOWN\n Solution status : UNKNOWN\n Primal. obj: 5.2181437205e+01 nrm: 3e+01 Viol. con: 1e-04 var: 0e+00 barvar: 0e+00 \n Dual. obj: 5.2203782351e+01 nrm: 1e+05 Viol. con: 0e+00 var: 1e-04 barvar: 1e-05 \n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
cb9e46313ef37d523122c8b2805f30e452bcf612
555,681
ipynb
Jupyter Notebook
0_Foundations/Week3_Classification modeling/.ipynb_checkpoints/Analyzing product sentiment-checkpoint.ipynb
Breezen/Machine-Learning
77267e106cb865a5f95d37093d4219b3458aea27
[ "MIT" ]
null
null
null
0_Foundations/Week3_Classification modeling/.ipynb_checkpoints/Analyzing product sentiment-checkpoint.ipynb
Breezen/Machine-Learning
77267e106cb865a5f95d37093d4219b3458aea27
[ "MIT" ]
null
null
null
0_Foundations/Week3_Classification modeling/.ipynb_checkpoints/Analyzing product sentiment-checkpoint.ipynb
Breezen/Machine-Learning
77267e106cb865a5f95d37093d4219b3458aea27
[ "MIT" ]
null
null
null
420.015873
248,146
0.613994
[ [ [ "# Predicting sentiment from product reviews\n\n# Fire up GraphLab Create", "_____no_output_____" ] ], [ [ "import graphlab", "_____no_output_____" ] ], [ [ "# Read some product review data\n\nLoading reviews for a set of baby products. ", "_____no_output_____" ] ], [ [ "products = graphlab.SFrame('amazon_baby.gl/')", "[INFO] This commercial license of GraphLab Create is assigned to [email protected].\n\n[INFO] Start server at: ipc:///tmp/graphlab_server-121368 - Server binary: /home/ubuntu/anaconda/lib/python2.7/site-packages/graphlab/unity_server - Server log: /tmp/graphlab_server_1440696851.log\n[INFO] GraphLab Server Version: 1.5.2\n" ] ], [ [ "# Let's explore this data together\n\nData includes the product name, the review text and the rating of the review. ", "_____no_output_____" ] ], [ [ "products.head()", "_____no_output_____" ] ], [ [ "# Build the word count vector for each review", "_____no_output_____" ] ], [ [ "products['word_count'] = graphlab.text_analytics.count_words(products['review'])", "_____no_output_____" ], [ "products.head()", "_____no_output_____" ], [ "graphlab.canvas.set_target('ipynb')", "_____no_output_____" ], [ "products['name'].show()", "_____no_output_____" ] ], [ [ "# Examining the reviews for most-sold product: 'Vulli Sophie the Giraffe Teether'", "_____no_output_____" ] ], [ [ "giraffe_reviews = products[products['name'] == 'Vulli Sophie the Giraffe Teether']", "_____no_output_____" ], [ "len(giraffe_reviews)", "_____no_output_____" ], [ "giraffe_reviews['rating'].show(view='Categorical')", "_____no_output_____" ] ], [ [ "# Build a sentiment classifier", "_____no_output_____" ] ], [ [ "products['rating'].show(view='Categorical')", "_____no_output_____" ] ], [ [ "## Define what's a positive and a negative sentiment\n\nWe will ignore all reviews with rating = 3, since they tend to have a neutral sentiment. Reviews with a rating of 4 or higher will be considered positive, while the ones with rating of 2 or lower will have a negative sentiment. 
", "_____no_output_____" ] ], [ [ "#ignore all 3* reviews\nproducts = products[products['rating'] != 3]", "_____no_output_____" ], [ "#positive sentiment = 4* or 5* reviews\nproducts['sentiment'] = products['rating'] >=4", "_____no_output_____" ], [ "products.head()", "_____no_output_____" ] ], [ [ "## Let's train the sentiment classifier", "_____no_output_____" ] ], [ [ "train_data,test_data = products.random_split(.8, seed=0)", "_____no_output_____" ], [ "sentiment_model = graphlab.logistic_classifier.create(train_data,\n target='sentiment',\n features=['word_count'],\n validation_set=test_data)", "PROGRESS: Logistic regression:\nPROGRESS: --------------------------------------------------------\nPROGRESS: Number of examples : 133448\nPROGRESS: Number of classes : 2\nPROGRESS: Number of feature columns : 1\nPROGRESS: Number of unpacked features : 219217\nPROGRESS: Number of coefficients : 219218\nPROGRESS: Starting L-BFGS\nPROGRESS: --------------------------------------------------------\nPROGRESS: +-----------+----------+-----------+--------------+-------------------+---------------------+\nPROGRESS: | Iteration | Passes | Step size | Elapsed Time | Training-accuracy | Validation-accuracy |\nPROGRESS: +-----------+----------+-----------+--------------+-------------------+---------------------+\nPROGRESS: | 1 | 5 | 0.000002 | 1.737795 | 0.841481 | 0.839989 |\nPROGRESS: | 2 | 9 | 3.000000 | 2.388519 | 0.947425 | 0.894877 |\nPROGRESS: | 3 | 10 | 3.000000 | 2.630476 | 0.923768 | 0.866232 |\nPROGRESS: | 4 | 11 | 3.000000 | 2.868154 | 0.971779 | 0.912743 |\nPROGRESS: | 5 | 12 | 3.000000 | 3.106237 | 0.975511 | 0.908900 |\nPROGRESS: | 6 | 13 | 3.000000 | 3.348601 | 0.899991 | 0.825967 |\nPROGRESS: | 10 | 18 | 1.000000 | 4.446451 | 0.988715 | 0.916256 |\nPROGRESS: +-----------+----------+-----------+--------------+-------------------+---------------------+\n" ] ], [ [ "# Evaluate the sentiment model", "_____no_output_____" ] ], [ [ "sentiment_model.evaluate(test_data, metric='roc_curve')", "_____no_output_____" ], [ "sentiment_model.show(view='Evaluation')", "_____no_output_____" ] ], [ [ "# Applying the learned model to understand sentiment for Giraffe", "_____no_output_____" ] ], [ [ "giraffe_reviews['predicted_sentiment'] = sentiment_model.predict(giraffe_reviews, output_type='probability')", "_____no_output_____" ], [ "giraffe_reviews.head()", "_____no_output_____" ] ], [ [ "## Sort the reviews based on the predicted sentiment and explore", "_____no_output_____" ] ], [ [ "giraffe_reviews = giraffe_reviews.sort('predicted_sentiment', ascending=False)", "_____no_output_____" ], [ "giraffe_reviews.head()", "_____no_output_____" ] ], [ [ "## Most positive reviews for the giraffe", "_____no_output_____" ] ], [ [ "giraffe_reviews[0]['review']", "_____no_output_____" ], [ "giraffe_reviews[1]['review']", "_____no_output_____" ] ], [ [ "## Show most negative reviews for giraffe", "_____no_output_____" ] ], [ [ "giraffe_reviews[-1]['review']", "_____no_output_____" ], [ "giraffe_reviews[-2]['review']", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
cb9e473eecb1f7b3ff8058fe8bca7805e40984fa
48,712
ipynb
Jupyter Notebook
TD-Gammon012-Expectiminimax.ipynb
Lagiacrus24/TD-Gammon
a419086580b7baea9bb05a5eda849f936e9a74ff
[ "MIT" ]
3
2019-03-28T02:42:08.000Z
2021-06-17T10:27:04.000Z
TD-Gammon012-Expectiminimax.ipynb
Lagiacrus24/TD-Gammon
a419086580b7baea9bb05a5eda849f936e9a74ff
[ "MIT" ]
2
2018-09-17T12:36:12.000Z
2019-11-29T12:16:55.000Z
TD-Gammon012-Expectiminimax.ipynb
Lagiacrus24/TD-Gammon
a419086580b7baea9bb05a5eda849f936e9a74ff
[ "MIT" ]
null
null
null
106.59081
1,632
0.664005
[ [ [ "# Expectiminimax\n\nDer Vollständigkeits halber der ganze Expectiminimax Algorithmus. <br>\nWährend 1-ply, 2-ply und 3-ply nur den ersten, die ersten beiden, bzw. ersten drei Schritte von Expectiminimax ausgeführt haben, kann man alle mit dem Expectiminmax Algorithmus zusammenfassen. Das erlaubt einem eine saubere Notation und kann (mit einem ausreichend starken Rechner und genug Geduld) eventuell noch tiefer suchen!", "_____no_output_____" ] ], [ [ "from Player import ValuePlayer\n\nclass ExpectiminimaxValuePlayer(ValuePlayer):\n\n # Konstruktor braucht einen Parameter für die maximal Suchtiefe\n # 0 = 1-ply, 1= 2-ply, 2 = 3-ply, usw.\n def __init__(self, player, valuefunction, max_depth):\n ValuePlayer.__init__(self, player, valuefunction)\n self.max_depth = max_depth\n \n def get_action(self, actions, game):\n # Spielstatus speichern\n old_state = game.get_state()\n # Variablen initialisieren\n best_value = -1\n best_action = None\n # Alle Züge durchsuchen\n for a in actions:\n # Zug ausführen\n game.execute_moves(a, self.player)\n # Spielstatus bewerten\n value = self.expectiminimax(game, 0)\n # Besten merken\n if value > best_value:\n best_value = value\n best_action = a\n # Spiel zurücksetzen\n game.reset_to_state(old_state)\n return best_action\n \n def expectiminimax(self, game, depth):\n # Blatt in unserem Baum\n if depth == self.max_depth:\n return self.value(game, self.player)\n else:\n # Alle möglichen Würfe betrachten\n all_rolls = [(a,b) for a in range(1,7) for b in range(a,7)]\n value = 0\n for roll in all_rolls:\n # Wahrscheinlichkeiten von jedem Wurf\n probability = 1/18 if roll[0] != roll[1] else 1/36\n state = game.get_state()\n # Min-Knoten\n if depth % 2 == 0:\n moves = game.get_moves(roll, game.get_opponent(self.player))\n temp_val = 1\n for move in moves:\n game.execute_moves(move, game.get_opponent(self.player))\n # Bewertet wird aber aus unserer Perspektive\n v = self.expectiminimax(game, depth + 1)\n if v < temp_val:\n temp_val = v\n # Max-Knoten\n else:\n moves = game.get_moves(roll, self.player)\n temp_val = 0\n for move in moves:\n game.execute_moves(move, self.player)\n # Bewertet wird aber aus unserer Perspektive\n v = self.expectiminimax(game, depth + 1)\n if v > temp_val:\n temp_val = v\n # Spiel zurücksetzen \n game.reset_to_state(state)\n # Wert gewichtet addieren\n value += probability * temp_val\n return value\n \n def get_name(self):\n return \"ExpectiminimaxValuePlayer [\" + self.value.__name__ + \"]\"\n \n\nclass ExpectiminimaxModelPlayer(ExpectiminimaxValuePlayer):\n \n def __init__(self, player, model, depth):\n ExpectiminimaxValuePlayer.__init__(self, player, self.get_value, depth)\n self.model = model\n \n def get_value(self, game, player):\n features = game.extractFeatures(player)\n v = self.model.get_output(features)\n v = 1 - v if self.player == game.players[0] else v\n return v\n \n def get_name(self):\n return \"EMinMaxModelPlayer [\" + self.model.get_name() +\"]\"\n ", "_____no_output_____" ], [ "import Player\nfrom NeuralNetModel import TDGammonModel\nimport tensorflow as tf\n\ngraph = tf.Graph()\nsess = tf.Session(graph=graph)\nwith sess.as_default(), graph.as_default():\n model = TDGammonModel(sess, restore=True)\n model.test(games = 100, enemyPlayer = ExpectiminimaxModelPlayer('white', model, 1))", "Restoring checkpoint: checkpoints/TD-Gammon/checkpoint.ckpt-1593683\nINFO:tensorflow:Restoring parameters from checkpoints/TD-Gammon/checkpoint.ckpt-1593683\n[Game 0] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] 
(white) 0:1 of 1 games (0.00%)\n[Game 1] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 1:1 of 2 games (50.00%)\n[Game 2] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 1:2 of 3 games (33.33%)\n[Game 3] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 2:2 of 4 games (50.00%)\n[Game 4] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 3:2 of 5 games (60.00%)\n[Game 5] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 4:2 of 6 games (66.67%)\n[Game 6] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 4:3 of 7 games (57.14%)\n[Game 7] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 4:4 of 8 games (50.00%)\n[Game 8] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 4:5 of 9 games (44.44%)\n[Game 9] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 4:6 of 10 games (40.00%)\n[Game 10] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 4:7 of 11 games (36.36%)\n[Game 11] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 4:8 of 12 games (33.33%)\n[Game 12] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 4:9 of 13 games (30.77%)\n[Game 13] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 4:10 of 14 games (28.57%)\n[Game 14] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 5:10 of 15 games (33.33%)\n[Game 15] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 5:11 of 16 games (31.25%)\n[Game 16] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 5:12 of 17 games (29.41%)\n[Game 17] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 5:13 of 18 games (27.78%)\n[Game 18] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 5:14 of 19 games (26.32%)\n[Game 19] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 6:14 of 20 games (30.00%)\n[Game 20] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 6:15 of 21 games (28.57%)\n[Game 21] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 7:15 of 22 games (31.82%)\n[Game 22] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 7:16 of 23 games (30.43%)\n[Game 23] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 8:16 of 24 games (33.33%)\n[Game 24] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 8:17 of 25 games (32.00%)\n[Game 25] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 8:18 of 26 games (30.77%)\n[Game 26] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 8:19 of 27 games (29.63%)\n[Game 27] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 9:19 of 28 games (32.14%)\n[Game 28] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 10:19 of 29 games (34.48%)\n[Game 29] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 11:19 of 30 games (36.67%)\n[Game 30] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 11:20 of 31 games (35.48%)\n[Game 31] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 12:20 of 32 games (37.50%)\n[Game 32] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 12:21 of 33 games 
(36.36%)\n[Game 33] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 13:21 of 34 games (38.24%)\n[Game 34] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 13:22 of 35 games (37.14%)\n[Game 35] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 13:23 of 36 games (36.11%)\n[Game 36] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 13:24 of 37 games (35.14%)\n[Game 37] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 13:25 of 38 games (34.21%)\n[Game 38] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 13:26 of 39 games (33.33%)\n[Game 39] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 14:26 of 40 games (35.00%)\n[Game 40] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 15:26 of 41 games (36.59%)\n[Game 41] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 15:27 of 42 games (35.71%)\n[Game 42] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 15:28 of 43 games (34.88%)\n[Game 43] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 15:29 of 44 games (34.09%)\n[Game 44] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 15:30 of 45 games (33.33%)\n[Game 45] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 15:31 of 46 games (32.61%)\n[Game 46] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 15:32 of 47 games (31.91%)\n[Game 47] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 16:32 of 48 games (33.33%)\n[Game 48] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 16:33 of 49 games (32.65%)\n[Game 49] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 17:33 of 50 games (34.00%)\n[Game 50] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 17:34 of 51 games (33.33%)\n[Game 51] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 17:35 of 52 games (32.69%)\n[Game 52] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 18:35 of 53 games (33.96%)\n[Game 53] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 18:36 of 54 games (33.33%)\n[Game 54] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 18:37 of 55 games (32.73%)\n[Game 55] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 19:37 of 56 games (33.93%)\n[Game 56] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 19:38 of 57 games (33.33%)\n[Game 57] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 20:38 of 58 games (34.48%)\n[Game 58] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 20:39 of 59 games (33.90%)\n[Game 59] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 20:40 of 60 games (33.33%)\n[Game 60] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 20:41 of 61 games (32.79%)\n[Game 61] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 21:41 of 62 games (33.87%)\n[Game 62] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 22:41 of 63 games (34.92%)\n[Game 63] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 23:41 of 64 games (35.94%)\n[Game 64] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer 
[TD-Gammon] (white) 23:42 of 65 games (35.38%)\n[Game 65] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 24:42 of 66 games (36.36%)\n[Game 66] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 24:43 of 67 games (35.82%)\n[Game 67] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 24:44 of 68 games (35.29%)\n[Game 68] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 25:44 of 69 games (36.23%)\n[Game 69] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 25:45 of 70 games (35.71%)\n[Game 70] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 25:46 of 71 games (35.21%)\n[Game 71] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 26:46 of 72 games (36.11%)\n[Game 72] ModelPlayer [TD-Gammon] (black) vs EMinMaxModelPlayer [TD-Gammon] (white) 26:47 of 73 games (35.62%)\n" ], [ "import Player\nimport PlayerTest\n\nplayers = [Player.ValuePlayer('black', Player.blocker), ExpectiminimaxValuePlayer('white', Player.blocker, 1)]\nPlayerTest.test(players, 100)", "Spiel 0 von 100 geht an ExpectiminimaxValuePlayer [blocker] ( white )\nSpiel 1 von 100 geht an ExpectiminimaxValuePlayer [blocker] ( white )\nSpiel 2 von 100 geht an ValuePlayer [blocker] ( black )\nSpiel 3 von 100 geht an ValuePlayer [blocker] ( black )\nSpiel 4 von 100 geht an ExpectiminimaxValuePlayer [blocker] ( white )\nSpiel 5 von 100 geht an ValuePlayer [blocker] ( black )\nSpiel 6 von 100 geht an ValuePlayer [blocker] ( black )\nSpiel 7 von 100 geht an ExpectiminimaxValuePlayer [blocker] ( white )\nSpiel 8 von 100 geht an ExpectiminimaxValuePlayer [blocker] ( white )\nSpiel 9 von 100 geht an ValuePlayer [blocker] ( black )\nSpiel 10 von 100 geht an ValuePlayer [blocker] ( black )\nSpiel 11 von 100 geht an ValuePlayer [blocker] ( black )\nSpiel 12 von 100 geht an ExpectiminimaxValuePlayer [blocker] ( white )\nSpiel 13 von 100 geht an ExpectiminimaxValuePlayer [blocker] ( white )\nSpiel 14 von 100 geht an ExpectiminimaxValuePlayer [blocker] ( white )\nSpiel 15 von 100 geht an ValuePlayer [blocker] ( black )\nSpiel 16 von 100 geht an ExpectiminimaxValuePlayer [blocker] ( white )\nSpiel 17 von 100 geht an ValuePlayer [blocker] ( black )\nSpiel 18 von 100 geht an ExpectiminimaxValuePlayer [blocker] ( white )\nSpiel 19 von 100 geht an ExpectiminimaxValuePlayer [blocker] ( white )\nSpiel 20 von 100 geht an ExpectiminimaxValuePlayer [blocker] ( white )\nSpiel 21 von 100 geht an ExpectiminimaxValuePlayer [blocker] ( white )\nSpiel 22 von 100 geht an ValuePlayer [blocker] ( black )\nSpiel 23 von 100 geht an ExpectiminimaxValuePlayer [blocker] ( white )\nSpiel 24 von 100 geht an ExpectiminimaxValuePlayer [blocker] ( white )\nSpiel 25 von 100 geht an ExpectiminimaxValuePlayer [blocker] ( white )\nSpiel 26 von 100 geht an ValuePlayer [blocker] ( black )\nSpiel 27 von 100 geht an ValuePlayer [blocker] ( black )\nSpiel 28 von 100 geht an ValuePlayer [blocker] ( black )\nSpiel 29 von 100 geht an ExpectiminimaxValuePlayer [blocker] ( white )\nSpiel 30 von 100 geht an ExpectiminimaxValuePlayer [blocker] ( white )\nSpiel 31 von 100 geht an ExpectiminimaxValuePlayer [blocker] ( white )\nSpiel 32 von 100 geht an ExpectiminimaxValuePlayer [blocker] ( white )\nSpiel 33 von 100 geht an ExpectiminimaxValuePlayer [blocker] ( white )\nSpiel 34 von 100 geht an ExpectiminimaxValuePlayer [blocker] ( white )\nSpiel 35 von 100 geht an ExpectiminimaxValuePlayer [blocker] ( white )\nSpiel 36 von 100 geht 
an ValuePlayer [blocker] ( black )\nSpiel 37 von 100 geht an ValuePlayer [blocker] ( black )\nSpiel 38 von 100 geht an ExpectiminimaxValuePlayer [blocker] ( white )\nSpiel 39 von 100 geht an ValuePlayer [blocker] ( black )\nSpiel 40 von 100 geht an ExpectiminimaxValuePlayer [blocker] ( white )\nSpiel 41 von 100 geht an ValuePlayer [blocker] ( black )\nSpiel 42 von 100 geht an ExpectiminimaxValuePlayer [blocker] ( white )\nSpiel 43 von 100 geht an ValuePlayer [blocker] ( black )\nSpiel 44 von 100 geht an ExpectiminimaxValuePlayer [blocker] ( white )\nSpiel 45 von 100 geht an ExpectiminimaxValuePlayer [blocker] ( white )\nSpiel 46 von 100 geht an ValuePlayer [blocker] ( black )\nSpiel 47 von 100 geht an ExpectiminimaxValuePlayer [blocker] ( white )\nSpiel 48 von 100 geht an ExpectiminimaxValuePlayer [blocker] ( white )\nSpiel 49 von 100 geht an ValuePlayer [blocker] ( black )\nSpiel 50 von 100 geht an ExpectiminimaxValuePlayer [blocker] ( white )\nSpiel 51 von 100 geht an ValuePlayer [blocker] ( black )\nSpiel 52 von 100 geht an ExpectiminimaxValuePlayer [blocker] ( white )\nSpiel 53 von 100 geht an ExpectiminimaxValuePlayer [blocker] ( white )\nSpiel 54 von 100 geht an ExpectiminimaxValuePlayer [blocker] ( white )\nSpiel 55 von 100 geht an ValuePlayer [blocker] ( black )\nSpiel 56 von 100 geht an ExpectiminimaxValuePlayer [blocker] ( white )\nSpiel 57 von 100 geht an ExpectiminimaxValuePlayer [blocker] ( white )\nSpiel 58 von 100 geht an ExpectiminimaxValuePlayer [blocker] ( white )\nSpiel 59 von 100 geht an ExpectiminimaxValuePlayer [blocker] ( white )\nSpiel 60 von 100 geht an ValuePlayer [blocker] ( black )\nSpiel 61 von 100 geht an ExpectiminimaxValuePlayer [blocker] ( white )\nSpiel 62 von 100 geht an ExpectiminimaxValuePlayer [blocker] ( white )\nSpiel 63 von 100 geht an ExpectiminimaxValuePlayer [blocker] ( white )\nSpiel 64 von 100 geht an ValuePlayer [blocker] ( black )\nSpiel 65 von 100 geht an ValuePlayer [blocker] ( black )\nSpiel 66 von 100 geht an ValuePlayer [blocker] ( black )\nSpiel 67 von 100 geht an ExpectiminimaxValuePlayer [blocker] ( white )\nSpiel 68 von 100 geht an ValuePlayer [blocker] ( black )\nSpiel 69 von 100 geht an ExpectiminimaxValuePlayer [blocker] ( white )\nSpiel 70 von 100 geht an ExpectiminimaxValuePlayer [blocker] ( white )\nSpiel 71 von 100 geht an ExpectiminimaxValuePlayer [blocker] ( white )\nSpiel 72 von 100 geht an ExpectiminimaxValuePlayer [blocker] ( white )\nSpiel 73 von 100 geht an ExpectiminimaxValuePlayer [blocker] ( white )\nSpiel 74 von 100 geht an ExpectiminimaxValuePlayer [blocker] ( white )\nSpiel 75 von 100 geht an ExpectiminimaxValuePlayer [blocker] ( white )\nSpiel 76 von 100 geht an ExpectiminimaxValuePlayer [blocker] ( white )\nSpiel 77 von 100 geht an ValuePlayer [blocker] ( black )\nSpiel 78 von 100 geht an ExpectiminimaxValuePlayer [blocker] ( white )\nSpiel 79 von 100 geht an ExpectiminimaxValuePlayer [blocker] ( white )\nSpiel 80 von 100 geht an ValuePlayer [blocker] ( black )\nSpiel 81 von 100 geht an ExpectiminimaxValuePlayer [blocker] ( white )\nSpiel 82 von 100 geht an ValuePlayer [blocker] ( black )\nSpiel 83 von 100 geht an ExpectiminimaxValuePlayer [blocker] ( white )\nSpiel 84 von 100 geht an ValuePlayer [blocker] ( black )\nSpiel 85 von 100 geht an ExpectiminimaxValuePlayer [blocker] ( white )\nSpiel 86 von 100 geht an ExpectiminimaxValuePlayer [blocker] ( white )\nSpiel 87 von 100 geht an ValuePlayer [blocker] ( black )\nSpiel 88 von 100 geht an ExpectiminimaxValuePlayer [blocker] ( white )\nSpiel 89 von 100 geht an 
ValuePlayer [blocker] ( black )\nSpiel 90 von 100 geht an ValuePlayer [blocker] ( black )\nSpiel 91 von 100 geht an ExpectiminimaxValuePlayer [blocker] ( white )\nSpiel 92 von 100 geht an ExpectiminimaxValuePlayer [blocker] ( white )\nSpiel 93 von 100 geht an ExpectiminimaxValuePlayer [blocker] ( white )\nSpiel 94 von 100 geht an ValuePlayer [blocker] ( black )\nSpiel 95 von 100 geht an ValuePlayer [blocker] ( black )\nSpiel 96 von 100 geht an ExpectiminimaxValuePlayer [blocker] ( white )\nSpiel 97 von 100 geht an ExpectiminimaxValuePlayer [blocker] ( white )\nSpiel 98 von 100 geht an ExpectiminimaxValuePlayer [blocker] ( white )\nSpiel 99 von 100 geht an ValuePlayer [blocker] ( black )\n\n{'white': 63, 'black': 37}\n100 Spiele in 617.9951107501984 Sekunden\n" ], [ "import Player\nimport PlayerTest\nfrom NeuralNetModel import TDGammonModel\nimport tensorflow as tf\n\ngraph = tf.Graph()\nsess = tf.Session(graph=graph)\nwith sess.as_default(), graph.as_default():\n model = TDGammonModel(sess, restore=True)\n players = [Player.ModelPlayer('black', model), Player.ExpectiminimaxModelPlayer('white', model, 2)]\n PlayerTest.test(players, 10)", "Restoring checkpoint: checkpoints/TD-Gammon/checkpoint.ckpt-1593683\nINFO:tensorflow:Restoring parameters from checkpoints/TD-Gammon/checkpoint.ckpt-1593683\nSpiel 0 von 10 geht an ModelPlayer [TD-Gammon] ( black )\nSpiel 1 von 10 geht an ModelPlayer [TD-Gammon] ( black )\nSpiel 2 von 10 geht an EMinMaxModelPlayer [TD-Gammon] ( white )\n" ] ], [ [ "Diese 3 Spiele haben 24 Stunden gedauert....", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ] ]
cb9e4d87f28a42beccd9419c74b0afacde17bfb8
31,063
ipynb
Jupyter Notebook
Informatics/Reinforcement Learning/Practical RL - HSE/week3_model_free/experience_replay.ipynb
MarcosSalib/Cocktail_MOOC
46279c2ec642554537c639702ed8e540ea49afdf
[ "MIT" ]
null
null
null
Informatics/Reinforcement Learning/Practical RL - HSE/week3_model_free/experience_replay.ipynb
MarcosSalib/Cocktail_MOOC
46279c2ec642554537c639702ed8e540ea49afdf
[ "MIT" ]
null
null
null
Informatics/Reinforcement Learning/Practical RL - HSE/week3_model_free/experience_replay.ipynb
MarcosSalib/Cocktail_MOOC
46279c2ec642554537c639702ed8e540ea49afdf
[ "MIT" ]
null
null
null
78.244332
17,336
0.775424
[ [ [ "### Honor Track: experience replay\n\nThere's a powerful technique that you can use to improve sample efficiency for off-policy algorithms: [spoiler] Experience replay :)\n\nThe catch is that you can train Q-learning and EV-SARSA on `<s,a,r,s'>` tuples even if they aren't sampled under current agent's policy. So here's what we're gonna do:\n\n<img src=https://github.com/yandexdataschool/Practical_RL/raw/master/yet_another_week/_resource/exp_replay.png width=480>\n\n#### Training with experience replay\n1. Play game, sample `<s,a,r,s'>`.\n2. Update q-values based on `<s,a,r,s'>`.\n3. Store `<s,a,r,s'>` transition in a buffer. \n 3. If buffer is full, delete earliest data.\n4. Sample K such transitions from that buffer and update q-values based on them.\n\n\nTo enable such training, first we must implement a memory structure that would act like such a buffer.", "_____no_output_____" ] ], [ [ "import sys, os\nif 'google.colab' in sys.modules and not os.path.exists('.setup_complete'):\n !wget -q https://raw.githubusercontent.com/yandexdataschool/Practical_RL/spring20/setup_colab.sh -O- | bash\n\n !wget -q https://raw.githubusercontent.com/yandexdataschool/Practical_RL/coursera/grading.py -O ../grading.py\n !wget -q https://raw.githubusercontent.com/yandexdataschool/Practical_RL/coursera/week3_model_free/submit.py\n\n !touch .setup_complete\n\n# This code creates a virtual display to draw game images on.\n# It will have no effect if your machine has a monitor.\nif type(os.environ.get(\"DISPLAY\")) is not str or len(os.environ.get(\"DISPLAY\")) == 0:\n !bash ../xvfb start\n os.environ['DISPLAY'] = ':1'", "_____no_output_____" ], [ "import numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\nfrom IPython.display import clear_output", "_____no_output_____" ], [ "import random\n\n\nclass ReplayBuffer(object):\n def __init__(self, size):\n \"\"\"\n Create Replay buffer.\n Parameters\n ----------\n size: int\n Max number of transitions to store in the buffer. When the buffer\n overflows the old memories are dropped.\n\n Note: for this assignment you can pick any data structure you want.\n If you want to keep it simple, you can store a list of tuples of (s, a, r, s') in self._storage\n However you may find out there are faster and/or more memory-efficient ways to do so.\n \"\"\"\n self._storage = []\n self._maxsize = size\n\n # OPTIONAL: YOUR CODE\n\n def __len__(self):\n return len(self._storage)\n\n def add(self, obs_t, action, reward, obs_tp1, done):\n '''\n Make sure, _storage will not exceed _maxsize. 
\n Make sure, FIFO rule is being followed: the oldest examples has to be removed earlier\n '''\n data = (obs_t, action, reward, obs_tp1, done)\n\n # add data to storage\n self._storage.append(data)\n \n # FIFO\n while len(self._storage) > self._maxsize:\n self._storage.pop(0)\n \n \n def sample(self, batch_size):\n \"\"\"Sample a batch of experiences.\n Parameters\n ----------\n batch_size: int\n How many transitions to sample.\n Returns\n -------\n obs_batch: np.array\n batch of observations\n act_batch: np.array\n batch of actions executed given obs_batch\n rew_batch: np.array\n rewards received as results of executing act_batch\n next_obs_batch: np.array\n next set of observations seen after executing act_batch\n done_mask: np.array\n done_mask[i] = 1 if executing act_batch[i] resulted in\n the end of an episode and 0 otherwise.\n \"\"\"\n # <YOUR CODE: randomly generate batch_size integers to be used as indexes of samples>\n idxes = random.sample(range(len(self._storage)), \n k=batch_size if len(self._storage)>=batch_size else len(self._storage))\n \n # collect <s,a,r,s',done> for each index\n data_batch = np.array([self._storage[idx] for idx in idxes])\n \n return data_batch[:,0], data_batch[:,1], data_batch[:,2], data_batch[:,3], data_batch[:,4]", "_____no_output_____" ] ], [ [ "Some tests to make sure your buffer works right", "_____no_output_____" ] ], [ [ "def obj2arrays(obj):\n for x in obj:\n yield np.array([x])\n\ndef obj2sampled(obj):\n return tuple(obj2arrays(obj))\n\nreplay = ReplayBuffer(2)\nobj1 = (0, 1, 2, 3, True)\nobj2 = (4, 5, 6, 7, False)\nreplay.add(*obj1)\nassert replay.sample(1) == obj2sampled(obj1), \\\n \"If there's just one object in buffer, it must be retrieved by buf.sample(1)\"\nreplay.add(*obj2)\nassert len(replay) == 2, \"Please make sure __len__ methods works as intended.\"\nreplay.add(*obj2)\nassert len(replay) == 2, \"When buffer is at max capacity, replace objects instead of adding new ones.\"\nassert tuple(np.unique(a) for a in replay.sample(100)) == obj2sampled(obj2)\nreplay.add(*obj1)\nassert max(len(np.unique(a)) for a in replay.sample(100)) == 2\nreplay.add(*obj1)\nassert tuple(np.unique(a) for a in replay.sample(100)) == obj2sampled(obj1)\nprint(\"Success!\")", "Success!\n" ] ], [ [ "Now let's use this buffer to improve training:", "_____no_output_____" ] ], [ [ "import gym\nfrom qlearning import QLearningAgent\n\ntry:\n env = gym.make('Taxi-v3')\nexcept gym.error.DeprecatedEnv:\n # Taxi-v2 was replaced with Taxi-v3 in gym 0.15.0\n env = gym.make('Taxi-v2')\n\nn_actions = env.action_space.n", "_____no_output_____" ], [ "def play_and_train_with_replay(env, agent, replay=None,\n t_max=10**4, replay_batch_size=32):\n \"\"\"\n This function should \n - run a full game, actions given by agent.getAction(s)\n - train agent using agent.update(...) whenever possible\n - return total reward\n :param replay: ReplayBuffer where agent can store and sample (s,a,r,s',done) tuples.\n If None, do not use experience replay\n \"\"\"\n total_reward = 0.0\n s = env.reset()\n\n for t in range(t_max):\n # get agent to pick action given state s\n a = agent.get_action(s)\n\n next_s, r, done, _ = env.step(a)\n\n # update agent on current transition. 
Use agent.update\n agent.update(s, a, r, next_s)\n\n if replay is not None:\n # store current <s,a,r,s'> transition in buffer\n replay.add(s, a, r, next_s, done)\n\n # sample replay_batch_size random transitions from replay,\n # then update agent on each of them in a loop\n # s_, a_, r_, next_s_, done_ = replay.sample(replay_batch_size)\n samples = replay.sample(replay_batch_size)\n \n for i in range(len(samples[0])):\n agent.update(samples[0][i], samples[1][i], samples[2][i], samples[3][i])\n\n s = next_s\n total_reward += r\n if done:\n break\n\n return total_reward", "_____no_output_____" ], [ "# Create two agents: first will use experience replay, second will not.\n\nagent_baseline = QLearningAgent(\n alpha=0.5, epsilon=0.25, discount=0.99,\n get_legal_actions=lambda s: range(n_actions))\n\nagent_replay = QLearningAgent(\n alpha=0.5, epsilon=0.25, discount=0.99,\n get_legal_actions=lambda s: range(n_actions))\n\nreplay = ReplayBuffer(1000)", "_____no_output_____" ], [ "from IPython.display import clear_output\nimport pandas as pd\n\ndef moving_average(x, span=100):\n return pd.DataFrame({'x': np.asarray(x)}).x.ewm(span=span).mean().values\n\nrewards_replay, rewards_baseline = [], []\n\nfor i in range(1000):\n rewards_replay.append(\n play_and_train_with_replay(env, agent_replay, replay))\n rewards_baseline.append(\n play_and_train_with_replay(env, agent_baseline, replay=None))\n\n agent_replay.epsilon *= 0.99\n agent_baseline.epsilon *= 0.99\n\n if i % 100 == 0:\n clear_output(True)\n print('Baseline : eps =', agent_replay.epsilon,\n 'mean reward =', np.mean(rewards_baseline[-10:]))\n print('ExpReplay: eps =', agent_baseline.epsilon,\n 'mean reward =', np.mean(rewards_replay[-10:]))\n plt.plot(moving_average(rewards_replay), label='exp. replay')\n plt.plot(moving_average(rewards_baseline), label='baseline')\n plt.grid()\n plt.legend()\n plt.show()", "Baseline : eps = 2.9191091959171894e-05 mean reward = 7.8\nExpReplay: eps = 2.9191091959171894e-05 mean reward = 7.1\n" ] ], [ [ "### Submit to Coursera", "_____no_output_____" ] ], [ [ "from submit import submit_experience_replay\nsubmit_experience_replay(rewards_replay, rewards_baseline, '', '')", "Submitted to Coursera platform. See results on assignment page!\n" ] ], [ [ "#### What to expect:\n\nExperience replay, if implemented correctly, will improve algorithm's initial convergence a lot, but it shouldn't affect the final performance.\n\n### Outro\n\nWe will use the code you just wrote extensively in the next week of our course. If you're feeling that you need more examples to understand how experience replay works, try using it for binarized state spaces (CartPole or other __[classic control envs](https://gym.openai.com/envs/#classic_control)__).\n\n__Next week__ we're gonna explore how q-learning and similar algorithms can be applied for large state spaces, with deep learning models to approximate the Q function.\n\nHowever, __the code you've written__ for this week is already capable of solving many RL problems, and as an added benifit - it is very easy to detach. You can use Q-learning, SARSA and Experience Replay for any RL problems you want to solve - just thow 'em into a file and import the stuff you need.", "_____no_output_____" ] ] ]
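The buffer docstring above notes that a plain Python list is not the only possible backing store. As a purely illustrative sketch (not part of the original assignment or its grader), the same interface can be backed by `collections.deque`, which evicts the oldest transition automatically; note that `random.choices` samples *with* replacement, unlike the `sample` method implemented above:

```python
import random
from collections import deque

class DequeReplayBuffer:
    """Illustrative variant of the ReplayBuffer above, backed by a deque.

    A deque created with maxlen drops the oldest transition on append,
    so no explicit pop(0) is needed to enforce the FIFO rule.
    """
    def __init__(self, size):
        self._storage = deque(maxlen=size)

    def __len__(self):
        return len(self._storage)

    def add(self, obs_t, action, reward, obs_tp1, done):
        # appending beyond maxlen silently discards the oldest entry
        self._storage.append((obs_t, action, reward, obs_tp1, done))

    def sample(self, batch_size):
        # sampling WITH replacement; the graded buffer samples without replacement
        batch = random.choices(self._storage, k=batch_size)
        obs, act, rew, next_obs, done = zip(*batch)
        return obs, act, rew, next_obs, done
```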
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
cb9e56014f7038ecb63f5d35eefe254fd26cf7b6
5,309
ipynb
Jupyter Notebook
mpcontribs-portal/notebooks/contribs.materialsproject.org/screening_inorganic_pv.ipynb
rkingsbury/MPContribs
8427cb09a389706e4a4249374afa2faa51bb340e
[ "MIT" ]
27
2016-07-16T17:20:11.000Z
2022-02-25T20:35:12.000Z
mpcontribs-portal/notebooks/contribs.materialsproject.org/screening_inorganic_pv.ipynb
rkingsbury/MPContribs
8427cb09a389706e4a4249374afa2faa51bb340e
[ "MIT" ]
833
2015-06-29T22:08:24.000Z
2022-03-28T15:03:53.000Z
mpcontribs-portal/notebooks/contribs.materialsproject.org/screening_inorganic_pv.ipynb
rkingsbury/MPContribs
8427cb09a389706e4a4249374afa2faa51bb340e
[ "MIT" ]
20
2015-06-02T00:39:27.000Z
2021-03-29T22:25:17.000Z
26.678392
99
0.480693
[ [ [ "import os, json\nfrom pathlib import Path\nfrom pandas import DataFrame\nfrom mpcontribs.client import Client\nfrom unflatten import unflatten", "_____no_output_____" ], [ "client = Client()", "_____no_output_____" ] ], [ [ "**Load raw data**", "_____no_output_____" ] ], [ [ "name = \"screening_inorganic_pv\"\nindir = Path(\"/Users/patrick/gitrepos/mp/mpcontribs-data/ThinFilmPV\")\nfiles = {\n \"summary\": \"SUMMARY.json\",\n \"absorption\": \"ABSORPTION-CLIPPED.json\",\n \"dos\": \"DOS.json\",\n \"formulae\": \"FORMATTED-FORMULAE.json\"\n}\ndata = {}\n\nfor k, v in files.items():\n path = indir / v\n with path.open(mode=\"r\") as f:\n data[k] = json.load(f)\n \nfor k, v in data.items():\n print(k, len(v))", "_____no_output_____" ] ], [ [ "**Prepare contributions**", "_____no_output_____" ] ], [ [ "config = {\n \"SLME_500_nm\": {\"path\": \"SLME.500nm\", \"unit\": \"%\"},\n \"SLME_1000_nm\": {\"path\": \"SLME.1000nm\", \"unit\": \"%\"},\n \"E_g\": {\"path\": \"ΔE.corrected\", \"unit\": \"eV\"},\n \"E_g_d\": {\"path\": \"ΔE.direct\", \"unit\": \"eV\"},\n \"E_g_da\": {\"path\": \"ΔE.dipole\", \"unit\": \"eV\"},\n \"m_e\": {\"path\": \"mᵉ\", \"unit\": \"mₑ\"},\n \"m_h\": {\"path\": \"mʰ\", \"unit\": \"mₑ\"}\n}\ncolumns = {c[\"path\"]: c[\"unit\"] for c in config.values()}\ncontributions = []\n\nfor mp_id, d in data[\"summary\"].items():\n formula = data[\"formulae\"][mp_id].replace(\"<sub>\", \"\").replace(\"</sub>\", \"\")\n contrib = {\"project\": name, \"identifier\": mp_id, \"data\": {\"formula\": formula}}\n cdata = {v[\"path\"]: f'{d[k]} {v[\"unit\"]}' for k, v in config.items()}\n contrib[\"data\"] = unflatten(cdata)\n \n df_abs = DataFrame(data=data[\"absorption\"][mp_id])\n df_abs.columns = [\"hν [eV]\", \"α [cm⁻¹]\"]\n df_abs.set_index(\"hν [eV]\", inplace=True)\n df_abs.columns.name = \"\" # legend name\n df_abs.attrs[\"name\"] = \"absorption\"\n df_abs.attrs[\"title\"] = \"optical absorption spectrum\"\n df_abs.attrs[\"labels\"] = {\"variable\": \"\", \"value\": \"α [cm⁻¹]\"}\n\n df_dos = DataFrame(data=data[\"dos\"][mp_id])\n df_dos.columns = ['E [eV]', 'DOS [eV⁻¹]']\n df_dos.set_index(\"E [eV]\", inplace=True)\n df_dos.columns.name = \"\" # legend name\n df_dos.attrs[\"name\"] = \"DOS\"\n df_dos.attrs[\"title\"] = \"electronic density of states\"\n df_dos.attrs[\"labels\"] = {\"variable\": \"\", \"value\": \"DOS [eV⁻¹]\"}\n\n contrib[\"tables\"] = [df_abs, df_dos]\n contributions.append(contrib)\n \nlen(contributions)", "_____no_output_____" ] ], [ [ "**Submit contributions**", "_____no_output_____" ] ], [ [ "client.delete_contributions(name)\nclient.init_columns(name, columns)\nclient.submit_contributions(contributions[:5])", "_____no_output_____" ] ], [ [ "**Retrieve and plot tables**", "_____no_output_____" ] ], [ [ "all_ids = client.get_all_ids(\n {\"project\": \"screening_inorganic_pv\"}, include=[\"tables\"]\n).get(name, {})\ncids = list(all_ids[\"ids\"])\ntids = list(all_ids[\"tables\"][\"ids\"])\nlen(cids), len(tids)", "_____no_output_____" ], [ "client.get_contribution(cids[0])", "_____no_output_____" ], [ "t = client.get_table(tids[0]) # pandas DataFrame", "_____no_output_____" ], [ "t.display()", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
cb9e6111bd2ee4996488a78085e3f05dd02de45c
224,621
ipynb
Jupyter Notebook
notebooks/preprocessing/Data_maker_loader.ipynb
PatBall1/DeepForestcast
f9444490d71b89aa7823e830cf7fbe6752c74d9a
[ "MIT" ]
null
null
null
notebooks/preprocessing/Data_maker_loader.ipynb
PatBall1/DeepForestcast
f9444490d71b89aa7823e830cf7fbe6752c74d9a
[ "MIT" ]
1
2022-02-05T10:35:48.000Z
2022-02-05T10:35:48.000Z
notebooks/preprocessing/Data_maker_loader.ipynb
PatBall1/DeepForestcast
f9444490d71b89aa7823e830cf7fbe6752c74d9a
[ "MIT" ]
null
null
null
123.826351
109,750
0.839022
[ [ [ "# Make data and load data explained", "_____no_output_____" ], [ "Global Forest Change dataset https://earthenginepartners.appspot.com/science-2013-global-forest/download_v1.6.html is divided into 10x10 degree tiles, each of which comes with six raster files per tile: treecover, gain, data mask, loss year, first and last. All files contain unsigned 8-bit values and have a spatial resolution of 1 arc-second per pixel, which correspond to approximately 30 meters per pixel around the equator. After 2013 loss year and last files were updated annually. The last 2018 loss year file assign an integer value 0-18 to each pixel. 1-18 corresponds to the year (2001-2018) at which deforestation event was observed at this location or 0 if the deforestation was not detected there in the period 2001-2018. The dataset is such that once a pixel is assigned as deforestrated, it does not go back to forested at any time in the future. We collected the following ten tif files: treecover, gain, datamask, all \"last\" files from 2014 to 2018 and the most recent, 2018, loss year file. As our target area Madre de Dios is located at the intersection of three of the Hansen dataset tiles, for each of the ten files we collected the the corresponding tiles, merged them together in one raster file and cropped it with the Madre de Dios shape file. R file can be fond in r_code/R.ipynb. The resulting raster file was of spatial size 14646 x 15723 pixels and those pixels that were outside Madre de Dios boundaries were masked as NA values. Since we wish our models to be able to predict the label of each pixel of Madre de Dios area by analyzing an image, or time series of images, that captures its local region, we also included pixels lying in a buffer area of 0.09 degree (or approximately 10km) in our dataset. This allowed us to extract features from images that cover area up to 10km away from any Madre de Dios pixel.", "_____no_output_____" ], [ "<br> **from R:**\n \n Junin area corresponds to two tiles. \n An R script has been run to collect this tiles, merge them, and mask them\n with Junin area + buffer shape file. See R notebook for more details.\n\n The following tiff files were then created and saved in sourcepath = '/home/ubuntu/Madre':\n \n datamask_2018.tif \n gain_2018.tif\n treecover2000_2018.tif\n if_in_buffer.tif\n \n lossyear_2018.tif\n last_2018_1.tif\n last_2018_2.tif\n last_2018_3.tif\n last_2018_4.tif \n \n last_2017_1.tif\n last_2017_2.tif\n last_2017_3.tif\n last_2017_4.tif\n \n last_2016_1.tif\n last_2016_2.tif\n last_2016_3.tif\n last_2016_4.tif\n \n last_2015_1.tif\n last_2015_2.tif\n last_2015_3.tif\n last_2015_4.tif\n \n last_2014_1.tif\n last_2014_2.tif\n last_2014_3.tif\n last_2014_4.tif\n\n Each file is one layer raster file of shape 14646, 15723. NA values are encoded as -1.\n NA values are pixels that were masked as they are not in the Madre de Dios area and its buffer ", "_____no_output_____" ], [ "**datamask_2018.tif**:\n \n is a layer that has values 0,1,2,or -1.\n 0 for no data (Madre de Dios does not have pixels with no data)\n 1 for mapped land surface\n 2 for permanent woter bodies\n -1 for masked pixel\n\n**gain_2018.tif**\n\n is a layer that has values 0,1,or -1.\n 0 for no gain experienced between 2001 and 2012\n 1 for gain experienced between 2001 and 2012\n -1. 
for masked pixel\n \n**treecover2000_2018.tif**\n\n is a layer that has values between 0,100,or -1.\n value between 0 and 100 indicates percentage three cover observed in 2000.\n -1 for masked pixel\n\n\n**if_in_buffer.tif**\n\n is a layer that has values 0,1,or -1.\n 0 if a pixel lies in Madre de Dios area\n 1 if a pixel lies in the buffer area\n -1 for masked pixel\n \nThe first three files are produced form the files:\n datamask\n gain\n treecover2000\n \n Downladed from:\n https://earthenginepartners.appspot.com/science-2013-global-forest/download_v1.6.html\n\nFor how if_in_buffer.tif was created see R notebook. ", "_____no_output_____" ], [ " **lossyear_2018.tif**\n \n Downladed from:\n https://earthenginepartners.appspot.com/science-2013-global-forest/download_v1.6.html\n contains the most recent lossyear file.\n \n layer with values 0,1,2,3,..18 or -1\n 0 for no loss experienced between 2001 and 2018\n 1,2,3..18 for loss experienced in 2001, 2002, 2003,..2018\n -1 for masked pixel\n \n \n last_2018_1.tif\n last_2018_2.tif\n last_2018_3.tif\n last_2018_4.tif\n \n The \"last\" file 4 bands corresponding to year 2018\n each has values between 0:255 or -1\n masked pixels have value -1", "_____no_output_____" ], [ "# Input layers:", "_____no_output_____" ], [ "<img src=\"images/table.png\">", "_____no_output_____" ], [ "The only feature we constructed from Hansen data is one hot encoded cathegorical variable **if loss when** that for each neighbouring pixel encodes if it has experienced deforestation and how far in the past this was obsered. If pixel did not experienced deforestation, all cathegorical layers have 0 at that location. Our motivation to have it is because we had the hypothesis that deforestation event cluster in certain areas. Therefore, we wanted to have feature that summarize the information of neighbouring pixels' deforestation state. \n\nOne can set different enocding through function:\n\n def if_def_when(lossyear,year,cutoffs = [2,5,8]):\n \"\"\"\n Creates categorical variables for deforestration event given cutoffs.\n Values in cutoffs define the time bins\n Returns len(cutoffs) + 1 cathegorical layers:\n Example: cutoffs = [2,5,8], num of layers = 4 , considered year = year\n Cathegories 0,1,2,3 take values 1: \n for layer 0 : if year - lossyear is in [0,2) \n for layer 1 : if year - lossyear is in [2,5) \n for layer 2 : if year - lossyear is in [5,8) \n for layer 3 : if deforestation 8 years ago or more\n For a given considered year we have no prior knowledge for future deforestations:\n if loss event is in year > considered year or pixel is non deforested up to 2018+, \n all categories have value 0 \n \"\"\"\n\n<br>Currently the cut offs are as follows:\n <br> For considered year t and loss taking values 1,2,3,4,...18:\n <br> if loss > t all cathegorical levels take 0. 
Otherwise: \n <br>Layer 1) deforestation in the same or last year = 1 if t - loss in [0,1] \n <br>Layer 2) deforestation in the past 2 to 4 years = 1 if t - loss in [2,4] \n <br>Layer 3) deforestation in the past 5 to 7 years = 1 if t - loss in [5,7] \n <br>Layer 4) deforestation more than 7 years ago = 1 if t - loss in [8,t-1] ", "_____no_output_____" ], [ "All our models were built so that they can take two or more tensors with the same spatial dimensions, which we define below, and forecast whether deforestation is observed in the following year at the location corresponding to the spatially-central pixel of these tensors.\n\nThe first 3D tensor that any of our models receives, which we named **static**, is a tensor of shape $\mathbf{S} \in \mathbb{R}^{2 \times (2r+1) \times (2r+1)}$ where $(2r+1) \times (2r+1)$ is its spatial dimension and $r$ is a predefined hyperparameter that defines the number of pixels the input tensor has in each spatial direction from the target central pixel. In the data classes implementation the parameter **size** corresponds to **r**. The two channels of this tensor are treecover2000 and datamask. \n\nOur second set of tensors is a time series of 3D tensors $\mathbf{X}_{t-3}$,$\mathbf{X}_{t-2}$,$\mathbf{X}_{t}$ $\in \mathbf{R}^{d \times (2r+1) \times (2r+1)}$, where again each tensor has spatial dimensions $(2r+1) \times (2r+1)$ but depth **d = 4 + the number of layers of the if_loss_when** encoded variable. The channels of a tensor with time index $t$ are **if_loss_when(i,t), last_b30(t), last_b40(t), last_b50(t) and last_b70(t)**. \n\nThis time series is stored in a 4D tensor of shape (c x t x 2r+1 x 2r+1) and here c = 4+4 and t = 3.", "_____no_output_____" ], [ "Finally, each tensor with time index $t$ comes with a label $Y_{t+1} \in \{0,1\}$ which takes value 1 only if the target central pixel (at spatial location $r+1 \times r+1$) is marked as deforested exactly in year $t+1$. To clarify this, here we note that if this pixel was labeled as deforested in any other year $t_j \neq t+1$, lossyear($t_j$) = 1, or was never labeled as deforested in the study period 2001-2018, lossyear($t_j$) = 0 $\forall$ $t_j$ in $\{1,2,..18\}$, then $Y_{t+1} = 0$.", "_____no_output_____" ], [ "## Set of valid pixels in each year", "_____no_output_____" ], [ "Due to the characteristics of the Hansen dataset, we know that if a pixel is labeled as deforested in year $t_{j}$ then the pixel never returns to the state of being forested. Additionally, if the percentage of tree cover observed in 2000 was below 30\%, then this location is not considered as forest. Only if a pixel with treecover2000<30% experiences \"gain\" in the study period 2001-2012 may we assume it corresponds to a forested area from 2013 onward. Finally, if it has $datamask=1$ then we know it is a permanent water body. Having stated these facts, we note that if our models aim to forecast the label of a pixel with index $j$, $Y^{j}_{t+1} \equiv \mathbb{I}\{lossyear_j = t+1\}$, they would not be of any use if we know that this pixel j is not a forested area in year t. It will never be reverted to forest and therefore detecting deforestation at this location in year $t+1$ doesn't make sense. 
Therefore, when predicting the labels of pixels $Y^{j}_{t+1}$ in year $t+1$, we restricted the set of pixels to be:", "_____no_output_____" ], [ "$$\mathbf{J}_t =: \{j \in \mathbb{M} : ( lossyear_j > t \,\ \cup \,\ lossyear_j = 0 )$$ \n$$\cap (datamask_j = 1) \cap (treecover_j > 30\% \,\ \cup \,\ gain_j = 1) \}$$\nwhere $\mathbb{M}$ is the index set of pixels lying within the Madre de Dios boundaries. ", "_____no_output_____" ], [ "Since channel **treecover2000** has range 0:100, we rescaled it to be in the range 0:1.\n<br>Each of the bands of the Landsat image composite is also separately normalized, where normalization is taken with respect to the spatial domain of a single-time image. The reason for that is that different channels have different means and std. Mean and std are also computed per year because images might be taken at different seasons and therefore the channels' value distributions might be different.\n<br>Since for Madre de Dios datamask has no entry = 0, we changed the values to 0 - land, 1 - water bodies.\n\n<br>For our last 3 models that utilize a time series of tensors we worked with the following dataset: $[\mathbf{S^j},\textbf{X}^j_{2014},\textbf{X}^j_{2015},\textbf{X}^j_{2016}]$ as the set of input tensors and $Y^j_{2017}$ as the set of labels to be predicted, where $j \in \mathbf{J}_{2016}$. \n\n<br>We split the data into train and validation data sets with ratio 8:2. From the 80% training data, a 5-fold cross-validation training with early stopping was used. We used the validation data to select the best model of each class. We evaluated their performance on $[\mathbf{S^j},\textbf{X}^j_{2015},\textbf{X}^j_{2016},\textbf{X}^j_{2017}]$ as the set of input tensors and $Y^j_{2018}$ as the set of labels to be predicted, where $j \in \mathbf{J}_{2017}$.\n\n<br>Our Model 1, a 2D CNN model, is able to analyze only mono-temporal tensors and to extract from them features forecasting the central pixel's deforestation label in the following year. We used the union of the following data pairs of tensors and labels as the dataset:\n<br>$[\mathbf{S^j},\textbf{X}^j_{2014}]$ as the input tensors and $Y^j_{2015}$ as the set of labels to be predicted, where $j \in \mathbf{J}_{2014}$.\n<br>$[\mathbf{S^j},\textbf{X}^j_{2015}]$ as the input tensors and $Y^j_{2016}$ as the set of labels to be predicted, where $j \in \mathbf{J}_{2015}$.\n<br>$[\mathbf{S^j},\textbf{X}^j_{2016}]$ as the input tensors and $Y^j_{2017}$ as the set of labels to be predicted, where $j \in \mathbf{J}_{2016}$.\n<br>We evaluated its performance on:\n<br>$[\mathbf{S^j},\textbf{X}^j_{2017}]$ as the input tensors and $Y^j_{2018}$ as the set of labels to be predicted, where $j \in \mathbf{J}_{2017}$. \n\n<br> **WHERE IS THIS SAVED? 
PIXEL FILE?**\n", "_____no_output_____" ], [ "## The rest of this notebook explains how to use the tiff files to produce the above defined datasets.", "_____no_output_____" ], [ "Load needed packages.\n\nsourcepath is the path to folder that has all tiff files.\n\nwherepath is the path to folder where the tensors to be saved and loaded later", "_____no_output_____" ] ], [ [ "import torch\nfrom torchvision.transforms import ToTensor\nfrom PIL import Image\nImage.MAX_IMAGE_PIXELS = None\nfrom torch.utils.data import Dataset\nimport os.path\nimport numpy as np\n#sourcepath is path to folder that has all tiff files.\n# they must have value 111 for NA, and last tif file should come as last_year_1 for band 1 in year = year \n#wherepath is the path to folder where the tensors to be saved and laded later\n\n# server = '/home/ubuntu/satellite' # for Amazon server\nserver = '/rds/general/project/aandedemand/live/satellite/junin' # For Imperial HPC\n\nsourcepath = '/data_reduced'\nwherepath = '/data_reduced/tensors'\n\nsourcepath = server + sourcepath\nwherepath = server + wherepath\nprint(sourcepath, wherepath)", "/rds/general/project/aandedemand/live/satellite/junin/data_reduced /rds/general/project/aandedemand/live/satellite/junin/data_reduced/tensors\n" ], [ "#for visualising and time measurung\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport time\nprint('done')", "done\n" ] ], [ [ "## **to_Tensor**\nThe following function open a raster (with one layer) as a tensor of size same as the tiff file - (14646, 15723)\n<br>**path** is the sourcepath: '/home/ubuntu/Madre'\n<br>**name** is one from above, eg: datamask_2018.tif, gain_2018.tif, treecover2000_2018.tif, if_in_buffer.tif", "_____no_output_____" ] ], [ [ "def to_Tensor(path,name):\n \"\"\"\n Load Tiff files as tensors\n \"\"\"\n t = Image.open(path+\"/\"+name)\n t = ToTensor()(t)\n t = t.squeeze(dim = 0)\n return(t) ", "_____no_output_____" ] ], [ [ "## datamask", "_____no_output_____" ] ], [ [ "\"\"\"\ndatamask:\n -1 for NA value\n 0 for no data pixel in Madre de Dios\n 1 for land pixel in Madre de Dios\n 2 for water body pixel in Madre de Dios \n\"\"\"\n#Run this:\n###############\ndatamask = to_Tensor(sourcepath,'Hansen/datamask_2018.tif')\n##############\n\n#visualise:\ncolors = ['grey','white','green','blue']\nplt.fig = plt.figure(figsize=(14,14))\nplt.matshow(datamask,cmap=matplotlib.colors.ListedColormap(colors))\nplt.show()\nprint(\"Number of pixels in AOI and buffer with no data (0 entries): \", len((datamask == 0).nonzero()))", "_____no_output_____" ] ], [ [ "Since for AOI area all pixels have value $\\neq 0$, to rescale this layer to be in range 0,1 we change encoding as follows:\n \n datamask:\n -1 for NA value\n 0 for land pixel in AOI\n 1 for water body pixel in AOI \n\nby running this line:\n\n datamask[datamask != -1] = datamask[datamask != -1] - 1 ", "_____no_output_____" ] ], [ [ "datamask[datamask != -1] = datamask[datamask != -1] - 1 ", "_____no_output_____" ] ], [ [ "## if_in_buffer", "_____no_output_____" ] ], [ [ "\"\"\"\nBuffer:\n -1 for NA value\n 0 for pixel in Madre de Dios\n 1 for pixel in buffer\n\"\"\"\nbuffer = to_Tensor(sourcepath,'buffer/if_in_buffer.tif')\n#to visualise\ncolors = ['grey','green','red']\nplt.fig = plt.figure(figsize=(8,8))\nplt.matshow(buffer,cmap=matplotlib.colors.ListedColormap(colors))\nplt.show()", "_____no_output_____" ] ], [ [ "## DSM - new layer", "_____no_output_____" ], [ "Since no mask is applied, to mask the NA values with -1 do:", "_____no_output_____" ] ], [ [ "DSM = 
to_Tensor(sourcepath,'DSM/DSM_resample_clip_REDUCED.tif')\n#DSM[datamask == -1 ] = -1\nplt.fig = plt.figure(figsize=(8,8))\nplt.matshow(DSM, norm = plt.Normalize(0, 6000))\nplt.show()", "_____no_output_____" ] ], [ [ "Explore its values ditribution:", "_____no_output_____" ] ], [ [ "values = (DSM != -1).nonzero()\nprint(values)\nvalues = DSM[ values[:,0] , values[:,1]].view(-1)\nprint(values)", "tensor([[ 0, 2651],\n [ 0, 2652],\n [ 0, 2653],\n ...,\n [7652, 8517],\n [7652, 8518],\n [7652, 8519]])\ntensor([1371.2875, 1380.6450, 1386.6625, ..., 3292.9475, 3301.5874,\n 3313.1201])\n" ], [ "plt.fig = plt.figure()\nplt.hist(values.numpy())\nplt.show()", "_____no_output_____" ], [ "(values).min() ", "_____no_output_____" ] ], [ [ "Since values have positively skewed distribution take log transform. Since minimum value is 107, log transform without shift is possible.", "_____no_output_____" ] ], [ [ "DSM[DSM != -1] = np.log(DSM[DSM != -1])", "_____no_output_____" ] ], [ [ "After log transform:", "_____no_output_____" ] ], [ [ "values = (DSM != -1).nonzero()\nvalues = DSM[ values[:,0] , values[:,1]].view(-1)\nplt.fig = plt.figure()\nplt.hist(values.numpy())\nplt.show()", "_____no_output_____" ] ], [ [ "To transform in order to have normal distribution:\n<br> rescale_image function defined bellow", "_____no_output_____" ] ], [ [ "%cd ../\nfrom Data_maker_loader import rescale_image\nDSM ,mean, std = rescale_image(DSM)\ndel rescale_image", "/rdsgpfs/general/project/aandedemand/live/satellite/junin/deforestation_forecasting/python_code\n" ], [ "print(\"Mean after log transform: \", mean)\nprint(\"Std after log transform: \", std)", "Mean after log transform: tensor([7.2419])\nStd after log transform: tensor([0.7253])\n" ], [ "values = (DSM[:,:] != -1).nonzero()\nvalues = DSM[ values[:,0] , values[:,1]].view(-1)\nplt.fig = plt.figure()\nplt.hist(values.numpy())\nplt.show()", "_____no_output_____" ], [ "torch.save(DSM,wherepath+\"/\"+'DSM.pt')", "_____no_output_____" ] ], [ [ "### In summary, to include this layer do:", "_____no_output_____" ] ], [ [ "DSM = to_Tensor(sourcepath,'DSM.tif')\n# Mask area out of Madre de Dios and buffer\nDSM[datamask == -1 ] = -1\n# If positive skewed distribution of the values:\nmin_val = DSM[DSM != -1 ].min().numpy()\nprint(\"Min value of the elevation: \",min_val)\n# if there is negative values and zero values log transform must be applied after a shift to positive values only.\n# log(0) = -Inf\nif min_val > 0:\n DSM[DSM != -1] = np.log(DSM[DSM != -1])\nelse:\n DSM[DSM != -1] = np.log(DSM[DSM != -1] + min_val + 1)\n#Normalise:\nDSM, DSMmean, DSMstd = rescale_image(DSM)\nprint(\"Extracted mean: \",DSMmean)\nprint(\"Devided std: \",DSMstd)", "_____no_output_____" ] ], [ [ "## **last_to_image**\n\nThe following function open the 4 rasters (one for each band) of the last files (eg last_18_1,..last_18_4) and returns them as a tensors of size (4, 14646, 15723)\n\n<br>**path** is the sourcepath\n<br>**year** any of 13,14,15,16,17,18 ", "_____no_output_____" ] ], [ [ "def last_to_image(path,year):\n \"\"\"\n Given path to folder having tiff files for each last band for given year\n returns Tensors with chanels == bands and year as requested in the path\n \"\"\"\n image = [] \n for b in range(1,5):\n band = Image.open(path+\"/\"+'Hansen/last_20%d_%d.tif'%(year,b))\n band = ToTensor()(band)\n image.append(band)\n image = torch.cat(image,dim = 0)\n image = image.float()\n return(image)", "_____no_output_____" ], [ "start = time.time()\n#Run this:###########\nlast_16 = 
last_to_image(sourcepath,16)\n####################\nprint(last_16.shape)\nprint(\"Time needed to load one last file as tensor (in seconds): \",time.time() - start)", "torch.Size([4, 17964, 16852])\nTime needed to load one last file as tensor (in seconds): 41.979917764663696\n" ] ], [ [ "## Rescale Image\n<br> **rescale_image**\n\nGet satelite image of Madre de Dios area at a given year, compute the mean and std of all\nnon-masked pixels (those covering Madre de Dios area) and returns a normalized satellite image.\nNormalization is done per channel. Reason for that is that different channels have different means and \nstd. Mean and std are also conputed per year because images might be taken at different seasons and therefore the channels values distribution might be different.", "_____no_output_____" ], [ "### Here we illustrate the 4 channels values distributions of year 2016 satellite image ", "_____no_output_____" ] ], [ [ "pixels = (last_16[1,:,:] != -1).nonzero()\nvalues1 = last_16[0, pixels[:,0] , pixels[:,1]].view(-1)\nvalues2 = last_16[1, pixels[:,0] , pixels[:,1]].view(-1)\nvalues3 = last_16[2, pixels[:,0] , pixels[:,1]].view(-1)\nvalues4 = last_16[3, pixels[:,0] , pixels[:,1]].view(-1)", "_____no_output_____" ], [ "plt.fig = plt.figure()\nplt.subplot(2, 2, 1)\nplt.hist(values1.numpy())\nplt.title(\"band 1\")\nplt.subplot(2, 2, 2)\nplt.hist(values2.numpy())\nplt.title(\"band 2\")\nplt.subplot(2, 2, 3)\nplt.hist(values3.numpy())\nplt.title(\"band 3\")\nplt.subplot(2, 2, 4)\nplt.hist(values4.numpy())\nplt.title(\"band 4\")\nplt.show()", "_____no_output_____" ], [ "def rescale_image(image):\n # detach and clone the image so that you don't modify the input, but are returning new tensor.\n rescaled_image = image.data.clone()\n if(len(image.shape) == 2):\n rescaled_image = rescaled_image.unsqueeze(dim = 0)\n # Compute mean and std only from non masked pixels\n # Spatial coordinates of this pixels are:\n pixels = (rescaled_image[0,:,:] != -1).nonzero()\n mean = rescaled_image[:, pixels[:,0] , pixels[:,1]].mean(1,keepdim=True)\n std = rescaled_image[:, pixels[:,0] , pixels[:,1]].std(1,keepdim=True)\n rescaled_image[:, pixels[:,0] , pixels[:,1]] -= mean\n rescaled_image[:, pixels[:,0] , pixels[:,1]] /= std\n if(len(image.shape) == 2):\n rescaled_image = rescaled_image.squeeze(dim = 0)\n mean = mean.squeeze(dim = 0)\n std = std.squeeze(dim = 0)\n return(rescaled_image,mean,std)", "_____no_output_____" ], [ "st = time.time()\nrescaled_image, mean, std = rescale_image(last_16)\nprint(\"Time to rescale one year image:\",time.time() - st)\nprint(\"\\nMean of channles : \\n\",mean)\nprint(\"\\nStd of channles : \\n\",std)\nprint(\"\\nOriginal image preserved: \",last_16[:,555,7777])\nprint(\"Rescaled values at this location: \",rescaled_image[:,555,7777])", "Time to rescale one year image: 18.758196115493774\n\nMean of channles : \n tensor([[18.5237],\n [72.7132],\n [63.6026],\n [31.1686]])\n\nStd of channles : \n tensor([[13.4344],\n [19.2726],\n [19.4598],\n [16.9414]])\n\nOriginal image preserved: tensor([-1., -1., -1., -1.])\nRescaled values at this location: tensor([-1., -1., -1., -1.])\n" ] ], [ [ "#### Rescale_image can also be applied to any tensor of 2 dimensions. 
\nIn this case rescaling is done with respect to the whole area in Made de Dios (that does not have -1 values) Note: if a new tensor has negative values by characteristics, one musth change the encoding.", "_____no_output_____" ], [ "## Create Categorical Layers Indicationg Deforestation Times Periods **if_def_when**\n\n Takes as input:\n \n the lossyear tensor; \n the year to be considered as current t; t is one of 1,2,3,..18\n the cutoffs - list of cut of values (of lenght n) that determine the one hot encoding\n \n Returns:\n \n \"One hot\" encded 3D tensor with shpe ([n+1, 14646, 15723]) for loss event.\n \n Example: \n \n Takes: 2D lossyear tensor, year = 14, cutoffs = [2,5,8] (n=3).\n Returns: 3D Cathegorical tensor with num of layers = 4 (n+1) \n \n Each Cathegorical layer gets value 1 if: \n \n for layer 0: if year - lossyear is in [0,2) i.e: 14-14, 14-13 (lossyear = 14 , 13)\n for layer 1: if year - lossyear is in [2,5) i.e: 14-12, 14-11, 14-10 (lossyear = 12 , 11, 10)\n for layer 2: if year - lossyear is in [5,8) i.e: 14-9, 14-8, 14-7 (lossyear = 9 , 8, 7)\n for layer 3: 8 years ago or more i.e: 14-6, 14-5,...14-1 (lossyear = 6, 5, 4, 3, 2, 1)\n\nIf lossyear value of a pixel is 0 or greater than t ( lossyear > 14 ), all cathegorical layers have value 0, which indicates that at year t=14, we know this pixel is not deforested yet. We consider all future deforestations event for year t = 14 as not observed yet ,\"unknown\". If pixel is masked, all cathegorical values take the NA value -1", "_____no_output_____" ] ], [ [ "def if_def_when(lossyear,year,cutoffs = [2,5,8]):\n \"\"\"\n Creates categorical variables for deforestration event given cutoffs.\n Values in cutoffs define the time bins\n Returns len(cutoffs) + 1 cathegorical layers:\n Example: cutoffs = [2,5,8], num of layers = 4 , considered year = year\n Cathegories: \n 0) if year - lossyear is in [0,2) \n 1) if year - lossyear is in [2,5) \n 2) if year - lossyear is in [5,8) \n 3) 8 years ago or more\n No prior knowledge:\n if loss event is in year > considered year or pixel is non deforested up to 2018+, all cathegories have value 0 \n \"\"\"\n cutoffs.append(year)\n cutoffs.insert(0,0)\n lossyear[ (lossyear > year) ] = 0\n losses = []\n for idx in range(0,len(cutoffs) - 1 ): \n deff = torch.zeros(lossyear.size())\n deff[ (cutoffs[idx] <= (year - lossyear)) & ( (year - lossyear) < cutoffs[idx+1]) ] = 1\n losses.append(deff.float()) \n losses = torch.stack(losses)\n #Return Nan values encoded as needed:\n losses[:, (lossyear== -1)] = -1\n return(losses)", "_____no_output_____" ], [ "lossyear = torch.tensor([[1,2,3,4,5],[6,7,8,9,10],[11,12,13,14,15],[16,17,18,-1,0]])\nprint(\"loss year cases:\")\nprint(lossyear)\ncurrentyear = 14\nprint(\"\\nCurrent year: t=\", currentyear)\nwhen = if_def_when(lossyear, currentyear, cutoffs = [2,5,8])\nprint(\"\\nNo prior knowledge for future deforestration. Lossyear modified to: \")\nprint(lossyear)\nprint(\"\\nCorresponding categorical layers\")\nfor i in range(0, len(when)):\n print(\"\\nTime bin category %d\\n\"%i,when[i])", "loss year cases:\ntensor([[ 1, 2, 3, 4, 5],\n [ 6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, -1, 0]])\n\nCurrent year: t= 14\n\nNo prior knowledge for future deforestration. 
Lossyear modified to: \ntensor([[ 1, 2, 3, 4, 5],\n [ 6, 7, 8, 9, 10],\n [11, 12, 13, 14, 0],\n [ 0, 0, 0, -1, 0]])\n\nCorresponding categorical layers\n\nTime bin category 0\n tensor([[ 0., 0., 0., 0., 0.],\n [ 0., 0., 0., 0., 0.],\n [ 0., 0., 1., 1., 0.],\n [ 0., 0., 0., -1., 0.]])\n\nTime bin category 1\n tensor([[ 0., 0., 0., 0., 0.],\n [ 0., 0., 0., 0., 1.],\n [ 1., 1., 0., 0., 0.],\n [ 0., 0., 0., -1., 0.]])\n\nTime bin category 2\n tensor([[ 0., 0., 0., 0., 0.],\n [ 0., 1., 1., 1., 0.],\n [ 0., 0., 0., 0., 0.],\n [ 0., 0., 0., -1., 0.]])\n\nTime bin category 3\n tensor([[ 1., 1., 1., 1., 1.],\n [ 1., 0., 0., 0., 0.],\n [ 0., 0., 0., 0., 0.],\n [ 0., 0., 0., -1., 0.]])\n" ] ], [ [ "# Create Tensors of Features to be used from models and save them for later usage. *create_tnsors_pixels*", "_____no_output_____" ], [ "Given year t, and cutoffs as defined above returns (and save them if wherepath!= None):\n<br>Static tensor $S$,\n<br>Non static tensor $X_t$,\n<br>list of valid pixels coordinates $\\mathbf{J}_{t}$\n<br>list of labels corresponding to this valid cordinates: $Y^j_{t}$ where $j \\in \\mathbf{J}_{t}$\n<br>Other two inputs are:\n<br>sourcepath = path to tiff files\n<br>wherepath = if not None, path to the folder where to save these tensors\n \n<br>Static tensor is identical for any year, hence save only once. Static tensor has datamask layer and treecover\n<br>Nonstatic tensor has if_deff_when cathegorical layers and the four bands of the landsat image stacked\n \n Valid pixels are these that meet all the following conditions :\n 1. datamask == 1 , eg land not water body\n 2. tree_cover > tree_p or gain == 1 if threecanpy in 2000 > tree_p or became forest up to 2012 \n 3. lossyear > end_year or lossyear == 0 experienced loss only after that year (or not at all in the study period) \n 4. buffer == 0 is in AOI area\n \n$$\\mathbf{J}_t =: \\{j \\in \\mathbb{M} : ( lossyear_j > t \\,\\ \\cup \\,\\ lossyear_j = 0 )$$ \n$$\\cap (datamask_j = 1) \\cap (treecover_j > 30\\% \\,\\ \\cup \\,\\ gain_j = 1) \\}$$\nWhere $\\mathbb{M}$ is the index set of pixels lyng within the ROI boundries. For each valid pixel j assign label $Y^j_{t} = 1$ if it is deforested in exactly in year = t+1 or zero otherwise.\n<br>All pixels in the rasters and produced tensors have value -1 in the locations outside ROI area and its buffer", "_____no_output_____" ] ], [ [ "def create_tnsors_pixels(end_year, tree_p = 30, cutoffs = [2,5,8] , sourcepath = sourcepath ,rescale = True, wherepath = None):\n \"\"\"\n Given year, and cutoffs as defined above returns (and save if wherepath!= None) \n Static tensor,\n Non static tensor,\n list of valid pixels codrinates,\n list of labels corresponding to this valid cordinates\n \n sourcepath = path to tiff files\n wherepath = in not None, path to where to save the tensors\n \n Static tensor is identical for any year, hence save only once\n Static tensor has datamask layer and treecover\n \n Nonstatic tensor has if_deff_when cathegorical layers and the image landset 7 bands stacked\n \n Valid pixels are these that meet all the following conditions :\n 1. datamask == 1 , eg land not water body\n 2. tree_cover > tree_p or gain == 1 if tree canopy in 2000 > tree_p or became forest up to 2012 \n 3. lossyear > end_year or lossyear == 0 experienced loss only after that year (or not at all in the study period) \n 4. 
buffer == 0 is in Madre de Dios area\n \n for each valid pixel assign label 1 if it is deforested in exactly in year+1 or zero otherwise \n \n All pixels in the rasters and produced tensors have value 111 in the locations outside Area of Interest and its buffer\n \"\"\"\n buffer = to_Tensor(sourcepath,'buffer/if_in_buffer.tif')\n gain = to_Tensor(sourcepath,'Hansen/gain_2018.tif')\n lossyear = to_Tensor(sourcepath,'Hansen/lossyear_2018.tif')\n datamask = to_Tensor(sourcepath,'Hansen/datamask_2018.tif')\n tree_cover = to_Tensor(sourcepath,'Hansen/treecover2000_2018.tif')\n tree_cover = tree_cover.float()\n datamask = datamask.float()\n #Create list of valid pixels coordinates\n pixels = ( (datamask == 1) & #land (not water body)\n ((tree_cover > tree_p ) | (gain == 1)) & #if forest in 2000 or became forest up to 2012 \n ((lossyear > end_year) | (lossyear == 0))& #experienced loss only after that year (or not at all in the study period) \n (buffer == 0)).nonzero() #In area of interest\n \n \n #Create list of valid pixels labels in year + 1\n labels = lossyear[pixels[:,0],pixels[:,1]] == (end_year+1) #can be change to >= (end_year+1) & <111 \n\n when = if_def_when(lossyear,end_year, cutoffs = cutoffs)\n image = last_to_image(sourcepath,end_year)\n \n if rescale:\n #Rescale datamask to have values -1 for nan, 0 for land, 1 for water\n datamask[datamask != -1] = datamask[datamask != -1] - 1\n #Rescale tree_cover to have values in [0, 1] and -1 for nan\n tree_cover[tree_cover != -1] = tree_cover[tree_cover != -1]*0.01\n #Normalize image by channel with -1 values for nan\n image, _, _ = rescale_image(image)\n \n #Create non Static tensor\n image = torch.cat((when,image),dim=0)\n #Creates static tensor\n static = torch.stack((datamask,tree_cover))\n\n \n #Creates non static tensor\n if wherepath:\n if not os.path.isfile(wherepath+\"/\"+\"static.pt\"):\n torch.save(static, wherepath+\"/\"+\"static.pt\")\n torch.save(image, wherepath+\"/\"+\"tensor_%d.pt\"%(end_year))\n torch.save(pixels,wherepath+\"/\"+\"pixels_cord_%d.pt\"%(end_year))\n torch.save(labels,wherepath+\"/\"+\"labels_%d.pt\"%(end_year))\n\n return static, image, pixels, labels \n ", "_____no_output_____" ], [ "start = time.time()\nstatic, image, pixels, labels = create_tnsors_pixels(18, tree_p = 30, cutoffs = [2,5,8] , sourcepath = sourcepath ,rescale = True, wherepath = None)\nprint(\"Total time (in seconds) needed to create tensors: \",time.time() - start)", "Total time (in seconds) needed to create tenosrs: 147.33529424667358\n" ], [ "torch.save(image, wherepath+\"/\"+\"tensor_%d.pt\"%(18))\ntorch.save(pixels,wherepath+\"/\"+\"pixels_cord_%d.pt\"%(18))", "_____no_output_____" ] ], [ [ "# Data Classes\nIn pytorch one need to have data class to load the data. This data class must have \\__getitem\\__(self, idx) function that returns the imput of the model and the output to which the prediction of the model is compared. Here \\__getitem\\__ returns the model input image/time series of images, the label of the central pixel and its cordinates. Each class must also have function that returns the dataset lenght: \\__len\\__(self) ", "_____no_output_____" ], [ "## CNN data class\nAs mentioned above, the data set for our CNN model is trained on all pairs of images labels for years 2014, 2015, 2016, 2017. To make this union of data pairs, the class is given lists. 
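To make the __getitem__ / __len__ contract described above concrete, here is a minimal, self-contained sketch (toy data only, not part of the original notebook) of a patch-extracting Dataset consumed through a DataLoader; the real DatasetCNN defined below follows the same pattern of returning the image patch, the label of its central pixel and the pixel coordinates:

```python
import torch
from torch.utils.data import Dataset, DataLoader

class ToyPixelDataset(Dataset):
    """Toy example: return a (2*size+1)-sized patch, its label and its coordinates."""
    def __init__(self, image, pixels, labels, size):
        self.image, self.pixels, self.labels, self.size = image, pixels, labels, size

    def __len__(self):
        return len(self.pixels)

    def __getitem__(self, idx):
        r, c = self.pixels[idx].tolist()
        patch = self.image[:, r - self.size : r + self.size + 1,
                              c - self.size : c + self.size + 1]
        return patch, self.labels[idx], self.pixels[idx]

image = torch.randn(8, 100, 100)          # fake 8-channel raster
pixels = torch.randint(10, 90, (500, 2))  # fake coordinates of valid pixels
labels = torch.randint(0, 2, (500,))      # fake 0/1 next-year labels
loader = DataLoader(ToyPixelDataset(image, pixels, labels, size=5),
                    batch_size=32, shuffle=True)
patch, label, cor = next(iter(loader))    # shapes: (32, 8, 11, 11), (32,), (32, 2)
```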
\n\nFor example, to create CNN data class that has data points from Images in 2015 and 2016 and their corresponding labels in 2016 and 2017 respectively, image must be the list $\\mathbf{X_{2015}}$, $\\mathbf{X_{2016}}$, pixels must be the list of valid pixels coordinates in the corresponding year $\\mathbf{J_{2015}}$, $\\mathbf{J_{2016}}$, and labels must be $\\mathbf{Y^{j_{2015}}_{2016}}$, $\\mathbf{Y^{j_{2016}}_{2017}}$. The static tensor $\\mathbf{S}$ is also gven to the data class.\n\nThe data set lenght is then the lenght of the set $\\mathbf{J_{2015}} \\cup \\mathbf{J_{2016}}$ and when the function \\_getitem\\_(self, idx) is given a index it returns the coresonding pixel static tensor $\\mathbf{S}$ , image and label. The function idx_to_image(self,idx) maps each index to the correct pair of image,label with respect to wich year pair we refer to by this index.\n\nThe data class also have function change_size(self, new_size) that allows the spatial size of the tensor to be changed withouth re-initializing the class.", "_____no_output_____" ] ], [ [ "class DatasetCNN(Dataset):\n \"\"\"\n CNN Data class\n \n if it is passed list of image, pixels and labels, it concate the data as one, where inputs are\n all pairs of image:next year labels for valid pixels. Pairs are ordered as sequence in the same order as\n in the flatten list\n \n if list is of lenght 1, only one year pairs\n \n size is the radius of the image. Can be modified with Data.change_size(new size) \n \"\"\"\n\n def __init__(self, size, static, image, pixels, labels): \n self.size = size\n self.lenghts = None\n \n if(len(image) == 1):\n image = torch.cat(image, dim = 0)\n self.image = torch.cat((static,image), dim = 0)\n \n else:\n #add static to each image in the list so that all images are ready tensors\n # do this only when initializig the data class so that it is quick to call ready tensor at each get item call\n #save tensors in a list keeping the image order\n \n #save the lengths of each item in the pixles codrintes/labels list \n #so that after they are flattened, a map pixel,year -> image,year is possible\n self.lenghts = [ len(i) for i in pixels ]\n self.image = []\n for im in image:\n img = torch.cat((static,im), dim = 0)\n self.image.append(img)\n \n self.pixels = torch.cat(pixels, dim = 0)\n self.labels = torch.cat(labels, dim = 0) \n \n def idx_to_image(self,idx):\n \"\"\"\n given a index of a flatten list of pixels in different years,\n return the corresponding image for the given year\n \"\"\"\n \n if self.lenghts == None:\n image = self.image\n \n else:\n csum = list(np.cumsum(self.lenghts))\n csum.insert(0,0)\n for i in range(1,len(csum)):\n if ((idx >= csum[i-1]) & (idx < csum[i])): \n image = self.image[i-1] \n break;\n return image\n \n def change_size(self, new_size):\n self.size = new_size\n \n def __getitem__(self, idx): \n \n image = self.idx_to_image(idx) \n image = image[:,\n (self.pixels[idx,0] - self.size) : (self.pixels[idx,0] + self.size + 1),\n (self.pixels[idx,1] - self.size) : (self.pixels[idx,1] +self.size + 1)] \n \n label = self.labels[idx]\n \n cor = self.pixels[idx]\n \n return image , label, cor\n \n def __len__(self):\n return len(self.pixels)\n ", "_____no_output_____" ] ], [ [ "## load_CNNdata \nThis function takes as input the size one wish the CNNdata object to have and the start and end year of the pairs considered. 
It then loads the lists needed to initialize the data class and returns the corresponding CNNdata object.\n\nIf one wish to add extra static layers, than add_static must be a list of this tensors (2D or 3D for multi-channels)\n\nIf one wish to add extra time layers, than add_time must be a list of lists of this tensors (2D or 3D for multi-channels) where the lists are sorted in time and are of lenght end_year - start_year + 1\n\nEg:add_time = [[layer_1_2014, layer_2_2014],[layer_1_2015, layer_2_2015],[layer_1_2016, layer_2_2016]] where \nlayer_1 and layer_2 can be 2D or 3D tensors.", "_____no_output_____" ] ], [ [ "def load_CNNdata(size, start_year, end_year, add_static = None, add_time = None, path = wherepath):\n \"\"\"\n given start year, end year and size initilalize CNN data class \n start year and end year define how many pairs imange - next year label the data to have \n size define the returned image size\n path = path to saved tensors\n \"\"\"\n \n path = path + \"/\"\n static = torch.load(path+\"static.pt\")\n if(add_static):\n for to_add in add_static:\n if len(to_add.shape) == 2 :\n to_add = to_add.unsqueeze(dim = 0)\n static = torch.cat([static,to_add], dim = 0)\n else:\n static = torch.cat([static,to_add], dim = 0) \n \n images_ls = []\n pixels_ls = []\n labels_ls = []\n \n for i, year in enumerate(range(start_year,end_year+1)):\n \n image = torch.load(path+\"tensor_%d.pt\"%(year))\n if(add_time):\n for to_add in add_time[i]:\n if len(to_add.shape) == 2 :\n to_add = to_add.unsqueeze(dim = 0)\n image = torch.cat([image,to_add], dim = 0)\n else:\n image = torch.cat([image,to_add], dim = 0) \n \n images_ls.append(image)\n \n pixels = torch.load(path+\"pixels_cord_%d.pt\"%(year))\n pixels_ls.append(pixels)\n \n labels = torch.load(path+\"labels_%d.pt\"%(year))\n labels_ls.append(labels)\n \n Data = DatasetCNN(size, static = static, image = images_ls, pixels = pixels_ls, labels = labels_ls)\n \n return Data", "_____no_output_____" ] ], [ [ "# DatasetRNN", "_____no_output_____" ], [ "This class is similar to the CNN class. 
To initialize it one needs to give it the size (= r) of the tensors, the static tensor, a 4D tensor corresponding to the time series of images, organised as follows (channels, time, height, width), the set of valid pixels in the considered year and the corresponding next year labels for this set of pixels.\n\nFor example, to construct the dataset $\\mathbf{S,X_{2014},X_{2015},X_{2016}}$ , $\\mathbf{Y^{j_{2016}}_{2017}}$ , **static** must be $\\mathbf{S}$, **image** must be the 4D tensor, stack of $\\mathbf{X_{2014},X_{2015},X_{2016}}$, **pixels** must be $\\mathbf{J_{2016}}$ and **labels** must be $\\mathbf{Y^{j_{2016}}_{2017}}$.\n\n\\__getitem\\__(self, idx) returns the static image, the 4D tensor of the time series of non-static images, the next year label and the coordinates of the pixel under that index.", "_____no_output_____" ] ], [ [ "class DatasetRNN(Dataset):\n    \"\"\"\n    Data class for Model 2:4\n    get_item returns the static tensor (to be fed into the static branch)\n    and a 4d Tensor of non-static images where the shape is as follows: \n    (c,t,h,w) = (channels per image, time, h = 2*size+1, w = 2*size+1)\n    change_size sets a new image size: h&w = 2*new_size + 1\n    \"\"\"\n\n    def __init__(self, size, static, images, pixels, labels ):\n        \n        self.size = size\n        self.static = static\n        self.images = images \n        self.pixels = pixels\n        self.labels = labels\n        \n    def change_size(self, new_size):\n        self.size = new_size\n        \n    def __getitem__(self, idx): \n        \n        static = self.static[:,\n                             (self.pixels[idx,0] - self.size) : (self.pixels[idx,0] + self.size + 1),\n                             (self.pixels[idx,1] - self.size) : (self.pixels[idx,1] + self.size + 1)]\n        #(c x t x h x w)\n        images = self.images[:,:,\n                             (self.pixels[idx,0] - self.size) : (self.pixels[idx,0] + self.size + 1),\n                             (self.pixels[idx,1] - self.size) : (self.pixels[idx,1] + self.size + 1)] \n        label = self.labels[idx]\n        \n        return (static, images) , label\n\n    def __len__(self):\n        return len(self.pixels)\n    ", "_____no_output_____" ] ], [ [ "**load_RNNdata**\n<br>Function that takes size, start_year, end_year and returns a DatasetRNN object corresponding to this time period and size\n\nIf one wishes to add extra static layers, then add_static must be a list of these tensors (2D, or 3D for multi-channel)\n\nIf one wishes to add extra time layers, then add_time must be a list of lists of these tensors (2D, or 3D for multi-channel) where the lists are sorted in time and are of length end_year - start_year + 1\n\nE.g. add_time = [[layer_1_2014, layer_2_2014],[layer_1_2015, layer_2_2015],[layer_1_2016, layer_2_2016]] where layer_1 and layer_2 can be 2D or 3D tensors.", "_____no_output_____" ] ], [ [ "def load_RNNdata(size, start_year, end_year, add_static = None, add_time = None, path = wherepath):\n    \"\"\"\n    given start year, end year and size, initialize the RNN data class \n    start year and end year define the number of elements in the time series of images\n    size defines the returned image size\n    path = path to saved tensors\n    \"\"\"\n    path = path + \"/\"\n    images = []\n    for i, year in enumerate(range(start_year,end_year+1)):\n        \n        image = torch.load(path+\"tensor_%d.pt\"%(year))\n        \n        if(add_time):\n            for to_add in add_time[i]:\n                if len(to_add.shape) == 2 :\n                    to_add = to_add.unsqueeze(dim = 0)\n                    image = torch.cat([image,to_add], dim = 0)\n                else:\n                    image = torch.cat([image,to_add], dim = 0)            \n        \n        \n        image = image.unsqueeze(dim = 1)\n        images.append(image)\n        \n    images = torch.cat(images, dim = 1)\n    \n    static = torch.load(path+\"static.pt\")\n    \n    if(add_static):\n        for to_add in add_static:\n            if len(to_add.shape) == 2 :\n                to_add = to_add.unsqueeze(dim = 0)\n                static = torch.cat([static,to_add], dim = 0)\n            else:\n                static = torch.cat([static,to_add], dim = 0)      \n    \n    pixels = torch.load(path+\"pixels_cord_%d.pt\"%(end_year))\n    labels = torch.load(path+\"labels_%d.pt\"%(end_year))    \n    Data = DatasetRNN(size = size , images = images ,static = static, pixels = pixels, labels = labels)\n    return Data", "_____no_output_____" ] ], [ [ "# Load Data", "_____no_output_____" ], [ "## Load CNN data with two year pairs, 16 & 17\n<br> Data_16_17 = load_CNNdata(4, start_year = 16, end_year = 17, path = wherepath) initializes CNN data for two years, 16 & 17, with image size 9\n<br> Data_16_17.change_size(2) to change the image size from $2 \\times 4+1 = 9$ to $2 \\times 2+1 = 5$", "_____no_output_____" ] ], [ [ "Data_16_17 = load_CNNdata(4, start_year = 16, end_year = 17, path = wherepath)\nprint(\"Data length of 2 years:\",len(Data_16_17))\nloss = (Data_16_17.labels == 1).nonzero()\nprint(\"% deforested valid pixels in 2017 and 2018:\")\nprint(len(loss)/len(Data_16_17.labels))\nprint(\"\\nFirst band of satellite image layer: \\n\")\nprint(Data_16_17[8498][0][-4,:,:])\nprint(\"\\nChange size from 9 to 5:\\n\")\nData_16_17.change_size(2)\nprint(\"\\n\")\nprint(Data_16_17[8498][0][-4,:,:])\ndel Data_16_17", "_____no_output_____" ] ], [ [ "## Load 1 year data CNN\nData_17 = load_CNNdata(4, start_year = 17, end_year = 17, path = wherepath)", "_____no_output_____" ] ], [ [ "Data_17 = load_CNNdata(4, start_year = 17, end_year = 17, path = wherepath)\nprint(\"% valid pixels deforested in 2018: \",len((Data_17.labels == 1).nonzero())/len(Data_17))\nprint(\"\\nFirst band of satellite image layer: \\n\")\nprint(Data_17[55555][0][-4,:,:])\nprint(\"\\nChange size from 9 to 5:\\n\")\nData_17.change_size(2)\nprint(Data_17[55555][0][-4,:,:])\ndel Data_17", "_____no_output_____" ] ], [ [ "# Add DSM static tensor after it has been transformed and rescaled ", "_____no_output_____" ] ], [ [ "DSM = torch.load(wherepath+\"/DSM.pt\")", "_____no_output_____" ], [ "Data_17 = load_CNNdata(4, start_year = 17, end_year = 17, path = wherepath, add_static = [DSM])", "_____no_output_____" ] ], [ [ "### Data with updated layers:", "_____no_output_____" ] ], [ [ "Data_17[99][0].shape", "_____no_output_____" ] ], [ [ "# Load RNN data:\nRNNData = load_RNNdata(size = 2 , start_year = 14 , end_year = 16)", "_____no_output_____" ] ], [ [ "RNNData = load_RNNdata(size = 2 , start_year = 14 , end_year = 16)\nprint(\"shape of the static tensor: (static channels, h, w) = \",RNNData[22][0][0].shape) # static\nprint(\"shape of the non-static tensor: (channels, time, h, w) = \",RNNData[22][0][1].shape) # images\nloss = (RNNData.labels == 1).nonzero()\nprint(\"% deforested valid pixels in 2017:\")\nprint(len(loss)/len(RNNData.labels))\ndel(RNNData)", "_____no_output_____" ] ], [ [ "# Summary:\n<br> 1) Save tensors with:", "_____no_output_____" ] ], [ [ " for year in range(14,18):\n    print(year)\n    static, image, pixels,labels = create_tnsors_pixels(year, tree_p = 0.3,\n                                                         cutoffs = [2,5,8],\n                                                         sourcepath = sourcepath,\n                                                         wherepath = wherepath)\n    print(\"Files saved!\")\n    print(\"\\n\")    ", "14\nFiles saved!\n\n\n15\nFiles saved!\n\n\n16\nFiles saved!\n\n\n17\nFiles saved!\n\n\n18\nFiles saved!\n\n\n" ] ], [ [ "2) Load data with:", "_____no_output_____" ], [ "CNNData_16_17 = load_CNNdata(4, start_year = 16, end_year = 17, path = wherepath)\n\nCNNData_17 = load_CNNdata(4, start_year = 17, end_year = 17, path = wherepath)\n\nRNNData = load_RNNdata(size = 4 , start_year = 14 , 
end_year = 16, path = wherepath)", "_____no_output_____" ], [ "To add an extra static layer:\n    \n    server = '/rdsgpfs/general/user/kpp15/home/Hansen'\n    wherepath = server + '/data/raster/tensors'\n\n    %cd server+ '/deforestation_forecasting/python_code'\n    from Data_maker_loader import *\n\n    DSM = torch.load(wherepath+\"/DSM.pt\")\n\n    CNNdata = load_CNNdata(size = 45, start_year = 16, end_year = 16, path = wherepath, add_static = [DSM])\n    # or\n    CNNdata = load_CNNdata(size = 45, start_year = 14, end_year = 16, path = wherepath, add_static = [DSM])\n    # or\n    RNNdata = load_RNNdata(size = 45, start_year = 14, end_year = 16, path = wherepath, add_static = [DSM]) ", "_____no_output_____" ], [ "# Appendix", "_____no_output_____" ], [ "## idx - image map for CNN data with several {image, next_year_label} pairs.\n\n<br>Pixel coordinates from different years and labels from different years are mixed, more specifically concatenated sequentially. Get the correct image for a given index, and therefore pixel[index], label[index].\n<br> Example: 4 years, year 1 has 3 pixels, year 2 - 6, year 3 - 1, year 4 - 5.\n<br> Pixels and labels are concatenated and make 15 data points in total.\n<br> Four different images correspond to indexes 0:3, 3:9, 9:10, 10:15\n<br> Note 0:3 returns 0,1,2 and 0:15 returns 0,1,2,..14. Python indexing starts from 0", "_____no_output_____" ] ], [ [ "v = [[1,1,1],[1,1,1,1,1,1,],[1],[1,1,1,1,1]]\nlenghts = [ len(i) for i in v ]\nprint(\"num of pixels in each year: \",lenghts)\ncsum = list(np.cumsum(lenghts))\nprint(\"Cumulative sum of number of pixels in each year: \",csum)\ncsum.insert(0,0)\n\nfor idx in range(0,csum[-1]):\n    for i in range(1,len(csum)):\n        if ((idx >= csum[i-1]) & (idx < csum[i])): \n            print(\"idx : \",idx,\"in tensor: \",i-1)\n            break;", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ] ]
cb9e617c2fb3df19a46b1c5bc3175a8d4164066a
9,449
ipynb
Jupyter Notebook
example/model_inversion/mid.ipynb
luoshenseeker/AIJack
4e871a5b3beb4b7c976d38060d6956efcebf880d
[ "MIT" ]
24
2021-11-17T02:16:47.000Z
2022-03-27T01:04:08.000Z
example/model_inversion/mid.ipynb
luoshenseeker/AIJack
4e871a5b3beb4b7c976d38060d6956efcebf880d
[ "MIT" ]
9
2021-12-03T06:09:27.000Z
2022-03-29T06:33:53.000Z
example/model_inversion/mid.ipynb
luoshenseeker/AIJack
4e871a5b3beb4b7c976d38060d6956efcebf880d
[ "MIT" ]
5
2022-01-12T09:58:04.000Z
2022-03-17T09:29:04.000Z
30.678571
95
0.534236
[ [ [ "import torch\nfrom torch import nn\nfrom torch import optim\nfrom torchvision.datasets import MNIST\nfrom torch.utils.data import TensorDataset, Dataset, DataLoader\n\nfrom tqdm.notebook import tqdm\nimport numpy as np", "_____no_output_____" ], [ "from aijack.defense import VIB, KL_between_normals, mib_loss", "_____no_output_____" ], [ "dim_z = 256\nbeta = 1e-3\nbatch_size = 100\nsamples_amount = 15\nnum_epochs = 1", "_____no_output_____" ], [ "train_data = MNIST(\"MNIST/.\", download=True, train=True)\ntrain_dataset = TensorDataset(\n train_data.train_data.view(-1, 28 * 28).float() / 255, train_data.train_labels\n)\ntrain_loader = DataLoader(train_dataset, batch_size=batch_size)\n\ntest_data = MNIST(\"MNIST/.\", download=True, train=False)\ntest_dataset = TensorDataset(\n test_data.test_data.view(-1, 28 * 28).float() / 255, test_data.test_labels\n)\ntest_loader = DataLoader(test_dataset, batch_size=batch_size)", "_____no_output_____" ], [ "encoder = nn.Sequential(\n nn.Linear(in_features=784, out_features=1024),\n nn.ReLU(),\n nn.Linear(in_features=1024, out_features=1024),\n nn.ReLU(),\n nn.Linear(in_features=1024, out_features=2 * dim_z),\n)\ndecoder = nn.Linear(in_features=dim_z, out_features=10)", "_____no_output_____" ], [ "net = VIB(encoder, decoder, dim_z, num_samples=samples_amount)\nopt = torch.optim.Adam(net.parameters(), lr=1e-4)\nscheduler = torch.optim.lr_scheduler.ExponentialLR(opt, gamma=0.97)", "_____no_output_____" ], [ "import time\n\nfor epoch in range(num_epochs):\n loss_by_epoch = []\n accuracy_by_epoch = []\n I_ZX_bound_by_epoch = []\n I_ZY_bound_by_epoch = []\n\n loss_by_epoch_test = []\n accuracy_by_epoch_test = []\n I_ZX_bound_by_epoch_test = []\n I_ZY_bound_by_epoch_test = []\n\n if epoch % 2 == 0 and epoch > 0:\n scheduler.step()\n\n for x_batch, y_batch in tqdm(train_loader):\n x_batch = x_batch\n y_batch = y_batch\n\n y_pred, result_dict = net(x_batch)\n sampled_y_pred = result_dict[\"sampled_decoded_outputs\"]\n p_z_given_x_mu = result_dict[\"p_z_given_x_mu\"]\n p_z_given_x_sigma = result_dict[\"p_z_given_x_sigma\"]\n\n approximated_z_mean = torch.zeros_like(p_z_given_x_mu)\n approximated_z_sigma = torch.ones_like(p_z_given_x_sigma)\n\n loss, I_ZY_bound, I_ZX_bound = mib_loss(\n y_batch,\n sampled_y_pred,\n p_z_given_x_mu,\n p_z_given_x_sigma,\n approximated_z_mean,\n approximated_z_sigma,\n beta=beta,\n )\n\n prediction = torch.max(y_pred, dim=1)[1]\n accuracy = torch.mean((prediction == y_batch).float())\n\n loss.backward()\n opt.step()\n opt.zero_grad()\n\n I_ZX_bound_by_epoch.append(I_ZX_bound.item())\n I_ZY_bound_by_epoch.append(I_ZY_bound.item())\n\n loss_by_epoch.append(loss.item())\n accuracy_by_epoch.append(accuracy.item())\n\n for x_batch, y_batch in tqdm(test_loader):\n x_batch = x_batch\n y_batch = y_batch\n\n y_pred, result_dict = net(x_batch)\n sampled_y_pred = result_dict[\"sampled_decoded_outputs\"]\n p_z_given_x_mu = result_dict[\"p_z_given_x_mu\"]\n p_z_given_x_sigma = result_dict[\"p_z_given_x_sigma\"]\n\n approximated_z_mean = torch.zeros_like(p_z_given_x_mu)\n approximated_z_sigma = torch.ones_like(p_z_given_x_sigma)\n\n loss, I_ZY_bound, I_ZX_bound = mib_loss(\n y_batch,\n sampled_y_pred,\n p_z_given_x_mu,\n p_z_given_x_sigma,\n approximated_z_mean,\n approximated_z_sigma,\n beta=beta,\n )\n\n prediction = torch.max(y_pred, dim=1)[1]\n accuracy = torch.mean((prediction == y_batch).float())\n\n I_ZX_bound_by_epoch_test.append(I_ZX_bound.item())\n I_ZY_bound_by_epoch_test.append(I_ZY_bound.item())\n\n 
loss_by_epoch_test.append(loss.item())\n accuracy_by_epoch_test.append(accuracy.item())\n\n print(\n \"epoch\",\n epoch,\n \"loss\",\n np.mean(loss_by_epoch_test),\n \"prediction\",\n np.mean(accuracy_by_epoch_test),\n )\n\n print(\n \"I_ZX_bound\",\n np.mean(I_ZX_bound_by_epoch_test),\n \"I_ZY_bound\",\n np.mean(I_ZY_bound_by_epoch_test),\n )", "_____no_output_____" ], [ "from aijack.attack import GradientInversion_Attack", "_____no_output_____" ], [ "y_pred, result_dict = net(x_batch[:1])\nsampled_y_pred = result_dict[\"sampled_decoded_outputs\"]\np_z_given_x_mu = result_dict[\"p_z_given_x_mu\"]\np_z_given_x_sigma = result_dict[\"p_z_given_x_sigma\"]\n\napproximated_z_mean = torch.zeros_like(p_z_given_x_mu)\napproximated_z_sigma = torch.ones_like(p_z_given_x_sigma)\n\n\nloss, I_ZY_bound, I_ZX_bound = mib_loss(\n y_batch[:1],\n sampled_y_pred,\n p_z_given_x_mu,\n p_z_given_x_sigma,\n approximated_z_mean,\n approximated_z_sigma,\n beta=beta,\n)\n\nreceived_gradients = torch.autograd.grad(loss, net.parameters())\nreceived_gradients = [cg.detach() for cg in received_gradients]\nreceived_gradients = [cg for cg in received_gradients]", "_____no_output_____" ], [ "from matplotlib import pyplot as plt\nimport cv2\n\nnet.eval()\ncpl_attacker = GradientInversion_Attack(\n net,\n (784,),\n lr=0.3,\n log_interval=50,\n optimizer_class=torch.optim.LBFGS,\n distancename=\"l2\",\n optimize_label=False,\n num_iteration=200,\n)\n\nnum_seeds = 5\nfig = plt.figure(figsize=(6, 2))\nfor s in tqdm(range(num_seeds)):\n cpl_attacker.reset_seed(s)\n try:\n result = cpl_attacker.attack(received_gradients)\n ax1 = fig.add_subplot(2, num_seeds, s + 1)\n ax1.imshow(result[0].cpu().detach().numpy()[0].reshape(28, 28), cmap=\"gray\")\n ax1.axis(\"off\")\n ax1.set_title(torch.argmax(result[1]).cpu().item())\n ax2 = fig.add_subplot(2, num_seeds, num_seeds + s + 1)\n ax2.imshow(\n cv2.medianBlur(result[0].cpu().detach().numpy()[0].reshape(28, 28), 5),\n cmap=\"gray\",\n )\n ax2.axis(\"off\")\n except:\n pass\nplt.suptitle(\"Result of CPL\")\nplt.tight_layout()\nplt.show()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb9e6f5498e6e0a0a163080b13d8bc4ce915ac76
39,778
ipynb
Jupyter Notebook
notebooks/1.2_qubit_operations.ipynb
Yuya-O-Nakagawa/quantum-native-dojo
c79ca6141db8fe1d218e1627605ae2a0c803a4e0
[ "BSD-3-Clause" ]
1
2019-12-05T06:52:15.000Z
2019-12-05T06:52:15.000Z
notebooks/1.2_qubit_operations.ipynb
Yuya-O-Nakagawa/quantum-native-dojo
c79ca6141db8fe1d218e1627605ae2a0c803a4e0
[ "BSD-3-Clause" ]
null
null
null
notebooks/1.2_qubit_operations.ipynb
Yuya-O-Nakagawa/quantum-native-dojo
c79ca6141db8fe1d218e1627605ae2a0c803a4e0
[ "BSD-3-Clause" ]
1
2022-03-06T17:53:36.000Z
2022-03-06T17:53:36.000Z
36.393413
1,856
0.509452
[ [ [ "## 1-2. 量子ビットに対する基本演算", "_____no_output_____" ], [ "量子ビットについて理解が深まったところで、次に量子ビットに対する演算がどのように表されるかについて見ていこう。\nこれには、量子力学の性質が深く関わっている。\n\n1. 線型性: \n詳しくは第4章で学ぶのだが、量子力学では状態(量子ビット)の時間変化はつねに(状態の重ね合わせに対して)線型になっている。つまり、**量子コンピュータ上で許された操作は状態ベクトルに対する線型変換**ということになる\n。1つの量子ビットの量子状態は規格化された2次元複素ベクトルとして表現されるのだったから、\n1つの量子ビットに対する操作=線型演算は$2 \\times 2$の**複素行列**によって表現される。\n\n2. ユニタリ性: \nさらに、確率の合計は常に1であるという規格化条件から、量子操作に表す線形演算(量子演算)に対してさらなる制限を導くことができる。まず、各測定結果を得る確率は複素確率振幅の絶対値の2乗で与えられるので、その合計は状態ベクトルの(自分自身との)内積と一致することに注目する:\n\n$$\n|\\alpha|^2 + |\\beta|^2 =\n(\\alpha^*, \\beta^*)\n\\left(\n\\begin{array}{c}\n\\alpha \n\\\\\n\\beta \n\\end{array}\n\\right) = 1.\n$$\n\n(アスタリスク $^*$ は複素共役を表す) \n 量子コンピュータで操作した後の状態は、量子演算に対応する線形変換(行列)を$U$とすると、\n\n$$\nU\n\\left(\n\\begin{array}{c}\n\\alpha \n\\\\\n\\beta \n\\end{array}\n\\right)\n$$\n\nと書ける。この状態についても上記の規格化条件が成り立つ必要があるので、\n\n$$\n(\\alpha^*, \\beta^*)\nU^\\dagger U\n\\left(\n\\begin{array}{c}\n\\alpha \n\\\\\n\\beta \n\\end{array}\n\\right) = 1\n$$\n\nが要請される。(ダガー $^\\dagger$ は行列の転置と複素共役を両方適用したものを表し、エルミート共役という)\n\n この関係式が任意の$\\alpha$, $\\beta$について成り立つ必要があるので、量子演算$U$は以下の条件を満たす**ユニタリー行列**に対応する:\n$$\nU^{\\dagger} U = U U^{\\dagger} = I.\n$$\n\nすなわち、**量子ビットに対する操作は、ユニタリー行列で表される**のである。", "_____no_output_____" ], [ "ここで、用語を整理しておく。量子力学では、状態ベクトルに対する線形変換のことを**演算子** (operator) と呼ぶ。単に演算子という場合は、ユニタリーとは限らない任意の線形変換を指す。それに対して、上記のユニタリー性を満たす線形変換のことを**量子演算** (quantum gate) と呼ぶ。量子演算は、量子状態に対する演算子のうち、(少なくとも理論的には)**物理的に実現可能なもの**と考えることができる。", "_____no_output_____" ], [ "### 1量子ビット演算の例:パウリ演算子\n1つの量子ビットに作用する基本的な量子演算として**パウリ演算子**を導入する。\nこれは量子コンピュータを学んでいく上で最も重要な演算子であるので、定義を体に染み込ませておこう。\n\n$$\n\\begin{eqnarray}\nI&=&\n\\left(\\begin{array}{cc}\n1 & 0\n\\\\\n0 & 1\n\\end{array}\n\\right),\\;\\;\\;\nX=\n\\left(\\begin{array}{cc}\n0 & 1\n\\\\\n1 & 0\n\\end{array}\n\\right),\\;\\;\\;\nY &=&\n\\left(\\begin{array}{cc}\n0 & -i\n\\\\\ni & 0\n\\end{array}\n\\right),\\;\\;\\;\nZ=\n\\left(\\begin{array}{cc}\n1 & 0\n\\\\\n0 & -1\n\\end{array}\n\\right).\n\\end{eqnarray}\n$$\n\n各演算子のイメージを説明する。\n\nまず、$I$は恒等演算子で、要するに「何もしない」ことを表す。\n\n$X$は古典ビットの反転(NOT)に対応し\n\n$$X|0\\rangle = |1\\rangle, \\;\\;\nX|1\\rangle = |0\\rangle\n$$\n\nのように作用する。(※ブラケット表記を用いた。下記コラムも参照。)\n\n$Z$演算子は$|0\\rangle$と$|1\\rangle$の位相を反転させる操作で、\n\n$$\nZ|0\\rangle = |0\\rangle, \\;\\;\nZ|1\\rangle = -|1\\rangle\n$$\n\nと作用する。\nこれは$|0\\rangle$と$|1\\rangle$の重ね合わせの「位相」という情報を保持できる量子コンピュータ特有の演算である。\n例えば、\n\n$$\nZ \\frac{1}{\\sqrt{2}} ( |0\\rangle + |1\\rangle ) = \\frac{1}{\\sqrt{2}} ( |0\\rangle - |1\\rangle ) \n$$\n\nとなる。\n\n$Y$演算子は$Y=iXZ$と書け、\n位相の反転とビットの反転を組み合わせたもの(全体にかかる複素数$i$を除いて)であると考えることができる。\n\n(詳細は Nielsen-Chuang の `1.3.1 Single qubit gates` を参照)", "_____no_output_____" ], [ "### SymPyを用いた一量子ビット演算\nSymPyではよく使う基本演算はあらかじめ定義されている。", "_____no_output_____" ] ], [ [ "from IPython.display import Image, display_png\nfrom sympy import *\nfrom sympy.physics.quantum import *\nfrom sympy.physics.quantum.qubit import Qubit,QubitBra\ninit_printing() # ベクトルや行列を綺麗に表示するため", "_____no_output_____" ], [ "# Google Colaboratory上でのみ実行してください\nfrom IPython.display import HTML\ndef setup_mathjax():\n display(HTML('''\n <script>\n if (!window.MathJax && window.google && window.google.colab) {\n window.MathJax = {\n 'tex2jax': {\n 'inlineMath': [['$', '$'], ['\\\\(', '\\\\)']],\n 'displayMath': [['$$', '$$'], ['\\\\[', '\\\\]']],\n 'processEscapes': true,\n 'processEnvironments': true,\n 'skipTags': ['script', 'noscript', 'style', 'textarea', 'code'],\n 'displayAlign': 'center',\n },\n 'HTML-CSS': {\n 'styles': {'.MathJax_Display': {'margin': 0}},\n 'linebreaks': 
{'automatic': true},\n // Disable to prevent OTF font loading, which aren't part of our\n // distribution.\n 'imageFont': null,\n },\n 'messageStyle': 'none'\n };\n var script = document.createElement(\"script\");\n script.src = \"https://colab.research.google.com/static/mathjax/MathJax.js?config=TeX-AMS_HTML-full,Safe\";\n document.head.appendChild(script);\n }\n </script>\n '''))\nget_ipython().events.register('pre_run_cell', setup_mathjax)", "_____no_output_____" ], [ "from sympy.physics.quantum.gate import X,Y,Z,H,S,T,CNOT,SWAP, CPHASE", "_____no_output_____" ] ], [ [ "演算子は何番目の量子ビットに作用するか、\nというのを指定して `X(0)` のように定義する。\nまた、これを行列表示するときには、いくつの量子ビットの空間で表現するか \n`nqubits`というのを指定する必要がある。\nまだ、量子ビットは1つしかいないので、\n`X(0)`、`nqubits=1`としておこう。", "_____no_output_____" ] ], [ [ "X(0)", "_____no_output_____" ], [ "represent(X(0),nqubits=1) # パウリX", "_____no_output_____" ] ], [ [ "同様に、`Y`, `Z`なども利用することができる。それに加え、アダマール演算 `H` や、位相演算 `S`、そして$\\pi/4$の位相演算 `T` も利用することができる(これらもよく出てくる演算で、定義は各行列を見てほしい):", "_____no_output_____" ] ], [ [ "represent(H(0),nqubits=1)", "_____no_output_____" ], [ "represent(S(0),nqubits=1)", "_____no_output_____" ], [ "represent(T(0),nqubits=1)", "_____no_output_____" ] ], [ [ "これらの演算を状態に作用させるのは、", "_____no_output_____" ] ], [ [ "ket0 = Qubit('0')\nS(0)*Y(0)*X(0)*H(0)*ket0", "_____no_output_____" ] ], [ [ "のように `*`で書くことができる。実際に計算をする場合は `qapply()`を利用する。", "_____no_output_____" ] ], [ [ "qapply(S(0)*Y(0)*X(0)*H(0)*ket0)", "_____no_output_____" ] ], [ [ "この列ベクトル表示が必要な場合は、", "_____no_output_____" ] ], [ [ "represent(qapply(S(0)*Y(0)*X(0)*H(0)*ket0))", "_____no_output_____" ] ], [ [ "のような感じで、SymPyは簡単な行列の計算はすべて自動的にやってくれる。", "_____no_output_____" ], [ "---\n\n### コラム:ブラケット記法\nここで一旦、量子力学でよく用いられるブラケット記法というものについて整理しておく。ブラケット記法に慣れると非常に簡単に見通しよく計算を行うことができる。 \n\n列ベクトルは \n\n$$\n|\\psi \\rangle = \\left( \n\\begin{array}{c}\n\\alpha\n\\\\\n\\beta \n\\end{array}\n\\right)\n$$\n\nとかくのであった。これを**ケット**と呼ぶ。同様に、行ベクトルは\n\n$$\n\\langle \\psi | = ( |\\psi \\rangle ) ^{\\dagger} = ( \\alpha ^* , \\beta ^*)\n$$\n\nとかき、これを**ブラ**と呼ぶ。${\\dagger}$マークは転置と複素共役を取る操作で、列ベクトルを行ベクトルへと移す。\n\n2つのベクトル、\n\n$$\n|\\psi \\rangle = \\left( \n\\begin{array}{c}\n\\alpha\n\\\\\n\\beta \n\\end{array}\n\\right), \\;\\;\\;\n|\\phi \\rangle = \\left( \n\\begin{array}{c}\n\\gamma\n\\\\\n\\delta\n\\end{array}\n\\right)\n$$\n\nがあったとする。ブラとケットを抱き合わせると\n\n$$\n\\langle \\phi | \\psi \\rangle = (\\gamma ^* , \\delta ^* ) \\left( \n\\begin{array}{c}\n\\alpha\n\\\\\n\\beta \n\\end{array}\n\\right) = \\gamma ^* \\alpha + \\delta ^* \\beta \n$$\n\nとなり、**内積**に対応する。行ベクトルと列ベクトルをそれぞれブラ・ケットと呼ぶのは、このように並べて内積を取ると「ブラケット」になるからである。\n\n逆に、背中合わせにすると\n\n$$\n|\\phi \\rangle \\langle \\psi | = \\left( \n\\begin{array}{c}\n\\gamma\n\\\\\n\\delta\n\\end{array}\n\\right) (\\alpha ^* , \\beta ^*) = \\left( \n\\begin{array}{cc}\n\\gamma \\alpha ^* & \\gamma \\beta ^* \n\\\\\n\\delta \\alpha ^* & \\delta \\beta ^*\n\\end{array}\n\\right)\n$$\n\nとなり、演算子となる。例えば、$X$演算子は\n\n$$\nX= \\left( \n\\begin{array}{cc}\n0 & 1 \n\\\\\n1 & 0 \n\\end{array}\n\\right)\n=\n|0\\rangle \\langle 1 | + |1\\rangle \\langle 0|\n$$\n\nのように書ける。このことを覚えておけば\n\n$$\n\\langle 0| 0\\rangle = \\langle 1 | 1\\rangle = 1, \\;\\;\\; \\langle 0 | 1 \\rangle = \\langle 1 | 0 \\rangle = 0\n$$\n\nから \n\n$$\nX |0\\rangle = |1\\rangle\n$$\n\nを行列を書かずに計算できるようになる。 \n**量子情報の解析計算においては、実際にベクトルの要素を書き下して計算をすることはほとんどなく、このようにブラケットを使って形式的に書いて計算してしまう場合が多い**(古典計算機上で量子コンピュータをシミュレーションする場合はベクトルをすべて書き下すことになる)。\n\n同様に、\n\n$$\nI = |0\\rangle \\langle 0 | + |1\\rangle \\langle 1| , \\;\\;\\; Z = |0\\rangle \\langle 0| - |1\\rangle 
\\langle 1| \n$$\n\nも覚えておくと便利である。\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
cb9e75ee08d7eba4ddcce18d8ce60b26cc7013c1
144,595
ipynb
Jupyter Notebook
examples/Convolutional Variational AE.ipynb
HenryJia/seya
a48bfc3af54dace95a399ace3a6266afe64b3280
[ "BSD-3-Clause" ]
429
2015-08-11T09:48:34.000Z
2021-07-31T15:13:23.000Z
examples/Convolutional Variational AE.ipynb
aa1607/seya
a48bfc3af54dace95a399ace3a6266afe64b3280
[ "BSD-3-Clause" ]
55
2015-09-10T11:57:58.000Z
2021-04-24T14:13:31.000Z
examples/Convolutional Variational AE.ipynb
aa1607/seya
a48bfc3af54dace95a399ace3a6266afe64b3280
[ "BSD-3-Clause" ]
135
2015-08-31T17:52:26.000Z
2022-02-07T05:31:12.000Z
77.739247
112
0.814233
[ [ [ "%matplotlib inline\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nimport matplotlib.pyplot as plt\nimport numpy as np\nnp.random.seed(1337) # for reproducibility\n\nfrom theano import function\n\nfrom keras.datasets import mnist\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense, Dropout, Activation, Flatten, Reshape, Layer\nfrom keras.layers.convolutional import Convolution2D, MaxPooling2D, UpSampling2D\nfrom keras.utils import np_utils\nfrom keras import backend as K\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.regularizers import l2\n\nfrom seya.layers.variational import VariationalDense as VAE\nfrom seya.layers.convolutional import GlobalPooling2D\nfrom seya.utils import apply_model\n\nfrom agnez import grid2d\n\nbatch_size = 100\nnb_epoch = 100\ncode_size = 200\n\n# input image dimensions\nimg_rows, img_cols = 28, 28\n# number of convolutional filters to use\nnb_filters = 32\n# size of pooling area for max pooling\nnb_pool = 2\n# convolution kernel size\nnb_conv = 7\nnb_classes = 10\n\n# the data, shuffled and split between tran and test sets\n(X_train, y_train), (X_test, y_test) = mnist.load_data()\n\nX_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)\nX_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)\nX_train = X_train.astype(\"float32\")\nX_test = X_test.astype(\"float32\")\nX_train /= 255\nX_test /= 255\n\nprint('X_train shape:', X_train.shape)\nprint(X_train.shape[0], 'train samples')\nprint(X_test.shape[0], 'test samples')\n\n# convert class vectors to binary class matrices\nY_train = np_utils.to_categorical(y_train, nb_classes)\nY_test = np_utils.to_categorical(y_test, nb_classes)\n\nX_valid = X_train[50000:]\nY_valid = Y_train[50000:]\nX_train = X_train[:50000]\nY_train = Y_train[:50000]", "X_train shape: (60000, 1, 28, 28)\n60000 train samples\n10000 test samples\n" ], [ "enc = Sequential()\nenc.add(Convolution2D(nb_filters, nb_conv, nb_conv,\n W_regularizer=l2(.0005),\n border_mode='same',\n input_shape=(1, img_rows, img_cols)))\nenc.add(Dropout(.5))\nenc.add(Activation('relu'))\nenc.add(Convolution2D(nb_filters, 3, 3,\n border_mode='same',\n input_shape=(1, img_rows, img_cols)))\nenc.add(Activation('tanh'))\nenc.add(MaxPooling2D())\nenc.add(Flatten())\n\npool_shape = enc.output_shape\n\nenc.add(VAE(code_size, batch_size=batch_size, activation='tanh',\n prior_logsigma=1.7))\n# enc.add(Activation(soft_threshold))", "_____no_output_____" ], [ "dec = Sequential()\ndec.add(Dense(np.prod(pool_shape[1:]), input_dim=code_size))\ndec.add(Reshape((nb_filters, img_rows/2, img_cols/2)))\ndec.add(Activation('relu'))\ndec.add(Convolution2D(nb_filters, 3, 3,\n border_mode='same'))\ndec.add(Activation('relu'))\ndec.add(Convolution2D(784, 3, 3,\n border_mode='same'))\ndec.add(GlobalPooling2D())\n\ndec.add(Activation('sigmoid'))\ndec.add(Flatten())", "_____no_output_____" ], [ "model = Sequential()\nmodel.add(enc)\nmodel.add(dec)\n\nmodel.compile(loss='binary_crossentropy', optimizer='adam')", "_____no_output_____" ], [ "cbk = ModelCheckpoint('vae/vae.hdf5', save_best_only=True)\n\ntry:\n model.fit(X_train, X_train.reshape((-1, 784)), batch_size=batch_size, nb_epoch=nb_epoch, verbose=1,\n validation_data=(X_valid, X_valid.reshape((-1, 784))), callbacks=[cbk])\nexcept:\n pass", "Train on 50000 samples, validate on 10000 samples\nEpoch 1/100\n50000/50000 [==============================] - 42s - loss: 1.1112 - val_loss: 0.2634\nEpoch 2/100\n50000/50000 [==============================] - 42s 
- loss: 1.0596 - val_loss: 0.1856\nEpoch 3/100\n50000/50000 [==============================] - 42s - loss: 1.0281 - val_loss: 0.1708\nEpoch 4/100\n50000/50000 [==============================] - 42s - loss: 1.0227 - val_loss: 0.1647\nEpoch 5/100\n50000/50000 [==============================] - 42s - loss: 1.0203 - val_loss: 0.1596\nEpoch 6/100\n50000/50000 [==============================] - 42s - loss: 1.0188 - val_loss: 0.1565\nEpoch 7/100\n50000/50000 [==============================] - 40s - loss: 1.0176 - val_loss: 0.1567\nEpoch 8/100\n50000/50000 [==============================] - 37s - loss: 1.0167 - val_loss: 0.1545\nEpoch 9/100\n50000/50000 [==============================] - 39s - loss: 1.0158 - val_loss: 0.1526\nEpoch 10/100\n50000/50000 [==============================] - 43s - loss: 1.0150 - val_loss: 0.1511\nEpoch 11/100\n50000/50000 [==============================] - 43s - loss: 1.0143 - val_loss: 0.1511\nEpoch 12/100\n50000/50000 [==============================] - 43s - loss: 1.0138 - val_loss: 0.1492\nEpoch 13/100\n50000/50000 [==============================] - 43s - loss: 1.0133 - val_loss: 0.1475\nEpoch 14/100\n50000/50000 [==============================] - 43s - loss: 1.0126 - val_loss: 0.1465\nEpoch 15/100\n50000/50000 [==============================] - 43s - loss: 1.0124 - val_loss: 0.1462\nEpoch 16/100\n50000/50000 [==============================] - 43s - loss: 1.0119 - val_loss: 0.1451\nEpoch 17/100\n50000/50000 [==============================] - 43s - loss: 1.0115 - val_loss: 0.1448\nEpoch 18/100\n50000/50000 [==============================] - 42s - loss: 1.0113 - val_loss: 0.1441\nEpoch 19/100\n50000/50000 [==============================] - 37s - loss: 1.0110 - val_loss: 0.1432\nEpoch 20/100\n13400/50000 [=======>......................] - ETA: 26s - loss: 1.0110" ] ], [ [ "# Sample", "_____no_output_____" ] ], [ [ "X = K.placeholder(ndim=2)\nY = dec(X)\nF = function([X], Y, allow_input_downcast=True)", "_____no_output_____" ], [ "x = np.random.laplace(0, 1, size=(100, code_size))\ny = F(x)\nI = grid2d(y.reshape((100, -1)))\nplt.imshow(I)", "_____no_output_____" ] ], [ [ "# Visualize first layers", "_____no_output_____" ] ], [ [ "W = np.asarray(K.eval(enc.layers[0].W))\nW = W.reshape((32, -1))\nI = grid2d(W)", "_____no_output_____" ], [ "plt.imshow(I)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
cb9e82d22e7741c4b26a4b2a8b053ca384c7da8f
264,482
ipynb
Jupyter Notebook
lab2/Part1_MNIST.ipynb
jagkagd/MIT-6.S191
fe02aa381eaa0b3797011e47c5c871674cb6fe9b
[ "MIT" ]
null
null
null
lab2/Part1_MNIST.ipynb
jagkagd/MIT-6.S191
fe02aa381eaa0b3797011e47c5c871674cb6fe9b
[ "MIT" ]
null
null
null
lab2/Part1_MNIST.ipynb
jagkagd/MIT-6.S191
fe02aa381eaa0b3797011e47c5c871674cb6fe9b
[ "MIT" ]
null
null
null
260.573399
139,412
0.915794
[ [ [ "<table align=\"center\">\n <td align=\"center\"><a target=\"_blank\" href=\"http://introtodeeplearning.com\">\n <img src=\"http://introtodeeplearning.com/images/colab/mit.png\" style=\"padding-bottom:5px;\" />\n Visit MIT Deep Learning</a></td>\n <td align=\"center\"><a target=\"_blank\" href=\"https://colab.research.google.com/github/aamini/introtodeeplearning/blob/master/lab2/Part1_MNIST.ipynb\">\n <img src=\"http://introtodeeplearning.com/images/colab/colab.png?v2.0\" style=\"padding-bottom:5px;\" />Run in Google Colab</a></td>\n <td align=\"center\"><a target=\"_blank\" href=\"https://github.com/aamini/introtodeeplearning/blob/master/lab2/Part1_MNIST.ipynb\">\n <img src=\"http://introtodeeplearning.com/images/colab/github.png\" height=\"70px\" style=\"padding-bottom:5px;\" />View Source on GitHub</a></td>\n</table>\n\n# Copyright Information", "_____no_output_____" ] ], [ [ "# Copyright 2020 MIT 6.S191 Introduction to Deep Learning. All Rights Reserved.\n# \n# Licensed under the MIT License. You may not use this file except in compliance\n# with the License. Use and/or modification of this code outside of 6.S191 must\n# reference:\n#\n# © MIT 6.S191: Introduction to Deep Learning\n# http://introtodeeplearning.com\n#", "_____no_output_____" ] ], [ [ "# Laboratory 2: Computer Vision\n\n# Part 1: MNIST Digit Classification\n\nIn the first portion of this lab, we will build and train a convolutional neural network (CNN) for classification of handwritten digits from the famous [MNIST](http://yann.lecun.com/exdb/mnist/) dataset. The MNIST dataset consists of 60,000 training images and 10,000 test images. Our classes are the digits 0-9.\n\nFirst, let's download the course repository, install dependencies, and import the relevant packages we'll need for this lab.", "_____no_output_____" ] ], [ [ "# Import Tensorflow 2.0\n#%tensorflow_version 2.x\nimport tensorflow as tf \n\n#!pip install mitdeeplearning\nimport mitdeeplearning as mdl\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport random\nfrom tqdm import tqdm\n\n# Check that we are using a GPU, if not switch runtimes\n# using Runtime > Change Runtime Type > GPU\nassert len(tf.config.list_physical_devices('GPU')) > 0", "_____no_output_____" ] ], [ [ "## 1.1 MNIST dataset \n\nLet's download and load the dataset and display a few random samples from it:", "_____no_output_____" ] ], [ [ "mnist = tf.keras.datasets.mnist\n(train_images, train_labels), (test_images, test_labels) = mnist.load_data()\ntrain_images = (np.expand_dims(train_images, axis=-1)/255.).astype(np.float32)\ntrain_labels = (train_labels).astype(np.int64)\ntest_images = (np.expand_dims(test_images, axis=-1)/255.).astype(np.float32)\ntest_labels = (test_labels).astype(np.int64)", "Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz\n11493376/11490434 [==============================] - 2s 0us/step\n" ] ], [ [ "Our training set is made up of 28x28 grayscale images of handwritten digits. 
\n\nLet's visualize what some of these images and their corresponding training labels look like.", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(10,10))\nrandom_inds = np.random.choice(60000,36)\nfor i in range(36):\n plt.subplot(6,6,i+1)\n plt.xticks([])\n plt.yticks([])\n plt.grid(False)\n image_ind = random_inds[i]\n plt.imshow(np.squeeze(train_images[image_ind]), cmap=plt.cm.binary)\n plt.xlabel(train_labels[image_ind])", "_____no_output_____" ] ], [ [ "## 1.2 Neural Network for Handwritten Digit Classification\n\nWe'll first build a simple neural network consisting of two fully connected layers and apply this to the digit classification task. Our network will ultimately output a probability distribution over the 10 digit classes (0-9). This first architecture we will be building is depicted below:\n\n![alt_text](https://raw.githubusercontent.com/aamini/introtodeeplearning/master/lab2/img/mnist_2layers_arch.png \"CNN Architecture for MNIST Classification\")\n", "_____no_output_____" ], [ "### Fully connected neural network architecture\nTo define the architecture of this first fully connected neural network, we'll once again use the Keras API and define the model using the [`Sequential`](https://www.tensorflow.org/api_docs/python/tf/keras/models/Sequential) class. Note how we first use a [`Flatten`](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Flatten) layer, which flattens the input so that it can be fed into the model. \n\nIn this next block, you'll define the fully connected layers of this simple work.", "_____no_output_____" ] ], [ [ "def build_fc_model():\n fc_model = tf.keras.Sequential([\n # First define a Flatten layer\n tf.keras.layers.Flatten(),\n\n # '''TODO: Define the activation function for the first fully connected (Dense) layer.'''\n tf.keras.layers.Dense(128, activation=tf.nn.relu),\n\n # '''TODO: Define the second Dense layer to output the classification probabilities'''\n # '''TODO: Dense layer to output classification probabilities'''\n tf.keras.layers.Dense(10, activation=tf.nn.softmax)\n \n ])\n return fc_model\n\nmodel = build_fc_model()", "_____no_output_____" ] ], [ [ "As we progress through this next portion, you may find that you'll want to make changes to the architecture defined above. **Note that in order to update the model later on, you'll need to re-run the above cell to re-initialize the model. **", "_____no_output_____" ], [ "Let's take a step back and think about the network we've just created. The first layer in this network, `tf.keras.layers.Flatten`, transforms the format of the images from a 2d-array (28 x 28 pixels), to a 1d-array of 28 * 28 = 784 pixels. You can think of this layer as unstacking rows of pixels in the image and lining them up. There are no learned parameters in this layer; it only reformats the data.\n\nAfter the pixels are flattened, the network consists of a sequence of two `tf.keras.layers.Dense` layers. These are fully-connected neural layers. The first `Dense` layer has 128 nodes (or neurons). The second (and last) layer (which you've defined!) should return an array of probability scores that sum to 1. Each node contains a score that indicates the probability that the current image belongs to one of the handwritten digit classes.\n\nThat defines our fully connected model! ", "_____no_output_____" ], [ "\n\n### Compile the model\n\nBefore training the model, we need to define a few more settings. 
These are added during the model's [`compile`](https://www.tensorflow.org/api_docs/python/tf/keras/models/Sequential#compile) step:\n\n* *Loss function* — This defines how we measure how accurate the model is during training. As was covered in lecture, during training we want to minimize this function, which will \"steer\" the model in the right direction.\n* *Optimizer* — This defines how the model is updated based on the data it sees and its loss function.\n* *Metrics* — Here we can define metrics used to monitor the training and testing steps. In this example, we'll look at the *accuracy*, the fraction of the images that are correctly classified.\n\nWe'll start out by using a stochastic gradient descent (SGD) optimizer initialized with a learning rate of 0.1. Since we are performing a categorical classification task, we'll want to use the [cross entropy loss](https://www.tensorflow.org/api_docs/python/tf/keras/metrics/sparse_categorical_crossentropy).\n\nYou'll want to experiment with both the choice of optimizer and learning rate and evaluate how these affect the accuracy of the trained model. ", "_____no_output_____" ] ], [ [ "'''TODO: Experiment with different optimizers and learning rates. How do these affect\n the accuracy of the trained model? Which optimizers and/or learning rates yield\n the best performance?'''\nmodel.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=1e-1), \n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])", "_____no_output_____" ] ], [ [ "### Train the model\n\nWe're now ready to train our model, which will involve feeding the training data (`train_images` and `train_labels`) into the model, and then asking it to learn the associations between images and labels. We'll also need to define the batch size and the number of epochs, or iterations over the MNIST dataset, to use during training. \n\nIn Lab 1, we saw how we can use `GradientTape` to optimize losses and train models with stochastic gradient descent. After defining the model settings in the `compile` step, we can also accomplish training by calling the [`fit`](https://www.tensorflow.org/api_docs/python/tf/keras/models/Sequential#fit) method on an instance of the `Model` class. We will use this to train our fully connected model\n", "_____no_output_____" ] ], [ [ "# Define the batch size and the number of epochs to use during training\nBATCH_SIZE = 64\nEPOCHS = 5\n\nmodel.fit(train_images, train_labels, batch_size=BATCH_SIZE, epochs=EPOCHS)", "Epoch 1/5\n938/938 [==============================] - 1s 1ms/step - loss: 0.3685 - accuracy: 0.8964\nEpoch 2/5\n938/938 [==============================] - 1s 1ms/step - loss: 0.2011 - accuracy: 0.9425\nEpoch 3/5\n938/938 [==============================] - 1s 1ms/step - loss: 0.1516 - accuracy: 0.9571\nEpoch 4/5\n938/938 [==============================] - 1s 1ms/step - loss: 0.1231 - accuracy: 0.9654\nEpoch 5/5\n938/938 [==============================] - 1s 1ms/step - loss: 0.1038 - accuracy: 0.9709\n" ] ], [ [ "As the model trains, the loss and accuracy metrics are displayed. With five epochs and a learning rate of 0.01, this fully connected model should achieve an accuracy of approximatley 0.97 (or 97%) on the training data.", "_____no_output_____" ], [ "### Evaluate accuracy on the test dataset\n\nNow that we've trained the model, we can ask it to make predictions about a test set that it hasn't seen before. In this example, the `test_images` array comprises our test dataset. 
To evaluate accuracy, we can check to see if the model's predictions match the labels from the `test_labels` array. \n\nUse the [`evaluate`](https://www.tensorflow.org/api_docs/python/tf/keras/models/Sequential#evaluate) method to evaluate the model on the test dataset!", "_____no_output_____" ] ], [ [ "'''TODO: Use the evaluate method to test the model!'''\ntest_loss, test_acc = model.evaluate(test_images, test_labels) # TODO\n\nprint('Test accuracy:', test_acc)", "313/313 [==============================] - 0s 1ms/step - loss: 0.1053 - accuracy: 0.9692\nTest accuracy: 0.9692000150680542\n" ] ], [ [ "You may observe that the accuracy on the test dataset is a little lower than the accuracy on the training dataset. This gap between training accuracy and test accuracy is an example of *overfitting*, when a machine learning model performs worse on new data than on its training data. \n\nWhat is the highest accuracy you can achieve with this first fully connected model? Since the handwritten digit classification task is pretty straightforward, you may be wondering how we can do better...\n\n![Deeper...](https://i.kym-cdn.com/photos/images/newsfeed/000/534/153/f87.jpg)", "_____no_output_____" ], [ "## 1.3 Convolutional Neural Network (CNN) for handwritten digit classification", "_____no_output_____" ], [ "As we saw in lecture, convolutional neural networks (CNNs) are particularly well-suited for a variety of tasks in computer vision, and have achieved near-perfect accuracies on the MNIST dataset. We will now build a CNN composed of two convolutional layers and pooling layers, followed by two fully connected layers, and ultimately output a probability distribution over the 10 digit classes (0-9). The CNN we will be building is depicted below:\n\n![alt_text](https://raw.githubusercontent.com/aamini/introtodeeplearning/master/lab2/img/convnet_fig.png \"CNN Architecture for MNIST Classification\")", "_____no_output_____" ], [ "### Define the CNN model\n\nWe'll use the same training and test datasets as before, and proceed similarly as our fully connected network to define and train our new CNN model. To do this we will explore two layers we have not encountered before: you can use [`keras.layers.Conv2D` ](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Conv2D) to define convolutional layers and [`keras.layers.MaxPool2D`](https://www.tensorflow.org/api_docs/python/tf/keras/layers/MaxPool2D) to define the pooling layers. Use the parameters shown in the network architecture above to define these layers and build the CNN model.", "_____no_output_____" ] ], [ [ "def build_cnn_model():\n cnn_model = tf.keras.Sequential([\n\n # TODO: Define the first convolutional layer\n tf.keras.layers.Conv2D(filters=24, kernel_size=(3, 3), activation=tf.nn.relu), \n\n # TODO: Define the first max pooling layer\n tf.keras.layers.MaxPool2D(pool_size=(2, 2)),\n\n # TODO: Define the second convolutional layer\n tf.keras.layers.Conv2D(filters=36, kernel_size=(3, 3), activation=tf.nn.relu),\n\n # TODO: Define the second max pooling layer\n tf.keras.layers.MaxPool2D(pool_size=(2, 2)),\n\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(128, activation=tf.nn.relu),\n\n # TODO: Define the last Dense layer to output the classification \n # probabilities. 
Pay attention to the activation needed a probability\n # output\n # '''TODO: Dense layer to output classification probabilities'''\n tf.keras.layers.Dense(10, activation=tf.nn.softmax)\n ])\n \n return cnn_model\n \ncnn_model = build_cnn_model()\n# Initialize the model by passing some data through\ncnn_model.predict(train_images[[0]])\n# Print the summary of the layers in the model.\nprint(cnn_model.summary())", "Model: \"sequential_3\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nconv2d (Conv2D) (None, 26, 26, 24) 240 \n_________________________________________________________________\nmax_pooling2d (MaxPooling2D) (None, 13, 13, 24) 0 \n_________________________________________________________________\nconv2d_1 (Conv2D) (None, 11, 11, 36) 7812 \n_________________________________________________________________\nmax_pooling2d_1 (MaxPooling2 (None, 5, 5, 36) 0 \n_________________________________________________________________\nflatten_5 (Flatten) (None, 900) 0 \n_________________________________________________________________\ndense_7 (Dense) (None, 128) 115328 \n_________________________________________________________________\ndense_8 (Dense) (None, 10) 1290 \n=================================================================\nTotal params: 124,670\nTrainable params: 124,670\nNon-trainable params: 0\n_________________________________________________________________\nNone\n" ] ], [ [ "### Train and test the CNN model\n\nNow, as before, we can define the loss function, optimizer, and metrics through the `compile` method. Compile the CNN model with an optimizer and learning rate of choice:", "_____no_output_____" ] ], [ [ "'''TODO: Define the compile operation with your optimizer and learning rate of choice'''\ncnn_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-4), loss='sparse_categorical_crossentropy', metrics=['accuracy']) # TODO", "_____no_output_____" ] ], [ [ "As was the case with the fully connected model, we can train our CNN using the `fit` method via the Keras API.", "_____no_output_____" ] ], [ [ "'''TODO: Use model.fit to train the CNN model, with the same batch_size and number of epochs previously used.'''\ncnn_model.fit(train_images, train_labels, batch_size=BATCH_SIZE, epochs=EPOCHS)", "Epoch 1/5\n938/938 [==============================] - 4s 5ms/step - loss: 0.0242 - accuracy: 0.9930\nEpoch 2/5\n938/938 [==============================] - 4s 5ms/step - loss: 0.0170 - accuracy: 0.9948\nEpoch 3/5\n938/938 [==============================] - 4s 5ms/step - loss: 0.0136 - accuracy: 0.9956\nEpoch 4/5\n938/938 [==============================] - 4s 5ms/step - loss: 0.0115 - accuracy: 0.9963\nEpoch 5/5\n938/938 [==============================] - 4s 5ms/step - loss: 0.0099 - accuracy: 0.9967\n" ] ], [ [ "Great! 
Now that we've trained the model, let's evaluate it on the test dataset using the [`evaluate`](https://www.tensorflow.org/api_docs/python/tf/keras/models/Sequential#evaluate) method:", "_____no_output_____" ] ], [ [ "'''TODO: Use the evaluate method to test the model!'''\ntest_loss, test_acc = cnn_model.evaluate(test_images, test_labels) # TODO\n\nprint('Test accuracy:', test_acc)", "313/313 [==============================] - 1s 2ms/step - loss: 0.0528 - accuracy: 0.9878\nTest accuracy: 0.9878000020980835\n" ] ], [ [ "What is the highest accuracy you're able to achieve using the CNN model, and how does the accuracy of the CNN model compare to the accuracy of the simple fully connected network? What optimizers and learning rates seem to be optimal for training the CNN model? ", "_____no_output_____" ], [ "### Make predictions with the CNN model\n\nWith the model trained, we can use it to make predictions about some images. The [`predict`](https://www.tensorflow.org/api_docs/python/tf/keras/models/Sequential#predict) function call generates the output predictions given a set of input samples.\n", "_____no_output_____" ] ], [ [ "predictions = cnn_model.predict(test_images)", "_____no_output_____" ] ], [ [ "With this function call, the model has predicted the label for each image in the testing set. Let's take a look at the prediction for the first image in the test dataset:", "_____no_output_____" ] ], [ [ "predictions[0]", "_____no_output_____" ] ], [ [ "As you can see, a prediction is an array of 10 numbers. Recall that the output of our model is a probability distribution over the 10 digit classes. Thus, these numbers describe the model's \"confidence\" that the image corresponds to each of the 10 different digits. \n\nLet's look at the digit that has the highest confidence for the first image in the test dataset:", "_____no_output_____" ] ], [ [ "'''TODO: identify the digit with the highest confidence prediction for the first\n image in the test dataset. '''\nprediction = np.argmax(predictions[0]) # TODO\n\nprint(prediction)", "7\n" ] ], [ [ "So, the model is most confident that this image is a \"???\". We can check the test label (remember, this is the true identity of the digit) to see if this prediction is correct:", "_____no_output_____" ] ], [ [ "print(\"Label of this digit is:\", test_labels[0])\nplt.imshow(test_images[0,:,:,0], cmap=plt.cm.binary)", "Label of this digit is: 7\n" ] ], [ [ "It is! Let's visualize the classification results on the MNIST dataset. We will plot images from the test dataset along with their predicted label, as well as a histogram that provides the prediction probabilities for each of the digits:", "_____no_output_____" ] ], [ [ "#@title Change the slider to look at the model's predictions! { run: \"auto\" }\n\nimage_index = 79 #@param {type:\"slider\", min:0, max:100, step:1}\nplt.subplot(1,2,1)\nmdl.lab2.plot_image_prediction(image_index, predictions, test_labels, test_images)\nplt.subplot(1,2,2)\nmdl.lab2.plot_value_prediction(image_index, predictions, test_labels)", "_____no_output_____" ] ], [ [ "We can also plot several images along with their predictions, where correct prediction labels are blue and incorrect prediction labels are red. The number gives the percent confidence (out of 100) for the predicted label. 
Note the model can be very confident in an incorrect prediction!", "_____no_output_____" ] ], [ [ "# Plots the first X test images, their predicted label, and the true label\n# Color correct predictions in blue, incorrect predictions in red\nnum_rows = 5\nnum_cols = 4\nnum_images = num_rows*num_cols\nplt.figure(figsize=(2*2*num_cols, 2*num_rows))\nfor i in range(num_images):\n plt.subplot(num_rows, 2*num_cols, 2*i+1)\n mdl.lab2.plot_image_prediction(i, predictions, test_labels, test_images)\n plt.subplot(num_rows, 2*num_cols, 2*i+2)\n mdl.lab2.plot_value_prediction(i, predictions, test_labels)\n", "_____no_output_____" ] ], [ [ "## 1.4 Training the model 2.0\n\nEarlier in the lab, we used the [`fit`](https://www.tensorflow.org/api_docs/python/tf/keras/models/Sequential#fit) function call to train the model. This function is quite high-level and intuitive, which is really useful for simpler models. As you may be able to tell, this function abstracts away many details in the training call, and we have less control over training model, which could be useful in other contexts. \n\nAs an alternative to this, we can use the [`tf.GradientTape`](https://www.tensorflow.org/api_docs/python/tf/GradientTape) class to record differentiation operations during training, and then call the [`tf.GradientTape.gradient`](https://www.tensorflow.org/api_docs/python/tf/GradientTape#gradient) function to actually compute the gradients. You may recall seeing this in Lab 1 Part 1, but let's take another look at this here.\n\nWe'll use this framework to train our `cnn_model` using stochastic gradient descent.", "_____no_output_____" ] ], [ [ "# Rebuild the CNN model\ncnn_model = build_cnn_model()\n\nbatch_size = 12\nloss_history = mdl.util.LossHistory(smoothing_factor=0.95) # to record the evolution of the loss\nplotter = mdl.util.PeriodicPlotter(sec=2, xlabel='Iterations', ylabel='Loss', scale='semilogy')\noptimizer = tf.keras.optimizers.SGD(learning_rate=1e-2) # define our optimizer\n\nif hasattr(tqdm, '_instances'): tqdm._instances.clear() # clear if it exists\n\nfor idx in tqdm(range(0, train_images.shape[0], batch_size)):\n # First grab a batch of training data and convert the input images to tensors\n (images, labels) = (train_images[idx:idx+batch_size], train_labels[idx:idx+batch_size])\n images = tf.convert_to_tensor(images, dtype=tf.float32)\n\n # GradientTape to record differentiation operations\n with tf.GradientTape() as tape:\n #'''TODO: feed the images into the model and obtain the predictions'''\n logits = cnn_model(images) # TODO\n\n #'''TODO: compute the categorical cross entropy loss\n loss_value = tf.keras.backend.sparse_categorical_crossentropy(labels, logits) # TODO\n\n loss_history.append(loss_value.numpy().mean()) # append the loss to the loss_history record\n plotter.plot(loss_history.get())\n\n # Backpropagation\n '''TODO: Use the tape to compute the gradient against all parameters in the CNN model.\n Use cnn_model.trainable_variables to access these parameters.''' \n grads = tape.gradient(loss_value, cnn_model.trainable_variables) # TODO\n optimizer.apply_gradients(zip(grads, cnn_model.trainable_variables))\n", "_____no_output_____" ] ], [ [ "## 1.5 Conclusion\nIn this part of the lab, you had the chance to play with different MNIST classifiers with different architectures (fully-connected layers only, CNN), and experiment with how different hyperparameters affect accuracy (learning rate, etc.). 
The next part of the lab explores another application of CNNs, facial detection, and some drawbacks of AI systems in real world applications, like issues of bias. ", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
cb9e9adf72189378c07d720ba800cb9c90939df1
196,755
ipynb
Jupyter Notebook
examples/migs_TensorTrade_Tutorial.ipynb
carlomigs/tensortrade
525cd704e9e38223fa7aad091851f92fbc214f74
[ "Apache-2.0" ]
null
null
null
examples/migs_TensorTrade_Tutorial.ipynb
carlomigs/tensortrade
525cd704e9e38223fa7aad091851f92fbc214f74
[ "Apache-2.0" ]
null
null
null
examples/migs_TensorTrade_Tutorial.ipynb
carlomigs/tensortrade
525cd704e9e38223fa7aad091851f92fbc214f74
[ "Apache-2.0" ]
null
null
null
82.600756
20,542
0.681685
[ [ [ "<a href=\"https://colab.research.google.com/github/carlomigs/tensortrade/blob/master/examples/migs_TensorTrade_Tutorial.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ] ], [ [ "import os\nimport sys\nimport warnings\nimport numpy\n\ndef warn(*args, **kwargs):\n pass\n\nwarnings.warn = warn\nwarnings.simplefilter(action='ignore', category=FutureWarning)\nnumpy.seterr(divide = 'ignore') \n\nsys.path.append(os.path.dirname(os.path.abspath('')))", "_____no_output_____" ], [ "# copy in datasets from btgym\n!git clone https://github.com/Kismuz/btgym.git", "Cloning into 'btgym'...\nremote: Enumerating objects: 24, done.\u001b[K\nremote: Counting objects: 100% (24/24), done.\u001b[K\nremote: Compressing objects: 100% (22/22), done.\u001b[K\nremote: Total 7001 (delta 4), reused 8 (delta 2), pack-reused 6977\u001b[K\nReceiving objects: 100% (7001/7001), 126.71 MiB | 11.78 MiB/s, done.\nResolving deltas: 100% (5172/5172), done.\nChecking out files: 100% (348/348), done.\n" ], [ "!git clone https://github.com/notadamking/tensortrade.git", "Cloning into 'tensortrade'...\nremote: Enumerating objects: 1, done.\u001b[K\nremote: Counting objects: 100% (1/1), done.\u001b[K\nremote: Total 3385 (delta 0), reused 0 (delta 0), pack-reused 3384\u001b[K\nReceiving objects: 100% (3385/3385), 31.68 MiB | 8.12 MiB/s, done.\nResolving deltas: 100% (2028/2028), done.\n" ], [ "!pip install -e /content/tensortrade", "Obtaining file:///content/tensortrade\nCollecting numpy==1.16.4\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/87/2d/e4656149cbadd3a8a0369fcd1a9c7d61cc7b87b3903b85389c70c989a696/numpy-1.16.4-cp36-cp36m-manylinux1_x86_64.whl (17.3MB)\n\u001b[K |████████████████████████████████| 17.3MB 469kB/s \n\u001b[?25hCollecting pandas==0.25.0\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/1d/9a/7eb9952f4b4d73fbd75ad1d5d6112f407e695957444cb695cbb3cdab918a/pandas-0.25.0-cp36-cp36m-manylinux1_x86_64.whl (10.5MB)\n\u001b[K |████████████████████████████████| 10.5MB 27.1MB/s \n\u001b[?25hCollecting gym==0.14.0\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/61/75/9e841bc2bc75128e0b65c3d5255d0bd16becb9d8f7120b965d41b8e70041/gym-0.14.0.tar.gz (1.6MB)\n\u001b[K |████████████████████████████████| 1.6MB 28.1MB/s \n\u001b[?25hCollecting pyyaml==5.1.2\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/e3/e8/b3212641ee2718d556df0f23f78de8303f068fe29cdaa7a91018849582fe/PyYAML-5.1.2.tar.gz (265kB)\n\u001b[K |████████████████████████████████| 266kB 42.1MB/s \n\u001b[?25hRequirement already satisfied: pytz>=2017.2 in /usr/local/lib/python3.6/dist-packages (from pandas==0.25.0->tensortrade==0.0.2rc0) (2018.9)\nRequirement already satisfied: python-dateutil>=2.6.1 in /usr/local/lib/python3.6/dist-packages (from pandas==0.25.0->tensortrade==0.0.2rc0) (2.6.1)\nRequirement already satisfied: scipy in /usr/local/lib/python3.6/dist-packages (from gym==0.14.0->tensortrade==0.0.2rc0) (1.3.1)\nRequirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from gym==0.14.0->tensortrade==0.0.2rc0) (1.12.0)\nRequirement already satisfied: pyglet<=1.3.2,>=1.2.0 in /usr/local/lib/python3.6/dist-packages (from gym==0.14.0->tensortrade==0.0.2rc0) (1.3.2)\nRequirement already satisfied: cloudpickle~=1.2.0 in /usr/local/lib/python3.6/dist-packages (from gym==0.14.0->tensortrade==0.0.2rc0) (1.2.2)\nRequirement already satisfied: future in 
/usr/local/lib/python3.6/dist-packages (from pyglet<=1.3.2,>=1.2.0->gym==0.14.0->tensortrade==0.0.2rc0) (0.16.0)\nBuilding wheels for collected packages: gym, pyyaml\n Building wheel for gym (setup.py) ... \u001b[?25l\u001b[?25hdone\n Created wheel for gym: filename=gym-0.14.0-cp36-none-any.whl size=1637526 sha256=d35d93141cd1321bdcdc1167bd637e4ec8bfdd5e8c9772458cbeb06400a213af\n Stored in directory: /root/.cache/pip/wheels/7e/53/f6/c0cd3c9bf953f35c0aee7fa62ea209371e92f5e5cced3245ba\n Building wheel for pyyaml (setup.py) ... \u001b[?25l\u001b[?25hdone\n Created wheel for pyyaml: filename=PyYAML-5.1.2-cp36-cp36m-linux_x86_64.whl size=44104 sha256=38c6fb93858e2e1cc660bbf447a141fbe04db76fa3255feb52aeb9861f00a71e\n Stored in directory: /root/.cache/pip/wheels/d9/45/dd/65f0b38450c47cf7e5312883deb97d065e030c5cca0a365030\nSuccessfully built gym pyyaml\n\u001b[31mERROR: datascience 0.10.6 has requirement folium==0.2.1, but you'll have folium 0.8.3 which is incompatible.\u001b[0m\n\u001b[31mERROR: albumentations 0.1.12 has requirement imgaug<0.2.7,>=0.2.5, but you'll have imgaug 0.2.9 which is incompatible.\u001b[0m\nInstalling collected packages: numpy, pandas, gym, pyyaml, tensortrade\n Found existing installation: numpy 1.17.3\n Uninstalling numpy-1.17.3:\n Successfully uninstalled numpy-1.17.3\n Found existing installation: pandas 0.25.3\n Uninstalling pandas-0.25.3:\n Successfully uninstalled pandas-0.25.3\n Found existing installation: gym 0.15.3\n Uninstalling gym-0.15.3:\n Successfully uninstalled gym-0.15.3\n Found existing installation: PyYAML 3.13\n Uninstalling PyYAML-3.13:\n Successfully uninstalled PyYAML-3.13\n Running setup.py develop for tensortrade\nSuccessfully installed gym-0.14.0 numpy-1.16.4 pandas-0.25.0 pyyaml-5.1.2 tensortrade\n" ], [ "import os\nos.getcwd()\n#%cd /content/tensortrade\n%ls", "\u001b[0m\u001b[01;34mbtgym\u001b[0m/ \u001b[01;34msample_data\u001b[0m/ \u001b[01;34mtensortrade\u001b[0m/\n" ], [ "!pip install -e tensortrade[tf,tensorforce,baselines,ccxt,fbm] -U", "Obtaining file:///content/tensortrade\nRequirement already satisfied, skipping upgrade: numpy==1.16.4 in /usr/local/lib/python3.6/dist-packages (from tensortrade==0.0.2rc0) (1.16.4)\nRequirement already satisfied, skipping upgrade: pandas==0.25.0 in /usr/local/lib/python3.6/dist-packages (from tensortrade==0.0.2rc0) (0.25.0)\nRequirement already satisfied, skipping upgrade: gym==0.14.0 in /usr/local/lib/python3.6/dist-packages (from tensortrade==0.0.2rc0) (0.14.0)\nRequirement already satisfied, skipping upgrade: pyyaml==5.1.2 in /usr/local/lib/python3.6/dist-packages (from tensortrade==0.0.2rc0) (5.1.2)\nCollecting stable-baselines==2.8.0\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/da/75/6f92ac720de62be8c16ec652d22c9296a90d857cd636d043de16de8128ab/stable_baselines-2.8.0-py3-none-any.whl (222kB)\n\u001b[K |████████████████████████████████| 225kB 2.6MB/s \n\u001b[?25hRequirement already satisfied, skipping upgrade: mpi4py in /usr/local/lib/python3.6/dist-packages (from tensortrade==0.0.2rc0) (3.0.2)\nCollecting ccxt==1.18.1220\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/a6/9e/788c7a99d83fc037224b3d6decf08840d516359f7019b34bf54b475829a1/ccxt-1.18.1220-py2.py3-none-any.whl (1.4MB)\n\u001b[K |████████████████████████████████| 1.4MB 39.2MB/s \n\u001b[?25hCollecting stochastic==0.4.0\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/62/fb/1bdf5bef80bb31369306290c308371a64aebf825029520602570135ea7f1/stochastic-0.4.0-py2.py3-none-any.whl (43kB)\n\u001b[K 
|████████████████████████████████| 51kB 7.3MB/s \n\u001b[?25hCollecting tensorforce==0.5.2\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/d7/7b/cebafd140baec0f452543d01494beaae152d90d988545188798a031bde41/Tensorforce-0.5.2-py3-none-any.whl (308kB)\n\u001b[K |████████████████████████████████| 317kB 50.6MB/s \n\u001b[?25hCollecting tensorflow==1.13.1\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/77/63/a9fa76de8dffe7455304c4ed635be4aa9c0bacef6e0633d87d5f54530c5c/tensorflow-1.13.1-cp36-cp36m-manylinux1_x86_64.whl (92.5MB)\n\u001b[K |████████████████████████████████| 92.5MB 101kB/s \n\u001b[?25hRequirement already satisfied, skipping upgrade: python-dateutil>=2.6.1 in /usr/local/lib/python3.6/dist-packages (from pandas==0.25.0->tensortrade==0.0.2rc0) (2.6.1)\nRequirement already satisfied, skipping upgrade: pytz>=2017.2 in /usr/local/lib/python3.6/dist-packages (from pandas==0.25.0->tensortrade==0.0.2rc0) (2018.9)\nRequirement already satisfied, skipping upgrade: scipy in /usr/local/lib/python3.6/dist-packages (from gym==0.14.0->tensortrade==0.0.2rc0) (1.3.1)\nRequirement already satisfied, skipping upgrade: cloudpickle~=1.2.0 in /usr/local/lib/python3.6/dist-packages (from gym==0.14.0->tensortrade==0.0.2rc0) (1.2.2)\nRequirement already satisfied, skipping upgrade: pyglet<=1.3.2,>=1.2.0 in /usr/local/lib/python3.6/dist-packages (from gym==0.14.0->tensortrade==0.0.2rc0) (1.3.2)\nRequirement already satisfied, skipping upgrade: six in /usr/local/lib/python3.6/dist-packages (from gym==0.14.0->tensortrade==0.0.2rc0) (1.12.0)\nRequirement already satisfied, skipping upgrade: opencv-python in /usr/local/lib/python3.6/dist-packages (from stable-baselines==2.8.0->tensortrade==0.0.2rc0) (3.4.7.28)\nRequirement already satisfied, skipping upgrade: matplotlib in /usr/local/lib/python3.6/dist-packages (from stable-baselines==2.8.0->tensortrade==0.0.2rc0) (3.1.1)\nRequirement already satisfied, skipping upgrade: joblib in /usr/local/lib/python3.6/dist-packages (from stable-baselines==2.8.0->tensortrade==0.0.2rc0) (0.14.0)\nCollecting cryptography>=2.6.1\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/ca/9a/7cece52c46546e214e10811b36b2da52ce1ea7fa203203a629b8dfadad53/cryptography-2.8-cp34-abi3-manylinux2010_x86_64.whl (2.3MB)\n\u001b[K |████████████████████████████████| 2.3MB 43.4MB/s \n\u001b[?25hRequirement already satisfied, skipping upgrade: setuptools>=38.5.1 in /usr/local/lib/python3.6/dist-packages (from ccxt==1.18.1220->tensortrade==0.0.2rc0) (41.4.0)\nRequirement already satisfied, skipping upgrade: certifi>=2018.1.18 in /usr/local/lib/python3.6/dist-packages (from ccxt==1.18.1220->tensortrade==0.0.2rc0) (2019.9.11)\nRequirement already satisfied, skipping upgrade: requests>=2.18.4 in /usr/local/lib/python3.6/dist-packages (from ccxt==1.18.1220->tensortrade==0.0.2rc0) (2.21.0)\nCollecting yarl==1.1.0; python_version >= \"3.5.2\"\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/9a/fe/1fc0d9e277f1ff0d29ac44742f769c55f12cc7e66a89ab340f5321937179/yarl-1.1.0-cp36-cp36m-manylinux1_x86_64.whl (255kB)\n\u001b[K |████████████████████████████████| 256kB 52.9MB/s \n\u001b[?25hCollecting aiodns==1.1.1; python_version >= \"3.5.2\"\n Downloading https://files.pythonhosted.org/packages/bd/f5/b69cb930fd5ab0569396659afe3f3c0d37d4098e5d0ba6afdf6fd9388cb0/aiodns-1.1.1-py2.py3-none-any.whl\nCollecting aiohttp>=3.0.1; python_version >= \"3.5.2\"\n\u001b[?25l Downloading 
https://files.pythonhosted.org/packages/7c/39/7eb5f98d24904e0f6d3edb505d4aa60e3ef83c0a58d6fe18244a51757247/aiohttp-3.6.2-cp36-cp36m-manylinux1_x86_64.whl (1.2MB)\n\u001b[K |████████████████████████████████| 1.2MB 41.9MB/s \n\u001b[?25hRequirement already satisfied, skipping upgrade: pytest in /usr/local/lib/python3.6/dist-packages (from tensorforce==0.5.2->tensortrade==0.0.2rc0) (3.6.4)\nRequirement already satisfied, skipping upgrade: tqdm in /usr/local/lib/python3.6/dist-packages (from tensorforce==0.5.2->tensortrade==0.0.2rc0) (4.28.1)\nRequirement already satisfied, skipping upgrade: gast>=0.2.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow==1.13.1->tensortrade==0.0.2rc0) (0.2.2)\nRequirement already satisfied, skipping upgrade: wheel>=0.26 in /usr/local/lib/python3.6/dist-packages (from tensorflow==1.13.1->tensortrade==0.0.2rc0) (0.33.6)\nRequirement already satisfied, skipping upgrade: grpcio>=1.8.6 in /usr/local/lib/python3.6/dist-packages (from tensorflow==1.13.1->tensortrade==0.0.2rc0) (1.15.0)\nCollecting tensorflow-estimator<1.14.0rc0,>=1.13.0\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/bb/48/13f49fc3fa0fdf916aa1419013bb8f2ad09674c275b4046d5ee669a46873/tensorflow_estimator-1.13.0-py2.py3-none-any.whl (367kB)\n\u001b[K |████████████████████████████████| 368kB 49.2MB/s \n\u001b[?25hCollecting tensorboard<1.14.0,>=1.13.0\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/0f/39/bdd75b08a6fba41f098b6cb091b9e8c7a80e1b4d679a581a0ccd17b10373/tensorboard-1.13.1-py3-none-any.whl (3.2MB)\n\u001b[K |████████████████████████████████| 3.2MB 37.6MB/s \n\u001b[?25hRequirement already satisfied, skipping upgrade: absl-py>=0.1.6 in /usr/local/lib/python3.6/dist-packages (from tensorflow==1.13.1->tensortrade==0.0.2rc0) (0.8.1)\nRequirement already satisfied, skipping upgrade: keras-applications>=1.0.6 in /usr/local/lib/python3.6/dist-packages (from tensorflow==1.13.1->tensortrade==0.0.2rc0) (1.0.8)\nRequirement already satisfied, skipping upgrade: astor>=0.6.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow==1.13.1->tensortrade==0.0.2rc0) (0.8.0)\nRequirement already satisfied, skipping upgrade: keras-preprocessing>=1.0.5 in /usr/local/lib/python3.6/dist-packages (from tensorflow==1.13.1->tensortrade==0.0.2rc0) (1.1.0)\nRequirement already satisfied, skipping upgrade: protobuf>=3.6.1 in /usr/local/lib/python3.6/dist-packages (from tensorflow==1.13.1->tensortrade==0.0.2rc0) (3.10.0)\nRequirement already satisfied, skipping upgrade: termcolor>=1.1.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow==1.13.1->tensortrade==0.0.2rc0) (1.1.0)\nRequirement already satisfied, skipping upgrade: future in /usr/local/lib/python3.6/dist-packages (from pyglet<=1.3.2,>=1.2.0->gym==0.14.0->tensortrade==0.0.2rc0) (0.16.0)\nRequirement already satisfied, skipping upgrade: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib->stable-baselines==2.8.0->tensortrade==0.0.2rc0) (2.4.2)\nRequirement already satisfied, skipping upgrade: cycler>=0.10 in /usr/local/lib/python3.6/dist-packages (from matplotlib->stable-baselines==2.8.0->tensortrade==0.0.2rc0) (0.10.0)\nRequirement already satisfied, skipping upgrade: kiwisolver>=1.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib->stable-baselines==2.8.0->tensortrade==0.0.2rc0) (1.1.0)\nRequirement already satisfied, skipping upgrade: cffi!=1.11.3,>=1.8 in /usr/local/lib/python3.6/dist-packages (from 
cryptography>=2.6.1->ccxt==1.18.1220->tensortrade==0.0.2rc0) (1.13.1)\nRequirement already satisfied, skipping upgrade: idna<2.9,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests>=2.18.4->ccxt==1.18.1220->tensortrade==0.0.2rc0) (2.8)\nRequirement already satisfied, skipping upgrade: urllib3<1.25,>=1.21.1 in /usr/local/lib/python3.6/dist-packages (from requests>=2.18.4->ccxt==1.18.1220->tensortrade==0.0.2rc0) (1.24.3)\nRequirement already satisfied, skipping upgrade: chardet<3.1.0,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests>=2.18.4->ccxt==1.18.1220->tensortrade==0.0.2rc0) (3.0.4)\nCollecting multidict>=4.0\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/71/cc/ceb5b8c76e7a23212b9e0353053cc35a9d86c763d852a76d9b941fe81fbc/multidict-4.5.2-cp36-cp36m-manylinux1_x86_64.whl (309kB)\n\u001b[K |████████████████████████████████| 317kB 44.4MB/s \n\u001b[?25hCollecting pycares>=1.0.0\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/88/58/0cdc4e9aeaa19d91cf33cdc7590d5a08b2c78de73fde893580b4fa63b57b/pycares-3.0.0-cp36-cp36m-manylinux1_x86_64.whl (204kB)\n\u001b[K |████████████████████████████████| 204kB 43.1MB/s \n\u001b[?25hCollecting idna-ssl>=1.0; python_version < \"3.7\"\n Downloading https://files.pythonhosted.org/packages/46/03/07c4894aae38b0de52b52586b24bf189bb83e4ddabfe2e2c8f2419eec6f4/idna-ssl-1.1.0.tar.gz\nRequirement already satisfied, skipping upgrade: typing-extensions>=3.6.5; python_version < \"3.7\" in /usr/local/lib/python3.6/dist-packages (from aiohttp>=3.0.1; python_version >= \"3.5.2\"->ccxt==1.18.1220->tensortrade==0.0.2rc0) (3.6.6)\nRequirement already satisfied, skipping upgrade: attrs>=17.3.0 in /usr/local/lib/python3.6/dist-packages (from aiohttp>=3.0.1; python_version >= \"3.5.2\"->ccxt==1.18.1220->tensortrade==0.0.2rc0) (19.3.0)\nCollecting async-timeout<4.0,>=3.0\n Downloading https://files.pythonhosted.org/packages/e1/1e/5a4441be21b0726c4464f3f23c8b19628372f606755a9d2e46c187e65ec4/async_timeout-3.0.1-py3-none-any.whl\nRequirement already satisfied, skipping upgrade: more-itertools>=4.0.0 in /usr/local/lib/python3.6/dist-packages (from pytest->tensorforce==0.5.2->tensortrade==0.0.2rc0) (7.2.0)\nRequirement already satisfied, skipping upgrade: atomicwrites>=1.0 in /usr/local/lib/python3.6/dist-packages (from pytest->tensorforce==0.5.2->tensortrade==0.0.2rc0) (1.3.0)\nRequirement already satisfied, skipping upgrade: py>=1.5.0 in /usr/local/lib/python3.6/dist-packages (from pytest->tensorforce==0.5.2->tensortrade==0.0.2rc0) (1.8.0)\nRequirement already satisfied, skipping upgrade: pluggy<0.8,>=0.5 in /usr/local/lib/python3.6/dist-packages (from pytest->tensorforce==0.5.2->tensortrade==0.0.2rc0) (0.7.1)\nCollecting mock>=2.0.0\n Downloading https://files.pythonhosted.org/packages/05/d2/f94e68be6b17f46d2c353564da56e6fb89ef09faeeff3313a046cb810ca9/mock-3.0.5-py2.py3-none-any.whl\nRequirement already satisfied, skipping upgrade: markdown>=2.6.8 in /usr/local/lib/python3.6/dist-packages (from tensorboard<1.14.0,>=1.13.0->tensorflow==1.13.1->tensortrade==0.0.2rc0) (3.1.1)\nRequirement already satisfied, skipping upgrade: werkzeug>=0.11.15 in /usr/local/lib/python3.6/dist-packages (from tensorboard<1.14.0,>=1.13.0->tensorflow==1.13.1->tensortrade==0.0.2rc0) (0.16.0)\nRequirement already satisfied, skipping upgrade: h5py in /usr/local/lib/python3.6/dist-packages (from keras-applications>=1.0.6->tensorflow==1.13.1->tensortrade==0.0.2rc0) (2.8.0)\nRequirement already satisfied, skipping upgrade: pycparser in 
/usr/local/lib/python3.6/dist-packages (from cffi!=1.11.3,>=1.8->cryptography>=2.6.1->ccxt==1.18.1220->tensortrade==0.0.2rc0) (2.19)\nBuilding wheels for collected packages: idna-ssl\n Building wheel for idna-ssl (setup.py) ... \u001b[?25l\u001b[?25hdone\n Created wheel for idna-ssl: filename=idna_ssl-1.1.0-cp36-none-any.whl size=3163 sha256=934545a5cfd145ce9a588d305a04bf01830dca7b6313d953d3a411bc143588e8\n Stored in directory: /root/.cache/pip/wheels/d3/00/b3/32d613e19e08a739751dd6bf998cfed277728f8b2127ad4eb7\nSuccessfully built idna-ssl\nInstalling collected packages: stable-baselines, cryptography, multidict, yarl, pycares, aiodns, idna-ssl, async-timeout, aiohttp, ccxt, stochastic, tensorforce, mock, tensorflow-estimator, tensorboard, tensorflow, tensortrade\n Found existing installation: stable-baselines 2.2.1\n Uninstalling stable-baselines-2.2.1:\n Successfully uninstalled stable-baselines-2.2.1\n Found existing installation: tensorflow-estimator 1.15.1\n Uninstalling tensorflow-estimator-1.15.1:\n Successfully uninstalled tensorflow-estimator-1.15.1\n Found existing installation: tensorboard 1.15.0\n Uninstalling tensorboard-1.15.0:\n Successfully uninstalled tensorboard-1.15.0\n Found existing installation: tensorflow 1.15.0\n Uninstalling tensorflow-1.15.0:\n Successfully uninstalled tensorflow-1.15.0\n Found existing installation: tensortrade 0.0.2rc0\n Can't uninstall 'tensortrade'. No files were found to uninstall.\n Running setup.py develop for tensortrade\nSuccessfully installed aiodns-1.1.1 aiohttp-3.6.2 async-timeout-3.0.1 ccxt-1.18.1220 cryptography-2.8 idna-ssl-1.1.0 mock-3.0.5 multidict-4.5.2 pycares-3.0.0 stable-baselines-2.8.0 stochastic-0.4.0 tensorboard-1.13.1 tensorflow-1.13.1 tensorflow-estimator-1.13.0 tensorforce-0.5.2 tensortrade yarl-1.1.0\n" ], [ "!pip install talib-binary", "Collecting talib-binary\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/82/6a/d8842b73121a35f58ce55da6f6b54ba2afebee16424b9df09a302e66ed51/talib_binary-0.4.19-cp36-cp36m-manylinux1_x86_64.whl (2.4MB)\n\u001b[K |████████████████████████████████| 2.4MB 2.8MB/s \n\u001b[?25hRequirement already satisfied: numpy in /usr/local/lib/python3.6/dist-packages (from talib-binary) (1.16.4)\nInstalling collected packages: talib-binary\nSuccessfully installed talib-binary-0.4.19\n" ], [ "%ls /content/btgym/examples/data/", "\u001b[0m\u001b[01;32mDAT_ASCII_EURCHF_M1_2017.csv\u001b[0m*\n\u001b[01;32mDAT_ASCII_EURGBP_M1_2017.csv\u001b[0m*\n\u001b[01;32mDAT_ASCII_EURJPY_M1_2017.csv\u001b[0m*\n\u001b[01;32mDAT_ASCII_EURUSD_M1_2010.csv\u001b[0m*\n\u001b[01;32mDAT_ASCII_EURUSD_M1_2011.csv\u001b[0m*\n\u001b[01;32mDAT_ASCII_EURUSD_M1_2012.csv\u001b[0m*\n\u001b[01;32mDAT_ASCII_EURUSD_M1_2013.csv\u001b[0m*\n\u001b[01;32mDAT_ASCII_EURUSD_M1_2014.csv\u001b[0m*\n\u001b[01;32mDAT_ASCII_EURUSD_M1_2015.csv\u001b[0m*\nDAT_ASCII_EURUSD_M1_2016.csv\n\u001b[01;32mDAT_ASCII_EURUSD_M1_201701.csv\u001b[0m*\n\u001b[01;32mDAT_ASCII_EURUSD_M1_201702.csv\u001b[0m*\nDAT_ASCII_EURUSD_M1_201703_1_10.csv\n\u001b[01;32mDAT_ASCII_EURUSD_M1_201703.csv\u001b[0m*\n\u001b[01;32mDAT_ASCII_EURUSD_M1_201704.csv\u001b[0m*\n\u001b[01;32mDAT_ASCII_EURUSD_M1_201705.csv\u001b[0m*\n\u001b[01;32mDAT_ASCII_EURUSD_M1_201706.csv\u001b[0m*\n\u001b[01;32mDAT_ASCII_EURUSD_M1_2017.csv\u001b[0m*\ntest_bent_sine_1min_period1500_300_delta0002.csv\ntest_bent_sine_1min_period_300_1500_delta0002.csv\ntest_sine_1min_period256_delta0002.csv\n" ], [ "import numpy\nimport pandas as pd\nfrom tensortrade.exchanges.simulated import 
SimulatedExchange\n\nheaders=['datetime', 'open', 'high', 'low', 'close', 'volume']\ndf = pd.read_csv('/content/btgym/examples/data/DAT_ASCII_EURUSD_M1_2017.csv',\n delimiter=';',\n names=headers)\n\nexchange = SimulatedExchange(data_frame=df, base_instrument='USD', pretransform=True)\nexchange.data_frame.tail()", "_____no_output_____" ], [ "from tensortrade.features import FeaturePipeline\nfrom tensortrade.features.scalers import MinMaxNormalizer\nfrom tensortrade.features.stationarity import FractionalDifference\nfrom tensortrade.features.indicators import SimpleMovingAverage\n\nprice_columns = [\"open\", \"high\", \"low\", \"close\"]\n\nnormalize_price = MinMaxNormalizer(price_columns, inplace=True)\nmoving_averages = SimpleMovingAverage(price_columns)\ndifference_all = FractionalDifference(difference_order=1, inplace=True)\n\nfeature_pipeline = FeaturePipeline(steps=[normalize_price,\n moving_averages,\n difference_all])\nexchange.feature_pipeline = feature_pipeline\nexchange.data_frame.tail()", "_____no_output_____" ], [ "from tensortrade.actions import DiscreteActionStrategy\n\n#action_strategy = DiscreteActionStrategy(n_actions=20, instrument_symbol='BTC')\naction_strategy = DiscreteActionStrategy(n_actions=20)", "_____no_output_____" ], [ "from tensortrade.rewards import SimpleProfitStrategy\nfrom tensortrade.rewards import RiskAdjustedReturnStrategy\n\nreward_strategy = SimpleProfitStrategy()\n#reward_strategy = RiskAdjustedReturnStrategy()", "_____no_output_____" ], [ "from tensortrade.environments import TradingEnvironment\n\nenvironment = TradingEnvironment(exchange=exchange,\n feature_pipeline=feature_pipeline,\n action_strategy=action_strategy,\n reward_strategy=reward_strategy)", "_____no_output_____" ], [ "environment.exchange.transform_data_frame()", "_____no_output_____" ], [ "from stable_baselines.common.policies import MlpLnLstmPolicy\nfrom stable_baselines import PPO2\n\nmodel = PPO2\npolicy = MlpLnLstmPolicy\nparams = { \"learning_rate\": 1e-5, 'nminibatches': 1 }", "_____no_output_____" ] ], [ [ "## Training a Strategy\n\nCreating our trading strategy is as simple as plugging in our agent and the environment.", "_____no_output_____" ], [ "MigsStableBaselinesTradingStrategy", "_____no_output_____" ] ], [ [ "#@title\nimport os\nimport gym\nimport json\n\nimport pandas as pd\nimport numpy as np\n\nfrom abc import ABCMeta, abstractmethod\nfrom typing import Union, Callable, List, Dict\n\nfrom stable_baselines.common.vec_env import DummyVecEnv\nfrom stable_baselines.common.policies import BasePolicy\nfrom stable_baselines.common.base_class import BaseRLModel\nfrom stable_baselines import DQN\n\nfrom tensortrade.environments.trading_environment import TradingEnvironment\nfrom tensortrade.strategies import TradingStrategy\n\n\nclass MigsStableBaselinesTradingStrategy(TradingStrategy):\n \"\"\"A trading strategy capable of self tuning, training, and evaluating with stable-baselines.\"\"\"\n\n def __init__(self,\n environment: TradingEnvironment,\n model: BaseRLModel = DQN,\n policy: Union[str, BasePolicy] = 'MlpPolicy',\n model_kwargs: any = {},\n **kwargs):\n \"\"\"\n Arguments:\n environment: A `TradingEnvironment` instance for the agent to trade within.\n model (optional): The RL model to create the agent with. Defaults to DQN.\n policy (optional): The RL policy to train the agent's model with. 
Defaults to 'MlpPolicy'.\n model_kwargs (optional): Any additional keyword arguments to adjust the model.\n kwargs (optional): Optional keyword arguments to adjust the strategy.\n \"\"\"\n self._model = model\n self._model_kwargs = model_kwargs\n\n self.environment = environment\n self._agent = self._model(policy, self._environment, **self._model_kwargs)\n\n @property\n def environment(self) -> 'TradingEnvironment':\n \"\"\"A `TradingEnvironment` instance for the agent to trade within.\"\"\"\n return self._environment\n\n @environment.setter\n def environment(self, environment: 'TradingEnvironment'):\n self._environment = DummyVecEnv([lambda: environment])\n\n def restore_agent(self, path: str):\n \"\"\"Deserialize the strategy's learning agent from a file.\n Arguments:\n path: The `str` path of the file the agent specification is stored in.\n \"\"\"\n self._agent = self._model.load(path, self._environment, self._model_kwargs)\n\n def save_agent(self, path: str):\n \"\"\"Serialize the learning agent to a file for restoring later.\n Arguments:\n path: The `str` path of the file to store the agent specification in.\n \"\"\"\n self._agent.save(path)\n\n def tune(self, steps: int = None, episodes: int = None, callback: Callable[[pd.DataFrame], bool] = None) -> pd.DataFrame:\n raise NotImplementedError\n\n def run(self, steps: int = None, episodes: int = None, episode_callback: Callable[[pd.DataFrame], bool] = None) -> pd.DataFrame:\n if steps is None and episodes is None:\n raise ValueError(\n 'You must set the number of `steps` or `episodes` to run the strategy.')\n\n steps_completed = 0\n episodes_completed = 0\n average_reward = 0\n\n obs, state, dones = self._environment.reset(), None, [False]\n\n performance = {}\n\n while (steps is not None and (steps == 0 or steps_completed < steps)) or (episodes is not None and episodes_completed < episodes):\n actions, state = self._agent.predict(obs, state=state, mask=dones)\n obs, rewards, dones, info = self._environment.step(actions)\n\n steps_completed += 1\n average_reward -= average_reward / steps_completed\n average_reward += rewards[0] / (steps_completed + 1)\n\n exchange_performance = info[0].get('exchange').performance\n performance = exchange_performance if len(exchange_performance) > 0 else performance\n\n if dones[0]:\n #if episode_callback is not None and episode_callback(performance):\n # break\n episodes_completed += 1\n print(\"episode #: {}\".format(episodes_completed))\n print(performance[-2:])\n obs = self._environment.reset()\n\n print(\"Finished running strategy.\")\n print(\"Total episodes: {} ({} timesteps).\".format(episodes_completed, steps_completed))\n print(\"Average reward: {}.\".format(average_reward))\n\n return performance", "_____no_output_____" ], [ "#from tensortrade.strategies import StableBaselinesTradingStrategy\n\nstrategy = MigsStableBaselinesTradingStrategy(environment=environment,\n model=model,\n policy=policy,\n model_kwargs=params)", "_____no_output_____" ] ], [ [ "Then to train the strategy (i.e. train the agent on the current environment), all we need to do is call `strategy.run()` with the total number of steps or episodes you’d like to run.\n\nIf this feedback loop is a bit slow for you, you can pass a callback function to `run`, which will be called at the end of each episode. The callback function will pass in a `data_frame` containing the agent's performance that episode, and expects a `bool` in return. 
If `True`, the agent will continue training, otherwise, the agent will stop and return its overall performance.", "_____no_output_____" ] ], [ [ "#performance = strategy.run(steps=100000)\nperformance = strategy.run(episodes=3, episode_callback=True)", "episode #: 1\n balance net_worth\n2365 5.930891 10.083227\n2366 7.974855 10.051718\nepisode #: 2\n balance net_worth\n2352 9.937687 10.075105\n2353 10.006156 10.074857\nepisode #: 3\n balance net_worth\n2347 4.385997 10.063393\n2348 8.596292 10.015410\nFinished running strategy.\nTotal episodes: 3 (9759 timesteps).\nAverage reward: -0.7086708115673862.\n" ], [ "%matplotlib inline\nperformance.net_worth.plot()", "_____no_output_____" ] ], [ [ "## BTC", "_____no_output_____" ], [ "MigsStableBaselinesTradingStrategy", "_____no_output_____" ] ], [ [ "#@title\nimport os\nimport gym\nimport json\n\nimport pandas as pd\nimport numpy as np\n\nfrom abc import ABCMeta, abstractmethod\nfrom typing import Union, Callable, List, Dict\n\nfrom stable_baselines.common.vec_env import DummyVecEnv\nfrom stable_baselines.common.policies import BasePolicy\nfrom stable_baselines.common.base_class import BaseRLModel\nfrom stable_baselines import DQN\n\nfrom tensortrade.environments.trading_environment import TradingEnvironment\nfrom tensortrade.strategies import TradingStrategy\n\n\nclass MigsStableBaselinesTradingStrategy(TradingStrategy):\n \"\"\"A trading strategy capable of self tuning, training, and evaluating with stable-baselines.\"\"\"\n\n def __init__(self,\n environment: TradingEnvironment,\n model: BaseRLModel = DQN,\n policy: Union[str, BasePolicy] = 'MlpPolicy',\n model_kwargs: any = {},\n **kwargs):\n \"\"\"\n Arguments:\n environment: A `TradingEnvironment` instance for the agent to trade within.\n model (optional): The RL model to create the agent with. Defaults to DQN.\n policy (optional): The RL policy to train the agent's model with. 
Defaults to 'MlpPolicy'.\n model_kwargs (optional): Any additional keyword arguments to adjust the model.\n kwargs (optional): Optional keyword arguments to adjust the strategy.\n \"\"\"\n self._model = model\n self._model_kwargs = model_kwargs\n\n self.environment = environment\n self._agent = self._model(policy, self._environment, **self._model_kwargs)\n\n @property\n def environment(self) -> 'TradingEnvironment':\n \"\"\"A `TradingEnvironment` instance for the agent to trade within.\"\"\"\n return self._environment\n\n @environment.setter\n def environment(self, environment: 'TradingEnvironment'):\n self._environment = DummyVecEnv([lambda: environment])\n\n def restore_agent(self, path: str):\n \"\"\"Deserialize the strategy's learning agent from a file.\n Arguments:\n path: The `str` path of the file the agent specification is stored in.\n \"\"\"\n self._agent = self._model.load(path, self._environment, self._model_kwargs)\n\n def save_agent(self, path: str):\n \"\"\"Serialize the learning agent to a file for restoring later.\n Arguments:\n path: The `str` path of the file to store the agent specification in.\n \"\"\"\n self._agent.save(path)\n\n def tune(self, steps: int = None, episodes: int = None, callback: Callable[[pd.DataFrame], bool] = None) -> pd.DataFrame:\n raise NotImplementedError\n\n def run(self, steps: int = None, episodes: int = None, episode_callback: Callable[[pd.DataFrame], bool] = None) -> pd.DataFrame:\n if steps is None and episodes is None:\n raise ValueError(\n 'You must set the number of `steps` or `episodes` to run the strategy.')\n\n steps_completed = 0\n episodes_completed = 0\n average_reward = 0\n\n obs, state, dones = self._environment.reset(), None, [False]\n\n performance = {}\n\n while (steps is not None and (steps == 0 or steps_completed < steps)) or (episodes is not None and episodes_completed < episodes):\n actions, state = self._agent.predict(obs, state=state, mask=dones)\n obs, rewards, dones, info = self._environment.step(actions)\n\n steps_completed += 1\n average_reward -= average_reward / steps_completed\n average_reward += rewards[0] / (steps_completed + 1)\n\n exchange_performance = info[0].get('exchange').performance\n performance = exchange_performance if len(exchange_performance) > 0 else performance\n\n if dones[0]:\n #if episode_callback is not None and episode_callback(performance):\n # break\n episodes_completed += 1\n print(\"episode #: {}\".format(episodes_completed))\n print(performance[-2:])\n obs = self._environment.reset()\n\n print(\"Finished running strategy.\")\n print(\"Total episodes: {} ({} timesteps).\".format(episodes_completed, steps_completed))\n print(\"Average reward: {}.\".format(average_reward))\n\n return performance", "_____no_output_____" ], [ "import talib\ntalib.get_functions()", "_____no_output_____" ], [ "from tensortrade.environments import TradingEnvironment\nfrom tensortrade.exchanges.simulated import FBMExchange\nfrom tensortrade.features.scalers import MinMaxNormalizer\nfrom tensortrade.features.stationarity import FractionalDifference\nfrom tensortrade.features import FeaturePipeline\nfrom tensortrade.rewards import SimpleProfitStrategy\nfrom tensortrade.actions import DiscreteActionStrategy\n#from tensortrade.strategies import StableBaselinesTradingStrategy\nfrom tensortrade.features.indicators import TAlibIndicator\nfrom stable_baselines.common.policies import MlpLnLstmPolicy\nfrom stable_baselines import PPO2\n\nprice_columns = [\"open\", \"high\", \"low\", \"close\"]\nnormalize = 
MinMaxNormalizer(price_columns, inplace=True)\n#moving_averages = TAlibIndicator([\"RSI\", \"BBANDS\", \"STOCH\", \"MACD\", \"CCI\", \"WILLR\"])\ndifference = FractionalDifference(difference_order=1,\n inplace=True)\nfeature_pipeline = FeaturePipeline(steps=[normalize,\n# moving_averages,\n difference])\n\nreward_strategy = SimpleProfitStrategy()\naction_strategy = DiscreteActionStrategy(n_actions=40)\n\nexchange = FBMExchange(base_instrument='BTC',\n timeframe='1h',\n pretransform=True)\n\nenvironment = TradingEnvironment(exchange=exchange,\n action_strategy=action_strategy,\n reward_strategy=reward_strategy,\n feature_pipeline=feature_pipeline)\n\nmodel = PPO2\npolicy = MlpLnLstmPolicy\nparams = { \"learning_rate\": 1e-5, 'nminibatches': 1 }\n\nstrategy = MigsStableBaselinesTradingStrategy(environment=environment,\n model=model,\n policy=policy,\n model_kwargs=params)", "_____no_output_____" ], [ "performance = strategy.run(episodes=100)", "episode #: 1\n balance net_worth\n1221 3331.175960 3717.966608\n1222 -18.361034 3630.873956\nepisode #: 2\n balance net_worth\n1196 18089.149665 75160.253837\n1197 2528.799347 60893.771944\nepisode #: 3\n balance net_worth\n1191 156723.693509 385998.408977\n1192 242940.539636 387067.942297\nepisode #: 4\n balance net_worth\n1147 10.813978 2770.690100\n1148 1382.719491 2758.751145\nepisode #: 5\n balance net_worth\n1213 851535.114182 860066.056246\n1214 852579.864045 859904.106513\nepisode #: 6\n balance net_worth\n1171 349.477814 149511.812961\n1172 18639.279654 148117.725742\nepisode #: 7\n balance net_worth\n1213 36743.381833 2.201294e+06\n1214 18681.215697 2.249550e+06\nepisode #: 8\n balance net_worth\n1229 5.675071e+06 5.888670e+06\n1230 4.974581e+06 5.875532e+06\nepisode #: 9\n balance net_worth\n1232 1475.080083 1603.397711\n1233 922.573362 1594.551279\nepisode #: 10\n balance net_worth\n1258 3342.774816 8557.700871\n1259 1288.767947 8561.509430\nepisode #: 11\n balance net_worth\n1172 211810.903902 281948.291743\n1173 237935.772341 281608.073071\nepisode #: 12\n balance net_worth\n1184 180552.805697 319480.878423\n1185 299990.569511 317104.211529\nepisode #: 13\n balance net_worth\n1172 1887.078457 2296.022643\n1173 1423.259706 2293.729828\nepisode #: 14\n balance net_worth\n1249 8.526350e+06 1.135475e+07\n1250 6.410325e+06 1.136356e+07\nepisode #: 15\n balance net_worth\n1190 7.995016e+07 4.055454e+08\n1191 7.012492e+07 4.028734e+08\nepisode #: 16\n balance net_worth\n1135 68776.394490 83545.357389\n1136 83472.059033 83472.059033\nepisode #: 17\n balance net_worth\n1212 405734.371420 405802.093174\n1213 156426.848646 405057.508182\nepisode #: 18\n balance net_worth\n1194 560.234273 1646.477286\n1195 215.263042 1537.970341\nepisode #: 19\n balance net_worth\n1200 27.360363 24453.879518\n1201 7.186149 24720.845216\nepisode #: 20\n balance net_worth\n1178 916339.691130 1.109562e+06\n1179 128239.103338 1.145597e+06\nepisode #: 21\n balance net_worth\n1182 1.456689e+06 1.322045e+08\n1183 1.099471e+06 1.279232e+08\nepisode #: 22\n balance net_worth\n1217 1089.008340 1449.617174\n1218 687.516374 1434.359357\nepisode #: 23\n balance net_worth\n1180 1.937920e+10 2.118266e+10\n1181 1.999855e+10 2.103260e+10\nepisode #: 24\n balance net_worth\n1223 29.617539 14493.434182\n1224 25.977766 14535.673977\nepisode #: 25\n balance net_worth\n1187 4.454765e+06 1.010116e+07\n1188 5.152445e+06 1.012673e+07\nepisode #: 26\n balance net_worth\n1198 6.268496e+05 8.774902e+06\n1199 7.281647e+06 8.247354e+06\nepisode #: 27\n balance net_worth\n1224 -49.092624 
535245.621414\n1225 61955.878419 497295.869620\nepisode #: 28\n balance net_worth\n1203 1.392014e+09 1.403740e+09\n1204 1.399615e+09 1.404231e+09\nepisode #: 29\n balance net_worth\n1213 1658.647505 1768.067774\n1214 29.014477 1764.853758\nepisode #: 30\n balance net_worth\n1198 19459.162137 22578.419688\n1199 2719.651111 22531.608942\nepisode #: 31\n balance net_worth\n1228 10830.444766 15249.136837\n1229 2844.586601 15309.118761\nepisode #: 32\n balance net_worth\n1210 8.963851e+06 8.963851e+06\n1211 7.862243e+06 8.960548e+06\nepisode #: 33\n balance net_worth\n1179 347.617837 10702.302975\n1180 175.807791 10883.533273\nepisode #: 34\n balance net_worth\n1215 41471.793318 49677.790566\n1216 42480.960296 49676.805821\nepisode #: 35\n balance net_worth\n1195 41.01220 3087.669950\n1196 1985.53658 3155.763737\nepisode #: 36\n balance net_worth\n1270 5185.28528 21017.736635\n1271 3258.44346 21205.032815\nepisode #: 37\n balance net_worth\n1192 2.543741e+06 3.405582e+06\n1193 6.681068e+05 3.390337e+06\n" ], [ "strategy.save_agent(path=\"tensortrade/ppo_btc_1h\")", "_____no_output_____" ], [ "%matplotlib inline\nperformance.net_worth.plot()", "_____no_output_____" ], [ "%ls /content/tensortrade/examples/data/", "Coinbase_BTCUSD_1h.csv configuration.json\nCoinbase_BTCUSD_d.csv configuration.yaml\n" ], [ "import pandas as pd\nfrom tensortrade.environments import TradingEnvironment\nfrom tensortrade.exchanges.simulated import SimulatedExchange\ndf = pd.read_csv('/content/tensortrade/examples/data/Coinbase_BTCUSD_1h.csv',skiprows=1)\nexchange = SimulatedExchange(data_frame=df, \n base_instrument='BTC',\n feature_pipeline=feature_pipeline)\nenvironment = TradingEnvironment(exchange=exchange,\n action_strategy=action_strategy,\n reward_strategy=reward_strategy)\n\nmodel = PPO2\npolicy = MlpLnLstmPolicy\nparams = { \"learning_rate\": 1e-5, 'nminibatches': 1 }\n\nnew_strategy = MigsStableBaselinesTradingStrategy(environment=environment,\n model=model,\n policy=policy,\n model_kwargs=params)\nnew_strategy.restore_agent(path=\"tensortrade/ppo_btc_1h\")\n\ntest_performance = new_strategy.run(steps=2000)", "Finished running strategy.\nTotal episodes: 0 (2000 timesteps).\nAverage reward: -0.3523745550012309.\n" ], [ "test_performance.net_worth.plot()", "_____no_output_____" ] ], [ [ "## Saving and Restoring\n\nAll trading strategies are capable of saving their agent to a file, for later restoring. The environment is not saved, as it does not have state that we care about preserving. To save our `TensorflowTradingStrategy` to a file, we just need to provide the path of the file to our strategy.", "_____no_output_____" ] ], [ [ "print(os.getcwd())\n%ls", "/content\n\u001b[0m\u001b[01;34mbtgym\u001b[0m/ \u001b[01;34msample_data\u001b[0m/ \u001b[01;34mtensortrade\u001b[0m/\n" ], [ "strategy.save_agent(path=\"ppo_btc_1h\")", "_____no_output_____" ] ], [ [ "_This specific strategy saves multiple files, including a directory of models to the path provided._\n\nTo restore the agent from the file, we first need to instantiate our strategy, before calling restore_agent.", "_____no_output_____" ] ], [ [ "new_strategy = StableBaselinesTradingStrategy(environment=environment,\n model=model,\n policy=policy,\n model_kwargs=params)\n\nnew_strategy.restore_agent(path=\"ppo_btc_1h\")", "_____no_output_____" ] ], [ [ "Our strategy is now restored back to its previous state, and ready to be used again. 
Let's see how it does.\n\n## Tuning Your Strategy\n\nSometimes a trading strategy will require tuning a set of hyper-parameters, or features, on an environment to achieve maximum performance. In this case, each `TradingStrategy` provides an optionally implementable tune method.\n\nTuning a model is similar to training a model, however in addition to adjusting and saving the weights and biases of the best performing model, the strategy also adjusts and persists the hyper-parameters that produced that model. ", "_____no_output_____" ] ], [ [ "from tensortrade.environments import TradingEnvironment\nfrom tensortrade.exchanges.simulated import FBMExchange\n\nexchange = FBMExchange(timeframe='1h', base_instrument='BTC', feature_pipeline=feature_pipeline)\nenvironment = TradingEnvironment(exchange=exchange, \n action_strategy=action_strategy, \n reward_strategy=reward_strategy) \nnew_strategy.environment = environment \ntuned_performance = new_strategy.tune(episodes=10)", "_____no_output_____" ] ], [ [ "In this case, the agent will be trained for 10 episodes, with a different set of hyper-parameters each episode. The best set will be saved within the strategy, and used any time strategy.run() is called thereafter.\n\n## Strategy Evaluation\n\nNow that we've tuned and trained our agent, it's time to see how well it performs. To evaluate our strategy's performance on unseen data, we will need to run it on a new environment backed by such data.", "_____no_output_____" ] ], [ [ "import pandas as pd\nfrom tensortrade.environments import TradingEnvironment\nfrom tensortrade.exchanges.simulated import SimulatedExchange\n\ndf = pd.read_csv('/content/tensortrade/examples/data/Coinbase_BTCUSD_d.csv', skiprows=1)\nexchange = SimulatedExchange(data_frame=df,\n feature_pipeline=feature_pipeline,\n base_instrument='USD',\n should_pretransform_obs=True)\n\nenvironment = TradingEnvironment(exchange=exchange,\n action_strategy=action_strategy,\n reward_strategy=reward_strategy)\n\nnew_strategy.environment = environment\n\ntest_performance = new_strategy.run(steps=2000)", "Finished running strategy.\nTotal episodes: 1 (2000 timesteps).\nAverage reward: -0.6791065948644133.\n" ], [ "df.tail()", "_____no_output_____" ], [ "%matplotlib inline\n\ntest_performance.net_worth.plot()", "_____no_output_____" ] ], [ [ "When complete, strategy.run returns a `Pandas.data_frame` of the agent's performance, including the net worth and balance of the agent at each time step.\n\n## Live Trading\n\nOnce you've built a profitable trading strategy, trained an agent to trade it properly, and ensured its \"generalize-ability\" to new data sets, all there is left to do is profit. Using a live exchange such as `CCXTExchange`, you can plug your strategy in and let it run!\n\nWhile the gambler in you may enjoy starting a strategy and letting it run without bounds, the more risk averse of you can use a `trade_callback`, which will be called each time the strategy makes a trade. This callback function, similar to the episode callback, will pass in a data frame containing the agent's overall performance, and expects a `bool` in return. 
If `True`, the agent will continue trading, otherwise, the agent will stop and return its performance over the session.", "_____no_output_____" ] ], [ [ "import ccxt\nfrom tensortrade.environments import TradingEnvironment\nfrom tensortrade.exchanges.live import CCXTExchange\n\nbinance = ccxt.binance({\n 'apiKey': 'HfPX38sJ2aKewYDUJx6TaWrDhuT7rq426elO5Gbc55Dvg4klASEfm0aqcdl4Mpz6',\n 'secret': 'aGdMMaaol3GkyzDcrSEoc4aFlWG78qCxnGl3o22ub24u2scrZRcsIe2qn0kI82GQ',\n 'enableRateLimit': True,\n})\n\nexchange = CCXTExchange(exchange=binance,\n base_instrument='BNB',\n observation_type='ohlcv',\n timeframe='1h')\n\nbtcusd_actions = DiscreteActionStrategy(n_actions=20, instrument_symbol='BNB/BTC')\n\nenvironment = TradingEnvironment(exchange=exchange,\n feature_pipeline=feature_pipeline,\n action_strategy=btcusd_actions,\n reward_strategy=reward_strategy)\n\nstrategy.environment = environment\n\nlive_performance = strategy.run(steps=1)", "_____no_output_____" ], [ "live_performance", "_____no_output_____" ] ], [ [ "_Passing `steps=0` instructs the strategy to run until otherwise stopped._", "_____no_output_____" ] ], [ [ "", "_____no_output_____" ] ], [ [ "https://colab.research.google.com/drive/1r9I-DJjrT-0JHbrB10NLFudZ7hQdOcdq", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
cb9ea75e65b3efcb977ec062b9232b2eee133567
16,223
ipynb
Jupyter Notebook
5. Logistic Regression/logistic_regression_iris.ipynb
haitaozhao/PRSL
c81d64d1d2968af8ba5f34ce0ecfed32007822f1
[ "MIT" ]
5
2022-02-27T08:35:44.000Z
2022-03-12T07:53:53.000Z
5. Logistic Regression/logistic_regression_iris.ipynb
haitaozhao/PRSL
c81d64d1d2968af8ba5f34ce0ecfed32007822f1
[ "MIT" ]
null
null
null
5. Logistic Regression/logistic_regression_iris.ipynb
haitaozhao/PRSL
c81d64d1d2968af8ba5f34ce0ecfed32007822f1
[ "MIT" ]
null
null
null
76.164319
11,016
0.805708
[ [ [ "from sklearn import datasets\nfrom sklearn.model_selection import train_test_split\nimport numpy as np\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "# 读入数据\niris = datasets.load_iris()\nX = iris.data \ny = iris.target\nData =np.c_[X[50:,:],np.ones([100,1])]\n\nlabel = y[50:]-1 #第二类和第三类原始的label是2,3\nlabel", "_____no_output_____" ], [ "# 划分训练集和测试集\nX_train, X_test, y_train, y_test = train_test_split(\n Data, label, test_size=0.33, random_state=42)", "_____no_output_____" ], [ "# sigmoid function\ndef my_sigmoid(w,x):\n return 1/(1+np.exp(-w.T.dot(x.T)))\n# 损失函数\ndef obj_fun(w,x,y):\n tmp = y.reshape(1,-1)*np.log(my_sigmoid(w,x)) + \\\n (1-y.reshape(1,-1))*np.log(1-my_sigmoid(w,x))\n return np.sum(-tmp)\n# 计算随机梯度的函数\ndef my_Stgrad(w,x,y):\n return (my_sigmoid(w,x) - y)*x.T\n", "_____no_output_____" ], [ "# 随机梯度下降求解逻辑斯蒂回归\n\n#初始化 \nw = np.random.random([5,1])\nalpha = 0.01 # learning rate\nobj = obj_fun(w,X_train,y_train)\nrow,_ = X_train.shape\nloss = []\nfor iter in range(100): # epoch = 100\n idx = np.random.permutation(row) # 每个epoch随机打乱数据\n for num in range(row):\n tmp_x = X_train[idx[num],:].reshape(1,-1)\n tmp_y = y_train[idx[num]]\n # 用负梯度方向更新w\n w = w - alpha*my_Stgrad(w,tmp_x,tmp_y)\n # 记录每次调整后的损失,实际是不需要的\n loss.append(obj_fun(w,X_train,y_train))\n new_obj = obj_fun(w,X_train,y_train)\n if np.abs(new_obj - obj)< 0.001:\n print('The number of iteration: %d epochs'%iter)\n break\n else:\n obj = new_obj\nprint('**-------------------------**')\nprint('The weights are:\\n')\nprint(w)\nprint('**-------------------------**')\nplt.plot(loss) ", "**-------------------------**\nThe weights are:\n\n[[-2.11103068]\n [-2.07932512]\n [ 3.09646273]\n [ 2.96635593]\n [-0.82388082]]\n**-------------------------**\n" ], [ "# 对测试数据进行预测\npred = my_sigmoid(w,X_test)>0.5\nAcc = 1 - np.sum((1*pred) !=y_test)/y_test.size\nprint('The prediction accuracy for Class 2 and Class 3 is: %.2f%%' %(Acc*100))", "The prediction accuracy for Class 2 and Class 3 is: 93.94%\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code" ] ]
cb9eb21bad79b6790ab6035aa434ba8686b4f8a1
9,590
ipynb
Jupyter Notebook
SQL DBs with Python/DB0201EN-Week3-1-1-Connecting-v4-py.ipynb
Pugnatore/IBM_Professional_Certification
f70fd8e78850eb54b7cef60d4e865df0748751e9
[ "MIT" ]
null
null
null
SQL DBs with Python/DB0201EN-Week3-1-1-Connecting-v4-py.ipynb
Pugnatore/IBM_Professional_Certification
f70fd8e78850eb54b7cef60d4e865df0748751e9
[ "MIT" ]
null
null
null
SQL DBs with Python/DB0201EN-Week3-1-1-Connecting-v4-py.ipynb
Pugnatore/IBM_Professional_Certification
f70fd8e78850eb54b7cef60d4e865df0748751e9
[ "MIT" ]
null
null
null
28.712575
412
0.569864
[ [ [ "<a href=\"https://www.bigdatauniversity.com\"><img src = \"https://ibm.box.com/shared/static/ugcqz6ohbvff804xp84y4kqnvvk3bq1g.png\" width = 300, align = \"center\"></a>\n\n<h1 align=center><font size = 5>Lab: Connect to Db2 database on Cloud using Python</font></h1>", "_____no_output_____" ], [ "# Introduction\n\nThis notebook illustrates how to access a DB2 database on Cloud using Python by following the steps below:\n1. Import the `ibm_db` Python library\n1. Enter the database connection credentials\n1. Create the database connection\n1. Close the database connection\n\n\n\n__Note:__ Please follow the instructions given in the first Lab of this course to Create a database service instance of Db2 on Cloud and retrieve your database Service Credentials.\n\n## Import the `ibm_db` Python library\n\nThe `ibm_db` [API ](https://pypi.python.org/pypi/ibm_db/) provides a variety of useful Python functions for accessing and manipulating data in an IBM® data server database, including functions for connecting to a database, preparing and issuing SQL statements, fetching rows from result sets, calling stored procedures, committing and rolling back transactions, handling errors, and retrieving metadata.\n\n\nWe first import the ibm_db library into our Python Application\n\nExecute the following cell by clicking within it and then \npress `Shift` and `Enter` keys simultaneously\n", "_____no_output_____" ] ], [ [ "import ibm_db", "_____no_output_____" ] ], [ [ "When the command above completes, the `ibm_db` library is loaded in your notebook. \n\n\n## Identify the database connection credentials\n\nConnecting to dashDB or DB2 database requires the following information:\n* Driver Name\n* Database name \n* Host DNS name or IP address \n* Host port\n* Connection protocol\n* User ID (or username)\n* User Password\n\n\n\n__Notice:__ To obtain credentials please refer to the instructions given in the first Lab of this course\n\nNow enter your database credentials below and execute the cell with `Shift` + `Enter`\n", "_____no_output_____" ] ], [ [ "#Replace the placeholder values with your actual Db2 hostname, username, and password:\ndsn_hostname = \"dashdb-txn-sbox-yp-dal09-03.services.dal.bluemix.net\" # e.g.: \"dashdb-txn-sbox-yp-dal09-04.services.dal.bluemix.net\"\ndsn_uid = \"jng27037\" # e.g. \"abc12345\"\ndsn_pwd = \"d@n08mjs9b5s6v20\" # e.g. \"7dBZ3wWt9XN6$o0J\"\n\ndsn_driver = \"{IBM DB2 ODBC DRIVER}\"\ndsn_database = \"BLUDB\" # e.g. \"BLUDB\"\ndsn_port = \"50000\" # e.g. \"50000\" \ndsn_protocol = \"TCPIP\" # i.e. \"TCPIP\"", "_____no_output_____" ] ], [ [ "## Create the DB2 database connection\n\nIbm_db API uses the IBM Data Server Driver for ODBC and CLI APIs to connect to IBM DB2 and Informix.\n\n\nLets build the dsn connection string using the credentials you entered above\n", "_____no_output_____" ] ], [ [ "#DO NOT MODIFY THIS CELL. Just RUN it with Shift + Enter\n#Create the dsn connection string\ndsn = (\n \"DRIVER={0};\"\n \"DATABASE={1};\"\n \"HOSTNAME={2};\"\n \"PORT={3};\"\n \"PROTOCOL={4};\"\n \"UID={5};\"\n \"PWD={6};\").format(dsn_driver, dsn_database, dsn_hostname, dsn_port, dsn_protocol, dsn_uid, dsn_pwd)\n\n#print the connection string to check correct values are specified\nprint(dsn)", "DRIVER={IBM DB2 ODBC DRIVER};DATABASE=BLUDB;HOSTNAME=dashdb-txn-sbox-yp-dal09-03.services.dal.bluemix.net;PORT=50000;PROTOCOL=TCPIP;UID=jng27037;PWD=d@n08mjs9b5s6v20;\n" ] ], [ [ "Now establish the connection to the database", "_____no_output_____" ] ], [ [ "#DO NOT MODIFY THIS CELL. 
Just RUN it with Shift + Enter\n#Create database connection\n\ntry:\n conn = ibm_db.connect(dsn, \"\", \"\")\n print (\"Connected to database: \", dsn_database, \"as user: \", dsn_uid, \"on host: \", dsn_hostname)\n\nexcept:\n print (\"Unable to connect: \", ibm_db.conn_errormsg() )\n", "Connected to database: BLUDB as user: jng27037 on host: dashdb-txn-sbox-yp-dal09-03.services.dal.bluemix.net\n" ] ], [ [ "Congratulations if you were able to connect successfuly. Otherwise check the error and try again.", "_____no_output_____" ] ], [ [ "#Retrieve Metadata for the Database Server\nserver = ibm_db.server_info(conn)\n\nprint (\"DBMS_NAME: \", server.DBMS_NAME)\nprint (\"DBMS_VER: \", server.DBMS_VER)\nprint (\"DB_NAME: \", server.DB_NAME)", "DBMS_NAME: DB2/LINUXX8664\nDBMS_VER: 11.01.0303\nDB_NAME: BLUDB\n" ], [ "#Retrieve Metadata for the Database Client / Driver\nclient = ibm_db.client_info(conn)\n\nprint (\"DRIVER_NAME: \", client.DRIVER_NAME) \nprint (\"DRIVER_VER: \", client.DRIVER_VER)\nprint (\"DATA_SOURCE_NAME: \", client.DATA_SOURCE_NAME)\nprint (\"DRIVER_ODBC_VER: \", client.DRIVER_ODBC_VER)\nprint (\"ODBC_VER: \", client.ODBC_VER)\nprint (\"ODBC_SQL_CONFORMANCE: \", client.ODBC_SQL_CONFORMANCE)\nprint (\"APPL_CODEPAGE: \", client.APPL_CODEPAGE)\nprint (\"CONN_CODEPAGE: \", client.CONN_CODEPAGE)", "DRIVER_NAME: libdb2.a\nDRIVER_VER: 11.01.0404\nDATA_SOURCE_NAME: BLUDB\nDRIVER_ODBC_VER: 03.51\nODBC_VER: 03.01.0000\nODBC_SQL_CONFORMANCE: EXTENDED\nAPPL_CODEPAGE: 1208\nCONN_CODEPAGE: 1208\n" ] ], [ [ "## Close the Connection\nWe free all resources by closing the connection. Remember that it is always important to close connections so that we can avoid unused connections taking up resources.", "_____no_output_____" ] ], [ [ "ibm_db.close(conn)", "_____no_output_____" ] ], [ [ "## Summary\n\nIn this tutorial you established a connection to a DB2 database on Cloud database from a Python notebook using ibm_db API. ", "_____no_output_____" ], [ "Copyright &copy; 2017 [cognitiveclass.ai](cognitiveclass.ai?utm_source=bducopyrightlink&utm_medium=dswb&utm_campaign=bdu). This notebook and its source code are released under the terms of the [MIT License](https://bigdatauniversity.com/mit-license/).\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
cb9ebe5ee9f96f4dab7fbbf89f045b0b862a36f1
72,607
ipynb
Jupyter Notebook
Basic GAN/main.ipynb
Legnica1241/tensorflow
cec31be5a6d7c38b83f5afe26ec78f13b08b4a1b
[ "Apache-2.0" ]
null
null
null
Basic GAN/main.ipynb
Legnica1241/tensorflow
cec31be5a6d7c38b83f5afe26ec78f13b08b4a1b
[ "Apache-2.0" ]
null
null
null
Basic GAN/main.ipynb
Legnica1241/tensorflow
cec31be5a6d7c38b83f5afe26ec78f13b08b4a1b
[ "Apache-2.0" ]
null
null
null
89.860149
6,168
0.568761
[ [ [ "import os", "_____no_output_____" ], [ "import numpy as np", "_____no_output_____" ], [ "import matplotlib.pyplot as plt\n%matplotlib inline", "_____no_output_____" ], [ "%reload_ext importnb\nimport GANstructure", "Using TensorFlow backend.\n" ], [ "path='../data/camel/full_numpy_bitmap_camel.npy'", "_____no_output_____" ], [ "RUN_FOLDER='run/'\nif not os.path.exists(RUN_FOLDER):\n os.mkdir(RUN_FOLDER)\n os.mkdir(os.path.join(RUN_FOLDER,'images'))\n os.mkdir(os.path.join(RUN_FOLDER,'weights'))", "_____no_output_____" ], [ "def loader(path):\n dataset_size=5000\n dataset=np.load(path)\n no_of_images=dataset.shape[0]\n dataset=dataset.reshape(no_of_images,28,28,1)\n dataset=dataset[:dataset_size]\n dataset = dataset.astype('float32') / 255.0\n '''\n test 1\n arr = np.arange(10)\n np.random.shuffle(arr)\n print(arr)\n '''\n np.random.shuffle(dataset)\n \n return dataset", "_____no_output_____" ], [ "x=loader(path)\nx=np.squeeze(x)", "_____no_output_____" ], [ "plt.imshow(x[50])\nplt.show()", "_____no_output_____" ], [ "gan = GANstructure.GAN(input_dims = (28,28,1)\n , discriminator_conv_filters = [64,64,128,128]\n , discriminator_conv_kernel_size = [5,5,5,5]\n , discriminator_conv_strides = [2,2,2,1]\n , discriminator_activation = 'relu'\n , discriminator_dropout = 0.4\n , discriminator_lr = 0.0008\n , generator_initial_dense_layer_size = (7, 7, 64)\n , generator_upsample = [2,2,1,1]\n , generator_conv_filters = [128,64, 64,1]\n , generator_conv_kernel_size = [5,5,5,5]\n , generator_conv_strides = [1,1, 1, 1]\n , generator_activation = 'relu'\n , generator_dropout = None\n , generator_lr = 0.0004\n , optimizer = 'rmsprop'\n , latent_dims = 100\n )", "_____no_output_____" ], [ "gan.discriminator.summary()", "Model: \"model_1\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ndiscriminator_input (InputLa (None, 28, 28, 1) 0 \n_________________________________________________________________\ndiscriminator_layer_0 (Conv2 (None, 14, 14, 64) 1664 \n_________________________________________________________________\nactivation_1 (Activation) (None, 14, 14, 64) 0 \n_________________________________________________________________\ndropout_1 (Dropout) (None, 14, 14, 64) 0 \n_________________________________________________________________\ndiscriminator_layer_1 (Conv2 (None, 7, 7, 64) 102464 \n_________________________________________________________________\nactivation_2 (Activation) (None, 7, 7, 64) 0 \n_________________________________________________________________\ndropout_2 (Dropout) (None, 7, 7, 64) 0 \n_________________________________________________________________\ndiscriminator_layer_2 (Conv2 (None, 4, 4, 128) 204928 \n_________________________________________________________________\nactivation_3 (Activation) (None, 4, 4, 128) 0 \n_________________________________________________________________\ndropout_3 (Dropout) (None, 4, 4, 128) 0 \n_________________________________________________________________\ndiscriminator_layer_3 (Conv2 (None, 4, 4, 128) 409728 \n_________________________________________________________________\nactivation_4 (Activation) (None, 4, 4, 128) 0 \n_________________________________________________________________\ndropout_4 (Dropout) (None, 4, 4, 128) 0 \n_________________________________________________________________\nflatten_1 (Flatten) (None, 2048) 0 \n_________________________________________________________________\ndense_1 
(Dense) (None, 1) 2049 \n=================================================================\nTotal params: 720,833\nTrainable params: 720,833\nNon-trainable params: 0\n_________________________________________________________________\n" ], [ "gan.generator.summary()", "Model: \"model_2\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ngenerator_input (InputLayer) (None, 100) 0 \n_________________________________________________________________\ndense_2 (Dense) (None, 3136) 316736 \n_________________________________________________________________\nactivation_5 (Activation) (None, 3136) 0 \n_________________________________________________________________\nreshape_1 (Reshape) (None, 7, 7, 64) 0 \n_________________________________________________________________\nup_sampling2d_1 (UpSampling2 (None, 14, 14, 64) 0 \n_________________________________________________________________\ngenerator_layer_0 (Conv2D) (None, 14, 14, 128) 204928 \n_________________________________________________________________\nactivation_6 (Activation) (None, 14, 14, 128) 0 \n_________________________________________________________________\nup_sampling2d_2 (UpSampling2 (None, 28, 28, 128) 0 \n_________________________________________________________________\ngenerator_layer_1 (Conv2D) (None, 28, 28, 64) 204864 \n_________________________________________________________________\nactivation_7 (Activation) (None, 28, 28, 64) 0 \n_________________________________________________________________\ngenerator_layer_2 (Conv2DTra (None, 28, 28, 64) 102464 \n_________________________________________________________________\nactivation_8 (Activation) (None, 28, 28, 64) 0 \n_________________________________________________________________\ngenerator_layer_3 (Conv2DTra (None, 28, 28, 1) 1601 \n_________________________________________________________________\nactivation_9 (Activation) (None, 28, 28, 1) 0 \n=================================================================\nTotal params: 830,593\nTrainable params: 830,593\nNon-trainable params: 0\n_________________________________________________________________\n" ], [ "BATCH_SIZE = 64\nEPOCHS = 6000\nPRINT_EVERY_N_BATCHES = 5", "_____no_output_____" ], [ "dataset=loader(path)", "_____no_output_____" ], [ "x_train=dataset[:4000]", "_____no_output_____" ], [ "gan.train(x_train, batch_size = BATCH_SIZE, epochs = EPOCHS, run_folder = RUN_FOLDER, \n print_every_n_batch = PRINT_EVERY_N_BATCHES\n)\n\n", "D:\\Data\\appData2\\anaconda3\\envs\\gans\\lib\\site-packages\\keras\\engine\\training.py:297: UserWarning: Discrepancy between trainable weights and collected trainable weights, did you set `model.trainable` without calling `model.compile` after ?\n 'Discrepancy between trainable weights and collected trainable'\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb9ec4ca4ddcbb381f73918f32391c1458ebe292
943,802
ipynb
Jupyter Notebook
assignment_13/Cat Dogs/CatDogs_TransferLearning.ipynb
amitbcp/tsai-vision
14a66d4c3295714fdcc97db13804ffba9d6f06cc
[ "Apache-2.0" ]
null
null
null
assignment_13/Cat Dogs/CatDogs_TransferLearning.ipynb
amitbcp/tsai-vision
14a66d4c3295714fdcc97db13804ffba9d6f06cc
[ "Apache-2.0" ]
null
null
null
assignment_13/Cat Dogs/CatDogs_TransferLearning.ipynb
amitbcp/tsai-vision
14a66d4c3295714fdcc97db13804ffba9d6f06cc
[ "Apache-2.0" ]
2
2021-07-25T10:24:11.000Z
2021-08-13T09:23:30.000Z
679.972622
882,602
0.935925
[ [ [ "<a href=\"https://colab.research.google.com/github/RajamannarAanjaram/TSAI-Assignment/blob/master/13%20ViT/Cat%20Dogs/CatDogs_TransferLearning.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ] ], [ [ "! pip install timm\n! pip install -q kaggle", "Collecting timm\n Downloading timm-0.4.12-py3-none-any.whl (376 kB)\n\u001b[K |████████████████████████████████| 376 kB 5.1 MB/s \n\u001b[?25hRequirement already satisfied: torchvision in /usr/local/lib/python3.7/dist-packages (from timm) (0.10.0+cu102)\nRequirement already satisfied: torch>=1.4 in /usr/local/lib/python3.7/dist-packages (from timm) (1.9.0+cu102)\nRequirement already satisfied: typing-extensions in /usr/local/lib/python3.7/dist-packages (from torch>=1.4->timm) (3.7.4.3)\nRequirement already satisfied: numpy in /usr/local/lib/python3.7/dist-packages (from torchvision->timm) (1.19.5)\nRequirement already satisfied: pillow>=5.3.0 in /usr/local/lib/python3.7/dist-packages (from torchvision->timm) (7.1.2)\nInstalling collected packages: timm\nSuccessfully installed timm-0.4.12\n" ], [ "import timm\nfrom pprint import pprint", "_____no_output_____" ], [ "import os\nimport random\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nimport torch \nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nfrom torchvision import datasets, transforms \nfrom torch.optim.lr_scheduler import StepLR \nfrom torch.utils.data import DataLoader, Dataset\n\n#to unzip the datasets\nimport zipfile\n\n \nimport glob \nfrom PIL import Image\nfrom itertools import chain\nfrom tqdm import tqdm \nfrom __future__ import print_function", "_____no_output_____" ], [ "from google.colab import files\nfiles.upload()", "_____no_output_____" ], [ "! mkdir ~/.kaggle\n! cp kaggle.json ~/.kaggle/\n! chmod 600 ~/.kaggle/kaggle.json", "_____no_output_____" ], [ "! 
kaggle competitions download -c 'dogs-vs-cats-redux-kernels-edition'", "Warning: Looks like you're using an outdated API Version, please consider updating (server 1.5.12 / client 1.5.4)\nDownloading train.zip to /content\n 99% 540M/544M [00:04<00:00, 186MB/s]\n100% 544M/544M [00:04<00:00, 130MB/s]\nDownloading sample_submission.csv to /content\n 0% 0.00/111k [00:00<?, ?B/s]\n100% 111k/111k [00:00<00:00, 94.8MB/s]\nDownloading test.zip to /content\n 97% 264M/271M [00:01<00:00, 157MB/s]\n100% 271M/271M [00:01<00:00, 156MB/s]\n" ], [ "from sklearn.model_selection import train_test_split ", "_____no_output_____" ], [ "#definining batch size, epocs, learning rate and gamma for training \nseed= 40\nbatch_size = 64\nepochs = 20\nlr = 3e-5\ngamma = 0.7 #for learning rate scheduler\nuse_cuda = torch.cuda.is_available()\ndevice= 'cuda:0' if use_cuda else 'cpu'\n\n#Load data\n\nos.makedirs('data', exist_ok=True)\ntrain_dir = 'data/train'\ntest_dir = 'data/test'", "_____no_output_____" ], [ "with zipfile.ZipFile('train.zip') as train_zip:\n train_zip.extractall('data')\n \nwith zipfile.ZipFile('test.zip') as test_zip:\n test_zip.extractall('data')", "_____no_output_____" ], [ "train_list = glob.glob(os.path.join(train_dir,'*.jpg'))\ntest_list = glob.glob(os.path.join(test_dir, '*.jpg'))\nprint(f\"Train Data: {len(train_list)}\")\nprint(f\"Test Data: {len(test_list)}\")\nlabels = [path.split('/')[-1].split('.')[0] for path in train_list]", "Train Data: 25000\nTest Data: 12500\n" ], [ "random_idx = np.random.randint(1, len(train_list), size=9)\nfig, axes = plt.subplots(3, 3, figsize=(16, 12))\nfor idx, ax in enumerate(axes.ravel()):\n img = Image.open(train_list[idx])\n ax.set_title(labels[idx])\n ax.imshow(img)", "_____no_output_____" ], [ "train_list, valid_list = train_test_split(train_list, \n test_size=0.2,\n stratify=labels,\n random_state=seed)\nprint(f\"Train Data: {len(train_list)}\")\nprint(f\"Validation Data: {len(valid_list)}\")\nprint(f\"Test Data: {len(test_list)}\")", "Train Data: 20000\nValidation Data: 5000\nTest Data: 12500\n" ], [ "train_transforms = transforms.Compose(\n [\n transforms.Resize((224, 224)),\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n ]\n)\nval_transforms = transforms.Compose(\n [\n transforms.Resize((224, 224)),\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n ]\n)\ntest_transforms = transforms.Compose(\n [\n transforms.Resize((224, 224)),\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n ]\n)", "_____no_output_____" ], [ "class CatsDogsDataset(Dataset):\n def __init__(self, file_list, transform=None):\n self.file_list = file_list\n self.transform = transform\n def __len__(self):\n self.filelength = len(self.file_list)\n return self.filelength\n def __getitem__(self, idx):\n img_path = self.file_list[idx]\n img = Image.open(img_path)\n img_transformed = self.transform(img)\n label = img_path.split(\"/\")[-1].split(\".\")[0]\n label = 1 if label == \"dog\" else 0\n return img_transformed, label", "_____no_output_____" ], [ "train_data = CatsDogsDataset(train_list, transform=train_transforms)\nvalid_data = CatsDogsDataset(valid_list, transform=test_transforms)\ntest_data = CatsDogsDataset(test_list, transform=test_transforms)\n\n\n\ntrain_loader = DataLoader(dataset = train_data, batch_size=batch_size, shuffle=True )\nvalid_loader = DataLoader(dataset = valid_data, batch_size=batch_size, shuffle=True)\ntest_loader 
= DataLoader(dataset = test_data, batch_size=batch_size, shuffle=True)", "_____no_output_____" ], [ "model_names = timm.list_models('*vit*')\npprint(model_names)", "['convit_base',\n 'convit_small',\n 'convit_tiny',\n 'levit_128',\n 'levit_128s',\n 'levit_192',\n 'levit_256',\n 'levit_384',\n 'vit_base_patch16_224',\n 'vit_base_patch16_224_in21k',\n 'vit_base_patch16_224_miil',\n 'vit_base_patch16_224_miil_in21k',\n 'vit_base_patch16_384',\n 'vit_base_patch32_224',\n 'vit_base_patch32_224_in21k',\n 'vit_base_patch32_384',\n 'vit_base_r26_s32_224',\n 'vit_base_r50_s16_224',\n 'vit_base_r50_s16_224_in21k',\n 'vit_base_r50_s16_384',\n 'vit_base_resnet26d_224',\n 'vit_base_resnet50_224_in21k',\n 'vit_base_resnet50_384',\n 'vit_base_resnet50d_224',\n 'vit_huge_patch14_224_in21k',\n 'vit_large_patch16_224',\n 'vit_large_patch16_224_in21k',\n 'vit_large_patch16_384',\n 'vit_large_patch32_224',\n 'vit_large_patch32_224_in21k',\n 'vit_large_patch32_384',\n 'vit_large_r50_s32_224',\n 'vit_large_r50_s32_224_in21k',\n 'vit_large_r50_s32_384',\n 'vit_small_patch16_224',\n 'vit_small_patch16_224_in21k',\n 'vit_small_patch16_384',\n 'vit_small_patch32_224',\n 'vit_small_patch32_224_in21k',\n 'vit_small_patch32_384',\n 'vit_small_r26_s32_224',\n 'vit_small_r26_s32_224_in21k',\n 'vit_small_r26_s32_384',\n 'vit_small_resnet26d_224',\n 'vit_small_resnet50d_s16_224',\n 'vit_tiny_patch16_224',\n 'vit_tiny_patch16_224_in21k',\n 'vit_tiny_patch16_384',\n 'vit_tiny_r_s16_p8_224',\n 'vit_tiny_r_s16_p8_224_in21k',\n 'vit_tiny_r_s16_p8_384']\n" ], [ "import timm\nmodel = timm.create_model('vit_base_patch16_224', pretrained=True).to(device)\nprint(model)", "VisionTransformer(\n (patch_embed): PatchEmbed(\n (proj): Conv2d(3, 768, kernel_size=(16, 16), stride=(16, 16))\n (norm): Identity()\n )\n (pos_drop): Dropout(p=0.0, inplace=False)\n (blocks): Sequential(\n (0): Block(\n (norm1): LayerNorm((768,), eps=1e-06, elementwise_affine=True)\n (attn): Attention(\n (qkv): Linear(in_features=768, out_features=2304, bias=True)\n (attn_drop): Dropout(p=0.0, inplace=False)\n (proj): Linear(in_features=768, out_features=768, bias=True)\n (proj_drop): Dropout(p=0.0, inplace=False)\n )\n (drop_path): Identity()\n (norm2): LayerNorm((768,), eps=1e-06, elementwise_affine=True)\n (mlp): Mlp(\n (fc1): Linear(in_features=768, out_features=3072, bias=True)\n (act): GELU()\n (fc2): Linear(in_features=3072, out_features=768, bias=True)\n (drop): Dropout(p=0.0, inplace=False)\n )\n )\n (1): Block(\n (norm1): LayerNorm((768,), eps=1e-06, elementwise_affine=True)\n (attn): Attention(\n (qkv): Linear(in_features=768, out_features=2304, bias=True)\n (attn_drop): Dropout(p=0.0, inplace=False)\n (proj): Linear(in_features=768, out_features=768, bias=True)\n (proj_drop): Dropout(p=0.0, inplace=False)\n )\n (drop_path): Identity()\n (norm2): LayerNorm((768,), eps=1e-06, elementwise_affine=True)\n (mlp): Mlp(\n (fc1): Linear(in_features=768, out_features=3072, bias=True)\n (act): GELU()\n (fc2): Linear(in_features=3072, out_features=768, bias=True)\n (drop): Dropout(p=0.0, inplace=False)\n )\n )\n (2): Block(\n (norm1): LayerNorm((768,), eps=1e-06, elementwise_affine=True)\n (attn): Attention(\n (qkv): Linear(in_features=768, out_features=2304, bias=True)\n (attn_drop): Dropout(p=0.0, inplace=False)\n (proj): Linear(in_features=768, out_features=768, bias=True)\n (proj_drop): Dropout(p=0.0, inplace=False)\n )\n (drop_path): Identity()\n (norm2): LayerNorm((768,), eps=1e-06, elementwise_affine=True)\n (mlp): Mlp(\n (fc1): Linear(in_features=768, 
out_features=3072, bias=True)\n (act): GELU()\n (fc2): Linear(in_features=3072, out_features=768, bias=True)\n (drop): Dropout(p=0.0, inplace=False)\n )\n )\n (3): Block(\n (norm1): LayerNorm((768,), eps=1e-06, elementwise_affine=True)\n (attn): Attention(\n (qkv): Linear(in_features=768, out_features=2304, bias=True)\n (attn_drop): Dropout(p=0.0, inplace=False)\n (proj): Linear(in_features=768, out_features=768, bias=True)\n (proj_drop): Dropout(p=0.0, inplace=False)\n )\n (drop_path): Identity()\n (norm2): LayerNorm((768,), eps=1e-06, elementwise_affine=True)\n (mlp): Mlp(\n (fc1): Linear(in_features=768, out_features=3072, bias=True)\n (act): GELU()\n (fc2): Linear(in_features=3072, out_features=768, bias=True)\n (drop): Dropout(p=0.0, inplace=False)\n )\n )\n (4): Block(\n (norm1): LayerNorm((768,), eps=1e-06, elementwise_affine=True)\n (attn): Attention(\n (qkv): Linear(in_features=768, out_features=2304, bias=True)\n (attn_drop): Dropout(p=0.0, inplace=False)\n (proj): Linear(in_features=768, out_features=768, bias=True)\n (proj_drop): Dropout(p=0.0, inplace=False)\n )\n (drop_path): Identity()\n (norm2): LayerNorm((768,), eps=1e-06, elementwise_affine=True)\n (mlp): Mlp(\n (fc1): Linear(in_features=768, out_features=3072, bias=True)\n (act): GELU()\n (fc2): Linear(in_features=3072, out_features=768, bias=True)\n (drop): Dropout(p=0.0, inplace=False)\n )\n )\n (5): Block(\n (norm1): LayerNorm((768,), eps=1e-06, elementwise_affine=True)\n (attn): Attention(\n (qkv): Linear(in_features=768, out_features=2304, bias=True)\n (attn_drop): Dropout(p=0.0, inplace=False)\n (proj): Linear(in_features=768, out_features=768, bias=True)\n (proj_drop): Dropout(p=0.0, inplace=False)\n )\n (drop_path): Identity()\n (norm2): LayerNorm((768,), eps=1e-06, elementwise_affine=True)\n (mlp): Mlp(\n (fc1): Linear(in_features=768, out_features=3072, bias=True)\n (act): GELU()\n (fc2): Linear(in_features=3072, out_features=768, bias=True)\n (drop): Dropout(p=0.0, inplace=False)\n )\n )\n (6): Block(\n (norm1): LayerNorm((768,), eps=1e-06, elementwise_affine=True)\n (attn): Attention(\n (qkv): Linear(in_features=768, out_features=2304, bias=True)\n (attn_drop): Dropout(p=0.0, inplace=False)\n (proj): Linear(in_features=768, out_features=768, bias=True)\n (proj_drop): Dropout(p=0.0, inplace=False)\n )\n (drop_path): Identity()\n (norm2): LayerNorm((768,), eps=1e-06, elementwise_affine=True)\n (mlp): Mlp(\n (fc1): Linear(in_features=768, out_features=3072, bias=True)\n (act): GELU()\n (fc2): Linear(in_features=3072, out_features=768, bias=True)\n (drop): Dropout(p=0.0, inplace=False)\n )\n )\n (7): Block(\n (norm1): LayerNorm((768,), eps=1e-06, elementwise_affine=True)\n (attn): Attention(\n (qkv): Linear(in_features=768, out_features=2304, bias=True)\n (attn_drop): Dropout(p=0.0, inplace=False)\n (proj): Linear(in_features=768, out_features=768, bias=True)\n (proj_drop): Dropout(p=0.0, inplace=False)\n )\n (drop_path): Identity()\n (norm2): LayerNorm((768,), eps=1e-06, elementwise_affine=True)\n (mlp): Mlp(\n (fc1): Linear(in_features=768, out_features=3072, bias=True)\n (act): GELU()\n (fc2): Linear(in_features=3072, out_features=768, bias=True)\n (drop): Dropout(p=0.0, inplace=False)\n )\n )\n (8): Block(\n (norm1): LayerNorm((768,), eps=1e-06, elementwise_affine=True)\n (attn): Attention(\n (qkv): Linear(in_features=768, out_features=2304, bias=True)\n (attn_drop): Dropout(p=0.0, inplace=False)\n (proj): Linear(in_features=768, out_features=768, bias=True)\n (proj_drop): Dropout(p=0.0, inplace=False)\n )\n 
(drop_path): Identity()\n (norm2): LayerNorm((768,), eps=1e-06, elementwise_affine=True)\n (mlp): Mlp(\n (fc1): Linear(in_features=768, out_features=3072, bias=True)\n (act): GELU()\n (fc2): Linear(in_features=3072, out_features=768, bias=True)\n (drop): Dropout(p=0.0, inplace=False)\n )\n )\n (9): Block(\n (norm1): LayerNorm((768,), eps=1e-06, elementwise_affine=True)\n (attn): Attention(\n (qkv): Linear(in_features=768, out_features=2304, bias=True)\n (attn_drop): Dropout(p=0.0, inplace=False)\n (proj): Linear(in_features=768, out_features=768, bias=True)\n (proj_drop): Dropout(p=0.0, inplace=False)\n )\n (drop_path): Identity()\n (norm2): LayerNorm((768,), eps=1e-06, elementwise_affine=True)\n (mlp): Mlp(\n (fc1): Linear(in_features=768, out_features=3072, bias=True)\n (act): GELU()\n (fc2): Linear(in_features=3072, out_features=768, bias=True)\n (drop): Dropout(p=0.0, inplace=False)\n )\n )\n (10): Block(\n (norm1): LayerNorm((768,), eps=1e-06, elementwise_affine=True)\n (attn): Attention(\n (qkv): Linear(in_features=768, out_features=2304, bias=True)\n (attn_drop): Dropout(p=0.0, inplace=False)\n (proj): Linear(in_features=768, out_features=768, bias=True)\n (proj_drop): Dropout(p=0.0, inplace=False)\n )\n (drop_path): Identity()\n (norm2): LayerNorm((768,), eps=1e-06, elementwise_affine=True)\n (mlp): Mlp(\n (fc1): Linear(in_features=768, out_features=3072, bias=True)\n (act): GELU()\n (fc2): Linear(in_features=3072, out_features=768, bias=True)\n (drop): Dropout(p=0.0, inplace=False)\n )\n )\n (11): Block(\n (norm1): LayerNorm((768,), eps=1e-06, elementwise_affine=True)\n (attn): Attention(\n (qkv): Linear(in_features=768, out_features=2304, bias=True)\n (attn_drop): Dropout(p=0.0, inplace=False)\n (proj): Linear(in_features=768, out_features=768, bias=True)\n (proj_drop): Dropout(p=0.0, inplace=False)\n )\n (drop_path): Identity()\n (norm2): LayerNorm((768,), eps=1e-06, elementwise_affine=True)\n (mlp): Mlp(\n (fc1): Linear(in_features=768, out_features=3072, bias=True)\n (act): GELU()\n (fc2): Linear(in_features=3072, out_features=768, bias=True)\n (drop): Dropout(p=0.0, inplace=False)\n )\n )\n )\n (norm): LayerNorm((768,), eps=1e-06, elementwise_affine=True)\n (pre_logits): Identity()\n (head): Linear(in_features=768, out_features=1000, bias=True)\n)\n" ], [ "model.head = nn.Linear(768, 2).to(device)\nprint(model)", "VisionTransformer(\n (patch_embed): PatchEmbed(\n (proj): Conv2d(3, 768, kernel_size=(16, 16), stride=(16, 16))\n (norm): Identity()\n )\n (pos_drop): Dropout(p=0.0, inplace=False)\n (blocks): Sequential(\n (0): Block(\n (norm1): LayerNorm((768,), eps=1e-06, elementwise_affine=True)\n (attn): Attention(\n (qkv): Linear(in_features=768, out_features=2304, bias=True)\n (attn_drop): Dropout(p=0.0, inplace=False)\n (proj): Linear(in_features=768, out_features=768, bias=True)\n (proj_drop): Dropout(p=0.0, inplace=False)\n )\n (drop_path): Identity()\n (norm2): LayerNorm((768,), eps=1e-06, elementwise_affine=True)\n (mlp): Mlp(\n (fc1): Linear(in_features=768, out_features=3072, bias=True)\n (act): GELU()\n (fc2): Linear(in_features=3072, out_features=768, bias=True)\n (drop): Dropout(p=0.0, inplace=False)\n )\n )\n (1): Block(\n (norm1): LayerNorm((768,), eps=1e-06, elementwise_affine=True)\n (attn): Attention(\n (qkv): Linear(in_features=768, out_features=2304, bias=True)\n (attn_drop): Dropout(p=0.0, inplace=False)\n (proj): Linear(in_features=768, out_features=768, bias=True)\n (proj_drop): Dropout(p=0.0, inplace=False)\n )\n (drop_path): Identity()\n (norm2): 
LayerNorm((768,), eps=1e-06, elementwise_affine=True)\n (mlp): Mlp(\n (fc1): Linear(in_features=768, out_features=3072, bias=True)\n (act): GELU()\n (fc2): Linear(in_features=3072, out_features=768, bias=True)\n (drop): Dropout(p=0.0, inplace=False)\n )\n )\n (2): Block(\n (norm1): LayerNorm((768,), eps=1e-06, elementwise_affine=True)\n (attn): Attention(\n (qkv): Linear(in_features=768, out_features=2304, bias=True)\n (attn_drop): Dropout(p=0.0, inplace=False)\n (proj): Linear(in_features=768, out_features=768, bias=True)\n (proj_drop): Dropout(p=0.0, inplace=False)\n )\n (drop_path): Identity()\n (norm2): LayerNorm((768,), eps=1e-06, elementwise_affine=True)\n (mlp): Mlp(\n (fc1): Linear(in_features=768, out_features=3072, bias=True)\n (act): GELU()\n (fc2): Linear(in_features=3072, out_features=768, bias=True)\n (drop): Dropout(p=0.0, inplace=False)\n )\n )\n (3): Block(\n (norm1): LayerNorm((768,), eps=1e-06, elementwise_affine=True)\n (attn): Attention(\n (qkv): Linear(in_features=768, out_features=2304, bias=True)\n (attn_drop): Dropout(p=0.0, inplace=False)\n (proj): Linear(in_features=768, out_features=768, bias=True)\n (proj_drop): Dropout(p=0.0, inplace=False)\n )\n (drop_path): Identity()\n (norm2): LayerNorm((768,), eps=1e-06, elementwise_affine=True)\n (mlp): Mlp(\n (fc1): Linear(in_features=768, out_features=3072, bias=True)\n (act): GELU()\n (fc2): Linear(in_features=3072, out_features=768, bias=True)\n (drop): Dropout(p=0.0, inplace=False)\n )\n )\n (4): Block(\n (norm1): LayerNorm((768,), eps=1e-06, elementwise_affine=True)\n (attn): Attention(\n (qkv): Linear(in_features=768, out_features=2304, bias=True)\n (attn_drop): Dropout(p=0.0, inplace=False)\n (proj): Linear(in_features=768, out_features=768, bias=True)\n (proj_drop): Dropout(p=0.0, inplace=False)\n )\n (drop_path): Identity()\n (norm2): LayerNorm((768,), eps=1e-06, elementwise_affine=True)\n (mlp): Mlp(\n (fc1): Linear(in_features=768, out_features=3072, bias=True)\n (act): GELU()\n (fc2): Linear(in_features=3072, out_features=768, bias=True)\n (drop): Dropout(p=0.0, inplace=False)\n )\n )\n (5): Block(\n (norm1): LayerNorm((768,), eps=1e-06, elementwise_affine=True)\n (attn): Attention(\n (qkv): Linear(in_features=768, out_features=2304, bias=True)\n (attn_drop): Dropout(p=0.0, inplace=False)\n (proj): Linear(in_features=768, out_features=768, bias=True)\n (proj_drop): Dropout(p=0.0, inplace=False)\n )\n (drop_path): Identity()\n (norm2): LayerNorm((768,), eps=1e-06, elementwise_affine=True)\n (mlp): Mlp(\n (fc1): Linear(in_features=768, out_features=3072, bias=True)\n (act): GELU()\n (fc2): Linear(in_features=3072, out_features=768, bias=True)\n (drop): Dropout(p=0.0, inplace=False)\n )\n )\n (6): Block(\n (norm1): LayerNorm((768,), eps=1e-06, elementwise_affine=True)\n (attn): Attention(\n (qkv): Linear(in_features=768, out_features=2304, bias=True)\n (attn_drop): Dropout(p=0.0, inplace=False)\n (proj): Linear(in_features=768, out_features=768, bias=True)\n (proj_drop): Dropout(p=0.0, inplace=False)\n )\n (drop_path): Identity()\n (norm2): LayerNorm((768,), eps=1e-06, elementwise_affine=True)\n (mlp): Mlp(\n (fc1): Linear(in_features=768, out_features=3072, bias=True)\n (act): GELU()\n (fc2): Linear(in_features=3072, out_features=768, bias=True)\n (drop): Dropout(p=0.0, inplace=False)\n )\n )\n (7): Block(\n (norm1): LayerNorm((768,), eps=1e-06, elementwise_affine=True)\n (attn): Attention(\n (qkv): Linear(in_features=768, out_features=2304, bias=True)\n (attn_drop): Dropout(p=0.0, inplace=False)\n (proj): 
Linear(in_features=768, out_features=768, bias=True)\n (proj_drop): Dropout(p=0.0, inplace=False)\n )\n (drop_path): Identity()\n (norm2): LayerNorm((768,), eps=1e-06, elementwise_affine=True)\n (mlp): Mlp(\n (fc1): Linear(in_features=768, out_features=3072, bias=True)\n (act): GELU()\n (fc2): Linear(in_features=3072, out_features=768, bias=True)\n (drop): Dropout(p=0.0, inplace=False)\n )\n )\n (8): Block(\n (norm1): LayerNorm((768,), eps=1e-06, elementwise_affine=True)\n (attn): Attention(\n (qkv): Linear(in_features=768, out_features=2304, bias=True)\n (attn_drop): Dropout(p=0.0, inplace=False)\n (proj): Linear(in_features=768, out_features=768, bias=True)\n (proj_drop): Dropout(p=0.0, inplace=False)\n )\n (drop_path): Identity()\n (norm2): LayerNorm((768,), eps=1e-06, elementwise_affine=True)\n (mlp): Mlp(\n (fc1): Linear(in_features=768, out_features=3072, bias=True)\n (act): GELU()\n (fc2): Linear(in_features=3072, out_features=768, bias=True)\n (drop): Dropout(p=0.0, inplace=False)\n )\n )\n (9): Block(\n (norm1): LayerNorm((768,), eps=1e-06, elementwise_affine=True)\n (attn): Attention(\n (qkv): Linear(in_features=768, out_features=2304, bias=True)\n (attn_drop): Dropout(p=0.0, inplace=False)\n (proj): Linear(in_features=768, out_features=768, bias=True)\n (proj_drop): Dropout(p=0.0, inplace=False)\n )\n (drop_path): Identity()\n (norm2): LayerNorm((768,), eps=1e-06, elementwise_affine=True)\n (mlp): Mlp(\n (fc1): Linear(in_features=768, out_features=3072, bias=True)\n (act): GELU()\n (fc2): Linear(in_features=3072, out_features=768, bias=True)\n (drop): Dropout(p=0.0, inplace=False)\n )\n )\n (10): Block(\n (norm1): LayerNorm((768,), eps=1e-06, elementwise_affine=True)\n (attn): Attention(\n (qkv): Linear(in_features=768, out_features=2304, bias=True)\n (attn_drop): Dropout(p=0.0, inplace=False)\n (proj): Linear(in_features=768, out_features=768, bias=True)\n (proj_drop): Dropout(p=0.0, inplace=False)\n )\n (drop_path): Identity()\n (norm2): LayerNorm((768,), eps=1e-06, elementwise_affine=True)\n (mlp): Mlp(\n (fc1): Linear(in_features=768, out_features=3072, bias=True)\n (act): GELU()\n (fc2): Linear(in_features=3072, out_features=768, bias=True)\n (drop): Dropout(p=0.0, inplace=False)\n )\n )\n (11): Block(\n (norm1): LayerNorm((768,), eps=1e-06, elementwise_affine=True)\n (attn): Attention(\n (qkv): Linear(in_features=768, out_features=2304, bias=True)\n (attn_drop): Dropout(p=0.0, inplace=False)\n (proj): Linear(in_features=768, out_features=768, bias=True)\n (proj_drop): Dropout(p=0.0, inplace=False)\n )\n (drop_path): Identity()\n (norm2): LayerNorm((768,), eps=1e-06, elementwise_affine=True)\n (mlp): Mlp(\n (fc1): Linear(in_features=768, out_features=3072, bias=True)\n (act): GELU()\n (fc2): Linear(in_features=3072, out_features=768, bias=True)\n (drop): Dropout(p=0.0, inplace=False)\n )\n )\n )\n (norm): LayerNorm((768,), eps=1e-06, elementwise_affine=True)\n (pre_logits): Identity()\n (head): Linear(in_features=768, out_features=2, bias=True)\n)\n" ], [ "criterion = nn.CrossEntropyLoss()\noptimizer = optim.Adam(model.parameters(), lr=lr)\nscheduler = StepLR(optimizer, step_size=1, gamma=gamma)", "_____no_output_____" ], [ "for epoch in range(epochs):\n epoch_loss = 0\n epoch_accuracy = 0\n for data, label in tqdm(train_loader):\n data = data.to(device)\n label = label.to(device)\n output = model(data)\n loss = criterion(output, label)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n acc = (output.argmax(dim=1) == label).float().mean()\n epoch_accuracy += acc 
/ len(train_loader)\n epoch_loss += loss / len(train_loader)\n with torch.no_grad():\n epoch_val_accuracy = 0\n epoch_val_loss = 0\n for data, label in valid_loader:\n data = data.to(device)\n label = label.to(device)\n val_output = model(data)\n val_loss = criterion(val_output, label)\n acc = (val_output.argmax(dim=1) == label).float().mean()\n epoch_val_accuracy += acc / len(valid_loader)\n epoch_val_loss += val_loss / len(valid_loader)\n print(\n f\"Epoch : {epoch+1} - loss : {epoch_loss:.4f} - acc: {epoch_accuracy:.4f} - val_loss : {epoch_val_loss:.4f} - val_acc: {epoch_val_accuracy:.4f}\\n\"\n )", "100%|██████████| 313/313 [07:48<00:00, 1.50s/it]\n 0%| | 0/313 [00:00<?, ?it/s]" ], [ "", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb9ed1dca36f88d3be604f4316f11465b289ced4
14,614
ipynb
Jupyter Notebook
notebooks/demo_build_multiindex_dataframe_html.ipynb
oscar6echo/ezaggrid
60fd5975b5f90bf6ca5471b0431e50526c5b1d00
[ "MIT" ]
6
2018-05-19T01:56:24.000Z
2021-03-03T14:30:21.000Z
notebooks/demo_build_multiindex_dataframe_html.ipynb
oscar6echo/ezaggrid
60fd5975b5f90bf6ca5471b0431e50526c5b1d00
[ "MIT" ]
2
2018-06-29T14:37:46.000Z
2018-08-21T14:50:31.000Z
notebooks/demo_build_multiindex_dataframe_html.ipynb
oscar6echo/ezaggrid
60fd5975b5f90bf6ca5471b0431e50526c5b1d00
[ "MIT" ]
1
2021-02-18T09:23:26.000Z
2021-02-18T09:23:26.000Z
32.766816
111
0.388532
[ [ [ "import os\nimport numpy as np\nimport pandas as pd\nimport jinja2 as jj\n", "_____no_output_____" ], [ "def mklbl(prefix, n):\n return [\"%s%s\" % (prefix, i) for i in range(n)]\n\nmiindex = pd.MultiIndex.from_product([mklbl('A', 4),\n mklbl('B', 2),\n mklbl('C', 4),\n mklbl('D', 2)],\n names=['RowIdx-1', 'RowIdx-2', 'RowIdx-3', 'RowIdx-4'])\nindex =['-'.join(col).strip() for col in miindex.values]\nmicolumns = pd.MultiIndex.from_tuples([('a', 'foo', 'zap'),\n ('a', 'foo', 'zip'),\n ('a', 'bar', 'zap'),\n ('a', 'bar', 'zip'),\n ('b', 'foo', 'zap'),\n ('b', 'foo', 'zep'),\n ('b', 'bah', 'zep'),\n ('b', 'bah', 'zyp'),\n ('b', 'bah', 'zap'),\n ],\n names=['ColIdx-{}'.format(i) for i in range(1, 4)])\ncols =['-'.join(col).strip() for col in micolumns.values]\ndata = np.arange(len(miindex) * len(micolumns), dtype=np.float).reshape((len(miindex),len(micolumns)))\ndata = data.tolist()\n\n\ndfrc = pd.DataFrame(data, index=miindex, columns=micolumns).sort_index().sort_index(axis=1)\n\ndfr = pd.DataFrame(data, index=miindex, columns=cols).sort_index().sort_index(axis=1)\ndfr.columns.name = 'UniqueCol'\n\ndfc = pd.DataFrame(data, index=index, columns=micolumns).sort_index().sort_index(axis=1)\ndfc.index.name = 'UniqueRow'\n\ndf = pd.DataFrame(data, index=index, columns=cols).sort_index()\ndf.index.name = 'UniqueRow'\ndf.columns.name = 'UniqueCol'\n\ndfrc.info()\ndfr.info()\ndfc.info()\ndf.info()", "<class 'pandas.core.frame.DataFrame'>\nMultiIndex: 64 entries, (A0, B0, C0, D0) to (A3, B1, C3, D1)\nData columns (total 9 columns):\n(a, bar, zap) 64 non-null float64\n(a, bar, zip) 64 non-null float64\n(a, foo, zap) 64 non-null float64\n(a, foo, zip) 64 non-null float64\n(b, bah, zap) 64 non-null float64\n(b, bah, zep) 64 non-null float64\n(b, bah, zyp) 64 non-null float64\n(b, foo, zap) 64 non-null float64\n(b, foo, zep) 64 non-null float64\ndtypes: float64(9)\nmemory usage: 5.1+ KB\n<class 'pandas.core.frame.DataFrame'>\nMultiIndex: 64 entries, (A0, B0, C0, D0) to (A3, B1, C3, D1)\nData columns (total 9 columns):\na-bar-zap 64 non-null float64\na-bar-zip 64 non-null float64\na-foo-zap 64 non-null float64\na-foo-zip 64 non-null float64\nb-bah-zap 64 non-null float64\nb-bah-zep 64 non-null float64\nb-bah-zyp 64 non-null float64\nb-foo-zap 64 non-null float64\nb-foo-zep 64 non-null float64\ndtypes: float64(9)\nmemory usage: 5.1+ KB\n<class 'pandas.core.frame.DataFrame'>\nIndex: 64 entries, A0-B0-C0-D0 to A3-B1-C3-D1\nData columns (total 9 columns):\n(a, bar, zap) 64 non-null float64\n(a, bar, zip) 64 non-null float64\n(a, foo, zap) 64 non-null float64\n(a, foo, zip) 64 non-null float64\n(b, bah, zap) 64 non-null float64\n(b, bah, zep) 64 non-null float64\n(b, bah, zyp) 64 non-null float64\n(b, foo, zap) 64 non-null float64\n(b, foo, zep) 64 non-null float64\ndtypes: float64(9)\nmemory usage: 5.0+ KB\n<class 'pandas.core.frame.DataFrame'>\nIndex: 64 entries, A0-B0-C0-D0 to A3-B1-C3-D1\nData columns (total 9 columns):\na-foo-zap 64 non-null float64\na-foo-zip 64 non-null float64\na-bar-zap 64 non-null float64\na-bar-zip 64 non-null float64\nb-foo-zap 64 non-null float64\nb-foo-zep 64 non-null float64\nb-bah-zep 64 non-null float64\nb-bah-zyp 64 non-null float64\nb-bah-zap 64 non-null float64\ndtypes: float64(9)\nmemory usage: 5.0+ KB\n" ], [ "dfrc.head()", "_____no_output_____" ] ], [ [ "## Save df html\nMust:\n+ use notebook.css\n+ wrap dataframe.html in specific classes - like in notebook\nResult can be iframed in any doc\n", "_____no_output_____" ] ], [ [ "%%writefile 
templates/index.tpl.html\n\n<!DOCTYPE html>\n<html>\n\n<head>\n <meta charset=\"utf-8\" />\n <meta http-equiv=\"X-UA-Compatible\" content=\"IE=edge\">\n <title>dataframe</title>\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1\">\n <link rel=\"stylesheet\" href=\"https://cdn.jupyter.org/notebook/5.6.0/style/style.min.css\">\n</head>\n\n<body>\n\n\n <div class=\"output_are\">\n <div class=\"output_subarea output_html rendered_html output_result\">\n\n __$data.df_html$__\n\n </div>\n </div>\n\n</body>\n\n</html>", "Overwriting templates/index.tpl.html\n" ], [ "dir_template = 'templates'\ndir_dump = 'dump'\n\nloader = jj.FileSystemLoader(dir_template)\nenv = jj.Environment(loader=loader,\n variable_start_string='__$',\n variable_end_string='$__',\n block_start_string='{-%',\n block_end_string='%-}'\n )\n\ntemplate = env.get_template('index.tpl.html')\n# data = {'df_html': dfrc.to_html()}\ndata = {'df_html': dfrc.head(10).to_html()}\ncontent = template.render(data=data)\n\nif not os.path.exists(dir_dump):\n os.makedirs(dir_dump)\n\npath = os.path.join(dir_dump, 'index.html')\nwith open(path, 'w') as f:\n f.write(content)\n print('file {} saved to disk'.format(path))\n", "file dump/index.html saved to disk\n" ], [ "!cd dump && python -m http.server 8080", "Serving HTTP on 0.0.0.0 port 8080 (http://0.0.0.0:8080/) ...\n127.0.0.1 - - [07/Aug/2018 20:51:09] \"GET / HTTP/1.1\" 200 -\n" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
cb9edd5497f65b476046843d4ee4367aca8be2c7
15,034
ipynb
Jupyter Notebook
Thermal_TOP_v00.ipynb
Rlahuerta/ThemoElasticity
c9e78cc2eeea2d7955eda8d12fe431f64e0f19c2
[ "MIT" ]
null
null
null
Thermal_TOP_v00.ipynb
Rlahuerta/ThemoElasticity
c9e78cc2eeea2d7955eda8d12fe431f64e0f19c2
[ "MIT" ]
null
null
null
Thermal_TOP_v00.ipynb
Rlahuerta/ThemoElasticity
c9e78cc2eeea2d7955eda8d12fe431f64e0f19c2
[ "MIT" ]
null
null
null
47.27673
536
0.5439
[ [ [ "$\\newcommand{\\tensor}[1]{\\boldsymbol{#1}}$\n\n$\\newcommand{\\tensorthree}[1]{\\mathbfcal{#1}}$\n$\\newcommand{\\tensorfour}[1]{\\mathbb{#1}}$\n\n$\\newcommand{\\dar}{\\, \\text{d} a^r}$\n\n$\\newcommand{\\dvr}{\\, \\text{d} v^r}$\n$\\newcommand{\\dv}{\\, \\text{d} v}$\n\n$\\newcommand{\\dt}{\\, \\text{d} t}$\n$\\newcommand{\\dthe}{\\, \\text{d} \\theta}$\n\n$\\newcommand{\\tr}{\\operatorname{tr}}$\n\n## The nonlinear heat equation solver in FEniCS", "_____no_output_____" ] ], [ [ "from IPython.display import display_pretty, display_html, display_jpeg, display_png, display_json, display_latex, display_svg\n\nfrom IPython.display import Image\nImage(url='http://python.org/images/python-logo.gif')", "_____no_output_____" ] ], [ [ "### Functionals\n\nThe total **deformation gradient** can be can be decomposed as: \n\n\\begin{equation}\n \\tensor{F} = \\tensor{F}_e ~ \\tensor{F}_{\\theta}\n\\end{equation}\n\nThe analysis restricted to isotropic materials, for which the thermal part of the deformation gradient is:\n\n\\begin{equation}\n \\tensor{F}_{\\theta} = \\vartheta \\left( \\theta \\right) ~ \\tensor{I}\n\\end{equation}\n\nThe scalar $\\theta = \\upsilon \\left( \\theta \\right)$ is the **thermal stretch ratio** in any material direction. In this case, the elastic and thermal Green strains become:\n\n\\begin{equation}\n \\tensor{E}_{e} = \\frac{1}{\\vartheta^2} \\left( \\tensor{E} - \\tensor{E}_{\\theta} \\right), \\qquad \\tensor{E}_{\\theta} = \\frac{1}{2} \\left( \\vartheta^2 - 1 \\right) \\tensor{I}\n\\end{equation}\n\nThe relationship holds:\n\n\\begin{equation}\n \\tensor{I} + 2 \\tensor{E} = \\vartheta^2 \\left( \\tensor{I} + 2 \\tensor{E}_{e} \\right)\n\\end{equation}\n\nSince the thermal stretch ratio $\\vartheta$ and the coefficient of thermal expansion $\\alpha$ are related by\n\n\\begin{equation}\n \\alpha \\left( \\theta \\right) = \\frac{1}{\\vartheta} \\frac{\\text{d} \\vartheta}{\\dthe} \n\\end{equation}\n\nthe rate of elastic strain can be written as\n\n\\begin{equation}\n \\dot{\\tensor{E}}_{e} = \\frac{1}{\\vartheta^2 \\left( \\theta \\right)} \\left[ \\dot{\\tensor{E}} - \\alpha \\left( \\theta \\right) \\left( \\tensor{I} + 2 \\tensor{E} \\right) \\dot{\\theta} \\right]\n\\end{equation}\n\n", "_____no_output_____" ], [ "### Stress Response\n\nWithin the model of the multiplicative decomposition, the Helmholtz free energy can be conveniently split into two\nparts:\n\n\\begin{equation}\n {\\varphi} \\left( \\tensor{u}, \\theta \\right) = {\\varphi}_{e} \\left( \\tensor{E}_{e}, \\theta \\right) + {\\varphi}_{\\theta} \\left( \\theta \\right)\n\\end{equation}", "_____no_output_____" ], [ "where ${\\varphi}_{e}$ is an isotropic function of the elastic strain $\\tensor{E}_{e}$ and the temperature $\\theta$. 
This decomposition is physically appealing because the function ${\\varphi}_{e}$ can be taken as one of the\nwell-known strain energy functions of the isothermal finite-strain elasticity, except that the coefficients of the strain-dependent terms are the functions of temperature, while the function ${\\varphi}_{\\theta}$ can be separately adjusted in accord with experimental data for the specific heat.\n\nThe time-rate of the free energy\n\n\\begin{equation}\n \\dot{\\varphi} = \\frac{\\text{d} \\varphi \\left( \\tensor{u}, \\theta \\right)}{\\dt} = \\frac{\\partial {\\varphi}_{e}}{\\partial \\tensor{E}_{e}} : \\dot{\\tensor{E}_{e}} + \\frac{\\partial {\\varphi}_{e}}{\\partial \\theta} ~ \\dot{\\theta} + \\frac{\\text{d} \\varphi_{\\theta}}{\\dthe} ~ \\dot{\\theta}\n\\end{equation}\n\nthere follows\n\n\\begin{equation}\n \\dot{\\varphi} = \\frac{1}{\\vartheta^2} \\frac{\\partial {\\varphi}_{e}}{\\partial \\tensor{E}_{e}} : \\dot{\\tensor{E}_{e}} - \\left[ \\frac{\\alpha}{\\vartheta^2} \\frac{\\partial {\\varphi}_{e}}{\\partial \\tensor{E}_{e}}: \\left( \\tensor{I} + 2 \\tensor{E} \\right) - \\frac{\\partial {\\varphi}_{e}}{\\partial \\theta} - \\frac{\\text{d} \\varphi_{\\theta}}{\\dthe} \\right] ~ \\dot{\\theta}\n\\end{equation}", "_____no_output_____" ], [ "The comparison with the energy equation:\n\n\\begin{equation}\n \\dot{\\varphi} = \\frac{1}{\\varrho^r} \\tensor{S} : \\dot{\\tensor{E}} - \\eta \\dot{\\theta}\n\\end{equation}\n\nestablishes the constitutive relations for the symmetric second Piola–Kirchhoff stress tensor $\\tensor{S}$ and the specific entropy $\\eta$. These\nare:\n\n\\begin{equation}\n \\tensor{S} = \\frac{\\varrho^r}{\\vartheta^2} \\frac{\\partial {\\varphi}_{e}}{\\partial \\tensor{E}_{e}}\n\\end{equation}\n\n\\begin{equation}\n \\eta = \\alpha \\frac{\\partial {\\varphi}_{e}}{\\partial \\tensor{E}_{e}} : \\left( \\tensor{I} + 2 \\tensor{E} \\right) - \\frac{\\partial {\\varphi}_{e}}{\\partial \\theta} - \\frac{\\text{d} \\varphi_{\\theta}}{\\dthe}\n\\end{equation}\n\n\\begin{equation}\n \\varrho^r = \\upsilon^3 \\varrho^{\\theta}\n\\end{equation}\n\n\\begin{equation}\n \\tensor{S} = \\vartheta ~ \\tensor{S}_{e}, \\qquad \\tensor{S}_{e} = \\varrho^{\\theta} \\frac{\\partial {\\varphi}_{e}}{\\partial \\tensor{E}_{e}}\n\\end{equation}", "_____no_output_____" ], [ "\\begin{equation}\n \\varrho^{\\theta} \\frac{\\partial {\\varphi}_{e}}{\\partial \\tensor{E}_{e}} = \\frac{1}{2} \\lambda \\left( \\theta \\right) \\left( \\tr{\\tensor{E}_{e}} \\right)^{2} + \\mu \\left( \\theta \\right) ~ \\tensor{E}_{e} : \\tensor{E}_{e}\n\\end{equation}\n\nwhere $\\lambda \\left( \\theta \\right)$ and $\\mu \\left( \\theta \\right)$ are the temperature-dependent Lamé moduli. It follows that\n\n\\begin{equation}\n \\tensor{S}_{e} = \\tensorfour{C} \\left( \\tensor{u}, \\theta \\right) : \\tensor{E}_{e}\n\\end{equation}\n\ninto $\\tensor{S} = \\vartheta \\tensor{S}_{e}$, the stress response becomes\n\n\\begin{equation}\n \\tensor{S} = \\frac{1}{\\vartheta} \\left[ \\lambda \\left( \\tr{\\tensor{E}} \\right) \\tensor{I} + 2 \\mu \\tensor{E} \\right] - \\frac{3}{2} \\left[ \\vartheta - \\frac{1}{\\vartheta} \\right] \\kappa \\tensor{I},\n\\end{equation}\n\nwhere $\\kappa$ refers to the temperature-dependent bulk modulus. This is an exact expression for the thermoelastic stress response in the case of quadratic representation of $\\varphi_e$ e in terms of the finite elastic strain $\\tensor{E}_{e}$. 
If the Lamé moduli are taken to be temperature-independent, and if the approximation $\\upsilon \\approx 1 + \\alpha^{r} \\left( \\theta - \\theta^{r} \\right)$ is used ($\\alpha^{r}$ o being the coefficient of linear thermal expansion at $\\theta - \\theta^{r}$), that reduces to\n\n\\begin{equation}\n \\tensor{S} = \\lambda^{r} \\left( \\tr{\\tensor{E}} \\right) \\tensor{I} + 2 \\mu^{r} \\tensor{E} - 3 \\alpha^{r} \\left( \\theta - \\theta^{r} \\right) \\kappa^{r} \\tensor{I}\n\\end{equation}", "_____no_output_____" ], [ "### Entropy expression\n\nIn the case of quadratic strain energy representation, there is a relationship $\\varrho^r {\\varphi}_{e} = \\vartheta ^ 3 ~ \\tensor{S}_{e} : \\tensor{E}_{e}/2 $, so that\n\n\\begin{equation}\n \\varrho^r \\frac{\\partial {\\varphi}_{e}}{\\partial \\theta} = \\frac{3}{2} \\vartheta^2 \\frac{\\text{d} \\varphi_{\\theta}}{\\dthe} \\tensor{S}_{e} : \\tensor{E}_{e} + \\frac{1}{2} \\vartheta^3 \\frac{\\partial \\tensor{S}_{e}}{\\partial \\theta} : \\tensor{E}_{e}\n\\end{equation}\n\n\\begin{equation}\n \\varrho^r \\frac{\\partial {\\varphi}_{e}}{\\partial \\theta} = \\frac{3}{2} \\alpha \\left[ \\tensor{S} : \\tensor{E} - \\frac{1}{2} \\left( \\vartheta^2 - 1 \\right) ~ \\tr{\\tensor{S}} \\right] + \\frac{1}{2} \\vartheta^3 \\frac{\\partial {\\tensor{S}}_{e}}{\\partial \\theta} : \\tensor{E}_{e}\n\\end{equation} \n\nThe coefficient of thermal expansion $\\alpha$ can be readily verified that\n\n\\begin{equation}\n \\vartheta \\frac{\\partial {\\tensor{S}}_{e}}{\\partial \\theta} = \\frac{\\partial {\\tensor{S}}}{\\partial \\theta} + \\alpha \\left( \\tensor{S} + 3 \\vartheta \\kappa \\tensor{I} \\right)\n\\end{equation}\n\n\\begin{equation}\n \\vartheta^{3} \\frac{\\partial {\\tensor{S}}_{e}}{\\partial \\theta} : \\tensor{E}_{e} = \\frac{\\partial {\\tensor{S}}}{\\partial \\theta} : \\left[ \\tensor{E} - \\frac{\\left( \\vartheta^{2} - 1 \\right)}{2} ~ \\tensor{I} \\right] + \\alpha \\left[ \\tensor{S} : \\tensor{E} + \\frac{\\left( 1 + \\vartheta^{2} \\right)}{2} \\tr{\\tensor{S}} \\right]\n\\end{equation}\n\n\\begin{equation}\n \\varrho^r \\frac{\\partial {\\varphi}_{e}}{\\partial \\theta} = 2 \\alpha ~ \\tensor{S} : \\tensor{E} + \\frac{\\alpha \\left( 2 - \\vartheta^{2} \\right)}{2} \\tr{\\tensor{S}} + \\frac{1}{2} \\frac{\\partial {\\tensor{S}}}{\\partial \\theta} : \\left[ \\tensor{E} - \\frac{\\left( \\vartheta^{2} - 1 \\right)}{2} ~ \\tensor{I} \\right]\n\\end{equation}\n\n\\begin{equation}\n \\eta = \\frac{1}{2 \\varrho^r} \\left[ 3 \\vartheta \\alpha \\kappa \\tensor{I} - \\frac{\\partial {\\tensor{S}}}{\\partial \\theta} \\right] : \\left[ \\tensor{E} - \\frac{\\left( \\vartheta^{2} - 1 \\right)}{2} ~ \\tensor{I} \\right] - \\frac{\\text{d} \\varphi_{\\theta}}{\\dthe}\n\\end{equation}\n \nRecalling the standard expression for the latent heat $\\tensor{\\varepsilon}$, we finally have\n\n\\begin{equation}\n \\eta = \\frac{1}{2} \\left( \\frac{\\tensor{\\varepsilon}}{\\theta} + \\frac{\\vartheta \\alpha \\kappa}{\\varrho^r} \\tensor{I} \\right) : \\left( \\tensor{E} - \\frac{\\left( \\vartheta^{2} - 1 \\right)}{2} ~ \\tensor{I} \\right) - \\frac{\\text{d} \\varphi_{\\theta}}{\\dthe}\n\\end{equation}\n\nThis is an exact expression for the entropy $\\eta$ within the approximation used for the elastic strain energy function. 
The second-order tensor of the latent heat $\\tensor{\\varepsilon}$ can be calculated as\n\n\\begin{equation}\n \\tensor{\\varepsilon} = - \\frac{\\theta}{\\varrho^r} \\frac{\\partial {\\tensor{S}}}{\\partial \\theta} = - \\frac{\\theta}{\\varrho^r} \\left( \\vartheta \\frac{\\partial {\\tensor{S}}}{\\partial \\theta} -\\alpha \\left( \\tensor{S} + 3 \\vartheta \\kappa \\tensor{I} \\right) \\right)\n\\end{equation}\n\nwhich gives\n\n\\begin{equation}\n \\tensor{\\varepsilon} = \\frac{\\theta}{\\varrho^r} \\left( \\alpha \\left( \\tensor{S} + 3 \\vartheta \\kappa \\tensor{I} \\right) - \\frac{1}{\\vartheta} \\frac{\\text{d} \\tensorfour{C}}{\\dthe} : \\left( \\tensor{E} - \\frac{\\left( \\vartheta^{2} - 1 \\right)}{2} ~ \\tensor{I} \\right) \\right)\n\\end{equation}", "_____no_output_____" ], [ "If the elastic moduli are independent of the temperature, and if the stress components are much smaller than the elastic bulk modulus, then the specific heat becomes $\\tensor{\\varepsilon} = 3 \\vartheta \\alpha \\theta \\kappa \\tensor{I} / {\\varrho^r}$, while the entropy expression reduces to\n\n\\begin{equation}\n \\eta = \\frac{3}{\\varrho^r} \\vartheta \\alpha \\kappa \\left( \\tr{\\tensor{E}} - \\frac{3}{2} \\left( \\vartheta^2 - 1 \\right) \\right) - \\frac{\\text{d} \\varphi_{\\theta}}{\\dthe}\n\\end{equation}\n\nThe function $\\varphi_{\\theta}$ can be selected according to experimental data for the specific heat $c_{E} = \\theta \\partial \\eta / \\partial \\theta$. For example, if we take\n\n\\begin{equation}\n \\varphi_{\\theta} = -\\frac{1}{2} \\left( \\frac{c_{E}}{\\theta^r} + \\frac{9 \\left( \\alpha^r \\right)^2 \\kappa^r}{\\varrho^r} \\right) \\left( \\theta - \\theta^r \\right)^2\n\\end{equation}\n\nthen becomes\n\n\\begin{equation}\n \\eta = \\frac{3}{\\varrho^r} \\alpha^r \\kappa^r \\tr{\\tensor{E}} + \\frac{c_{E}}{\\theta^r} \\left( \\theta - \\theta^r \\right)\n\\end{equation}\n\nwhich is in agreement with the classical result from the linearized theory of thermoelasticity. The approximations $\\alpha \\approx \\alpha^r$ and $\\vartheta \\approx 1 + \\alpha^r \\left( \\theta - \\theta^r \\right)$ are used in the above derivation.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
cb9ee1f1ac3d5481468908a7cba0d12c46c4c284
192,326
ipynb
Jupyter Notebook
spoof_predictor.ipynb
fsusan/spoofing-meter
32e907a429a74c29c307da63d884cba3558fa533
[ "CC-BY-3.0" ]
null
null
null
spoof_predictor.ipynb
fsusan/spoofing-meter
32e907a429a74c29c307da63d884cba3558fa533
[ "CC-BY-3.0" ]
null
null
null
spoof_predictor.ipynb
fsusan/spoofing-meter
32e907a429a74c29c307da63d884cba3558fa533
[ "CC-BY-3.0" ]
1
2020-09-21T03:53:36.000Z
2020-09-21T03:53:36.000Z
90.293897
20,566
0.777565
[ [ [ "import pandas as pd\nimport numpy as np\nimport pickle\nfrom joblib import dump, load\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "def read_data_small():\n X_train = pd.read_csv(\"data_small/X_train_small.csv\")\n X_test = pd.read_csv(\"data_small/X_test_small.csv\")\n y_train = np.asarray(pd.read_csv(\"data_small/y_train_small.csv\", header=None)[0])\n return X_train, X_test, y_train", "_____no_output_____" ], [ "def read_data_big():\n X_train = pd.read_csv(\"data_big/X_train_big.csv\")\n X_test = pd.read_csv(\"data_big/X_test_big.csv\")\n y_train = np.asarray(pd.read_csv(\"data_big/y_train_big.csv\", header=None)[0])\n return X_train, X_test, y_train", "_____no_output_____" ], [ "def read_data():\n X_train = pd.read_csv(\"data/X_train.csv\")\n X_test = pd.read_csv(\"data/X_test.csv\")\n y_train = np.asarray(pd.read_csv(\"data/y_train.csv\", header=None)[0])\n return X_train, X_test, y_train", "_____no_output_____" ] ], [ [ "# Visualization", "_____no_output_____" ] ], [ [ "X_train, X_test, y_train = read_data_small()\nX_train['y_label'] = y_train\nX_train[\"timeSinceLastTradeSameOrder\"] = X_train[[\"timestamp\",\"orderId\"]].groupby(\"orderId\").diff()\nX_train.loc[(X_train['timeSinceLastTradeSameOrder']<=45) & (X_train['source']!='SYSTEM'),'y_label']= 3", "_____no_output_____" ], [ "plot1 = X_train[['timestamp','y_label']]\nplot1 = plot1[plot1.y_label!=0]\nplot1a = plot1[plot1.y_label==1]\nplot1b = plot1[plot1.y_label==2]\nplot1c = plot1[plot1.y_label==3]\n\nplt.scatter(plot1a['timestamp'],plot1a['y_label'],color='red')\nplt.scatter(plot1b['timestamp'],plot1b['y_label'],color='blue')\nplt.scatter(plot1c['timestamp'],plot1c['y_label'],color='green')\nplt.show()", "_____no_output_____" ], [ "X_clean = format_data(pd.concat([X_train, X_test]))\nX_train_clean = X_clean.iloc[:X_train.shape[0],:]\nX_test_clean = X_clean.iloc[X_train.shape[0]:,:]\nX_train_clean_scaled = scale(X_train_clean)\nX_test_clean_scaled = scale(X_test_clean)\n\n# fit classifier\nclf = LogisticRegression(random_state=0, class_weight='balanced').fit(X_train_clean_scaled, y_train)\ny_train_prob_pred = clf.predict_proba(X_train_clean_scaled)\ny_test_prob_pred = clf.predict_proba(X_test_clean_scaled)", "/Users/fransiscasusan/anaconda3/lib/python3.6/site-packages/ipykernel/__main__.py:4: DataConversionWarning: Data with input dtype bool, uint8, int64, float64, object were all converted to float64 by the scale function.\n/Users/fransiscasusan/anaconda3/lib/python3.6/site-packages/sklearn/preprocessing/data.py:180: UserWarning: Numerical issues were encountered when centering the data and might not be solved. Dataset may contain too large values. You may need to prescale your features.\n warnings.warn(\"Numerical issues were encountered \"\n/Users/fransiscasusan/anaconda3/lib/python3.6/site-packages/ipykernel/__main__.py:5: DataConversionWarning: Data with input dtype bool, uint8, int64, float64, object were all converted to float64 by the scale function.\n/Users/fransiscasusan/anaconda3/lib/python3.6/site-packages/sklearn/preprocessing/data.py:180: UserWarning: Numerical issues were encountered when centering the data and might not be solved. Dataset may contain too large values. You may need to prescale your features.\n warnings.warn(\"Numerical issues were encountered \"\n/Users/fransiscasusan/anaconda3/lib/python3.6/site-packages/sklearn/linear_model/logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. 
Specify a solver to silence this warning.\n FutureWarning)\n/Users/fransiscasusan/anaconda3/lib/python3.6/site-packages/sklearn/linear_model/logistic.py:460: FutureWarning: Default multi_class will be changed to 'auto' in 0.22. Specify the multi_class option to silence this warning.\n \"this warning.\", FutureWarning)\n" ], [ "X_train['y_pred_0'] = y_train_prob_pred[:,0]\nX_train['y_pred_1'] = y_train_prob_pred[:,1]\nX_train['y_pred_2'] = y_train_prob_pred[:,2]", "_____no_output_____" ] ], [ [ "Example for user 'KYPPWBZJQ'", "_____no_output_____" ] ], [ [ "ex1 = X_train[X_train['endUserRef']=='KYPPWBZJQ'].iloc[3]\n# Pie chart, where the slices will be ordered and plotted counter-clockwise:\nlabels = 'class0','class1','class2','class3'\nex = ex1\nsizes = [ex.y_pred_0,ex.y_pred_1,ex.y_pred_2,1-ex.y_pred_0-ex.y_pred_1-ex.y_pred_2]\nexplode = (0, 0.1, 0, 0) # only \"explode\" the 2nd slice (i.e. 'Hogs')\n\nfig1, ax1 = plt.subplots()\nax1.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%',\n shadow=True, startangle=90)\nax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.\n\nplt.show()", "_____no_output_____" ], [ "ex2 = X_train[X_train['endUserRef']=='KYPPWBZJQ'].iloc[4]\n# Pie chart, where the slices will be ordered and plotted counter-clockwise:\nlabels = 'class0','class1','class2','class3'\nex = ex2\nsizes = [ex.y_pred_0,ex.y_pred_1,ex.y_pred_2,1-ex.y_pred_0-ex.y_pred_1-ex.y_pred_2]\nexplode = (0, 0.1, 0, 0) # only \"explode\" the 2nd slice (i.e. 'Hogs')\n\nfig1, ax1 = plt.subplots()\nax1.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%',\n shadow=True, startangle=90)\nax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.\n\nplt.show()", "_____no_output_____" ], [ "ex3 = X_train[X_train['endUserRef']=='KYPPWBZJQ'].iloc[5]\n# Pie chart, where the slices will be ordered and plotted counter-clockwise:\nlabels = 'class0','class1','class2','class3'\nex = ex3\nsizes = [ex.y_pred_0,ex.y_pred_1,ex.y_pred_2,1-ex.y_pred_0-ex.y_pred_1-ex.y_pred_2]\nexplode = (0, 0.1, 0, 0) # only \"explode\" the 2nd slice (i.e. 'Hogs')\n\nfig1, ax1 = plt.subplots()\nax1.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%',\n shadow=True, startangle=90)\nax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.\n\nplt.show()", "_____no_output_____" ], [ "ex4 = X_train[X_train['endUserRef']=='KYPPWBZJQ'].iloc[6]\n# Pie chart, where the slices will be ordered and plotted counter-clockwise:\nlabels = 'class0','class1','class2','class3'\nex = ex4\nsizes = [ex.y_pred_0,ex.y_pred_1,ex.y_pred_2,1-ex.y_pred_0-ex.y_pred_1-ex.y_pred_2]\nexplode = (0, 0.1, 0, 0) # only \"explode\" the 2nd slice (i.e. 'Hogs')\n\nfig1, ax1 = plt.subplots()\nax1.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%',\n shadow=True, startangle=90)\nax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.\n\nplt.show()", "_____no_output_____" ] ], [ [ "Example for user 'AIWZOZZIY'", "_____no_output_____" ] ], [ [ "ex1 = X_train[X_train['endUserRef']=='AIWZOZZIY'].iloc[5]\n# Pie chart, where the slices will be ordered and plotted counter-clockwise:\nlabels = 'class0','class1','class2','class3'\nex = ex1\nsizes = [ex.y_pred_0,ex.y_pred_1,ex.y_pred_2,1-ex.y_pred_0-ex.y_pred_1-ex.y_pred_2]\nexplode = (0, 0.1, 0, 0) # only \"explode\" the 2nd slice (i.e. 
'Hogs')\n\nfig1, ax1 = plt.subplots()\nax1.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%',\n shadow=True, startangle=90)\nax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.\n\nplt.show()", "_____no_output_____" ], [ "ex2 = X_train[X_train['endUserRef']=='AIWZOZZIY'].iloc[6]\n# Pie chart, where the slices will be ordered and plotted counter-clockwise:\nlabels = 'class0','class1','class2','class3'\nex = ex2\nsizes = [ex.y_pred_0,ex.y_pred_1,ex.y_pred_2,1-ex.y_pred_0-ex.y_pred_1-ex.y_pred_2]\nexplode = (0, 0.1, 0, 0) # only \"explode\" the 2nd slice (i.e. 'Hogs')\n\nfig1, ax1 = plt.subplots()\nax1.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%',\n shadow=True, startangle=90)\nax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.\n\nplt.show()", "_____no_output_____" ], [ "cancel_ratio = X_train[['endUserRef','operation','price']].groupby(['endUserRef','operation']).count()\ncancel_ratio = cancel_ratio/cancel_ratio.groupby(['endUserRef']).sum()\ncancel_ratio = cancel_ratio.reset_index()\ncancel_ratio = cancel_ratio[cancel_ratio.operation == 'CANCEL'].rename(columns={'price':'cancel_ratio'}).reset_index().drop(['index','operation'], axis=1)\ncancel_ratio = cancel_ratio.sort_values(by='cancel_ratio',ascending=False)\ncancel_ratio", "_____no_output_____" ], [ "np.arange(0,1,0.2)", "_____no_output_____" ], [ "plt.rcdefaults()\nfig, ax = plt.subplots()\nax.barh(cancel_ratio[8:18]['endUserRef'],cancel_ratio[8:18]['cancel_ratio']*100)\nax.set_xlabel('Cancel Percentage')\nax.set_title('User vs. Cancel Ratio')\nplt.show()", "_____no_output_____" ], [ "y_train = X_train['y_label']\nX_train = X_train.drop(['y_label'],axis=1)\nX_train[\"timeSinceLastTrade\"] = X_train[[\"timestamp\",\"endUserRef\"]].groupby(\"endUserRef\").diff()\nX_test[\"timeSinceLastTrade\"] = X_test[[\"timestamp\",\"endUserRef\"]].groupby(\"endUserRef\").diff()", "_____no_output_____" ], [ "X_train, X_test, y_train = read_data()\nX_train['y_label'] = y_train", "_____no_output_____" ], [ "X_train[\"timeSinceLastTradeSameOrder\"] = X_train[[\"timestamp\",\"orderId\"]].groupby(\"orderId\").diff()\nX_train.loc[(X_train['timeSinceLastTradeSameOrder']<=45) & (X_train['source']!='SYSTEM'),'y_label']= 3\ny_train = X_train['y_label']\nX_train = X_train.drop(['y_label'],axis=1)\nX_train[\"timeSinceLastTrade\"] = X_train[[\"timestamp\",\"endUserRef\"]].groupby(\"endUserRef\").diff()\nX_test[\"timeSinceLastTrade\"] = X_test[[\"timestamp\",\"endUserRef\"]].groupby(\"endUserRef\").diff()", "_____no_output_____" ], [ "pd.DataFrame(X_train).to_csv(\"bigLabeledData.csv\")", "_____no_output_____" ], [ "def preprocess_label(X_train, y_train):\n X_train['y_label'] = y_train\n X_train[\"timeSinceLastTradeSameOrder\"] = X_train[[\"timestamp\",\"orderId\"]].groupby(\"orderId\").diff()\n X_train.loc[(X_train['timeSinceLastTradeSameOrder']<=45) & (X_train['source']!='SYSTEM'),'y_label']= 3\n y_train = X_train['y_label']\n X_train = X_train.drop(['y_label'],axis=1)\n X_train[\"timeSinceLastTrade\"] = X_train[[\"timestamp\",\"endUserRef\"]].groupby(\"endUserRef\").diff()\n X_test[\"timeSinceLastTrade\"] = X_test[[\"timestamp\",\"endUserRef\"]].groupby(\"endUserRef\").diff()\n# X_train = X_train.sample(n=200000)\n return X_train,y_train", "_____no_output_____" ], [ "def balance_data(X_train, X_test, y_train):\n X_train, y_train = preprocess_label(X_train, y_train)\n X_train['y_label'] = y_train\n X_train_0 = X_train[X_train['y_label']==0].sample(n=100000, replace = True)\n X_train_1 = 
X_train[X_train['y_label']==1].sample(n=100000, replace = True)\n X_train_2 = X_train[X_train['y_label']==2].sample(n=100000, replace = True)\n X_train_3 = X_train[X_train['y_label']==3].sample(n=100000, replace = True)\n X_train = pd.concat([X_train_0, X_train_1, X_train_2, X_train_3])\n X_train = X_train.sample(frac=1).reset_index(drop=True)\n y_train = X_train['y_label']\n X_train = X_train.drop(['y_label'],axis=1)\n return X_train, X_test, y_train", "_____no_output_____" ], [ "X_train, X_test, y_train = read_data_small()\nX_train, X_test, y_train = balance_data(X_train, X_test, y_train)", "_____no_output_____" ], [ "def format_data(df):\n # encode the binaries\n df[\"isBid\"] = df.isBid*1\n df[\"isBuyer\"] = df.isBuyer*1\n df[\"isAggressor\"] = df.isAggressor*1\n df[\"type\"] = (df.type == \"ORDER\")*1\n df[\"source\"] = (df.source==\"USER\")*1\n\n df[\"orderId\"] = df.orderId.str.split('-').str[-1]\n df[\"tradeId\"] = df.tradeId.str.split('-').str[-1]\n df[\"bidOrderId\"] = df.bidOrderId.str.split('-').str[-1]\n df[\"askOrderId\"] = df.askOrderId.str.split('-').str[-1]\n\n # encode the multiple lable data\n df['operation'] = df['operation'].fillna('SUCCESS')\n tmp_operation = pd.DataFrame(pd.get_dummies(df.operation), columns=df.operation.unique()[:-1])\n df = pd.concat([df, tmp_operation], axis=1)\n df['op_before'] = df.groupby(['endUserRef'])['operation'].transform(lambda x:x.shift(1))\n tmp_op_before = pd.DataFrame(pd.get_dummies(df.op_before), columns=df.op_before.unique()[:-1])\n df = pd.concat([df, tmp_op_before], axis=1)\n df['multiple_cancel'] = ((df['op_before'] == 'CANCEL') & (df['operation'] == 'CANCEL'))*1.0 \n\n df['vol_before'] = df.groupby(['endUserRef'])['volume'].transform(lambda x:x.shift(1))\n df['price_before'] = df.groupby(['endUserRef'])['price'].transform(lambda x:x.shift(1))\n df['bestBid'] = df['bestBid'].fillna(1)\n df['midpoint'] = (df.bestBidVolume - df.bestAskVolume)/df.bestBid\n\n# cancel_ratio = df[['endUserRef','operation','price']].groupby(['endUserRef','operation']).count()\n# cancel_ratio = cancel_ratio/cancel_ratio.groupby(['endUserRef']).sum()\n# cancel_ratio = cancel_ratio.reset_index()\n# cancel_ratio = cancel_ratio[cancel_ratio.operation == 'CANCEL'].rename(columns={'price':'cancel_ratio'}).reset_index().drop(['index','operation'], axis=1)\n# df = df.merge(cancel_ratio, how='outer', on='endUserRef')\n\n df['isBidBefore'] = df.groupby(['endUserRef'])['isBid'].transform(lambda x:x.shift(1))\n df.loc[df['isBid']!=df['isBidBefore'],'flip'] = 1\n df.loc[df['isBid']==df['isBidBefore'],'flip'] = 0\n df = df.drop(['isBidBefore'],axis=1)\n\n# categorical data\n tmp_endUserRef = pd.DataFrame(pd.get_dummies(df.endUserRef), columns=df.endUserRef.unique()[:-1])\n df = pd.concat([df, tmp_endUserRef], axis=1)\n tmp_ob = pd.DataFrame(pd.get_dummies(df.obId), columns=df.obId.unique()[:-1])\n df = pd.concat([df, tmp_ob], axis=1)\n\n # # smartly engineered features can be very useful to improve the classification resutls\n# df[\"timeSinceLastTrade\"] = df[[\"timestamp\",\"endUserRef\"]].groupby(\"endUserRef\").diff()\n df[\"averageTradeSize\"] = df[[\"volume\",\"endUserRef\"]].groupby(\"endUserRef\").mean()\n \n # cancel, volume>0, after successful trade\n df['cancelAfterSuccessfulTrade'] = (df['operation'] == 'CANCEL') & (df['volume']>0) & (df['op_before'] == 'SUCCESS')*1.0\n df['isVolumeTooBig'] = (df['volume'] > 3*df['vol_before'])*1.0\n \n df[\"numberEditsOnOrders\"] = df[[\"timestamp\", \"orderId\"]].groupby(\"orderId\").count()\n df = df.fillna(-1)\n df = 
df.drop(['operation', 'op_before', 'obId', 'member','user','endUserRef'], axis=1)\n\n return(df)", "_____no_output_____" ] ], [ [ "# MODEL", "_____no_output_____" ] ], [ [ "# import libraries\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.preprocessing import scale\nfrom sklearn.model_selection import cross_validate\nfrom sklearn.naive_bayes import BernoulliNB\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.ensemble import RandomForestClassifier", "_____no_output_____" ], [ "def detect_spoofying_log_reg(X_train, X_test, y_train):\n \n # clean up the data\n# X_train, y_train = preprocess_label(X_train, y_train)\n X_clean = format_data(pd.concat([X_train, X_test]))\n X_train_clean = X_clean.iloc[:X_train.shape[0],:]\n X_test_clean = X_clean.iloc[X_train.shape[0]:,:]\n X_train_clean_scaled = scale(X_train_clean)\n X_test_clean_scaled = scale(X_test_clean)\n\n # fit classifier\n clf = LogisticRegression(random_state=0, class_weight='balanced').fit(X_train_clean_scaled, y_train)\n \n dump(clf, 'logreg_model.joblib') \n \n y_train_prob_pred = clf.predict_proba(X_train_clean_scaled)\n y_test_prob_pred = clf.predict_proba(X_test_clean_scaled)\n \n return y_train_prob_pred, y_test_prob_pred\n\ndef detect_spoofying_Bernoulli_NB(X_train, X_test, y_train):\n \n # clean up the data\n# X_train, y_train = preprocess_label(X_train, y_train)\n X_clean = format_data(pd.concat([X_train, X_test]))\n X_train_clean = X_clean.iloc[:X_train.shape[0],:]\n X_test_clean = X_clean.iloc[X_train.shape[0]:,:]\n X_train_clean_scaled = scale(X_train_clean)\n X_test_clean_scaled = scale(X_test_clean)\n\n # fit classifier\n clf = BernoulliNB(alpha=1.0, binarize=0.0, fit_prior=True, class_prior=None).fit(X_train_clean_scaled, y_train)\n y_train_prob_pred = clf.predict_proba(X_train_clean_scaled)\n y_test_prob_pred = clf.predict_proba(X_test_clean_scaled)\n \n dump(clf, 'bernoulli_nb_model.joblib')\n \n return y_train_prob_pred, y_test_prob_pred\n\ndef detect_spoofying_GBC(X_train, X_test, y_train):\n \n # clean up the data\n# X_train, y_train = preprocess_label(X_train, y_train)\n X_clean = format_data(pd.concat([X_train, X_test]))\n X_train_clean = X_clean.iloc[:X_train.shape[0],:]\n X_test_clean = X_clean.iloc[X_train.shape[0]:,:]\n X_train_clean_scaled = scale(X_train_clean)\n X_test_clean_scaled = scale(X_test_clean)\n\n # fit classifier\n clf = GradientBoostingClassifier(n_estimators=10, learning_rate=0.5,\n max_depth=2, random_state=3).fit(X_train_clean_scaled, y_train)\n y_train_prob_pred = clf.predict_proba(X_train_clean_scaled)\n y_test_prob_pred = clf.predict_proba(X_test_clean_scaled)\n\n dump(clf, 'gbc.joblib')\n\n return y_train_prob_pred, y_test_prob_pred\n\ndef detect_spoofying_random_forest(X_train, X_test, y_train):\n \n # clean up the data\n# X_train, y_train = preprocess_label(X_train, y_train)\n X_clean = format_data(pd.concat([X_train, X_test]))\n X_train_clean = X_clean.iloc[:X_train.shape[0],:]\n X_test_clean = X_clean.iloc[X_train.shape[0]:,:]\n X_train_clean_scaled = scale(X_train_clean)\n X_test_clean_scaled = scale(X_test_clean)\n \n # fit classifier\n clf = RandomForestClassifier(n_estimators=50, max_depth=10,\n min_samples_split=2, random_state=0).fit(X_train_clean_scaled, y_train)\n \n dump(clf, 'rf_model.joblib')\n \n y_train_prob_pred = clf.predict_proba(X_train_clean_scaled)\n y_test_prob_pred = clf.predict_proba(X_test_clean_scaled)\n \n return y_train_prob_pred, y_test_prob_pred", "_____no_output_____" ] ], [ [ "# SCORING", "_____no_output_____" ] 
], [ [ "from sklearn.metrics import cohen_kappa_score\n\ndef score(y_pred, y_true):\n \"\"\"\n y_pred: a numpy 4d array of probabilities of point assigned to each label\n y_true: a numpy array of true labels\n \"\"\"\n y_pred_label = np.argmax(y_pred, axis=1)\n return cohen_kappa_score(y_pred_label, y_true)\n\ndef wrapper(detect_spoofying):\n # read in data\n # or if you have the computational power to work with the big data set, \n # you can comment out the read_data_samll line and uncomment the following read_data_big\n X_train, X_test, y_train = read_data_small()\n X_train, X_test, y_train = balance_data(X_train, X_test, y_train)\n \n # process the data, train classifier and output probability matrix\n y_train_prob_pred, y_test_prob_pred = detect_spoofying(X_train, X_test, y_train)\n \n # score the predictions\n score_train = score(y_train_prob_pred, y_train)\n # score_test = score(y_test_prob_pred, y_test)\n \n # return the scores\n return score_train, y_train_prob_pred, y_test_prob_pred", "_____no_output_____" ] ], [ [ "# k-fold cross validation", "_____no_output_____" ] ], [ [ "### optional: examples of k-fold cross validation ###\n# k-fold cross validation can help you compare the classification models\nfrom sklearn.model_selection import KFold\nn = 5 # here we choose a 10 fold cross validation\nkf = KFold(n_splits = n)\n# X_train, X_test, y_train = read_data_small()\nkf.get_n_splits(X_train)\nprint(kf)\nkf_scores = pd.DataFrame(np.zeros([n,2]), columns=[\"train score\", \"test score\"])\nrowindex = 0\nfor train_index, test_index in kf.split(X_train):\n print(\"TRAIN:\", train_index, \"TEST:\", test_index)\n print(X_train.index)\n print(y_train)\n X_train_kf, X_test_kf = X_train.iloc[train_index], X_train.iloc[test_index]\n y_train_kf, y_test_kf = y_train[train_index], y_train[test_index]\n y_train_prob_pred_kf, y_test_prob_pred_kf = detect_spoofying_random_forest(X_train_kf, X_test_kf, y_train_kf)\n score_train_kf = score(y_train_prob_pred_kf, y_train_kf)\n score_test_kf = score(y_test_prob_pred_kf, y_test_kf)\n kf_scores.iloc[rowindex, 0] = score_train_kf\n kf_scores.iloc[rowindex, 1] = score_test_kf\n print(score_train_kf, score_test_kf)\n rowindex += 1", "KFold(n_splits=5, random_state=None, shuffle=False)\nTRAIN: [ 80000 80001 80002 ..., 399997 399998 399999] TEST: [ 0 1 2 ..., 79997 79998 79999]\nRangeIndex(start=0, stop=400000, step=1)\n0 0\n1 3\n2 3\n3 1\n4 0\n5 0\n6 1\n7 2\n8 0\n9 3\n10 0\n11 2\n12 1\n13 1\n14 1\n15 0\n16 3\n17 1\n18 1\n19 2\n20 1\n21 3\n22 1\n23 0\n24 2\n25 3\n26 3\n27 0\n28 1\n29 0\n ..\n399970 3\n399971 0\n399972 1\n399973 0\n399974 3\n399975 3\n399976 0\n399977 2\n399978 3\n399979 3\n399980 0\n399981 0\n399982 0\n399983 2\n399984 2\n399985 2\n399986 3\n399987 3\n399988 3\n399989 2\n399990 1\n399991 3\n399992 3\n399993 2\n399994 1\n399995 2\n399996 2\n399997 1\n399998 0\n399999 3\nName: y_label, Length: 400000, dtype: int64\n" ], [ "kf_scores", "_____no_output_____" ], [ "score_train, y_train_prob_pred, y_test_prob_pred = wrapper(detect_spoofying_random_forest)", "/Users/fransiscasusan/anaconda3/lib/python3.6/site-packages/ipykernel/__main__.py:67: DataConversionWarning: Data with input dtype bool, uint8, int64, float64, object were all converted to float64 by the scale function.\n/Users/fransiscasusan/anaconda3/lib/python3.6/site-packages/sklearn/preprocessing/data.py:180: UserWarning: Numerical issues were encountered when centering the data and might not be solved. Dataset may contain too large values. 
You may need to prescale your features.\n warnings.warn(\"Numerical issues were encountered \"\n/Users/fransiscasusan/anaconda3/lib/python3.6/site-packages/ipykernel/__main__.py:68: DataConversionWarning: Data with input dtype bool, uint8, int64, float64, object were all converted to float64 by the scale function.\n/Users/fransiscasusan/anaconda3/lib/python3.6/site-packages/sklearn/preprocessing/data.py:180: UserWarning: Numerical issues were encountered when centering the data and might not be solved. Dataset may contain too large values. You may need to prescale your features.\n warnings.warn(\"Numerical issues were encountered \"\n" ], [ "score_train", "_____no_output_____" ], [ "### optional: examples of k-fold cross validation ###\n# k-fold cross validation can help you compare the classification models\nfrom sklearn.model_selection import KFold\nn = 5 # here we choose a 10 fold cross validation\nkf = KFold(n_splits = n)\n# X_train, X_test, y_train = read_data_small()\n# X_train, X_test, y_train = balance_data(X_train, X_test, y_train)\nkf.get_n_splits(X_train)\n# print(kf)\nkf_scores = pd.DataFrame(np.zeros([n,2]), columns=[\"train score\", \"test score\"])\nrowindex = 0\nfor train_index, test_index in kf.split(X_train):\n print(\"TRAIN:\", train_index, \"TEST:\", test_index)\n# print(X_train.index)\n# print(y_train)\n X_train_kf, X_test_kf = X_train.iloc[train_index], X_train.iloc[test_index]\n y_train_kf, y_test_kf = y_train[train_index], y_train[test_index]\n y_train_prob_pred_kf, y_test_prob_pred_kf = detect_spoofying_GBC(X_train_kf, X_test_kf, y_train_kf)\n score_train_kf = score(y_train_prob_pred_kf, y_train_kf)\n score_test_kf = score(y_test_prob_pred_kf, y_test_kf)\n kf_scores.iloc[rowindex, 0] = score_train_kf\n kf_scores.iloc[rowindex, 1] = score_test_kf\n print(score_train_kf, score_test_kf)\n rowindex += 1", "TRAIN: [ 80000 80001 80002 ..., 399997 399998 399999] TEST: [ 0 1 2 ..., 79997 79998 79999]\n" ], [ "kf_scores", "_____no_output_____" ], [ "score_train, y_train_prob_pred, y_test_prob_pred = wrapper(detect_spoofying_GBC)", "_____no_output_____" ], [ "score_train", "_____no_output_____" ], [ "pd.DataFrame(y_train_prob_pred).to_csv(\"y_train_prob_pred.csv\")\npd.DataFrame(y_test_prob_pred).to_csv(\"y_test_prob_pred.csv\")", "_____no_output_____" ], [ "score_train, y_train_prob_pred, y_test_prob_pred = wrapper(detect_spoofying_GBC)\nscore_train", "_____no_output_____" ], [ "pd.DataFrame(y_train_prob_pred).to_csv(\"y_train_prob_pred.csv\")\npd.DataFrame(y_test_prob_pred).to_csv(\"y_test_prob_pred.csv\")", "_____no_output_____" ] ], [ [ "# BATCH", "_____no_output_____" ] ], [ [ "X_train = pd.read_csv(\"bigLabeledData.csv\")", "_____no_output_____" ], [ "X_clean = format_data(pd.concat([X_train, X_test]))\nX_train_clean = X_clean.iloc[:X_train.shape[0],:]\nX_test_clean = X_clean.iloc[X_train.shape[0]:,:]\nX_train_clean_scaled = scale(X_train_clean)\nX_test_clean_scaled = scale(X_test_clean)", "_____no_output_____" ], [ "# fit classifier\nclf = GradientBoostingClassifier(n_estimators=35, learning_rate=0.5,\n max_depth=3, random_state=3).fit(X_train_clean_scaled, y_train)\n# clf = LogisticRegression(random_state=0, class_weight='balanced').fit(X_train_clean_scaled, y_train)", "_____no_output_____" ], [ "dump(clf, 'gbc.joblib')", "_____no_output_____" ], [ "y_train_prob_pred = clf.predict_proba(X_train_clean_scaled)\ny_test_prob_pred = clf.predict_proba(X_test_clean_scaled)", "_____no_output_____" ], [ 
"pd.DataFrame(y_train_prob_pred).to_csv(\"y_train_prob_pred.csv\")\npd.DataFrame(y_test_prob_pred).to_csv(\"y_test_prob_pred.csv\")", "_____no_output_____" ] ], [ [ "# LSTM", "_____no_output_____" ] ], [ [ "X_train_clean_scaled.shape", "_____no_output_____" ], [ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim", "_____no_output_____" ], [ "model = nn.Sequential()", "_____no_output_____" ], [ "model.add(LSTM(50, activation='relu', input_shape=(n_steps, n_features)))\nmodel.add(Dense(1))\nmodel.compile(optimizer='adam', loss='mse')", "_____no_output_____" ], [ "# define model\nmodel = Sequential()\nmodel.add(LSTM(50, activation='relu', input_shape=(n_steps, n_features)))\nmodel.add(Dense(1))\nmodel.compile(optimizer='adam', loss='mse')", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
cb9ee240ca1e646b4b8706a937ec9dbdefacb076
4,485
ipynb
Jupyter Notebook
01_getting_started/04_overview_of_spark_sql_properties.ipynb
itversity/spark-sql
017181d9976e39848c5e46fc628a7ba9cbc38ec0
[ "MIT" ]
9
2020-12-26T11:03:45.000Z
2022-03-03T14:12:30.000Z
01_getting_started/04_overview_of_spark_sql_properties.ipynb
itversity/spark-sql
017181d9976e39848c5e46fc628a7ba9cbc38ec0
[ "MIT" ]
null
null
null
01_getting_started/04_overview_of_spark_sql_properties.ipynb
itversity/spark-sql
017181d9976e39848c5e46fc628a7ba9cbc38ec0
[ "MIT" ]
17
2020-12-26T20:23:45.000Z
2022-03-10T06:10:55.000Z
25.338983
189
0.527759
[ [ [ "## Overview of Spark SQL Properties\nLet us understand details about Spark SQL properties which control Spark SQL run time environment. ", "_____no_output_____" ] ], [ [ "%%HTML\n<iframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/2JrWYGbBR_8?rel=0&amp;controls=1&amp;showinfo=0\" frameborder=\"0\" allowfullscreen></iframe>", "_____no_output_____" ] ], [ [ "* Spark SQL inherits properties defined for Spark. There are some Spark SQL related properties as well and these are applicable even for Data Frames.\n* We can review these properties using Management Tools such as **Ambari** or **Cloudera Manager Web UI**\n* Spark run time behavior is controlled by HDFS Properties files, YARN Properties files, Hive Properties files etc in those clusters where Spark is integrated with Hadoop and Hive.\n* We can get all the properties using `SET;` in Spark SQL CLI\n\nLet us review some important properties in Spark SQL. \n\n```\nspark.sql.warehouse.dir\nspark.sql.catalogImplementation\n```\n* We can review the current value using `SET spark.sql.warehouse.dir;`", "_____no_output_____" ] ], [ [ "import org.apache.spark.sql.SparkSession\n\nval username = System.getProperty(\"user.name\")\nval spark = SparkSession.\n builder.\n config(\"spark.ui.port\", \"0\").\n config(\"spark.sql.warehouse.dir\", s\"/user/${username}/warehouse\").\n enableHiveSupport.\n master(\"yarn\").\n appName(s\"${username} | Spark SQL - Getting Started\").\n getOrCreate", "_____no_output_____" ], [ "%%sql\n\nSET", "_____no_output_____" ], [ "%%sql\n\nSET spark.sql.warehouse.dir", "_____no_output_____" ] ], [ [ "* Properties with default values does not show up as part of `SET` command. But we can check and overwrite the values - for example", "_____no_output_____" ] ], [ [ "%%sql\n\nSET spark.sql.shuffle.partitions", "_____no_output_____" ] ], [ [ "* We can overwrite property by setting value using the same **SET** command, eg:", "_____no_output_____" ] ], [ [ "%%sql\n\nSET spark.sql.shuffle.partitions=2", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb9ee39c1fe415b3d31b6e9373b2202a32aec2eb
224,517
ipynb
Jupyter Notebook
titanic.ipynb
ggljzr/titanic
5c6eea6c3873f9098da5be68c36a40ac6cdbb2be
[ "MIT" ]
null
null
null
titanic.ipynb
ggljzr/titanic
5c6eea6c3873f9098da5be68c36a40ac6cdbb2be
[ "MIT" ]
1
2016-11-25T20:55:16.000Z
2016-11-25T20:55:16.000Z
titanic.ipynb
ggljzr/titanic
5c6eea6c3873f9098da5be68c36a40ac6cdbb2be
[ "MIT" ]
null
null
null
353.570079
42,730
0.921111
[ [ [ "import numpy\nimport pandas\nimport matplotlib.pyplot\nimport matplotlib.style\nimport copy\nmatplotlib.style.use('ggplot')\n\n%matplotlib inline", "_____no_output_____" ], [ "titanic_data = pandas.read_csv('Titanic.csv', index_col=None)", "_____no_output_____" ] ], [ [ "How many passengers we know?", "_____no_output_____" ] ], [ [ "passengers_num = titanic_data.shape[0] #rows in Titanic.csv table\nprint(passengers_num)", "1313\n" ] ], [ [ "How many of them survived?", "_____no_output_____" ] ], [ [ "survived = titanic_data[titanic_data.loc[:, 'Survived'] == True] #rows from Titanic.csv, column 'Survived'\ntotal_survived_num = survived.shape[0] #number of rows with 1\nsurvived_percent = total_survived_num / (passengers_num / 100)\nprint('{} out of {} (~{:.5f} %)'.format(total_survived_num, passengers_num, survived_percent))", "450 out of 1313 (~34.27266 %)\n" ] ], [ [ "How many men survived? How many women?", "_____no_output_____" ] ], [ [ "men = titanic_data[titanic_data.loc[:, 'Sex'] == 'male']\nmen_num = men.shape[0]\nwomen_num = passengers_num - men_num\n\nmen_survived = men[men.loc[:, 'Survived'] == True]\nmen_survived_num = men_survived.shape[0]\nwomen_survived_num = total_survived_num - men_survived_num\n\nmen_survived_percent = men_survived_num / (men_num / 100)\nwomen_survived_percent = women_survived_num / (women_num / 100)\n\nprint('Men survived: {} out of {} (~{:.5f} %)'.format(men_survived_num, men_num, men_survived_percent))\nprint('Women survived: {} out of {} (~{:.5f} %)'.format(women_survived_num, women_num, women_survived_percent))", "Men survived: 142 out of 851 (~16.68625 %)\nWomen survived: 308 out of 462 (~66.66667 %)\n" ] ], [ [ "Stats for each class:", "_____no_output_____" ] ], [ [ "classes = titanic_data.PClass.unique()\nprint(classes)\ntitanic_data[titanic_data.loc[:, 'PClass'] == '*']", "['1st' '2nd' '*' '3rd']\n" ] ], [ [ "Now it seems that \"*\" class is only one person (that did not survive), so we should probably filter it out.", "_____no_output_____" ] ], [ [ "classes = numpy.delete(classes, numpy.where(classes=='*'))\nprint(classes)", "['1st' '2nd' '3rd']\n" ], [ "cols = ['class', 'survived', 'deceased', 'total', 'percent of class', 'percent of all survivors']\nclasses_data = pandas.DataFrame()\n\nfor cls in classes:\n total = titanic_data[titanic_data.loc[:, 'PClass'] == cls]\n total_num = total.shape[0]\n \n survived = total[total.loc[:, 'Survived'] == True]\n survived_num = survived.shape[0]\n deceased_num = total_num - survived_num\n \n percent_class = survived_num / (total_num / 100)\n percent_total = survived_num / (total_survived_num / 100)\n row = pandas.DataFrame([{'class' : cls, 'survived' : survived_num, \n 'deceased' : deceased_num, 'total' : total_num, \n 'percent of class' : percent_class,\n 'percent of all survivors' : percent_total }], columns=cols)\n classes_data = classes_data.append(row, ignore_index=True)\n\nclasses_data", "_____no_output_____" ] ], [ [ "Survival rate based on age:", "_____no_output_____" ] ], [ [ "max_age = titanic_data.loc[:, 'Age'].max()\nsurvived_ages = pandas.DataFrame(columns=['decade', 'survived', 'total', 'percent of all survivors'])\nfor decade in range(0,int(max_age),10):\n total_decade = titanic_data[(titanic_data.loc[:, 'Age'] >= decade) &\n (titanic_data.loc[:, 'Age'] < decade + 10)]\n \n survived_decade = total_decade[total_decade.loc[:, 'Survived'] == True]\n \n \n percent = survived_decade.shape[0] / (total_survived_num / 100)\n row = pandas.DataFrame([{'decade' : '{}-{}'.format(decade, decade + 
9), \n 'survived' : survived_decade.shape[0], \n 'total' : total_decade.shape[0],\n 'percent of all survivors' : percent}])\n \n survived_ages = survived_ages.append(row, ignore_index=True)\n\ntotal_na = titanic_data[titanic_data.loc[:, 'Age'].isnull()]\nsurvived_na = total_na[total_na.loc[:, 'Survived'] == True]\npercent_na = survived_na.shape[0] / (total_survived_num / 100)\n\nrow = pandas.DataFrame([{'decade' : 'No age data', \n 'survived' : survived_na.shape[0], \n 'total' : total_na.shape[0],\n 'percent of all survivors' : percent_na}])\n\nsurvived_ages = survived_ages.append(row, ignore_index=True)", "_____no_output_____" ], [ "survived_ages['total'].plot.bar(color='r')\nsurvived_ages['survived'].plot.bar(color='b')\n\nmatplotlib.pyplot.legend(loc='upper left')\nmatplotlib.pyplot.title('Survival based on age group')\nmatplotlib.pyplot.xlabel('Age groups')\nmatplotlib.pyplot.ylabel('Number of passengers')\nmatplotlib.pyplot.xticks(list(range(len(survived_ages))), survived_ages['decade'].values, rotation = 'vertical')\nmatplotlib.pyplot.show()", "_____no_output_____" ], [ "survived_ages['percent of all survivors'].plot.bar()\n\nmatplotlib.pyplot.title('% of all survivors ({}) based on age group'.format(total_survived_num))\nmatplotlib.pyplot.xlabel('Age groups')\nmatplotlib.pyplot.ylabel('% of all survivors')\nmatplotlib.pyplot.xticks(list(range(len(survived_ages))), survived_ages['decade'].values, rotation = 'vertical')\nmatplotlib.pyplot.show()", "_____no_output_____" ] ], [ [ "Relationship between passenger's age and class:", "_____no_output_____" ] ], [ [ "classes_nums = dict((elem, 0) for elem in classes)\nclasses_percent = copy.deepcopy(classes_nums)\n\nage_class = pandas.DataFrame(columns=classes_nums)\nage_class_percent = pandas.DataFrame(columns=classes_percent)\n\nfor decade in range(0,int(max_age),10):\n \n total_decade = titanic_data[(titanic_data.loc[:, 'Age'] >= decade) &\n (titanic_data.loc[:, 'Age'] < decade + 10)]\n \n classes_nums['decade'] = '{}-{}'.format(decade, decade + 9)\n classes_percent['decade'] = '{}-{}'.format(decade, decade + 9)\n for cls in classes:\n total_cls = total_decade[total_decade.loc[:, 'PClass'] == cls]\n classes_nums[cls] = total_cls.shape[0]\n classes_percent[cls] = total_cls.shape[0] / (total_decade.shape[0] / 100)\n \n \n age_class = age_class.append([classes_nums], ignore_index=True)\n age_class_percent = age_class_percent.append([classes_percent], ignore_index=True)", "_____no_output_____" ] ], [ [ "Number of passengers in each class by age:", "_____no_output_____" ] ], [ [ "age_class.plot.bar()\nmatplotlib.pyplot.xticks(list(range(len(age_class))), age_class['decade'].values, rotation = 'vertical')\nmatplotlib.pyplot.title('Number of passengers in each class by age')\nmatplotlib.pyplot.xlabel('Age groups')\nmatplotlib.pyplot.ylabel('Number of passengers')\nmatplotlib.pyplot.show()", "_____no_output_____" ] ], [ [ "Distribution of passengers in each age group between classes (meaning in class *C* was *y %* of passengers from age group *x*):", "_____no_output_____" ] ], [ [ "age_class_percent.plot.bar()\nmatplotlib.pyplot.xticks(list(range(len(age_class_percent))), age_class_percent['decade'].values, rotation = 'vertical')\nmatplotlib.pyplot.title('Distribution of passengers in each age group between classes')\nmatplotlib.pyplot.xlabel('Age groups')\nmatplotlib.pyplot.ylabel('% of passengers')\nmatplotlib.pyplot.show()", "_____no_output_____" ] ], [ [ "If we want to plot data for passengers without known age:", "_____no_output_____" ] ], [ 
[ "classes_nums['decade'] = 'No age data'\nclasses_percent['decade'] = 'No age data'\nfor cls in classes:\n total_cls = total_na[total_na.loc[:, 'PClass'] == cls]\n classes_nums[cls] = total_cls.shape[0]\n classes_percent[cls] = total_cls.shape[0] / (total_na.shape[0] / 100)\n \nage_class_na = age_class.append([classes_nums], ignore_index=True)\nage_class_percent_na = age_class_percent.append([classes_percent], ignore_index=True)", "_____no_output_____" ], [ "age_class_na.plot.bar()\nmatplotlib.pyplot.xticks(list(range(len(age_class_na))), age_class_na['decade'].values, rotation = 'vertical')\nmatplotlib.pyplot.title('Number of passengers in each class by age\\n (including passengers with uknown age)')\nmatplotlib.pyplot.xlabel('Age groups')\nmatplotlib.pyplot.ylabel('Number of passengers')\nmatplotlib.pyplot.show()", "_____no_output_____" ], [ "age_class_percent_na.plot.bar()\nmatplotlib.pyplot.xticks(list(range(len(age_class_percent_na))), age_class_percent_na['decade'].values, rotation = 'vertical')\nmatplotlib.pyplot.title('Distribution of passengers in each age group between classes\\n (including passengers with uknown age)')\nmatplotlib.pyplot.xlabel('Age groups')\nmatplotlib.pyplot.ylabel('% of passengers')\nmatplotlib.pyplot.show()", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
cb9f1655566fcb720e3269bb7cb1fb31e7dbe64e
23,794
ipynb
Jupyter Notebook
learning/pipeline.ipynb
abanop66/data-science
c03b2ca3cec9cb0b2c4cdb01c6b28a0a8bd80a51
[ "Apache-2.0" ]
null
null
null
learning/pipeline.ipynb
abanop66/data-science
c03b2ca3cec9cb0b2c4cdb01c6b28a0a8bd80a51
[ "Apache-2.0" ]
null
null
null
learning/pipeline.ipynb
abanop66/data-science
c03b2ca3cec9cb0b2c4cdb01c6b28a0a8bd80a51
[ "Apache-2.0" ]
1
2021-06-17T03:40:17.000Z
2021-06-17T03:40:17.000Z
31.349144
116
0.348113
[ [ [ "# notes : when you use pipeline change the name of original train data \n#if you read_csv in variable called train\nchange it to x_train ", "_____no_output_____" ] ], [ [ "import pandas as pd", "_____no_output_____" ], [ "train = pd.read_csv('../datasets/titanic dataset/train.csv')\nx_test = pd.read_csv('../datasets/titanic dataset/test.csv')\ny_test_original = pd.read_csv('../datasets/titanic dataset/gender_submission.csv')", "_____no_output_____" ], [ "train", "_____no_output_____" ], [ "y_train=train['Survived']\ny_train\nx_train=train.drop('Survived',axis=1)", "_____no_output_____" ], [ "cat_col =[col for col in x_train.columns if x_train[col].nunique() < 10 and x_train[col].dtype == 'object']\nprint(cat_col)", "['Sex', 'Embarked']\n" ], [ "num_col =[col for col in x_train.columns if x_train[col].dtype in ['int64','float64']]\nprint(num_col)", "['PassengerId', 'Pclass', 'Age', 'SibSp', 'Parch', 'Fare']\n" ], [ "x_train.isna().sum()", "_____no_output_____" ] ], [ [ "# pipeline", "_____no_output_____" ] ], [ [ "from sklearn.pipeline import Pipeline\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.preprocessing import OneHotEncoder", "_____no_output_____" ], [ "num_transformer = SimpleImputer(strategy='constant')", "_____no_output_____" ], [ "cat_transformer = Pipeline(steps=[\n ('impute',SimpleImputer(strategy='constant')),\n ('OHE',OneHotEncoder(handle_unknown='ignore'))\n])", "_____no_output_____" ], [ "preprocessor = ColumnTransformer(transformers=[\n ('num',num_transformer,num_col),\n ('cat',cat_transformer,cat_col)\n])", "_____no_output_____" ], [ "from sklearn.ensemble import RandomForestClassifier\nmodel1 = RandomForestClassifier(n_estimators=100,random_state=0)", "_____no_output_____" ], [ "my_pipeline = Pipeline(steps=[\n ('preprocessor',preprocessor),\n ('model',model1)\n])", "_____no_output_____" ], [ "x_train.head()", "_____no_output_____" ], [ "my_pipeline.fit(x_train,y_train.ravel())", "_____no_output_____" ], [ "pred= my_pipeline.predict(x_test)\nprint(pred)", "[0 0 0 0 0 0 0 0 1 0 0 0 1 0 1 1 0 0 0 1 1 1 1 1 1 0 1 0 0 0 0 0 0 0 1 0 0\n 0 0 0 0 0 0 1 1 0 1 0 1 1 0 0 1 1 0 0 0 0 0 1 0 0 0 0 1 1 0 0 1 1 0 0 0 1\n 1 1 0 1 0 0 1 0 0 0 0 0 0 0 1 1 0 0 1 0 1 0 1 0 0 0 1 0 0 0 1 0 0 0 0 0 0\n 1 1 0 1 0 0 1 1 1 1 0 1 0 0 1 0 1 0 0 0 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0\n 0 0 1 0 0 1 0 0 1 0 0 1 1 1 1 0 0 0 1 0 1 0 0 0 0 0 0 1 1 1 1 1 0 0 1 0 1\n 0 1 0 0 0 0 0 1 0 1 0 1 0 0 0 1 1 0 1 0 1 0 0 1 0 0 0 0 0 0 0 1 0 1 0 1 0\n 1 0 1 0 0 1 0 0 0 1 0 0 1 0 1 0 1 1 1 1 0 0 0 0 1 0 1 0 1 0 1 0 0 0 0 0 1\n 0 0 0 1 1 0 0 0 0 0 0 0 0 1 1 0 1 0 0 0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0\n 1 0 0 0 0 0 0 0 1 1 1 1 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 0 1 0 1 0 0 0 1 1 0\n 0 0 0 0 0 0 0 0 0 0 1 1 0 0 1 0 1 1 0 0 0 0 0 1 0 0 0 0 1 1 0 1 0 0 0 1 0\n 0 1 0 0 1 1 0 0 0 0 0 0 1 0 0 1 0 0 0 0 0 1 1 0 0 1 0 1 0 0 1 0 1 0 1 0 0\n 0 1 1 1 1 0 0 1 0 0 1]\n" ], [ "y_test_original.drop('PassengerId',axis=1)\ny_test=y_test_original[\"Survived\"]\ny_test\n", "_____no_output_____" ], [ "from sklearn.metrics import classification_report\nprint(classification_report(y_test ,pred))", " precision recall f1-score support\n\n 0 0.83 0.88 0.85 266\n 1 0.76 0.68 0.72 152\n\n accuracy 0.81 418\n macro avg 0.79 0.78 0.79 418\nweighted avg 0.80 0.81 0.80 418\n\n" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb9f2b7165fbb9e35edc48dbdfdf552913bfa624
8,895
ipynb
Jupyter Notebook
TAP Affect (python).ipynb
heta-io/notebooks
761beefe150c773124d9f2bcc2a21f1054e6aed4
[ "Apache-2.0" ]
null
null
null
TAP Affect (python).ipynb
heta-io/notebooks
761beefe150c773124d9f2bcc2a21f1054e6aed4
[ "Apache-2.0" ]
null
null
null
TAP Affect (python).ipynb
heta-io/notebooks
761beefe150c773124d9f2bcc2a21f1054e6aed4
[ "Apache-2.0" ]
null
null
null
27.884013
282
0.532659
[ [ [ "# TAP Affect\n\nThis notebook was used as part of the [HETA project](http://heta.io) to experiment with [TAP](https://github.com/heta-io/tap) affect thresholds. It makes use of the [TapCliPy](https://github.com/heta-io/tapclipy) python client for TAP to call the `affectExpressions` query.\n\nTo use this notebook for your own tests, you will need:\n 1. The URL of your TAP server\n 2. Save the text files that you want to work with into the same directory as this notebook", "_____no_output_____" ] ], [ [ "# Install the TAP Python Client\n!pip install 'tapclipy>=0.1.4'", "Requirement already satisfied: tapclipy>=0.1.4 in /opt/conda/lib/python3.6/site-packages\n\u001b[33mYou are using pip version 9.0.1, however version 10.0.1 is available.\nYou should consider upgrading via the 'pip install --upgrade pip' command.\u001b[0m\n" ], [ "# Import the client library\nfrom tapclipy import tap_connect", "_____no_output_____" ] ], [ [ "### Connect to TAP and retrieve the current schema\nAs TAP exposes a GraphQL API, there can be changes in the schema over time. After connecting to TAP, this schema needs to be loaded into the client.", "_____no_output_____" ] ], [ [ "# Set the url for your TAP server\ntapURL = 'http://localhost:9000'\n# Create TAP Connection\ntap = tap_connect.Connect(tapURL)\n# Load the Current Schema\ntap.fetch_schema()", "_____no_output_____" ], [ "#Print out schema fields\nfor query,type in tap.schema_query_name_types().items():\n print(\"{} >> {}\".format(query, type))", "clean >> StringResult\nannotations >> SentencesResult\nvocabulary >> VocabResult\nmetrics >> MetricsResult\nposStats >> PosStatsResult\nsyllables >> SyllablesResult\nspelling >> SpellingResult\nexpressions >> ExpressionsResult\nreflectExpressions >> ReflectExpressionsResult\naffectExpressions >> AffectExpressionsResult\nmoves >> StringListResult\n" ] ], [ [ "### Setup Query\nThe client includes built in queries. We can either use the client query for `affectExpressions` or we can create our own. 
If creating from scratch, it wise to use the client query as a template to ensure the query is properly formed.", "_____no_output_____" ] ], [ [ "# Get query from client\nquery = tap.query('affectExpressions')\nprint(query)", "\nquery AffectExpressions($input:String,$parameters:String) { \n affectExpressions(text:$input,parameters:$parameters) { \n querytime\n message\n timestamp\n analytics {\n affect {\n text\n valence\n arousal\n dominance\n startIdx\n }\n }\n }}\n\n" ] ], [ [ "### Helper functions\n\nTo make it easier to run repeated tests on different files, we can setup some helper functions.", "_____no_output_____" ] ], [ [ "# Open a text file and return it as a string\ndef readFile(filename):\n file = open(filename)\n text = file.read()\n file.close()\n return text", "_____no_output_____" ], [ "# Test on a file\nmyText = readFile('dummy-affect.txt')\nmyText", "_____no_output_____" ], [ "# Get Affect Analytics from TAP and format the results\ndef textAffect(text,arousal=0.0,valence=0.0,dominance=0.0):\n parameters = '{\"valence\":'+str(valence)+',\"arousal\":'+str(arousal)+',\"dominance\":'+str(dominance)+'}'\n json = tap.analyse_text(query, text,parameters)\n analytics = json['data']['affectExpressions']['analytics']\n filtered = [x['affect'] for x in analytics if x['affect']]\n flattened = [item for sublist in filtered for item in sublist]\n #print(flattened)\n numFiltered = len(flattened)\n numLexicon = len(analytics)\n words = text.split(' ')\n numWords = len(words)\n percentAffect = numFiltered/numWords*100\n print(\"{0} words matched out of {1} total words in the text - {2} percent\".format(numFiltered,numWords,percentAffect))\n for t in flattened:\n #print(t)\n print(t['text'],'\\t[a] ',t['arousal'],' [v] ',t['valence'],' [d] ',t['dominance'])\n ", "_____no_output_____" ], [ "# Test the function on our text\ntextAffect(myText,arousal=4.95)", "4 words matched out of 18 total words in the text - 22.22222222222222 percent\nhappy \t[a] 6.05 [v] 8.47 [d] 7.21\namazing \t[a] 5 [v] 7.24 [d] 5.83\nhope \t[a] 5.29 [v] 7.48 [d] 6.78\nperforms \t[a] 5.15 [v] 6.48 [d] 5.82\n" ] ], [ [ "### Do analysis\n\nLoad the file, then check the results with different values of `arousal`, `valence`, and `dominance`.", "_____no_output_____" ] ], [ [ "# Read file\ntext1 = readFile('dummy-affect.txt')\n# Check values\ntextAffect(text1,arousal=4.0,valence=5.0,dominance=0.0)", "6 words matched out of 18 total words in the text - 33.33333333333333 percent\nhappy \t[a] 6.05 [v] 8.47 [d] 7.21\npresent \t[a] 4.82 [v] 6.85 [d] 6.65\namazing \t[a] 5 [v] 7.24 [d] 5.83\nhope \t[a] 5.29 [v] 7.48 [d] 6.78\nperforms \t[a] 5.15 [v] 6.48 [d] 5.82\nexpectations \t[a] 4.5 [v] 6.1 [d] 5.05\n" ], [ "# Check different values\ntextAffect(text1,arousal=5.0,valence=7.0,dominance=5.0)", "3 words matched out of 18 total words in the text - 16.666666666666664 percent\nhappy \t[a] 6.05 [v] 8.47 [d] 7.21\namazing \t[a] 5 [v] 7.24 [d] 5.83\nhope \t[a] 5.29 [v] 7.48 [d] 6.78\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
cb9f2c6a9630ce326c8e5299c253b0ec923d193a
159,861
ipynb
Jupyter Notebook
figures/Figure 3.ipynb
tomassa/predicting-poverty
4934c918d13cb41ef1c42f7c229556a601d45406
[ "MIT" ]
408
2016-08-19T06:12:33.000Z
2022-03-28T02:48:30.000Z
figures/Figure 3.ipynb
yop0/predicting-poverty
4934c918d13cb41ef1c42f7c229556a601d45406
[ "MIT" ]
26
2016-10-05T23:46:22.000Z
2020-10-23T16:33:59.000Z
figures/Figure 3.ipynb
yop0/predicting-poverty
4934c918d13cb41ef1c42f7c229556a601d45406
[ "MIT" ]
250
2016-08-19T06:12:23.000Z
2022-01-26T20:24:21.000Z
79.256817
145
0.823246
[ [ [ "# Figure 3: Cluster-level consumptions\n\nThis notebook generates individual panels of Figure 3 in \"Combining satellite imagery and machine learning to predict poverty\".", "_____no_output_____" ] ], [ [ "from fig_utils import *\nimport matplotlib.pyplot as plt\nimport time\n\n%matplotlib inline", "_____no_output_____" ] ], [ [ "## Predicting consumption expeditures\n\nThe parameters needed to produce the plots are as follows:\n\n- country: Name of country being evaluated as a lower-case string\n- country_path: Path of directory containing LSMS data corresponding to the specified country\n- dimension: Number of dimensions to reduce image features to using PCA. Defaults to None, which represents no dimensionality reduction.\n- k: Number of cross validation folds\n- k_inner: Number of inner cross validation folds for selection of regularization parameter\n- points: Number of regularization parameters to try\n- alpha_low: Log of smallest regularization parameter to try\n- alpha_high: Log of largest regularization parameter to try\n- margin: Adjusts margins of output plot\n\nThe data directory should contain the following 5 files for each country:\n\n- conv_features.npy: (n, 4096) array containing image features corresponding to n clusters\n- consumptions.npy: (n,) vector containing average cluster consumption expenditures\n- nightlights.npy: (n,) vector containing the average nightlights value for each cluster\n- households.npy: (n,) vector containing the number of households for each cluster\n- image_counts.npy: (n,) vector containing the number of images available for each cluster\n\nExact results may differ slightly with each run due to randomly splitting data into training and test sets.", "_____no_output_____" ], [ "#### Panel A", "_____no_output_____" ] ], [ [ "# Plot parameters\ncountry = 'nigeria'\ncountry_path = '../data/LSMS/nigeria/'\ndimension = None\nk = 5\nk_inner = 5\npoints = 10\nalpha_low = 1\nalpha_high = 5\nmargin = 0.25\n\n# Plot single panel\nt0 = time.time()\nX, y, y_hat, r_squareds_test = predict_consumption(country, country_path,\n dimension, k, k_inner, points, alpha_low,\n alpha_high, margin)\nt1 = time.time()\nprint 'Finished in {} seconds'.format(t1-t0)", "_____no_output_____" ] ], [ [ "#### Panel B", "_____no_output_____" ] ], [ [ "# Plot parameters\ncountry = 'tanzania'\ncountry_path = '../data/LSMS/tanzania/'\ndimension = None\nk = 5\nk_inner = 5\npoints = 10\nalpha_low = 1\nalpha_high = 5\nmargin = 0.25\n\n# Plot single panel\nt0 = time.time()\nX, y, y_hat, r_squareds_test = predict_consumption(country, country_path,\n dimension, k, k_inner, points, alpha_low,\n alpha_high, margin)\nt1 = time.time()\nprint 'Finished in {} seconds'.format(t1-t0)", "_____no_output_____" ] ], [ [ "#### Panel C", "_____no_output_____" ] ], [ [ "# Plot parameters\ncountry = 'uganda'\ncountry_path = '../data/LSMS/uganda/'\ndimension = None\nk = 5\nk_inner = 5\npoints = 10\nalpha_low = 1\nalpha_high = 5\nmargin = 0.25\n\n# Plot single panel\nt0 = time.time()\nX, y, y_hat, r_squareds_test = predict_consumption(country, country_path,\n dimension, k, k_inner, points, alpha_low,\n alpha_high, margin)\nt1 = time.time()\nprint 'Finished in {} seconds'.format(t1-t0)", "_____no_output_____" ] ], [ [ "#### Panel D", "_____no_output_____" ] ], [ [ "# Plot parameters\ncountry = 'malawi'\ncountry_path = '../data/LSMS/malawi/'\ndimension = None\nk = 5\nk_inner = 5\npoints = 10\nalpha_low = 1\nalpha_high = 5\nmargin = 0.25\n\n# Plot single panel\nt0 = time.time()\nX, y, y_hat, 
r_squareds_test = predict_consumption(country, country_path,\n dimension, k, k_inner, points, alpha_low,\n alpha_high, margin)\nt1 = time.time()\nprint 'Finished in {} seconds'.format(t1-t0)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb9f365a06d38937e9e878c88976b6f99914149a
1,473
ipynb
Jupyter Notebook
Lesson01/Exercise02/Exercise02.ipynb
TrainingByPackt/Unsupervised-Learning-with-Python
e141755bf24b84cf354dab988f1e740af67fc9c4
[ "MIT" ]
19
2019-05-17T08:52:46.000Z
2022-03-16T16:07:29.000Z
Lesson01/Exercise02/Exercise02.ipynb
TrainingByPackt/Unsupervised-Learning-with-Python
e141755bf24b84cf354dab988f1e740af67fc9c4
[ "MIT" ]
null
null
null
Lesson01/Exercise02/Exercise02.ipynb
TrainingByPackt/Unsupervised-Learning-with-Python
e141755bf24b84cf354dab988f1e740af67fc9c4
[ "MIT" ]
27
2019-04-16T14:08:37.000Z
2022-02-09T11:32:43.000Z
19.12987
73
0.501697
[ [ [ "import math\nimport numpy as np\ndef dist(a, b):\n return math.sqrt(math.pow(a[0]-b[0],2) + math.pow(a[1]-b[1],2))", "_____no_output_____" ], [ "centroids = [ (2, 5), (8, 3), (4,5) ]\nx = (0, 8)", "_____no_output_____" ], [ "centroid_distances =[]\nfor centroid in centroids:\n centroid_distances.append(dist(x,centroid))\nprint(centroid_distances)\nprint(np.argmin(centroid_distances))", "[3.605551275463989, 9.433981132056603, 5.0]\n0\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code" ] ]
cb9f3f04a9ef65d3a797e655c539ac35aad62a27
17,536
ipynb
Jupyter Notebook
TrainingTestingData/Train and Test data split.ipynb
Aniket762/ML-Algos
8a3c054278d8f0d99ad0a04c93c52e1701ff458f
[ "MIT" ]
null
null
null
TrainingTestingData/Train and Test data split.ipynb
Aniket762/ML-Algos
8a3c054278d8f0d99ad0a04c93c52e1701ff458f
[ "MIT" ]
null
null
null
TrainingTestingData/Train and Test data split.ipynb
Aniket762/ML-Algos
8a3c054278d8f0d99ad0a04c93c52e1701ff458f
[ "MIT" ]
null
null
null
49.258427
8,964
0.667541
[ [ [ "import pandas as pd\ndf = pd.read_csv(\"carprices.csv\")\ndf", "_____no_output_____" ], [ "import matplotlib.pyplot as plt\n%matplotlib inline", "_____no_output_____" ], [ "plt.scatter(df['Mileage'],df['Sell Price($)'])", "_____no_output_____" ], [ "X = df[['Mileage','Age(yrs)']]\ny = df['Sell Price($)']", "_____no_output_____" ], [ "from sklearn.model_selection import train_test_split\nX_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.2)", "_____no_output_____" ], [ "from sklearn.linear_model import LinearRegression\nmodel = LinearRegression()", "_____no_output_____" ], [ "model.fit(X_train,y_train)", "_____no_output_____" ], [ "model.predict(X_test)", "_____no_output_____" ], [ "model.score(X_test,y_test)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb9f3fbe7b3c15db6bc652490023dfdfe9874be1
105,497
ipynb
Jupyter Notebook
Python/Colab/HerenciaPython.ipynb
judrodriguezgo/DesarrolloWeb
a020b1eb734e243114982cde9edfc3c25d60047a
[ "MIT" ]
1
2021-10-30T16:54:25.000Z
2021-10-30T16:54:25.000Z
Python/Colab/HerenciaPython.ipynb
judrodriguezgo/DesarrolloWeb
a020b1eb734e243114982cde9edfc3c25d60047a
[ "MIT" ]
null
null
null
Python/Colab/HerenciaPython.ipynb
judrodriguezgo/DesarrolloWeb
a020b1eb734e243114982cde9edfc3c25d60047a
[ "MIT" ]
3
2021-11-23T22:24:15.000Z
2021-12-31T23:51:47.000Z
180.336752
41,567
0.878091
[ [ [ "<div align=\"center\">\n <h1><strong>Herencia</strong></h1>\n <strong>Hecho por:</strong> Juan David Argüello Plata\n</div>\n\n\n## __Introducción__\n\n<div align=\"justify\">\n\nLa relación de herencia facilita la reutilización de código brindando una base de programación para el desarrollo de nuevas clases. \n\n</div>", "_____no_output_____" ], [ "## __1. Superclase y subclases__\n\nEn la relación de herencia entre dos clases, se cataloga a las clases como _padre_ e _hija_. La clase hija (subclase) _hereda_ los __métodos__ y __atributos__ de la clase padre. Las subclases (clases hijas) emplean el siguiente formato:\n\n```\n class clase_hija (clase_padre):\n //Atributos\n ...\n\n //Métodos\n ...\n```\n\nLa clase padre suele usarse como un formato para la construcción de clases hijas. Un ejemplo de ello es la _calculadora científica_, que se puede catalogar como una subclase de la calculadora convencional.", "_____no_output_____" ] ], [ [ "#Calculadora convencional\nclass Calculadora:\n def suma (self, x, y):\n return x+y;\n\n def resta (self, x, y):\n return x-y;\n \n def mult (self, x, y):\n return x*y;\n \n def div (self, x, y):\n return x/y", "_____no_output_____" ] ], [ [ "Además de las operaciones básicas, la clase de `Calculadora_cientifica` debería poder calcular el promedio de una lista numérica y la desviación estándar.\n\n---\n\n<div align=\"center\">\n\n<strong>Promedio</strong>\n\n$$\n\\begin{equation}\n \\bar{x} = \\frac{\\sum _{i=0} ^n x_i}{n}\n\\end{equation}\n$$\n\n<strong>Desviación estándar</strong>\n\n$$\n\\begin{equation}\n s = \\sqrt{ \\frac{\\sum _{i=0} ^n \\left( x_i - \\bar{x} \\right)}{n-1} }\n\\end{equation}\n$$\n\n</div>", "_____no_output_____" ] ], [ [ "#Calculadora científica\nclass Calculadora_cientifica (Calculadora):\n def promedio (self, numeros):\n return sum(numeros)/len(numeros)\n\n def desvest (self, numeros):\n promedio = self.promedio(numeros)\n des = 0;\n for num in numeros:\n des += (num-promedio)**2\n \n des /= (len(numeros)-1);\n return des**(1/2)", "_____no_output_____" ] ], [ [ "__Observa__ que al momento de crear un objeto del tipo `Calculadora_cientifica` es posible utilizar los métodos heredados de la clase `Calculadora`.", "_____no_output_____" ] ], [ [ "calc1 = Calculadora_cientifica();\n\nprint(\"2+3 = \" + str(calc1.suma(2,3)));\n\nprint(\"Promedio de: [2,3,10] = \" + str(calc1.promedio([2,3,10])));\n\nprint(\"Desviación estándar de: [2,3,10] = \" + str(calc1.desvest([2,3,10])));", "_____no_output_____" ] ], [ [ "En Python, durante la relación de herencia puede haber múltiples clases padre por cada hija.\n\n## 1.1. Operadores __`self`__ y __`super()`__\n\n<div align=\"justify\">\n\nEl operador `self` se refiere a la clase _per se_. Se emplea dentro de la clase para especificar el uso de sus métodos y atributos. El operador `super()` se emplea en una relación de herencia para referirse explícitamente a los métodos y atributos de la clase padre. Es decir: en una relación de herencia, se emplea `self` para referirse a los métodos y atributos de la subclase (clase _hija_) y `super()` para los métodos y atributos de la superclase (clase _padre_).\n\n</div>\n\n### 1.1.1 Constructores\n\nTanto la superclase como las subclases pueden tener sus propios constructores. Si la superclase tiene un constructor, la sublcase debe emplear el operador `super().__init__(entradas)` para ejecutarlo. \n\nPor ejemplo, supongamos la situación de un estudiante. 
Se puede asumir que la clase de `Student` deriva de la clase `Person`, como se aprecia en el diagrama UML.\n\n<div align=\"center\">\n\n<img src=\"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAKEAAAE/CAYAAAAuSqMxAAAGnnRFWHRteGZpbGUAJTNDbXhmaWxlJTIwaG9zdCUzRCUyMmFwcC5kaWFncmFtcy5uZXQlMjIlMjBtb2RpZmllZCUzRCUyMjIwMjEtMDctMjRUMjAlM0EwOCUzQTUyLjE5M1olMjIlMjBhZ2VudCUzRCUyMjUuMCUyMChXaW5kb3dzKSUyMiUyMGV0YWclM0QlMjJ5M09uZVJTR3RFdURUblR0NmRNUCUyMiUyMHZlcnNpb24lM0QlMjIxNC45LjIlMjIlMjB0eXBlJTNEJTIyZGV2aWNlJTIyJTNFJTNDZGlhZ3JhbSUyMGlkJTNEJTIyQzVSQnM0M29EYS1LZHpaZU50dXklMjIlMjBuYW1lJTNEJTIyUGFnZS0xJTIyJTNFN1ZscmM5bzRGUDAxekhRJTJGcE9NSEVQb1JTS0R0Wmp1MGRKdjlLdXlMclVhV3ZMSUlrRiUyRmZLMXQlMkJZWjV0R0RwcFpoakdPcnE2c3U0NUVnZTc1UTZqMVZpU09QeEglMkJNQmFqdVd2V3U1TnkzR3VldzUlMkJhMkNkQVczSHlvQkFVaiUyQkQ3QktZMGljd1lCNjJvRDRrdFVBbEJGTTByb09lNEJ3OFZjT0lsR0paRDVzTFZwODFKZ0UwZ0tsSFdCTzlwNzRLTTdUblhKZjRlNkJCbU05c2Q5OWxQUkhKZzgxS2twRDRZbG1CM051V081UkNxT3dxV2cyQjZkcmxkYm4lMkZzTDVuZHclMkZkOGNmUHlmJTJGazM4SGZYejk5dThxU2pVNFpVaXhCQWxjJTJGbmZycFlUNTYlMkY2MzklMkZiOTRNdnE4dE1iOTJjY3JNOFI2Skd4aDZqVUJtUWh1VnF6V2VSbVRKWTBZNGRnYXpBVlhVOU5qWVpzd0duQzg5dkR1UUNMd0NGSlJaS0J2T3BTSUVmVkN5dnc3c2hZTHZZWkVFZThoYncxQ0lla1RwaVVNdTJ3RXNGc3FJeWFuVzR1WTZwRm1hZ2tKeGt6eXd0Z0ZkRWNTWldJOHdSaUpFenBMYjFpSFJFUUdsQSUyQkVVaUxLRTRrRjk4RTNyWUxwdEtHa2VDaTBvOGNmU1llaFRWY0RWaFV4R25yR0lDSlFjbzBocHJkUW10bHFkdDVlbHNLMXV3WUxxNkoxZTJiRG1NMFNGTG1MNmI3ZzVpSTh3Q0tVODdrYjg3V1BuQThacVUxSEdCTFBpWUtCTG1OU2xTRmVWSlphUXFrNFR4Q3EzUkJxeTBFaUxFNGlhSWdWNjYwcXdtUXdWenRsbWNURW96eTRTMk51MmlYeXhheFhRd0xIemxrcWlaRDZQdkJVTW9vb01pdTJRU3dvVjJsQk9nUDhZTm1HMXR0T3E0TTNOTVMyWGJieG84T2xHZ3FPNmlJMGxSR2daSmVnWmJ0RllIczM3MkdCcmV1OG5jcHZWVTQxWWs5bDBkbkJZaHdLRHA4VzBRelBqNWRHNXA2akpWUVJNNWZub3J6alhKaHlkd2ZsRUJIS1hzbCUyQlZyS3ZleGNtdTcyRjdBMkdHVTFOaEttR3ZmVUg5Z0Q5RVJLcDAlMkJWOGY5Vnl1TG15RzVwd201cHd0JTJGRFB5QXpZUkNSVVVhSHp5eXgyUXhlWE9yUnRxMzBjcTNzOHdDJTJCUjJ0bXhnN05DTER3bFh0NmhmVFl5N1V1Zng5MEdtMU8xOFBVeVh5MyUyRjVTMiUyRmE1M1g4dHNibHQlMkZ0JTJGYjZXJTJGN3FoMUt0VW9LbGFQJTJCQU83dXNNVzRUN2h4dyUyQjNhUEY5cnZZJTJGOTVXUm1PSjZYMHF4c0JCNnVOQTg0cmxKaSUyQlAyV2UxaHlmemYlMkZIJTJGQXU5ZTdlR3prM3FzNXolMkJYTzdSM1BabXAyVU5jbHJ2cXAyVkluOXBnaVVlVUUlMkZibXIyeTdQd3FjcEJyMXdqYiUyQnVkanZIWG1rdDl2bm9uJTJGYkk1ME44b0Q3ZmYwa0gxc3pKclNUR3lCa3JKNXRaYzBSWmZuNWQ4Q0wxUTlSOEFQSXJTbXdtVmplbHNBZ0JiQWpGOHJKVGk0UkMlMkJuQlljYlF1d1p3eEQ4RmZiZDdlZDFHcEFSR0ZIMnN2OURZNCUyRnNtSW5WRmhjZTA2cDdQc2R4NmlteVJabFQxRGNLcGliSXFOQklkTm9UWUxOJTJCalpPSGx5eWozOWdjJTNEJTNDJTJGZGlhZ3JhbSUzRSUzQyUyRm14ZmlsZSUzRYlScd8AAA6lSURBVHic7Z3Bjds4FEBZQDaXvS8C6LClqJFc3MVcVUBqEOaeDtxDbga2hUwD3EPyna9vkqJE2n88eg8QMDOSKIp+/hI14mcIIUQWFuclRAAvkBDcQUJwBwnBHSQEd5AQ3EFCcAcJwZ3DSph6YHo6nbyrdUgOLeH5fF78bRiGOM+zU42OCxIqTqdTnKYpxhjj5XJZREnhfD7HEEIcx/H6d/k5FU3XyjmdTtd1tj5HAQkzfwshXKPi6XSK4zjGGP/II+vmeb6ui/FXNNXlDsNwFTtVTmrd0Ti0hHYRIc7ncxyG4bqtRLPL5XKV53K5xBh/Sai31ci2reV8dA4tYe7yN89zUtKUPDHGOE1T8nIs5QhImAYJE9hIaNdZCTX6ckwkrAMJC+v1fZ+0kZVnmqZF9Fu7JxTRkPAPSJjB9mpFllQkHIbh5r4yV46AhH84rITwfkBCcAcJwR0kBHeQENxBQnAHCcEdJAR3QggxfPnyZctIeRaWrstv/4iE4MdvGZEQ/EBCcAcJwR0kBHeQENxBQnAHCcEdJAR3kBDcqZYwhPwIs7VsAnYIpQwg0gPAZZ0ek1GbzQCem64SprIJ6GGOMf4anWZHnMl+kk4jN75XC3zkjAUfjaKEuUHgdkTZlpFjep3dz4qlI2opKwI8N10jYU5CfZkOIeySsJQVAZ6bu0tosxD0ioTwcejSOy5JaPOxDMOwS0L5PZUVAZ6bu0sY4zJDgd52q4S5rAjw3PCcENxBQnAHCcEdJAR3kBDcQUJwBwnBHSQEd5AQ3EFCcAcJwR0kBHeQENxBQnAHCcEdJAR3kBDcubuEa7Ni3oPUmOVpmroPEfU4t4/Ih5XQHhMJ3y9dR9vZTAp6XS47w9osmFv3k7qO47iQTktYMzqwJitEqo66jdbOTQb7H52uEsoHVMqyYAcz2fmArSi5/fTIu9wAqWEYrtvskTDGclaI0nmX6ij7ybqjU5RwbwaG2hnO986MvpaNQSTU5W+VsHYsdOm8c3XkMr6kayS0eWO0CDVjkmslXMvGoEU5nU5xmqaHSKjXl+qIhEvuIuGjI2
GqriKKlFlK0vSISFhqr6PTpXds76NS94SlgfH6vq92P31PZSOq7cRIJ8NKKNuM49gkYeq8S3VEwiVdJZSb+FRPMCfTWg+ydr/UJVEzDMNNb1n3eFsk1L1jTa6OSLikq4Q0KuwBCcGdLhICtICE4A4SgjtICO4gIbiDhOAOEoI7SAjuICG4g4TgDhKCO0gI7iAhuIOE4A4SgjtPI6Eej2LHpsBz8zQSapDwY9FltF2M62NFajIaxHg7nFMGChEJPy7dJNTClDIplDIa2KGYqVF79md4fooSbsnAUJttYG1OY3t8JPz4dImEW7INrEmoh0+GEJDwAHSRcEu2gZpxvAKR8Bh06x3re8JStoGShDaTwjAMSHgAuklYm21g7XI8DMO1DL0vEn5cnvI5IXwskBDcQUJwBwnBHSQEd5AQ3EFCcAcJwR0kBHeQENxBQnAHCcGdq4QsLM4LkRD8QEJwBwnBHSQEd5AQ3EFCcAcJwR0kBHeQENxBQnAHCcEdJAR3ukoomRFksbkH70EI+axe76E8WKebhJJHRidNGsfxJo1cb5Dw+amW0ApmGYbhmhBJkPw09ndZBImgOi2cFkESa9roqvPWvL6+xhDCdVubA0dn+DInf62LLu9yuazWV44FbXSR0GZYzTEMwzUylrK56nXzPC8SKA3DsBBUhJMybHrhnIT6SzNN01Vu/QVYq6/90sE+ihLWZmqtkdBm0iplc9XCpCKYPQEtYS4bWC7Xoa5Pqrya+kIbD4uENvdgrYQxxkXSddvZ6SWhLW9LfaGNh9wT2vyCuXWl+zd9nNLlOJeEfauERMLHcffese1I6HssK0VKGH2/JmVskVC2HcdxIbZep2cJKN0T5uoLbXR9TmjvIe2lc623WepE5O5HpYf67du3GzHsZTw1w4Cti+5d19YX2ugqIcAekBDcQUJwBwnBHSQEd5AQ3EFCcAcJO/D29uZdhafmKiELi+vi/U14Zt7e3uJff/1FNGwECRt4eXmJ//77b3x5efGuylODhDt5e3uLnz9/jt+/f4+fP38mGjaAhDt5eXmJX79+jTHG+PXrV6JhA0i4A4mCP378iDHG+OPHD6JhA0i4Ax0FBaLhfpBwIzYKCkTD/SDhRlJRUCAa7gMJN5CLggLRcB9IuIFSFBSIhttBwkrWoqBANNwOElby8vISP336FP/555/F8vfff9/87dOnT0TDDSBhBT9//oz//fdfcgkhZNf9/PnTu+pPARI2EgJN2Aot2AgStkMLNoKE7dCCjSBhO7RgI0jYDi3YCBK2Qws2goTt0IKNIGE7tGAjSNgOLdgIErZDCzaChO3Qgo0gYTu0YCNI2A4t2AgStkMLNoKE7dCCjSBhO7RgI0jYDi3YCBK2Qws2goTt0IKNIGE7tGAjSNgOLdgIErZDCzaChO3Qgo0gYTu0YCNI2A4t2AgStkMLNoKE7dCCjSBhO8F9Nh8WlhD4JoMfSAjuICG4g4TgDhKCO0gI7iAhuIOE4M5hJbQPS+268/m8qbzz+XxTzt56bT32s3NICYdhiNM0XX+fpmkhEBI+lsNJeLlcYgghXi6Xxd9DCHGe5zgMwzU6vr6+LraV9YKIF0KIp9NpIaEcx0Za2Ue219LpY9v6fWQOJ2GMMY7jeJUuhYghwuQk1GVYCe26cRxjjH8klEis1+ljH4lDShjjL6F0pNJC1khoL7/69/P5vJBVR98asZHwgIgY8uHXSCgSCyKaXmcXJExzOAnneV5c/oRhGK7RUESw9497I6EGCW85nIQxxpvLr0QuEcNKKFKM47gQRveyS/eEOmoi4S2HlDDG2+eEujcqHZfz+Xx9fCM92tS9Xk3vWMpfk1Af+ygcVkJ4PyAhuIOE4A4SgjtICO4gIbiDhOAOEoI7IYQYvnz5kvxfJwvLI5bf/hEJwY/fMiIh+IGE4A4SgjtICO4gIbiDhOAOEoI7d5MwhPcxgNz+bJfcsM+99dpz3kfncBLqOskr+DUi7jk21NEkoYyHCOHXGAvhPWUxKEkY468UIHbwuc1+YEfS1R4b6tgtoR06OQzD4gOWD8c7i8GahDXRNyUhGRT60SRhbmytFPweshjcS0KGbPajKKG+5OjLrWCHQ9qC30MWAyR8/3TrmOQux95ZDLbeE5aOt/XYUMduCadpuumMlCT0ymJQ0ztekwYJ70tTJNS9QZ10Msb3k8XASmgXK0xt75gMCv3odjkG2AsSgjtICO4gIbiDhOAOEoI7SAjuICG4g4TgDhKCO0gI7iAhuIOE4A4SgjtICO4gIbiDhODOh5Uw9TZ0LfaN7tTb1l7UnosepNbj873n2+KHkHDPfrrB7SygntSel4z5uUdWi97sllAPAs+NAZHxFjGuZ1Ow5ehxJ5pcObosO17FfhClMvTAK804jtfz25MZomY/3V65mentuaS+HHrsT21Wi9Ix751ZollCkWSappuhnDrHix1Vl8umIB9EanyxVDiVlSG1Lidhri52eGqOXB1qsjOU9pN1th6ptl1DJhHfe+76mFL3dxsJbXRIyZOLRKltS2k1arIy2PqVfrZl2A/CRo3T6bQ7M8SW/Sylsdo5UhKWzr10zBgdJSxlYMhFqVzWhR4SlrIylLI56MYv1aX0oYzjGE+n0+7MEFv2s20fQugiYencS8e0n0NvnjoSpuqT+n1LNCjdE6YiYalNSpGwtJ+t4yMiYemYMb5zCeUDS9236G+2vRfJbbuW5UrfO9lvdi6bw9p9USpVnb6flePI1SBXh5rsDLX72fre654wlxdIH1Pq/m4l1DkK7Tot4VrPsFbCXFYGu25v79g0zOpxUhEsJ2HtfjEue6R6fU3vWPZPSbj2ZCB1zBjvm1mi2+UYYC9ICO7slhCgF0gI7iAhuIOE4A4SgjtICO4gIbiDhOAOEoI7SAjuXCVkYXFeiITgBxKCO0gI7iAhuIOE4A4SgjtICO4gIbiDhOAOEoI7SAjuICG401VCnR8whNskSvcghL5ZAXqXB+t0kzCVzXQcx2RyoZ4g4fNTLaEVzCK5TzQ6PZv+XRZhLcOpznejo6vOnfL6+hpDCIucKbrONi+MOvlrXWxG0i2ZVWE/XSSszXBam61Vr5vneZEgSXIx63rpnIg2rW5OQv2lmaZpkXFLyl+rr/3SwT6KEuYSO9pLbI2EW3IU2uSSubx+cgKpxJxruQLtOct2trya+kIbD4uEW7K1WvF0UnXb2ekloS1vS32hjYfcE6by6m2R0B6ndDlOZY61ZdZISCR8HHfvHduORE22Vi2Mvl+TMrZIKNuO45jNPKqzzJbuCUuZaGE/XZ8T2nvI2jlIajoRuftR6aF++/btRgx7GS9lTbXlnc/n6vpCG10lBNgDEoI7SAjuICG4g4TgjruE+pHI2nZ2GcexenKZEqkypmmK0zTF8/m8+LdhL0r17nFOz8RTSXivt1tSD6a1eCLko0DCwoa552Jrb8HknrfZt1Zy8+3K8VMSpuZuy9Wjdg5hmUjRnn+pLexra/LCw9qzRvu7rv9R6Cphbp7f3NsoUq5+MCzHqJ1vNyXh1vl8U1N1WeTSn
yP1to88dC+9iaOPmZqr+QgUJax9i2bLywJWhpxcqckJ7WL/J73lf9C5OuZm41y7JKemrK2dVbM0Q+kR6BoJS/P8CiUJ98y3u0XCmjmESxKuDVcYxzHO8xzneV7MjZw6d33M0lzNR+DuEtZGwr3z7dZKWCpfr7tcLrsioWwjl9zSDJtEwiVdesc1LyDk5hcWufbOt1sr4ZY5hPfcE+py7HFKb+LktkPCjaxJWJpjV7+1Uppvt8fluHYO4T29Y30Mfdmu7R3n5mo+Al0k/Gh4Pyc8GkiYQf/HpDTGBdpBQnAHCcEdJAR3kBDcQUJwBwnBHSQEd5AQ3EFCcAcJwR0kBHeQENxBQnAHCcEdJAR3kBDcQUJwBwnBnauELCyey//fX7r5PGEejgAAAABJRU5ErkJggg==\">\n\n</div>", "_____no_output_____" ] ], [ [ "class Person:\n name = \"\"\n phoneNumber = \"\"\n email = \"\"\n\n def __init__(self, nombre, numero, mail):\n self.name = nombre;\n self.phoneNumber = numero;\n self.email = mail;\n\nclass Student (Person):\n studentID = 0\n promedioGeneral = 0\n\n def __init__(self, nombre, telefono, email, studentID):\n super().__init__(nombre, telefono, email)\n self.studentID = studentID;\n\n#---------------------------------Creación de personas y estudiantes------------------\n\njohn = Student(\"John Pérez\", 302010, \"[email protected]\", 10010)\n\nprint(john.name)", "_____no_output_____" ] ], [ [ "En ese contexto, el operador `super` se refiere explícitamente a la superclase, mientras que el operador `this` se emplea para trabajar con los atributos y métodos de la subclase.\n\n### Ejemplo:\n\nElabora una clase _padre_ `Animal` para construir clases hijas: `Oso`, `Tigre` y `Perro`. Puedes emplear el diagram UML como sugerencia.\n\n\n<div align=\"center\">\n <img src=\"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAjEAAAE6CAYAAADnQAOqAAAHtnRFWHRteGZpbGUAJTNDbXhmaWxlJTIwaG9zdCUzRCUyMmFwcC5kaWFncmFtcy5uZXQlMjIlMjBtb2RpZmllZCUzRCUyMjIwMjEtMDctMjVUMjAlM0E0NCUzQTI3LjI3OVolMjIlMjBhZ2VudCUzRCUyMjUuMCUyMChXaW5kb3dzKSUyMiUyMGV0YWclM0QlMjJ1Y2J3aUxiZm1fTzQ0TGw5ajFhZiUyMiUyMHZlcnNpb24lM0QlMjIxNC45LjIlMjIlMjB0eXBlJTNEJTIyZGV2aWNlJTIyJTNFJTNDZGlhZ3JhbSUyMGlkJTNEJTIyd3BPaG93WlZLZVpOTTBYX0dLNlolMjIlMjBuYW1lJTNEJTIyUGFnZS0xJTIyJTNFN1ZwZGIlMkJJNEZQMDFrWFlmV3VVTENJOThUR2RINm1pclphU1ozWmVWU1F4WTQ4VElNU1gwMSUyQjkxN0JBU1F4c1lzdkNRcXFyaWsydkh2dWRlN3NtbGxqZUpzODhjclZkZldZU3A1ZHBSWm5sVHkzVUhnUXQlMkZKYkJUZ08lMkZhQ2xoeUVpbklLWUVaZWNNYUxNdzJKTUpweFZBd1JnVlpWOEdRSlFrT1JRVkRuTE50MVd6QmFQV3BhN1RFQmpBTEVUWFI3eVFTSzRVRzdxREUlMkY4Qmt1U3FlN1BTSDZrNk1DbU45a25TRklyWTlnTHhQbGpmaGpBbDFGV2NUVEtYdkNyJTJCb2VVOG43dTQzeG5FaW1reklrSXVmQmc5Zmhvc3Y2VHB6WXUlMkJmNzU4ZjlDcXZpRzcwZ1VjSmlSSFZXeGE3d2clMkZwbHNRVUpUQWFMMWdpWnZxT0EyTkV5VEtCNnhBMmdqa0FyNWdMQWk0YzZSdUNyUUVOVjRSR3oyakhObks3cVVEaHoySTBYakZPM21CWmVMQmFFMjV6b2FQQjdWY3Nabkltd0RhZ0hLZGc4MUw0d0tsQlgxRldNWHhHcWRCQXlDaEY2NVRNOThlSUVWJTJCU1pNeUVZTEUyMHM2QjQlMkJEc3BOZWRQWmVRQTVqRldQQWRtT2dKWGtHJTJGanYlMkJCSG03TFlITDZHbHRWQXNuWFFhd0RlTGxmdXVRWUxqVE5aMUR1R3BSYnJnZEF3dUk1QjJRRTF6UEJTYkswd1BPdWw0MXlUMGdUSEtGSUdjeDNBaHUzQllxUk5mR3NzY09VVmNRMjByJTJCSGRyV3dBc2VLbkc3T2Z1SUpvd3ppWjVvd0ZXZUUwaHBVaEJyRkMzRXkwTkkxQ21IM3o3bk4xQyUyQlJ2N1I3SmNSZzdvTG0lMkJiZ2lVWVFUR1NSTUlJRlVSRWo2MTR3a0luZCUyRmJ3eSUyRndOTEVmdXhaUGRqNEJNWk9PWVpmYWM3RmhDVndGa1R5NE1BUWJsc3NRNjVaSkozT1VETzhkRGhKMXpZSnAxNWIwZVFkaWFZYXg1VGszQ21PaXc5UjV5S0NZNkNLNHBMUmI1THc2WU5qc082WnJIdEhHS1pvanVrTFM0a2dUSzdQbFcyTiUyQlZ1Ukd6VDhyQWhhNHRZJTJGd2kwYzFsWiUyQjJJUUNtT3NTJTJCc3FjQjgwNEx4TCUyRjZxVDNETkwlMkZURmtuQjY0a0J4eSUyRktnY2E1N2hqMnkwUjNqY0lmNmpXOG45WlNNSmFSZSUyQnklMkZzeXM3MTJsakxlVzlZT3VqTGRHYnMlMkI5YlJrUG1wVHh2VVNYeVE5djNDRW42eEM4JTJCZHZ2a1BpdkRCN1FxZmpXSXFUZnNBajRiZFdBSXQzZnkzJTJCY1JDUFpVSUhSbkRKWmM4Y0E2YUxzOU5Yd2ljZ0g1MzVmaWJnbzN6Z2o0b2VFd2M5cTlMYzJrdGZUN0hDd0t3WUpIRXhQOG9JQ2tQT2NSN3NFeXNuNXFETDdCWE1DJTJGcEVpNUtCJTJCNDhqbyUyQk5TcU54eWJiWGlJUDFaSklFdVclMkJEM2UlMkZlTzhIeU9hWTRvRWVhM3U3UmpUZXJrWEdkZW5HdzM3dmxP
eGhEcVRublhZSjZvdDVBY2ZMS1FPYlN3RXdZRjJCMlk2NzA1dTJLMXYyTFBmM1pkWGwwNGYyUHQyJTJGejE3dUZBN0xyTm1UOUF2SkpMNVB2eU5MTG1wbURvQmZXRSUyRnpmUHZUVUM3NW51eUxLS2hLbnFIN2JTdWJKNVpObFU2M2E5c2RzMjM1VTQzWDQzZVd3dG4xM3cxN3BUem5YMEUzRnc2ZThOMnBYTXBnJTJGMmhYNVhCanROclV3YiUyRkw3TFZVR25CcGJMVjdzTjd3YkQ4cWE3ckJQN2pjTmhJeUY1TEN4WWhkeEFaUUFYdm1xblgwb0s5JTJCMnVtJTJCdVlYNnJJc2NQU0dvQnAwT3ZEQ0l1RGJ4d1BoWG5TZzM2QiUyRjB1bkFTJTJCbTl0UTcwajMzSjNlbkF1JTJGb0l1TGtPOUk5OVc5NjFVTThudEFXUmFiUWFmMGxrRGc1RXBxa3hnOERlJTJGOVRLelpVYXAlMkYwekc2ZFhib1RDc1B5blJXVmUlMkZ1ZW45JTJCayUyRiUzQyUyRmRpYWdyYW0lM0UlM0MlMkZteGZpbGUlM0VXDPYSAAAgAElEQVR4nO3dTa8cR9XA8bKdbB7Yxg47S20gYPM52p8Ag8QaqaVI7FBYATEC9S4S8pplyxISxr5WbCfAoj8AW2PSC0R4VxRFQoiA4TwLU+Oauv02Uz31+v9JrTh3ZmrO9D1n6tzqnmklQEaUUmxsQTcA/lBxyAqTCEIi/wC/qDhkhUkEIZF/gF9UHLLCJIKQyD/ALyoOWWESQUjkH+AXFYesMIkgJPIP8IuKQ1aYRBAS+Qf4RcUhK0wiCIn8A/yi4pAVJhGERP4BflFxyErJk0jbtqKUkrquVz+maRpRSskwDJvFcYoxU1Fy/gEhUHHISsmTSF3Xu2+NDdlA0MQA8IWKQ1ZKnUSGYRCl1K6BaNt2d5v+mf6vUkr6vt+7bRgGqapKqqraa4a6rtv9217hMb9q37yNJgaAL1QcslLqJKIPJZnNiKabiq7rRET2mg67idENjm6K9P7U4+vmp65raZpGRGTX6Iw1RqUpNf+AUKg4ZKXUScRsXOyGw24q9GqLfZs5hrmyI/KyUdGNkGauxujbaGIA+ELFISslTiJ93+8dQtINiP7/sSZGNyvHNjH6kFPXdeduo4kB4AsVh6yUOImY57qY21ijIuLexCw1ODQxAHyh4pCVEicRs2HRzENKp1iJMc+rMVdl7DFLU2L+ASFRcchKaZOIbi7MTyOJvDzE1DTNSZoY3STpZmbu8FVJSss/IDQqDllhEkFI5B/gFxWHrDCJICTyD/CLikNWmEQQEvkH+EXFIStMIgiJ/AP8ouKQFSYRhET+AX5RccgKkwhCIv8Av6g4ZIVJBCGRf4BfVByywiSCkMg/wC8qDlm5evXq6Ffws7H52K5evRq6BICi0MQgK0qR0giH/AP8ouKQFSYRhET+AX5RccgKkwhCIv8Av6g4ZIVJBCGRf4BfVByywiSCkMg/wC8qDllhEkFI5B/gFxWHrDCJICTyD/CLikNWmEQQEvkH+EXFIStLk0hd19L3vQzDIFVVeYpqX9/3opSSYRhO+jz2F7HZt/V9PxkbjsO+A/yi4pCVpUlENy5930td1z5COsdHE1NVlbRtu/v/tm339s1UEwM3NDGAX1QcsjI1iUx9Tbw50Yu8bDCaptndx5zsh2EYXd3Qj9PNgm5S9L+bppl9Dt3Q6Nvqut6NP/Wc+nXZzZC+v/1zpZR0XSdVVe3Gunv37t7zmSsxS/ui67q91xdqZSsmNDGAX1QcsjI3iXRdt2sm9GElm9mMiIg0TbO3YmOucJi32Y/TTcEwDOdWXszmQOTFKom5QqSbDfM16f+345min98cx6QbEvv5xpqYqX1hNjVVVdHECE0M4BsVh6zMTSJt2+4m5KqqRg/n2A2HXrkwb9PMFQ/7cXMTvn3fuXH6vt9rDqZWWcaYKyVjjZHZxNgN1tK+MP899v+lookB/KLikJWtDidNTdynaGLM28eefyzuQ8+n0ePqGGhiToMmBvCLikNW5iaRNSf1xr4Ss0bXdaOvr6qq3WoMTcxp0MQAflFxyMrUJGJ+pNo8rGSbm7hFzp8TYzc4hzQxepyxc2LsVRrdfNirQXP7wTx8pB+nx3VtYuzXVNc1TYzQxAC+UXHIytQksuakXpHliXvp00mHNDHmp36mxhl7TrvBmTq0NHcISp/4e+fOnaObGD6ddB5NDOAXFYesMImE0TTN3sfIS0X+AX5RccgKk4gfepVGb6zCvED+AX5RccgKkwhCIv8Av6g4ZIVJBCGRf4BfVByywiSCkMg/wC8qDllZmkS2vIr1sVd8XvO4Yy4SaX4CCmHQxAB+UXHIytIksuVVrGliYKOJAfyi4pAV18sOiMxfNdr8VI7+nhdt7lpFc48ba1bmrnRd1/Ve3OaVs/X95l4DTod9DfhFxSErc5PI2i+8MxuQsS+tM2/Tz2dfmNH8Ft65x02Zu9K1fVkBfTFL+9pIh175Gu5oYgC/qDhkZW4SWXsV66mrRtuHgeYOC81dc+mYw0l2k2SuuOgGxbyUwLFXvoYbmhjALyoOWXE9nDR31eipq1hr5qEf8wvglh43Zu5K1/q5uq7ba8z07Vtd+RqHo4kB/KLikJW5SWTtVaynPrU0t6Ji33bqlZi+73eHicYu6sg36IZBEwP4RcUhK1OTyNqrWOsxpq4abV/FWt82dr+5q1+vbWLGrnRtxjl1Vem514DTYT8DflFxyMrcOSprTuoVmb9qtHmb3YxUVbW7bWolxeXTSbamafaaMX1lav09OBxK8o8mBvCLikNWSppEaE7iU1L+ATGg4pCVEiYR++PXiEcJ+QfEhIpDVphEEBL5B/hFxSErTCIIifwD/KLikJUUJxF92QD9sWmuf5SuFPMPSBkVh6wsTSJrrmJtflTZBx2H/nTTlo69SOXYGFPfcMzE/RL7AvCLikNWliaRNV9457uJOSWaGL/YF4BfVByy4nrZAfO7XsxLDehNf4Gc+WV05v3N74IxLY1jfh+M2UDNXY16qbEY+06aqfHsRmXsG4fHrqhtNzGlXz27xNcMhETFIStzk8ghV7E2vzBu7MrU9jfq6i+aMy8UOXXJgLlx7CtO29/0u+Zq1GaTNPaFfGPjrW1ipuKfeu7Srp5NEwP4RcUhK3OTyJqrWOsxxhqcsYldj2FP2HOHpObGmbvm0pqrUR9yfaexK3TPxTHWlNnjl371bJoYwC8qDllxPZyk72teLdq8/7FNzNpxzOZh6urXc03B3BWz58Y7pIkxX5/ZxHD1bJoYwDcqDlmZm0TWnNSrx7AnaJHjV2IOGSfUSow99hYrMSWiiQH8ouKQlalJ5NCrWPd9P3tl6kOamEPGMZsHfV/zHJY1TcLcFbOnxtONiY65ruvJ83bWnhNT4tWzS3u9QGhUHLIyNYkcchVr82rQU1emPvRw0tpx7CbmmE8nzV0xe24885NWZoMzd0XtpU8nlXQoSYQmBvCNikNWmEQQEvkH+EXFIStMIgi
J/AP8ouKQFSYRhET+AX5RccgKkwhCIv8Av6g4ZIVJBCGRf4BfVByywiSCkMg/wC8qDllhEkFI5B/gFxWHrDCJICTyD/CLikNWmEQQEvkH+EXFIStMIgiJ/AP8ouKQFSYRhET+AX5RccgKkwhCIv8Av6g4ZMW8+CAbW4gNgD9UHLLCJIKQyD/ALyoOWWESQUjkH+AXFYesMIkgJPIP8IuKQ1aYRBAS+Qf4RcUhK0wiCIn8A/yi4pAVJhGERP4BflFxyAqTCEIi/wC/qDhkhUkEIZF/gF9UHLLCJIKQyD/ALyoOWWESQUjkH+AXFYesMIkgJPIP8IuKQ1aYRBAS+Qf4RcUhK1tNIn3f713Ur2maTcado5SSvu+jHQ/LaGIAv6g4ZGWLSaTrOlFKyTAMu5/VdS1t2zqPPYcmJn00MYBfVByysmYSsRsUW1VV0nXd3s+GYdgbW/+/3jS9gtM0ze42s5Go63p0daeqqt3P7969K0qp3X31mDrmruukqqpzr8mMxRxvGIbFePVzwQ37EPCLikNWXJsYPdnPNTkiL5oEvTLTNI3UdS0iL5uCsdu6rtv9W49hNji6YdFj6EZqqYkxm662bXfNkdlALcVrN204Dk0M4BcVh6xMTSL6EJG92YeI1jQxeuIfe8xcwzG2gmLHbjYxeoy5Me1YdDxj462JF25oYgC/qDhkxcdKjG6Ixh6ztGrStu3kycJbNTH2eIfECzc0MYBfVByycupzYsyJf+62ufNXzOeZO5ykx7Abq0ObGFZi/KGJAfyi4pCVU346yT4R1zzHxG4qxhoO83wVPcYhTYy+b13Xe42ReVvbtrvb5s6JmYoXbmhiAL+oOGRlq0nEPofGPvSz9GmfuZNwp87H0Z8QunPnzrnGwj4MZY45FYv56aa18cINTQzgFxWHrDCJICTyD/CLikNWmEQQEvkH+EXFIStMIgiJ/AP8ouKQFSYRhET+AX5RccgKkwhCIv8Av6g4ZIVJBCGRf4BfVByywiSCkMg/wC8qDllhEkFI5B/gFxWHrIxd5JGNzecGwB8qDsCi69evhw4BAM6hiQEw69NPPxWllNy7dy90KACwhyYGwKzr16/LhQsX5LXXXgsdCgDsoYkBMEmvwnzjG98QpZTcv38/dEgAsEMTA2DS9evX5f/+7/9EROT1119nNQZAVGhiAIzSqzDf/e53RUTk8ePHopSSBw8ehA0MAP6HJgbAqC9/+cu7VRjtypUrcvny5UARAcA+mhgA5/zzn/8UpZR873vf2/v5u+++K0opefjwYaDIAOAlmhgA53zpS1+Sz3zmM6O3XblyRa5cueI5IgA4jyYGwJ5//OMfopSS73//+6O3P3z4UJRS8u6773qODAD20cQA2PPGG29MrsJoly9fZjUGQHA0MQB2/v73v4tSSm7fvj17v7OzM1FKyaNHjzxFBgDn0cQA2HnjjTfks5/97Kr7Xr58WV5//fUTRwQA02hiAIiIyCeffCJKKfnBD36w6v73798XpZQ8efLkxJEBwDiaGAAiIvLFL35x9SqM9tprr7EaAyAYmhgA8vHHH4tSSn74wx8e9Lh79+6JUkref//9E0UGANNoYgDIF77whYNXYTRWYwCEQhMDFO6jjz4SpZT86Ec/OurxP/vZz0QpJb/85S83jgwA5tHEAIX7/Oc/L0op5+1zn/tc6JcCoDA0MUDBPvzwQ/nVr34lv/jFL+T999+X9957T548eSJPnjyRx48fy6NHj+TRo0eilJKf/OQn8vDhQzk7O5MHDx7IgwcP5P79+/Lzn/9c7t27J/fu3ZP33nsv9EsCUBCaGACLlFLyu9/9LnQYALCHJgbAIpoYADGiiQGwSCklv//970OHAQB7aGIALKKJARAjmhgAi5RS8uGHH4YOAwD20MQAWKSUkj/84Q+hwwCAPTQxABYppeSPf/xj6DAAYA9NDIBFSin505/+FDoMANhDEwNgkVJK/vznP4cOAwD20MQAWKSUkr/85S+hwwCAPTQxABYppeSvf/1r6DAAYA9NDIBFSin529/+FjoMANhDEwNgkVJKPvroo9BhAMAemhgAi2hiAMSIJgbAIqWUfPzxx6HDAIA9NDEAFiml5JNPPgkdBgDsoYkBMOuDDz4QpZT8+te/Dh0KAOyhiQEw69GjR6KUkp/+9KehQwGAPTQxAGb9+Mc/lgsXLkjbtqFDAYA9NDEAZn3rW9+Sixcvyje/+c3QoQDAHpoYAJM++OADuXjxoly6dEkuXrwov/nNb0KHBAA7NDEAJn3961+Xt99+W1599VV5++235atf/WrokABghyYGwKjvfOc7cv36dRERefXVV+XTTz+Vr3zlK/Ltb387cGQA8AJNDIA9v/3tb+VrX/uaXL9+XR4/fiwiL5qYf/3rX/LkyRO5ceOG3Lp1S549exY4UgClo4kBCvb8+XN5+vSpnJ2dyTvvvCNvvvmmKKXk9u3be/fTTYx2+/ZtUUrJm2++Ke+8846cnZ3J06dP5fnz575fAoCC0cScwLNnz3YTws2bN+XatWuilGJji3K7du2a3Lx5c9eQjK2w2E0Mec7GNr+tqSu4o4nZGH+hIkevvPLKuSYGwLi1K5xwRxOzkcePH3OuALJFEwO4efbsmdy6dUtu3LixO9cM7mhiNnLjxg156623QocBnARNDLCNt956S27cuBE6jGzQxGzg9u3bcuvWrdBhACdDEwNs59atWxxa2ghNjKNnz56JUopDSMgaTQywHeaN7dDEONInbQE5o4kBtqU//AE3NDGOSESU4JVXXpF///vfocMAssEfwNugiXF08+ZNOTs7Cx0GcFI0McC2zs7O5ObNm6HDSB5NjKNr167J06dPQ4cBnBRNDLCtp0+fyrVr10KHkTyaGEdKKb7IDtmjiQG29fz5c1GKKdgVe9ARSYgS0MQA22P+cMcedEQSogSXLl2iiQE2xvzhjj3oiCRECWhigO0xf7hjDzoiCVECmhhge8wf7tiDjkhClIAmBtge84c79qAjkhAloIkBtsf84Y496IgkRAkuXbrEVwkAG2P+cMcedEQSogQ0McD2mD/csQcdkYQoAU0MsD3mD3fsQUckIUpAEwNsj/nDHXvQEUmIEtDEANtj/nDHHnREEqIENDHA9pg/3LEHHZGEKMHFixdpYoCNMX+4Yw86IglRApoYYHvMH+7Yg45IQpSAJgbYHvOHO/agI5IQJbh48aL85z//CR0GkBXmD3fsQUckIUpAEwNsj/nDHXvQEUmIEtDEANtj/nDHHnREEqIENDHA9pg/3LEHHZGEKAFNDLA95g937EFHJCFKQBMDbI/5wx170BFJiBLQxADbY/5wxx50RBKiBDQxwPaYP9yxBx2RhCjBhQsXaGKAjTF/uGMPOiIJUQKaGGB7zB/u2IOOSEKU4MKFC/Lf//43dBhAVpg/3LEHHZGEKAFNDLA95g937EFHJCFKQBMDbI/5wx170BFJiBLQxADbY/5wxx50RBKiBDQxwPaYP9yxBx2RhCgBTQywPeYPd+xBRyQhSkATA2yP+cMde9ARSYgS0MQA22
P+cMcedEQSogQ0McD2mD/csQcdkYQoAXkObI+6cscedEQSogTkObA96sode9ARSYgSkOfA9qgrd+xBRyQhSkCeA9ujrtyxBx2RhCgBeQ5sj7pyp5RSwsYWcotB6H3Axpai0PuMjU0plWbxIA+x5F8scaBMqeZfqnEjDzQxCC6W/IslDpQp1fxLNW7kgSYGwcWSf7HEgTKlmn+pxo080MQguFjyL5Y4UKZU8y/VuJEHmhgEF0v+xRIHypRq/qUaN/JAE4PgYsm/WOJAmVLNv1TjRh5oYhBcLPkXSxwoU6r5l2rcyANNDIKLJf9iiQNlSjX/Uo0beaCJWWB/qU7f96FDyk4s+RdLHCF1XTf5hVJt20rTNKKUkmEYQoeanVTzL9W4tzRVLzg9mpgJwzCIUkrqut79TL/Bd10XMLL8xJJ/scQRi7qu2ScepbqvU417S/ZcoZt9/ug9PZqYCToJl35udt5N0+zdt23b3W1VVZ085lTFkn+xxBGLsSbGXompqmqX33ozH6v/q9/M9f35Y+C8VPMv1bi3ZDcxfd/vrcaYK5zmXKDvZ9bL2M/M56F+9tHETKiqai95NJ2Mfd/vNTS6YdFv1vb/m2/w2BdL/sUSRyyWmhjz3/qN125izDdaczyzjvBCqvmXatxbmmti7IbGnFvsupn6mf0cHBV4iSZmgp2Umvnmq9+Ux84RsJsg3rSnxZJ/scQRi6Umxs7xsZUYsy7s1cqx1cuSpZp/qca9pbnDSfoPWl0L5v/rhsWsg7Gfjc0fU39ol4YmZsJUgpgrLDrZzE3jfJr1Ysm/WOKIxVITY+f4WBOj6fvbG03MS6nmX6pxb2kst/V7va4ZezObGPMk4LGf2Sv7IqzuazQxE6bOiZk62VEf69dvyqzErBdL/sUSRyy2WIkx0bTMSzX/Uo17S1Mr9yJybiXGtLaJYSVmGk3MhLlPJ5nHNvWbtr6/vo1zYtaLJf9iiSMWW5wTMzWevj8rky+lmn+pxr2luSbGbkrqut7Vydomxn4OVvZfoolZYC8B2isp5m12EpvLiDQw02LJv1jiiMWaTyeZuW/+Zbi0Yjn2Jl26VPMv1bi3NNfEiJz//iXtkCbGPiRLA/MCTQyCiyX/YokjFXajwuEiN6nmX6pxIw80MQgulvyLJY5UjJ2si+Oluv9SjRt5oIlBcLHkXyxxoEyp5l+qcSMPNDEILpb8iyUOlCnV/Es1buSBJgbBxZJ/scSBMqWaf6nGjTzQxCC4WPIvljhQplTzL9W4kQeaGAQXS/7FEgfKlGr+pRo38kATg+Biyb9Y4kCZUs2/VONGHpRSoq5evXruo5JsbL62q1evhq4DERGhDthCbrHUwaGoG7aQ2//yj04a4cSSf7HEgTKlmn+pxo08/K+ZIQkRTiz5F0scKFOq+Zdq3MgDTQyCiyX/YokDZUo1/1KNG3mgiUFwseRfLHGgTKnmX6pxIw80MQgulvyLJQ6UKdX8SzVu5IEmBsHFkn+xxIEypZp/qcaNPNDEILhY8i+WOFCmVPMv1biRh6SamLZtRSklfd9L0zTS933okLCBWPIvljhQplTzL9W4kQenJkY3FL5UVbX7b6jC6fvey3Nv8Tx6jGEYTjL+VkqL41T73v4SKPu2sVqNKQ9Kl+rvIYW4qY18JdXElIQmxr+Um5iqqqRt293/61VLjVqNXyx1cKjY46Y28nZ0E6NXQ/Qk2XXdXqfbdZ2IvHzD1omj76//3TTN3rhL4zRNs7vNTDxzzENez9Lj7NvGJqCxx8+Nu9RYmK9zaTy7Uem6brdiNbbP9P3s13Hs/ttCLG+CLnEckgNr972+X13Xu5+P5Y5+vP1zXT9mrd69e3dvTDOWpRoza7Npml2eYRux1MGhjol7LLen3vvNuUNvbdtO3l/HNAwDtVGATVZi7ERp2/bcRKo7YZ0gwzCcm4APGadpGqnreheL2W3bt82Ze1xVVXuFpM/DMffX2H2OjccsRLuJmRpvbROj47Nvm3rutftvC7G8eR8bx6E5MJZDc/cz35yn6Lqauq+uVXvMsTfqqbw137irquKNemOx1MGhXJoYnYdz7/1jz6ebhjX3pzbytkkTYxubSHWyzf3yDxln7DZtqvu2zT1ubMXF/vma+6yNx37M3POMxTm3X8aaRHt8s+jW7r+txPLm7fJmbFrKgWN/t0uW/jI136jHVuTmcsn899j/w10sdXAol7qZyu2p/JpqRpbykdrI12wTYy6d2Yd99IN1A2LeVyl1dBOzdhw7iY5pYuYeN3XewlITc2w8U49ZG+eaJkbkfMGa49sbTcwLc3VwTA6M7fu5+x3ze9CP1bXFG3X8YqmDQ7nUjZnbU+/9mrniueb+U6iNvGyyEmO/kR+7EnPIOCFWYuzHrLnP2niOXYmxx95iJca3WN68t1qJGfv5KVdiuq4bPfxnvunzRh2/WOrgUFusxMy994u8OCRknpy7dH/z59RG3jZpYuy/Js1jgoc0MYeMYyeKfV7B2iSae5wZmz7mOnYuiX2fY+OxH2Pvi7Hx9KSnY6jrevI8orXnxNi/h1OL5c3btQ5E1uXA0jkxcytpczGYf6Xa5wy4vlHbr9PMM2wjljo41BZNzNx7f9M0kx8AGbv/WHzURr6cmhjzLG7zLG/zF37o4aS149hJdIpPJ43dNvWXs/34Yz6dZH9qa+145tn7Y5OgueyqLb0OX4eSROJ58z42jkNzYG0OTR0OnPrdmGPY99O1eufOnaPfqPkExmnFUgeH2qKJEZl+7zd/buff2P11THbdUBt5cmpigC3Ekn+xxJGCsb+O4SbV/Es17lOhNvzKuomx/8I1NzrleMSSf7HEESP9lyj1czqp5l+qcW+F2ggr6yYGaYgl/2KJA2VKNf9SjRt5oIlBcLHkXyxxoEyp5l+qcSMPNDEILpb8iyUOlCnV/Es1buSBJgbBxZJ/scSBMqWaf6nGjTzQxCC4WPIvljhQplTzL9W4kQeaGAQXS/7FEgfKlGr+pRo38kATg+Biyb9Y4kCZUs2/VONGHmhiEFws+RdLHChTqvmXatzIA00Mgosl/2KJA2VKNf9SjRt5oIlBcLHkXyxxoEyp5l+qcSMPuyaGjS3kFoPQ+4CNLUWh9xkbm1IqzeJBHmLJv1jiQJlSzb9U40YeaGIQXCz5F0scKFOq+Zdq3MgDTQyCiyX/YokDZUo1/1KNG3mgiUFwseRfLHGgTKnmX6pxIw80MQgulvyLJQ6UKdX8SzVu5IEmBsHFkn+xxIEypZp/qcaNPNDEILhY8i+WOFCmVPMv1biRB5oYBBdL/sUSB8qUav6lGjfyQBOD4GLJv1jiQJlSzb9U40YeaGIQXCz5F0scKFOq+Zdq3MgDTQyCiyX/YokDZUo1/1KNG3mgiUFwseRfLHGgTKnmX6pxIw+bNjF93+9dlKlpmk3GnaOUkr7vox0Py2J5E6QOTjcelsVSB4eibk43HpZt1sR0XSdKKRmGYfezuq6lbVvnseeQhOmL5c2bOjjdeFgWSx0ciro53XhYtrqJsRPMVlWVdF2397NhGPYSXP+/3jTdg
TdNs7vNTIS6rke786qqdj+/e/euKKV299Vj6pi7rpOqqsZe/C4Wc7xhGBbj1c8FN7HsQ+qAOggp1X1I3VA3IW3SxOhf1lySirz4JevOumkaqetaRF7+Usdu67pu9289hpmgOuH0GLoQlpLQLJq2bXfJbRbAUrx20eE4sRQydUAdhBRLHRyKuqFuQpptYvQSn73ZS3xrklD/4sYeM5cwYx2w/QLMJNRjzI1px6LjGRtvTbxwE8ubN3VAHYQUSx0cirqhbkLythKjE3rsMUtdb9u2o8uBOq4tktAe75B44SaWN2/q4Px41IE/sdTBoaib8+NRN/5s0sSIzB/TNH9xc7fNHX80n2duOVCPYRfGoUlIJ+1PLG/e1IFMjrcmXriJpQ4ORd3I5Hhr4oWb1U3Mkqmzy+0TqcxjhHZSjCWMebxRj3FIEur71nW9l9jmbW3b7m4zf742XriJ5c2bOqAOQoqlDg5F3VA3IW3WxIicPwZqL93pxNCbtuYkKv0Y+3iqPsP7zp075xLDXkY0x5yKxTw7fW28cBPLmzd1QB2EFEsdHIq6oW5C2rSJAY4RS/7FEgfKlGr+pRo38kATg+Biyb9Y4kCZUs2/VONGHmhiEFws+RdLHChTqvmXatzIA00Mgosl/2KJA2VKNf9SjRt5oIlBcLHkXyxxoEyp5l+qcSMPmzUxc5+d35rrc20Za9u20rat9H2/9/XWLubi87mffYnl9VAHx6MO3KX6eqib41E37pJsYmIxDMNe4umEPKUc93Msr4c6OA51sI1UXw91cxzqZhtOTYzeIfpz9OY4U5+RF5m+qqgV1O5x+nnsK4yat5mf6bc/fz813lKs+n5TV0dtmubct/O4gj4AAAKRSURBVEyaj9ex2JeU1xcDW3reNfs5B7G8HuqAOggp1ddD3VA3ITk1MUq9vBKnvXPs29ZeVXTs6qD6l2BfYdT8t05m89sTl8Yz73Po1VH1a7TVdb33eqZes/5CprnnXbOfcxDL66EOqIOQUn091A11E9LRTcxUt6f/PfbthsMwzF7XYmypa+xaE2NJOHbNi6Xxxp5z7TU57NeojS0JmmOaX2e99LxL+zkXsbwe6oA6CCnV10PdUDchzTYx5jKYvWw3dZVO8zZ707/MqauKTu3gQ5JQv6i+P38RrrHnOfbqqHNJOLbEWde1dF0nXdftlhHXJOHcfs5FLK+HOqAOQkr19VA31E1Is03MnEM66TnmcuAWSbjUSU89XlubhMMwrO6k9c/1kl/XdXTShlheD3VAHYSU6uuhbqibkI5uYkTOHws0x1Hq5XE4sxtce1VRfV99+1IS6jjsY5pz4029jjVXR9Vj28aOaZpjHfK8a/ZzDmJ5PdQBdRBSqq+HuqFuQnJqYsyzo5fOLjeX6+auKjp2xvWaJDTPWF873tx9xp7XTsK1Z5ebr9sswLVnl8/t5xzE8nqoA+ogpFRfD3VD3YTk1MTEYOyYpi/D4P9z/jmKJf9iieMY1EH6Us2/VOMWoW5yQBPjyPzGxbXHcbEvlvyLJY5jUAfpSzX/Uo1bhLrJQfJNDNIXS/7FEgfKlGr+pRo38kATg+Biyb9Y4kCZUs2/VONGHmhiEFws+RdLHChTqvmXatzIA00Mgosl/2KJA2VKNf9SjRt5oIlBcLHkXyxxoEyp5l+qcSMPNDEILpb8iyUOlCnV/Es1buRh18SwsYXcYhB6H7CxpSj0PmNjS7NyAABA8WhiAABAkmhiAABAkv4frFRxkImXgLsAAAAASUVORK5CYII=\">\n</div>\n", "_____no_output_____" ] ], [ [ "#---------------------Clase padre---------------------------\nclass Animal:\n nombre=\"\"\n edad=0\n tamaño=0\n\n def __init__(self, nombre, edad, tamaño):\n self.nombre = nombre;\n self.edad = edad;\n self.tamaño = tamaño;\n\n#------------------Clases hijas----------------------------\nclass Oso (Animal):\n tamaño_ocico=0\n\n def __init__(self, nombre, edad, tamaño, tamaño_ocico):\n super().__init__(nombre, edad, tamaño);\n self.tamaño_ocico = tamaño_ocico;\n\n self.descripcion();\n\n def descripcion(self):\n print(self.nombre + \" es un oso de \" + str(self.edad) +\" años y tiene un ocico de \" + str(self.tamaño_ocico) + \" metros.\");\n\nclass Tigre (Animal):\n color=\"\"\n\n def __init__(self, nombre, edad, tamaño, color):\n super().__init__(nombre, edad, tamaño);\n self.color = color;\n\n self.descripcion();\n\n def descripcion(self):\n print(self.nombre + \" es un tigre de \" + str(self.edad) +\" años y tiene un color \" + str(self.color));\n\n\n#-----------------------------Ejemplos----------------------------------------\n\nTigre = Tigre(\"Malo\", 3, 2.2, \"Amarillo\");\n\n", "_____no_output_____" ] ], [ [ "## __2. Polimorfismo__\n\n_Polimorfismo_ se deriva de las palabras griegas \"poli\", que significa muchos, y \"morphe\", que significa formas. En programación, se emplea este concepto para cambiar el contenido de un método heredado para que se ajuste a las necesidades principales de las subclases.\n\nExisten dos tipos de polimorfismo: _dinámico_ y _estático_.\n\n### 2.1. Polimorfismo dinámico\n\nSe conoce también en la literatura como _polimorfismo en tiempo real_, _vinculación dinámica_ o _anulación de método_ (\"overriding\", por su traducción al inglés). 
La multiplicidad de formas ocurren en diferentes clases.\n\nSupongamos que en la relación de herencia entre una clase padre e hija existen métodos con el mismo nombre, pero en diferentes formas. Cuando un objeto es asignado a una _referencia de clase_ y el método del objeto es llamado, el método del objeto de la clase se ejecuta; más no el de la clase referencia.\n\nDado que la creación del objeto ocurre en tiempo real, la forma en como se ejecuta el método sólo puede ser decidido cuando se ejecuta el método.\n\nPor ejemplo: una figura geométrica tiene un área y un perímetro; pero la forma de la figura define la manera en cómo se calcula.\n", "_____no_output_____" ] ], [ [ "%matplotlib inline\nfrom math import pi\nimport matplotlib.pyplot as plt\n#CLASE PADRE\nclass FiguraGeometrica:\n dimensiones = []\n ubicacionEspacial = [0,0]\n fig, ax = plt.subplots()\n\n def Area(self):\n return None\n \n def Perimetro(self):\n return None\n\n#CLASES HJIAS\nclass Circulo(FiguraGeometrica):\n def __init__(self, radio, ubicacion = [0,0]):\n self.dimensiones.append(radio)\n self.ubicacionEspacial = ubicacion\n\n #Figura\n self.ax.add_patch(plt.Circle(tuple(self.ubicacionEspacial), radio))\n \n def Area(self):\n return pi*self.dimensiones[0]**2\n \n def Perimetro(self):\n return 2*pi*self.dimensiones[0]\n\n\nclass Rectangulo(FiguraGeometrica):\n def __init__(self, b, h, ubicacion=[0,0]):\n self.dimensiones.append(b)\n self.dimensiones.append(h)\n self.ubicacionEspacial = ubicacion\n \n def Area(self):\n return self.dimensiones[0]*self.dimensiones[1]\n \n def Perimetro(self):\n return 2*(self.dimensiones[0]+self.dimensiones[1])\n\n\n#---------------------CREACIÓN DE FIGURAS-----------------------\ncir1 = Circulo(0.2)\ncir2 = Circulo(0.4, [0.6,0])\nrec1 = Rectangulo(3,4)\n\nprint(\"Círculo de radio \" + str(cir1.dimensiones[0]) + \" tiene un área de \" + str(cir1.Area()))\nprint(\"Rectángulo de \" + str(rec1.dimensiones[0]) + \" de base por \" + str(rec1.dimensiones[1]) + \" de altura tiene un área de \" + str(rec1.Area()))", "_____no_output_____" ] ], [ [ "### 2.2. Polimorfismo estático\n\nConocido también como _polimorfismo en tiempo de compilación_, _vinculación estática_ o _sobrecarga de métodos_. Consiste en tener múltiples métodos con el mismo nombre pero diferentes argumentos de entrada. Se escoge el método dependiendo de cuántas entradas pase el usuario. Por ejemplo: en una calculadora, es posible sumar dos o tres números.\n\n", "_____no_output_____" ] ], [ [ "class Calculadora:\n def suma(self, x=0, y=0, z=0):\n return x+y+z\n\ncalc = Calculadora();\n\nprint(\"Suma de dos números: \" + str(calc.suma(5,10)));\nprint(\"Suma de tres números: \" + str(calc.suma(5,10,4)));", "_____no_output_____" ] ], [ [ "El constructor es un método que permite realizar un desarrollo particular en el momento en que se crea un objeto. 
También es posible sobrecargar el constructor de una clase dependiendo de las entradas que ingresa un usuario.", "_____no_output_____" ] ], [ [ "class Persona:\n nombre = \"\"\n cedula = 0\n\n def __init__(self, nombre:str =\"\", cedula:int=0):\n if cedula == 0 and nombre == \"\":\n print(\"Se creó una persona desconocida\")\n elif nombre == \"\":\n self.cedula = cedula\n print(\"La persona está identificada por C.C.\" + str(self.cedula))\n elif cedula == 0:\n self.nombre = nombre\n print(\"Se creó a \" + self.nombre)\n else:\n self.nombre = nombre\n self.cedula = cedula\n print(\"Se creó a \" + nombre + \" con C.C.\" + str(cedula))", "_____no_output_____" ], [ "#Sin argumentos de entrada...\njuan = Persona()", "_____no_output_____" ], [ "#Sólo el nombre\njuan = Persona(\"Juan\")", "_____no_output_____" ], [ "#Sólo la cédula\njuan = Persona(cedula=1098)", "_____no_output_____" ], [ "#Toda la información solicitada\nargs = {\"nombre\":\"Juan\", \"cedula\":1098}\njuan = Persona(**args)", "_____no_output_____" ] ], [ [ "## __3. Ejercicios__\n\n### 3.1. _Toppings_\n\nEres el dueño de una franquicia de Zirus Pizza. Establece una relación de herencia en donde puedas definir tipos de pizza con base en el sabor de la masa base (integral, harina, etc). Puedes guiarte del diagrama UML.\n\n\n<div align=\"center\">\n\n<img src=\"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAa8AAAE6CAYAAAChwN3xAAAH8nRFWHRteGZpbGUAJTNDbXhmaWxlJTIwaG9zdCUzRCUyMmFwcC5kaWFncmFtcy5uZXQlMjIlMjBtb2RpZmllZCUzRCUyMjIwMjEtMDctMjhUMDIlM0E1OSUzQTI5LjA2MlolMjIlMjBhZ2VudCUzRCUyMjUuMCUyMChXaW5kb3dzKSUyMiUyMGV0YWclM0QlMjJKaWliTW5yTWoyYkhRTlRNWFI5dSUyMiUyMHZlcnNpb24lM0QlMjIxNC45LjIlMjIlMjB0eXBlJTNEJTIyZGV2aWNlJTIyJTNFJTNDZGlhZ3JhbSUyMGlkJTNEJTIyZU9sX2c3MS1US2FNdlFYZ0pKbWslMjIlMjBuYW1lJTNEJTIyUGFnZS0xJTIyJTNFN1ZwdGIlMkJvMkdQMDFrYlpKSUp5RUFCOTU2ZDJWMWtwc1ZOdnVweXVUdU1TN1RwdzVwa0IlMkYlMkZSNG5ObmtER2pxaWZxRkNWWHo4RXZzNXg0OVBBcFl6aiUyRmElMkZDcHlFVHp3Z3pMSUh3ZDV5RnBadGo4WTIlMkZGZkFJUWRjZTVBREcwR0RIRUlGc0tKdlJJT20yWllHSkswMGxKd3pTWk1xNlBNNEpyNnNZRmdJdnFzMmUlMkJHc2V0Y0ViMGdEV1BtWU5kRyUyRmFDRERIQjNib3dMJTJGU3VnbU5IZEczaVN2aWJCcHJGZVNoampndXhMa1BGak9YSEF1ODZ0b1B5ZE14YzdFSmUlMkYzNVV6dGNXS0N4TEpOQiUyRm43Yk9zdXhyJTJCSjFUJTJGdXYlMkZIWDRYYzdTWHA2bEZmTXRuckJUempGZ0t4eFN2Uzg1Y0VFSTkzUmlPRVlTck1YSHN1VnJrRlF4b3h1WXJqMllUWkVBUEJLaEtRUXg2bXVrRHdCMUE4cEN4N3hnVyUyRlZuRk9KJTJGUiUyQm1OQXU1b0c4d0xHWjZUS2dXVWt2Qzlpb3RWcW9ud0FOQUJVbWh6ZElFQXRXZ0o3eXZOSHpFcWRTQXp4bkRTVXJYeDJWRVdHeG9QT05TOGtnMzBoR0M1WkQ5MmRDakk2R3dFUWlQaUJRSGFLSTcyQ090QWIwSnhycTRLeFNGUEkyRlpUVVo3V0N0NHMxeDZJSm91TkJjWDhHNzNlRGRzaDBBbm5HRXJibGp6UkMzbk9rTDQxaGFFSHJiMlUlMkJ6VURqWlZrdWxxcVZ4c3k3RmF3N3NUMWRTMEhoVHFTNWQxbVFGZ1pVWjNZTCUyRklIUE8xQWlMbU9jNm80elZJQ00xUmw3a1dhR2xDZlpoQm85Wm00VmJJSCUyRm84Q3FJUTE5WW85cVVJUTBDRWl1UmNJa2x6aFdoNkU4NHJETUwlMkYzQUdIeUJrUHVnUHJTRk1mQTVsVkpUaG81b0xPZWN4ckFYVFRCd0U1TFlqU25MdGxIUiUyQm16YmxwZVdrUXR0R1RxN2JrWnFjRTJxcWNjeG94bDNPc2NtazZFTUVSMEFWSXdXano0cndSUTgxV0hlYXJEc25HR1o0VGRpU3AxUlNyc1lYZWRzYTg1OUY3cWhscmhoM3hLMTdnbHRZN0NDUHc5YVh3Rng1YjZzNk9MRjlRUk1mNHZuVHo1QU5Yam5jNHA0QXV0UEl1R1VDc0RzU3liQWhraVY5ZThOM0E5R1JnYkRkdGc1aTJKV0Q4QnFVOTZBWTgyZ3R5Q2tEME11Y2U1SUFDbE9aUHVlWE9yVldteVdDUU9yNEUyS0RHemFqZDhHQTNMUEtsVmxsZUJOYjRYVmxLMFozVzlFWnVaTlB0aFhqRDlpS3JhU01CamlZaWEzRW1iR29aNEM3JTJCZmdFSmJVMEg4T3V6QWRxOGZoQjRtQ3EzZ1pCYWMyNE92bG5BR2xyZ0x5OCUyQklXcUcyZHhEMlZrVEFUWlUlMkZtM2dpSE9lZW1iYnFTdUYlMkZ0eTRXQUtNU3hNZHhwNEJzajY5U2VlYllDaWMxYXE5RjRTUVNFJTJCeWdxVlhBUUpHcSUyQnJhaDRDbHMyM3dpZnY4UW1uTVZnVmNvbDM5elR2cDRnV2hHRkpYNnR6TzhXMEhtN0pzNjFyJTJGSTFiOXplb1psdnlOZWxlNVpkY3RZRTg1NTJCOGtVM0JzcU
VkMXpqJTJGOUJpODNISk9KMjdGNzZORng2Nk5ZcTlsZ2tJMWJWd3V3elVmUDY1Ykliemt5cjN1V2VPc2J2SnZkV2o4NWswZHFYTEhYYmxjbEh6U2VwdWMyOUdiOXRYN1YzNVhIVHFJZWFTMGIxdjllNjAwUEtnTUJ2dzltSm9QdldnQnMwbHE4cVRMTlpsTTFxWVZ0c2M2cVoxUUhIRTQlMkJBNXBISHR2RWV1QVVvV1YxbEpZenFBalpCdmVJelpRNEclMkJwNkMxUHRpUERobjFCOGdwZSUyQlMlMkJBJTJGdnFvayUyQkd3a2VON2xtRFlMN1UxS3c3dDNHb2p0MGZqQ2JGWDlXRFRKeiUyQlpJS0tXdTlqOXJYaGJTYTFVNmRyJTJCenBwQ3JUZiUyRiUyQlZjS2lvSjg5cXNkTmJhSHMlMkJuWFVnbFdVRTJVdmZjQ1p4a1dXY2JCeVM0blpsMDYwU09VQ05IdUtkU3hQVldFb3JGbCUyRjA1WWNVdkpweUglMkZ3QSUzRCUzQyUyRmRpYWdyYW0lM0UlM0MlMkZteGZpbGUlM0UN2GSpAAAgAElEQVR4nO3dW6gkRx3H8d4Iq3gJEgIRZPGE9kGFGBGCYgxKkAxRwaCyBjTCsvEywrpKCELEGHFROlEMwtHgDeKtPUIiEoOIQZTxwQjx8pANxIwPycOaByXoKgiJfx92a7ampqq6urqna6r7+4GBs2emu6una/t3qi//LoqiEF68Ur4AoK2CnQdSov8BiEF4ISn6H4AYhBeSov8BiEF4ISn6H4AYhBeSov8BiEF4ISn6H4AYhBeSov8BiEF4ISn6H4AYowivxWKxuuG1qqrV76uqWv2+ruvB2qG3AX5j6H8Ahje68JrNZqvfz2YzwmvHjaH/ARjeqMKrLEspikKWy6WIyNrvVHjpo7GiKGSxWKzmY76n5tM0ndmO+XxuDdPYZdd1vfp9WZb9fXE7YAz9D8DwRhVeKjTquvb+TgVDWZarcFkul2ujJj14fNPZ2qECRh+JdV22es+17FyNof8BGN6owquqKinLUubz+Woko0Yt+mFDfZRjBo0KPBvbdK52KGbYtF22+rwKPfPfuRtD/wMwvNGF13w+XwXGbDZbCy9zZFSW5VoI6efI9BBqms7WDkV9NnbZ+iFI12HFnI2h/wEY3ujCSz8/pP+7rutVEOiH7mwhpM9juVwGT+cbecUue2wjLdMY+h+A4Y0uvNT5I3VBhB5eKggWi8XGSMg8vKjCRkS809naoabT2xW7bDMQZ7PZqC7aGEP/AzC80YWXiKyuMBTZDAb90Js6VKf4rgb0TWe2w3W1Yeyy9dHYGLaXbmzrA2AYowgv5Iv+ByAG4YWk6H8AYhBeSIr+ByAG4YWk6H8AYhBeSIr+ByAG4YWk6H8AYhBeSIr+ByAG4YWk6H8AYhR7e3vW2nm8eA3x2tvbS/1/AECGiqLgL1+kQ/8DEIPwQlL0PwAxCC8kRf8DEIPwQlL0PwAxCC8kRf8DEIPwQlL0PwAxCC8kRf8DECP78NKfUDyfz9ce4ojdl3v/A5BGY3jNZjNZLBayXC6tj59XTwS2vYag2qQ/Pbkt/cnHfbRbhSmaEV4AYjSGlwqHxWKx9kh7x8yy3GmXZSmLxUIWiwXhNTDCC0AMZ3i5RlNVVflmtrbTrut6bdq6rkVEViGhDvkVRSHL5XL183w+X5tv03z0kZO+fH2e5nqqZaoRmz6fkOldbdLnt1wund8VziG8AMTwjrzqul4FiTp82DCz1WfUjl/twKuqWhvF6UGoDj0ul8vVe2q6NvOZz+dro8OyLJ3v6cqylLquN0Zerul9bTK/B/gRXgBieMOrqqrVzrssy8aRhG+nXdf1Ruio+ZnBEjsf23uKGTg6W3i1mV5fblP7sY7wAhBjq4cNzQshYsMrdD56iKjDekrb8Gqa3tUmW/vhRngBiOEdebW5WOP8zFY7bXPkEjvyajOfoUZevjaZ7Ycf4QUghjO89Evj9cOHDTNb7bTNkUtZllHh1WY+ZoiY56xsl/qrz4Wc83KN6vQ2me2HH+EFIIYzvNperHF+Zmuf06+804Om7WHD0PmY4RVytaGavy28fNO72qS+LwIsDOEFIIb3sCGwbfQ/ADEILyRF/wMQg/BCUvQ/ADEILyRF/wMQg/BCUvQ/ADEaw6upqvyWGtXqSr02BXW5CnC3EF4AYjSGV9sblfuwzYAhvHYL4QUgRm/loWz3Q/nuk9KfA6ZXkTersofcq2WrjGGrNE/F991DeAGI4R15hd6orG7yFTlXjUNN46rKXtf1RvV3fd5m4DRVhreFl2saRl67hfACEMMbXiFV5W3nm2z1//TagGYlDEujrA+HdNUntIWXq+oG4bVbCC8AMTofNnRdLNFUlV1/EKX58EkVMKGV4QmvfBFeAGJ4R14hF2u4wqttVXfbYUNGXuNHeAGI4QyvNlXl9UDQnyrsqsqunxdTnws952U73Eh45YvwAhDDGV5tqsq7rggMrcpuBqNelT32akNXeFHxfbcQXgBiUGEDSdH/AMQgvJAU/Q9ADMILSdH/AMQgvJAU/Q9ADMILSdH/AMRoDK9tVZU3rwrsk3ljtfme7UrDNpXp0R++cwAxGsNrW1XltxVe+n1hIhcqeShcJr9bCC8AMTqXhxJxV4gXuVAmSr1UAV9b9Xc9yFz3d6np1DJVW/UK9GYgquXq95YdHByszSe0Mr25Tq4bpxGG8AIQwzvyCrlR2Vch3lbPUB/J6WGnv6fmY6sMr6ZTIWhSYeR63yw9ZYap/nNIZfqyLAmvDggvADG84RVSVb6pQrzrs+ZhQz3ofDUNQw43ukZ751d4LbzUfEKrdJjr22b9sYnwAhCjl8OGvgrx+qG3oiic4aWW2VRNvu25MvV5NVIivHYL4QUghnfkFXOxhn7Y0BxBbXvkZR7C1NukRl+E124hvADEcIZXaFV5X4V4cwSlnx8yzys1nfPyjdiMFVo7TKjaoD7fNbz0eYicO8dGeMUjvADEcIZXm6ryvgrx+nt6KNiu6NM1XW1oHm40/62/9PfUBR37+/vR4cXVhv0hvADE8B42RLP5fL5xng/h6H8AYhBeLalRmXkBCuLQ/wDEKFxXFfLiNdQLANoq2HkgJfofgBiEF5Ki/wGIQXghKfofgBiEF5Ki/wGIQXghKfofgBiEF5Ki/wGIQXghKfofgBiEF5Ki/wGIQXghKfofgBiEF5Ki/wGIQXghKfofgBiEF5Ki/wGI0Vt4mdXWh3hMSFEU3ueMpZ4fmhFeAGL0El7m04pFzj300fX05b4QXvkjvADECAovM5hMZVlKXddrv1NPQjb/bT4Gw/ZEZT1A1JOPzdGc/oTmg4MDKYpi9dmmJyGrddLbos9vuVw2tlctC93wHQKI0Tm81E7eF24i58JBjcTm87nMZjMRuRAGtvfqul79rOahB5sKKjUPFaBN4aWHbVVVq1DUg7OpvWZYIw7hBSCGM7zUoUDzZR4KDAkvtcO3TeMLGtuIyWj8WnipefjmabZFtcc2v5D2ohvCC0CMQUZeK
ght0zSNkqqqcl4E0ld4mfNr0150Q3gBiLH1c176Dt/3nu/8lL4c32FDNQ8zUNuGFyOv4RBeAGJs9WpD8wIL/RySGSa2oNHPR6l5tAkv9dnZbLYWiPp7VVWt3vOd83K1F90QXgBi9Hafl3mOzDzE13T1nu/iCtf5NnXF3/7+/kagmIcb9Xm62qJfrRjaXnRDeAGI0Vt4ATHofwBiEF5Iiv4HIAbhhaTofwBiEF5Iiv4HIAbhhaTofwBi9B5evvuo+tZ1WX22taoqqapKFovFWkmrLnztG/J73qYxrAOA4WUdXrtiuVyuBZYKsm0ay/c8hnUAMLxeKmzoz/JS1eEV1/1SIu6K8fpy9enMiu76Dlwv8KumMdvsml9TW5sq38/n840KI/r0qi3mY2JUwd+m5YZ8z7kawzoAGF4v4VUUF6qsmztV873QivG2yu+u6vH6zyoE9coZTfPTP9O28r1aR9NsNtt4NphtndVN2b7lhnzPuRrDOgAYnjO8QqvKu0YH6mdbZYvlcumtYeiq/O4qwKv/bKtv2DQ/2zJD6y+a66jYDh3q89RLWDUtt+l7ztkY1gHA8DqPvFwV2PX3zJeal6tivGvH3Ca8VLtthXZty4mtfO8LL9uh0NlsJnVdS13Xq8ONIeHl+55zNoZ1ADC8zuHVZuTlox827CO8mkZerumV0PBaLpfBIy/1e3VosK5rRl4jWAcAwwsKrybmORvXeRp99BBaMV59Vr3fFF6qHeY5L9/8XOsRUvlezdtkO+elz6vNckO+51yNYR0ADK+X8NKvlmu62lAfwfkqxtuuwAsJL/0KxtD5+T5jW64ZXqFXG+rrrQd36NWGvu85V2NYBwDD6yW8dkHKR5WkuM9rLMbS/wAMi/DqiV5hI/Q8HwgvAHFGE17IE/0PQAzCC0nR/wDEILyQFP0PQAzCC0nR/wDEILyQFP0PQAzCC0nR/wDEILx68OCDD6ZuQrbofwBiEF49uP7661M3IVv0PwAxCr0sES9eKV4A0BZ7jo4uv/xyOXHihBw9ejR1UwBgMgivDu666y45evSoPPfcc3LZZZfJH//4x9RNAoBJILwi/fvf/5ZLL71UHnnkEREROXXqlBw/fjxxqwBgGgivSLfffvtaWD3zzDPywhe+UJ544omErQKAaSC8Ivztb3+T5z//+fL444+v/f5Tn/qUfOITn0jUKgCYDsIrwic/+UlrSD311FPyvOc9T55++ukErQKA6SC8WvrLX/4ihw8fljNnzljf/9jHPiaf+cxnBm4VAEwL4dXSzTff7A2n06dPy0tf+lL5z3/+M2CrAGBaCK8W/vCHP8ill14qZ8+e9X7upptukjvvvHOgVgHA9BBeLbzvfe+Tu+66q/FzDz/8sBw5cmSAFgHANBFegX7zm9/I5ZdfHvz5G264Qb7+9a9vsUUAMF2EV6Drr79e7rnnnuDPP/TQQ/Ka17xmiy0CgOkivAL87Gc/k9e+9rWtp7v22mvl+9///hZaBADTRngFePOb3yw/+MEPWk/3k5/8RN7whjdsoUUAMG2EV4Mf/vCHcvXVV0dPf9VVV8lPf/rTHlsEACC8Glx55ZVyzTXXyE033STHjx+X+XwuJ0+elFtvvVVuu+02ueOOO+QLX/iCfOlLX5KvfvWrcs8998h3vvMd+d73vicHBwdy8uRJed3rXpd6NQBgVAgvjwcffFDuu+8++dGPfiTf/e535Vvf+pZ87Wtfk7vvvlvuvPNOOXXqlLztbW+Ta665Rm655RY5ceKEfOQjH5Fjx47J+9//fjl69KjccMMN8va3v11+9atfpV4dABgNwqujz33uc3L77benbgYATArh1dGnP/1pOXXqVOpmAMCkEF4d3XrrrZSCAoCBEV4dnTx5Ur7yla+kbgYATArh1dF8Ppf9/f3UzQCASSG8Ojp+/Lh885vfTN0MAJgUwqujD37wg3LvvfembgYATArh1dGNN94odV2nbgYATArh1dG73/1uue+++1I3AwAmhfDq6J3vfKc88MADqZsBAJNCeHV03XXXyS9+8YvUzQCASSG8OnrrW99K3UIAGBjh1dHVV18tv/3tb1M3AwAmhfDq6KqrrpLf//73qZsBAJNCeHV05ZVXyp/+9KfUzQCASSG8Onr1q18tp0+fTt0MAJgUwqujsizliSeeSN0MAJgUwqujI0eOyJNPPpm6GQAwKYRXR5dddpmcOXMmdTMAYFIIr44uueQS+fvf/566GQAwKYRXRy9+8YvlX//6V+pmAMCkEF4dHT58WP773/+mbgYATEpRFIXw4pXyBQBtFew8kBL9D0AMwgtJ0f8AxCC8kBT9D0AMwgtJ0f8AxCC8kBT9D0AMwgtJ0f8AxCC8kBT9D0AMwgtJ0f8AxCC8zrPdPFtVlYiILJdLKYpC5vN54laOD/0PQAzC67yiKGQ2m63+PZ/PpSgKWSwWCVs1fvQ/ADEIr/PM8FosFqvRlz7yUr83R2iu34uIVFW19nsC8QL6H4AYhNd5oeFlTmMLI/33aj7L5VJEzj15WV/O1NH/AMQgvM7zHTa0hZcaTZmB1vT7oiikLMvtrkxG6H8AYhBe59ku2KjrWkQ2L9hQ/9ZHVK7fq5GXCqyyLAkvDf0PQAzC6zxz5KUzw0uNytQ5LcX2e/U7/bAh4XUB/Q9ADMLrvNDw0i/M0Ll+rw4X6ue/CK8L6H8AYhBe54WGl3nloJrO9Xs1b/13fOcX8F0AiEF4ISn6H4AYhBeSov8BiEF4ISn6H4AYhBeSov8BiEF4ISn6H4AYhBeSov8BiEF4ISn6H4AYhBeSov8BiFHs7e1Z6/rx4jXEa29vL/X/AQAZKoqCv3yRDv0PQAzCC0nR//KUesTOi1dRFOw8kA79L09sN6REeCE5+l+e2G5IifBCcvS/PLHdkBLhheTof3liuyElwgvJ0f/yxHZDSoQXkqP/5YnthpR2OrwWi4UURSHL5bL3eZuXXJrvLRYLZ3vQL77TPLHdkNIkw6ssS6mqavXvqqrW/iO6wgvbsav9D35sN6QUHV4qWObz+Wr0ou/wl8uldWSjplOBocJJ/Tyfz73LUEGm3pvNZqv5u5apVnS5XK4+YwZiURRS17WUZbma/uDgYG0Z+siraf3rul5bp7IsW3/HU8FOsD9VVclsNgv+fJvPmqay3fR9gu+ITR/U/knfD8Kuc3ipEcx8Pl/7j6CPbvT3zOlUMCyXy42Rlh4QIuf+Y6oQUO/Vdb22MurfZnt0apn6tOaXslgsNpZhCy/X+uthVpYl4eXh639lWTIKDqT+IAwJJBVy+h9mbU0lvBTz/zzS6hxeKmjUqEV/T9FHO+Z0vp2++VnffBaLxVpAuEZYij4ysoWgHl5mmDatv/6z7d9Y5+p/thEt7FQQ+f5oM3U9h0t4nWPbj9iOzJh/wNve00de+s/m50TW92FNf5CPkTe89C/NHMY27by3EV76+7bl24b1IefL1LzUcgmvYdn6X1mWq35DeIULDa/lcrn6jmP7JuEla4Gi9kF1Xa8+a/5BX1WV9z1beKnPqZH1YrHYOLxIeLWw6yMvl7qu
rf+5y7JcbXjCa1i+/kd4tdNm5NXV1MNLhZXeP8uylNlsZg26kPdcIy9zeeay9eCciq2El8jmOS8z2NqEl5qP7ZyXOSpTG88c/ZkrrW9k9Vk1r67hZa7HbDYjvDwIr/4QXttj2x/Zwkudp7UFVNN7IeFV1/XGsgmvFpp23k1XG7YJL/3wpWs+tmWawWb+2/VZNQTf39+PDi+uNgxHePWH8NqeXRl51XXNyEt2/D6vsZjP51z66kF49Yfw2p6Yc17mH7v6OS/be6HhxTkvwmsr9M6pn3CFHeHVH1t4mYfE+7rpf2r7DVt4mUd7tnW1ocjm6Eq/V1bNa0r/VwgvJEf/yxPbzc13T1gf94u5zr9to5TeriK8kBz9L71//vOfradhu7ltO7xE1m9l6mN+uSG8kBz9L627775bXvKSl8gdd9zRKsTYbkipU3ht8xjrkBXc1clO26tvfayXfk5NtT3nC0JCv4+HH35Yfv7zn2+5NdP02GOPyYc+9KFWIUZ4IaWdDa9Uclgn84bsqqpGHV7PPPOMfPzjH5dLLrlEXvnKV8oHPvAB+etf/zpQ66ZFhdjFF1/cGGKEF1KKDi+90rJerd0csbiqyIe+p/8cU8FdH1WZO3jbCU5z3ru4XlMKr/39fbn00kvlxIkT8o9//ENERD772c/KRRddJJ///OeHauLkhIQY4YWUeht5NVWR91WG972n/9y2grtZCiqkQrkZILu4XlMIr4ceekje9KY3yXXXXSe/+93vNt5//PHH5cYbb5RXvepV8uMf/3iIZk7SY489JjfffLM1xAgvpNRLeLWpZdj2PZH4OoIxNQX1wNjV9RpzeD311FNy7NgxecUrXiH33ntv47QPPPCAvP71r5d3vetd8uc//1meffbZ6Ndzzz03+Ot///tfkldbthAjvJCSN7x8VeXVxHqRSMW3I9enC3lPpFsRXPNGvpAvxCy5smvrNdbw+vKXvywvetGL5I1vfKOcPXu21Tze8Y53SFEUcujQIbnooot6fx06dCi7l+sipK6vI0eOyKFDh+QFL3jB1pbBi1fTa29vT4qiGOfIy9T2sOGurtdYw0tE5MyZM/LhD39YXv7yl8u3v/3txmnvv/9+ueKKK+Q973mPPProo9ts5qSdPn2akRd2yvkQ6/+cl+1ZNSL28z++9/Sf21ZwN3fqfZzz2oX1GnN4Kb/+9a/lLW95i1x77bXWbfboo4/Ke9/7Xrniiivk/vvvH6KZk2QLLYXwQkqdwkt/hHjTVXn6VX9t3tN/jqngrl8VaXsCaterDVOs1xTCS/nGN74hL3vZy+SjH/2oPP300/Lss8/KbbfdJocPH5YvfvGLA7ZyWnyhpRBeSKlTeIWwnf8JeS/WUBXcU67XlMJLROTs2bNyyy23yMUXXyx7e3ty7NgxefLJJwdq3bSEhJZCeCGl7MMrVQX3lOs11QobjzzyiPzyl7/ccmum6fTp01TYQFa2Hl5AE/pfWtQ2RI4ILyRH/0uPqvLIDeGF5Oh/eWK7IaWdDa/Q6uvm1YVKTHUNxbzisEmbSvFt5z0Fu9j/0GxXt9s2LphS9PPQ5vq7/m8P+YSMKdnZ8AqlOobZaWazWfTD2bYZMITXppz735Tt6nbbVnjp93yKXKjeo/B/e1jR4RVSOV1dCSfivl/KaMjGPVCKb3qzU6nPmlfsmW23VXM3q+Xr91oVRSF1Xa+12VU1I2TeOGdXd4Lw67LfcD1Joe1THPT7IH3LiN03qWn1Kjnm++ooj5r+4OBgbRl9PEkCmzqHl69yurmjV/82q6eXZbl6T92zZIaXq7q7msb2b9cyQ6u5mx1WX0fbd9Fm3riA8MpT1z96Rez7At9THNR76v+3rRxbn/smnblPsX0ferk89bnY/YP+JAls6hxeIbX9zJtqXbX+9PdtG9w2va0tZVlu/NXia5+vLJPOdR4ttuQTziG88tTHfsOs2en6f25O59vp97Vvsmk6EmMrzL2NOq3oUFU+pHK6voH0Da4P5V0nM/Xf+6q7K2r0pv/V07TMkIDRv4OisN8ETXh1Q3jlqY/9hrnz3kZ46e+32Tc1Mc+3E17DSjLyss3H9/umkZfIhc6syiiFLLMpYMzlMvLaDsIrT2McebmYD7ZV9FMehNewOoeXr3K6+ZeP2sjmX1h6x1PzaTrnZW5U/eIM/S8h2zJDA8Zsp+sYNOHVDeGVp77DS6T5KQ5twquPfZO5vvphQvVZNa+u4WWuh/4kCWzqHF6+yul6BzGv6PG9p8/D9xnbytiOX5vLbOpA+lVC+lVE5nTmX3Nt541zCK88bSO8mq42bBNe+uFL13xsyzSDzfy367Pq//b+/n50eHG1YbjeDhsCsQivPLHdtmuoJ2TkivBCcuwE88R265d+6sM8ioRN0eEF9IX+lye2G1IivJAc/S9PbDekRHghOfpfnthuSGmnwqvtVXiue8T6mDeGsyv9D+3s+nZrs3+ImYZ9SlpZh9euzBvd7Er/QztT327sU9LqHF62ezJ892Tp94Xpl4GaFddDKj3b7p+gmnt+pr4TzFXIdvP9n9NvJnbdiznUkynYp+SnU3jZqsGr39uqJpslVlQlDb0xeudwVV5WbB2Nau75Ibzy1Fd47cKTKcw26T+zT9lNne/z0tkK7ep1xZpqdanOEFLL0GwDZZnyRXjlybXdXMVuzYfDdqlB2PeTKcw22drHPmW3eMMrpDq0qakCvH6IwJyn6gwhVeTNNtDR8kV45amvkdeuPJnCnIZ9ym7rdeRl+73v+Tiuw4b8lTQthFee+g6v1E+mcE3DPmU3dTrnpW88/Xi1qzK0fl5MfS70nBfV3MeL8MpT1+22a0+m0JdrawP7lN3SKbxcV+/4rurRr9Ixj4HrFddjrwyimnt+CK889RVeu/BkCvYp+ekUXkAf6H956iu8uNQcMQgvJEf/yxPhhZQILyRH/8sT2w0pEV5Ijv6XJ7YbUiK8kBz9L09sN6REeCE5+l+ednG7tb36jyry+SK8kBz9L0+7uN22GTCE124hvJAc/S9PXSts6J9pc68oT6aAiBZevHilfCE/IdutKPw7e55MgVjn9x3sPJAO/S9Pru3Wtqq8jidTIBThheTof3nqOvLiyRTogvBCcvS/PG0rvHgyBUIQXkiO/penPrabHgg8mQJtEF5Ijv6Xpz62G0+mQCzCC8nR//LEdkNKhBeSo//lie2GlAgvJEf/yxPbDSkRXkiO/pcnthtSIryQHP0vT2w3pER4ITn6X57YbkiJ8EJy9L88sd2QUq/hpe6TcJVu2Ya+77vgPo7hsRPME9sNKfUWXqqWmF5+ZTabbdxE2DfCK3/sBPPEdkNKweHlq1Emsv5oA0Xd4W7+27zj3fccHZGw5/ccHBxIURRrd8H77pbXVn7VltDnAal5q2WhG77DPLHdkFIv4eUrnKlzPV/H9xyd0Of3qHmoAG0KL9dzhHy10cz2mmGNOOwE88R2Q0re8Ap9Lk9IePmqPPuCpu3ze9Q8fPN0PUfINr+Q9qIbdoJ5YrshpcFGXr7n6zSNkkKe39M1vMz5tWkvumEnmCf
bH7a8eA382u45r6ano4acn9KX4ztsqOZhBmrb8GLkNZyQ/ofdw3ZDSsHh1cR1taF5gYXt+Tq+8Ap9fo8rvNRnZ7OZ89k8+nOE9N+HthfdsBPME9sNKfUWXiKb58jMQ3xNV+/5Lq5Q07ie37O/v78RKObhRn2erraEPA+I8OoXO8E8sd2QUq/hBcSg/+WJ7YaUCC8kR//LE9sNKRFeSI7+lye2G1IivJAc/S9PbDekRHghOfpfnthuSGmQ8LLdO+X62eR7z8ecznaDW+zVgvrl9E2qqpKqqmSxWKyVufK1ta9l54KdYJ7Ybkhp0PDShe6w+wwvvQ36vV1thQbIcrlcCywVZF0QXtgVbDek1EuFDd99WrbK7/o05s/6v9X9Wfp75r1kelUP33TmTt+2/JCK9GbleV975vP5RtURs01m9RFfhX1z2WPBTjBPbDektPXwUtO2OWyoh4AeQmbJJ3P05JpOb4NSVZWzSnzo+oS0xzSbzbyjUF+Ffdt6jAE7wTyx3ZCSN7xCq8r3GV6uUZirfa56hW3OebUp6quvT1N7bIclbYcObevfdtk5YyeYp762G09gR4ydG3m5qrkr+uG0oijWHp3im8512NBWaLdNgLja4wsv8z8n4UV45aiP7cYT2BGrl/DyVXBX0/Yx8jLfazvyMjuXqoTfpiK9a33Mzy2XS0ZegQivPHXdb4jwBHbECw4vn9AK7qGBZVZzd43KyrLcKOBrm05vg+IbeYWuT1N7bN9r6Dkvwgu7ru8/el14AjtsegkvEX8Fd7Pyu4g/vPS/XMwQ0v/KcYWObTr9LyH1cnXY0PVRhwZd7elytaHrP4++7LEgvPLk2qi4k+IAAAJzSURBVG48gZ3nAA6ht/DCpm3c5zVG9L88DTHy4gnscCG8tkyvsBF7U/TY0f/y1DW8RHgCO+IRXkiO/penPrYbT2BHLMILydH/8tTXduMJ7IhBeCE5+l+e2G5IaSvh5Ts23Leuy+qzrSEV5NvytW/I73mbxrAOU8R2Q0rZh9euSHFl4Vi+5zGswxSx3ZBScHiF3o+hjhPbrrgxjwGLuO+CNxq4cR+UeUe8/p5+zNpss2t+TW1tups/9J4us/SNuomxabkh33OuxrAOU8R2Q0q9hVdR+Cu66++F3gVvu5vddUe8/rMKQbPKu29++mfa3s2v1tFkq6ZhW2d1otm33JDvOVdjWIcp0v/Y4sUr0aufqvK2f5v3N+n3Pfjuy3Ddze66qVD/2XbPRtP8bMsMvafEdQ+X7dChPk/9stym5TZ9zzkbwzoAGJ4zvIwPOUdevorurgDUn4GlfqcfNnTtmNuEl2q37eZB23Ji7+b3hZftUOhsNpO6rqWu69XhxpDwaqqcn6sxrAOA4XUOrzYjLx/9sGEf4dU08nJNr4SGV5sK8ur36tCgXtXet1xb+xh5AZiyoPBq0lTRXZ2n0UcPoXfBq8+q95vCS7XD9mRj1/xc6xFyN7+at8l2zkufV5vlhnzPuRrDOgAYXi/h5avobl5Jp4/gfHfB267ACwkv/QrG0Pn5PmNbrhleoVcb6uutB3fo1Ya+7zlXY1gHAMPrJbx2QcryK1SQjzeW/gdgWIRXT6ggH2cs/Q/AsEYTXsgT/Q9ADMILSdH/AMQgvJAU/Q9ADMILSdH/AMQobBUwePEa8gUAbbHnAABkh/ACAGSH8AIAZOf/uJjejJki6DQAAAAASUVORK5CYII=\">\n\n</div>\n\n", "_____no_output_____" ] ], [ [ "", "_____no_output_____" ] ], [ [ "### 3.2. Usuario\n\nCon base en la información de una persona, construye una clase de `Usuario` que herede de la clase `Persona`. 
Puedes guiarte del diagrama UML.\n\n<div align=\"center\">\n\n![user.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAKEAAAEcCAYAAACvctSpAAAGLHRFWHRteGZpbGUAJTNDbXhmaWxlJTIwaG9zdCUzRCUyMmFwcC5kaWFncmFtcy5uZXQlMjIlMjBtb2RpZmllZCUzRCUyMjIwMjEtMDctMjhUMDMlM0EwMyUzQTE1LjUwNFolMjIlMjBhZ2VudCUzRCUyMjUuMCUyMChXaW5kb3dzKSUyMiUyMGV0YWclM0QlMjJSM2FzNk5IWkJOMk53NTBqN3RLZiUyMiUyMHZlcnNpb24lM0QlMjIxNC45LjIlMjIlMjB0eXBlJTNEJTIyZGV2aWNlJTIyJTNFJTNDZGlhZ3JhbSUyMGlkJTNEJTIyZU9sX2c3MS1US2FNdlFYZ0pKbWslMjIlMjBuYW1lJTNEJTIyUGFnZS0xJTIyJTNFN1ZoYmI5b3dGUDQxa2JZSEtuTGgwa2NJN1NxdGxkall0TzFwTW9raFhwMmN6REVGJTJCdXQzSER2a0JpdXR4TmdERXFwOFBwJTJGajJPZjdiSiUyRmFjdjE0ODBHUU5IcUFrSExMNllZYnk1MVlqak1ZT3ZoWEFWc05lRTVYQTB2QlFnM1pKVEJqejlTQWhkdUtoVFNyT1VvQUxsbGFCd05JRWhySUdrYUVnSFhkYlFHOCUyRnRXVUxHa0xtQVdFdDlGdkxKU1JSb2ZPb01UdktGdEd4WmZ0JTJGclh1aVVuaGJGYVNSU1NFZFFWeWJ5elhGd0JTdCUyQktOVDduS1haRVhIWGQ3b0hjM01VRVRlVXlBJTJGRFJlZVpQaFJ6SDc1ZjFPN25vJTJGblRUdG1GR2VDRiUyQlpCVSUyQnB5Q0FoWnM1eVd5UWlXN09Za3dTdDhRSVNPVE05TnRxRXMyV0M3UUJuUWdVQ1QxUkloamtjbVE0SkthSkJ4SGg0VDdhd1V2UE5KQWtlQzJzY2dXRFBPQ3poWmt6c0Z0TEl3ZW5YUEdZcUV1RXVvb0ptNkRNdGttQTNvQWV5cVRuZWswd2FJQURPU1pxeCUyQlc0Wk1SRkxsb3hCU29pTms4a09Mb2R1RHFiZDNwR0ptNEJDVEtYWW9vc0pjQWFHZjdNQmhzWmNsMnF5JTJCd2FMcWtvcWRFT01ncGU3b1V1U3NXRjRmZ1huVG90enkzRVJTQ0NlQzBSR015bFlzclF3N1k2N0dlVnBVUDAwSkRqc2FMNlZ0TlpYYVRZMGcxbVRPWmNDSHFrUEhGQWNrd1MwaUJqbkRhalFFYWNMZVZCRldVb0NuTjE5N2pQeFN1U3p5WjJDQUdNWFBOOXRFUXREbWlnRmdDU1NhTG9WdHltd1JPYTU3WTN4aDluMnUxYzlxNGNUOTlHMlN4dCUyRnlsMUlIeEpjQzJFNTh4UzF0S1pLVDhmSjVQRCUyQmEydkhhRVdsOWhpdGVONkpwT0x1a1VxRFk4NXk3alRIeFJGcHY0bmdHS25pdEdUMGl5SjgwckZickx0dDF0MDlESE15cDN3S0daTU0xUGhDJTJCemFZUHhlNWd5TVBndUdKdVBYMmNJdUw3ZW84ckFLSnpGWDN0dXJEcXpnUUxBMHduJTJCJTJGZTQxbndCUGlKeXdGd09vME1qendBbkJPSnBOY1N5ZGRzUlFTRFMzMXdvdnJBOGM1ZUlQUmJwSGZRWEJuaTk5UUhIVjJBNDg3SXFPVzcxdGdtRmJmTG1mREtNNkgzZnhjRmcwdFJjREp5ejEwVUROOVFGQVFrbmpNaSUyRlB3QXVKUUYlMkYwSWw1eTRMN0NQJTJCTWFCSk9GSVBNR2pOT2FnYmVZeVF1Ykx0dmpadm1mcHdudmRJeHNYbFRqZE1mbGN3NWxsYlA0eVRhazgyVldOYkdLaSUyQnJRbnE5Z3Nnajd1Njdqc0ZVQWJuVmkxNlNnWEQlMkZLZ1NwWEs3MDdEMVF0UzQyM0hac0JJQmZZbFBwNHRGeTVMJTJCalhkdlAlMkIlMkY3aUJhVUU4bWU2blBieDdRWmJxcDBYZFlkWHJQdXNCdmxoRjZUaWFxJTJCS3pVRzZyc3ZES1FYM1Jvb0Y5NXVqZnUwaUdiNVBLYmR5emRHOSUyQllQJTNDJTJGZGlhZ3JhbSUzRSUzQyUyRm14ZmlsZSUzRbHqy6sAABKZSURBVHic7Z1NqN1EFMcDQgVxIVLoqtCSlS6sm+KiFkTEUHFRXDwFqSBPkAjlKVIExaJQhLSCIjwtKIKf8S2KiHYhiCBxo+DHqgs1G12oC0HwA1zIcWHP7bnnzkwmX/e83Pv/QWjfTTJzkvu7k68zkyRJEsKEyXhKCAArICEwBxICcyAhMAcSAnMgITAHEgJzICEwZ20kdN0gLYrCOixAayZhlmWzv/M8pyRJqKoqw6gA0RpLWFXVXGtYluWshUzTdGG5LMvmyiiKYq5Vret6oT6eyrIkIqK6rilJktkPQMeky1yXHwgkLIoFIdM0nS3L86SYLBMvr8uWf7PcZVnO1uOyWLqqqmb1sMwyhlVnbSWUh2OWgQWQf7MceZ7P1uXP9OdEV6STrRgLJVtC37KyNZTirzJrJaGe+DApD4/6EKtbSYYPz/oiR7ZuTJqmlKapV8KyLBdaXF5nHVgrCX2HN90SSnwSMvJcsq7rTi1hWZazH4I8HEPCFSMkoRYty7KZAC4JpTxEV1pSV12uc0KXhK7zQ0i4YoQkJJpv0eT+8LWEoStZli10dSzr5PlyHT7crwNrIyHYvUBCYA4kBOZAQmAOJATmQEJgDiQE5kBCYE6SJJQcOHDA+ewUE6ZlTJf9Q0sI7LgsIyQEdkBCYA4kBOZAQmAOJATmQEJgDiQE5kRJmGUZVVVFdV2bpZzrLpFDom+e6nmu/r8cD+hPlISyv4VVX9ixJEzTdC51n9P2GZ+EYDiCEvoes+j+FrJvLi8T6nOh15P9NeSyrr6+sg4WUo+SEKqTt6uu69kyrtETyrKkNE1n6+/s7MzVIVvCpu2X/VfyPF+bDkyxNLaE3B2R6MphWaM7A+V5PtdiytZGznP1cmMpdMunO6EXRbHQI447DPGGyd5wvhac65Tr6h0ke8Dxci4JfdsvpVynrpyxNEpYFMXc8Bihvrk8j1sROY+RrY9eL/Tl6WVD5VRV5Ry2w3co1z3ttMyuYTpcErq2X/7f9TcY+HDs+xLGkFDOd9XvijvmfJLL4noh4fg0toQxFyW7vSX0UZalc5vSNJ3rCwwJxyUoobwlIw/LmtCXQLR4TqgFbSOhHPPFVw6vyyLp1ljvAHn45WW5rL4S6u2QozuA/wlKGHNRQtT8JTRdHbeRUF6B+spx1akF1X/7luULl+3t7c4S4uo4TOPhGAxLnucLw8mtO5BwZORYhkmyPoMctQESAnMgITAHEgJzoiQcMouma/aJb70uiQ3yqhvYEyXhkFk0kBBoBnlsRxTOWtGj3cv5oee
2ofX00xJXdk2WZc40LblcKG6wHBpbwtgb1lIg101n1/jOOrFAPgUJrScJZdfox3JyBH35bDgm2waMR6OEsVk0vqwVfRgNHY5Dz5xjD8dabNnisWDyUVybbBswDoMcjkNZK74sGka/Q6Qp+0YTyq7h8nl0fPkGpqqqemXbgOFobAljs2h8V82hFk3PG6MlrKpqdph1JSXgCYY9QQljs2i4IF/Wis6i4Xmu5ULZNyEJXdk1MjZfVksobrAcghLGXpQQhbNWdL8RLR7P87Vqba+ONXmez/2AZD+RUNxgOTQejlcByLW7WWkJ9e0bsDtZaQnBNICEwBxICMyBhMAcSAjMgYTAHEgIzIGEwBxICMyBhMAcSAjMgYTAHEgIzIGEwJyZhJgwGU9oCYEdkBCYAwmBOZAQmAMJgTmQEJgDCYE5kBCYAwmBOZAQmAMJgTmQEJgzqIT67UXLGAMmSYYdBH3o8kAzg0mo345JtDhw+RhAwukTLaEWTCPfEczoIX6b3vYpxxiUIvB4grp1lWMb7uzsUJIkc2MPyphd7xnWscjyQiP7c9lcF+jHIBLGDjiuR17l4Yf1aKtynmsEfikoC8dl8A8h5h3MvGxRFDO55Q+gKV79owPdCEroG1hcH2JjJGzzBvjQm9NdGxB6KXbM2+g5Hld5MfGCfiytJfSNxt8kIdH8S3D0xc5QEury2sQL+rGUc0LX+0zaSKjrCR2OfaP4t5UQLeHyGP3qWF9IyHMsLYVLGHm+xmW0kZCXzbLMO4K/HPE/dE7oixf0Y9D7hPocUh86m642QxcRvvNRvkLd3t5eEEMfxl1vb9KxhEb298UL+jGohAB0ARICcyAhMAcSAnMgITBnFAlD9+GGpm9dQ8bKL6EMvRG1LaH4lrmfx2TyEu4W5Eu9iZrfijoEq7KfB3tiInMJ9Rs5fffbiPwZMirA2Xo6g8X1/mR5f1DH7CuvKdamTB9+ubeuS+8/nd4m38UcqjdmP0+VwSRMkitZJa7Xwsp5sRkyrkwXX7aM/D/LrN99HCpPLtM204e3UeN6Pa9rm/nmfKjemP08VYISxmbRNL3d3fWkoq7r4DNiX6aLL1FB/t/1/LipPFedsc+3fW+Qdx2SZZny0WJTvU37ecoM0hL6Mk7kPD1xWb4MGd8ObiMhx+1KSHDV0zXTJySh6xQjyzIqy3L2UvOYepv285QZRMI2LWEIeTgeQsKmltC3PhMrYV3X0S0hf86H3LIs0RLGStiEPqfxncfIX3Nshgwvy/ObJOQ49DlhqDzfdsRk+nDZGtc5oSyrTb0x+3mqDCahvLprujqWLWooQ8Z1xRgjobziji0vtIyrXi1h7NWx3G75A4y9Og7t56kymIS7AcsUK4v7hKsCJBwQ+cQk9jwYrJiEYJpAQmAOJATmQEJgDiQE5kBCYA4kHIiLFy9ahzBZIOFAHDt2zDqEyTKTEBMm08n6lzB1Dh48SCdPnqSNjQ3rUCYLJOzBuXPnaGNjg/7991/at28fffPNN9YhTRJI2JG//vqL9u7dS1999RUREZ05c4Y2NzeNo5omkLAjp0+fnpPu999/p2uuuYZ++OEHw6imCSTswC+//EJXX301fffdd3OfP/HEE/Too48aRTVdIGEHHnvsMadsP/30E1111VX066+/GkQ1XSBhS77//nvas2cP/fzzz875jzzyCD399NNLjmraQMKWPPTQQ0HJLl26RNdddx39/fffS4xq2kDCFnz99de0d+9e+vPPP4PLnThxgs6ePbukqKYPJGzBvffeS+fOnWtc7osvvqD9+/cvIaLVABJG8tlnn9HBgwejlz9+/Di98sorI0a0OkDCSI4dO0bnz5+PXv6TTz6hG2+8ccSIVgdIGMFHH31EN910U+v1br/9dnr77bdHiGi1gIQR3HrrrfTOO++0Xu/999+nW265ZYSIVgtI2MC7775LR44c6bz+4cOH6YMPPhgwotUDEjZw6NAhOnr0KJ04cYI2Nzcpz3Pa2tqiU6dO0ZNPPknPPPMMPffcc/T888/TSy+9ROfPn6fXX3+d3nrrLdrZ2aGtrS26+eabrTdjVwMJA1y8eJEuXLhA7733Hr355pv02muv0csvv0wvvvginT17ls6cOUN33HEHHT16lB5//HE6efIkPfzww/Tggw/S/fffTxsbG3T8+HG666676NNPP7XenF0LJOzJs88+S6dPn7YOY9JAwp489dRTdObMGeswJg0k7MmpU6fwiK4nkLAnW1tb9MILL1iHMWkgYU/yPKft7W3rMCYNJOzJ5uYmvfrqq9ZhTBpI2JMHHniA3njjDeswJg0k7Ml99923MF41aAck7Mk999xDFy5csA5j0kDCntx999304YcfWocxaSBhT+688076+OOPrcOYNJCwJ7fddhueC/cEEvbkyJEj9Pnnn1uHMWkgYU8OHz5MX375pXUYkwYS9uTQoUP07bffWocxaSBhT2644Qa6dOmSdRiTBhL2JE1TjMTVE0jYk/3799OPP/5oHcakgYQ92bdvn3dwJBAHJOzJ9ddfT7/99pt1GJMGEvbk2muvpT/++MM6jEkDCXuyZ88e+ueff6zDmDSJ+TssMGFKEjSGwA5ICMyBhMAcSAjMgYTAHEgIzIGEwBxICMxZOwmTJKEsyxo/G5O6rilJEsrzfGl17mYgoeczsDwgofqsKIq5x0l1XRMRUVVVlCQJFUVBRERlWVKSJLORF/R6VVXNrZdl2exfV0so11230RwgofiM5WDR5LIhCXkeC5um6cJ6aZrO6tMSynq03OsAJBSfsTCu87WmlpBovjVk6Xg9WZ6UkMvhlpNoXuJ1ABJe/owl4cMmTyxdTEvI4qVpuiAhr0c0LyGLqyWULeeqs3YSuloZV8vHkvFhVh+qpYR5ni8cjmMlREu4hhJqYaRM+hDLyxItnsdxi1mW5VxrplvFJgmJcE64dhIS+Q+5RP6rXD2PBWVZ5DpcPlGchPw3ro4BMAISAnMgITAHEgJzICEwBxICcyAhMAcSAnOSJKHkwIEDczdLMWFa5nTZP7SEwI7LMkJCYAckBOZAQmAOJATmQEJgDiQE5kBCYM5kJeSM5S7om6V6nsymHqI+EGayEnYlTVNnOj/jkxCMR2cJdYfvsiznuinKfhyyJ1vTerKXW5Jc6W+hRzLQLZPup6E3UvaY47rlfI6D19/Z2fHWJ/sS8/JSXLkNeZ6vVffNLowiYVmWc10W0zRdGBbDtZ6WpCiKhV5rWkpZB7dweZ57u0zKXnK+HSJ7zbnq052XdH1SynXrQ9yF0ST07fimllAi5+n1XFIwvhZPlutqbYkWJQzVF7Ptoe0D/xOUUB5ufMNi+GTS3SNj15N1Jsli/12XFCwV0yShazu45YKEy6dzS6i/6NDOlofj0Hq6RRu6JdSnCTI+2X8YEi6X3hKyXFmWzXZ2URRzrZ9LQtd6ukVzDafhkoKXledovi9eH365Ti63r4SyDL19wE2vWzT6kCt3trzSlLdE2qwnv+wmCWOujtVGzyY5jy9ctre3O0uIq+N29JIQNJPnOYYFbgASDowc41
BeWAE/kBCYAwmBOWstoRxXMM9zPDM2YldJKG9tLAM5vG+ffaCvtvU8ZOWEWWsJhwBZOf3pLaGrBfDdswtln4QyWIjCz3t9GTtdY2mqj+8RIitnGHpJKB93yackvoyW2OwTncESyq4JZexwmXIM6phYQvVpkJXTn84Sus5p5JMN+ZnvqYfvcZdeTtMmY8f1EpuYWHz1+eaHWk08iw4TlDAmi0YTymjpI6Evu4bIn7Gj5eApJpZQfSGQldOeQVtC1+dDSBjKrtHoBNoueY2x9SErZxh6nRPKX7w8b/JltHSVMJRdE8rY4TJZCFlO0xfvq8+1D5CV049eEvquPJuuSJv6pegMFiJ/do2epzN2dCxaAl8sofp0bK7DfdM2ISvnCr0kBMtnFbNyIOEuZx2yciAhMAcSAnMgITAHEgJzICEwBxICcyAhMAcSAnMgITAHEgJzICEwBxICcyAhMAcSAnNmEmLCZDyhJQR2QEJgDiQE5kBCYA4kBOZAQmAOJATmQEJgDiQE5kBCYA4kBOZAQmDOoBLqcVOWMXBPkgw7MPnQ5YFmBpNQj8tH9P+waHqotqGBhNMnWkItmEaOTsrw2ID6b56YptHrfaPzx4yOHxqTWsciy5Oj8/vilW8XAN0ZRMLYN613GdU/ZnT+0Oj4oUEwXW8ekD+Apnh9I/aDdgQl9A087hsNNSRh17Gsm8Zo1hLGjMTqe/OAq7yYeEE/ltYS9hnV3zc6P8c1hIS6vDbxgn4s5Zyw7/tNdD2hwzGXoX8YbSVES7g8Rr861hcSbUf1jxmdPyShb9R7OU++eSB0TuiLF/Rj0PuE+hyy7XvmYkbS1+ejoRH/9WHc9XYnHYu8uo6NF/RjUAkB6AIkBOZAQmAOJATmQEJgziQkDF2Nhu75tSnbd0XfFXmrpy1FUVBRFFRVlfMtol0I7ae++7Avk5ewD2Nm/nSVsK7rOfFYyDGZjIQxErjuqfneii6TFnievC8nWyRXlk3oXcih+mTmy1iZP6HsnlCMRP/fFNcxyXp52/WPhRMsmuLVf8ttsGIwCV1ZKfrRmXwyoTNn+EtyPRaTX7ivHKLFR3Wu5TjGMTN/eH+5sntCMfJ6mizLFlpVV3YR3+QPxSvr4Zh2tYSxWTShrBRdnu/Rl+9LdC0beiYdW9+YmT8y/qZTCR2j65m565AsY5GPKJvidW3XShyOQxshD1dJkgwioZyv646tb+zMn5CEoRh9EroumLIso7IsqSzL2WE8RkLfclaMKqHrfG3MlrBNfUTjZv74JAzFWNd1dEvIn/N+K8ty9VvCmIJ0Vor+xaVp2ktC/iJ854Rt6pPLD535I+N3LeeLkdfTuM4JZQy6vFC8vuVWQkLfVZm8UpRfSBcJ5WGMce3cmPqYsTJ/Qtk9vhh5P8RcHcuyZMyxV8f6TsRKSAiGweI+oTWQcBcin5iE+tesCpAQmAMJgTmQEJgDCYE5kBCYAwmBOZAQmAMJgTkzCTFhMp2sfwkAQEJgDiQE5vwHaG1k0ulxrFgAAAAASUVORK5CYII=)\n\n</div>\n\n", "_____no_output_____" ] ], [ [ "", "_____no_output_____" ] ], [ [ "### 3.3. Empleado - Trabajo - Empresa\n\nConstruye la relación de clases entre los trabajadores y la empresa para la que trabajan. Puedes guiarte del diagrama UML.\n\n<div align=\"center\">\n\n <img 
src=\"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAu8AAAG3CAYAAAAEtrtWAAAM+nRFWHRteGZpbGUAJTNDbXhmaWxlJTIwaG9zdCUzRCUyMmFwcC5kaWFncmFtcy5uZXQlMjIlMjBtb2RpZmllZCUzRCUyMjIwMjEtMDctMjhUMDElM0EzOSUzQTMyLjEyOFolMjIlMjBhZ2VudCUzRCUyMjUuMCUyMChXaW5kb3dzKSUyMiUyMGV0YWclM0QlMjJBbmJmdmM3WGdQVjZ1TmNLdHhrNCUyMiUyMHZlcnNpb24lM0QlMjIxNC45LjIlMjIlMjB0eXBlJTNEJTIyZGV2aWNlJTIyJTNFJTNDZGlhZ3JhbSUyMGlkJTNEJTIyTVZJdFVNT3o5YnQ3cmJmZEc4aVYlMjIlMjBuYW1lJTNEJTIyUGFnZS0xJTIyJTNFN1Z4ZGI2TTRGUDAxa2JvcnRjSkF2aDdidE4yWjBjeXFtbzVtZDU5R0xyaUpad0JuRFdtYiUyRnZxOUJqc0JiRXFTNG5SVklWVlZmREVHZkklMkJQanc5T0J0NHNmdnFENCUyQlhpQ3d0Sk5IQ2Q4R25nWFE1Y0YlMkZrSSUyRm92QXVnajRybE1FNXB5R3NzNDJjRXVmaVF5cWFpc2FrclJTTVdNc3l1aXlHZ3hZa3BBZ3E4UXc1JTJCeXhXdTJlUmRXckx2R2NhSUhiQUVkNjlDOGFaZ3NaUmFQcDlzQUhRdWNMZGVuUjBDJTJCT3hGalZsbyUyQlNMbkRJSGtzaDcycmd6VGhqV2ZFcGZwcVJTSFNlNnBqaXZPdUdvNXM3NHlUSmRqbmg1ODN6MGd0JTJGckQ3OXk3MDVIVjUlMkYlMkYlMkZRMVBaV3RQT0JvSlolMkY0aHZDVUpWamVjN1pXUFpFJTJCMGpqQ0NaUXU3bG1TM2NvakNNbzRvdk1FUGdkd0o0UkQ0SUh3akVJbm5zc0RHVnRDTkZqUUtQeU0xMndsN2pmTmNQQkxsUzRXak5ObmFCWkhzazA0ekRPSkIzZFVxWEVyem9Td0ExRk9VcWh6b3pvQjFVSmY4Rk9sNG1lY1pqSVFzQ2pDeTVUZWJSNGp4bnhPa3d1V1pTeVdsV1R2d09PUXA4WnVSNXRrd2lnZ0xDWVpYME1WZWNKSXBuJTJCdGdDM0xqeVU0VFdSc1VVYlNVQWF4aFBCODAlMkZRMnlmQkI1bm1Qbkx0YXpnZXVCNEdFeFhjY0l1ZTNHYWZKZkFEZDducFA1M2szaU9Na3hORHMlMkJkMDZJOXF4WkJYRHBWY1JodlNmVSUyQmdaN2VRWTA4amNkZzFvME5WWkRnRE9mcEVaaXhnMGVabXdBbmswaW1vaEJiNkkzR2VOMEV1WE9JRExmczdyWFByYnlGZlo0U0xFNE56N0tCJTJCaUN4cUdKQkd3WVJuT2NJRVJBWWdsZzRmTEV6SzhnRDlJMGN3NUd3NkdjT016S0tOdEdmNUVkWjdOV0FMUGdta09Gd0lBZkNRQ2hMdGhxM25RNm9DVENCTmR1d3ZBUnI0bGZIa0dmTlZ5SE5FOGQwV09GYkdpZ3hJY1E2b2lzczNvTjVId3kxT2taZDNUcyUyQjRaTWh6aE94TGRzSlJtbEluMmVWRzNsdm0zU3U1MFIlMkZhWVdNcXRiOGd0UEt4VDlNTXF5Q0J6NWJFdGp1RWdXMEVTbnpHWEZIRUNiUEViY01FRGcydjFUR0FQTEpQZHdESjBMYUZscUtIbEtsNUdCSFJRTHk4NmtoZkRxcnp3a0U0UXJtT1NGNTR0ZVRIU2tuNmF6JTJGJTJCUWVBYUQlMkZodkhkJTJGZ25LNDE3Y2ZqanBZclg1Y09wRWc4enhtRVE0WXclMkJzRjVIZE1JZXczMTFoQkZKUTFzNll0enJDR3ZKbmV4SUU3WjB4T1JWT2lLZlJSaklDUExBZmpOeVNzOEdYUU5HMXhKR3dQaTJ0TVMwblExSUVwNEx6d2RLZHhFVGslMkZnRmhPUXNqMFpGOFpxSzYlMkJiZHZzaGlwUWZJRTgzJTJCRm1IbzVxTDBqNndrUGw4JTJCbFF0clZVamd1VW9uaWFJNEM1MDUza1FGdHFmbXBjcTVONFJUNkJ5aGFVcHlnSVNhSlZVVEElMkZEUWJNVUQwcTY4UU9YTXlVdEo5ODFKTHl0R1pXQndFb25KdDNwdnBqVEw1bTVZUHBFcm9ZSnFSc2htS2FPYUtKNUpubFgyc1dvTmVXNUxROFZEYXczbHFOczg0JTJCRkFWQXFycW1wQkFQYWVXV2VpZGpTc3BIaHNXUFZPVGFLMkRvWE8yQWZwUm1tYmFTYUVhNENYTk0lMkZQZWNoV290Y0VQR0FVQlpvJTJGbHJLQU1yanJjJTJCVyUyRkZ1cWdYbTNKV1NobVIxRVRpa0IzZVAyWnBrSXdSd0lUTiUyRkk0bE9mRmROQlBpWWRQaWNneHclMkZRRnA4MEV5N0V0aFl4TVZtNHZrVHRLcjlHcU4lMkJYWGxraEdKaWUxVFNYVFpFMEM0SkprVm5EUFNRZ0E0TXpvdDEza3ZUUm5sJTJGU0J3akFNV2RwV215UVBWT2h2V0k4ckt5YzlhYXdka2pUZ2RDbnV4bHlwSnllTDZEVzRmMGIwMmxMc1NEZUxiMWZCaXFjd0lmWktxU09sNU8lMkZnJTJGNDFNVW1sc1RTcnBycTlaQ3RVc1ByR2tGN0xtQTA0WFglMkZCU0tack4yaDd1eHRuWXgwb2Q5U0xuOVR6U3NBWjh3UVkwQWNxM0puSjBRN2tYT1oybDE3QzBNcWJYbXNZeHVieXRUbUFZU2d2d1JWbnpKNHRwZ3B2clBFRFBZYlZpT2xFZjl0TSUyQmNUcXZVbFAlMkZZdk1ZcURXNGtTYlVUcTFwRzkzQTNreFV2YlRwUk5yVWZUNlR0RUZHYVdQdDFTYlNUZWpUalFmMEk0QnVNTDZhRkhWQTlHSk96YTgzSVVQeW1PR3RaczhpcjJDUnliN1N4Z2dvYTI4NFhkMUs3cVZOWiUyQmsxdk9NMHB0ZVd0SEZObm5HcnRQbTVTak55bTdNRmprNXkybGozbXVJWWNESDRKY2FGamkxTm9RWiUyRmVUUDJ4czd2UlVVbm9tSkRDc29sY3czckg5Y2tLa2EyUklXcnU3eGJVZEVrSjVhY0JKUjlGd3NZbzZTZ1NicUtUWGJLeCUyRnhBc1dUNUNBQ1pFOTdiS0Yyd1N6RjY5N0pSVERpenB6Vk1XM2Q3cmRGUmVrMzdxVXpwdGFZMWROTjFSNjJCJTJCVTNPSlNjbFNtbTBRT1FaMTR6SHF3aWZTSlk1JTJGUDFQenpJZHc5RGdpNWhnT0xhbFlid2RWalJIM3FaVlhMJTJCVFhWVk5ycFNGN1ZMMUZ6dHUlMkZYM05ydHVsUnVOYVF5T3YycERsN1ZMJTJCWVlqSUZhY0JKaElYRzFGYUtodHdnc29vMldDbWJUdmZvTFNacjdTM3IyRXJYNGY
0a2hxaGRkZGU0VU1kQllkdURUNyUyQm9UajB2WmFHYk9OUVg0MmpsNERJbHZrc1VBYlZscVRjT2tKRGltT1doTjhXTktrdGw1QmZnMnFPSUVDTFdyUEJQTEZnYzViZzZHb2JiWnZiN3VTNmFJUDBVJTJCZk1jU1lWdUklMkJkRnNCRG9iNEZ0YnluRlpYSEFXb1pCYlkzdEtKZGQ3UTJMUElsQ3FHYnhoTlUzVjNvZGpOU2ZLY0c4RU0zdUE1SExRMDFqSlI4TTJDcG1oUXN6Vk5NN1lhOTBmamwlMkIycDR3S2I2U0gydnpGd2ZQaFIzWER0YjNUNjd2MCUyQkpIU3JRalJaMGR2YTd4Z1pTanBZb1lGOWwydWpCYkpaSGp3dWFrVnRRcE9LYWp4d3ZjJTJCVzVTa0lTRGpwelBWQVRpWmNtRE44Z0UxMWJub2V2ZXg3R0RCeURqNUVWUG5ZcVZPeE12Y09wR0owNU1PRlhOSWszZm1zNjlsOHBWYlo4N0xuRGNRV2Q2a3ZHcjFYUWRjRnhLQiUyQlBwb2Z4c1Q1REdCJTJGekNIeW5XeiUyRnZtJTJCJTJGOEd0OTUzbHZ6blc3UHZEZnhXU3pjdDR3M251N05lRjN5VThQbTJDM3RUTVpLbnJ5U1o3ejZGNXZxS05xVloxeTFSRkh2SnRDJTJCUEhNRUt0RTM0NzF2S3FuJTJGZm8lMkZoQnppT3l5VDZmcm0zWVJJN3NpbG5rcXByTSUyRlpmcDUyY2luYUN3TmglMkZhJTJGR2tmc21zVFR3VnJ3WWJzUWw5NWJ2VEttZE11dUcwJTJCbmNzNjF1cWRuWjlKaTBOJTJGUjg0VGQlMkZMOTc0NXJiNlFSNGEzVngyUkdoUzN2M0JYSkd6N080SGUxWDglM0QlM0MlMkZkaWFncmFtJTNFJTNDJTJGbXhmaWxlJTNFzMKrkwAAIABJREFUeJztnX3sZUlZ50/PMG/OdA9vglky2tnbEbPpQdRIVpMNO9lsbgf+EKM4ZsT1ZWcWrzaYGA2zykJrG4ebbEiP2Oyyxn/Q9aYTxRdaDaNG3MuqMYpEYRharsHRiECcGXt2gwsNz/7RPr9+fvWrqlPn5Z5Tdc7nk5z0/fU5p+qpuvXU+Z7n1kslAAAAAABQBNXYBgAAAMC4VFXFwTHLo0TKtBoAAAB6o1QRA9CFUtt9mVYDAADMmMViIdvttrf0ShUxAF0otd2XaTUAAMBM0Z/7Ee8A3Si13ZdpNQAAwAxZLBay2+0Q7wA9UGq7L9NqAACAGYN4B+hOqe2+TKsBAABmDOIdoDultvsyrQYAAJgxiHeA7pTa7su0GgAAYMYg3gG6U2q7L9NqAACAGYN4H4b1eu3d2Ge5XI5tGvRAqe2+TKsBAAAgyPvf/375oz/6o+TrSxUx+0bFe58vSpAPpbb7Mq0GAACAI3zyk5+U//Sf/pO85CUvkRe84AXyxje+UZ5++una+0oVMfsmJt4Xi4UsFgtZLpcHEfnNZuONzus1q9XqyDr9ek7/1f9fLBaH0rXYXwFWq1XwHL8QxCm13ZdpNQAAABziv/7X/yp33nmn/MiP/Ih89rOflaeeekre+MY3ygte8AL5mZ/5mei9pYqYfVMn3vWcrr2v9ejep8JcRbj+7TvnntcXAk1LXwBC+aiYd++Do5Ta7su0GgAAAERE5L3vfa987dd+rXzTN32TfOhDHzpy/o/+6I9kuVzKN37jN8pv//Zve9MoVcTsm9CY981mcxB5F5ED8e4KZ59Yt+e32+3Bud1ud3Dejajbv22k395jcW0FP6W2+zKtBgAAmDlXrlyRb//2b5ev+qqvkkuXLtVe/+53v1tOnjwp3/3d3y1PPvnkoXOliph9kzJsRqS9eN9sNkfO2Si+b3jMdrs9ck6xUXzXBjhKqe2+TKsBACbMlStX5MKFC3L27Fk5c+aMnDp1yvswH+M4duwYx7FjctNNN416HDt2TKqqkn//7/99o7Z17do1+Tf/5t/IrbfeKo888sjB/5cqYvbNvsW7jbxbfGPZfejQndVqVWsDHKXUdl+m1QAAE+X8+fNSVZWcPXtWLly4IJcvX5YnnnhCrl27trc8v/jFLxZ1fOELXyjquHbt2l6Oxx9/XL71W79V7r33XnnPe95T+z3/4i/+opw6dUpe97rXyV/91V8dOleqiNk3fYv39Xp96G/3s2L/TyPtmpYvX03XTlL1jaWHw5Ta7su0GgBgYrzvfe+T06dPy/333y9XrlwZ2xwoiF/5lV+Re++9V77lW75FPvKRjxw5/6d/+qfy6le/Wr7+679efuu3fsubRqkiZt+ExryrgG4q3mOrzbjY1WZUnCvWFruijLXXfWGAo5Ta7su0GgBgYpw+fVoefvjhsc2Agnnb294mt912m/zoj/6oXLt2TZ599ln5oR/6Ibn77rvlwoUL0XtLFTGlEBLoMC6lfidlWg0AMCHOnz8v999//9hmwAT4m7/5G/me7/ke+Yqv+Ap58YtfLKvVSj7zmc/U3leqiCkFxHuelPqdlGk1AMBEuHLlilRVxVAZ6JXf+Z3fkQ984APJ15cqYgC6UGq7L9NqAICJoKvKAIxJqSIGoAultvsyrQYAmAi6qgzAmJQqYgC6UGq7L9NqAICJcObMGbl8+fLYZsDMKVXEAHSh1HZfptUAABPh1KlT8sQTT4xtBsycUkUMQBdKbfdlWg0AMBGqqtrrBkwAKZQqYgC6UGq7L9NqgImwXq8PbbBRR5NroQxKfXjAtDh58qR3MyIOjikfJ0+eHNv1WsFTA2AkdCe8FEGuIl/XCvZt1Q1lUlV0wzA+tEOYI6W2+zKtBigcFeKr1So5mr7dbovtaCAM3ynkAO0Q5kip7b5MqwEmQqp43+12slgsDv6F6VDqwwOmBe0Q5kip7T7Z6rHHJXFw9H3kQJPIO0yTXNoizBvaIcyRUtt9I/EOMBVyac+Id8ilLcK8oR3CHCm13SPeYZbk0p4R75BLW4R5QzuEOVJqu0e8wyxp256vXr3aqx2I93nz5JNPypd92ZeNbQYAz3iYJaW2e8Q7zJKm7fnq1aty7tw5OX78uFy4cKE3O3zivaoq2e12Rz7D9Hjve9/LyxtkAc94mCOltnvEO8yS1PZsRftDDz0kjz/++J4tgzlx/vx5edOb3jS2GQC1feJyuZTtdjvqile6XO4+AhqxBQ2qyr+3Bsv3lk+p3x/iHWZJXXtW0X7ixAl58MEHEe3QO08//bR81Vd9lfzyL//y2KYA1PaJKti32+1ovxbtS7wvFgtZr9cHf+sGekpIvEP5lKptEe8wS0LtGdEOQ/Ga17yGqDtkQ6hPDC21a8WuyA1hvVqtDq6xgne323kj23qfCmYV5/p5tVpF81Ahr+d0F+pYnlqu3W53cI37QlBVlWw2G1ksFgf3X7p06VAeNvJeV/7NZnOoTOzXkQelalvEO8wStz0j2mEoHnvsMfnmb/5mec1rXjO2KQAHxJ7xm83mQETr8BkXK8JFjs7nsdFte869T4Xxbrc7Emm3AlnkeoTc/iKggt
uWSf+OLQ6gedp73brZbrdH8vCJ91D5rZhfLBaI90woVdsi3luQEoWAvNH2jGiHffGFL3xBnnzySfnDP/xD+aVf+iV59NFH5Ru/8RvlZS97mfy3//bfxjYP4BCxZ/x6vT54xulOzy6u0NaotT2n2Gi3e19M9LrXxtLZbreHBHIowq7YyLjvJcCKd/dloq789rPvbxiPUrUt4r0F+rOZoj+TMSauHKqqOpiIes899wR/Gubg6HK85CUvkVe84hXyzd/8zXL27Fl5z3veM3bTB/BSVf0MmwmJV5t+X+Ldnvfl77M7Zby8pqX5hvJAvJdPqN3nDuK9BVV1WLy7P5fZTsM6qDsmT9OwY/18nYs9p9EA7fzs+Dprk5smLxaH0fbsRt4/+tGPjmwZAMDwxJ7xKZNVc4+8h9hsNt4yLRaLg+ct4n26lKptEe8tiIl3V8gvFosjY/t8P+Xp9W7a9m99KdhsNgf3aVoq1n0djLUBruO2Z0Q8AMyZ0DPeLg1ph8+4xMSryNEx766wbyLeNR3fmHc3Kq8C3I3+u2W3w2T0WrvfRhfx7pZjuVwi3jOhVG2LeG+BK7DtsBkV0erA9m93so3IDYd3/1/kRgdio+YqxG3kPXStjb7TURwm1J6tiH/ooYcQ8QAwC0J9YspkVZF68Vq32kwT8W5/cQ6l48vTFfbu36Fr9dfyixcvthbvrDaTJ6VqW8R7C3zj6OyM9tA4OzdqoGjHoIeNKvjEu04Y8on3zWZzJMLPzPaj1LVnRDwAzAme8cOxWq2OBOtgHEpt94j3FriRd4sbebeExLti38x3u12ryLtGSdxhM4j3w6S2Z3eHVUQ8AEwRnvH7w/7Czi/heVFqu0e8tyAm3n1r1rpj8qx4t6Jb5Ebk3peXb8y7T7z7xr/TWRymaXu2Iv7ChQt7sgoAYBx4xsMcKbXdI95bEBPvIkeXqFJCkffYyjDumD3fajM2Tzs7Xg+74xxcp219XL16tWdLAADGh2cEzJFS2z3iHWYJ7RkA4Ab0iTBHSm33iHeYJbRnAIAb5NAnuuvBu79Ep9zT9bqUNGJz2vokpfz2un3YMHVKrS/EO8yS3Nuzb8UiDg6O/R1zJ8c6qCrEexPxDs3Jsd2ngHiHWZJ7e87dPoApgb91qwPfS5A790vnY9m5X3rOt276YrE4dL4uPWuLm15VVUcWg0hdd963U6xdEtpneyx9F7tUtF0+MrX89rpLly4dyYsX1Dil1gviHWZJ7u05d/sApgT+1r4OFovFgZBcr9eyWq0OhKvdrNAngEPnrE3b7TYpvVCZQiu5uTu+ujuh14n3FNtt3r5FLjabzaH/XywWhyLoKeW317k2+L4bOEypvo94h1mSe3vO3T6AKYG/tasDn3D2DSnxCWC9zgrT1DHvvvTqbPPthmptdm2Iifc627fbrXd3Wbdu3F1YXVLKb6+LlVHtgMOU6vuId5glubfn3O0DmBL4W7gO7BARN3Ibi3q7u42HBLDm7YscW/Fal56LDjNRVEDHzjUV7yHb3SEu7hAbix0+5NZvSvlDNjB5NY1S6wjxDrMk9/acu30AUwJ/6y/y7vv/rpH3lPTqbEiNvLtR8j4i76mEhs3Eyh+6DvGeRql1lGz1yZMnvW+SHBwlHidPntynX3WmqsrsUABKBH9rXwdVdSM6rGOx3cj2YrEI7jSeMuY9JT0f7rh2Nw17TtNTQa5liu2Snjrm3bVfccehh8R7rPz2utgvF+44ebhOqb5P5B1mSe7tOXf7AKYE/ta+DkKrqthVUGzEWj/bVVYUV3zqNRrJjqVny+FGxqsqfbUZkaNDWWKrzYRsd9MPjTe35XJ3X08pv73u4sWLyWWE65RaL4h3mCW5t+fc7QOYEvjbcHUQWysdYGhK9X3EO8yS3Ntz7vaViG/4lBvpaov+rK0/k7dlt9t5l5SD/YK/Id5hnpTq+4h3mCW5t+fc7SsR/ale0Z+++9iZsC/x7toIw4C/UQcwT0pt94h3mCW5t+fc7SsRVxjbyWfuOFx7nY3UW3HujovV8+6kNlfY210f7cuDHdPqTkbz5Q/9gb9RBzBPSm33iHeYJbm359ztK5EU8e6uxmDvsSJcJ4LpShEq+uvEu3ufin4dQuB7cfDlD/2Cv1EHME9KbfeId5glubfn3O0rkdiwGXfbc5EbYtkOq1ksFrJcLo+cs8I6Jt59aYZsjOUP/YK/1dfBcrmU7XYru91ukCUH97FOuW/ei7sxUo7YX+s0UFCC3SVQqu8j3mGW5N6ec7evRHwPbncoi53AqsNiXPGs60O3Ee++NF0bVZzH8od+wd/q68Aul1jqC6TP93Rd+pxxN31y14eH9pTq+72J96Hfyn3scxa7+9B3z/kexuxwli+5fy+521ciscmgPvHeNvKuQ2OIvJcD/hauA99Lr+sril273RWXdWuOu+fq1k1Pzdcdlub63mq1OihLKA93TkzbsvrWibf2qM/7gguI9/1Qqu/3Jt5zeCvfl3i3O7GJ3IiIKbGHMeRJ7g6bu30l0lS8u/ekjnlvcs6Nrrs2hvKHfsHf4nWw2WwOtWff826z2Rxqu+5uoe6Opu612q5VmLriPXR/Xb5uGd1zrv+pHTYPtcXumNqmrG4/Y89p36D6xd29FfG+H0r1/c7iPfWtvO6Ns+5N1a7s4O6aFstDHcH35hx7k9d7XYey5zebzaEVIi5dunQoD9v5NHnjtju6wX7I3WFzt69E2oh3t4+wwtn1WXs+tBKNzcuXprtzYyx/6A/8LV4H6/X6wDcWi4U3QKbPQx+hKLpvh1Q973t++u6P5esrY0iruALZZ6OWu2tZU9Kx5xDv+6NU3+8l8p7yVh574xRJf1NVYexzBHfSmfvm6j78Qm/ZLjZyFqobO+nNfUinlN+Keca17p/cHTZ3+wCmBP7Wz7AZ96VV0RddpU68ixx+fsbuj+XrK0soKu8OWbEBQN+v+l3KGhLv9uW9qirE+wCU6vu9iPeUt/JYo23yphoTve61sXRib9k+QmPRrA2hl4m68rsO3CSSAO3I3WFztw9gSuBv8TpoMyzWDiVpGnl374ndH8vXJSbeXU3gsyWkD5qWNUUHEXkfhlJ9f/BhMyHxug/xbs/78g+9Zdehadlxcoj3ssjdYXO3D2BK4G/hOrCLUNhAnYsrKOvGgbsbkem1+ot53Zh3a1MsX7eMsflpNjBndYn7/G5b1iY6yP4Cj3jfH6X6fi+R95S38twj7yHciSm2zOrkiPfyyN1hc7cPYErgb+E6SBkWq9g5YHXzR+rONVltJpavfe7WiXc3D/dZboN7bcpaN2zGpmmvRbzvj1J9v7N4T30rT2m0KW+qKeJd0/GNeXej8r63bF/ZfRPV3A6hrXh3y7FcLhHveyZ3h83dPoApgb9RBzmDeN8fpbb7zuI99a28TrymvqmmiHd3xQZfOr48XWHv/h26Vie0Xrx4sbV4Z7WZYcndYXO3D2BK4G/UQc7YFarYYbVfSm337LCaIavVCsfcM7m359ztA5gS+Bt1APOk1HaPeM8Ad91nou77J/f2nLt9AFMCf6MOYJ6U2
u4R7zBLcm/PJ0+ePDJUi4ODYz/HyZMnx3b5vfCbv/mb8sd//MdJ11ZV3n0iwD4otd0j3mGW5N6ec7cPYEpMzd92u5289rWvlec+97ly2223yQ/+4A/KM888E72nrg50TptdpGIIQuvAN6Gq4qvM9MEQC02EytGkjlLrwveS28dw3ty+z1J9H/EOsyT39py7fQBTYkr+9qM/+qNSVZXcfvvtB6Lrtttuk+PHj8vFixeD99XVQZuNmvogN7HnQ19oYmvM90Ef5Wgi3t3r7BLZbcnt+yzV93sT732+lbf9cut2akvZgEkZ4k0dxiN3h83dPoApMQV/u3Tpktxzzz3yvOc9Lzg86Pjx4/LVX/3V8ju/8ztH7g/VQSit0EaM6/X64Bp35TVdKUUkvm67nQemq8fZ/w+t2ubaK3J47fTdbhfNV+3zRZljGkKXybZLOLpl9tVBaOf25XJ5qH417dTIe6gcbl1YtF7c/Wssq9VK1ut1Ed+nu6R3iFJ9vzfx3udbOeId9k3uDpu7fQBTogR/++IXv3hwfOELX5AvfOELcu3aNfmzP/szeeUrXynPfe5zg0LbPU6cOCGvfe1r5cknnzxIP1YHKUtC26WaRfz7rNiorbu3i7sEtF7bROzZyLAV0vZ5HsrX3ZCxSRR9sVgcEpK+Mrt/200k3fry2aLX14n3unK4aagY9olmNy/9vxK+z7ryWdtKpLN4T30rF2n3ZiYSfjutu08bhW/9d20svjdcm1fdmzqUSe7fY+72AUyJ3PztIx/5iPzUT/2UvPa1rz0UaTx27NjBcdNNN8mxY8ekqiq55ZZbkoW7HjfffLMcP35cLly4ICLxOrAbMKqQdGm6w7nNz3etm64vj9hO7Zquls2KTl++bXc2dzdQWi6XstlsgmUOBRF9Gzeq/lBBmireY+XQNOoi0zFdV8L36aLnPvWpTx0pZ4n0EnlP3ajJfftKeTOLvZ3G7rPEIgIpb7gxu6FMcnfY3O0DmBI5+dvly5flec97nvzwD/+w/M//+T/lox/9aPT6D3/4w/Lv/t2/iw6XcY9bb71VvuM7vkM+/elPH6TTNUAX2sXct/u4u6O5K6J953x51Ik9145YviKHg3epkzNtUFAP1UF14t2912qb1Wolm83m0ItTinivK4ebhr4curomlJevLLl+nyI3hhARefeQ+lbu21G17s3MJfblpg6bcb/g2BtuzG4ol9wdNnf7AKZELv72jne8Q5773OfKe9/73sb3/vIv/7K85CUvkTvvvDMouO+66y45ffq0/K//9b+O3B+rg5RhsUNE3t3nb1Ox1yRSmzpsxhW5dux4TLy7trgR8+12exAsdCPOLrGy1w2bcdOIjXl3r835+9T6r/sOc/H9pgw2bMYd+qJH3ZuZSPjttO4+JRYR0PRDb7gxu6FccnfY3O0DmBI5+NsXv/hFeeELXyi/9mu/1imdRx55RG6++eZDz6vbbrtNTpw4IY8++mjwvlAd2EUo7DPSxU5Y1WtdIWafm+5YZXessz3nij19drvLM9pzNn/7/6F87Zhqva5O+LkaRFkulwcR35B4d+/V1WosbsQ4RbzXlSNF0NZdV8L3mUoOvt+GXiLvqW/loUpNGROl7CPyHnvDjdkN5ZK7w+ZqX924zS64L8juuaYRJ4BUcmhD//2//3d5zWte00taTz75pDzwwANy1113yS233CIPPfSQ/N//+3+j98R+7U4ZFqu+aFc6cc/ZfiM2l8yec4fDukNCfL+Mu2naFV9i+dr5Be5Liq/fWy6X3uE1sRV3XMGr14WG1Vg7fIFEO74+pRy2LmI0Fe85fp8p5OD7begs3lPfyjUN/UnG99bpezOrezsN3WeJRQSsbaE3vpjdUCa5f4e52rcv8W79WORGh66kRosA2pCDv33d132d/OZv/mavaf72b/+2fPCDH0y6tmsd7PPFfq5Qn/snB99vQ2fxnvpWLnL0LSr01uaK8Njbaew+vc5OWA29nblvuLE3O5ypfHJ32Db2+dq5byxmKCrmRousX8XycCNLqWv/6r2hcaf60mz9/9KlS4fy8P1KFyq/HQLX5udVmC5j9wef/OQn5UUvetGoNiDe88FdZAP2x9i+35Zehs1MATqdeZF7e+4i3kNr3obWw3Xvs2M1QxOTUtdytn/HVmrSPEO796kQd/PwiffYWsN2nCTiHZSx+4M//dM/lZe//OWj2jB2HQCMQantfvbinTfceZJ7e+4i3lNm7sdWgoiJ3qYrSjRZqSm2n4Mr3n2rHMTK767k0HZNZ5gmY/cH733ve+VVr3rVqDaMXQcAY1Bqu5+9eId5knt7Dtlnh4S4L5x14nUf4t2eD62m4B4pv3BpWnbeCeId9sXY/cG73vUueeihh0a1Yew6ABiDUts94h1mSe7teYqR9xDuRmmK3Rob8Q77ZOz+4Ny5c/LKV75yVBvGrgOAMSi13SPeYZbk3p77Fu8i4fVw24j31LWc7fCX2EpN7jAZd4OTruLdLYe7njDMm7H7g3Pnzsm5c+dGtWHsOgAYg1LbfW/iXVeasUtHjoU+xEPDC+ru6+u6vtJwV82A7uTusPsQ73WrzTQR776Vm1LW/nWFvft36Fqd0Hrx4sXW4p3VZiDE2P0B4h1gHEpt972J95SNmobAjdiJXH/wx9afV4YU701AvPdP7g6bu32ls1qtmKQOB4ztb4h3gHEotd13Fu++CWn2Z3UltgZzyljdlDWoRQ6Pk1X0evfvUOQw1SYltEqGb91rd3xwXd3YNa5ZyrI/cnfY3O0rDffXOKLuYBnb3xDvAONQarvvJfKeslFTbA3mVPGu6YcmyNUtRWfL4lt7uo14d/OsW/farY+6ulF7ibz3S+4Om7t9AFNibH9DvAOMQ6ntvhfxvl6vD4TnYrHwiucUMZxyLiZsU8R7bO3ptpF3S8x2X33U5eWWEfohd4fN3T6AKTG2v+1bvC8Wi9pnyNh1ADAGpbb7wYfNjC3eY2tPtxXvdriL/Uke8Z4vuTts7vYBTImx/W2f4t0dihm7DmBulNrue4m8p0xWjQlUV3S3Fe9qS2jMuwr00HjX2HCYkHj3jX9HvOdP7g6bo32pE7VDK7l0WVu9qQ+wmhM0YWx/25d411/CEe8Afkpt953Fu10a0g6fcUkR7741mJuK99BqM3Ziqx2Hbtee9on3mE3u/SLXO0vEe/7k7rC52xfD3R1VSV31ycc+fQD/grH9bd/DZhDvAH5KbfedxXvKZFWReoFqV5Npu4GMtckOY3FXpAmtPe1G6+psUuyKMNbe0KY1TYboaL0iMPold4dtY19oZSZ7zq58FFp1ydpgz7ntPna/3RDKXuuu+uLanrLiUmh1J7WZ1ZygKWP3B4h3gHEotd2zwyrMktzbcxfxri+rdSsf2b/dl2I7/Gy9XstqtfK+tIZWSFqv196/Q3mmrrgUW93JVxdN0ob5MnZ/gHgHGIdS2z3iHWZJ7u25i3j3LZ3qnktddcmej831cEW1m99isTgyFj5mX+rQsdA4eoalQRPG7g8Q7wDjUGq7R7zDLMm9PYfss0M/3OFg
oWFa2+3WK2Dt0BM7dCQ02TM218O30pNG723Uvy7PFIEdWt0pZCviHeoYuz/IRbxzcMzxKBHEO8yS3NtzG/u6RN596cT+vy7yLnJjeMpqtToYdlOXZ53Ajq3uVGcr4h1CjN0fsEkTwDiU2u4R7zBLcm/PXcS7ju32jXl3o/K+VZf0nApaTaduzLsrou3kVE0rlGeqwI6t7uTLu0naMF/G7g8Q7wDjUGq7702860ozdunIoYlFBl1iP5uEHuZN1o6GvMn9e+wi3nViqE3DJ95Dqy75ztk0Ytf4yuEb5+7m2WTFpdDqTpofqzlBU8buDxDvAONQarvvTbynbNQ0JDGh7S5jp0vrKTzIp0/uDtvHsBkASGPs/gDxDjAOpbb7zuI9NAHAtxlLKFIXWp9aP7sT80LrPMci75qmb2yunteInKZ76dKlQ5HM1LWjXRtDO07CeOTusIh3gOEYuz9AvAOMQ6ntvpfIe+pGTVZox9Z4VrHsm2gXW+c5ddiMu960z047OS72cpCydnRoXC6MR+4Om7t9AFNibH9DvAOMQ6ntvhfxvl6vD0TsYrHwRv5S1pVusouqYsevNhnzXrdLo295vdRxtO6Y2tCKGDAeuTts7vYBTImx/Q3xDjAOpbb7wYbNNFnjuU68h9Z5biLeLXqtXRED8T5tcnfY3O0DmBJj+xviHWAcSm33vUTeUyarNlnjOSbeY+s8p4j3zWbjtdFuB494nz65O2yXMe9Twverm9s/tCE2P0DP+XxW58Tsa15B7FfGEki1P7dyju03iHeAcSi13XcW73ZpSDt8JpRGyhrPMfEeW+c5NfLuDpPRNO1yc13Eu2vzcrlEvGdG7g6bu31DMbZ4d5fPRLzHQby3A/EOMA6ltvvO4j11sqpI+hrPdcNmQus8p6w2Y//22aLlqKpKLl682Fq8s9pM3uTusF0j73WrIdm14O1qTrF23XRVqNg68KH8tezui7TF7R9i81fqyumrH1tOd0lZvX6329WueuXWkw+to9Cvfrl+fyFS7Lf9t10BzJdPrB+tW73MLkxQ98I1dn+AeAcYh1LbPTusDoBuDw/5kHt77ku8+1ZDcoeO6e6p9r468ScSXxVK022av68eYuI9tvpUSjl99tk5MO79KgTf//73B/O1wtU9Z9PS6L6vvDl/fzFS7LfX6WffKmTude6qXaH73FXC6upc02rDer1u9CtQ6FrEO8A4lNruEe97QB8eehAh6reWAAAgAElEQVR1z4/c23Nf4j1lToYvjZj4azM3xYrsJnNArB/ZIySC+iyn5q/RYR1G44vixurJLXssCuyK3xy/vxgp9rvXhVYhi80darJ6mctut5Pf+73fO2J3U/QXjBTxriI/tJvvlMS7+/wbInDlq9Oc0oN8yV0LhEC8wyzJvT2H7LPDENyHYpNhXXboRNNhF23mpvgi5CkP97rIu1sn7styl3Jq2pvN5mDIjC1H3apX7jA939A/V/ilit+xv78QTcV7bBWymHhvsnqZosK5j8i7CvEm8y9i87CmIt59L6fL5TI6F64PEO/Qlty1QAjEO8yS3NvzPiPvLnbYhSvShojcdhk24+aTWs5U8a5DVHQ+j5ZDd1/25RuLvLu4QzxSxW+oXGN8f5aukXdLk8i7xZd3nRhr2x+kinf7y43P7lLEe10bsKu2Kdp23L/1UNrO80jZFT3mR64tqXMy3LkVUCalfneId5glubfnfYp3jSIrPvGnf9uVkpqIP03XjnlOyd9XDzHxHlt9KpZPqni3D27796OPPhrM1x3v7RvzHiJF/I79/XW1383Lvry432eoLLH76obNhOxuQx8rH4lMQ7ynvuCF5lK0neehdtl2504eD7VD+7Jh/cq2uzp7Qzu1QxnkrgVCIN5hluTenvcdebfRJfcnbXdIRlvxF1tFJJa/zaNOvLtpuTaG8kkV7yI3hkjYMtllI9183Yhck+8yVfyO/f2FRFqq/TYy6uZj022y2ozeh3hvR6gOQkOU3HaXIt5jv+i0neehtvvaXRM/V3t86aXYC2WSuxYIgXiHWZJ7e87dPggzh4f6er0evHz7XLUL8T5M5D02l6LtPA+1qw/x7qbXxF4ok1KftYh3mCW5t+fc7YMwc3io9yFY63BXLdnnql2I9/2PebeCN3au6TwPtcsn3lPmgITKSeR9HpT6rG0k3jk4pnTkTO72AUyJtv7mE+9VdXjYV4q4e8tb3lKEeK8jtNqMO0fDN5ei7TwPtT0m3mPzJvScnZti/z/VXiiTUp+1RN5hluTennO3D2BKjO1vDzzwgLz73e8e1Ya+6sAdI5+6c2+XeR6hXdFFwnNAYrbE5mSE7IUyGdv324J4h1mSe3vO3T6AKTG2v73iFa+QP/iDPxjVhrHrAGAMSm33iHeYJbm359ztA5gSXfzt6aef7pz/85//fPn0pz/dOZ0u0OfAHCm13SPeYZbk3p5ztw9gSrTxt6efflr+y3/5L3LnnXfKO9/5ztZ5f/zjH5fnP//5re/vC/ocmCOltnvEO8yS3Ntz7vYBTIkm/mZF++tf/3r52Mc+1invN7zhDfKGN7yhUxp9QJ8Dc6TUdo94h1mSe3vO3T6AKZHib32LdhGRD3zgA/KiF71Innnmmc5pdYU+B+ZIqe0e8Q6zJPf2nLt9AFMi5m/7EO0iIs8884zcd9998q53vauX9LrStc+JrZs+JH3a4e410McmYa59dlnK1HtErq+i466641479Hey3W572wMiZnuf5cqhzbYB8Q6zJPf2nLt9AFPC52/7Eu0iIu9617vkRS96URbDZRT6nMOE1qz3CeYutBHvu90uKJLHfokKvVT0CeId8Q4zJff2nLt9AFPC+lvfov3Tn/60/MEf/IG8+93vlre85S1y3333yX333Scf+MAHuprdKyl9Tmxdc1/Ed7VaHUSt7UZJVtyp2Autp27z9p1z/z+0K2ponXafjSLx3WIVd017vV7TtmvMa71Z++za9bvdrjY9ZbVaHbLN/kKg5WnynaTUk659H7vWfif2s75s2O9dNziry7eujF0p9VmLeIdZknt7tp0ZBwfH/o9PfvKT8spXvlKqqpIv//Iv7y3d5z//+fKKV7xCHnjgAXnLW94iv/ALvzB29+KlqvoX73ZnUo0UbzabQ1HjxWJxIOBUkLo711ohbXdb9f2/K/rcHVI13ZiNak9sAyb3GrtDqxXKoXO2Tu1mULH07D0Wt+7c76GuvL403HqyLwuhOlWWy+WRXxN837tuxBX7furK2JW+0hkaxDvMktzbc+72AUwJ62/7HC6TM6E+x40G6+EOjfAJxdBuqXpOI7Lb7da78+lut/MOkUj9/1AU3l7nszFFvPvqyRXben+sPCreU9LTz7auQhHq0GdfeVPq3/fLQaiufENn7HX6OeX7qStjV0p91iLeYZbk3p5ztw9gSvj8bW4ifh+R95B412EfKvJCLwghke7mF/p/TVfpW7zbIShVVQXFu9bddruNive69PSzrctQGX33hcqbUv/2vhTx7pvcu1wuZbPZyGazOWgDKeI9VsaulPqsHVy872P
2dh2hN9tc0oPhyd1hc7cPYErE/G0uIn5I8b7dbg+NeXYFaSjdpv/fNvIuEh/z7nup6Bp5T0lP09p35N1Xn10j7/r/+r1vNhsi7x0YVLwPNXvbBfEOLrk7bO72AUyJFH+buojv2uc0Ee+anzuURsWyG2m1z1w7Ftz3/3Vj3kMC27UxpFc04OjauFgsjqSt+aaMeU9Jz95jccvYVLzH6t/3K0KoTm09+TSSDd7WpVX3PSLeUy/s+FYukjZ7u+3McJ0J7Ubz7WzuS5cuSVXdmDWd2sFYW9zZ4U1maEM+5P6d5G4fwJRo4m9TFfFDi/fVanUocOc+S62WCD1nff/fdLWZmI3ucBJ3pIDVAzY99/kfixpbPVKXnq07q6VsGduK91D9+8R7m9VmbJ3ZekxdbSZUxq6U+qwdTLynjiFrMzPcN4vZCnt1BHfWdJOfzewYLvviUGev+7ICeZC7w+ZuH8CUaONvVsS/853v3INVwzJ0n5OiB0rFJ3j7JLbO+9gMsc57n5T6rO0s3lNnoqeI97bj03xvzK7tVryH3ihDY8ysPb70UuyFvMjdYXO3D2BKdPG3p59+ukdLxmOoPsf+ij5Vhnj+5yiSY2Pnc6XUZ21Wkfe2M8NF5NBmCG6n0Jd4d9NrYi/kRe4Om7t9AFMCf6MOYJ6U2u6zGfPuG9fVRLy7+cSGzfhmgLtppoh3Iu/lkrvD5m4fwJTA36gDmCeltvssVptxJ5g2nRnurinaVLzrtcvl8sjs67oZ7qn2Ql7k7rC52wcwJfA36gDmSantfvB13utmb7edGW5nabvjwHQ298WLF48Iane4TWz2tZvedrtNthfyIneHzd0+gCmBv5VZB775dvbX/br1wGPn+1xLfB9phsa8x9ZH3zfb7ba3ibRDfTcltnsRdliFmZJ7e87dPoApgb+VWQf2V3CRGwG3PlZ5y1m8x1abGVqwuwwxkRbxjniHmZJ7e+7LPnY0Bqgn9/5gCLrOa7PLOWt/Y691f3W3AtueS/0FXO1x+4b1en1kyWaRoxtC2j1j3DKoHb45baF7QnvQtElT7Q312+4676E8fFH4kJ1N9qxps867r/51h91Yvin12IVSfR/xDrMk9/bch33saAyQRu79wRD0Jd5VaNp5Yu7iEPacpmvnkblDYn17qbj3uXa4n337wbgi0L5U1J1L2YOmLs1Q2VL2rrGE8vDVRaqdsT1rYt+JiH+HVV+ZdPhzbK+clHrsQqm+j3iHWZJ7e+76IBVhR2OAVGgz3fdyqVvNzU0ztE9LbNU3N80m4t3eaxer8F3rS8f3a0DdSnh1aYbKFltNz7UlNY86O1PLV/ediPiHztTVfyjfujJ2pVTfR7zDLMm9PXcV7+xoDJBO7v3BEPQVebfnre/aQEBVVUniPbaXipu+a4f7WeR60GGz2chms5HVanUkMu/Ly55zj5QNJOvSDJUttHeNK7RjeaSK96blSxHvviGabv2n7JVTV8aulOr72Yt3n3M2Yd8zr9uOKU61pavNQ+7CVtJM89wdtmsUjB2NAdLJvT8Ygr7Fe2wPl31G3kNj3vWcvvRvNpvWkfdYufuIvLvYAMlut9t75D1WvraRd/3/UP27aRF5jzMr8d43XcYUDyHeYzPS90UpM81zd9ghIu/saAxwndz7gyHoWgfur3V2XLvru+64dvucd/dbCe2l4t4ncnTvltikR995Ny97rqpu/LJny5Pyi2IozVDZUvausYTySBXvTcoXs1vxjXm3aaXUQZPvpgul+n6v4j1VLOjh/sRtz4n4x8+GGl4o7dDbmxUfNnpZl44dU9vHmGL7OUXUNLHPzkiPjWFOyd9dQSAk3PR7tJ9znGmeu8P24W/saAyQRu79wRD0Jd7tfBiLfZ6HhmJo/95ktRn3sH2Rr1/Riaq+8+6zLfYsTwlKNE0zVF9uQMxdbSaURxPxnlq+Orv1e7GfXeFvdUOqBojVYxdK9f3BxLtvPFeT8a6xhhdLO3XYjArhunTUxtQxxfae0EzqVPGeUk7r0D5HiI0XrhPvIjcmKsYEUwkzzXN32D7sY0djgDRy7w+GoC/x3tUHV6vVIEvalswYv6qnMuRQ3T4o1fc7i/fUMbi++9qMd02JBIbSDol3V/TG0glFEX2448jqxnM1jUjG7HPzTskj5VzKUl0lzDTP3WH7so8djQHqyb0/GIKxxLv9VbWqquBzHQ6To0iOjZ3PlVJ9f9BhM6HZ5k1+Mg+JiZS0Qz+hWeFel04T8Z4yg9uXdky8p9o3tnjPfaZ57g6bu30AUwJ/ow5gnpTa7gcT776x203Ee5PxsqmRd3ccdko67titujHFdTO43Xua2F/3y0CqeE/Jv2vkXf8/l5nmuTts7vYBTAn8jTqAeVJqu+9VvMdoMtvcN941Nl42lnZIBPrG1aWkY8V7ypjiqorP4BaJjwWORaDr7EsdBpGSfxPxXsJM89wdNnf7AKYE/kYdwDwptd0PJt5F4rPNU8a7xsbLhtIOiXd7vZtmLB33l4WmY4rdGdxKqGw+EZtqn2+1mdCwo7r8m4h3a6/v14ocZprn7rC52wcwJfC3vOqgy6+r7nPdPi/3YZ9vcYUh888xTf31fbvNf++XnNp9EwYV7zAsY8xIz3ESjY/c23Pu9gFMCfxtOnXQt1Cuwyfe951/zuLd1R257/1SartHvE+coXdYLWWmee7teZ/29bUyS9uHlC8q1efScE078qF8JNUu36YnIvU728ZI+a5KiJLti9zsGYOUOqjrN+p+SfXtM+LbA8S9v8n+JrG2rten7lvSdP+YPvMPXevLM7buemx/HTuUtcka9HYtf7fO3DXotU7sZxX4Oez9UqrvI95hluTennO3T6SbeHfv803+HoIc10vWh5JvLknbl4zU7yr3KNm+yM2eMehDvFtx6Nu3w7enh28PECvG2uxvkiKeReL7lrTdP6av/O0LTyz/WL3X7a/j3mfTDO3B4vu+bHl97SjnvV9K9X3EO8yS3NtzHw/SuqiR74HhRndDERY792K323WOjK1Wq4Mxku71sQiQpmfPNYlK+eaFxCJdQ9llH2D2Whtt8kWhfJHN1O/K2m0/5xQl2xe52TMGoTpI3cvF/eXVt3pY3WpnoT1AXHvqFmpwjy5zuGL5up/7zD/2MuHmH6r31DLUlcmmGfsFMPTre857v5Tq+4h3mCW5t+eu4j0lalT3UE2JsGy3R5dxbRMZ0/8LXe+LKqlN7s7MdSsVuQ9LJSXSNZRd6/Xa+3coz1hk09ZvXSROJO8o2b7IzZ4x6NrnpOxrEgsS2D1A3PaTur+J2lgX+U4V7232j+kr/1jZbJ6xeo+VIbZ/SmwPFpGjC1zY8oXEe657v5Tq+4h3mCW5t+euUTDffW3Ee2yMdegh1SYypvb7fjpO2aXYnk+NILlpp0S6hrDLZ4sOJWgT2Uz9rpSco2T7Ijd7xqCreA8JNz0Xa5+xPUDc9pLSv/QpnmP5+q7bl3hPjbz78qsrQ12ZYtF8G9TZ7XZB8Z7r3i+l+j7iHWZJ7u2564NUJC1alfJQ9UVYNH/ttPcVGUvdpdiXhr
1faSLera1D2qVodNtG/evyTBHvoe9KyTlKti9ys2cM+qgDbaci/n1NQu3TDquyf7vpiKTtb9KHeG6zf0yf+bu/pqWOebd21+2vE9s/JbQHi9s/tB3zbsuRkm/dL5eI99CFhRYQwEfu7bmPKFhKtMoVjbFoe2jYzD4jY02iSr7/j0Vy3ChRl8h7n3Yp+mDXDeW6RDZTvisl5yjZvsjNnjHoow7cORChX5J87c7uAeITappmLPig5fAdts2mDptpun9Mn/nrtXbekRLyPbfeY2Vw72uy2oxN0+0rUlebsWmNufdLqb6PeIdZknt77mpfarTKDoEQOby7bkqEZbvd7jUyptf7okpuWhqZqovQuKLWzTsW6RrKLpunTSuUZ6p4r4vEieQdJdsXudkzBtRBXvj6nBLY7YZf570LpbZ7xDvMktzbcx/2pUarUncudjtgu/tx08hYE/Eeiyr5ojR1USmLb7WZWKRrKLtsXfnGubt51on3lO/K5mk/5xQl2xe52TMG1EFelCreRQ7vsNp2b4qhKLXdI95hluTenvdpX47DFsbCRolKflj2Re5Rsn2BP1AHME9KbfeId5glubfnfdmnUfY5CrQQNko0Z/FeQpRsX+TeHwwBdQBzpNR2j3iHWZJ7e87dPoApgb9RBzBPSm33iHeYJbm359ztA5gS+Fv/dTDk8LyuefVpq/0lz07c7ELMPoZBdqPUukO8wyzJvT3nbh/AlMDfyhbvuTDGSitzrOc+KbXuEO8wS3Jvz0NMWO06tju2akzdfe7RV4SqKU0ffO74+JIhMniD3O0bgpQ6qOs37HKiTdYNtys8+TYIC63cZFdRcs/ZVbRcm9uuBKXX2Y3ObB+Yusb5crk8JOp1vffU1Zti9QzNKLXuEO8wS3Jvz7nbJ9JNvLe5bx80EZZuVG0KEBm8Tu72DUEf4r2qbuxD4IpK95z60mazOeRX7n4SutOwyI29J7RNucu82s/6EmD3a6hLz15j9yVwV6TyndMyuvj2TfCVWZd3jeWbUs/QjFLrDvEOsyT39tzHg1Q35NHDfdD51mEPrQ/uRsTsWuG73a42L01H7Q6J97rIkxtpc6NrPltjZfTtOOorh4h/TXj7ORSNC9Vhil0p5YvVWyx/hchg/v3BEITqwPUJPdyXvthOuu5KRnY33tiuzr4Xv9COx64/xnZKDqXnyzO207Lrs75y+F6Q3Z2ebZ8Wy7eunqE5pdYd4h1mSe7tuat4tx2/SHi30NjDqC4ipg+clLysCI6J97rIkyue9VoVqLEHe514j5VD7VZCP9O7dsfqMFW815UvVm9136GmPffIYO72DUHXPsfdvVf9yZ5zj9BGcUpImDYR72r3drutTS9Wji7i3ffSvFwuZbPZyGazOQgMpIj3WD1Dc0qtO8Q7zJLc23PXKJjvvjbiPbbud0iEh/Ky9/ke4k0iXu7fvp+vm4jklDqzdRETC6l1mGJXk/L56q3uOxQhMiiSf38wBF3Fe5PIewzfC24orz4i76H7lVTxvtvtkiPv+v/q15vNZtL+lTOl1l0j8c7BMaUjZ1Lsq6r4sBk7jKOqqsbiXSQcEdP89SGbkpfvPkuTiNe+xHusHG3Ee6wO+xLvsXqL5W/tm3tkMHf7hqCPOnB/mbFpVtWNX2NsG3HbX+gXPr1Wz/v6A/tZ7fD9ghZKL1SOkF+6vu6rQ98vWzatJvmm1DM0o9S6K9PqCfPUU0+NbQJkQFfx7hvL7XsAuUIvFqkNPVRT83Lvq7O5L/EeK2MsohWLqjUR76E6TLGra+Q9lr9CZLDcB3if9FEH7rwMX3vRwx32pf8f+hXICt0U8W7neqSmF7vGl6/r66mrzdhy2xeX1DklsXqGZpRad2VaPWHe/OY3j20CZEDXDsWNfi4Wi6h4V0G3XC4PrkuNiKXm5d7nIzXi1Ua8+8oYixjbcmiaSqp4j9Vhil0p5YvVW913qPnOPTKYu31DMJU68PU5Q7HbDb/OO3Sj1HZfptUT5amnnpIv+ZIvIfoOvf2EreIrNlTGHVbhLqsWiojZlV9S89KyhcR7asSrqbgNldEnOn3l0Dxiq82EonGxOqyzK7V8oXqry1/Tsp/nGBnM3b4hmEodjCneRQ7vBZE6zh/Go9R2X6bVE+XNb36z/Kt/9a+IvsNeO5QShjHkiBtVmwJEBq+DP1AHME9KbfdlWj1BNOr+2GOPEX2HvXUoGulFsLVjSmKXyOANSn2A9wl1AHOk1HZfptUT5M1vfrN83/d9n4iIfN/3fR/R95lTaocCUCL4G3UA86TUdl+m1RNDo+5XrlwREZErV64QfZ85pXYoACWCv1EHME9KbfdlWj0xbNRdIfo+b0rtUABKBH+jDmCelNruy7R6QrhRd4Xo+7zpu0MZcpJq17z6tNWu/NDXZNOYfUwGLhO+M+oA5kmp7b5MqyeEL+quEH2fLyWL91wYY83lOdbzFOA7Yxd1jvkeJVKm1RMhFHVXiL7Pl5QOpariaxnbTXbqdju02J0J7RrfNl97n7ujYWircr3HtTmUXp2tep2WTfNWUnc7XC6Xh0S9rqmeuqZ5rJ6hDPjOAKAk6LFGJBZ1V4i+z5M+xHtVVQfi1RWV7jmNUG82m0PRandHzsVicXCf7t6p4jW2gZG+BKzX6yMbGIXSs9fY3TrVPvti4J7TMrr4dhP1lVk3Worlm1LPUAZ8Z1ACn/nMZ8Y2ATKBHmsk6qLuCtH3eRISE5vNxvuznzscJBQd1s9WQGuEebfbHdkdNJam3hva/dR+1nM2r7r0fHn67vftbBpaw9w3dMamqZ9T8q2rZygHvjMogfvuu29sEyAT6LFGIiXqrhB9nx9dI+8q8hUVn/ace2hadoiLHTYTEqZNxLvavd1ua9OLlaOLePcNBVoul7LZbGSz2RwMt0kR77F6hnLgO4PcuXr1qhw7dkyuXr06timQAfRYI5AadVeIvs+PruK9SeQ9hh0204d4r4u8h+5XUsX7brdLjrzr/+vQmM1mQ+R9ZvCdQe6cO3dOXvrSl8q5c+fGNgUygB5rBJpE3RWi7/OiDzHhjtkOjdO20WM3Mu2OebeTQnX8eop4VzvcMe+x9ELlsNH1kHjXtF18Y95tWk3yTalnKAO+M8iZq1evyokTJ+Ty5cty4sQJou+AeB+aplF3hej7vOhDTNjVUupWm7ER/MViERxL71uBJUW82xVsUtOLXePL1xXvqavN2HLbF5fU1WZi9QxlwHcGOXPu3Dl58MEHRUTkwQcfJPoOiPehaRN1V4i+z4epiAnfmPehGGOddyiTqfgbTA+Nuj/++OMiIvL4448TfQfE+5C0jborRN/nw1TExJjiXeTwDqup4/xhfkzF32B62Ki7QvQd6LEG5M1vfrPccccd8uIXvzh4HD9+PHr+jjvuIPo+AxATAMOBv0GOuFF3heg70GMNxFNPPSV///d/X3tUVZV0HdH3aYOYABgO/A1yxBd1V4i+zxt6rMzgIQIitAOAIcHfIDdCUXeF6Pu8ocfKDB4iIEI7ABgS/A1yIxZ1V4i+zxd6rMzgIQIitAOAIcHfICfqo
u4K0ff5Qo+VGTxEQIR2ADAk+BvkRErUXSH6Pk/osTKDhwiI0A4AhgR/g1xIjborRN/nCT1WZvAQARHaAcCQ4G+QC02i7grR9/lBj5UZPERAhHYAMCT4G+RA06i7QvR9ftBjZQYPERChHQAMCf4GOdAm6q4QfZ8X9FiZwUMERK63Aw4OjuEOgDFpG3VXiL7PC3qszOAhAgAAMC/e+ta3yl133SVf/uVfHjxe8IIXRM/fdddd8ta3vnXsosAAoBQzA/EOAAAwH5555hn5xCc+UXtUVZV03TPPPDN2kWDPoBQzA/EOAAAALugDUGgJmYFzAgAAgAv6ABRaQmbgnAAAAOCCPgCFlpAZOCcAAAC4oA9AoSVkBs4JAAAALugDUGgJmYFzAgAAgAv6ABRaQmbgnAAAAOCCPgCFlpAZOCcAAAC4oA9AoSVkBs4JAAAALugDUGgJmYFzAgAAgAv6ABRaQmbgnAAAAOCCPgCFlpAZOCcAAAC4oA9AoSVkBs4JAAAALugDUGgJmYFzAgAAgAv6ABRaQmbgnAAAAOCCPgCFlpAZOCcAAAC4oA9AoSVkBs4JAAAALugDUGgJmYFzAgAAgAv6ABRaQmbgnAAAAOCCPgCFlpAZOCcAAAC4oA9AoSVkBs4JAAAALugDUGgJmYFzAgAAgAv6ABRaQmbgnAAAZbNYLGS73dZet9vtZLVaDWARTAH0ASi0hMzAOQEAyqWqKqmqqla8r9drWS6XslqtZLFYDGQdlAz6ABRaQmbgnAAAZbJYLGS32yWJdxGRzWZzcA9AHegDUGgJmYFzAgCUTYp43+12slwuZbPZMHQGkkAfgEJLyAycEwCgbFIj7wBNQB+AUun4PA4ODv8BRxn7O+HgyJmq6le8j13XHBwc2R15d4IAY4J/+KFeYExyb39V1b94BwAQEcQ7QB34hx/qBcYk9/bnindrbxvbcy8vAAwH4h2gBvzDD/UCYzK39je38gJAGMQ7QA34hx/qBcZkbu1vbuUFgDCId4Aa8A8/1AuMydza39zKCwBhEO8ANeAffqgXGJO5tb+5lRcAwiDeAWrAP/xQLzAmc2t/cysvAIRBvAPUgH/4oV5gTObW/uZWXgAIg3hPJLRIfl/r+O52O6mqqtctsjebjVRVJZvNprc05wj+4WdO9bJYLIJ9QBO2261UVSXr9XqQ+6bMnNqfyPzKWzK2v1gul2Obk8xqtZKqqmS3241tCtSAeE9k306IeM8X/MPPHOulq4hGhPfH3Nrf3MpbKuv1+uC7Ks3fEe/lgHhPJCTeVSBro9fPy+XySHReHdleq2m64l3TrapKFovFoTy1c/BF/+05zceKd3sfoj4N/MPPHOvFfRjr3+rv6s8hH431ASn3WREwd1+eUvv72Z/9WXnnO98ZvWZK5Z0y2hf4fpXXc8pisTj0fLe6wfYLsfts32Pztf2DGxS052w+iPdyQLwnUife9Zw2fn3I2nP6AFansw9kK97dB/VisTiShjqXPee+AOLXSkUAACAASURBVKgz64Pd2kJUPh38w88c6yUk3u0DOOajsT4g5T5fvzJXX55C+/v93/99efnLXy4vfOEL5fjx4/Lyl79cfvd3f9d77RTKOwfsi7kbcY+JcCucQ89y333uc96mJXIjIKCifrlcHgkS6jnEezkg3hPxjXVdLBZHHpxu47dO5oue6QPaOqs6m6bh/m3/zwoB1xGtbe45mzfEwT/8zLFeQuLdN9zN56OxPiD1Pnz5OiW3v7/7u7+T7/qu75K77rpLvuRLvuTQc+X48ePyute9Tv72b//20D0ll3duuFpBiYnwmA+niHerD2wEPyTEfb/cId7LAfGeSF3kvYt4XywWh8S7fXO3x263OxK5s+m7b9jWNvecey+EwT/8zLFeQuLd+nTMR2N9QOp9+PJ1Sm1/58+fl1tvvVVuu+02bz+vx3Oe85wjw6SgHHxD42IiPKQx6u5zz4nc6C9iLxA2qId4Lw/EeyL7FO91kXdLLH0i7/sB//Azx3pJEe9t+4DU+/Dl6+TW/j7/+c/Ln/zJnwSPX/u1X5NTp07J3XffHRXt9rj77rvlnnvukV/91V/NrrxQjz7X3aExSmrkPXafT7xbdOWb1WoVnFuHeC8PxHsifYp3re/UMe/L5dIbXXcjdYx53w/4h5851kuKeI/5aKwPSLmPMe83yKH9PfXUU/L6179evv7rv15uuukm+Zqv+Rr5uq/7uiPHYrGQe+65pzbaHorAnzx5MovyQj3Wb/VvfSZbfeD6uKsdrI/H7vOJd6s7VBf4+g5XIyDeywHxnkioY33DG97QWLw3XW3G/X7s/7uOa+9zV5vRPPSY28O+LfiHnznWS4p4Fwn7aN1qM3X3aT748vjt7y/+4i/k3nvvlYcfflj+9//+3/JP//RPtfc88sgjcvPNNyeJ9ttvv12OHTsmP/ZjPyYi45cX0nB90x3OZv07ttpM6n2hyLvblyh2To3eq/0K4r0cEO8DUtqar3Ad/MMP9TIc2nfMUaSHGLP9ffCDH5TnPe958jM/8zON7/3rv/5reeCBB+Suu+4KCvc77rhDXv3qV8tf/dVfHdyHvwGAgngfEMR7meAffqiXYbCRMrjBmPXxAz/wA/LWt761UxqPPfaYvOxlL5M77rjj4Ps9ceKEnDp1Sn7rt37ryPV8/wCgIN4BasA//FAvMCZjtb9nn31W7rjjDnnyySd7Se8d73iH3HHHHXL77bfLI488ErwOfwMABfEOUAP+4Yd6gTEZq/09+uij8sADD/Sa5j/8wz/IZz7zmeg1+BsAKIh3gBrwDz/UC4zJWO3vu7/7u+Xnfu7nBs8XfwMABfEOUAP+4Yd6gTEZq/0tl0vvmPR9g78BgIJ4B6gB//BDvcCYjNX+Xvayl8mHPvShwfPF3wBAQbwD1IB/+KFeYEzGan8vfOEL5VOf+tTg+eJvAKBUVSWV7t7GwcFx9Dh58uTYfpol9BscYx6uX67X6+D28j6aXGupqnFENP7GwcGhxz/3B7zRA4TAP/xQLzAmtv3pWvgpglxFvu4uud1uW+c7JPgbACj/LOLpFABC4B9+qBcYE21/KsRXq1VyNF03zOuS79DgbwCgIN4BasA//FAvMCZu+0sV77vdThaLxcG/XfMdCvwNABTEO0AN+Icf6gXGpK147zvfocDfAEBBvAPUgH/4oV5gTBDvADBXEO8ANeAffqgXGBPEOwDMFcQ7QA34hx/qBcYE8Q4AcwXxDlAD/uGHeoExSRHvVVXJbrc78rnPfIcCfwMAJWvx3mU5Lx+6rq/vaGNX0wdB2/tgXHL1j7HJtV7m6Gex/iy0lnnf/evQzE1E5/BduW0mZZ381HbWR3uM+X7p7R3AMivxbmmzOYcyR3EwZ3L1j7HJtV7m5p+LxULW6/XB37phkdKlr8uZMdrftWvXZi3eXRDvAOPQSbzvdjtvtEedRB8i6kz6ebVa1V5rz9XlV3fO58xup6N5aXReRGSz2RxKc7PZHLp2tVodsTnlPr02ZjPkA9+Nnzb14vMd64d99ik+//T5eRc/9N1X5/+hPma1
Wh1adzylT9NrfP3bZrORxWJxcP+lS5cO5W/717rvJWbnWIzhl0888YScOnVq8HxFupW3TTuteybbtrXb7WrTs7a46dm2p9T1BZqGtnN7Lub7dekDlEAn8W4d1I43tM4vcmO4ym63O+J41tlErkeNXEesy0/kcPQpZeJSSLxr+u5D0WeXz+aU+/RcU5thHOjY/XQR76F2H/KJvvsU9XMtR6hfibFYLA7uW6/XslqtkvzfzVv7ocVicUgUp/YPWhc2XYvm4ebvE++h/GJ2jsUYfnn58mU5c+bM4PmKtC9v23aa+kzebrdJ6YXKZH3PXlfXF9SJ9xTb2/g9QA60Fu/b7dYbJfI9TGMPglA02qaTmp/vXKzgPvEeusfXQfhsTr2vjc0wDoh3P13Ee+zBq+yzT7FCP9SvpJTD0sT/3XNN6sJHKOpp6yb0klNnW8zOMenql88++2zjey5cuCBnz57tlG9buvibpWk7jT1r3edoLL0623zt0doc8uE2trf1e4BciIp3+9OT/Vla5OjDwv5E1eVBa89bZ4vlp+eUvsS7LX9VVcEOwk0v5b42NsM4IN79tOk36kTiPsS7Pe/LP9SvxIhFE1P7jZgo7tI/aD5aH4j3wzz77LPy4z/+43L33XfLT/zETzS69+zZs3LhwoVW+Xali7/V3Zf6fIuJ97r0XEJtPHauqXgP2d7W7wFyISreY7hvru65tg/a1DdlX35KH+LdTbNtZILIe/kg3v207TfqHrzKGJH3puWo+/99R943m4335347VALxfh0r2r/3e79XPvzhDze6/8qVK1JVlVy5cqXRfX3Rxd/q/r9r5D0lvTobUiPvri/0EXkHKI3W4l1v1geEfVNu86DVsW2pY9TcN3N3fFydY9aJd1/6rl0+m1PuC415pzPJE8S7n77Fu0jYJ/ruU9zIXKhfqSu/5ql5NPF/N43lcplUFz477DAZtUHz6Sre6+wci9TvqatoV+6//345f/58q3v7oMtzuk07TX0mb7fbpPR8uG085ZmugtzXHvvSEwC500m8qxO5Pzm1edDaNdiV0Nu37ycu95xbSDdiVSfeRQ7PpvdFze3PhJa6+9yogC8NyAe+Gz/7EO8hn2jTp/j80+fnsX4lRsjWVP/X8uu1bVabsX+HyqB968WLF1uL9xJXm7l69aqcO3dOTpw4If/xP/5H+chHPtI6rze96U1y+vTp1vf3QV/PaaWunaY8k+3KRXXp2XL4noGpq82I3FgO1W2PKb4fSp9fvaEkOon3PvA90KZKLAIB+cJ35od66ZfVanVkzHKO5GJnqP31Kdo/9rGPybd927fJ6dOn5bHHHmudTh8M5W9zeiYDlArifSA0UmA3U4EyQKT6mXK9uFE5e/QVdda+r+90+yZXO932Z0X7937v98qf//mfy+c+9zn5/Oc/HzyuXbt2cHzuc5+Txx9/XH79139d3v72t8v3f//3y7Fjx+Qnf/InRyrhYRDvAKCMLt4Bcgf/8EO9wJho+/v85z8v6/VaXvziF8s999wjN998s9xyyy1yyy23yHOe85xDx80333zkuOmmm+Smm24SEZGv/MqvlFe96lXyxje+UX76p39aPv7xj49ZxEPgbwCgIN4BasA//FAvMCZu+/vc5z4nb3vb2+RFL3qRvO51r5MPfvCDI1m2H/A3AFAQ7wA1pPjHM888M4Alw/GP//iPtdfQb8CYhNqfK+L/7M/+bGDL9gP+BgBKJ/FuV3jwjQ21y5fZmevu0YS24/Ha3NdkgqmtC5gWsTZw9epVeetb3yqf+MQnBrRo/3ziE5+Qc+fOydWrV4PXDCkm2k72bnMffl8Gdd/R//t//08eeeQR+dIv/VL5zu/8zuJFfK7ifeiFGHw+564+1YaYRtBzvvkeusrOPuYIlKibXEL299lv7qMNpqZp5wTp9zXEhP5exbv9MnTClxXwvvuakutkGh7i08XnH1evXpWHH35Ybr/9drn11lsnKd5vvfVWueOOO4IivgTxvm/w+/FIbQ+uiP/Qhz60Z8v2Q47tX2R+4t1dZnaf4t1Sqm4aoo8cW7zbl7r1el22eBe5XgifQ7nXuuvKihzdvlhfAnzruIbWTfbdl7LGul7nWwfZ5qtlsOvb5vZSAd2x7cOK9uPHj0tVVXLnnXdOUrzfeeedUlWVHD9+3Cvi2/YbNprkdnJ1/pt6nV1/2t6H30+Hpu3vn/7pn+Snfuqn5IUvfKH8h//wH4oT8W39rW7vBLtmurvuet219lxdfnXnYv2CzS9FvIf6h1g+MV+35bcrxqlQU/vq+iVf/YW+LzfwmbtuChET72O0wVjbsFH01H0HQuLd3Syvb/7Zjv2I99CbS6gRaiW62x/7dknTL9WeS7kvtLup7yXD9xAP3RNroFA2VVV5RbseUxfvergivk2/sdlsDvnNYrE48JsU/029LiT68fvp0Pa5ZUX82MtA/u7v/q78yZ/8SdK1XZ7T6g+2/bptW0Wgb0Ox2HM3tnOp6y8h/4v1C77yxMR7rH+I5RPzdT233W6P3K8i7f3vf38r3WLTqqrwUqyl6KY6uy1Dt8FYWX33pTw76iLvdd9rW7IS76E3FLvbX+hN0Hdv6L7QW5qbhu8hnrJ9OEwHFanHjx+XW2655ZCY1eO2227z/n/pR6hct91228ELTGxMvA/Xb1Kvjf2EGesffD6s4Pdl0/a5pXz2s5/tyZLm/M3f/I18z/d8j3zFV3yF3H333fJDP/RD8n/+z/+J3tOmvK6wsG3ebdspuxb7nruuv9Tl5zvXpF8I9VchIWnTjuUT83Vru16jQ2ZC/UiqbkmN0Jaim3x2u0fIrn23wZSyKqnPjtRhM7Gh5G3457r0dwr2JxafMbZifQ+xLuLd5h37st30Uu5TZ1F4iEMMFannzp2T22+//UhHNJfI+6233iq33367PPzww9HIe12/4W5tHrrX57+p1/nEO34/LbqK97F429veJrfeeqv85//8n+Xzn/+8fOYzn5HVaiVf9mVfJv/jf/yP4H1t/M0dIqBHV/Fuz/t8zJdfnf/F+gW3HuqGzYT6h1g+qeJdh0SoSLPlaKtbRG5EaEMvIaXopjq7LUO3wVhZQ+WLnUsR7/prQlGR96Zj3kPjl1LfIFPvIwIHTbBtxSfipy7eXdGu9CGefD9bK6GHZ5P+gcj7dClNvL/nPe+Re++9V77lW75FPvzhDx85v91u5b777pN/+2//rfz+7//+kfNtyusKC/dcW+GUGvX05afExF+XYTOx/iGWT6p416E3y+VSttvtQTkuXbrUSreE6il1zHtuusllX+K9jza4z8i7Cv59PR/2Jt61cD7D6xqh+5azWCyOVLCOPXLHs9XdFxq75fuyeYiDSHi1GRXxz3nOcyYp3m+55RavaFfa9Bu+8YDqNyn+m3pd6IGG30+HUsT7Rz7yEfnWb/1Wuffee+U973lP7fU/+7M/K//iX/wLef3rXy+f+tSnDv6/y3NahaD1nTbCyffcdX0slJ9I2P9i/YKvPDHxHusfYvmkinfVNu7fjz76aCvdkkopuqnObsvQbTBWVl/
5UtpuXeR9X/Qq3t0j1fl8P4fYlRx8b3/2pw9L3X3uW5svjdDbXOwhble3gGkR8w/WeW+O9VG7coN7Lhb1SfXzUMQEvy+fIcT7xz/+cfnpn/5peeMb3yivetWr5Cu/8ivlpptuSj6OHTsmVVXJq1/96kb5Pvvss/Kv//W/luPHj8vb3/52EelvtRnXN5oIJ7tSi1LnY/a5HvO/WL9g06kT725aPgHqyydVvIvIQeTdlknHwMf6JV/9pVKKbvLZ7Ts2m80obTDWNux9XVeb2TedxHtJuF8uQCop7YYdVgGGZd/t7yd/8ifl2LFj8v3f//3y9re/XX79139dHn/8cfnsZz8r165dSz4+9KEPyTd90zfJ137t18rly5dr8/35n/95+Zf/8l/Kd33Xdx0KCozpbz6hCOmUWn9NdNN6vd5r+XKtQ8T7HtEJKu7bPEAKU/ePtlAvMCb7an+PPfaYnD59Wr7t275NPvaxj/WW7qVLl+SlL32pPPDAA/KXf/mXR87/8R//sZw5c0a+4Ru+QR577LEj5xHv5VJi/TXVTV03yaoj1zpUu+yvAoh3gAzAP/xQLzAm+2p/p0+flje96U17SVtE5Cd+4iekqir58R//cRG5/qvdD/7gD8rzn/98ecc73hG8D38DAAXxDlAD/uGHeoEx2Uf7O3/+vNx///29p+vy8Y9/XB544AF56UtfKl/6pV8qZ8+elX/4h3+I3oO/AYCCeAeoAf/wQ73AmPTd/q5cuSJVVcmVK1d6TTfGb/zGb8gf/uEfJl2LvwGAkrV4j62zaf/Pzg7OGZ3d3PfkWXcWty9PFybwpkM9+WlTL2OMW+zS1vGtfOm7ji9cuCBnz57tNc0+oU0BgJK1eLf4Ftd3RcByucx6UmroYd8Fu/aoyI1JJvvMc26U4B9jUIp4bwu+lTd9++XZs2flwoULvabZJ/RDAKB0Eu+xdS91lrJdZzMUIXe3s9XF9WOR98VicWQHMrudbYp9do1mn71N04mVxa4tandic184dJ3TlPVUQzucab6+PN0y23LYNWCtKLFlSt2YYUrw0PTTRbyH2lofvhby77o8tEz4Vhn07ZdnzpxJWspxLOiHAEDpJN7tA9O3PbGKUn2oxRb9179Du2b5djiri965O2K59rkPe32pcHc/S00nVhatL3fYjG61bPNSceDL00Xr1n2RCeUZezEK5WcFh7sj2Rzgoemni3gPtbU+fC3W1mN5uOBbedO3X546dUqeeOKJXtPsE/ohAFBai3d3YXr7MG2ya5ZLaFezpuI9FG2L7cLoe6Fokk6sLLbcobLo51ieoXx8EUhfnr6dJ2M7y7llcP+eAzw0/XQR7ym7GLb1tZS27ssjlC6+lSd9+2VVVXLt2rVe03RZLBath1rRDwGAEhXv9qfeuqEuerQR7zafqqp6Ee9qn9JEvFsbm6QTK4tN07fV8mazkc1mI6vVKppnHZq21i8Cozs8NP206Tfq2lofvhZr6/jWdNiHeN8n2k4R7wDQld4i7+65VPHum4haJ95F4mPem0bM+4q8x8piy+1ep9vpqohPjQ66Q258dYPA6A4PTT9jRt5T+o0ukXd8K39KEu+LxeLQL6ttoB8CAKW1eNeb9UFmI1pNxLsbCbPjPmMP3dBqMzbS545trXu467V1Y95D6cTKYsvtlkX/du/15elivwNfvXQVGDYNreO5CQwemn76Fu8i/fhandDHt6ZBSeLd5oF4B4CudBLv7qoNoUh23bAZu3KDvTcm3kWODt1xf6KvW7nCtVcnqLn5pKYTK4vIjQlwFy9ePJLHYrE4ZH/Kihj2b9/34MuzjcCY+4oYPDT97EO89+FrdeId35oGiHcAmCudxPtUqJsMBzdYrVbZb4bVN3P3jxDUS7/M0be6gHgHgLmCeBfEeww7pKeqqllGBufuHyGol27gW91AvAPAXEG8A9SAf/ihXmBM5ijeOTg4OMzBQxggBP7hh3qBMem7/eXennO3DwCGA/EOUAP+4Yd6gTFBvAPAXEG8A9SAf/ihXmBMEO8AMFcQ7wA14B9+qBcYE8Q7AMwVxDtADfiHH+oFxgTxDgBzBfEOUAP+4Yd6gTFBvAPAXEG8A9SAf/ihXmBMEO8AMFcQ7wA14B9+qBcYE8Q7AMwVxDtADfiHH+oFxgTxDgBz5UC8c3BwhA84ytjfCQdH3+05Z3K3DwCG45/7QDoFgBD4hx/qBcYE8Q4AcwXxDlAD/uGHeoExQbwDwFxBvAPUgH/4oV5gTBDv7dhut4eGHq1Wq17SjVFVlWy322zTAygNxDtADfiHH+oFxgTx3pzNZiNVVclutzv4v+VyKev1unPaMRDvAP2CeAeoAf/wQ73AmCDe/ddYYe6yWCxks9kc+r/dbncobf3bnRSsEfvVanVwzgro5XLpjeYvFouD/7906ZJUVXVwraapNm82G1ksFkfKZG2x6e12u1p7NS+AKYF4B6gB//BDvcCYIN7914TEu4rcmLgXuS6ONRK/Wq1kuVyKyA0x7Du32WwOPmsaVtirUNc09AWiTrzbl431en3wUmBfHOrsdV9WAKYA4h2gBvzDD/UCY4J4v44OhXEPdyhMinhXweu7Jya0fRFz13Yr3jWNWJquLWqPL70UewGmBOIdoAb8ww/1AmOCePdf0yXyri8CvnvqouTr9To4CbYv8e6m18RegCmBeAeoAf/wQ73AmCDe/de0HfNuBW/sXGx8us0nNmxG03BfKJqKdyLvMFcQ7wA14B9+qBcYE8R7c0KrzbgTTO0YcldM+4S2HY+uaTQR73rtcrk89EJgz63X64NzsTHvIXsBpgTiHaAG/MMP9QJjgnhvhztG3h3iUrd6S2xyaWi8va74cvHixSOC2h1uY9MM2WJXq0m1F2BKIN4BasA//FAvMCaIdwCYK72Kd3ZugynCQ9MP/cb+0oN6EO8AMFd6E+/s3AZThYemH/qN/aUH9SDeAWCuJIv3LrPY3b9DY9PYuQ1yhO/YD/0G/caYIN4BYK70It7ZuQ2mDA9NP/Qb9BtjgngHgLkSFe/s3Mb6scBDMwT9Bv3GmCDe90dsjXX3nM/X3V/B9mlPU9brtazXa9lut4de8LvQpL4A+mCwyDs7t0Gp0PH6od84mh79xnAg3vdHU/G+7/kefQng3W53SLCrkN8niHfYB72IdxF2boPpQsfrh35Dguml2AvdQLz7r0l5GdbD+p5d9UnnkaSci4l3O7RN77dzQuxLdehae04JzSupO7darY70N25ZVOBbUa/D3+rms6TUF0AfJIv3Oti5DaYKHa8f+g36jTFBvPuvSf2ly7ZfvVeFrU+gx86liHeRG5PIfS+zdgK6a58v2m/tcee3+Oac6H0uy+XyiP2+OTPav8Tms6TUF0Af9CbeRdi5DaYJHa8f+g36jTFBvF8ndY6J776QOLZ/x86pXe4RemH1iWkVzrFfwVx7fD6Z8kud75c539AZe599oY+lnVpfAH3Qq3gHmCL4hx/qBcYE8e6/JvZyaJdVtSI7NFej7pzmWRd5byPe7XkrgEMvK7vdrnbOSUi8+zaGWy6Xst
lsZLPZHAy3SRHvdfUF0AeId4Aa8A8/1AuMCeLdf01IvLsR4D4j7/sQ76mR91gZ3Qh6auRd/1/t3Ww2RN4hKxDvADXgH36oFxgTxHsz3IjwYrE4MszMjud2rw2d61u8az6pY9595fLNOdH7XHxj3m2eKWm79sXqC6APEO8ANeAffqgXGBPEe3PsPJBQpLuqjq6QEjvnG8Ki4rqNeLc7Iyt1q83YXxv6WG3G1pcdUpO62kysvgD6APEOUAP+4Yd6gTFBvE+LISZ0j7HOO8A+GFW8x35uSyE2zqwroWhCn5tR9GlzTp3Qdhveuc5GVdyjaR5tOvo29039odmWfdZLXw/yJj7bpj229eHU+3zXdfX1PvudmK+3SStk13a7ld/7vd879H+I92kx1GpMdofV2C7MADkzGfHeN30LdR992e9GE3IgRWB0qeMhl92b+kOzLSXUS2obs2NURW4sF1nHviej+X6O7+rrfds81C6ViPe87QOA4UgW73VCKbZzm8nooAOyY+8uXbp0KH13veZQ2qHIu12nWY/1el2bjkaF1d4Sd42zG124vxbUrYsdW8O6yW5zSmwsoa+Ofd9D3Xdmy2nTr7vPN84z5Ac8NP3ss9+w31Ndu7W/5rgbM9m2Efqu3c1rrO3upLhYH5RynV0Pvsl9ijtmN1T2fdjs9gNWtPt8ve9dKhHvedsHAMPRi3iv27nNboFu11TVh1js4RxLO3XYjD6Q6tKxD8VU8S6S165xrm2+XeBCIiiUf9Pd5pTQLH6bn0+8qw0p372vXlPu03N1ZXDrF26wz34jVbz72qZtU7aNxfxLfdgNOqTamXqdT0Cnpq9lUGJl34fNvvz0viF2qUS8520fAAxHVLzvY+c2ETno8FPEe2raIfEeeiD70nE3h3CPEnaNi9Vl3blQ/k13m1Pqfk4PifeQ2IuVJRQ9jd0XKsNnP/vZI3bCUYboN1LEe2zcqu1jQu07ZLev3wjZ2aQ8be7Tz+4vkqljdvuwWUQO9QN1E/+a9hux/pxhM/nb58MGWJT1et37MM99zB8bcj5Hid8tjEtUvLsXxn7+Du3cVhcRTxHvKWn78rER/5R03IdUXeS9jXj3lVvL7BM8u12zXePaivdY/iLNdptTQjvXherYV1dtv7OU+0JlQLynMUS/kTJsxg5Jc9ubtom69u2ieaa2pyblaXOffnbFeqzsfdus1242myNifYhdKhHvedvnw+dn+xDvfTHGajSId2hDL+LdN4ayiXh3hV/s/tTIuzveMiWdIcR7auTdl5/iRrRSxXtdPceieE12m7P3dIm8N/nOYr9UEHnfD0P0G+4vP+51LqFhM7H27Q7vsGn52npqH5QaeU9N3/X1WNn3YbP+nx27rgyxSyXivR9/882/Eon/8mTPuRsf6Xeoh2vPcrk8Iojt36H7Q/b6XlZ9bcc376uunKnrwPc9nyP3tgf5kSzeY9Tt3Gadx46pdMW7XrNcLo9EhH1phzr71WoVnLwVS2ef4n2oXeNSxLuvnuvyt51Nik2afqgO3XoK2V73nfnqtcl3XVcGtROOss9+wyfefe3Wjfimjnl38w6JFd8vNKE+qEm7a3qftVOJlX0fNlsbUn29Sb/hltW9DvHej3hvOk9I09Xv132+x+YN6X32V3BXvNfNgWgyt8x3n00zpZwuQ8znyL3tQX70It5F0ndus3nZlRfcn3/dzsGXdki82+vdNGPp1I15V4drI96H2jWu6RCDWATFfQgsFmm7zdl6sJ99IqBu2Ezdd2ajK66tKd91w0K2kQAADD1JREFUXRnccsAN9tlvuN9Tav/gRn9tH1PXvl1fb9IOm5an6X2KGxmMlb1vm60Nbl4xX0/tN2L9IeK9+xyT2K+VvjRD80piv4y7aWqbstdZ8Z7yS0yT56yvnE3m0PmuG2I+R+5tD/Ljn/2chrMvQg/APnHH6eVATptGWdp0lPiHn33WCw80Pzn4utufjeXriHf/NXWR96bzhGLivW7uk5u+thW7ktnQ4j02xyMk3vc9nyP3tgf5gXjfM0OId5G8xHJsfPGYaPS2aT3hH372VS9tv6e5MJavu8Mu9P/G8nXEu/+aVPGeOk+oj8i7PRcbTrJv8R4rZ2hOyRDzOXJve5AfiHeAGvAPP9QLjAnivRlt5wlp3qG5UnXzsezQSH0xj415D8256EO8p5TTZYj5HLm3PcgPxDtADfiHH+oFxgTx3ozY/CuR+Ly1LqvNuMJ3sVg0Wm2m72EzsXKmrjZj0+pjPkfubQ/yA/EOUAP+4Yd6gTFBvDejryGcvtXcpoI7pySn4agAFsQ7QA34hx/qBcYE8d6MtuLdDg+xEzynit1hdeplhXJBvAPUgH/4oV5gTBDvADBXEO8ANeAffqgXGBPEOwDMFcQ7QA34hx/qBcYE8Q4AcwXxnjF///d/P7YJIDw0Q1AvMCaI93HxrSQTus49+pjw2sf66KllEDk8ebX0Mm232942e4vZzBr2++NAvHNwcIQPOMrY3wkHR9/tOWdys6+q0sW7b7lId0nGpgwp3t1VaKZQpiFW0kG874+qqoSazZCrV6/KsWPH5OrVq2ObAgAweXIXGSn2VVV4NZnlcnlIrOm66XVrk+s65FYU2rXSd7vdoXXgq6o6JGJ9YnK1Wh2s6FJVN9aeF4mvG29XvrHro9et7a522DTdMsTyddd/n0KZ9H77WV9S2rSTuvJAvyDeM+XcuXNyzz33yLlz58Y2BQBg8uQuMrqK981mc2RnUxXdKkztRkjujqyhTZJU1Gm+dudWe51r53a7PcjDCmN391E3T2trqtC1UfH1en0wxMV9IYnl67O/5DKJ+HePDbWTUFqueA+VB/oF8Z4hV69elRMnTkhVVXLixAmi7wAAeyZ3kRGyz4166+EOibAiWz+7a5nba+rEY2h4hu+6kG1uHqGdSK09Suquqr6hG3qdK7hD+cYi3iWWSfENnQm1k5QyxMoD/YJ4z5Bz587Jgw8+KFVVyYMPPkj0HQBgz+QuMrpG3kWuR1o3m41sNpuDoSA+IdpUvNuhNVVVJYl8Eb9ADYnE0LlQOjGh69oWyzck3ksuk2Ij9pZQOwnVj56LlQf6BfGeGRp1/+hHPypVVcnjjz9O9B0AYM/kLjL6EO8q1lScxXYRTRXvrpBMjdD78mgbpXaFaVOhG8t3t9t1Eu85lkkJTVr1tRMi73mBeM8MjbqL3Oisib4DAOyX3EVGH/bZyYQ2XR2jbCOnqeLdjbYuFovW4l3vt2Or3XHe9pwrdDWf5XIZtMGOybf/H8vXrfsplEnT9JXD105CabkCPVQe6BfEe0bYqLvIjQ6D6DsAwH7JXWT0Zd9isTg0VMJdRcSNGIfEu66motF7vd+9r6nQja2QYs+5wnC9Xh865xvL76Zpy9B1tZnSyqTlsJ/dF45YO3HLm1Ie6A/Ee0acO3dOHnrooYO/baMn+g4AsD9yFxm52zdl3HXep8AQ67zD/kC8Z4IbdRc53FkTfQcA2B+5i+Pc7Zs6UxK7sbkOUAaI90xwo+4iRztrou8AAPshd3Gcu30AMByI9wy4e
vWqHD9+/FDUXeRoZ/3444/L8ePHib4DAPRM7uI4d/sAYDgQ7xngi7qL+Dvrhx56iOg7AEDP5C6Oc7cPAIYD8T4yoai7iL+zJvoOANA/uYvjnOzrsn63XbHEHqHVW7ra59o6dP45ppnL+P2pfxfb7ba3ic7PPPPMob8R7xlw4cIF7/+HGkfoegAAaEdO4thH7val0rc4q8Mn3vedf87iPaeVc+bwXezrRQnxnjFT6awBAHIn9/42xT53rW6XurW6dV1uK6qWy+Uh8WGvUXSjJj10TXRNV9ceVxvr1ki365u764Zb6vJ1P/eZf+haX56x9dZDZbDp+NZMj6Wp9e2rM7tmfdP6jqVry+HbEMrHHL4LLaf9rC9Qrl8tl8tgWkTeCyL3hwkAwFTIvb/tQ7xbQaJiQeSw+HHPbTabQ5Fa3bxH7VGxofna3T413aYbHKkdKhZ3u92RzY9S8nU/95m/feGJ5R+r91gZfPfZNN2dTGPfly2v76Utpbwp6dqdXfsU73W25fpdKL6dbH31qRuh+dJCvBdE7g8TAICpkHt/G7LPjRbq4f5U767tbcVKbDdVe51+jg0bsPf6dhv12Rq63hVCMbHny9f93Gf+sZcJN/9QvaeWoa5MNk13J1yLa0uT8sbSdc/FrrVM/btQfENnUvzKXoN4L4jcHyYAAFMh9/62a+Q9JPLrxLvI9cjhZrORzWYjq9XqiMiwQ2liAkxtrIu2por3unzdz33mHyubzTNW77Ey6H2KCrnYORs11vTc4SVtxXss3S7iferfhdabO8xI5KhfxdJCvBdE7g8TAICpkHt/21W8x3bVrBPvKj5UbMTEcUrkvU/BFsvXd92+BGNqtNeXX10Z6soUiyDb4S273a6TeA+lm5t4z+27CE1ajfmVmxbivSByf5gAAEyF3PvbPuyrqhvjdm2Ur06828l69m83HZHD4533Kd5T8nU/95m/OyY7dZy1tTtWBv3bjn92r7Xn9D43ypsy5j2lvCnp6t/L5XIU8Z7bd6H4xrxb21LSQrwXRO4PEwCAqZB7f9uHfe5KFu6Ev5B4F7kxUdVeb89pmjat1DHvKqiaRoLr8vXZ2lf+eq1dgUUJRVDdeo+Vwb2vyQonNk034utbbaZNfbvpxlabCf0qMIfvQm0N1YX1q1haiPeCyP1hAgAwFXLvb3O3b274XkxKQJcp3Cer1eqQIF2v13utp5y/C9Z5nyF01gAAw5B7f5u7fXMjZ8FYR9+C0g7/qKrKO2xkn+T6XcTG2HcF8Z4xdNYAAMOQe3+bu30AMByI94yhswYAGIbc+9vc7QOA4UC8ZwydNQDAMOTe3+ZuHwAMB+I9Y+isAQCGIff+Nnf7AGA4EO8ZQ2cNADAMufe3udsHAMOBeM8YOmsAgGHIvb/N3T4AGA7Ee8bQWQMADEPu/W3u9gHAcCDeM4bOGgBgGHLvb3O3DwCGA/GeMXTWAADDkHt/m7t9ADAciPeMobMGABiG3Pvb3O0DgOFAvGcMnTUAwDDk3t/mbh8ADAfiPWPorAEAhiH3/jZ3+wBgOBDvGTN2Z71er2W5XCZf3+RaAICcGLu/rSN3+wBgOBDvGTNmZ71er6WqqiRBriJ/uVxKVVWy3W4HsBAAoD9yF8e52wcAw4F4z5ixOmsV4qvVKjmavt1uebgAQLHk3n/lbh8ADAfiPWPG7qxTxftut5PFYnHwLwBAaYzd39aRu30AMByI94ypqkquXbs2Wv5NIu8AAKVy7dq17MVxVVUcHBwcN46xOyXwc+rUKXniiSdGyx/xDgBz4IknnpBTp06NbQYAQDKI90w5c+aMXL58ebT8Ee8AMAcuX74sZ86cGdsMAIBkEO+ZcvbsWblw4cJo+SPeAWAOXLhwQc6ePTu2GQAAySDeM2XsB4pPvFdVJbvd7shnAIBSGTtQAgDQFMR7ply5ckWqqpIrV66MbQoAwCShnwWAEkG8Z8z58+fl/vvvH9sMAIBJcv/998v58+fHNgMAoBGI98w5ffq0PPzww2ObAQAwKR5++GE5ffr02GYAADQG8Z4573vf++T06dNy//3389MuAEBHrly5Ivfff7+cPn1a3ve+941tDgBAYxDvhXD+/HmpqupgctXly5fliSeeGHUjJwCAnLl27Zo88cQTcvny5YNFAKqqYqgMABQN4r0grly5cvAAOnPmjJw6dWr8Xb44ODg4Mj5OnTolZ86cOQh88AsmAJQO4h0AAAAAoBAQ7wAAAAAAhYB4BwAAAAAoBMQ7AAAAAEAhIN4BAAAAAAoB8Q4AAAAAUAiIdwAAAACAQkC8AwAAAAAUAuIdAAAAAKAQEO8AAAAAAIWAeAcAAAAAKATEOwAAAABAISDeAQAAAAAKAfEOAAAAAFAIiHcAAAAAgEJAvAMAAAAAFALiHQAAAACgEP4/8tkJqlOyQtcAAAAASUVORK5CYII=\">\n\n</div>", "_____no_output_____" ] ], [ [ "", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb9f3fbe8ad3c140f14f2fbb416a8b6959862d72
3,724
ipynb
Jupyter Notebook
nbs/exec.generate_filelist.ipynb
Cris140/uberduck-ml-dev
7349480210cdee40d6df494ecec5a62207d8ee72
[ "Apache-2.0" ]
167
2021-10-18T22:04:17.000Z
2022-03-21T19:44:21.000Z
nbs/exec.generate_filelist.ipynb
Cris140/uberduck-ml-dev
7349480210cdee40d6df494ecec5a62207d8ee72
[ "Apache-2.0" ]
18
2021-10-19T02:33:57.000Z
2022-03-28T17:25:52.000Z
nbs/exec.generate_filelist.ipynb
Cris140/uberduck-ml-dev
7349480210cdee40d6df494ecec5a62207d8ee72
[ "Apache-2.0" ]
24
2021-10-22T02:16:53.000Z
2022-03-30T18:22:43.000Z
27.791045
121
0.560956
[ [ [ "# default_exp exec.generate_filelist", "_____no_output_____" ] ], [ [ "# uberduck_ml_dev.exec.select_speakers", "_____no_output_____" ] ], [ [ "# export\nimport argparse\nfrom collections import namedtuple\nfrom dataclasses import dataclass\nimport json\nimport os\nfrom pathlib import Path\nfrom shutil import copyfile, copytree\nimport sys\nfrom typing import List, Optional, Set\n\nimport sqlite3\nfrom tqdm import tqdm\n\nfrom uberduck_ml_dev.data.cache import (\n CACHE_LOCATION,\n ensure_speaker_table,\n) # ensure_filelist_in_cache,\nfrom uberduck_ml_dev.data.parse import _generate_filelist\nfrom uberduck_ml_dev.utils.audio import convert_to_wav\n\n# from uberduck_ml_dev.utils.utils import parse_vctk\n\n\ndef parse_args(args):\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--config\", help=\"path to JSON config\", required=True)\n parser.add_argument(\n \"-d\", \"--database\", help=\"Input database\", default=CACHE_LOCATION\n )\n parser.add_argument(\"-o\", \"--out\", help=\"path to save output\", required=True)\n return parser.parse_args(args)\n\n\ntry:\n from nbdev.imports import IN_NOTEBOOK\nexcept:\n IN_NOTEBOOK = False\n\nif __name__ == \"__main__\" and not IN_NOTEBOOK:\n args = parse_args(sys.argv[1:])\n if args.config:\n conn = sqlite3.connect(args.database)\n _generate_filelist(args.config, conn, args.out)\n else:\n raise Exception(\"You must pass a config file!\")", "_____no_output_____" ], [ "# skip\nimport json\nimport sqlite3\nfrom pathlib import Path\n\nconfig_path = \"/mnt/disks/uberduck-experiments-v0/uberduck-ml-exp/configs/filelists/eminem-zwf-vertex_v2.json\"\nconn = sqlite3.connect(\"/home/s_uberduck_ai/.cache/uberduck/uberduck-ml-exp.db\")\nout = \"/mnt/disks/uberduck-experiments-v0/uberduck-ml-exp/experiments/eminem_zwf_vertex-v2/filelist.txt\"\n_generate_filelist(config_path, conn, out)", "_____no_output_____" ], [ "args = parse_args([\"--config\", \"foo.json\", \"-o\", \"bar.out\"])\nassert args.config == \"foo.json\"\nassert args.out == \"bar.out\"\nassert args.database == CACHE_LOCATION", "_____no_output_____" ], [ "# @dataclass\n# class Filelist:\n# path: str\n# sql: Optional[str] = None\n# speaker_ids: Optional[List[int]] = None\n# speakers: Optional[List[str]] = None\n# speaker_idx_in_path: int = None", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
cb9f4723330703c9e015ac3339539510ea62955d
32,448
ipynb
Jupyter Notebook
dl_c4_convolutional_neural_nets/DL_C4_W4_Assignment2_Face+Recognition+for+the+Happy+House+-+v3.ipynb
royarpan/dl_coursera
5191116efafd1ccce16e3b04540d07185626c1b6
[ "MIT" ]
null
null
null
dl_c4_convolutional_neural_nets/DL_C4_W4_Assignment2_Face+Recognition+for+the+Happy+House+-+v3.ipynb
royarpan/dl_coursera
5191116efafd1ccce16e3b04540d07185626c1b6
[ "MIT" ]
null
null
null
dl_c4_convolutional_neural_nets/DL_C4_W4_Assignment2_Face+Recognition+for+the+Happy+House+-+v3.ipynb
royarpan/dl_coursera
5191116efafd1ccce16e3b04540d07185626c1b6
[ "MIT" ]
null
null
null
41.6
515
0.598958
[ [ [ "# Face Recognition for the Happy House\n\nWelcome to the first assignment of week 4! Here you will build a face recognition system. Many of the ideas presented here are from [FaceNet](https://arxiv.org/pdf/1503.03832.pdf). In lecture, we also talked about [DeepFace](https://research.fb.com/wp-content/uploads/2016/11/deepface-closing-the-gap-to-human-level-performance-in-face-verification.pdf). \n\nFace recognition problems commonly fall into two categories: \n\n- **Face Verification** - \"is this the claimed person?\". For example, at some airports, you can pass through customs by letting a system scan your passport and then verifying that you (the person carrying the passport) are the correct person. A mobile phone that unlocks using your face is also using face verification. This is a 1:1 matching problem. \n- **Face Recognition** - \"who is this person?\". For example, the video lecture showed a face recognition video (https://www.youtube.com/watch?v=wr4rx0Spihs) of Baidu employees entering the office without needing to otherwise identify themselves. This is a 1:K matching problem. \n\nFaceNet learns a neural network that encodes a face image into a vector of 128 numbers. By comparing two such vectors, you can then determine if two pictures are of the same person.\n \n**In this assignment, you will:**\n- Implement the triplet loss function\n- Use a pretrained model to map face images into 128-dimensional encodings\n- Use these encodings to perform face verification and face recognition\n\nIn this exercise, we will be using a pre-trained model which represents ConvNet activations using a \"channels first\" convention, as opposed to the \"channels last\" convention used in lecture and previous programming assignments. In other words, a batch of images will be of shape $(m, n_C, n_H, n_W)$ instead of $(m, n_H, n_W, n_C)$. Both of these conventions have a reasonable amount of traction among open-source implementations; there isn't a uniform standard yet within the deep learning community. \n\nLet's load the required packages. \n", "_____no_output_____" ] ], [ [ "from keras.models import Sequential\nfrom keras.layers import Conv2D, ZeroPadding2D, Activation, Input, concatenate\nfrom keras.models import Model\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.layers.pooling import MaxPooling2D, AveragePooling2D\nfrom keras.layers.merge import Concatenate\nfrom keras.layers.core import Lambda, Flatten, Dense\nfrom keras.initializers import glorot_uniform\nfrom keras.engine.topology import Layer\nfrom keras import backend as K\nK.set_image_data_format('channels_first')\nimport cv2\nimport os\nimport numpy as np\nfrom numpy import genfromtxt\nimport pandas as pd\nimport tensorflow as tf\nfrom fr_utils import *\nfrom inception_blocks_v2 import *\n\n%matplotlib inline\n%load_ext autoreload\n%autoreload 2\n\nnp.set_printoptions(threshold=np.nan)", "Using TensorFlow backend.\n" ] ], [ [ "## 0 - Naive Face Verification\n\nIn Face Verification, you're given two images and you have to tell if they are of the same person. The simplest way to do this is to compare the two images pixel-by-pixel. If the distance between the raw images are less than a chosen threshold, it may be the same person! 
\n\n<img src=\"images/pixel_comparison.png\" style=\"width:380px;height:150px;\">\n<caption><center> <u> <font color='purple'> **Figure 1** </u></center></caption>", "_____no_output_____" ], [ "Of course, this algorithm performs really poorly, since the pixel values change dramatically due to variations in lighting, orientation of the person's face, even minor changes in head position, and so on. \n\nYou'll see that rather than using the raw image, you can learn an encoding $f(img)$ so that element-wise comparisons of this encoding gives more accurate judgements as to whether two pictures are of the same person.", "_____no_output_____" ], [ "## 1 - Encoding face images into a 128-dimensional vector \n\n### 1.1 - Using an ConvNet to compute encodings\n\nThe FaceNet model takes a lot of data and a long time to train. So following common practice in applied deep learning settings, let's just load weights that someone else has already trained. The network architecture follows the Inception model from [Szegedy *et al.*](https://arxiv.org/abs/1409.4842). We have provided an inception network implementation. You can look in the file `inception_blocks.py` to see how it is implemented (do so by going to \"File->Open...\" at the top of the Jupyter notebook). \n", "_____no_output_____" ], [ "The key things you need to know are:\n\n- This network uses 96x96 dimensional RGB images as its input. Specifically, inputs a face image (or batch of $m$ face images) as a tensor of shape $(m, n_C, n_H, n_W) = (m, 3, 96, 96)$ \n- It outputs a matrix of shape $(m, 128)$ that encodes each input face image into a 128-dimensional vector\n\nRun the cell below to create the model for face images.", "_____no_output_____" ] ], [ [ "FRmodel = faceRecoModel(input_shape=(3, 96, 96))", "_____no_output_____" ], [ "print(\"Total Params:\", FRmodel.count_params())", "Total Params: 3743280\n" ] ], [ [ "** Expected Output **\n<table>\n<center>\nTotal Params: 3743280\n</center>\n</table>\n", "_____no_output_____" ], [ "By using a 128-neuron fully connected layer as its last layer, the model ensures that the output is an encoding vector of size 128. You then use the encodings the compare two face images as follows:\n\n<img src=\"images/distance_kiank.png\" style=\"width:680px;height:250px;\">\n<caption><center> <u> <font color='purple'> **Figure 2**: <br> </u> <font color='purple'> By computing a distance between two encodings and thresholding, you can determine if the two pictures represent the same person</center></caption>\n\nSo, an encoding is a good one if: \n- The encodings of two images of the same person are quite similar to each other \n- The encodings of two images of different persons are very different\n\nThe triplet loss function formalizes this, and tries to \"push\" the encodings of two images of the same person (Anchor and Positive) closer together, while \"pulling\" the encodings of two images of different persons (Anchor, Negative) further apart. 
\n\n<img src=\"images/triplet_comparison.png\" style=\"width:280px;height:150px;\">\n<br>\n<caption><center> <u> <font color='purple'> **Figure 3**: <br> </u> <font color='purple'> In the next part, we will call the pictures from left to right: Anchor (A), Positive (P), Negative (N) </center></caption>", "_____no_output_____" ], [ "\n\n### 1.2 - The Triplet Loss\n\nFor an image $x$, we denote its encoding $f(x)$, where $f$ is the function computed by the neural network.\n\n<img src=\"images/f_x.png\" style=\"width:380px;height:150px;\">\n\n<!--\nWe will also add a normalization step at the end of our model so that $\\mid \\mid f(x) \\mid \\mid_2 = 1$ (means the vector of encoding should be of norm 1).\n!-->\n\nTraining will use triplets of images $(A, P, N)$: \n\n- A is an \"Anchor\" image--a picture of a person. \n- P is a \"Positive\" image--a picture of the same person as the Anchor image.\n- N is a \"Negative\" image--a picture of a different person than the Anchor image.\n\nThese triplets are picked from our training dataset. We will write $(A^{(i)}, P^{(i)}, N^{(i)})$ to denote the $i$-th training example. \n\nYou'd like to make sure that an image $A^{(i)}$ of an individual is closer to the Positive $P^{(i)}$ than to the Negative image $N^{(i)}$) by at least a margin $\\alpha$:\n\n$$\\mid \\mid f(A^{(i)}) - f(P^{(i)}) \\mid \\mid_2^2 + \\alpha < \\mid \\mid f(A^{(i)}) - f(N^{(i)}) \\mid \\mid_2^2$$\n\nYou would thus like to minimize the following \"triplet cost\":\n\n$$\\mathcal{J} = \\sum^{m}_{i=1} \\large[ \\small \\underbrace{\\mid \\mid f(A^{(i)}) - f(P^{(i)}) \\mid \\mid_2^2}_\\text{(1)} - \\underbrace{\\mid \\mid f(A^{(i)}) - f(N^{(i)}) \\mid \\mid_2^2}_\\text{(2)} + \\alpha \\large ] \\small_+ \\tag{3}$$\n\nHere, we are using the notation \"$[z]_+$\" to denote $max(z,0)$. \n\nNotes:\n- The term (1) is the squared distance between the anchor \"A\" and the positive \"P\" for a given triplet; you want this to be small. \n- The term (2) is the squared distance between the anchor \"A\" and the negative \"N\" for a given triplet, you want this to be relatively large, so it thus makes sense to have a minus sign preceding it. \n- $\\alpha$ is called the margin. It is a hyperparameter that you should pick manually. We will use $\\alpha = 0.2$. \n\nMost implementations also normalize the encoding vectors to have norm equal one (i.e., $\\mid \\mid f(img)\\mid \\mid_2$=1); you won't have to worry about that here.\n\n**Exercise**: Implement the triplet loss as defined by formula (3). Here are the 4 steps:\n1. Compute the distance between the encodings of \"anchor\" and \"positive\": $\\mid \\mid f(A^{(i)}) - f(P^{(i)}) \\mid \\mid_2^2$\n2. Compute the distance between the encodings of \"anchor\" and \"negative\": $\\mid \\mid f(A^{(i)}) - f(N^{(i)}) \\mid \\mid_2^2$\n3. Compute the formula per training example: $ \\mid \\mid f(A^{(i)}) - f(P^{(i)}) \\mid - \\mid \\mid f(A^{(i)}) - f(N^{(i)}) \\mid \\mid_2^2 + \\alpha$\n3. 
Compute the full formula by taking the max with zero and summing over the training examples:\n$$\\mathcal{J} = \\sum^{m}_{i=1} \\large[ \\small \\mid \\mid f(A^{(i)}) - f(P^{(i)}) \\mid \\mid_2^2 - \\mid \\mid f(A^{(i)}) - f(N^{(i)}) \\mid \\mid_2^2+ \\alpha \\large ] \\small_+ \\tag{3}$$\n\nUseful functions: `tf.reduce_sum()`, `tf.square()`, `tf.subtract()`, `tf.add()`, `tf.maximum()`.\nFor steps 1 and 2, you will need to sum over the entries of $\\mid \\mid f(A^{(i)}) - f(P^{(i)}) \\mid \\mid_2^2$ and $\\mid \\mid f(A^{(i)}) - f(N^{(i)}) \\mid \\mid_2^2$ while for step 4 you will need to sum over the training examples.", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: triplet_loss\n\ndef triplet_loss(y_true, y_pred, alpha = 0.2):\n \"\"\"\n Implementation of the triplet loss as defined by formula (3)\n \n Arguments:\n y_true -- true labels, required when you define a loss in Keras, you don't need it in this function.\n y_pred -- python list containing three objects:\n anchor -- the encodings for the anchor images, of shape (None, 128)\n positive -- the encodings for the positive images, of shape (None, 128)\n negative -- the encodings for the negative images, of shape (None, 128)\n \n Returns:\n loss -- real number, value of the loss\n \"\"\"\n \n anchor, positive, negative = y_pred[0], y_pred[1], y_pred[2]\n \n ### START CODE HERE ### (≈ 4 lines)\n # Step 1: Compute the (encoding) distance between the anchor and the positive, you will need to sum over axis=-1\n pos_dist = tf.reduce_sum(tf.square(tf.subtract(anchor,positive)),axis=-1)\n # Step 2: Compute the (encoding) distance between the anchor and the negative, you will need to sum over axis=-1\n neg_dist = tf.reduce_sum(tf.square(tf.subtract(anchor,negative)),axis=-1)\n # Step 3: subtract the two previous distances and add alpha.\n basic_loss = tf.add(tf.subtract(pos_dist,neg_dist),alpha)\n # Step 4: Take the maximum of basic_loss and 0.0. Sum over the training examples.\n loss = tf.reduce_sum(tf.maximum(basic_loss,0))\n ### END CODE HERE ###\n \n return loss", "_____no_output_____" ], [ "with tf.Session() as test:\n tf.set_random_seed(1)\n y_true = (None, None, None)\n y_pred = (tf.random_normal([3, 128], mean=6, stddev=0.1, seed = 1),\n tf.random_normal([3, 128], mean=1, stddev=1, seed = 1),\n tf.random_normal([3, 128], mean=3, stddev=4, seed = 1))\n loss = triplet_loss(y_true, y_pred)\n \n print(\"loss = \" + str(loss.eval()))", "loss = 528.143\n" ] ], [ [ "**Expected Output**:\n\n<table>\n <tr>\n <td>\n **loss**\n </td>\n <td>\n 528.143\n </td>\n </tr>\n\n</table>", "_____no_output_____" ], [ "## 2 - Loading the trained model\n\nFaceNet is trained by minimizing the triplet loss. But since training requires a lot of data and a lot of computation, we won't train it from scratch here. Instead, we load a previously trained model. Load a model using the following cell; this might take a couple of minutes to run. ", "_____no_output_____" ] ], [ [ "FRmodel.compile(optimizer = 'adam', loss = triplet_loss, metrics = ['accuracy'])\nload_weights_from_FaceNet(FRmodel)", "_____no_output_____" ] ], [ [ "Here're some examples of distances between the encodings between three individuals:\n\n<img src=\"images/distance_matrix.png\" style=\"width:380px;height:200px;\">\n<br>\n<caption><center> <u> <font color='purple'> **Figure 4**:</u> <br> <font color='purple'> Example of distance outputs between three individuals' encodings</center></caption>\n\nLet's now use this model to perform face verification and face recognition! 
", "_____no_output_____" ], [ "## 3 - Applying the model", "_____no_output_____" ], [ "Back to the Happy House! Residents are living blissfully since you implemented happiness recognition for the house in an earlier assignment. \n\nHowever, several issues keep coming up: The Happy House became so happy that every happy person in the neighborhood is coming to hang out in your living room. It is getting really crowded, which is having a negative impact on the residents of the house. All these random happy people are also eating all your food. \n\nSo, you decide to change the door entry policy, and not just let random happy people enter anymore, even if they are happy! Instead, you'd like to build a **Face verification** system so as to only let people from a specified list come in. To get admitted, each person has to swipe an ID card (identification card) to identify themselves at the door. The face recognition system then checks that they are who they claim to be. ", "_____no_output_____" ], [ "### 3.1 - Face Verification\n\nLet's build a database containing one encoding vector for each person allowed to enter the happy house. To generate the encoding we use `img_to_encoding(image_path, model)` which basically runs the forward propagation of the model on the specified image. \n\nRun the following code to build the database (represented as a python dictionary). This database maps each person's name to a 128-dimensional encoding of their face.", "_____no_output_____" ] ], [ [ "database = {}\ndatabase[\"danielle\"] = img_to_encoding(\"images/danielle.png\", FRmodel)\ndatabase[\"younes\"] = img_to_encoding(\"images/younes.jpg\", FRmodel)\ndatabase[\"tian\"] = img_to_encoding(\"images/tian.jpg\", FRmodel)\ndatabase[\"andrew\"] = img_to_encoding(\"images/andrew.jpg\", FRmodel)\ndatabase[\"kian\"] = img_to_encoding(\"images/kian.jpg\", FRmodel)\ndatabase[\"dan\"] = img_to_encoding(\"images/dan.jpg\", FRmodel)\ndatabase[\"sebastiano\"] = img_to_encoding(\"images/sebastiano.jpg\", FRmodel)\ndatabase[\"bertrand\"] = img_to_encoding(\"images/bertrand.jpg\", FRmodel)\ndatabase[\"kevin\"] = img_to_encoding(\"images/kevin.jpg\", FRmodel)\ndatabase[\"felix\"] = img_to_encoding(\"images/felix.jpg\", FRmodel)\ndatabase[\"benoit\"] = img_to_encoding(\"images/benoit.jpg\", FRmodel)\ndatabase[\"arnaud\"] = img_to_encoding(\"images/arnaud.jpg\", FRmodel)", "_____no_output_____" ] ], [ [ "Now, when someone shows up at your front door and swipes their ID card (thus giving you their name), you can look up their encoding in the database, and use it to check if the person standing at the front door matches the name on the ID.\n\n**Exercise**: Implement the verify() function which checks if the front-door camera picture (`image_path`) is actually the person called \"identity\". You will have to go through the following steps:\n1. Compute the encoding of the image from image_path\n2. Compute the distance about this encoding and the encoding of the identity image stored in the database\n3. Open the door if the distance is less than 0.7, else do not open.\n\nAs presented above, you should use the L2 distance (np.linalg.norm). (Note: In this implementation, compare the L2 distance, not the square of the L2 distance, to the threshold 0.7.) 
", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: verify\n\ndef verify(image_path, identity, database, model):\n \"\"\"\n Function that verifies if the person on the \"image_path\" image is \"identity\".\n \n Arguments:\n image_path -- path to an image\n identity -- string, name of the person you'd like to verify the identity. Has to be a resident of the Happy house.\n database -- python dictionary mapping names of allowed people's names (strings) to their encodings (vectors).\n model -- your Inception model instance in Keras\n \n Returns:\n dist -- distance between the image_path and the image of \"identity\" in the database.\n door_open -- True, if the door should open. False otherwise.\n \"\"\"\n \n ### START CODE HERE ###\n \n # Step 1: Compute the encoding for the image. Use img_to_encoding() see example above. (≈ 1 line)\n encoding = img_to_encoding(image_path, model)\n \n # Step 2: Compute distance with identity's image (≈ 1 line)\n dist = np.linalg.norm(encoding-database[identity])\n \n # Step 3: Open the door if dist < 0.7, else don't open (≈ 3 lines)\n if None:\n print(\"It's \" + str(identity) + \", welcome home!\")\n door_open = None\n else:\n print(\"It's not \" + str(identity) + \", please go away\")\n door_open = None\n \n ### END CODE HERE ###\n \n return dist, door_open", "_____no_output_____" ] ], [ [ "Younes is trying to enter the Happy House and the camera takes a picture of him (\"images/camera_0.jpg\"). Let's run your verification algorithm on this picture:\n\n<img src=\"images/camera_0.jpg\" style=\"width:100px;height:100px;\">", "_____no_output_____" ] ], [ [ "verify(\"images/camera_0.jpg\", \"younes\", database, FRmodel)", "It's not younes, please go away\n" ] ], [ [ "**Expected Output**:\n\n<table>\n <tr>\n <td>\n **It's younes, welcome home!**\n </td>\n <td>\n (0.65939283, True)\n </td>\n </tr>\n\n</table>", "_____no_output_____" ], [ "Benoit, who broke the aquarium last weekend, has been banned from the house and removed from the database. He stole Kian's ID card and came back to the house to try to present himself as Kian. The front-door camera took a picture of Benoit (\"images/camera_2.jpg). Let's run the verification algorithm to check if benoit can enter.\n<img src=\"images/camera_2.jpg\" style=\"width:100px;height:100px;\">", "_____no_output_____" ] ], [ [ "verify(\"images/camera_2.jpg\", \"kian\", database, FRmodel)", "It's not kian, please go away\n" ] ], [ [ "**Expected Output**:\n\n<table>\n <tr>\n <td>\n **It's not kian, please go away**\n </td>\n <td>\n (0.86224014, False)\n </td>\n </tr>\n\n</table>", "_____no_output_____" ], [ "### 3.2 - Face Recognition\n\nYour face verification system is mostly working well. But since Kian got his ID card stolen, when he came back to the house that evening he couldn't get in! \n\nTo reduce such shenanigans, you'd like to change your face verification system to a face recognition system. This way, no one has to carry an ID card anymore. An authorized person can just walk up to the house, and the front door will unlock for them! \n\nYou'll implement a face recognition system that takes as input an image, and figures out if it is one of the authorized persons (and if so, who). Unlike the previous face verification system, we will no longer get a person's name as another input. \n\n**Exercise**: Implement `who_is_it()`. You will have to go through the following steps:\n1. Compute the target encoding of the image from image_path\n2. 
Find the encoding from the database that has smallest distance with the target encoding. \n - Initialize the `min_dist` variable to a large enough number (100). It will help you keep track of what is the closest encoding to the input's encoding.\n - Loop over the database dictionary's names and encodings. To loop use `for (name, db_enc) in database.items()`.\n - Compute L2 distance between the target \"encoding\" and the current \"encoding\" from the database.\n - If this distance is less than the min_dist, then set min_dist to dist, and identity to name.", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: who_is_it\n\ndef who_is_it(image_path, database, model):\n \"\"\"\n Implements face recognition for the happy house by finding who is the person on the image_path image.\n \n Arguments:\n image_path -- path to an image\n database -- database containing image encodings along with the name of the person on the image\n model -- your Inception model instance in Keras\n \n Returns:\n min_dist -- the minimum distance between image_path encoding and the encodings from the database\n identity -- string, the name prediction for the person on image_path\n \"\"\"\n \n ### START CODE HERE ### \n \n ## Step 1: Compute the target \"encoding\" for the image. Use img_to_encoding() see example above. ## (≈ 1 line)\n encoding = img_to_encoding(image_path, model)\n \n ## Step 2: Find the closest encoding ##\n \n # Initialize \"min_dist\" to a large value, say 100 (≈1 line)\n min_dist = 100\n \n # Loop over the database dictionary's names and encodings.\n for (name, db_enc) in database.items():\n \n # Compute L2 distance between the target \"encoding\" and the current \"emb\" from the database. (≈ 1 line)\n dist = np.linalg.norm(encoding-db_enc)\n\n # If this distance is less than the min_dist, then set min_dist to dist, and identity to name. (≈ 3 lines)\n if dist<min_dist:\n min_dist = dist\n identity = name\n\n ### END CODE HERE ###\n \n if min_dist > 0.7:\n print(\"Not in the database.\")\n else:\n print (\"it's \" + str(identity) + \", the distance is \" + str(min_dist))\n \n return min_dist, identity", "_____no_output_____" ] ], [ [ "Younes is at the front-door and the camera takes a picture of him (\"images/camera_0.jpg\"). Let's see if your who_it_is() algorithm identifies Younes. ", "_____no_output_____" ] ], [ [ "who_is_it(\"images/camera_0.jpg\", database, FRmodel)", "it's younes, the distance is 0.659393\n" ] ], [ [ "**Expected Output**:\n\n<table>\n <tr>\n <td>\n **it's younes, the distance is 0.659393**\n </td>\n <td>\n (0.65939283, 'younes')\n </td>\n </tr>\n\n</table>", "_____no_output_____" ], [ "You can change \"`camera_0.jpg`\" (picture of younes) to \"`camera_1.jpg`\" (picture of bertrand) and see the result.", "_____no_output_____" ], [ "Your Happy House is running well. It only lets in authorized persons, and people don't need to carry an ID card around anymore! \n\nYou've now seen how a state-of-the-art face recognition system works.\n\nAlthough we won't implement it here, here're some ways to further improve the algorithm:\n- Put more images of each person (under different lighting conditions, taken on different days, etc.) into the database. Then given a new image, compare the new face to multiple pictures of the person. This would increae accuracy.\n- Crop the images to just contain the face, and less of the \"border\" region around the face. 
This preprocessing removes some of the irrelevant pixels around the face, and also makes the algorithm more robust.\n", "_____no_output_____" ], [ "<font color='blue'>\n**What you should remember**:\n- Face verification solves an easier 1:1 matching problem; face recognition addresses a harder 1:K matching problem. \n- The triplet loss is an effective loss function for training a neural network to learn an encoding of a face image.\n- The same encoding can be used for verification and recognition. Measuring distances between two images' encodings allows you to determine whether they are pictures of the same person. ", "_____no_output_____" ], [ "Congrats on finishing this assignment! \n", "_____no_output_____" ], [ "### References:\n\n- Florian Schroff, Dmitry Kalenichenko, James Philbin (2015). [FaceNet: A Unified Embedding for Face Recognition and Clustering](https://arxiv.org/pdf/1503.03832.pdf)\n- Yaniv Taigman, Ming Yang, Marc'Aurelio Ranzato, Lior Wolf (2014). [DeepFace: Closing the gap to human-level performance in face verification](https://research.fb.com/wp-content/uploads/2016/11/deepface-closing-the-gap-to-human-level-performance-in-face-verification.pdf) \n- The pretrained model we use is inspired by Victor Sy Wang's implementation and was loaded using his code: https://github.com/iwantooxxoox/Keras-OpenFace.\n- Our implementation also took a lot of inspiration from the official FaceNet github repository: https://github.com/davidsandberg/facenet \n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
cb9f663b86448e9bd233325a2fd7bb88b1e8437f
238,136
ipynb
Jupyter Notebook
syllabus/classes/class4/class4-signe.ipynb
signekb/NLP-E21
88e221e97dd21777990050f2f586602322395dcf
[ "MIT" ]
null
null
null
syllabus/classes/class4/class4-signe.ipynb
signekb/NLP-E21
88e221e97dd21777990050f2f586602322395dcf
[ "MIT" ]
null
null
null
syllabus/classes/class4/class4-signe.ipynb
signekb/NLP-E21
88e221e97dd21777990050f2f586602322395dcf
[ "MIT" ]
null
null
null
258.843478
94,380
0.918635
[ [ [ "# Introduction to pytorch tensors\n\n---\n\nPytorch tensors, work very similar to numpy arrays and you can always convert it to a numpy array or make a numpy array into a torch tensor. The primary difference is that it is located either on your CPU or your GPU and that it contains works with the auto differential software of pytorch.", "_____no_output_____" ] ], [ [ "!pip install torch\nimport torch\nx_tensor = torch.tensor([[1., -1.], [1., -1.]])\nprint(type(x_tensor))\n\n# tensor to numpy\nx_array = x_tensor.numpy()\nprint(type(x_array))\n\n# numpy to tensor\nx_tensor2 =torch.tensor(x_array)\nprint(type(x_tensor2))\n\nprint(x_tensor2 == x_tensor)\n\n# Location of tensor\nx_tensor.device", "Collecting torch\n Downloading torch-1.9.1-cp39-cp39-manylinux1_x86_64.whl (831.4 MB)\n\u001b[K |████████████▍ | 321.7 MB 115.1 MB/s eta 0:00:05" ] ], [ [ "### Example of finding gradient", "_____no_output_____" ] ], [ [ "x = torch.tensor([[1., -1.], [1., 1.]], requires_grad=True)\n\n# sum(x_i ^ 2)\nout = (x**2).sum()\n\n# calculate the gradient \nout.backward()\n\n# What is the gradient for x\nprint(x.grad)", "tensor([[ 2., -2.],\n [ 2., 2.]])\n" ] ], [ [ "# Optimizing a polynomial\n\n---\n\n", "_____no_output_____" ] ], [ [ "x = torch.tensor([3.], requires_grad=True)\ny = 2. + x**2 -3 *x\nprint(y)\n\n# create sgd optimizer\noptimizer = torch.optim.SGD([x], lr=0.01) # lr = learning rate, SGD = stochastic gradient descent \n\n# backward pass / calcuate the gradient on the thing we want to optimize\ny.backward()\n\nprint(x.grad) # examine - the gradient at a specific point\n\n# step in the direction to minimize y\noptimizer.step()\n\n# set the gradient to zero. (This is a bit weird but required)\noptimizer.zero_grad()\n", "tensor([2.], grad_fn=<SubBackward0>)\ntensor([3.])\n" ], [ "# we see that x have improved (minimum is 1.5 so moving in the right direction)\nprint(x)\n# we see that the gradient is set to zero\nprint(x.grad)", "tensor([2.9700], requires_grad=True)\ntensor([0.])\n" ] ], [ [ "We can now do this multiple times to obtain the desired results: (i.e. to find the minimum)\n- we are stepping down the curve - one step at the time", "_____no_output_____" ] ], [ [ "for i in range(1000):\n\n # forward pass / or just calculate the outcome\n y = 2. + x**2 -3 *x\n\n # backward pass on the thing we want to minimize\n y.backward() # calculate gradient?\n\n # take a step in the \"minimize direction\"\n optimizer.step()\n\n # zero the gradient\n optimizer.zero_grad()", "_____no_output_____" ], [ "# now we have found the minimum\nprint(x)", "tensor([1.5000], requires_grad=True)\n" ] ], [ [ "\n# Fitting a Linear regression\n\n---\n\nHere we will fit a linear regression using pytorch, using the same approach as above.\n\n## 0) Prepare the data\nFirst let us create some data. 
We will do this using `sklearn`'s `make_regression`, which just make some sample data for regression.", "_____no_output_____" ] ], [ [ "!pip install sklearn\nfrom sklearn import datasets\nimport matplotlib.pyplot as plt\n\n# 0) Prepare data\nX_numpy, y_numpy = datasets.make_regression(n_samples=100, n_features=1, noise=20, random_state=4)\n\n# plot the sample\nplt.plot(X_numpy, y_numpy, 'ro')\nplt.show()\n\n# cast to float Tensor\nX = torch.tensor(X_numpy, dtype=torch.float)\ny = torch.tensor(y_numpy, dtype=torch.float)\ny = y.view(y.shape[0], 1) # view is similar to reshape it simply sets the desired shape to (100, 1)\nprint(y.shape)\nprint(y.dtype)\nprint(x.dtype)\n\n\nn_samples, n_features = X.shape", "Collecting sklearn\n Downloading sklearn-0.0.tar.gz (1.1 kB)\nCollecting scikit-learn\n Downloading scikit_learn-1.0-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl (24.7 MB)\n\u001b[K |████████████████████████████████| 24.7 MB 22 kB/s eta 0:00:01\n\u001b[?25hCollecting threadpoolctl>=2.0.0\n Downloading threadpoolctl-2.2.0-py3-none-any.whl (12 kB)\nCollecting joblib>=0.11\n Downloading joblib-1.0.1-py3-none-any.whl (303 kB)\n\u001b[K |████████████████████████████████| 303 kB 50.5 MB/s eta 0:00:01\n\u001b[?25hRequirement already satisfied: numpy>=1.14.6 in /opt/conda/lib/python3.9/site-packages (from scikit-learn->sklearn) (1.21.2)\nRequirement already satisfied: scipy>=1.1.0 in /opt/conda/lib/python3.9/site-packages (from scikit-learn->sklearn) (1.7.1)\nBuilding wheels for collected packages: sklearn\n Building wheel for sklearn (setup.py) ... \u001b[?25ldone\n\u001b[?25h Created wheel for sklearn: filename=sklearn-0.0-py2.py3-none-any.whl size=1316 sha256=c252d2284a81ffd290d5518a80a7cd6c409af280415da6b8e9e5117e6a89fe55\n Stored in directory: /home/ucloud/.cache/pip/wheels/e4/7b/98/b6466d71b8d738a0c547008b9eb39bf8676d1ff6ca4b22af1c\nSuccessfully built sklearn\nInstalling collected packages: threadpoolctl, joblib, scikit-learn, sklearn\nSuccessfully installed joblib-1.0.1 scikit-learn-1.0 sklearn-0.0 threadpoolctl-2.2.0\n" ] ], [ [ "## 1) Creating the linear model\nYou can do this using a the `nn.Linear`. This corresponds to multiplying with a matrix of beta coefficients (or weights in a neural network sense) and adding a bias. \n\n> Actually if you are very pedantic it is not really a linear transformation but an *affine* transformation, but it corresponds to what we think of as linear (regression).", "_____no_output_____" ] ], [ [ "from torch import nn\n# 1) Model\n# Linear model f = wx + b\ninput_size = n_features \noutput_size = 1\nmodel = nn.Linear(input_size, output_size) # a linear layer", "_____no_output_____" ] ], [ [ "# 2) Loss and optimizer\nHere we will create the optimizer, note we use `model.parameters` to get a list to get all of the parameters of the model.\n\nWe also use the `MSELoss()` as our criterion to minimize. 
It is simply the mean squared error (MSE) which you are used to from regression.", "_____no_output_____" ] ], [ [ "learning_rate = 0.01 # feel free to change this\n\nprint(list(model.parameters())) # only two parameters a beta and an intercept\n\ncriterion = nn.MSELoss() # mean squared error - sum(diff between predicted values and the actual values)^2\noptimizer = torch.optim.SGD(model.parameters(), lr=learning_rate) ", "[Parameter containing:\ntensor([[0.0549]], requires_grad=True), Parameter containing:\ntensor([-0.7420], requires_grad=True)]\n" ] ], [ [ "## 3) Training loop\nWell this is essentially the same as before:", "_____no_output_____" ] ], [ [ "epochs = 500 # how many times we want to repeat it\nfor epoch in range(epochs):\n # Forward pass / calc predicted y\n y_predicted = model(X)\n\n # calucate loss / MSE # optimise by minimising MSE\n loss = criterion(y_predicted, y)\n \n # Backward pass / gradient and update\n loss.backward()\n optimizer.step()\n\n # zero grad before new step\n optimizer.zero_grad()\n\n # some print to see that it is running\n if (epoch+1) % 100 == 0:\n print(f'epoch: {epoch+1}, loss = {loss.item():.4f}')", "epoch: 100, loss = 410.7770\nepoch: 200, loss = 292.8409\nepoch: 300, loss = 290.2201\nepoch: 400, loss = 290.1611\nepoch: 500, loss = 290.1597\n" ], [ "# Plot\npredicted = model(X).detach().numpy()\n\nplt.plot(X_numpy, y_numpy, 'ro')\nplt.plot(X_numpy, predicted, 'b')\nplt.show()", "_____no_output_____" ] ], [ [ "<details>\n <summary> Hmm this seems like something we should do in a class? (or why didn't you introduce nn.modules?!) </summary>\n\nYou are completely right it does work much better as a class, but maybe slightly overdoing it for the task at hand, however if you are familiar with pytorch or deep learning you might find it more convenient to create a model class. We will look at this next time as well, but in pytorch this looks something like this for the linear regression:\n\n```py\nclass LinearModel(nn.Module):\n def __init__(self, n_input_features):\n # initialize the superclass nn.module (to tell pytorch that this is a trainable model)\n super(Model, self).__init__()\n\n # create a linear layer to save in the model\n self.linear = nn.Linear(n_input_features, 1)\n\n def forward(self, x):\n y = self.linear(x)\n return y\n```\n\nYou can the use this model the same way as before:\n\n```\nmodel = LinearModel(n_input_features = 1) # create model\ny = model.forward(x)\n\n# same af before you can now calculate the loss and optimize in a loop\n```\n\nIt is a little overkill for the exercise, but it might be nice getting used to the abstraction.\n\n</details>\n\n<br /> \n", "_____no_output_____" ], [ "# Task: Logistic Regression\n\n---\n\ncreate a logistic regression, fitted either to your text features or (maybe easier to start with) the following test data.\n\nThings which might be useful to know:\n\nTorch do have a binary cross entropy loss function (`torch.nn.BCELoss`) and a sigmoid function (`torch.sigmoid`).\n\nIf you find it hard to start it might worth debating with your studygroup\n\n- what the difference is between linear regression and logistic regression\n- How many input features how many output?\n- Try to draw it as a 1-layer neural network\n\n> **Bonus**: The end of the chapter introduced the multinomial logistic regression, it is actually surprisingly easy to do in pytorch, can you implement it? 
(*Hint*: pytorch does have a softmax function) The chapter also introduces (L1 and) L2 regularization for logistic regression, can you add that to your model as well?", "_____no_output_____" ], [ "10 input features, 1 outputs (either 1 or 0)", "_____no_output_____" ] ], [ [ "# 0) prepare data\n#!pip install sklearn\nfrom sklearn import datasets\nimport matplotlib.pyplot as plt\n\n# 0) Prepare data\nX_numpy, y_numpy = datasets.make_classification(n_samples=1000, n_features=10, random_state=7)\n\n# plot the sample\nplt.plot(X_numpy, y_numpy, 'ro')\nplt.show()\n\n# cast to float Tensor\nX = torch.tensor(X_numpy, dtype=torch.float)\ny = torch.tensor(y_numpy, dtype=torch.float)\ny = y.view(y.shape[0], 1) # view is similar to reshape it simply sets the desired shape to (100, 1)\n# we change the shape of y so it can be used in nn.Linear\nprint(y.shape)\nprint(y.dtype)\nprint(x.dtype)\n\nn_samples, n_features = X.shape\n\nprint(X_numpy.shape)", "_____no_output_____" ], [ "# only plotting 2 out of 10 features\nplt.scatter(X_numpy[:, 0], X_numpy[:, 1], marker='o', c=y_numpy,\n s=25, edgecolor='k')", "_____no_output_____" ], [ "# 1) create model\nfrom torch import nn\n# 1) Linear model\ninput_size = n_features # 10 features\noutput_size = 1 # one output which tells us the prop\nmodel = nn.Linear(input_size, output_size) # a linear layer", "_____no_output_____" ], [ "# 2) optimisation and loss\nlearning_rate = 0.05 # feel free to change this\n\nprint(list(model.parameters())) # only two parameters a beta and an intercept\n\ncriterion = torch.nn.BCELoss() # binary cross entropy\noptimizer = torch.optim.SGD(model.parameters(), lr=learning_rate) ", "[Parameter containing:\ntensor([[-0.0412, 0.1240, -0.1501, -0.2141, 0.0993, -0.1883, -0.2548, -0.0203,\n -0.1658, -0.0774]], requires_grad=True), Parameter containing:\ntensor([-0.1310], requires_grad=True)]\n" ], [ "# 3) training loop\nepochs = 500 # how many times we want to repeat it\nfor epoch in range(epochs):\n # Forward pass / calc predicted y\n y_predicted = torch.sigmoid(model(X))\n\n # calucate loss / Binary cross entropy \n loss = criterion(y_predicted, y)\n \n # Backward pass / gradient and update\n loss.backward()\n optimizer.step()\n\n # zero grad before new step\n optimizer.zero_grad()\n\n # some print to see that it is running\n if (epoch+1) % 100 == 0:\n print(f'epoch: {epoch+1}, loss = {loss.item():.4f}')", "epoch: 100, loss = 0.2091\nepoch: 200, loss = 0.2063\nepoch: 300, loss = 0.2043\nepoch: 400, loss = 0.2028\nepoch: 500, loss = 0.2016\n" ], [ "# only plotting 2 out of 10 features\nplt.scatter(X_numpy[:, 0], X_numpy[:, 1], marker='o', c=predicted,\n s=25, edgecolor='k')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
cb9f73c97ce25ab7ddeddef50aa7ab04f4014ef5
10,382
ipynb
Jupyter Notebook
Flat_lab2.ipynb
rachana2522/FLAT-LAB
f63632e04f4a9f0bf90923fe272db597d386df89
[ "MIT" ]
null
null
null
Flat_lab2.ipynb
rachana2522/FLAT-LAB
f63632e04f4a9f0bf90923fe272db597d386df89
[ "MIT" ]
null
null
null
Flat_lab2.ipynb
rachana2522/FLAT-LAB
f63632e04f4a9f0bf90923fe272db597d386df89
[ "MIT" ]
null
null
null
25.955
226
0.412252
[ [ [ "<a href=\"https://colab.research.google.com/github/rachana2522/FLAT-LAB/blob/main/Flat_lab2.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ] ], [ [ "pip install automata-lib #install automata.lib", "Collecting automata-lib\n Downloading automata_lib-5.0.0-py3-none-any.whl (32 kB)\nRequirement already satisfied: pydot in /usr/local/lib/python3.7/dist-packages (from automata-lib) (1.3.0)\nRequirement already satisfied: pyparsing>=2.1.4 in /usr/local/lib/python3.7/dist-packages (from pydot->automata-lib) (3.0.6)\nInstalling collected packages: automata-lib\nSuccessfully installed automata-lib-5.0.0\n" ], [ "# Original DFA\nfrom automata.fa.dfa import DFA\ndfa = DFA(\n states={'A','B','C','D','E'},\n input_symbols={'0','1'},\n transitions={\n 'A':{'1':'D','0':'B'},\n 'B':{'1':'E', '0':'C'},\n 'C':{'1':'E', '0':'B'},\n 'D':{'1':'E','0':'C'},\n 'E':{'1':'E','0':'E'},\n },\n initial_state='A',\n final_states={'E'}\n)\n", "_____no_output_____" ], [ "#original DFA\ndfa.show_diagram(path='./fda1.png')\n", "_____no_output_____" ], [ "#minimal DFA\nminimal_dfa = dfa.minify()\nminimal_dfa.show_diagram(path='./dfa2.png')", "_____no_output_____" ], [ "#complement DFA\ncdfa = ~minimal_dfa\ncdfa.show_diagram(path='./dfa3.png')", "_____no_output_____" ], [ "#Original DFA\nfrom automata.fa.dfa import DFA\ndfa = DFA(\n states={'A', 'B', 'C', 'D', 'E', 'F'},\n input_symbols={'0', '1'},\n transitions={\n 'A':{'1':'C','0':'B'},\n 'B':{'1':'D','0':'A'},\n 'C':{'1':'F','0':'E'},\n 'D':{'1':'F','0':'E'},\n 'E':{'1':'F','0':'E'},\n 'F':{'1':'F','0':'F'},\n },\n initial_state='A',\n final_states={'C', 'D', 'E'}\n)", "_____no_output_____" ], [ "#Original DFA\ndfa.show_diagram(path='./dfa4.png')", "_____no_output_____" ], [ "#Minimal DFA\nminimal_dfa = dfa.minify()\nminimal_dfa.show_diagram(path='./dfa5.png')", "_____no_output_____" ], [ "#complement DFA\ncdfa=~minimal_dfa\ncdfa.show_diagram(path='./dfa6.png')", "_____no_output_____" ], [ "from automata.fa.nfa import NFA\nnfa = NFA(\n states={'A', 'B', 'C', 'D', 'E'},\n input_symbols={'0', '1'},\n transitions={\n 'A':{'0':{'A', 'B', 'D'}},\n 'B':{'1':'C'},\n 'C':{'0':'A'},\n 'D':{'1':'E'},\n 'E':{'1':'E','0':'E'},\n },\n initial_state='A',\n final_states={'E'}\n)\ndfa = DFA.from_nfa(nfa)\ndfa.show_diagram(path='./dfa7.png')", "_____no_output_____" ], [ "#Original NFA\ndfa.show_diagram(path='./dfa7.png')", "_____no_output_____" ], [ "#Minimal DFA\nminimal_dfa = dfa.minify()\nminimal_dfa.show_diagram(path='./dfa8.png')", "_____no_output_____" ], [ "#complement DFA\ncdfa=~minimal_dfa\ncdfa.show_diagram(path='./dfa9.png')", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb9f8d97d55352cce318ded12e2e81744df153db
7,602
ipynb
Jupyter Notebook
{{ cookiecutter.folder_name }}/docs/source/notebooks/0.0-Notebook-Template.ipynb
apra93/apra-data-science
c8b0b81f14d8da109e739ca110471a8807a68877
[ "MIT" ]
3
2018-05-29T21:17:26.000Z
2019-10-20T18:18:22.000Z
{{ cookiecutter.folder_name }}/docs/source/notebooks/0.0-Notebook-Template.ipynb
apra93/apra-data-science
c8b0b81f14d8da109e739ca110471a8807a68877
[ "MIT" ]
31
2017-12-03T05:09:06.000Z
2019-07-18T15:22:51.000Z
{{ cookiecutter.folder_name }}/docs/source/notebooks/0.0-Notebook-Template.ipynb
apra93/apra-data-science
c8b0b81f14d8da109e739ca110471a8807a68877
[ "MIT" ]
null
null
null
24.44373
257
0.566035
[ [ [ "# 0.0 Notebook Template\n\n--*Set the notebook number, describe the background of the project, the nature of the data, and what analyses will be performed.*--", "_____no_output_____" ], [ "## Jupyter Extensions", "_____no_output_____" ], [ "Load [watermark](https://github.com/rasbt/watermark) to see the state of the machine and environment that's running the notebook. To make sense of the options, take a look at the [usage](https://github.com/rasbt/watermark#usage) section of the readme.", "_____no_output_____" ] ], [ [ "# Load `watermark` extension\n%load_ext watermark\n# Display the status of the machine and packages. Add more as necessary.\n%watermark -v -n -m -g -b -t -p numpy,pandas,matplotlib,seaborn", "_____no_output_____" ] ], [ [ "Load [autoreload](https://ipython.org/ipython-doc/3/config/extensions/autoreload.html) which will always reload modules marked with `%aimport`.\n\nThis behavior can be inverted by running `autoreload 2` which will set everything to be auto-reloaded *except* for modules marked with `%aimport`.", "_____no_output_____" ] ], [ [ "# Load `autoreload` extension\n%load_ext autoreload\n# Set autoreload behavior\n%autoreload 1", "_____no_output_____" ] ], [ [ "Load `matplotlib` in one of the more `jupyter`-friendly [rich-output modes](https://ipython.readthedocs.io/en/stable/interactive/plotting.html). Some options (that may or may not have worked) are `inline`, `notebook`, and `gtk`.", "_____no_output_____" ] ], [ [ "# Set the matplotlib mode\n%matplotlib inline", "_____no_output_____" ] ], [ [ "## Imports", "_____no_output_____" ], [ "Static imports that shouldn't necessarily change throughout the notebook.", "_____no_output_____" ] ], [ [ "# Standard library imports\nimport logging\n\n# Third party\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\n# tqdm.tqdm wraps generators and displays a progress bar:\n# `for i in tqdm(range(10)): ...`\nfrom tqdm import tqdm", "_____no_output_____" ] ], [ [ "Local imports that may or may not be autoreloaded. This section contains things that will likely have to be re-imported multiple times, and have additions or subtractions made throughout the project.", "_____no_output_____" ] ], [ [ "# Constants to be used throughout the package\n%aimport {{ cookiecutter.import_name }}.constants\n# Import the data subdirectories\nfrom {{ cookiecutter.import_name }}.constants import DIR_DATA_EXT, DIR_DATA_INT, DIR_DATA_PROC, DIR_DATA_RAW\n# Utility functions\n%aimport {{ cookiecutter.import_name }}.utils\nfrom {{ cookiecutter.import_name }}.utils import setup_logging", "_____no_output_____" ] ], [ [ "## Initial Setup", "_____no_output_____" ], [ "Set [seaborn defaults](https://seaborn.pydata.org/generated/seaborn.set.html) for matplotlib.", "_____no_output_____" ] ], [ [ "sns.set()", "_____no_output_____" ] ], [ [ "Set up the logger configuration to something more useful than baseline. 
Creates log files for the different log levels in the `logs` directory.\n\nSee `logging.yml` for the exact logging configuration.", "_____no_output_____" ] ], [ [ "# Run base logger setup\nsetup_logging()\n# Define a logger object\nlogger = logging.getLogger(\"{{ cookiecutter.import_name }}\")", "_____no_output_____" ] ], [ [ "## Global Definitions", "_____no_output_____" ] ], [ [ "# data_str = \"\" # Data filename\n# data_path = DIR_DATA_RAW / data_str # Full path to the data", "_____no_output_____" ] ], [ [ "## Get the Data", "_____no_output_____" ] ], [ [ "# data = pd.read_csv(str(data_path), delim_whitespace=False, index_col=0)\n# logger.info(\"Loaded dataset '{0}' from '{1}'\".format(data_path.name, data_path.parent.name))", "_____no_output_____" ] ], [ [ "## Preprocessing ", "_____no_output_____" ] ], [ [ "# data_norm = (data - data.mean()) / data.std()\n# logger.info(\"Processed data '{0}'\".format(data_path.stem))", "_____no_output_____" ] ], [ [ "## Plotting", "_____no_output_____" ] ], [ [ "# [plt.plot(data_norm[i,:]) for i in range(len(data_norm))]\n# plt.show()", "_____no_output_____" ], [ "# ...", "_____no_output_____" ], [ "# ...", "_____no_output_____" ], [ "# ...", "_____no_output_____" ] ], [ [ "## Hints\n\nVarious hints for working on `jupyter notebooks`. Should probably be removed when a notebook is completed.\n\nGeneral stuff:\n- To make logging even lazier, set `print = logger.info`, and then `print` away!\n- The `!` can be used to run shell commands from within the notebook (ex. `!which conda`)\n- Use `assert` liberally - this isn't a script and it's very readable.\n\nCheatsheets:\n- [Markdown](https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ] ]
cb9f9281ff8f102520bc62097449415700934e99
155,103
ipynb
Jupyter Notebook
ex01_obj_task.ipynb
dariomalchiodi/python-DS4EBF
e8134f890f9053d2dacabf4acbde3e5a8116fa0a
[ "Apache-2.0" ]
null
null
null
ex01_obj_task.ipynb
dariomalchiodi/python-DS4EBF
e8134f890f9053d2dacabf4acbde3e5a8116fa0a
[ "Apache-2.0" ]
null
null
null
ex01_obj_task.ipynb
dariomalchiodi/python-DS4EBF
e8134f890f9053d2dacabf4acbde3e5a8116fa0a
[ "Apache-2.0" ]
null
null
null
354.926773
131,462
0.905463
[ [ [ "# RISK!\n\nSimulate a game inspired to the pupular Risk! using data of real countries.\n\n### Setup\n<code>pip install pycountry</code>", "_____no_output_____" ] ], [ [ "import geopandas as gpd\nimport matplotlib.pyplot as plt", "_____no_output_____" ] ], [ [ "## Data", "_____no_output_____" ] ], [ [ "world = gpd.read_file(gpd.datasets.get_path('naturalearth_lowres'))\nworld = world[world.name!=\"Antarctica\"]", "_____no_output_____" ], [ "world.head()", "_____no_output_____" ], [ "world.plot(figsize=(18,9), column='iso_a3')\nplt.show()", "_____no_output_____" ] ], [ [ "# TASK 1: init countries\nCreate a class for representing countries as objects. Each country must have a *name*, *population*, *gdp*, and *poligon* coordinates.\nCountries must expose the following methods:\n- <code>country.neighbours()</code>: returns countries objects that are neighbours of the country at hand\n- <code>country.power()</code>: returns a power score based on pupulation and gdp", "_____no_output_____" ], [ "### Hint for calculating neighbours", "_____no_output_____" ] ], [ [ "def neighbour(df, country_id):\n c = df.iloc[country_id]\n neighbours = df[~df.geometry.disjoint(c.geometry)]\n return neighbours", "_____no_output_____" ], [ "neighbour(world, 78)", "_____no_output_____" ] ], [ [ "# TASK 2: combat\nAdd to countries the following methods:\n- <code>country.attack(opponent)</code>: return True if the attack is successfull. Implement a random choice based on the power of the two opponents\n- <code>country.conquer(target)</code>: the country add the target gdp and population and merge borders", "_____no_output_____" ], [ "### Hint for merging borders", "_____no_output_____" ] ], [ [ "world.iloc[78].geometry", "_____no_output_____" ], [ "world.iloc[8].geometry", "_____no_output_____" ], [ "world.iloc[78].geometry.union(world.iloc[8].geometry)", "_____no_output_____" ] ], [ [ "# TASK 3: game\nCreate a game class for representing a RISK! simulation. For each turn, each country randomnly attacks its neighbours. If the attack is successfull, the target is conquered.\nImplement the method:\n- <code>game.to_df()</code>: returns the state of the world as a geopandas dataframe", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ] ]
cb9f96737ecad6419a1c665d541b5417fb55725e
188,875
ipynb
Jupyter Notebook
product-wheel/Data Processing.ipynb
eng-rolebot/product-wheel
cf02423e81e9ff395e93aeb7035061c7783f4ff3
[ "MIT" ]
null
null
null
product-wheel/Data Processing.ipynb
eng-rolebot/product-wheel
cf02423e81e9ff395e93aeb7035061c7783f4ff3
[ "MIT" ]
3
2020-06-17T18:33:48.000Z
2020-06-17T18:34:19.000Z
product-wheel/Data Processing.ipynb
eng-rolebot/product-wheel
cf02423e81e9ff395e93aeb7035061c7783f4ff3
[ "MIT" ]
1
2020-04-11T21:22:13.000Z
2020-04-11T21:22:13.000Z
25.951498
1,412
0.344334
[ [ [ "## In the product wheel, we are trying to use thr transition sheets loss as our standards to reduce the cost", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport numpy as np", "_____no_output_____" ] ], [ [ "## Importing the first dataframe: ordercolor transition report", "_____no_output_____" ] ], [ [ "df1 = pd.read_excel('../data/OrderColorTransitionsReport 1_1_17 - 4_22_19.xlsx',\n header=205, usecols=[0,1,4,5,7,8,9,11,12])", "_____no_output_____" ], [ "df1", "_____no_output_____" ], [ "## take a look at the datatype\ndf1.dtypes", "_____no_output_____" ], [ "## delete , and convert the object to int64\ndf1['AGrade Sheets'] = pd.to_numeric(df1['AGrade Sheets'].str.replace(',', ''))\ndf1['Transition Sheets'] = pd.to_numeric(df1['Transition Sheets'].str.replace(',', ''))\ndf1['Total sheets'] = pd.to_numeric(df1['Total Sheets'].str.replace(',', ''))\n## convert the NaN to 0\ndf1 = df1.fillna(0)", "_____no_output_____" ], [ "df1", "_____no_output_____" ] ], [ [ "## Transition loss for df1", "_____no_output_____" ] ], [ [ "ndf1 = df1.groupby(['Color','Next Color Code']).agg({'Transition Sheets':\n ['mean', 'std', 'min', 'max', 'count'],'AGrade Sheets':['sum']})\nndf1.columns = ['ts_mean', 'ts_std', 'ts_min', 'ts_max', 't_count','AGS_sum']\nndf1 = ndf1.reset_index()\nndf1", "_____no_output_____" ], [ "for i in range(len(ndf1)):\n for n in range(len(ndf1)):\n if ndf1.iloc[i,0] == ndf1.iloc[n,1] and ndf1.iloc[i,1] == ndf1.iloc[n,0]:\n print(i,n)", "0 426\n1 2124\n2 1227\n6 510\n7 527\n8 595\n9 684\n10 818\n11 993\n12 1025\n13 1062\n14 1084\n15 1189\n16 1228\n17 1291\n18 1813\n19 1954\n21 2384\n24 596\n25 511\n26 658\n28 132\n30 1041\n31 1085\n32 1713\n36 1955\n39 429\n40 997\n42 47\n44 660\n46 2359\n47 42\n50 1086\n51 2209\n57 430\n58 431\n59 1453\n63 375\n65 1337\n75 1699\n80 80\n85 1498\n92 568\n98 1369\n106 1474\n117 2216\n119 119\n122 508\n123 893\n125 169\n128 1199\n129 1396\n130 2031\n132 28\n133 133\n135 260\n137 693\n138 760\n140 1026\n141 1089\n146 1927\n147 2127\n148 2274\n160 160\n163 1314\n164 1621\n169 125\n170 251\n171 364\n187 1956\n188 2258\n189 2327\n191 498\n205 432\n208 1091\n222 1806\n226 400\n230 1732\n233 569\n237 1418\n239 1508\n244 244\n246 1052\n251 170\n253 253\n260 135\n261 261\n262 299\n264 378\n265 412\n266 433\n268 661\n269 694\n271 838\n273 928\n275 1092\n276 1229\n277 1347\n278 1455\n279 1532\n280 1947\n282 2087\n283 2130\n285 2210\n286 2259\n287 2328\n292 434\n295 1094\n297 1594\n299 262\n300 300\n301 435\n302 598\n303 616\n305 839\n306 929\n307 1095\n308 1268\n309 1456\n310 1533\n312 1790\n314 1957\n317 2131\n318 2231\n321 436\n324 709\n328 1096\n330 1958\n333 2254\n336 796\n343 343\n345 1370\n348 1839\n350 437\n352 531\n354 930\n355 1043\n356 1098\n357 1190\n358 1215\n359 1230\n360 2133\n361 2260\n364 171\n368 1099\n369 1254\n371 1777\n375 63\n378 264\n382 1342\n388 1657\n395 584\n400 226\n403 403\n404 413\n405 512\n406 585\n408 1100\n409 1734\n412 265\n413 404\n414 414\n415 586\n416 1073\n417 1101\n418 1255\n420 1597\n421 1624\n423 2007\n424 2077\n425 2107\n426 0\n429 39\n430 57\n431 58\n432 205\n433 266\n434 292\n435 301\n436 321\n437 350\n439 439\n440 513\n442 555\n443 662\n444 695\n446 742\n447 764\n448 820\n450 847\n452 925\n453 932\n454 973\n455 1000\n456 1028\n457 1044\n458 1063\n459 1102\n461 1231\n462 1244\n465 1281\n466 1293\n467 1333\n468 1457\n471 1500\n473 1534\n475 1577\n476 1579\n478 1638\n479 1669\n480 1752\n482 1876\n483 1923\n485 1983\n488 2134\n492 2362\n493 2381\n494 
2406\n498 191\n500 500\n501 876\n508 122\n510 6\n511 25\n512 405\n513 440\n514 532\n515 663\n517 1105\n518 1191\n520 1350\n522 1815\n524 2088\n525 2135\n526 2341\n527 7\n528 685\n531 352\n532 514\n535 1107\n537 1639\n544 799\n545 1053\n546 1385\n548 1501\n549 1511\n555 442\n556 578\n557 725\n558 1045\n559 1108\n560 1960\n562 1371\n563 1753\n566 1961\n568 92\n569 233\n576 2136\n578 556\n579 765\n584 395\n585 406\n586 415\n588 1074\n591 1598\n592 1735\n595 8\n596 24\n598 302\n599 664\n600 726\n603 1065\n604 1110\n607 1536\n608 1714\n609 1817\n611 2262\n612 2387\n616 303\n629 790\n632 1679\n633 2065\n638 1564\n639 1796\n649 915\n650 1111\n658 26\n660 44\n661 268\n662 443\n663 515\n664 599\n665 711\n666 728\n667 871\n668 933\n669 995\n670 1112\n671 1232\n672 1294\n673 1327\n674 1458\n677 1818\n678 1847\n679 1964\n680 2292\n681 2311\n682 2363\n684 9\n685 528\n686 712\n687 1113\n688 1351\n690 1819\n691 2089\n692 2138\n693 137\n694 269\n695 444\n697 821\n700 1984\n703 2304\n704 2329\n705 2364\n709 324\n711 665\n712 686\n713 729\n714 767\n717 975\n718 1114\n725 557\n726 600\n728 666\n729 713\n730 768\n731 823\n732 976\n733 1115\n734 1283\n735 1296\n741 2408\n742 446\n751 935\n752 1116\n760 138\n764 447\n765 579\n767 714\n768 730\n769 936\n770 1117\n771 1193\n774 1318\n776 1539\n777 1640\n778 1716\n779 1887\n781 1966\n782 2140\n783 2330\n784 2342\n785 2366\n786 2382\n787 2389\n790 629\n791 1363\n796 336\n799 544\n802 1036\n806 1659\n810 1848\n818 10\n820 448\n821 697\n823 731\n824 824\n827 937\n828 978\n829 1066\n830 1119\n831 1353\n832 2141\n833 2185\n835 2367\n838 271\n839 305\n843 2186\n847 450\n850 938\n851 1067\n852 1121\n853 1233\n854 1986\n855 2142\n857 2278\n859 2390\n863 1436\n868 1842\n871 667\n876 501\n883 1607\n887 1930\n893 123\n902 1023\n915 649\n925 452\n928 273\n929 306\n930 354\n932 453\n933 668\n935 751\n936 769\n937 827\n938 850\n939 979\n940 1047\n941 1123\n942 1220\n943 1234\n945 1459\n949 1821\n951 1967\n952 1987\n955 2143\n956 2188\n957 2343\n958 2368\n966 1626\n967 1680\n969 2297\n973 454\n975 717\n976 732\n978 828\n979 939\n980 1125\n981 1297\n983 1460\n985 1968\n986 2092\n987 2234\n988 2265\n990 2391\n993 11\n995 669\n997 40\n1000 455\n1003 1003\n1004 1030\n1005 1126\n1006 1216\n1007 1221\n1008 1246\n1009 1461\n1010 1541\n1012 1822\n1014 1858\n1015 2093\n1016 2145\n1018 2344\n1023 902\n1025 12\n1026 140\n1028 456\n1030 1004\n1033 2146\n1034 2345\n1036 802\n1037 1374\n1039 1849\n1041 30\n1043 355\n1044 457\n1045 558\n1047 940\n1049 1823\n1052 246\n1053 545\n1058 1515\n1062 13\n1063 458\n1065 603\n1066 829\n1067 851\n1068 1128\n1069 1298\n1070 1542\n1071 2266\n1073 416\n1074 588\n1084 14\n1085 31\n1086 50\n1089 141\n1091 208\n1092 275\n1094 295\n1095 307\n1096 328\n1098 356\n1099 368\n1100 408\n1101 417\n1102 459\n1105 517\n1107 535\n1108 559\n1110 604\n1111 650\n1112 670\n1113 687\n1114 718\n1115 733\n1116 752\n1117 770\n1119 830\n1121 852\n1123 941\n1125 980\n1126 1005\n1128 1068\n1129 1129\n1130 1195\n1131 1217\n1132 1222\n1133 1235\n1134 1258\n1135 1271\n1136 1284\n1137 1299\n1138 1307\n1140 1355\n1143 1462\n1147 1543\n1149 1576\n1150 1578\n1155 1737\n1156 1757\n1158 1794\n1159 1798\n1160 1850\n1161 1888\n1163 1970\n1164 1990\n1165 2017\n1166 2037\n1169 2094\n1170 2147\n1171 2189\n1174 2267\n1175 2280\n1176 2306\n1177 2313\n1178 2333\n1179 2346\n1180 2369\n1181 2383\n1182 2393\n1186 1186\n1189 15\n1190 357\n1191 518\n1193 771\n1195 1130\n1196 1825\n1197 2148\n1199 128\n1204 1308\n1211 2038\n1215 358\n1216 1006\n1217 1131\n1220 942\n1221 1007\n1222 1132\n1223 1248\n1226 
2347\n1227 2\n1228 16\n1229 276\n1230 359\n1231 461\n1232 671\n1233 853\n1234 943\n1235 1133\n1236 1272\n1244 462\n1246 1008\n1248 1223\n1249 2150\n1254 369\n1255 418\n1258 1134\n1260 1260\n1263 2039\n1268 308\n1271 1135\n1272 1236\n1274 2255\n1277 1745\n1279 2286\n1281 465\n1283 734\n1284 1136\n1285 1642\n1286 1889\n1288 2190\n1291 17\n1293 466\n1294 672\n1296 735\n1297 981\n1298 1069\n1299 1137\n1300 1300\n1301 1719\n1302 1826\n1304 2095\n1305 2151\n1307 1138\n1308 1204\n1311 2040\n1314 163\n1318 774\n1320 1320\n1321 1628\n1324 2080\n1327 673\n1329 1329\n1331 1780\n1332 2041\n1333 467\n1337 65\n1342 382\n1347 277\n1350 520\n1351 688\n1353 831\n1355 1140\n1357 2152\n1363 791\n1369 98\n1370 345\n1371 562\n1374 1037\n1376 1376\n1377 1518\n1385 546\n1393 1591\n1394 2096\n1395 2222\n1396 129\n1399 1399\n1401 1865\n1402 2042\n1404 2110\n1412 1519\n1414 1852\n1418 237\n1432 2043\n1436 863\n1442 1521\n1452 1827\n1453 59\n1455 278\n1456 309\n1457 468\n1458 674\n1459 945\n1460 983\n1461 1009\n1462 1143\n1463 1544\n1464 1720\n1465 1972\n1466 2098\n1467 2153\n1473 1781\n1474 106\n1479 1609\n1490 1610\n1498 85\n1500 471\n1501 548\n1508 239\n1511 549\n1515 1058\n1518 1377\n1519 1412\n1521 1442\n1528 1759\n1532 279\n1533 310\n1534 473\n1536 607\n1539 776\n1541 1010\n1542 1070\n1543 1147\n1544 1463\n1545 1931\n1546 1973\n1549 2154\n1550 2268\n1551 2334\n1552 2395\n1564 638\n1567 1800\n1571 1782\n1572 1873\n1573 2047\n1576 1149\n1577 475\n1578 1150\n1579 476\n1591 1393\n1592 2048\n1594 297\n1597 420\n1598 591\n1602 2081\n1603 2111\n1607 883\n1609 1479\n1610 1490\n1612 1663\n1614 2179\n1621 164\n1624 421\n1626 966\n1628 1321\n1633 2022\n1634 2082\n1635 2112\n1638 478\n1639 537\n1640 777\n1642 1285\n1650 1650\n1657 388\n1659 806\n1663 1612\n1665 1707\n1667 2023\n1668 2069\n1669 479\n1673 2121\n1678 1708\n1679 632\n1680 967\n1686 2070\n1689 1722\n1696 1709\n1699 75\n1707 1665\n1708 1678\n1709 1696\n1710 1710\n1713 32\n1714 608\n1716 778\n1719 1301\n1720 1464\n1722 1689\n1723 1828\n1724 1891\n1732 230\n1734 409\n1735 592\n1737 1155\n1745 1277\n1746 1746\n1752 480\n1753 563\n1757 1156\n1759 1528\n1760 1784\n1762 1916\n1764 1943\n1766 2024\n1777 371\n1780 1331\n1781 1473\n1782 1571\n1784 1760\n1785 1875\n1786 2053\n1787 2158\n1790 312\n1794 1158\n1796 639\n1798 1159\n1800 1567\n1804 2410\n1806 222\n1812 2026\n1813 18\n1815 522\n1817 609\n1818 677\n1819 690\n1821 949\n1822 1012\n1823 1049\n1825 1196\n1826 1302\n1827 1452\n1828 1723\n1829 2100\n1830 2159\n1831 2269\n1832 2351\n1833 2373\n1834 2396\n1839 348\n1842 868\n1847 678\n1848 810\n1849 1039\n1850 1160\n1852 1414\n1853 1853\n1855 2160\n1858 1014\n1861 2161\n1862 2237\n1863 2352\n1865 1401\n1868 2072\n1873 1572\n1875 1785\n1876 482\n1887 779\n1888 1161\n1889 1286\n1891 1724\n1916 1762\n1922 2256\n1923 483\n1924 2102\n1927 146\n1930 887\n1931 1545\n1943 1764\n1947 280\n1954 19\n1955 36\n1956 187\n1957 314\n1958 330\n1960 560\n1961 566\n1964 679\n1966 781\n1967 951\n1968 985\n1970 1163\n1972 1465\n1973 1546\n1975 2162\n1978 2374\n1983 485\n1984 700\n1986 854\n1987 952\n1990 1164\n1993 2163\n1995 2270\n1997 2375\n1998 2397\n2007 423\n2017 1165\n2022 1633\n2023 1667\n2024 1766\n2026 1812\n2027 2027\n2031 130\n2037 1166\n2038 1211\n2039 1263\n2040 1311\n2041 1332\n2042 1402\n2043 1432\n2047 1573\n2048 1592\n2053 1786\n2055 2055\n2065 633\n2069 1668\n2070 1686\n2072 1868\n2077 424\n2080 1324\n2081 1602\n2082 1634\n2083 2113\n2087 282\n2088 524\n2089 691\n2092 986\n2093 1015\n2094 1169\n2095 1304\n2096 1394\n2098 1466\n2100 1829\n2102 1924\n2103 2166\n" ] ], [ [ 
"# Data cleaning ", "_____no_output_____" ], [ "1. For the unsusal color code", "_____no_output_____" ] ], [ [ "## To see how many unique color in the color and next color code.\nprint(len(df1['Color'].unique()))\nprint(len(df1['Next Color Code'].unique()))", "267\n269\n" ], [ "## to see the unusaul color code in the data\nprint(list(set(df1['Color'].unique()) - set(df1['Next Color Code'].unique())))\nprint(list(set(df1['Next Color Code'].unique()) - set(df1['Color'].unique())))", "[]\n['dv', 'DP']\n" ], [ "uc = ['DP', 'dv']\ndf1.loc[df1['Next Color Code'].isin(['DP', 'dv'])]\n## Find the unsual color code in the data frame and delete it\nlen(df1)\nnewdf1 = df1.set_index('Next Color Code')\nnewdf1 = newdf1.drop(['DP', 'dv'], axis=0)\nnewdf2 = df1.set_index('Next Color Code')\nnewdf2 = newdf2.drop(list(set(df1['Next Color Code'].unique()) - set(df1['Color'].unique())), axis=0)\nlen(newdf1)", "_____no_output_____" ] ], [ [ "Calculating the # of differnt cast oder in unique color", "_____no_output_____" ] ], [ [ "## Creating a new dataframe without the unusual color code named newdf1\n## define a function to delete the unusual data\ndef delete_unusualcolor(path):\n df1 = pd.read_excel(path,\n header=205, usecols=[0,1,4,5,7,8,9,11,12])\n print('the length of df1 is', len(df1))\n print('the number of unique color', len(df1['Color'].unique()))\n print('the number of unique next color', len(df1['Next Color Code'].unique()))\n print('the unique color exists in the color',\n (set(df1['Color'].unique()) - set(df1['Next Color Code'].unique())))\n print('the unique color exists in the next color',\n list(set(df1['Next Color Code'].unique()) - set(df1['Color'].unique())))\n newdf1 = df1.set_index('Next Color Code')\n newdf1 = newdf1.drop(list(set(df1['Next Color Code'].unique()) - set(df1['Color'].unique())), axis=0)\n print('the length of newdf1 is', len(newdf1))", "_____no_output_____" ], [ "## nosetest\nimport pandas as pd\n#import delete_unusualcolor as du\n\ndef test_delete_unusualcolor():\n path = '../data/OrderColorTransitionsReport 1_1_17 - 4_22_19.xlsx'\n du(path)\n df1 = pd.read_excel('../data/OrderColorTransitionsReport 1_1_17 - 4_22_19.xlsx')\n assert df1.empty == False,'you cannot put in an empty datafrmae'", "_____no_output_____" ] ], [ [ "## Function for the average transition loss", "_____no_output_____" ] ], [ [ "for z in range(len(df1['Color'].unique())):\n list1 = list(df1['Color'].unique())\n color1 = list1[z]\n list2 = list(df1['Next Color Code'].unique())\n for z2 in range(len(df1['Next Color Code'].unique())):\n color2 = list2[z2]\n Select_3(color1, color2)", "1M CW\n1M WN\n2W IP\n2W NF\n2W VY\n3D CW\n3D IP\n3D VY\n3D DA\n3D DD\n3D DY\n3D EZ\n3D FN\n3D GB\n3D GG\n3D GS\n3D GW\n3D HA\n3D LH\n3D TY\n3D WI\n3D ZY\n3N GW\n3Y1 DY\n3Y1 DJ\n3Z7 DA\n3Z7 EY\n3Z7 W7\n4B VY\n4B GW\n4B W7\n4B AN\n4B BN\n4B GN\n4B SN\n4B SV\n4B TO\n4B UIM\n4B XE\n4Z1 CW\n4Z1 GC\n4Z1 PG\n4Z2 DA\n4Z2 EY\n4Z2 4Z3\n4Z2 MN\n4Z2 ZW\n4Z3 CW\n4Z3 GW\n4Z3 EY\n4Z3 ZW\n4Z3 4Z2\n4Z3 YL\n5A 9P\n5A B3\n5A FM\n5A ZT\n6H2 CW\n6H3 CW\n6H3 NL\n6N FJ\n7B B5\n7B BI\n7B CN\n7B GV\n7B MH\n7B MS\n7B ZC\n7B ZQ\n7M B5\n7R 7B\n7R AO\n7R DT\n7R FS\n7R NS\n7R SM\n7T BN\n7T CN\n7T MH\n7T GM\n9H B3\n9H FS\n9H 9H\n9H GY\n9H MW\n9H OA\n9I B3\n9I ZC\n9I AO\n9I 9H\n9I AR\n9I BE\n9I CC\n9I DO\n9I VN\n9I VX\n9J 9I\n9J FT\n9J MP\n9J TA\n9P B3\nAA FM\nAA ZT\nAA FJ\nAA FS\nAA CC\nAA ER\nAA NR\nAA VH\nAA WXA\nAA XN\nAB ZC\nAB EV\nAB FV\nAB MV\nAB SS\nAB W2\nAB W3\nAE B3\nAE FT\nAE AA\nAE AE\nAE CX\nAE CXA\nAE N3\nAH AW\nAH BH\nAI W7\nAI FV\nAI 
HX\nAI MT\nAI WC\nAN WN\nAN GG\nAN GW\nAN ZY\nAN AN\nAN BN\nAN SV\nAN PG\nAN MN\nAN BI\nAN 4B\nAN B4\nAN FA\nAN FK\nAN FP\nAN RR\nAN VM\nAN ZN\nAO FM\nAO BE\nAO FT\nAO PN\nAQ GW\nAQ BQ\nAQ FL\nAQ R3\nAQ WG\nAR CW\nAR ZT\nAR AR\nAR EV\nAR AB\nAR LN\nAR RP\nAT B5\nAT FT\nAT UB\nAW WN\nAW LH\nAW FS\nAW VN\nAW N3\nAW BH\nAW AH\nAW CL\nAW GD\nAW NX\nAW NXA\nB2 CW\nB2 VY\nB2 FN\nB2 AN\nB2 7B\nB2 AW\nB2 B4\nB2 BR\nB2 ZM\nB2 ZU\nB3 GC\nB3 ZW\nB3 FM\nB3 MS\nB3 FV\nB3 CX\nB3 CL\nB3 EP\nB3 PO\nB3 RO\nB3 TF\nB3 VD\nB3 W5\nB3 WE\nB3 WX\nB4 CW\nB4 GS\nB4 GW\nB4 FK\nB4 RD\nB5 GS\nB5 GC\nB5 SM\nB5 CX\nB5 W5\nB5 7R\nB5 BF\nB5 CT\nB5 FB\nB5 FF\nB5 FQ\nB5 FY\nB5 G6\nB5 TS\nB5 VI\nB5 ZI\nBD FM\nBD ZC\nBD CT\nBD GT\nBD SP\nBD SR\nBD ZO\nBE MW\nBE DO\nBE FT\nBE N3\nBE PN\nBE FQ\nBE SP\nBE GO\nBE OT\nBE VE\nBF MS\nBF DT\nBF OA\nBF BE\nBF PN\nBF BF\nBF VI\nBF GO\nBH B3\nBH ZT\nBH FS\nBH AW\nBH BH\nBH HX\nBH GD\nBH ZI\nBH RI\nBI CW\nBI WN\nBI IP\nBI VY\nBI GW\nBI WI\nBI EY\nBI AN\nBI BN\nBI GN\nBI PG\nBI MN\nBI YL\nBI NL\nBI BI\nBI CN\nBI FA\nBI FK\nBI BQ\nBI ZM\nBI ZU\nBI EP\nBI WX\nBI FQ\nBI CV\nBI FO\nBI FW\nBI VU\nBK R3\nBL EY\nBL FD\nBM CW\nBM GW\nBM BI\nBM FA\nBM PN\nBM RD\nBM FO\nBN CW\nBN WN\nBN VY\nBN DY\nBN GW\nBN WI\nBN BN\nBN PG\nBN NL\nBN BI\nBN W2\nBN VM\nBN TF\nBN FO\nBN FW\nBN FD\nBN B2\nBN EC\nBN JP\nBN RL\nBN ZG\nBQ VI\nBQ CO\nBR CW\nBR VY\nBR DY\nBR GW\nBR EY\nBR XE\nBR FK\nBR FB\nBR FF\nBR G6\nBR VI\nBR ZG\nBR ZL\nBST FM\nBST B5\nBST VD\nBST SP\nBST DMN\nCB G6\nCC B3\nCC FM\nCC B5\nCC CC\nCC MP\nCC N3\nCC UB\nCC ZO\nCC BD\nCC TU\nCJ CW\nCJ WN\nCJ IP\nCJ DA\nCJ DY\nCJ GW\nCJ HA\nCJ GN\nCJ ZM\nCJ FW\nCJ DH\nCJ IL\nCJ ZV\nCL GW\nCL W7\nCL PG\nCL BI\nCL AW\nCL BH\nCL AH\nCL GD\nCL TF\nCL W5\nCL JC\nCL TD\nCN DJ\nCN B3\nCN BI\nCN MH\nCN ZQ\nCN 7B\nCN FS\nCN DO\nCN VX\nCN 7R\nCN SP\nCN OT\nCO WN\nCO VH\nCO FL\nCO WG\nCO UB\nCO SD\nCO SL\nCS MP\nCS SR\nCS BD\nCS DV\nCS VO\nCS XW\nCT DA\nCT GW\nCT CC\nCT FT\nCT W3\nCT CT\nCT SR\nCT CV\nCT BD\nCT DV\nCT CS\nCV GW\nCV BI\nCV RP\nCV W5\nCV BF\nCV CT\nCV GT\nCV SR\nCV CV\nCV JC\nCV DV\nCV QM\nCV RF\nCV WH\nCV WJ\nCW CW\nCW WN\nCW IP\nCW VY\nCW DA\nCW FN\nCW GG\nCW GS\nCW GW\nCW LH\nCW TY\nCW WI\nCW DJ\nCW EY\nCW W7\nCW BN\nCW GN\nCW SV\nCW GC\nCW PG\nCW ZW\nCW NL\nCW BI\nCW OA\nCW FT\nCW NR\nCW W2\nCW HX\nCW 4B\nCW B4\nCW FA\nCW FK\nCW FP\nCW RR\nCW CL\nCW NX\nCW BR\nCW FF\nCW G6\nCW OT\nCW FO\nCW FW\nCW FD\nCW ZV\nCW JC\nCW RF\nCW 1M\nCW 3N\nCW 4Z1\nCW 6H2\nCW 6H3\nCW BM\nCW CJ\nCW DM\nCW FVM\nCW IY\nCW JB\nCW LGN\nCW M1\nCW Q3\nCW Q5\nCW Q8\nCW SF\nCW dv\nCW V1\nCW VL\nCW XG\nCW XP\nCW ZX\nCW ZZ\nCX WN\nCX B3\nCX CN\nCX FS\nCX NR\nCX AA\nCX CX\nCX AW\nCX TS\nCX RI\nCX VO\nCXA AA\nCXA AE\nCXA VO\nDA CW\nDA WN\nDA VY\nDA GW\nDA HA\nDA LH\nDA TY\nDA WI\nDA EY\nDA GC\nDA PG\nDA MN\nDA CT\nDA DH\nDA ZV\nDA 3D\nDA 3Z7\nDD EZ\nDD TY\nDD RR\nDD 3D\nDH DA\nDH GW\nDH LH\nDH GN\nDH RR\nDH FW\nDH CJ\nDI MS\nDJ PG\nDJ FM\nDJ MS\nDJ 9H\nDJ OA\nDJ CC\nDJ DO\nDJ N3\nDJ BR\nDJ ZU\nDJ ZI\nDJ GO\nDJ OT\nDJ 6N\nDJ VV\nDM CW\nDM VY\nDM GW\nDM GN\nDM FD\nDM CJ\nDM DS\nDMN SV\nDMN ZT\nDMN GV\nDMN MP\nDN CW\nDN WN\nDN VY\nDO WN\nDO ZT\nDO MW\nDO OA\nDO BE\nDO 9I\nDO MP\nDO VH\nDO UB\nDO LL\nDS FK\nDS DM\nDT MW\nDT OA\nDT 9I\nDT FY\nDV R3\nDV CT\nDV GT\nDV SR\nDV ZO\nDV GO\nDV CV\nDV CS\nDV RF\nDV WH\nDV JF\nDY WN\nDY GS\nDY GW\nDY TY\nDY ZY\nDY EY\nDY BN\nDY SN\nDY PG\nDY MN\nDY FV\nDY 4B\nDY ZM\nDY G6\nDY FD\nDY JP\nDY 3D\nDY 3Y1\nEA FT\nEA FV\nEA NXA\nEC GW\nEC BN\nEC CN\nEC SM\nEC SS\nEC FK\nEC PN\nEC FQ\nEC GO\nEC SH\nED 7B\nED FL\nED WG\nED FQ\nED EC\nED 5A\nED MB\nED 
SI\nEP IP\nEP FN\nEP EY\nEP TO\nEP HX\nEP ZN\nEP PO\nEP ZV\nER FS\nER VH\nER TE\nER ZE\nEV GW\nEV MS\nEV FV\nEV MV\nEV FA\nEV W5\nEV ZI\nEV EC\nEV MB\nEV 7M\nEV PBL\nEY CW\nEY IP\nEY VY\nEY DA\nEY DY\nEY GB\nEY GW\nEY LH\nEY TY\nEY ZY\nEY ZW\nEY 4Z2\nEY NL\nEY BI\nEY 4B\nEY NX\nEY FB\nEY OT\nEY FW\nEY FD\nEY 3Z7\nEY 2W\nEY FR\nEY LT\nEY UH\nEY ZP\nEY ZS\nEZ WN\nEZ DD\nEZ GW\nEZ TY\nEZ WI\nEZ MN\nEZ MS\nEZ FB\nEZ 3D\nFA CW\nFA FN\nFA GW\nFA AN\nFA MN\nFA ZW\nFA YL\nFA BI\nFA W2\nFA ZU\nFA FD\nFA ZG\nFA ZX\nFA ZR\nFB EZ\nFB GW\nFB ZY\nFB EY\nFB 4B\nFB FK\nFB BR\nFB ZM\nFB G6\nFB FO\nFB FD\nFB ZG\nFB ZZ\nFB FR\nFB AQ\nFB CB\nFD WN\nFD VY\nFD DY\nFD FN\nFD GW\nFD LH\nFD EY\nFD XE\nFD MN\nFD CN\nFD AR\nFD FK\nFD FB\nFD G6\nFD EC\nFD DM\nFD LGN\nFD ZZ\nFD RU\nFF CW\nFF FN\nFF GW\nFF PG\nFF SM\nFF ZU\nFF G6\nFF ZV\nFH GW\nFH BN\nFH MW\nFH FW\nFJ AE\nFJ BF\nFJ FQ\nFJ FD\nFJ VZ\nFJ XD\nFK CW\nFK WN\nFK VY\nFK DY\nFK GW\nFK HA\nFK ZY\nFK AN\nFK SN\nFK PG\nFK MN\nFK ZW\nFK RR\nFK VM\nFK LN\nFK ZU\nFK CT\nFK FB\nFK FW\nFK FD\nFK B2\nFK JP\nFK ZV\nFK BM\nFK LGN\nFK ZX\nFK DS\nFK VDT\nFL DJ\nFL OA\nFL SL\nFL 5A\nFL ED\nFL MO\nFM GW\nFM TY\nFM DJ\nFM SV\nFM UIM\nFM PG\nFM ZT\nFM FJ\nFM GM\nFM VX\nFM SS\nFM CX\nFM R3\nFM W5\nFM VI\nFM FW\nFM CO\nFM SD\nFM UH\nFM AQ\nFM XD\nFM BK\nFM BST\nFM VF\nFN CW\nFN WN\nFN FN\nFN GS\nFN GW\nFN ZY\nFN AN\nFN XE\nFN MN\nFN ZW\nFN FA\nFN FP\nFN FB\nFN G6\nFN FW\nFN FD\nFN 3D\nFN FR\nFN ZJ\nFO WI\nFO BN\nFO XE\nFO BI\nFO AA\nFO FA\nFO WJ\nFP CW\nFP WN\nFP IP\nFP GS\nFP GW\nFP ZY\nFP 4Z2\nFP W2\nFP AA\nFP FK\nFP ZN\nFP BR\nFP FW\nFP EC\nFP ZG\nFP ZR\nFQ EY\nFQ ZC\nFQ NS\nFQ EV\nFQ AE\nFQ N3\nFQ WG\nFQ UB\nFQ SL\nFQ SI\nFQ PBL\nFR EY\nFR FP\nFR ZG\nFR ZS\nFS MS\nFS SS\nFS CX\nFS VM\nFS FL\nFS R3\nFS LN\nFS UB\nFS BR\nFS W5\nFS WX\nFS VI\nFS RI\nFS WH\nFS LGN\nFS ED\nFT W7\nFT CN\nFT ZC\nFT 7B\nFT FS\nFT SM\nFT AR\nFT 9I\nFT FV\nFT AE\nFT CXA\nFT CL\nFT W5\nFT TS\nFT SR\nFT XW\nFT CS\nFT WJ\nFT ZZ\nFT SI\nFT ED\nFT GF\nFT VQ\nFU R3\nFV WN\nFV GW\nFV EV\nFV MV\nFV MT\nFV LN\nFV RP\nFV VI\nFV UX\nFV WM\nFVM CW\nFVM ZZ\nFW CW\nFW WN\nFW IP\nFW VY\nFW FN\nFW GW\nFW LH\nFW TY\nFW WI\nFW ZY\nFW EY\nFW W7\nFW BN\nFW GN\nFW XE\nFW ZW\nFW 4Z2\nFW NL\nFW BI\nFW W2\nFW FK\nFW FP\nFW RR\nFW VM\nFW CL\nFW NX\nFW G6\nFW FD\nFW ZV\nFW CJ\nFW FH\nFW IM\nFW SK\nFY FJ\nFY ZQ\nFY AR\nFY EV\nFY FV\nFY RP\nFY VO\nFY 5A\nFY SI\nFY AQ\nG5 AR\nG6 CW\nG6 VY\nG6 FN\nG6 GW\nG6 LH\nG6 TY\nG6 WI\nG6 ZY\nG6 EY\nG6 MN\nG6 NL\nG6 4B\nG6 FK\nG6 ZM\nG6 ZU\nG6 FB\nG6 FW\nG6 FD\nG6 B2\nG6 ZG\nG6 ZZ\nG7 AR\nGB EY\nGB DH\nGB 3D\nGC CW\nGC WN\nGC GG\nGC GW\nGC TY\nGC WI\nGC BN\nGC UIM\nGC GC\nGC PG\nGC NL\nGC BI\nGC 4B\nGC FP\nGC ZU\nGC FW\nGC IL\nGC ZV\nGC 4Z1\nGC IY\nGC UH\nGC IM\nGC PX\nGD B5\nGD ZC\nGD N3\nGF DO\nGF FT\nGF MP\nGG CW\nGG WN\nGG DA\nGG AN\nGG GC\nGG BH\nGG GO\nGG ZV\nGG LGN\nGG 3D\nGM FM\nGM SM\nGM MP\nGM UH\nGM BST\nGN CW\nGN WN\nGN TY\nGN ZY\nGN EY\nGN AN\nGN MP\nGN 4B\nGN FW\nGN CJ\nGN DM\nGN 3D\nGO GW\nGO DJ\nGO EY\nGO ZC\nGO DO\nGO MP\nGO RO\nGO BF\nGO OT\nGS CW\nGS DY\nGS FN\nGS GW\nGS LH\nGS ZY\nGS PG\nGS FP\nGS ZM\nGS DM\nGS 3D\nGS 2W\nGT GY\nGT G6\nGT CV\nGT DV\nGV FM\nGV B5\nGV ZQ\nGV FS\nGV VH\nGV FQ\nGW CW\nGW WN\nGW IP\nGW VY\nGW DA\nGW DD\nGW DY\nGW EZ\nGW FN\nGW GG\nGW GS\nGW GW\nGW HA\nGW LH\nGW WI\nGW ZY\nGW EY\nGW W7\nGW AN\nGW BN\nGW SV\nGW TO\nGW XE\nGW GC\nGW PG\nGW 4Z3\nGW MN\nGW ZW\nGW YL\nGW NL\nGW BI\nGW MH\nGW DT\nGW AR\nGW 9I\nGW NR\nGW EV\nGW W2\nGW CX\nGW CXA\nGW N3\nGW 4B\nGW B4\nGW FK\nGW FP\nGW RR\nGW VM\nGW ZN\nGW FL\nGW WG\nGW RP\nGW CL\nGW NX\nGW BR\nGW 
ZM\nGW ZU\nGW TF\nGW W5\nGW CT\nGW FB\nGW FY\nGW G6\nGW SR\nGW OT\nGW CV\nGW FO\nGW FW\nGW FD\nGW JP\nGW ZG\nGW DH\nGW IL\nGW ZV\nGW JC\nGW SL\nGW QM\nGW WH\nGW 1M\nGW BM\nGW CJ\nGW DM\nGW FVM\nGW LGN\nGW ZX\nGW 3D\nGW 7M\nGW UH\nGW ZS\nGW ZR\nGW VDT\nGW BST\nGW FH\nGW IM\nGW PX\nGW BL\nGW LI\nGW NE\nGW Q4\nGW Q6\nGW SX\nGY MS\nGY AO\nGY GY\nGY CC\nGY VD\nGY BF\nHA WN\nHA DA\nHA EZ\nHA GW\nHA TY\nHA FK\nHA FW\nHA CJ\nHA 3D\nHX W7\nHX BN\nHX FM\nHX AE\nHX GD\nHX TS\nHX JC\nHX TE\nHX UX\nHX LI\nHX SX\nHX AI\nHX MX\nHX RV\nHXA FM\nHXA AA\nHXA RV\nIL GW\nIL GC\nIL ZN\nIL CJ\nIM CW\nIM GW\nIM WI\nIM GC\nIM ZU\nIM FW\nIM ZV\nIM IY\nIP CW\nIP WN\nIP GW\nIP TY\nIP EY\nIP W7\nIP TO\nIP XE\nIP BI\nIP FP\nIP FW\nIP JP\nIP CJ\nIP 3D\nIP 2W\nIP SK\nIP RC\nIY CW\nIY WN\nIY GC\nIY FP\nIY IL\nIY IM\nJB W7\nJB CL\nJC VY\nJC GW\nJC WI\nJC W7\nJC 4Z3\nJC AW\nJC CL\nJC GD\nJC ZM\nJC W5\nJC CV\nJC JC\nJC EA\nJC HXA\nJF FY\nJF B2\nJP IP\nJP GW\nJP BN\nJP FP\nJP FB\nJP ZL\nJP XP\nJV AR\nJV EV\nJV SS\nJV W5\nJV ZO\nLGN CW\nLGN GW\nLGN XE\nLGN 4B\nLGN RR\nLGN WE\nLGN FD\nLGN DH\nLGN ZV\nLGN VDT\nLH CW\nLH WN\nLH VY\nLH GS\nLH GW\nLH LH\nLH TY\nLH WI\nLH EY\nLH SN\nLH FA\nLH G6\nLH FD\nLH CJ\nLH 3D\nLH 2W\nLI CW\nLI GW\nLI W7\nLI N3\nLI HX\nLI RC\nLL CC\nLN W7\nLN SM\nLN AR\nLN EV\nLN FK\nLN AB\nLN LN\nLN RP\nLN BR\nLN ZO\nLN CV\nLN FO\nLN WH\nLT EY\nLT W7\nLT FS\nLT CL\nLT TD\nLT LT\nLT PX\nM1 CW\nM1 DN\nMB DJ\nMB 7B\nMH WN\nMH BI\nMH CN\nMH GV\nMH 7B\nMH 9H\nMH FT\nMH 7R\nMH BF\nMN WN\nMN DA\nMN EZ\nMN FN\nMN GW\nMN WI\nMN 4Z3\nMN BI\nMN FS\nMN CL\nMN BR\nMN FB\n" ], [ "colors = df1['Color'].unique().tolist()\ncolors.sort()\ncolors\nshape = (len(colors), len(colors))\nmatrix = np.zeros(shape)\nfor i in range(0, len(colors)):\n for j in range(0, len(colors)):\n value = df2['ts_mean'].loc[(df2['Color'] == colors[i]) &\n (df2['Next Color Code'] == colors[j])].tolist()\n matrix[i, j] = value[0]", "_____no_output_____" ], [ "def Select_2(color1, color2):\n color = df1['Color'] == color1\n nextcolorcode = df1 ['Next Color Code'] == color2\n ndf = df1[color & nextcolorcode]\n if len(ndf) == 0:\n return\n else:\n n = 0\n for i in range(len(ndf)-1):\n if ndf.iloc[i,0] != ndf.iloc[i+1,0]:\n n = n + 1\n print (color1, color2, n + 1)\n tl = 0\n for i in range(len(ndf)):\n tl = tl + int(ndf.iloc[i,8])\n tl = tl/(n+1)\n print (color1, color2, 'cycles:', n + 1, 'average transition sheet loss:', tl)", "_____no_output_____" ], [ "def Select_2(color1, color2):\n color = df1['Color'] == color1\n nextcolorcode = df1 ['Next Color Code'] == color2\n ndf = df1[color & nextcolorcode]\n if len(ndf) == 0:\n return\n else:\n n = 0\n for i in range(len(ndf)-1):\n if ndf.iloc[i,0] != ndf.iloc[i+1,0]:\n n = n + 1\n tl = 0\n for i in range(len(ndf)):\n tl = tl + int(ndf.iloc[i,8])\n tl = tl/(n+1)\n print (color1, color2, 'cycles:', n + 1, 'average transition sheet loss:', tl)", "_____no_output_____" ] ], [ [ "Telling how many cast order do you have for each color code", "_____no_output_____" ] ], [ [ "for z in range(len(df1['Color'].unique())):\n list1 = list(df1['Color'].unique())\n color1 = list1[z]\n list2 = list(df1['Next Color Code'].unique())\n for z2 in range(len(df1['Next Color Code'].unique())):\n color2 = list2[z2]\n Select_2(color1, color2)", "1M CW cycles: 2 average transition sheet loss: 84.53125\n1M WN cycles: 1 average transition sheet loss: 9.0\n2W IP cycles: 1 average transition sheet loss: 5.0\n2W NF cycles: 1 average transition sheet loss: 42.0\n2W VY cycles: 1 average transition sheet loss: 20.0\n3D CW cycles: 2 average 
transition sheet loss: 31.5\n3D IP cycles: 1 average transition sheet loss: 214.0\n3D VY cycles: 2 average transition sheet loss: 12.25\n3D DA cycles: 1 average transition sheet loss: 29.0\n3D DD cycles: 1 average transition sheet loss: 13.0\n3D DY cycles: 3 average transition sheet loss: 28.530864197530864\n3D EZ cycles: 1 average transition sheet loss: 253.0\n3D FN cycles: 1 average transition sheet loss: 13.0\n3D GB cycles: 1 average transition sheet loss: 23.0\n3D GG cycles: 1 average transition sheet loss: 21.0\n3D GS cycles: 1 average transition sheet loss: 29.0\n3D GW cycles: 2 average transition sheet loss: 11.75\n3D HA cycles: 1 average transition sheet loss: 35.0\n3D LH cycles: 1 average transition sheet loss: 27.0\n3D TY cycles: 3 average transition sheet loss: 10.460905349794238\n3D WI cycles: 1 average transition sheet loss: 210.0\n3D ZY cycles: 1 average transition sheet loss: 16.0\n3N GW cycles: 1 average transition sheet loss: 4.0\n3Y1 DY cycles: 1 average transition sheet loss: 47.0\n" ], [ "Select('1M' , 'CW')", "_____no_output_____" ] ], [ [ "### 2. For the fail rate", "_____no_output_____" ] ], [ [ "print (df1['AGrade Sheets'].isnull())", "0 False\n1 True\n2 True\n3 True\n4 False\n ... \n4184 True\n4185 False\n4186 False\n4187 True\n4188 False\nName: AGrade Sheets, Length: 4189, dtype: bool\n" ] ], [ [ "The total number of number of failing sheets", "_____no_output_____" ] ], [ [ "print ('total number of our data is', len(df1))", "total number of our data is 4189\n" ], [ "print ('the fail sheets number is', (df1['AGrade Sheets'].isnull().sum()))", "the fail sheets number is 919\n" ] ], [ [ "## data collecting\n### take a look at the df2 and pick up the most frequently used color code", "_____no_output_____" ], [ "### Sorting the dataframe \n 1. 10 most demanding color\n 2. 10 highest transition cost\n 3. 10 hiest sales rank\n 4. 10 most frequent sequence\n 5. 10 highest sub family number\n 6. 10 biggest mean transition loss", "_____no_output_____" ] ], [ [ "##Importing the second dataframe: variable cost per sheet\ndf2 = df_desc = pd.read_excel('../data/Variable cost per sheet.xlsx',\n header=3)", "_____no_output_____" ], [ "df2.dtypes", "_____no_output_____" ], [ "## we need to deal with the NaN and , in the dataframe\ndf2 = df2.fillna(0)", "_____no_output_____" ], [ "df2['CC DIM'] = [x.replace('1/2', '').replace('1/4', '').replace('3/4', '') for x in df_desc['CC DIM']]", "_____no_output_____" ], [ "df2.drop_duplicates(inplace=True)", "_____no_output_____" ], [ "df2", "_____no_output_____" ], [ "## define a function for sorting the data by colums and rank from high to low.\n##and you can adjust the n to see the number of rows you want\ndef sort_hightolow(df2, colums, n):\n df2_demanding = df2.sort_values(by=[colums],ascending=False)\n return df2_demanding.head(n=n)", "_____no_output_____" ], [ "## define a function for sorting the data by colums and rank from low to high\ndef sort_lowtohigh(df2, colums):\n df2_demanding = df2.sort_values(by=[colums],ascending=True)\n return df2_demanding.head(n=n)", "_____no_output_____" ] ], [ [ "Questions:\n#### 1. How to find the relationship between the color and the family subfamily?\n2. what are VCOM & TCOM\n3. which parameters are more important??\n4. question: how many color code we should look at?2Ams 20 - 30\n5. how should we collect if the number is close to each other?", "_____no_output_____" ], [ "## Jointing two data together to obtain the average sheet loss for the the family", "_____no_output_____" ], [ "1. 
combining two dataframes", "_____no_output_____" ] ], [ [ "df2.loc[df2['CC DIM'] == 'AN1/2']", "_____no_output_____" ], [ "len(df1)", "_____no_output_____" ], [ "print(len(df1['Color'].unique()))", "267\n" ], [ "family_df", "_____no_output_____" ], [ "len(df1)", "_____no_output_____" ], [ "# remove gauge strings from data file and set series values\nproducts = [x.replace('1/2', '').replace('1/4', '').replace('3/4', '') for x in df_desc['CC DIM']]\nfamilies = df2['Family']\nsub_families = df2['Sub Family']\ncolors = df2['Color']\n\n# create new dataframe and drop duplicates (repeat values for lines 1 and 2)\nfamily_df = pd.DataFrame([products, families, sub_families, colors]).T\nfamily_df.columns = ['Color','family','sub_family','pigment']\nfamily_df.drop_duplicates(inplace=True)\n\n# inner merge with main dataframe\ndff = pd.merge(df1, family_df, how='inner', on='Color')\nfamily_df.columns = ['Next Color Code','family','sub_family','pigment']\ndff = pd.merge(dff, family_df, how='inner', on='Next Color Code')\nfamily_df.columns = ['Color','family','sub_family','pigment']\ndff", "_____no_output_____" ], [ "print(len(dff['family_x'].unique()))\nprint(len(dff['sub_family_x'].unique()))", "2\n8\n" ], [ "dff2 = dff.groupby(['Color','Next Color Code']).agg({'Transition Sheets':\n ['mean', 'std', 'min', 'max', 'count'],'AGrade Sheets':['sum']})\ndff2", "_____no_output_____" ], [ "dff2.columns = ['ts_mean', 'ts_std', 'ts_min', 'ts_max', 't_count','AGS_sum']\ndff2 = dff2.reset_index()\ndff2", "_____no_output_____" ] ], [ [ "Obtaining the 30 produced most products", "_____no_output_____" ] ], [ [ "ndf = sort_hightolow(dff2, 'AGS_sum', 30)\nndf", "_____no_output_____" ], [ "print('Number of unique Colors:', len(ndf['Color'].unique()))\nprint('Number of unique Next Colors:', len(ndf['Next Color Code'].unique()))\nprint(list(set(ndf['Color'].unique()) - set(ndf['Next Color Code'].unique())))\nprint(list(set(ndf['Next Color Code'].unique()) - set(ndf['Color'].unique())))", "Number of unique Colors: 24\nNumber of unique Next Colors: 16\n['AR', 'NL', 'WH', 'MT', 'LI', 'BN', 'BI', 'LT', 'PG', 'IP', 'UX', 'EY', '4B']\n['RC', 'TD', 'CT', 'AI', 'MS']\n" ], [ "len(dff2)", "_____no_output_____" ] ], [ [ "Searching for the relationship between the color code and the transition sheets.", "_____no_output_____" ] ], [ [ "for i in range(len(dff2)):\n for n in range(len(dff2)):\n if dff2.iloc[i,0] == dff2.iloc[n,1] and dff2.iloc[i,1] == dff2.iloc[n,0]:\n if dff2.iloc[i,7] == dff2.iloc[n,7]:\n print(dff2.iloc[i,0],dff2.iloc[i,1], dff2.iloc[i,7])\n print(dff2.iloc[n,0], dff2.iloc[n,1],dff2.iloc[n,7])", "3D GG 0.0\nGG 3D 0.0\nAE AE 3.0\nAE AE 3.0\nAN AN 15.0\nAN AN 15.0\nAR AR 2344.0\nAR AR 2344.0\nBF BF 257.0\nBF BF 257.0\nBH BH 264.0\nBH BH 264.0\nBI BI 170.0\nBI BI 170.0\nBN BN 932.0\nBN BN 932.0\nCC CC 26.0\nCC CC 26.0\nCJ GN 0.0\nGN CJ 0.0\nCT CT 1562.0\nCT CT 1562.0\nCV CV 809.0\nCV CV 809.0\nCW CW 110.0\nCW CW 110.0\nCX CX 3325.0\nCX CX 3325.0\nDA WN 0.0\nWN DA 0.0\nEY IP 0.0\nIP EY 0.0\nGC GC 1522.0\nGC GC 1522.0\nGC TY 0.0\nTY GC 0.0\nGG 3D 0.0\n3D GG 0.0\nGG WN 0.0\nWN GG 0.0\nGN CJ 0.0\nCJ GN 0.0\nGW GW 47181.0\nGW GW 47181.0\nGW IM 0.0\nIM GW 0.0\nGY GY 78.0\nGY GY 78.0\nIM GW 0.0\nGW IM 0.0\nIP EY 0.0\nEY IP 0.0\nJC JC 52.0\nJC JC 52.0\nLH LH 255.0\nLH LH 255.0\nLH SN 0.0\nSN LH 0.0\nLN LN 4785.0\nLN LN 4785.0\nLT LT 11.0\nLT LT 11.0\nMP MP 2.0\nMP MP 2.0\nMT MT 2317.0\nMT MT 2317.0\nSM SM 237.0\nSM SM 237.0\nSN LH 0.0\nLH SN 0.0\nSS SS 769.0\nSS SS 769.0\nTY GC 0.0\nGC TY 0.0\nWJ WJ 1273.0\nWJ WJ 1273.0\nWM 
WM 305.0\nWM WM 305.0\nWN DA 0.0\nDA WN 0.0\nWN GG 0.0\nGG WN 0.0\n" ], [ "print(list(set(dff['Color'].unique()) - set(dff['Next Color Code'].unique())))\nprint(list(set(dff['Next Color Code'].unique()) - set(dff['Color'].unique())))", "[]\n['GD', 'ER', 'GV', 'NE']\n" ] ], [ [ "### The mean value of transition sheet loss for different family", "_____no_output_____" ] ], [ [ "dff.groupby(['family_x', 'sub_family_x']).agg({'Transition Sheets':['mean','std','min','max','count']})", "_____no_output_____" ] ], [ [ "## creating dataframe", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
cb9fad1fcc0afe07a2650561f5b79c4ea694eee9
286,902
ipynb
Jupyter Notebook
Image Classifier Project.ipynb
tianyiran02/udacityML_flowers
99c3195c526f57aa225a6e451c56d7e10ed1abca
[ "MIT" ]
null
null
null
Image Classifier Project.ipynb
tianyiran02/udacityML_flowers
99c3195c526f57aa225a6e451c56d7e10ed1abca
[ "MIT" ]
null
null
null
Image Classifier Project.ipynb
tianyiran02/udacityML_flowers
99c3195c526f57aa225a6e451c56d7e10ed1abca
[ "MIT" ]
null
null
null
288.633803
120,828
0.900862
[ [ [ "# Developing an AI application\n\nGoing forward, AI algorithms will be incorporated into more and more everyday applications. For example, you might want to include an image classifier in a smart phone app. To do this, you'd use a deep learning model trained on hundreds of thousands of images as part of the overall application architecture. A large part of software development in the future will be using these types of models as common parts of applications. \n\nIn this project, you'll train an image classifier to recognize different species of flowers. You can imagine using something like this in a phone app that tells you the name of the flower your camera is looking at. In practice you'd train this classifier, then export it for use in your application. We'll be using [this dataset](http://www.robots.ox.ac.uk/~vgg/data/flowers/102/index.html) of 102 flower categories, you can see a few examples below. \n\n<img src='assets/Flowers.png' width=500px>\n\nThe project is broken down into multiple steps:\n\n* Load and preprocess the image dataset\n* Train the image classifier on your dataset\n* Use the trained classifier to predict image content\n\nWe'll lead you through each part which you'll implement in Python.\n\nWhen you've completed this project, you'll have an application that can be trained on any set of labeled images. Here your network will be learning about flowers and end up as a command line application. But, what you do with your new skills depends on your imagination and effort in building a dataset. For example, imagine an app where you take a picture of a car, it tells you what the make and model is, then looks up information about it. Go build your own dataset and make something new.\n\nFirst up is importing the packages you'll need. It's good practice to keep all the imports at the beginning of your code. As you work through this notebook and find you need to import a package, make sure to add the import up here.", "_____no_output_____" ] ], [ [ "# Imports here\nimport torch\nimport json\nfrom torch import nn\nfrom torch import optim\nimport torch.nn.functional as F\nfrom torchvision import transforms, datasets", "_____no_output_____" ] ], [ [ "## Load the data\n\nHere you'll use `torchvision` to load the data ([documentation](http://pytorch.org/docs/0.3.0/torchvision/index.html)). The data should be included alongside this notebook, otherwise you can [download it here](https://s3.amazonaws.com/content.udacity-data.com/nd089/flower_data.tar.gz). The dataset is split into three parts, training, validation, and testing. For the training, you'll want to apply transformations such as random scaling, cropping, and flipping. This will help the network generalize leading to better performance. You'll also need to make sure the input data is resized to 224x224 pixels as required by the pre-trained networks.\n\nThe validation and testing sets are used to measure the model's performance on data it hasn't seen yet. For this you don't want any scaling or rotation transformations, but you'll need to resize then crop the images to the appropriate size.\n\nThe pre-trained networks you'll use were trained on the ImageNet dataset where each color channel was normalized separately. For all three sets you'll need to normalize the means and standard deviations of the images to what the network expects. For the means, it's `[0.485, 0.456, 0.406]` and for the standard deviations `[0.229, 0.224, 0.225]`, calculated from the ImageNet images. 
These values will shift each color channel to be centered at 0 and range from -1 to 1.\n ", "_____no_output_____" ] ], [ [ "data_dir = 'flowers'\ntrain_dir = data_dir + '/train'\nvalid_dir = data_dir + '/valid'\ntest_dir = data_dir + '/test'", "_____no_output_____" ], [ "# TODO: Define your transforms for the training, validation, and testing sets\ntrain_transform = transforms.Compose([transforms.RandomRotation(30),\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])])\n\ntest_transform = transforms.Compose([transforms.Resize(224),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])])\n\n# TODO: Load the datasets with ImageFolder\ntrain_dataset = datasets.ImageFolder(train_dir, transform = train_transform)\nvalid_dataset = datasets.ImageFolder(valid_dir, transform = test_transform)\ntest_dataset = datasets.ImageFolder(test_dir, transform = test_transform)\n\n# TODO: Using the image datasets and the trainforms, define the dataloaders\ntrain_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size = 64, shuffle = True)\nvalid_dataloader = torch.utils.data.DataLoader(valid_dataset, batch_size = 64)\ntest_dataloader = torch.utils.data.DataLoader(test_dataset, batch_size = 64)\n", "_____no_output_____" ] ], [ [ "### Label mapping\n\nYou'll also need to load in a mapping from category label to category name. You can find this in the file `cat_to_name.json`. It's a JSON object which you can read in with the [`json` module](https://docs.python.org/2/library/json.html). This will give you a dictionary mapping the integer encoded categories to the actual names of the flowers.", "_____no_output_____" ] ], [ [ "with open('cat_to_name.json', 'r') as f:\n cat_to_name = json.load(f)", "_____no_output_____" ] ], [ [ "# Building and training the classifier\n\nNow that the data is ready, it's time to build and train the classifier. As usual, you should use one of the pretrained models from `torchvision.models` to get the image features. Build and train a new feed-forward classifier using those features.\n\nWe're going to leave this part up to you. Refer to [the rubric](https://review.udacity.com/#!/rubrics/1663/view) for guidance on successfully completing this section. Things you'll need to do:\n\n* Load a [pre-trained network](http://pytorch.org/docs/master/torchvision/models.html) (If you need a starting point, the VGG networks work great and are straightforward to use)\n* Define a new, untrained feed-forward network as a classifier, using ReLU activations and dropout\n* Train the classifier layers using backpropagation using the pre-trained network to get the features\n* Track the loss and accuracy on the validation set to determine the best hyperparameters\n\nWe've left a cell open for you below, but use as many as you need. Our advice is to break the problem up into smaller parts you can run separately. Check that each part is doing what you expect, then move on to the next. You'll likely find that as you work through each part, you'll need to go back and modify your previous code. This is totally normal!\n\nWhen training make sure you're updating only the weights of the feed-forward network. You should be able to get the validation accuracy above 70% if you build everything right. Make sure to try different hyperparameters (learning rate, units in the classifier, epochs, etc) to find the best model. 
Save those hyperparameters to use as default values in the next part of the project.\n\nOne last important tip if you're using the workspace to run your code: To avoid having your workspace disconnect during the long-running tasks in this notebook, please read in the earlier page in this lesson called Intro to\nGPU Workspaces about Keeping Your Session Active. You'll want to include code from the workspace_utils.py module.\n\n**Note for Workspace users:** If your network is over 1 GB when saved as a checkpoint, there might be issues with saving backups in your workspace. Typically this happens with wide dense layers after the convolutional layers. If your saved checkpoint is larger than 1 GB (you can open a terminal and check with `ls -lh`), you should reduce the size of your hidden layers and train again.", "_____no_output_____" ] ], [ [ "# TODO: Build and train your network\n# Decision made was to use a pre-trained network\n# Import first\nfrom torchvision import models", "_____no_output_____" ], [ "# Load the VGG pre-defined network\nmodel = models.vgg11()\nmodel", "_____no_output_____" ], [ "# Load the pre-train state, download from internet\nstate_dict = torch.load('vgg11-bbd30ac9.pth')\n\n# Load to the model created\nmodel.load_state_dict(state_dict)", "_____no_output_____" ], [ "# Freeze other parameters\nfor param in model.parameters():\n param.requires_grad = False\n \n# The is a 25088 in and 1000 out full connected network. Need to replace to match our application.\n# In our application, its a 25088 in and 102 out network\nClassifierNew = nn.Sequential(nn.Linear(25088, 2048),\n nn.ReLU(),\n nn.Dropout(0.2),\n nn.Linear(2048, 1024),\n nn.ReLU(),\n nn.Dropout(0.2),\n nn.Linear(1024, 512),\n nn.ReLU(),\n nn.Dropout(0.2),\n nn.Linear(512, 102),\n nn.LogSoftmax(dim = 1))\n# And replace the original Classifier\nmodel.classifier = ClassifierNew", "_____no_output_____" ], [ "# Now define the loss function and optimer\ncriterion = nn.NLLLoss()\n# Keep in mind that we only tune the classifier's parameters\noptimizer = optim.Adam(model.classifier.parameters(), lr = 0.003)", "_____no_output_____" ], [ "# Now some prepare work before training. 
Define GPU usage\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nprint(\"The training will be run in: \", device)\n\n# Move model to device\nmodel.to(device)", "The training will be run in: cuda\n" ], [ "# Start training\nepoch = 10\nrunning_loss = 0\n\nfor e in range(epoch):\n for images, labels in train_dataloader:\n # Send data to device\n images, labels = images.to(device), labels.to(device)\n # Flatten the image\n # images = images.view(images.shape[0], -1)\n \n optimizer.zero_grad()\n \n logps = model.forward(images)\n loss = criterion(logps, labels)\n loss.backward()\n \n optimizer.step()\n \n running_loss += loss.item()\n else:\n # For each epoch, print the result\n accuracy = 0\n valid_loss = 0\n # Update to evaluation modes\n model.eval()\n with torch.no_grad():\n for images, labels in valid_dataloader:\n # Send data to device\n images, labels = images.to(device), labels.to(device)\n # Flatten the image\n # images = images.view(images.shape[0], -1)\n \n logps = model.forward(images)\n loss = criterion(logps, labels)\n valid_loss += loss.item()\n \n # Calculate the accuracy\n ps = torch.exp(logps)\n top_p, top_class = ps.topk(1, dim = 1)\n equals = top_class == labels.view(*top_class.shape)\n \n accuracy += torch.mean(equals.type(torch.FloatTensor)).item()\n else:\n print(\"Epoch: \", e + 1, \"/\", epoch)\n print(\" Loss for train: \", running_loss/len(train_dataloader))\n print(\" Loss for valid: \", valid_loss/len(valid_dataloader))\n print(\" Accuracy for valid: \", (accuracy/len(valid_dataloader))* 100, \"%\")\n # Reset statistic for next epoch\n running_loss = 0\n # Turn model back to train\n model.train()", "Epoch: 1 / 10\n Loss for train: 3.6690933634933915\n Loss for valid: 2.2975001335144043\n Accuracy for valid: 38.95192306775313 %\nEpoch: 2 / 10\n Loss for train: 2.461907361317607\n Loss for valid: 1.748191420848553\n Accuracy for valid: 54.538461565971375 %\nEpoch: 3 / 10\n Loss for train: 2.252118575920179\n Loss for valid: 1.4316355608976805\n Accuracy for valid: 63.03365391034347 %\nEpoch: 4 / 10\n Loss for train: 2.069318300312005\n Loss for valid: 1.3527099856963525\n Accuracy for valid: 63.15384621803577 %\nEpoch: 5 / 10\n Loss for train: 2.008084417546837\n Loss for valid: 1.388025966974405\n Accuracy for valid: 64.47596160265115 %\nEpoch: 6 / 10\n Loss for train: 1.951527421914258\n Loss for valid: 1.3033721309441786\n Accuracy for valid: 64.90384615384616 %\nEpoch: 7 / 10\n Loss for train: 1.9509446146418747\n Loss for valid: 1.1377119834606464\n Accuracy for valid: 71.09615390117352 %\nEpoch: 8 / 10\n Loss for train: 1.882965911939306\n Loss for valid: 1.1163190821042428\n Accuracy for valid: 72.29807697809659 %\nEpoch: 9 / 10\n Loss for train: 1.8121033825920623\n Loss for valid: 1.0407237227146442\n Accuracy for valid: 73.72596172186044 %\nEpoch: 10 / 10\n Loss for train: 1.9119321709697685\n Loss for valid: 1.0451540924035585\n Accuracy for valid: 72.20192299439357 %\n" ] ], [ [ "## Testing your network\n\nIt's good practice to test your trained network on test data, images the network has never seen either in training or validation. This will give you a good estimate for the model's performance on completely new images. Run the test images through the network and measure the accuracy, the same way you did validation. 
You should be able to reach around 70% accuracy on the test set if the model has been trained well.", "_____no_output_____" ] ], [ [ "# TODO: Do validation on the test set\n# For each epoch, print the result\naccuracy = 0\ntest_loss = 0\n# Update to evaluation modes\nmodel.eval()\nwith torch.no_grad():\n for images, labels in test_dataloader:\n # Send data to device\n images, labels = images.to(device), labels.to(device)\n # Flatten the image\n # images = images.view(images.shape[0], -1)\n\n logps = model.forward(images)\n loss = criterion(logps, labels)\n test_loss += loss.item()\n\n # Calculate the accuracy\n ps = torch.exp(logps)\n top_p, top_class = ps.topk(1, dim = 1)\n equals = top_class == labels.view(*top_class.shape)\n\n accuracy += torch.mean(equals.type(torch.FloatTensor)).item()\n else:\n print(\" Loss for test: \", test_loss/len(test_dataloader))\n print(\" Accuracy for test: \", (accuracy/len(test_dataloader))* 100, \"%\")", " Loss for test: 1.1714903712272644\n Accuracy for test: 70.35727753089024 %\n" ] ], [ [ "## Save the checkpoint\n\nNow that your network is trained, save the model so you can load it later for making predictions. You probably want to save other things such as the mapping of classes to indices which you get from one of the image datasets: `image_datasets['train'].class_to_idx`. You can attach this to the model as an attribute which makes inference easier later on.\n\n```model.class_to_idx = image_datasets['train'].class_to_idx```\n\nRemember that you'll want to completely rebuild the model later so you can use it for inference. Make sure to include any information you need in the checkpoint. If you want to load the model and keep training, you'll want to save the number of epochs as well as the optimizer state, `optimizer.state_dict`. You'll likely want to use this trained model in the next part of the project, so best to save it now.", "_____no_output_____" ] ], [ [ "# TODO: Save the checkpoint\n# Preserve class to ind as\nmodel.class_to_idx = train_dataset.class_to_idx", "_____no_output_____" ], [ "# Construct dic to preserve information\ncheckpoint = {'state_dict': model.state_dict(),\n 'classifier': model.classifier,\n 'classifier.state_dict': model.classifier.state_dict(),\n 'class_to_idx': model.class_to_idx,\n 'optimizer_state_dict': optimizer.state_dict()}\n# Save\ntorch.save(checkpoint, 'checkpoint.pth')", "_____no_output_____" ] ], [ [ "## Loading the checkpoint\n\nAt this point it's good to write a function that can load a checkpoint and rebuild the model. 
That way you can come back to this project and keep working on it without having to retrain the network.", "_____no_output_____" ] ], [ [ "# TODO: Write a function that loads a checkpoint and rebuilds the model\n\ndef rebuild_model(pth_path): \n model_reload = models.vgg11()\n\n checkpoint_load = torch.load(pth_path)\n\n model_reload.classifier = checkpoint_load['classifier']\n model_reload.load_state_dict(checkpoint_load['state_dict'])\n model_reload.classifier.load_state_dict(checkpoint_load['classifier.state_dict'])\n model_reload.class_to_idx = checkpoint_load['class_to_idx']\n\n optimizer_reload = optim.Adam(model_reload.classifier.parameters(), lr = 0.003)\n\n optimizer_reload.load_state_dict(checkpoint_load['optimizer_state_dict'])\n\n return model_reload, optimizer_reload\n", "_____no_output_____" ], [ "model_reload, optimizer_reload = rebuild_model('checkpoint.pth')\nmodel_reload", "_____no_output_____" ] ], [ [ "# Inference for classification\n\nNow you'll write a function to use a trained network for inference. That is, you'll pass an image into the network and predict the class of the flower in the image. Write a function called `predict` that takes an image and a model, then returns the top $K$ most likely classes along with the probabilities. It should look like \n\n```python\nprobs, classes = predict(image_path, model)\nprint(probs)\nprint(classes)\n> [ 0.01558163 0.01541934 0.01452626 0.01443549 0.01407339]\n> ['70', '3', '45', '62', '55']\n```\n\nFirst you'll need to handle processing the input image such that it can be used in your network. \n\n## Image Preprocessing\n\nYou'll want to use `PIL` to load the image ([documentation](https://pillow.readthedocs.io/en/latest/reference/Image.html)). It's best to write a function that preprocesses the image so it can be used as input for the model. This function should process the images in the same manner used for training. \n\nFirst, resize the images where the shortest side is 256 pixels, keeping the aspect ratio. This can be done with the [`thumbnail`](http://pillow.readthedocs.io/en/3.1.x/reference/Image.html#PIL.Image.Image.thumbnail) or [`resize`](http://pillow.readthedocs.io/en/3.1.x/reference/Image.html#PIL.Image.Image.thumbnail) methods. Then you'll need to crop out the center 224x224 portion of the image.\n\nColor channels of images are typically encoded as integers 0-255, but the model expected floats 0-1. You'll need to convert the values. It's easiest with a Numpy array, which you can get from a PIL image like so `np_image = np.array(pil_image)`.\n\nAs before, the network expects the images to be normalized in a specific way. For the means, it's `[0.485, 0.456, 0.406]` and for the standard deviations `[0.229, 0.224, 0.225]`. You'll want to subtract the means from each color channel, then divide by the standard deviation. \n\nAnd finally, PyTorch expects the color channel to be the first dimension but it's the third dimension in the PIL image and Numpy array. You can reorder dimensions using [`ndarray.transpose`](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.ndarray.transpose.html). 
The color channel needs to be first and retain the order of the other two dimensions.", "_____no_output_____" ] ], [ [ "# Import required tools\nimport PIL\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nimport numpy as np\nimport numpy", "_____no_output_____" ], [ "def process_image(image):\n ''' Scales, crops, and normalizes a PIL image for a PyTorch model,\n returns an Numpy array\n '''\n im = Image.open(image)\n \n # 1. resize the image\n if im.width > im.height:\n height = 256\n width = int(256 * im.width / im.height)\n else:\n width = 256\n height = int(256 * im.height / im.width)\n\n im_resized = im.resize((width, height))\n # then central crop a 224x224\n left = (width - 224)/2\n top = (height - 224)/2\n right = (width + 224)/2\n bottom = (height + 224)/2\n \n im_resized = im_resized.crop((left, top, right, bottom))\n \n # 2. update the color channel, normalize as 0-1\n np_img = np.array(im_resized)\n np_img = np_img / 255\n \n # 3. normalize data\n np_img = (np_img - np.array([0.485, 0.456, 0.406])) / np.array([0.229, 0.224, 0.225])\n \n # 5. update the color channel\n np_img = np_img.transpose(2,0,1)\n \n # Final convertion\n torch_img = torch.from_numpy(np_img)\n torch_img = torch_img.type(torch.FloatTensor)\n return torch_img", "_____no_output_____" ] ], [ [ "To check your work, the function below converts a PyTorch tensor and displays it in the notebook. If your `process_image` function works, running the output through this function should return the original image (except for the cropped out portions).", "_____no_output_____" ] ], [ [ "def imshow(image, ax=None, title=None):\n \"\"\"Imshow for Tensor.\"\"\"\n if ax is None:\n fig, ax = plt.subplots()\n \n # PyTorch tensors assume the color channel is the first dimension\n # but matplotlib assumes is the third dimension\n image = image.numpy().transpose((1, 2, 0))\n \n # Undo preprocessing\n mean = np.array([0.485, 0.456, 0.406])\n std = np.array([0.229, 0.224, 0.225])\n image = std * image + mean\n \n # Image needs to be clipped between 0 and 1 or it looks like noise when displayed\n image = np.clip(image, 0, 1)\n \n ax.imshow(image)\n \n return ax", "_____no_output_____" ], [ "%matplotlib inline\ntest_img_path = \"flowers/test/1/image_06754.jpg\"\nimg = process_image(test_img_path)\nimshow(img)", "_____no_output_____" ] ], [ [ "## Class Prediction\n\nOnce you can get images in the correct format, it's time to write a function for making predictions with your model. A common practice is to predict the top 5 or so (usually called top-$K$) most probable classes. You'll want to calculate the class probabilities then find the $K$ largest values.\n\nTo get the top $K$ largest values in a tensor use [`x.topk(k)`](http://pytorch.org/docs/master/torch.html#torch.topk). This method returns both the highest `k` probabilities and the indices of those probabilities corresponding to the classes. You need to convert from these indices to the actual class labels using `class_to_idx` which hopefully you added to the model or from an `ImageFolder` you used to load the data ([see here](#Save-the-checkpoint)). 
Make sure to invert the dictionary so you get a mapping from index to class as well.\n\nAgain, this method should take a path to an image and a model checkpoint, then return the probabilities and classes.\n\n```python\nprobs, classes = predict(image_path, model)\nprint(probs)\nprint(classes)\n> [ 0.01558163 0.01541934 0.01452626 0.01443549 0.01407339]\n> ['70', '3', '45', '62', '55']\n```", "_____no_output_____" ] ], [ [ "from torch.autograd import Variable\n\ndef predict(image_path, model, topk=5):\n ''' Predict the class (or classes) of an image using a trained deep learning model.\n '''\n # load the image\n img = process_image(image_path)\n # process the data, send to GPU and make a fake batch dimension\n img = Variable(img, requires_grad=False)\n img = img.unsqueeze(0)\n img = img.to(device)\n \n # through model\n with torch.no_grad():\n logps = model(img)\n \n ps = torch.exp(logps) \n top_p, top_class = ps.topk(topk, dim = 1)\n return top_p, top_class", "_____no_output_____" ] ], [ [ "## Sanity Checking\n\nNow that you can use a trained model for predictions, check to make sure it makes sense. Even if the testing accuracy is high, it's always good to check that there aren't obvious bugs. Use `matplotlib` to plot the probabilities for the top 5 classes as a bar graph, along with the input image. It should look like this:\n\n<img src='assets/inference_example.png' width=300px>\n\nYou can convert from the class integer encoding to actual flower names with the `cat_to_name.json` file (should have been loaded earlier in the notebook). To show a PyTorch tensor as an image, use the `imshow` function defined above.", "_____no_output_____" ] ], [ [ "# TODO: Display an image along with the top 5 classes\nmodel_reload.to(device)\n\nreal_img_path = './real_test.jpg'\n\nprobs, classes = predict(real_img_path, model_reload)", "_____no_output_____" ], [ "# reverse the class to indice map\ninv_map = {v: k for k, v in model_reload.class_to_idx.items()}", "_____no_output_____" ], [ "# Move result back to cpu\nprobs_cpu = probs.cpu()\nclasses_cpu = classes.cpu()\n\n# Show the image\nreal_img = process_image(real_img_path)\nimshow(real_img)\n\nclass_name = []\nfor i in np.nditer(classes_cpu.numpy()):\n class_name.append(cat_to_name[inv_map.get(int(i))])\n", "_____no_output_____" ], [ "plt.bar(class_name, probs_cpu.view(5))\nplt.show()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
cb9fbe7ece4b3e8e1ff376e852bac7fe1f1f5b5e
16,316
ipynb
Jupyter Notebook
session8/complete/session8.ipynb
sashafklein/programming-blockchain
b1073fb1830ab135a90492563762efd09e2d67e3
[ "BSD-2-Clause" ]
null
null
null
session8/complete/session8.ipynb
sashafklein/programming-blockchain
b1073fb1830ab135a90492563762efd09e2d67e3
[ "BSD-2-Clause" ]
null
null
null
session8/complete/session8.ipynb
sashafklein/programming-blockchain
b1073fb1830ab135a90492563762efd09e2d67e3
[ "BSD-2-Clause" ]
1
2021-11-22T06:51:36.000Z
2021-11-22T06:51:36.000Z
31.256705
460
0.589238
[ [ [ "############## PLEASE RUN THIS CELL FIRST! ###################\n\n# import everything and define a test runner function\nfrom importlib import reload\nfrom helper import run\nimport bloomfilter, network", "_____no_output_____" ], [ "# Example Bloom Filter\nfrom helper import hash256\nbit_field_size = 10\nbit_field = [0] * bit_field_size\nh256 = hash256(b'hello world')\nbit = int.from_bytes(h256, 'big') % bit_field_size\nbit_field[bit] = 1\nprint(bit_field)", "[0, 0, 0, 0, 0, 0, 0, 0, 0, 1]\n" ], [ "# Example Bloom Filter 2\nfrom helper import hash256\nbit_field_size = 10\nbit_field = [0] * bit_field_size\nh = hash256(b'hello world')\nbit = int.from_bytes(h, 'big') % bit_field_size\nbit_field[bit] = 1\nh = hash256(b'goodbye')\nbit = int.from_bytes(h, 'big') % bit_field_size\nbit_field[bit] = 1\nprint(bit_field)", "[0, 0, 1, 0, 0, 0, 0, 0, 0, 1]\n" ], [ "# Example Bloom Filter 3\nfrom helper import hash160, hash256\nbit_field_size = 10\nbit_field = [0] * bit_field_size\nphrase1 = b'hello world'\nh1 = hash256(phrase1)\nbit1 = int.from_bytes(h1, 'big') % bit_field_size\nbit_field[bit1] = 1\nh2 = hash160(phrase1)\nbit2 = int.from_bytes(h2, 'big') % bit_field_size\nbit_field[bit2] = 1\nphrase2 = b'goodbye'\nh1 = hash256(phrase2)\nbit1 = int.from_bytes(h1, 'big') % bit_field_size\nbit_field[bit1] = 1\nh2 = hash160(phrase2)\nbit2 = int.from_bytes(h2, 'big') % bit_field_size\nbit_field[bit2] = 1\nprint(bit_field)", "[1, 1, 1, 0, 0, 0, 0, 0, 0, 1]\n" ], [ "# Example BIP0037 Bloom Filter\nfrom helper import murmur3\nfrom bloomfilter import BIP37_CONSTANT\nfield_size = 2\nnum_functions = 2\ntweak = 42\nbit_field_size = field_size * 8\nbit_field = [0] * bit_field_size\nfor phrase in (b'hello world', b'goodbye'):\n for i in range(num_functions):\n seed = i * BIP37_CONSTANT + tweak\n h = murmur3(phrase, seed=seed)\n bit = h % bit_field_size\n bit_field[bit] = 1\nprint(bit_field)", "[0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0]\n" ] ], [ [ "### Exercise 1\nGiven a Bloom Filter with these parameters: size=10, function count=5, tweak=99, which bits are set after adding these items?\n\n* `b'Hello World'`\n* `b'Goodbye!'`\n", "_____no_output_____" ] ], [ [ "# Exercise 1\n\nfrom bloomfilter import BIP37_CONSTANT\nfrom helper import murmur3, bit_field_to_bytes\nfield_size = 10\nfunction_count = 5\ntweak = 99\nitems = (b'Hello World', b'Goodbye!')\n# bit_field_size is 8 * field_size\nbit_field_size = field_size * 8\n# create a bit field with the appropriate size\nbit_field = [0] * bit_field_size\n# for each item you want to add to the filter\nfor item in items:\n # iterate function_count number of times\n for i in range(function_count):\n # BIP0037 spec seed is i*BIP37_CONSTANT + tweak\n seed = i * BIP37_CONSTANT + tweak\n # get the murmur3 hash given that seed\n h = murmur3(item, seed=seed)\n # set the bit to be h mod the bit_field_size\n bit = h % bit_field_size\n # set the bit_field at the index bit to be 1\n bit_field[bit] = 1\n# print the bit field converted to bytes using bit_field_to_bytes in hex\nprint(bit_field_to_bytes(bit_field).hex())", "4000600a080000010940\n" ] ], [ [ "### Exercise 2\n\n\n\n\n#### Make [this test](/edit/session8/bloomfilter.py) pass: `bloomfilter.py:BloomFilterTest:test_add`", "_____no_output_____" ] ], [ [ "# Exercise 2\n\nreload(bloomfilter)\nrun(bloomfilter.BloomFilterTest('test_add'))", ".\n----------------------------------------------------------------------\nRan 1 test in 0.003s\n\nOK\n" ] ], [ [ "### Exercise 3\n\n\n\n\n#### Make [this 
test](/edit/session8/bloomfilter.py) pass: `bloomfilter.py:BloomFilterTest:test_filterload`", "_____no_output_____" ] ], [ [ "# Exercise 3\n\nreload(bloomfilter)\nrun(bloomfilter.BloomFilterTest('test_filterload'))", ".\n----------------------------------------------------------------------\nRan 1 test in 0.005s\n\nOK\n" ] ], [ [ "### Exercise 4\nDo the following:\n\n* Connect to a testnet node\n* Load a filter for your testnet address\n* Send a request for transactions from the block which had your previous testnet transaction\n* Receive the merkleblock and tx messages.\n", "_____no_output_____" ] ], [ [ "# Exercise 4\n\nfrom bloomfilter import BloomFilter\nfrom ecc import PrivateKey\nfrom helper import decode_base58, hash256, little_endian_to_int\nfrom merkleblock import MerkleBlock\nfrom network import SimpleNode, GetDataMessage, FILTERED_BLOCK_DATA_TYPE\nfrom tx import Tx\nblock_hash = bytes.fromhex('00000000000129fc37fde810db09f033014e501595f8560dcdb2e86756986ee3')\npassphrase = b'Jimmy Song'\nsecret = little_endian_to_int(hash256(passphrase))\nprivate_key = PrivateKey(secret=secret)\naddr = private_key.point.address(testnet=True)\nprint(addr)\nfilter_size = 30\nfilter_num_functions = 5\nfilter_tweak = 90210\n# get the hash160 of the address using decode_base58\nh160 = decode_base58(addr)\n# create a bloom filter using the filter_size, filter_num_functions and filter_tweak above\nbf = BloomFilter(filter_size, filter_num_functions, filter_tweak)\n# add the h160 to the bloom filter\nbf.add(h160)\n# connect to testnet.programmingbitcoin.com in testnet mode\nnode = SimpleNode('testnet.programmingbitcoin.com', testnet=True)\n# complete the handshake\nnode.handshake()\n# send the filterload message\nnode.send(bf.filterload())\n# create a getdata message\ngetdata = GetDataMessage()\n# add_data (FILTERED_BLOCK_DATA_TYPE, block_hash) to request the block\ngetdata.add_data(FILTERED_BLOCK_DATA_TYPE, block_hash)\n# send the getdata message\nnode.send(getdata)\n# wait for the merkleblock command\nmb = node.wait_for(MerkleBlock)\n# check that the merkle block's hash is the same as the block hash\nif mb.hash() != block_hash:\n raise RuntimeError('Wrong Merkle Block')\n# check that the merkle block is valid\nif not mb.is_valid():\n raise RuntimeError('Invalid Merkle Block')\n# loop through the tx hashes we are expecting using proved_txs\nfor tx_hash in mb.proved_txs():\n # wait for the tx command\n tx_obj = node.wait_for(Tx)\n # check that the tx hash is the same\n if tx_obj.hash() != tx_hash:\n raise RuntimeError('Wrong transaction')\n # print the transaction serialization in hex\n print(tx_obj.serialize().hex())", "mseRGXB89UTFVkWJhTRTzzZ9Ujj4ZPbGK5\n0100000001ca4683960a9c21c0fb6b1d284fc5fe86509c773adf912eee4692859304ce0fb0000000006a47304402200d4c054deca1e76347bd336fbc6bc0132aa2e4a2aafc0792c8a1aa23ec6ed1af0220720444626b807f7c77a89aad4bb0a78ae9c5d9adea296e8e22e66a1681393b480121031dbe3aff7b9ad64e2612b8b15e9f5e4a3130663a526df91abfb7b1bd16de5d6effffffff0280969800000000001976a914850af0029eb376691c3eef244c25eceb4e50c50388acefece184000000001976a9146e13971913b9aa89659a9f53d327baa8826f2d7588ac00000000\n" ] ], [ [ "### Exercise 5\n\n\n\n\n#### Make [this test](/edit/session8/network.py) pass: `network.py:SimpleNodeTest:test_get_filtered_txs`", "_____no_output_____" ] ], [ [ "# Exercise 5\n\nreload(network)\nrun(network.SimpleNodeTest('test_get_filtered_txs'))", ".\n----------------------------------------------------------------------\nRan 1 test in 0.052s\n\nOK\n" ] ], [ [ "### Exercise 6\nYou have been 
sent some unknown amount of testnet bitcoins to your address.\n\nSend all of it back (minus fees) to `mqYz6JpuKukHzPg94y4XNDdPCEJrNkLQcv` using only the networking protocol.\n\nThis should be a 1 input, 1 output transaction.\n\nRemember turn on logging in `SimpleNode` if you need to debug\n", "_____no_output_____" ] ], [ [ "# Exercise 6\n\nfrom time import sleep\nfrom block import Block\nfrom bloomfilter import BloomFilter\nfrom ecc import PrivateKey\nfrom helper import decode_base58, hash160, hash256, little_endian_to_int\nfrom merkleblock import MerkleBlock\nfrom network import GetHeadersMessage, HeadersMessage, SimpleNode\nfrom script import p2pkh_script\nfrom tx import Tx, TxIn, TxOut\nstart_block_hex = '000000000000011f34db8b77b66d78abcf2e242299c8aed30dd915911c4fa97f'\nstart_block = bytes.fromhex(start_block_hex)\nend_block_hex = '000000000000000bf70f0f61df923b0ac97cc578240490dea5e9c35382f9eef0'\nend_block = bytes.fromhex(end_block_hex)\npassphrase = b'Jimmy Song'\nsecret = little_endian_to_int(hash256(passphrase))\nprivate_key = PrivateKey(secret=secret)\naddr = private_key.point.address(testnet=True)\nh160 = decode_base58(addr)\ntarget_address = 'mqYz6JpuKukHzPg94y4XNDdPCEJrNkLQcv'\ntarget_h160 = decode_base58(target_address)\ntarget_script = p2pkh_script(target_h160)\nbloom_filter = BloomFilter(30, 5, 90210)\nfee = 5000 # fee in satoshis\n# connect to testnet.programmingbitcoin.com in testnet mode\nnode = SimpleNode('testnet.programmingbitcoin.com', testnet=True)\n# add the h160 to the bloom filter\nbloom_filter.add(h160)\n# complete the handshake\nnode.handshake()\n# send the 'filterload' message from the bloom filter\nnode.send(bloom_filter.filterload())\n# create GetHeadersMessage with the start_block as the start_block and end_block as the end block\ngetheaders = GetHeadersMessage(start_block=start_block, end_block=end_block)\n# send a getheaders message\nnode.send(getheaders)\n# wait for the headers message\nheaders = node.wait_for(HeadersMessage)\n# check that the headers are valid\nif not headers.is_valid():\n raise RuntimeError\n# get all the block hashes from the headers.headers array\nblock_hashes = [h.hash() for h in headers.headers]\n# get the filtered transactions from these blocks\nfiltered_txs = node.get_filtered_txs(block_hashes)\n# loop through each filtered transaction\nfor tx_obj in filtered_txs:\n # use find_utxos to get utxos that belong to our address\n utxos = tx_obj.find_utxos(addr)\n # if we have any utxos, break\n if len(utxos) > 0:\n break\n# prev_tx, prev_index, prev_amount are what we get in each utxo\nprev_tx, prev_index, prev_amount = utxos[0]\n# create tx_in\ntx_in = TxIn(prev_tx, prev_index)\n# calculate the output amount (prev_amount - fee)\noutput_amount = prev_amount - fee\n# create tx_out\ntx_out = TxOut(output_amount, target_script)\n# create transaction on testnet\ntx_obj = Tx(1, [tx_in], [tx_out], 0, testnet=True)\n# sign the one input we have\ntx_obj.sign_input(0, private_key)\n# serialize and hex to see what it looks like\nprint(tx_obj.serialize().hex())\n# send this signed transaction on the network\nnode.send(tx_obj)\n# wait a sec so this message goes through to the other node sleep(1)\nsleep(1)\n# now check to see if the tx has been accepted using is_tx_accepted()\nif node.is_tx_accepted(tx_obj):\n print('success!')\n print(tx_obj.id())", 
"01000000011b661c09f0e619cf1f634e8d60945fd862d1ff93937a5e9eed0b34d3beb1ae33000000006a473044022011332474853cc2bb59f563de81b61ab25a1d0d835896d4ff7fc8bb487cf4998202206f2e6120b3945f4502d8bbc24f9ed7ef6ff488ba714cb5eb73983c085466fa9e012103dc585d46cfca73f3a75ba1ef0c5756a21c1924587480700c6eb64e3f75d22083ffffffff01f8829800000000001976a9146e13971913b9aa89659a9f53d327baa8826f2d7588ac00000000\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb9fcc6e487830fcc15046f5b6720208f2bb8cec
1,658
ipynb
Jupyter Notebook
docs/contents/tools/files/file_h5/to_mdtraj_HDF5TrajectoryFile.ipynb
dprada/molsysmt
83f150bfe3cfa7603566a0ed4aed79d9b0c97f5d
[ "MIT" ]
null
null
null
docs/contents/tools/files/file_h5/to_mdtraj_HDF5TrajectoryFile.ipynb
dprada/molsysmt
83f150bfe3cfa7603566a0ed4aed79d9b0c97f5d
[ "MIT" ]
null
null
null
docs/contents/tools/files/file_h5/to_mdtraj_HDF5TrajectoryFile.ipynb
dprada/molsysmt
83f150bfe3cfa7603566a0ed4aed79d9b0c97f5d
[ "MIT" ]
null
null
null
20.219512
84
0.543426
[ [ [ "# To mdtraj.HDF5TrajectoryFile", "_____no_output_____" ] ], [ [ "from molsysmt.tools import file_h5", "Warning: importing 'simtk.openmm' is deprecated. Import 'openmm' instead.\n" ], [ "#file_h5.to_mdtraj_HDF5TrajectoryFile(item)", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ] ]
cb9fd2e6dec9922f4413cf70a08aae7d473562cf
7,935
ipynb
Jupyter Notebook
.ipynb_checkpoints/expanding develoapment-checkpoint.ipynb
yonatanMedan/fastIndex
6f0de1be2a4e8a4da5934904ca8f58152755dac8
[ "MIT" ]
1
2019-04-17T05:15:31.000Z
2019-04-17T05:15:31.000Z
.ipynb_checkpoints/expanding develoapment-checkpoint.ipynb
yonatanMedan/fastIndex
6f0de1be2a4e8a4da5934904ca8f58152755dac8
[ "MIT" ]
null
null
null
.ipynb_checkpoints/expanding develoapment-checkpoint.ipynb
yonatanMedan/fastIndex
6f0de1be2a4e8a4da5934904ca8f58152755dac8
[ "MIT" ]
null
null
null
33.622881
830
0.525394
[ [ [ "import fastindex", "_____no_output_____" ], [ "import pandas as pd", "_____no_output_____" ], [ "fastindex.fast_index(pd)", "_____no_output_____" ], [ "df = pd.read_csv(\"../selection tutorial/example_data/fifa19.csv\")", "_____no_output_____" ], [ "df.set_index([\"Name\",\"Age\"],inplace=True)", "_____no_output_____" ], [ "%time df.fidx.slice(Name=[\"jonn\"],Age=32)", "CPU times: user 1.27 ms, sys: 80 µs, total: 1.35 ms\nWall time: 1.27 ms\n" ], [ "%debug", "> \u001b[0;32m/home/yonatan/projects/pandas_fastIndex/fastindex/main.py\u001b[0m(46)\u001b[0;36mf_slice\u001b[0;34m()\u001b[0m\n\u001b[0;32m 44 \u001b[0;31m \u001b[0;32mdef\u001b[0m \u001b[0mf_slice\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0m__validate\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0mvargs\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 45 \u001b[0;31m \u001b[0;31m# return the geographic center point of this DataFrame\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m---> 46 \u001b[0;31m \u001b[0mslc\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcreate_slice\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_df\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mindex\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnames\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0mvargs\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0m__validate\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0m__validate\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 47 \u001b[0;31m \u001b[0;32mif\u001b[0m \u001b[0misinstance\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_df\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mindex\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpd\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcore\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mindex\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mMultiIndex\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 48 \u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mslc\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\nipdb> q\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code" ] ]
cb9fd36cdeda5f893827a001606d6f76e9bd5a0b
50,101
ipynb
Jupyter Notebook
KNN_model.ipynb
sanureet/machine-learning-challenge
a37666ac7dffc7db65390dd7d69817911c5cc6e4
[ "ADSL" ]
null
null
null
KNN_model.ipynb
sanureet/machine-learning-challenge
a37666ac7dffc7db65390dd7d69817911c5cc6e4
[ "ADSL" ]
null
null
null
KNN_model.ipynb
sanureet/machine-learning-challenge
a37666ac7dffc7db65390dd7d69817911c5cc6e4
[ "ADSL" ]
null
null
null
49.901394
16,244
0.577074
[ [ [ "# Update sklearn to prevent version mismatches\n!pip install sklearn --upgrade", "Requirement already satisfied: sklearn in /Users/sanureetbhullar/opt/anaconda3/envs/PythonAdv/lib/python3.6/site-packages (0.0)\nRequirement already satisfied: scikit-learn in /Users/sanureetbhullar/opt/anaconda3/envs/PythonAdv/lib/python3.6/site-packages (from sklearn) (0.24.2)\nRequirement already satisfied: scipy>=0.19.1 in /Users/sanureetbhullar/opt/anaconda3/envs/PythonAdv/lib/python3.6/site-packages (from scikit-learn->sklearn) (1.5.2)\nRequirement already satisfied: joblib>=0.11 in /Users/sanureetbhullar/opt/anaconda3/envs/PythonAdv/lib/python3.6/site-packages (from scikit-learn->sklearn) (1.0.1)\nRequirement already satisfied: numpy>=1.13.3 in /Users/sanureetbhullar/opt/anaconda3/envs/PythonAdv/lib/python3.6/site-packages (from scikit-learn->sklearn) (1.19.2)\nRequirement already satisfied: threadpoolctl>=2.0.0 in /Users/sanureetbhullar/opt/anaconda3/envs/PythonAdv/lib/python3.6/site-packages (from scikit-learn->sklearn) (2.2.0)\n" ], [ "# install joblib. This will be used to save your model. \n# Restart your kernel after installing \n!pip install joblib", "Requirement already satisfied: joblib in /Users/sanureetbhullar/opt/anaconda3/envs/PythonAdv/lib/python3.6/site-packages (1.0.1)\r\n" ], [ "import pandas as pd", "_____no_output_____" ] ], [ [ "# Read the CSV and Perform Basic Data Cleaning", "_____no_output_____" ] ], [ [ "df = pd.read_csv(\"exoplanet_data.csv\")\n# Drop the null columns where all values are null\ndf = df.dropna(axis='columns', how='all')\n# Drop the null rows\ndf = df.dropna()\ndf.head()", "_____no_output_____" ] ], [ [ "# Select your features (columns)", "_____no_output_____" ] ], [ [ "# Set features. This will also be used as your x values.\nselected_features = df[['koi_period', 'koi_time0bk', 'koi_slogg', 'koi_srad', 'koi_kepmag']]\n\ndf.head()", "_____no_output_____" ] ], [ [ "# Create a Train Test Split\nUse koi_disposition for the y values", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelEncoder, MinMaxScaler\nfrom tensorflow.keras.utils import to_categorical\n\n\n# assign x and y values\nX = df.drop(\"koi_disposition\", axis=1)\ny = df[\"koi_disposition\"]\n\n# split training and testing data\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)\n", "_____no_output_____" ], [ "X_train.head()", "_____no_output_____" ] ], [ [ "# Pre-processing\nScale the data using the MinMaxScaler and perform some feature selection\n\n", "_____no_output_____" ] ], [ [ "# Scale your data\nfrom sklearn.preprocessing import StandardScaler\n\n# Create a StandardScater model and fit it to the training data\n\nX_scaler = MinMaxScaler().fit(X_train)\n# X_scaler = StandardScaler().fit(X_train)\n\nX_train_scaled = X_scaler.transform(X_train)\nX_test_scaled = X_scaler.transform(X_test)\n# label-encode data\n\nlabel_encoder = LabelEncoder()\nlabel_encoder.fit(y_train)\nencoded_y_train = label_encoder.transform(y_train)\nencoded_y_test = label_encoder.transform(y_test)\n\n", "_____no_output_____" ] ], [ [ "# Train the Model", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\nfrom sklearn.neighbors import KNeighborsClassifier\n\ntrain_scores = []\ntest_scores = []\nfor k in range(1, 20, 2):\n knn = KNeighborsClassifier(n_neighbors=k)\n knn.fit(X_train_scaled, encoded_y_train)\n train_score = knn.score(X_train_scaled, encoded_y_train)\n test_score = knn.score(X_test_scaled, 
encoded_y_test)\n train_scores.append(train_score)\n test_scores.append(test_score)\n print(f\"k: {k}, Train/Test Score: {train_score:.3f}/{test_score:.3f}\")\n \n \nplt.plot(range(1, 20, 2), train_scores, marker='o')\nplt.plot(range(1, 20, 2), test_scores, marker=\"x\")\nplt.xlabel(\"k neighbors\")\nplt.ylabel(\"Testing accuracy Score\")\nplt.show()", "k: 1, Train/Test Score: 1.000/0.787\nk: 3, Train/Test Score: 0.895/0.805\nk: 5, Train/Test Score: 0.871/0.811\nk: 7, Train/Test Score: 0.862/0.818\nk: 9, Train/Test Score: 0.858/0.820\nk: 11, Train/Test Score: 0.848/0.830\nk: 13, Train/Test Score: 0.848/0.830\nk: 15, Train/Test Score: 0.842/0.830\nk: 17, Train/Test Score: 0.839/0.830\nk: 19, Train/Test Score: 0.836/0.831\n" ], [ "knn = KNeighborsClassifier(n_neighbors=19)\nknn.fit(X_train_scaled, encoded_y_train)\npredictions = knn.predict(X_test_scaled)\nprint(predictions)\n\n\nprint(f\"k = 19: Training Data Score: {knn.score(X_train_scaled, encoded_y_train):.5f}\")\nprint(f\"k = 19: Testing Data Score: {knn.score(X_test_scaled, encoded_y_test):.5f}\")", "[0 2 2 ... 2 2 1]\nk = 19: Training Data Score: 0.83578\nk = 19: Testing Data Score: 0.83066\n" ], [ "# Classification report\nfrom sklearn.metrics import classification_report\nprint(classification_report(encoded_y_test, predictions,\n target_names = [\"Confirmed\", \"False Positive\", \"Candidate\"]))\n", " precision recall f1-score support\n\n Confirmed 0.71 0.49 0.58 404\nFalse Positive 0.63 0.80 0.70 435\n Candidate 0.99 1.00 0.99 909\n\n accuracy 0.83 1748\n macro avg 0.78 0.76 0.76 1748\n weighted avg 0.83 0.83 0.83 1748\n\n" ] ], [ [ "# Save the Model", "_____no_output_____" ] ], [ [ "import joblib\nfilename = 'KNN_model.sav'\njoblib.dump(knn, filename)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
cb9fe57529a2307304d67ae64d93093705fe7ec0
33,247
ipynb
Jupyter Notebook
numpy_demo.ipynb
joyc/data-exam
d9c126ab9d5dd0ae80f50323cb1c16587388f527
[ "MIT" ]
null
null
null
numpy_demo.ipynb
joyc/data-exam
d9c126ab9d5dd0ae80f50323cb1c16587388f527
[ "MIT" ]
null
null
null
numpy_demo.ipynb
joyc/data-exam
d9c126ab9d5dd0ae80f50323cb1c16587388f527
[ "MIT" ]
null
null
null
18.217534
159
0.412488
[ [ [ "import numpy as np", "_____no_output_____" ], [ "arr1 = np.array([[1, 2, 3]])\narr2 = np.array([[4, 5, 6]])\narr3 = np.concatenate((arr1, arr2), axis=0)\narr3", "_____no_output_____" ], [ "np.random.uniform(0.0, 5.0, size=(2, 3))", "_____no_output_____" ] ], [ [ "### 同じ要素の数列を作る", "_____no_output_____" ] ], [ [ "np.zeros(3)", "_____no_output_____" ], [ "np.zeros((2, 3))", "_____no_output_____" ], [ "np.ones(2)", "_____no_output_____" ], [ "np.ones((2, 3))", "_____no_output_____" ] ], [ [ "### 单位行列", "_____no_output_____" ] ], [ [ "# 対角要素\nnp.eye(3)", "_____no_output_____" ], [ "# 指定要素\nnp.full(3, 3.14)", "_____no_output_____" ], [ "np.full((2, 4), np.pi)", "_____no_output_____" ], [ "np.nan", "_____no_output_____" ], [ "np.array([1, 2, np.nan])", "_____no_output_____" ] ], [ [ "### 範囲指定で均等割りデータを作る", "_____no_output_____" ] ], [ [ "np.linspace(0, 1, 5)", "_____no_output_____" ], [ "np.linspace(0, np.pi, 20)", "_____no_output_____" ] ], [ [ " ### 要素間の差分", "_____no_output_____" ] ], [ [ "l = np.array([2, 2, 6, 1, 3])\nnp.diff(l)", "_____no_output_____" ] ], [ [ "### 連結", "_____no_output_____" ] ], [ [ "a = np.array([1, 5, 4])\na1 = a\nnp.concatenate([a, a1])", "_____no_output_____" ], [ "b = np.array([[1, 2, 8], [4, 5, 8]])\nb1 = np.array([[10], [20]])\nb1", "_____no_output_____" ], [ "np.concatenate([b, b1], axis=1)", "_____no_output_____" ], [ "np.hstack([b, b1])", "_____no_output_____" ], [ "b2 = np.array([30, 60, 45])\nb2", "_____no_output_____" ], [ "b3 = np.vstack([b, b2])\nb3", "_____no_output_____" ] ], [ [ "### 分割", "_____no_output_____" ] ], [ [ "first, second = np.hsplit(b3, [2])", "_____no_output_____" ], [ "first", "_____no_output_____" ], [ "second", "_____no_output_____" ], [ "first1, second1 = np.vsplit(b3, [2])\nfirst1", "_____no_output_____" ], [ "second1", "_____no_output_____" ] ], [ [ "### 転置", "_____no_output_____" ] ], [ [ "b", "_____no_output_____" ], [ "b.T", "_____no_output_____" ] ], [ [ "### 次元追加", "_____no_output_____" ] ], [ [ "a", "_____no_output_____" ], [ "a[np.newaxis, :]", "_____no_output_____" ], [ "a[:, np.newaxis]", "_____no_output_____" ] ], [ [ "### グリッドデータの生成", "_____no_output_____" ] ], [ [ "m = np.arange(0, 4)\nm", "_____no_output_____" ], [ "n = np.arange(4, 7)\nn", "_____no_output_____" ], [ "xx, yy = np.meshgrid(m, n)\nxx", "_____no_output_____" ], [ "yy", "_____no_output_____" ], [ "l = np.meshgrid(m, n)\nl", "_____no_output_____" ] ], [ [ "## 4.1.3 Numpyの各機能", "_____no_output_____" ] ], [ [ "import numpy as np\na = np.arange(3)\nb = np.arange(-3, 3).reshape((2, 3))\nc = np.arange(1, 7).reshape((2, 3))\nd = np.arange(6).reshape((3, 2))\ne = np.linspace(-1, 1, 10)\nprint(\"a:\", a)\nprint(\"b:\", b)\nprint(\"c:\", c)\nprint(\"d:\", d)\nprint(\"e:\", e)", "a: [0 1 2]\nb: [[-3 -2 -1]\n [ 0 1 2]]\nc: [[1 2 3]\n [4 5 6]]\nd: [[0 1]\n [2 3]\n [4 5]]\ne: [-1. -0.77777778 -0.55555556 -0.33333333 -0.11111111 0.11111111\n 0.33333333 0.55555556 0.77777778 1. 
]\n" ], [ "print(\"a:\", a.shape)\nprint(\"b:\", b.shape)\nprint(\"c:\", c.shape)\nprint(\"d:\", d.shape)\nprint(\"e:\", e.shape)", "a: (3,)\nb: (2, 3)\nc: (2, 3)\nd: (3, 2)\ne: (10,)\n" ] ], [ [ "### ユニバーサルファンクション", "_____no_output_____" ] ], [ [ "li = [[-3, -2, -1], [0, 1, 2]]\nnew = []\nfor i, j in enumerate(li):\n new.append([])\n for k in j:\n new[i].append(abs(k))\nnew", "_____no_output_____" ], [ "np.sin(e)", "_____no_output_____" ], [ "np.cos(e)", "_____no_output_____" ], [ "np.log(a)", "/Users/charlielee/anaconda3/envs/data/lib/python3.7/site-packages/ipykernel_launcher.py:1: RuntimeWarning: divide by zero encountered in log\n \"\"\"Entry point for launching an IPython kernel.\n" ], [ "np.log10(c)", "_____no_output_____" ], [ "np.exp(a)", "_____no_output_____" ] ], [ [ "### ブロードキャスト", "_____no_output_____" ] ], [ [ "a", "_____no_output_____" ], [ "a + 10", "_____no_output_____" ], [ "b", "_____no_output_____" ], [ "a + b", "_____no_output_____" ], [ "a1 = a[:, np.newaxis]\na1", "_____no_output_____" ], [ "a + a1", "_____no_output_____" ], [ "c", "_____no_output_____" ], [ "c - np.mean(c)", "_____no_output_____" ], [ "b", "_____no_output_____" ], [ "b * 2", "_____no_output_____" ], [ "b ** 3", "_____no_output_____" ], [ "b - a ", "_____no_output_____" ], [ "a * b", "_____no_output_____" ], [ "a / c", "_____no_output_____" ], [ "c / a", "/Users/charlielee/anaconda3/envs/data/lib/python3.7/site-packages/ipykernel_launcher.py:1: RuntimeWarning: divide by zero encountered in true_divide\n \"\"\"Entry point for launching an IPython kernel.\n" ] ], [ [ "### ドット積", "_____no_output_____" ] ], [ [ "np.dot(b, a)", "_____no_output_____" ], [ "b @ a", "_____no_output_____" ], [ "b @ d", "_____no_output_____" ], [ "d @ b", "_____no_output_____" ] ], [ [ "### 判定・論理値", "_____no_output_____" ] ], [ [ "a > 1", "_____no_output_____" ], [ "b > 0", "_____no_output_____" ], [ "np.count_nonzero(b > 0)", "_____no_output_____" ], [ "np.sum(b > 0)", "_____no_output_____" ], [ "np.any(b > 0)", "_____no_output_____" ], [ "np.all(b > 0)", "_____no_output_____" ], [ "b[b > 0]", "_____no_output_____" ], [ "b == c", "_____no_output_____" ], [ "a == b", "_____no_output_____" ], [ "(b == c) | (a == b)", "_____no_output_____" ], [ "b[(b == c) | (a == b)]", "_____no_output_____" ], [ "np.allclose(b, c, atol=10)", "_____no_output_____" ] ], [ [ "### 関数とメソッド", "_____no_output_____" ] ], [ [ "np.sum(a)", "_____no_output_____" ], [ "a.sum()", "_____no_output_____" ], [ "arrays = [np.random.randn(3, 4) for _ in range(10)]\narrays", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
cb9febedd44bacb3adefa704d460acf3f7851cae
17,100
ipynb
Jupyter Notebook
Sprint_Challenge.ipynb
extrajp2014/DS-Unit-3-Sprint-3-Big-Data
a80c3756b2ee1285cbeebced868ea299b245d8f4
[ "MIT" ]
null
null
null
Sprint_Challenge.ipynb
extrajp2014/DS-Unit-3-Sprint-3-Big-Data
a80c3756b2ee1285cbeebced868ea299b245d8f4
[ "MIT" ]
null
null
null
Sprint_Challenge.ipynb
extrajp2014/DS-Unit-3-Sprint-3-Big-Data
a80c3756b2ee1285cbeebced868ea299b245d8f4
[ "MIT" ]
null
null
null
30.866426
743
0.429298
[ [ [ "import dask.dataframe as dd", "_____no_output_____" ], [ "df = dd.read_csv('/home/ec2-user/SageMaker/*.csv')", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ], [ "# Load the five csv files into one Dask Dataframe. It should have a length of 1956 rows, and 5 columns.\nprint(len(df))\nprint(len(df.columns))", "1956\n5\n" ], [ "# Use the Dask Dataframe to compute the counts of spam (1005 comments) \n# versus the counts of legitimate comments (951).\nspam_df = df[df.CLASS == 1]\nnonspam_df = df[df.CLASS == 0]\nprint(\"Spam Count:\",len(spam_df))\nprint(\"Nonspam Count:\",len(nonspam_df))", "Spam Count: 1005\nNonspam Count: 951\n" ], [ "# Spammers often tell people to check out their stuff! \n# When the comments are converted to lowercase, then 461 spam comments \nprint(spam_df.CONTENT.str.lower().str.contains('check').compute().value_counts()[1])\n\n# contain the word \"check\", versus only 19 legitimate comments which contain \n# the word \"check.\" Use the Dask Dataframe to compute these counts.\nprint(nonspam_df.CONTENT.str.lower().str.contains('check').compute().value_counts()[1])", "461\n19\n" ], [ "# Stretch\ndf.groupby('CLASS').count().head(2)", "_____no_output_____" ], [ "df.groupby('CLASS').count().describe().head(8)", "_____no_output_____" ], [ "df.groupby(['AUTHOR','CLASS']).count().describe().head(8)", "_____no_output_____" ] ], [ [ "# Part 2. Big data options\nYou've been introduced to a variety of platforms (AWS SageMaker, AWS EMR, Databricks), libraries (Numba, Dask, MapReduce, Spark), and languages (Python, SQL, Scala, Java) that can \"scale up\" or \"scale out\" for faster processing of big data.\n\nWrite a paragraph comparing some of these technology options. For example, you could describe which technology you may personally prefer to use, in what circumstances, for what reasons.\n\n(You can add your paragraph as a Markdown cell at the bottom of your SageMaker Notebook.)", "_____no_output_____" ], [ "For small dataset, I would use python to evaluate such data in local jupyter notebook or Google Colab environment. For medium size to large dataset that requires greater computational power or memory, I would resort to AWS SageMaker to scale up. Additionally, I would utilize Numba and Dask library to optimize the execution of codes and spread the workload respectively. With big data that exceeds the technical specs of a single AWS server can provide, it is best to scale out by using Spark framework to spread the workload across multiple machines. In this case, I would use Scala language to evaluate such big data in Spark since it is optimized for Scala. If needed, I can also use SQL, Java, or Python in the Spark framework.", "_____no_output_____" ] ] ]
[ "code", "markdown" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ] ]
cb9fee25d68afdf5ec845085c3bd80325ac84f80
65,490
ipynb
Jupyter Notebook
Course 1 - Part 4 - Lesson 2 - Notebook.ipynb
Mathipe98/Tensorflow-Practice
1f438fa203988ebe56c69c1bfdb08bed62dba5d4
[ "MIT" ]
null
null
null
Course 1 - Part 4 - Lesson 2 - Notebook.ipynb
Mathipe98/Tensorflow-Practice
1f438fa203988ebe56c69c1bfdb08bed62dba5d4
[ "MIT" ]
null
null
null
Course 1 - Part 4 - Lesson 2 - Notebook.ipynb
Mathipe98/Tensorflow-Practice
1f438fa203988ebe56c69c1bfdb08bed62dba5d4
[ "MIT" ]
null
null
null
62.075829
7,076
0.661002
[ [ [ "<a href=\"https://colab.research.google.com/github/lmoroney/dlaicourse/blob/master/Course%201%20-%20Part%204%20-%20Lesson%202%20-%20Notebook.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "##### Copyright 2019 The TensorFlow Authors.", "_____no_output_____" ] ], [ [ "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "_____no_output_____" ] ], [ [ "# Beyond Hello World, A Computer Vision Example\nIn the previous exercise you saw how to create a neural network that figured out the problem you were trying to solve. This gave an explicit example of learned behavior. Of course, in that instance, it was a bit of overkill because it would have been easier to write the function Y=2x-1 directly, instead of bothering with using Machine Learning to learn the relationship between X and Y for a fixed set of values, and extending that for all values.\n\nBut what about a scenario where writing rules like that is much more difficult -- for example a computer vision problem? Let's take a look at a scenario where we can recognize different items of clothing, trained from a dataset containing 10 different types.", "_____no_output_____" ], [ "## Start Coding\n\nLet's start with our import of TensorFlow", "_____no_output_____" ] ], [ [ "import tensorflow as tf\nprint(tf.__version__)", "2.5.0\n" ] ], [ [ "The Fashion MNIST data is available directly in the tf.keras datasets API. You load it like this:", "_____no_output_____" ] ], [ [ "mnist = tf.keras.datasets.fashion_mnist", "_____no_output_____" ] ], [ [ "Calling load_data on this object will give you two sets of two lists, these will be the training and testing values for the graphics that contain the clothing items and their labels.\n", "_____no_output_____" ] ], [ [ "(training_images, training_labels), (test_images, test_labels) = mnist.load_data()", "_____no_output_____" ] ], [ [ "What does these values look like? Let's print a training image, and a training label to see...Experiment with different indices in the array. 
For example, also take a look at index 42...that's a a different boot than the one at index 0\n", "_____no_output_____" ] ], [ [ "import numpy as np\nnp.set_printoptions(linewidth=200)\nimport matplotlib.pyplot as plt\nplt.imshow(training_images[0])\nprint(training_labels[0])\nprint(training_images[0])", "9\n[[ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]\n [ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]\n [ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]\n [ 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 13 73 0 0 1 4 0 0 0 0 1 1 0]\n [ 0 0 0 0 0 0 0 0 0 0 0 0 3 0 36 136 127 62 54 0 0 0 1 3 4 0 0 3]\n [ 0 0 0 0 0 0 0 0 0 0 0 0 6 0 102 204 176 134 144 123 23 0 0 0 0 12 10 0]\n [ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 155 236 207 178 107 156 161 109 64 23 77 130 72 15]\n [ 0 0 0 0 0 0 0 0 0 0 0 1 0 69 207 223 218 216 216 163 127 121 122 146 141 88 172 66]\n [ 0 0 0 0 0 0 0 0 0 1 1 1 0 200 232 232 233 229 223 223 215 213 164 127 123 196 229 0]\n [ 0 0 0 0 0 0 0 0 0 0 0 0 0 183 225 216 223 228 235 227 224 222 224 221 223 245 173 0]\n [ 0 0 0 0 0 0 0 0 0 0 0 0 0 193 228 218 213 198 180 212 210 211 213 223 220 243 202 0]\n [ 0 0 0 0 0 0 0 0 0 1 3 0 12 219 220 212 218 192 169 227 208 218 224 212 226 197 209 52]\n [ 0 0 0 0 0 0 0 0 0 0 6 0 99 244 222 220 218 203 198 221 215 213 222 220 245 119 167 56]\n [ 0 0 0 0 0 0 0 0 0 4 0 0 55 236 228 230 228 240 232 213 218 223 234 217 217 209 92 0]\n [ 0 0 1 4 6 7 2 0 0 0 0 0 237 226 217 223 222 219 222 221 216 223 229 215 218 255 77 0]\n [ 0 3 0 0 0 0 0 0 0 62 145 204 228 207 213 221 218 208 211 218 224 223 219 215 224 244 159 0]\n [ 0 0 0 0 18 44 82 107 189 228 220 222 217 226 200 205 211 230 224 234 176 188 250 248 233 238 215 0]\n [ 0 57 187 208 224 221 224 208 204 214 208 209 200 159 245 193 206 223 255 255 221 234 221 211 220 232 246 0]\n [ 3 202 228 224 221 211 211 214 205 205 205 220 240 80 150 255 229 221 188 154 191 210 204 209 222 228 225 0]\n [ 98 233 198 210 222 229 229 234 249 220 194 215 217 241 65 73 106 117 168 219 221 215 217 223 223 224 229 29]\n [ 75 204 212 204 193 205 211 225 216 185 197 206 198 213 240 195 227 245 239 223 218 212 209 222 220 221 230 67]\n [ 48 203 183 194 213 197 185 190 194 192 202 214 219 221 220 236 225 216 199 206 186 181 177 172 181 205 206 115]\n [ 0 122 219 193 179 171 183 196 204 210 213 207 211 210 200 196 194 191 195 191 198 192 176 156 167 177 210 92]\n [ 0 0 74 189 212 191 175 172 175 181 185 188 189 188 193 198 204 209 210 210 211 188 188 194 192 216 170 0]\n [ 2 0 0 0 66 200 222 237 239 242 246 243 244 221 220 193 191 179 182 182 181 176 166 168 99 58 0 0]\n [ 0 0 0 0 0 0 0 40 61 44 72 41 35 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]\n [ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]\n [ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]]\n" ] ], [ [ "You'll notice that all of the values in the number are between 0 and 255. If we are training a neural network, for various reasons it's easier if we treat all values as between 0 and 1, a process called '**normalizing**'...and fortunately in Python it's easy to normalize a list like this without looping. You do it like this:", "_____no_output_____" ] ], [ [ "training_images = training_images / 255.0\ntest_images = test_images / 255.0", "_____no_output_____" ] ], [ [ "Now you might be wondering why there are 2 sets...training and testing -- remember we spoke about this in the intro? The idea is to have 1 set of data for training, and then another set of data...that the model hasn't yet seen...to see how good it would be at classifying values. 
After all, when you're done, you're going to want to try it out with data that it hadn't previously seen!", "_____no_output_____" ], [ "Let's now design the model. There's quite a few new concepts here, but don't worry, you'll get the hang of them. ", "_____no_output_____" ] ], [ [ "model = tf.keras.models.Sequential([tf.keras.layers.Flatten(), \n tf.keras.layers.Dense(128, activation=tf.nn.relu), \n tf.keras.layers.Dense(10, activation=tf.nn.softmax)])", "_____no_output_____" ] ], [ [ "**Sequential**: That defines a SEQUENCE of layers in the neural network\n\n**Flatten**: Remember earlier where our images were a square, when you printed them out? Flatten just takes that square and turns it into a 1 dimensional set.\n\n**Dense**: Adds a layer of neurons\n\nEach layer of neurons need an **activation function** to tell them what to do. There's lots of options, but just use these for now. \n\n**Relu** effectively means \"If X>0 return X, else return 0\" -- so what it does it it only passes values 0 or greater to the next layer in the network.\n\n**Softmax** takes a set of values, and effectively picks the biggest one, so, for example, if the output of the last layer looks like [0.1, 0.1, 0.05, 0.1, 9.5, 0.1, 0.05, 0.05, 0.05], it saves you from fishing through it looking for the biggest value, and turns it into [0,0,0,0,1,0,0,0,0] -- The goal is to save a lot of coding!\n", "_____no_output_____" ], [ "The next thing to do, now the model is defined, is to actually build it. You do this by compiling it with an optimizer and loss function as before -- and then you train it by calling **model.fit ** asking it to fit your training data to your training labels -- i.e. have it figure out the relationship between the training data and its actual labels, so in future if you have data that looks like the training data, then it can make a prediction for what that data would look like. ", "_____no_output_____" ] ], [ [ "model.compile(optimizer = tf.optimizers.Adam(),\n loss = 'sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\nmodel.fit(training_images, training_labels, epochs=5)", "Epoch 1/5\n1875/1875 [==============================] - 5s 2ms/step - loss: 0.5006 - accuracy: 0.8244\nEpoch 2/5\n1875/1875 [==============================] - 6s 3ms/step - loss: 0.3749 - accuracy: 0.8651\nEpoch 3/5\n1875/1875 [==============================] - 5s 3ms/step - loss: 0.3347 - accuracy: 0.8777\nEpoch 4/5\n1875/1875 [==============================] - 6s 3ms/step - loss: 0.3115 - accuracy: 0.8867\nEpoch 5/5\n1875/1875 [==============================] - 5s 3ms/step - loss: 0.2942 - accuracy: 0.8920\n" ] ], [ [ "Once it's done training -- you should see an accuracy value at the end of the final epoch. It might look something like 0.9098. This tells you that your neural network is about 91% accurate in classifying the training data. I.E., it figured out a pattern match between the image and the labels that worked 91% of the time. Not great, but not bad considering it was only trained for 5 epochs and done quite quickly.\n\nBut how would it work with unseen data? That's why we have the test images. We can call model.evaluate, and pass in the two sets, and it will report back the loss for each. Let's give it a try:", "_____no_output_____" ] ], [ [ "model.evaluate(test_images, test_labels)", "313/313 [==============================] - 1s 2ms/step - loss: 0.3697 - accuracy: 0.8587\n" ] ], [ [ "For me, that returned a accuracy of about .8838, which means it was about 88% accurate. 
As expected it probably would not do as well with *unseen* data as it did with data it was trained on! As you go through this course, you'll look at ways to improve this. \n\nTo explore further, try the below exercises:\n", "_____no_output_____" ], [ "# Exploration Exercises", "_____no_output_____" ], [ "###Exercise 1:\nFor this first exercise run the below code: It creates a set of classifications for each of the test images, and then prints the first entry in the classifications. The output, after you run it is a list of numbers. Why do you think this is, and what do those numbers represent? ", "_____no_output_____" ] ], [ [ "classifications = model.predict(test_images)\n\nprint(classifications[0])", "[1.64877511e-05 1.53234467e-07 3.50508572e-06 6.67809957e-07 7.88466059e-06 4.53139953e-02 2.88244087e-06 5.86251915e-02 1.07724765e-04 8.95921528e-01]\n" ] ], [ [ "Hint: try running print(test_labels[0]) -- and you'll get a 9. Does that help you understand why this list looks the way it does? ", "_____no_output_____" ] ], [ [ "print(test_labels[0])", "9\n" ] ], [ [ "### What does this list represent?\n\n\n1. It's 10 random meaningless values\n2. It's the first 10 classifications that the computer made\n3. It's the probability that this item is each of the 10 classes\n", "_____no_output_____" ], [ "####Answer: \nThe correct answer is (3)\n\nThe output of the model is a list of 10 numbers. These numbers are a probability that the value being classified is the corresponding value (https://github.com/zalandoresearch/fashion-mnist#labels), i.e. the first value in the list is the probability that the image is of a '0' (T-shirt/top), the next is a '1' (Trouser) etc. Notice that they are all VERY LOW probabilities.\n\nFor the 9 (Ankle boot), the probability was in the 90's, i.e. the neural network is telling us that it's almost certainly a 7.", "_____no_output_____" ], [ "### How do you know that this list tells you that the item is an ankle boot?\n\n\n1. There's not enough information to answer that question\n2. The 10th element on the list is the biggest, and the ankle boot is labelled 9\n2. The ankle boot is label 9, and there are 0->9 elements in the list\n", "_____no_output_____" ], [ "####Answer\nThe correct answer is (2). Both the list and the labels are 0 based, so the ankle boot having label 9 means that it is the 10th of the 10 classes. The list having the 10th element being the highest value means that the Neural Network has predicted that the item it is classifying is most likely an ankle boot", "_____no_output_____" ], [ "##Exercise 2: \nLet's now look at the layers in your model. Experiment with different values for the dense layer with 512 neurons. What different results do you get for loss, training time etc? Why do you think that's the case? 
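One systematic way to explore this is to loop over a few layer widths and record both the wall-clock training time and the resulting test accuracy. The sketch below is illustrative only — it assumes the normalized `training_images`/`training_labels`/`test_images`/`test_labels` arrays from earlier are available, and the timings and accuracies will vary by machine and run.

```python
import time
import tensorflow as tf

for width in (64, 128, 512, 1024):
    probe = tf.keras.models.Sequential([
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(width, activation=tf.nn.relu),
        tf.keras.layers.Dense(10, activation=tf.nn.softmax)])
    probe.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])

    start = time.time()
    probe.fit(training_images, training_labels, epochs=5, verbose=0)
    elapsed = time.time() - start

    _, acc = probe.evaluate(test_images, test_labels, verbose=0)
    print(f"{width:>5} neurons: {elapsed:6.1f}s, test accuracy {acc:.4f}")
```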
\n", "_____no_output_____" ] ], [ [ "import tensorflow as tf\nprint(tf.__version__)\n\nmnist = tf.keras.datasets.mnist\n\n(training_images, training_labels) , (test_images, test_labels) = mnist.load_data()\n\ntraining_images = training_images/255.0\ntest_images = test_images/255.0\n\nmodel = tf.keras.models.Sequential([tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(1024, activation=tf.nn.relu),\n tf.keras.layers.Dense(10, activation=tf.nn.softmax)])\n\nmodel.compile(optimizer = 'adam',\n loss = 'sparse_categorical_crossentropy')\n\nmodel.fit(training_images, training_labels, epochs=5)\n\nmodel.evaluate(test_images, test_labels)\n\nclassifications = model.predict(test_images)\n\nprint(classifications[0])\nprint(test_labels[0])", "2.5.0\nEpoch 1/5\n1875/1875 [==============================] - 13s 7ms/step - loss: 0.1856\nEpoch 2/5\n1875/1875 [==============================] - 13s 7ms/step - loss: 0.0759\nEpoch 3/5\n1875/1875 [==============================] - 13s 7ms/step - loss: 0.0492\nEpoch 4/5\n1875/1875 [==============================] - 13s 7ms/step - loss: 0.0355\nEpoch 5/5\n1875/1875 [==============================] - 13s 7ms/step - loss: 0.0266\n313/313 [==============================] - 2s 5ms/step - loss: 0.0752\n[1.2626257e-10 1.3521382e-08 4.7498229e-08 2.4501967e-05 2.5177698e-13 3.3616963e-09 9.4038045e-14 9.9997544e-01 3.9814525e-09 1.0032574e-08]\n7\n" ] ], [ [ "###Question 1. Increase to 1024 Neurons -- What's the impact?\n\n1. Training takes longer, but is more accurate\n2. Training takes longer, but no impact on accuracy\n3. Training takes the same time, but is more accurate\n", "_____no_output_____" ], [ "####Answer\nThe correct answer is (1) by adding more Neurons we have to do more calculations, slowing down the process, but in this case they have a good impact -- we do get more accurate. That doesn't mean it's always a case of 'more is better', you can hit the law of diminishing returns very quickly!", "_____no_output_____" ], [ "##Exercise 3: \n\nWhat would happen if you remove the Flatten() layer. Why do you think that's the case? \n\nYou get an error about the shape of the data. It may seem vague right now, but it reinforces the rule of thumb that the first layer in your network should be the same shape as your data. Right now our data is 28x28 images, and 28 layers of 28 neurons would be infeasible, so it makes more sense to 'flatten' that 28,28 into a 784x1. Instead of wriitng all the code to handle that ourselves, we add the Flatten() layer at the begining, and when the arrays are loaded into the model later, they'll automatically be flattened for us.", "_____no_output_____" ] ], [ [ "import tensorflow as tf\nprint(tf.__version__)\n\nmnist = tf.keras.datasets.mnist\n\n(training_images, training_labels) , (test_images, test_labels) = mnist.load_data()\n\ntraining_images = training_images/255.0\ntest_images = test_images/255.0\n\nmodel = tf.keras.models.Sequential([#tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(64, activation=tf.nn.relu),\n tf.keras.layers.Dense(10, activation=tf.nn.softmax)])\n\nmodel.compile(optimizer = 'adam',\n loss = 'sparse_categorical_crossentropy')\n\nmodel.fit(training_images, training_labels, epochs=5)\n\nmodel.evaluate(test_images, test_labels)\n\nclassifications = model.predict(test_images)\n\nprint(classifications[0])\nprint(test_labels[0])", "2.5.0\nEpoch 1/5\n" ] ], [ [ "##Exercise 4: \n\nConsider the final (output) layers. Why are there 10 of them? What would happen if you had a different amount than 10? 
For example, try training the network with 5\n\nYou get an error as soon as it finds an unexpected value. Another rule of thumb -- the number of neurons in the last layer should match the number of classes you are classifying for. In this case it's the digits 0-9, so there are 10 of them, hence you should have 10 neurons in your final layer.", "_____no_output_____" ] ], [ [ "import tensorflow as tf\nprint(tf.__version__)\n\nmnist = tf.keras.datasets.mnist\n\n(training_images, training_labels) , (test_images, test_labels) = mnist.load_data()\n\ntraining_images = training_images/255.0\ntest_images = test_images/255.0\n\nmodel = tf.keras.models.Sequential([tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(64, activation=tf.nn.relu),\n tf.keras.layers.Dense(5, activation=tf.nn.softmax)])\n\nmodel.compile(optimizer = 'adam',\n loss = 'sparse_categorical_crossentropy')\n\nmodel.fit(training_images, training_labels, epochs=5)\n\nmodel.evaluate(test_images, test_labels)\n\nclassifications = model.predict(test_images)\n\nprint(classifications[0])\nprint(test_labels[0])", "_____no_output_____" ] ], [ [ "##Exercise 5: \n\nConsider the effects of additional layers in the network. What will happen if you add another layer between the one with 512 and the final layer with 10. \n\nAns: There isn't a significant impact -- because this is relatively simple data. For far more complex data (including color images to be classified as flowers that you'll see in the next lesson), extra layers are often necessary. ", "_____no_output_____" ] ], [ [ "import tensorflow as tf\nprint(tf.__version__)\n\nmnist = tf.keras.datasets.mnist\n\n(training_images, training_labels) , (test_images, test_labels) = mnist.load_data()\n\ntraining_images = training_images/255.0\ntest_images = test_images/255.0\n\nmodel = tf.keras.models.Sequential([tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(512, activation=tf.nn.relu),\n tf.keras.layers.Dense(256, activation=tf.nn.relu),\n tf.keras.layers.Dense(10, activation=tf.nn.softmax)])\n\nmodel.compile(optimizer = 'adam',\n loss = 'sparse_categorical_crossentropy')\n\nmodel.fit(training_images, training_labels, epochs=5)\n\nmodel.evaluate(test_images, test_labels)\n\nclassifications = model.predict(test_images)\n\nprint(classifications[0])\nprint(test_labels[0])", "2.5.0\nEpoch 1/5\n1875/1875 [==============================] - 11s 6ms/step - loss: 0.1862\nEpoch 2/5\n1875/1875 [==============================] - 11s 6ms/step - loss: 0.0772\nEpoch 3/5\n1875/1875 [==============================] - 11s 6ms/step - loss: 0.0536\nEpoch 4/5\n1875/1875 [==============================] - 11s 6ms/step - loss: 0.0423\nEpoch 5/5\n1875/1875 [==============================] - 11s 6ms/step - loss: 0.0309\n313/313 [==============================] - 1s 4ms/step - loss: 0.0859\n[1.0898516e-13 2.9087501e-09 1.6182339e-10 9.1324077e-09 1.2855523e-11 4.0456270e-11 1.3092050e-16 1.0000000e+00 1.1993206e-11 5.4831457e-08]\n7\n" ] ], [ [ "#Exercise 6: \n\nConsider the impact of training for more or less epochs. Why do you think that would be the case? \n\nTry 15 epochs -- you'll probably get a model with a much better loss than the one with 5\nTry 30 epochs -- you might see the loss value stops decreasing, and sometimes increases. This is a side effect of something called 'overfitting' which you can learn about [somewhere] and it's something you need to keep an eye out for when training neural networks. There's no point in wasting your time training if you aren't improving your loss, right! 
:)", "_____no_output_____" ] ], [ [ "import tensorflow as tf\nprint(tf.__version__)\n\nmnist = tf.keras.datasets.mnist\n\n(training_images, training_labels) , (test_images, test_labels) = mnist.load_data()\n\ntraining_images = training_images/255.0\ntest_images = test_images/255.0\n\nmodel = tf.keras.models.Sequential([tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(128, activation=tf.nn.relu),\n tf.keras.layers.Dense(10, activation=tf.nn.softmax)])\n\nmodel.compile(optimizer = 'adam',\n loss = 'sparse_categorical_crossentropy')\n\nmodel.fit(training_images, training_labels, epochs=30)\n\nmodel.evaluate(test_images, test_labels)\n\nclassifications = model.predict(test_images)\n\nprint(classifications[34])\nprint(test_labels[34])", "_____no_output_____" ] ], [ [ "#Exercise 7: \n\nBefore you trained, you normalized the data, going from values that were 0-255 to values that were 0-1. What would be the impact of removing that? Here's the complete code to give it a try. Why do you think you get different results? ", "_____no_output_____" ] ], [ [ "import tensorflow as tf\nprint(tf.__version__)\nmnist = tf.keras.datasets.mnist\n(training_images, training_labels), (test_images, test_labels) = mnist.load_data()\ntraining_images=training_images#/255.0\ntest_images=test_images#/255.0\nmodel = tf.keras.models.Sequential([\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(512, activation=tf.nn.relu),\n tf.keras.layers.Dense(10, activation=tf.nn.softmax)\n])\nmodel.compile(optimizer='adam', loss='sparse_categorical_crossentropy')\nmodel.fit(training_images, training_labels, epochs=5)\nmodel.evaluate(test_images, test_labels)\nclassifications = model.predict(test_images)\nprint(classifications[0])\nprint(test_labels[0])", "2.5.0\nEpoch 1/5\n1875/1875 [==============================] - 9s 5ms/step - loss: 2.5059\nEpoch 2/5\n1875/1875 [==============================] - 9s 5ms/step - loss: 0.3372\nEpoch 3/5\n1875/1875 [==============================] - 8s 4ms/step - loss: 0.2949\nEpoch 4/5\n1875/1875 [==============================] - 8s 5ms/step - loss: 0.2682\nEpoch 5/5\n1875/1875 [==============================] - 9s 5ms/step - loss: 0.2526\n313/313 [==============================] - 1s 4ms/step - loss: 0.3552\n[0.0000000e+00 2.5788209e-34 7.7706575e-21 7.0629726e-12 1.6246576e-32 1.3942973e-21 0.0000000e+00 1.0000000e+00 9.4828896e-22 6.4018040e-13]\n7\n" ] ], [ [ "#Exercise 8: \n\nEarlier when you trained for extra epochs you had an issue where your loss might change. It might have taken a bit of time for you to wait for the training to do that, and you might have thought 'wouldn't it be nice if I could stop the training when I reach a desired value?' -- i.e. 95% accuracy might be enough for you, and if you reach that after 3 epochs, why sit around waiting for it to finish a lot more epochs....So how would you fix that? Like any other program...you have callbacks! 
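(Keras also ships a ready-made callback for the common case. If all you need is "stop once a monitored quantity stops improving", the built-in `EarlyStopping` shown in this hedged sketch avoids writing a custom class at all; the custom-callback version below gives you complete control over the stopping rule.)

```python
import tensorflow as tf

# Built-in alternative: stop when the training loss stops improving.
early_stop = tf.keras.callbacks.EarlyStopping(
    monitor='loss',    # watch the training loss
    min_delta=0.001,   # "improving" means dropping by at least this much
    patience=2,        # tolerate 2 stagnant epochs before stopping
    verbose=1)

# Then pass it to fit, e.g.:
# model.fit(training_images, training_labels, epochs=50, callbacks=[early_stop])
```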
Let's see them in action...", "_____no_output_____" ] ], [ [ "import tensorflow as tf\nprint(tf.__version__)\n\nclass myCallback(tf.keras.callbacks.Callback):\n def on_epoch_end(self, epoch, logs={}):\n if(logs.get('loss')<0.4):\n print(\"\\nReached 60% accuracy so cancelling training!\")\n self.model.stop_training = True\n\ncallbacks = myCallback()\nmnist = tf.keras.datasets.fashion_mnist\n(training_images, training_labels), (test_images, test_labels) = mnist.load_data()\ntraining_images=training_images/255.0\ntest_images=test_images/255.0\nmodel = tf.keras.models.Sequential([\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(512, activation=tf.nn.relu),\n tf.keras.layers.Dense(10, activation=tf.nn.softmax)\n])\nmodel.compile(optimizer='adam', loss='sparse_categorical_crossentropy')\nmodel.fit(training_images, training_labels, epochs=5, callbacks=[callbacks])\n", "2.5.0\nEpoch 1/5\n1875/1875 [==============================] - 10s 5ms/step - loss: 0.4741\nEpoch 2/5\n1875/1875 [==============================] - 9s 5ms/step - loss: 0.3592\n\nReached 60% accuracy so cancelling training!\n" ] ] ]
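A natural follow-up to the notebook above (a hedged sketch, assuming the trained `model` and the `test_images` array from those cells are still in scope): persist the classifier so it can be reloaded later without retraining. The file name here is arbitrary.

```python
import tensorflow as tf

# Save the trained network (architecture + weights) to disk...
model.save('fashion_mnist_classifier.h5')

# ...and load it back later for inference without retraining.
restored = tf.keras.models.load_model('fashion_mnist_classifier.h5')
print(restored.predict(test_images[:1]))   # class probabilities for the first test image
```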
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cba0006d23d497f75d7bec8683f239b83ef0540c
114,958
ipynb
Jupyter Notebook
Algo4 - Adaboost/Adaboost.ipynb
frostace/BinaryClassification
7e1ede6c49f8e7d14c7dfc5dc1cbf1f27f8a6a3d
[ "MIT" ]
1
2019-11-19T07:52:22.000Z
2019-11-19T07:52:22.000Z
Algo4 - Adaboost/Adaboost.ipynb
frostace/BinaryClassification
7e1ede6c49f8e7d14c7dfc5dc1cbf1f27f8a6a3d
[ "MIT" ]
null
null
null
Algo4 - Adaboost/Adaboost.ipynb
frostace/BinaryClassification
7e1ede6c49f8e7d14c7dfc5dc1cbf1f27f8a6a3d
[ "MIT" ]
null
null
null
104.888686
40,116
0.755824
[ [ [ "# import lib\n# ===========================================================\nimport csv\nimport pandas as pd\nfrom datascience import *\nimport numpy as np\nimport random\nimport time\nimport matplotlib.pyplot as plt\n%matplotlib inline\nplt.style.use('fivethirtyeight')\nimport collections\nimport math\nimport sys\nfrom tqdm import tqdm\nfrom time import sleep", "_____no_output_____" ], [ "# Initialize useful data\n# with open('clinvar_conflicting_clean.csv', 'r') as f:\n# reader = csv.reader(f)\n# temp_rows = list(reader)\ndf = pd.read_csv('clinvar_conflicting_clean.csv', low_memory=False)\n# columns_to_change = ['ORIGIN', 'EXON', 'INTRON', 'STRAND', 'LoFtool', 'CADD_PHRED', 'CADD_RAW', 'BLOSUM62']\n# df[['CLNVI', 'MC', 'SYMBOL', 'Feature_type', 'Feature', 'BIOTYPE', \n# 'cDNA_position', 'CDS_position', 'Protein_position', 'Amino_acids', 'Codons', \n# 'BAM_EDIT', 'SIFT', 'PolyPhen']] = df[['CLNVI', 'MC', 'SYMBOL', 'Feature_type', 'Feature', 'BIOTYPE', \n# 'cDNA_position', 'CDS_position', 'Protein_position', 'Amino_acids', 'Codons', \n# 'BAM_EDIT', 'SIFT', 'PolyPhen']].fillna(value=\"null\")\n\n\ndf = df.fillna(value=0)\ndf_zero = df.loc[df['CLASS'] == 0]\ndf_zero = df_zero.sample(n=10000)\ndf_one = df.loc[df['CLASS'] == 1]\ndf_one = df_one.sample(n=10000)\ndf = pd.concat([df_zero, df_one])\n\n\ndf = df.sample(n = df.shape[0])\nall_rows = df.values.tolist()\nrow_num = len(all_rows)\ndf.head()", "_____no_output_____" ], [ "# Decision stump part for Adaboost\n# ===========================================================\ndef is_numeric(value):\n return isinstance(value, int) or isinstance(value, float)\n\n# === LeafNode is the prediction result of this branch ===\nclass LeafNode:\n def __init__(self, rows):\n labels = [row[-1] for row in rows]\n# labels = []\n# self.one_idx = []\n# self.zero_idx = []\n# for i in range(len(rows)):\n# row = rows[i]\n# labels.append(row[-1])\n# if row[-1] == 1:\n# self.one_idx.append(i)\n# else:\n# self.zero_idx.append(i)\n self.prediction = collections.Counter(labels)\n \n# === DecisionNode is an attribute / question used to partition the data ===\nclass DecisionNode:\n def __init__(self, question = None, left_branch = None, right_branch = None):\n self.question = question\n self.left_branch = left_branch\n self.right_branch = right_branch\n \nclass DecisionStump:\n def __init__(self, training_attribute, training_data, height, method = \"CART\"):\n self.attribute = training_attribute # takein attribute and data separately\n self.train = training_data\n self.height = height\n self.row_num = len(self.train)\n self.column_num = len(self.attribute)\n self.method = method.upper() # convert to upper case for general use\n self.significance = 0\n if self.method not in [\"C4.5\", \"CART\", \"HYBRID\"]:\n print(\"Error: Please choose a valid method! 
from: [C4.5, CART, HYBRID]\")\n return None\n \n # train decision stump\n self.root = self.build_stump(self.train, 1)\n \n # count ACC classifications and mis classifications to update weights\n self.accclassify_idx = []\n self.misclassify_idx = []\n # Only after DecisionStump trained, can we know which rows are misclassified \n # Walk down the decision stump to collect all misclassification indices\n \n# if self.root.left_branch.prediction.get(1, 0) > self.root.left_branch.prediction.get(0, 0):\n# # then consider the prediction of this leaf node as 1: 1 -> correct, 0 -> misclassify\n# self.accclassify_idx += self.root.left_branch.one_idx\n# self.misclassify_idx += self.root.left_branch.zero_idx\n# else:\n# # then consider the prediction of this leaf node as 0: 0 -> correct, 1 -> misclassify\n# self.accclassify_idx += self.root.left_branch.zero_idx\n# self.misclassify_idx += self.root.left_branch.one_idx\n# if self.root.right_branch.prediction.get(1, 0) > self.root.right_branch.prediction.get(0, 0):\n# # then consider the prediction of this leaf node as 1: 1 -> correct, 0 -> misclassify\n# self.accclassify_idx += self.root.right_branch.one_idx\n# self.misclassify_idx += self.root.right_branch.zero_idx\n# else:\n# # then consider the prediction of this leaf node as 0: 0 -> correct, 1 -> misclassify\n# self.accclassify_idx += self.root.right_branch.zero_idx\n# self.misclassify_idx += self.root.right_branch.one_idx\n \n def uniq_val(self, column):\n return set([self.train[i][column] for i in range(len(self.train))])\n \n # when raising a question.\n # if it's a categorical attribute, we simply iterate all categories\n # if it's a numeric attribute, we iterate the set of possible numeric values \n class Question:\n def __init__(self, column, ref_value, attribute):\n self.column = column\n self.ref_value = ref_value if ref_value else \"None\"\n self.attri = attribute\n\n def match(self, row):\n if is_numeric(self.ref_value):\n try:\n return row[self.column] >= self.ref_value\n except:\n print(\"Error occured in \", row)\n return True\n else:\n return row[self.column] == self.ref_value\n\n def __repr__(self):\n operand = \">=\" if is_numeric(self.ref_value) else \"==\"\n return \"Is %s %s %s?\" % (self.attri[self.column], operand, str(self.ref_value))\n \n # === Method 1 - C4.5 ===\n def entropy(self, rows):\n # === Bits used to store the information ===\n labels = [row[-1] for row in rows]\n frequency = collections.Counter(labels).values()\n pop = sum(frequency)\n H = 0\n for f in frequency:\n p = f / pop\n H -= p * math.log(p, 2)\n return H\n \n # === Method 2 - CART ===\n def gini(self, rows):\n # === Probability of misclassifying any of your label, which is impurity ===\n labels = [row[-1] for row in rows]\n frequency = collections.Counter(labels).values()\n pop = sum(frequency)\n gini = 1\n for f in frequency:\n p = f / pop\n gini -= p ** 2\n return gini\n \n # === Calculate Gain Info ===\n def info(self, branches, root):\n # === Objective: to find the best question which can maximize info ===\n root_size = float(len(root))\n if self.method == \"C4.5\": # Here I pick the GainRatio Approach\n root_uncertainty = self.entropy(root)\n gain_info = root_uncertainty\n split_info = 0\n for branch in branches:\n if not branch: continue\n gain_info -= len(branch) / root_size * self.entropy(branch)\n split_info -= float(len(branch)) / root_size * math.log(float(len(branch)) / root_size)\n# print(gain_info, split_info)\n return gain_info / split_info\n elif self.method == \"CART\":\n root_uncertainty = 
self.gini(root)\n gain_info = root_uncertainty\n for branch in branches:\n if not branch: continue\n gain_info -= len(branch) / root_size * self.gini(branch)\n return gain_info\n elif self.method == \"HYBRID\":\n pass\n pass\n \n # === Here I only do Binary Partitions ===\n def partition(self, rows, question):\n true_rows = []\n false_rows = []\n for row in rows:\n if question.match(row):\n true_rows.append(row)\n else:\n false_rows.append(row)\n return true_rows, false_rows\n \n # the question that achieves the max infomation attenuation is the best question\n def find_best_question(self, rows):\n max_info_attenuation = 0\n best_question = self.Question(0, self.train[0][0], self.attribute)\n # === Iterate through all question candidates ===\n # === TODO: Maybe Iteration here can be optimized ===\n for col in range(self.column_num - 1): # minus 1 to avoid using the label as attribute\n ref_candidates = self.uniq_val(col)\n for ref_value in ref_candidates:\n if ref_value == \"null\": continue # avoid using null values to generate a question\n q = self.Question(col, ref_value, self.attribute)\n temp_true_rows, temp_false_rows = self.partition(rows, q)\n temp_info_attenuation = self.info([temp_true_rows, temp_false_rows], rows)\n if temp_info_attenuation >= max_info_attenuation:\n max_info_attenuation = temp_info_attenuation\n best_question = q\n return max_info_attenuation, best_question\n \n # === Input rows of data with attributes and labels ===\n def build_stump(self, rows, height):\n # === Assign all rows as root of the whole decision tree ===\n # === We have met the leaf node if gini(rows) is 0 or no question candidates left ===\n gain_reduction, q = self.find_best_question(rows)\n true_rows, false_rows = self.partition(rows, q)\n if height + 1 >= self.height:\n return DecisionNode(q, LeafNode(true_rows), LeafNode(false_rows))\n else:\n return DecisionNode(q, self.build_stump(true_rows, height + 1), self.build_stump(false_rows, height + 1))\n \n # === Input a row of data with attributes (and no label), predict its label with our decision tree ===\n # === Actually it can contain a label, we just don't use it ===\n # === walk down the decision tree until we reach the leaf node ===\n def classify(self, row, node):\n if isinstance(node, LeafNode):\n # do a mapping from label[1, 0] to label[1, -1]\n return node.prediction\n# return 1 if node.prediction.get(1, 0) / (node.prediction.get(1, 0) + node.prediction.get(0, 0)) > cutoff else -1\n \n if node.question.match(row):\n return self.classify(row, node.left_branch)\n else:\n return self.classify(row, node.right_branch)\n\n # function to print the tree out\n def print_tree(self, node, spacing=\"\"):\n # Base case: we've reached a leaf\n if isinstance(node, LeafNode):\n print (spacing + \"Predict\", node.prediction)\n return\n\n # Print the question at this node\n print (spacing + str(node.question))\n\n # Call this function recursively on the true branch\n print (spacing + '--> True:')\n self.print_tree(node.left_branch, spacing + \" \")\n\n # Call this function recursively on the false branch\n print (spacing + '--> False:')\n self.print_tree(node.right_branch, spacing + \" \")\n \n def test(self):\n for i in range(self.column_num):\n q = self.Question(i, self.train[1][i], self.attribute)\n print(q)\n print(q.match(1))\n \ndef normalized_weight(weight):\n return np.divide(weight, sum(weight))\n\ndef rev_logit(val):\n return 1 / (1 + np.exp(val))", "_____no_output_____" ], [ "# Divide whole dataset into training set and testing set\n# 
===========================================================\ntraining_percentage = 0.2 # percent of partition of training dataset\ntraining_size = int(row_num * training_percentage)\ntesting_size = row_num - training_size\ntraining_attribute = list(df.columns)\ntraining_data = all_rows[: training_size] # training data should include header row\ntesting_data = all_rows[training_size: ] # testing data don't need to include header row", "_____no_output_____" ], [ "# Recursively Training base learners\n# ===========================================================\n# let's train T base learners\nT = 20\nweakleaner_height = 3\nstump_forest = []\nweight = [1 / training_size for _ in range(training_size)]\nstart = time.time()\nfor i in range(T):\n # train a decision stump\n stump = DecisionStump(training_attribute, training_data, weakleaner_height, \"CART\")\n \n # calculate the total error of the stump after it's trained\n for j in range(training_size):\n row = training_data[j]\n pred_counter = stump.classify(row, stump.root)\n pred_label = 1 if pred_counter.get(1, 0) / (pred_counter.get(1, 0) + pred_counter.get(0, 0) + 0.00000001) > 0.5 else 0\n if pred_label == row[-1]:\n stump.accclassify_idx.append(j)\n else:\n stump.misclassify_idx.append(j)\n accuracy = len(stump.accclassify_idx) / training_size\n total_err_rate = 1 - accuracy\n \n # update the significance level of this stump, remember not to divide by zero\n stump.significance = 0.5 * math.log((1 - total_err_rate + 0.0001) / (total_err_rate + 0.0001))\n \n # append stump into the forest\n stump_forest.append(stump)\n# if len(stump_forest) == T: break # early break\n\n # update training_data weight, resample the training data with the updated weight distribution\n true_scale = np.e ** stump.significance\n for idx in stump.misclassify_idx:\n weight[idx] = weight[idx] * true_scale\n for idx in stump.accclassify_idx:\n weight[idx] = weight[idx] * (1 / true_scale)\n distrib = normalized_weight(weight)\n \n # interactive printing\n \n# sys.stdout.write('\\r')\n# # the exact output you're looking for:\n# sys.stdout.write(\"Training Random Forest: [%-10s] %d%% alpha = %.02f\" % ('='*int((i + 1) / T * 10), int((i + 1) / T * 100), stump.significance))\n# sys.stdout.flush()\n\n# stump.print_tree(stump.root)\n# print(i, stump.significance)\n\n resampled_idx = np.random.choice(training_size, training_size, p = distrib)\n training_data = [training_data[idx] for idx in resampled_idx]\n if len(set([row[1] for row in training_data])) < 0.04 * training_size: break\n print(i, len(set([row[1] for row in training_data])), stump.significance, end='\\n')\n weight = [1 / training_size for _ in range(training_size)]\nend = time.time()\nprint(\"\\nTime: %.02fs\" % (end - start))", "0 327 0.18039602514769087\n" ], [ "# New Testing Adaboost\n# ===========================================================\n# Compute TN, TP, FN, FP, etc. 
together with testing\n# ===========================================================\nROC = Table(make_array('CUTOFF', 'TN', 'FN', 'FP', 'TP', 'ACC'))\nstep_size = 0.05\nforest_size = len(stump_forest)\nCMap = {0: 'TN', 1: 'FN', 2: 'FP', 3: 'TP'}\nfor cutoff in np.arange(0, 1 + step_size, step_size):\n sys.stdout.write('\\r')\n # the exact output you're looking for:\n sys.stdout.write(\"Testing: [%-20s] %d%%\" % ('='*int(cutoff * 100 / 5), int(cutoff * 100)))\n sys.stdout.flush()\n \n '''\n # calculate the total error of each stump\n for stump in stump_forest:\n stump.accclassify_idx = []\n stump.misclassify_idx = []\n\n # walk down the stump for each training data, see if its prediction makes sense\n for j in range(training_size):\n row = training_data[j]\n pred_counter = stump.classify(row, stump.root)\n pred_label = 1 if pred_counter.get(1, 0) / (pred_counter.get(1, 0) + pred_counter.get(0, 0) + 0.00000001) > cutoff else 0\n if pred_label == row[-1]:\n stump.accclassify_idx.append(j)\n else:\n stump.misclassify_idx.append(j)\n accuracy = len(stump.accclassify_idx) / training_size\n total_err_rate = 1 - accuracy\n\n # update the significance level of this stump, remember not to divide by zero\n stump.significance = 0.5 * math.log((1 - total_err_rate + 0.0001) / (total_err_rate + 0.0001))\n '''\n \n Confusion = {'TN': 0, 'FN': 0, 'FP': 0, 'TP': 0}\n for row in testing_data:\n true_rate_forest = 0\n for tree_i in stump_forest:\n # prediction is a counter of label 1 and 0\n pred_counter = tree_i.classify(row, tree_i.root)\n # do a mapping from label[1, 0] to label[1, -1]\n true_rate_tree = 1 if pred_counter.get(1, 0) / (pred_counter.get(1, 0) + pred_counter.get(0, 0) + 0.00000001) > cutoff else -1\n true_rate_forest += true_rate_tree * tree_i.significance\n# true_rate_forest = rev_logit(true_rate_forest)\n# true_pred = 1 if true_rate_forest >= cutoff else 0\n true_pred = 0 if np.sign(true_rate_forest) <= 0 else 1\n indicator = (true_pred << 1) + row[-1]\n # accordingly update confusion matrix\n Confusion[CMap[indicator]] += 1\n # concatenate the confusion matrix values into the overall ROC Table\n thisline = [cutoff] + list(Confusion.values()) + [(Confusion['TP'] + Confusion['TN']) / sum(Confusion.values())]\n ROC = ROC.with_row(thisline)\nROC = ROC.with_columns('SENSITIVITY', ROC.apply(lambda TP, FN: TP / (TP + FN + 0.00000001), 'TP', 'FN'))\nROC = ROC.with_columns('FPR', ROC.apply(lambda TN, FP: FP / (TN + FP + 0.00000001), 'TN', 'FP'))\nROC = ROC.with_column('FMEAS', ROC.apply(lambda TP, FP, FN: 2 * (TP / (TP + FN)) * (TP / (TP + FP)) / (TP / (TP + FN) + TP / (TP + FP)), 'TP', 'FP', 'FN'))", "Testing: [====================] 100%" ], [ "ROC.show()", "_____no_output_____" ], [ "# Acc Curve by cutoff\n# ===========================================================\n# matplotlib.use('TkAgg')\nfig = plt.figure()\nplt.xlabel('Cutoff')\nplt.ylabel('Accuracy')\nplt.title('Accuracy - Cutoff of Adaboost')\nplt.plot(np.arange(0, 1.1, 0.1), [0.5 for i in np.arange(0, 1.1, 0.1)], color='black')\nplt.plot(ROC.column('CUTOFF'), ROC.column('ACC'), color='orange')\nplt.legend(['Adaboost', 'Null'])\nplt.axis([0, 1, 0, 1.1])\nplt.show()\nfig.savefig('Adaboost ACC.png', bbox_inches='tight')", "_____no_output_____" ], [ "# ROC_CURVE\n# ===========================================================\nfig = plt.figure()\nplt.xlabel('False Positive Rate')\nplt.ylabel('Sensitivity')\nplt.title('ROC - Curve of Adaboost')\nplt.plot(np.arange(0, 1.1, 0.1), np.arange(0, 1.1, 0.1), 
color='black')\nplt.plot(ROC.column('FPR'), ROC.column('SENSITIVITY'), color='orange')\n\nplt.legend(['Adaboost', 'Null'])\nplt.axis([0, 1, 0, 1.1])\nplt.show()\nfig.savefig('Adaboost ROC.png', bbox_inches='tight')", "_____no_output_____" ], [ "# Compute AUC\n# ===========================================================\nlength = len(ROC.column('FPR'))\nauc = 0\nfor i in range(length - 1):\n auc += 0.5 * abs(ROC.column('FPR')[i + 1] - ROC.column('FPR')[i]) * (ROC.column('SENSITIVITY')[i] + ROC.column('SENSITIVITY')[i + 1])\nprint(\"auc = %.03f\" %auc)", "auc = 0.614\n" ], [ "# Original Testing\n# ===========================================================\naccuracy = 0\nfor row in testing_data:\n overall_classification = 0\n for stump in stump_forest:\n classification = stump.classify(row, stump.root)\n vote = stump.significance\n overall_classification += classification * vote\n # reverse mapping from label[1, -1] to label[1, 0]\n predicted_label = 0 if np.sign(overall_classification) <= 0 else 1\n if predicted_label == row[-1]: accuracy += 1\n# print(classification, predicted_label, row[-1])\naccuracy = accuracy / testing_size\nprint(\"%.03f%%\" % (accuracy * 100))", "_____no_output_____" ], [ "# Testing with a toy dataset\n# ===========================================================\ntraining_data = [\n ['Green', 3, 1],\n ['Yellow', 3, 1],\n ['Red', 1, 0],\n ['Red', 1, 0],\n ['Yellow', 3, 1],\n ['Red', 3, 1]\n]\ntesting_data = [\n ['Red', 2, 0],\n ['Yellow', 3.5, 1],\n ['Green', 3, 1]\n]\ntraining_attribute = ['Color', 'Diameter', 'Label']\ntraining_size = len(training_data)\ntesting_size = len(testing_data)", "_____no_output_____" ], [ "# pf = df[]\nlen(set(pd.Index(df)))", "_____no_output_____" ], [ "np.bincount([row[-1] for row in all_rows])", "_____no_output_____" ], [ "fpr, sen, acc = ROC.column('FPR'), ROC.column('SENSITIVITY'), ROC.column('ACC')", "_____no_output_____" ], [ "fpr", "_____no_output_____" ], [ "sen", "_____no_output_____" ], [ "acc", "_____no_output_____" ] ] ]
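As a cross-check on the hand-rolled AdaBoost above, a library implementation can be run for comparison. This sketch is illustrative only: it uses scikit-learn's `AdaBoostClassifier` on a synthetic binary problem rather than the clinvar CSV, mirrors the 20%/80% train/test split and the 20 weak learners, and relies on sklearn's default decision-stump base learner, so its numbers are not directly comparable to the notebook's.

```python
from sklearn.datasets import make_classification
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split

# Synthetic stand-in for a binary CLASS label.
X, y = make_classification(n_samples=20000, n_features=20, n_informative=8,
                           random_state=0)

# 20% for training, 80% held out, as in the notebook above.
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.2,
                                                    random_state=0)

# 20 boosted weak learners (decision stumps by default).
clf = AdaBoostClassifier(n_estimators=20, random_state=0)
clf.fit(X_train, y_train)

probs = clf.predict_proba(X_test)[:, 1]
print("test AUC:", round(roc_auc_score(y_test, probs), 3))
```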
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cba028aec59a4a598b6e328f79fa1cd3a94c1e3c
21,749
ipynb
Jupyter Notebook
RobustRegression/TheilSenRegression.ipynb
vladiant/MachineLearningUtils
c6d1f4e928d4a258b8b6e441c93004e2337ca301
[ "MIT" ]
null
null
null
RobustRegression/TheilSenRegression.ipynb
vladiant/MachineLearningUtils
c6d1f4e928d4a258b8b6e441c93004e2337ca301
[ "MIT" ]
null
null
null
RobustRegression/TheilSenRegression.ipynb
vladiant/MachineLearningUtils
c6d1f4e928d4a258b8b6e441c93004e2337ca301
[ "MIT" ]
null
null
null
102.107981
16,784
0.864591
[ [ [ "# Generate Data for Robust Regressions\nhttps://machinelearningmastery.com/robust-regression-for-machine-learning-in-python/", "_____no_output_____" ], [ "## Create a regression dataset with outliers", "_____no_output_____" ] ], [ [ "from random import random, randint, seed\n\nfrom sklearn.datasets import make_regression\n\nfrom matplotlib import pyplot", "_____no_output_____" ], [ "# Prepare the dataset\ndef get_dataset():\n X, y = make_regression(n_samples=100, n_features=1,\n tail_strength=0.9, effective_rank=1,\n n_informative=1, noise=3,\n bias=50, random_state=1)\n # Add artificial outliers\n seed(1)\n for i in range(10):\n factor = randint(2, 4)\n if random() > 0.5:\n X[i] += factor * X.std()\n else:\n X[i] -= factor * X.std()\n\n return X, y", "_____no_output_____" ] ], [ [ "## Theil Sen Regression", "_____no_output_____" ] ], [ [ "from numpy import arange, mean, std, absolute\n\nfrom sklearn.linear_model import TheilSenRegressor\nfrom sklearn.model_selection import cross_val_score, RepeatedKFold", "_____no_output_____" ], [ "# Evaluate a model\ndef evaluate_model(X, y, model):\n # Define model evaluation method\n cv = RepeatedKFold(n_splits=10, n_repeats=3, random_state=1)\n # Evaluate model\n scores = cross_val_score(model, X, y, scoring='neg_mean_absolute_error', cv=cv, n_jobs=1)\n # Force scores to be positive\n return absolute(scores)", "_____no_output_____" ], [ "# Plot the dataset and the model's line of best fit\ndef plot_best_fit(X, y, model):\n # Fit the model to all data\n model.fit(X, y)\n # Plot the dataset\n pyplot.scatter(X, y)\n # Plot the line of the best fit\n xaxis = arange(X.min(), X.max(), 0.01)\n yaxis = model.predict(xaxis.reshape((len(xaxis),1)))\n pyplot.plot(xaxis, yaxis, color='r')\n # Show the plot\n pyplot.title(type(model).__name__)\n pyplot.show()", "_____no_output_____" ], [ "# Generate dataset\nX, y = get_dataset()", "_____no_output_____" ], [ "# Define model\n# TheilSenRegressor(*, fit_intercept=True, copy_X=True, max_subpopulation=10000.0,\n# n_subsamples=None, max_iter=300, tol=0.001, random_state=None, n_jobs=None, verbose=False)\nmodel = TheilSenRegressor()", "_____no_output_____" ], [ "# Evaluate model\nresults = evaluate_model(X, y, model)", "_____no_output_____" ], [ "print(f'Mean MAE: {mean(results)} ({std(results)})')", "Mean MAE: 4.371390239937131 (1.9609733695261808)\n" ], [ "# Plot line of the best fit\nplot_best_fit(X, y, model)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cba0390757698378fba31837ac73e3d3ee68c867
1,537
ipynb
Jupyter Notebook
learning jupyter.ipynb
Zumrannain/Python-Data
206feef7bc920657f2c853d349d1dd1aae234962
[ "MIT" ]
null
null
null
learning jupyter.ipynb
Zumrannain/Python-Data
206feef7bc920657f2c853d349d1dd1aae234962
[ "MIT" ]
null
null
null
learning jupyter.ipynb
Zumrannain/Python-Data
206feef7bc920657f2c853d349d1dd1aae234962
[ "MIT" ]
null
null
null
16.351064
79
0.426805
[ [ [ "print(\"Hello!, Jupyter!\")", "Hello!, Jupyter!\n" ], [ "for letter in \"My name is Zumran!\":\n print(letter)\n ", "M\ny\n \nn\na\nm\ne\n \ni\ns\n \nZ\nu\nm\nr\na\nn\n!\n" ] ], [ [ "I can write **documentation** in https://daringfireball11.net/markdown\n* A formula: $\\sqrt(1 + x)$", "_____no_output_____" ] ] ]
[ "code", "markdown" ]
[ [ "code", "code" ], [ "markdown" ] ]
cba03e2c636099ece6fc6f8c908f61c080adf851
354,837
ipynb
Jupyter Notebook
6. Recurrent Neural Networks I.ipynb
iEvidently/DeepLearningMaster2019
046527bf281b099a696b2303e2a634976302166b
[ "MIT" ]
null
null
null
6. Recurrent Neural Networks I.ipynb
iEvidently/DeepLearningMaster2019
046527bf281b099a696b2303e2a634976302166b
[ "MIT" ]
null
null
null
6. Recurrent Neural Networks I.ipynb
iEvidently/DeepLearningMaster2019
046527bf281b099a696b2303e2a634976302166b
[ "MIT" ]
null
null
null
276.352804
99,118
0.88814
[ [ [ "<a href=\"https://colab.research.google.com/github/DataScienceUB/DeepLearningMaster2019/blob/master/6.%20Recurrent%20Neural%20Networks%20I.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "# Recurrent Neural Networks I\n\nClassical neural networks, including convolutional ones, suffer from two severe limitations:\n\n+ They only accept a fixed-sized vector as input and produce a fixed-sized vector as output.\n+ They do not consider the sequential nature of some data (language, video frames, time series, etc.) \n\nRecurrent neural networks overcome these limitations by allowing to operate over sequences of vectors (in the input, in the output, or both).", "_____no_output_____" ], [ "## Vanilla Recurrent Neural Network\n\nThe basic formulas of a simple RNN are:\n\n$$ \\mathbf s_t = \\mbox{tanh }(U \\mathbf x_t + W \\mathbf s_{t-1}) $$\n$$ \\mathbf y_t = V \\mathbf s_t $$ \n\nwhere the hiperbollic tangent funtion ``tanh`` is: \n\n<img src=\"https://github.com/DataScienceUB/DeepLearningMaster2019/blob/master/images/TanhReal.gif?raw=1\" alt=\"\" style=\"width: 300px;\"/>\n\nThese equations basically say that the current network state $s_t$, commonly known as hidden state, is a function of the previous hidden state $\\mathbf s_{t-1}$ and the current input $\\mathbf x_t$. $U, V, W$ matrices are the parameters of the RNN and $\\mathbf y_t$ is its output at time $t$. \n\n<img src=\"https://github.com/DataScienceUB/DeepLearningMaster2019/blob/master/images/vanilla.png?raw=1\" alt=\"\" style=\"width: 400px;\"/>\n", "_____no_output_____" ], [ "## Unrolling in time of a RNN\n\nGiven an input sequence, we apply RNN formulas in a recurrent way until we process all input elements. **The RNN shares the parameters $U,V,W$ across all recurrent steps**. \n\nSome important observations:\n+ We can think of the hidden state as a memory of the network that captures information about the previous steps. **It embeds the representation of the sequence.**\n+ The output of the network can be considered at every stage or only at the final one (see bellow).\n+ When starting to train a RNN we must provide initial values for $U,V,W$ as well as for $\\mathbf s$.\n\nBy unrolling we mean that we write out the network for the complete sequence:\n\n\n<img src=\"https://github.com/DataScienceUB/DeepLearningMaster2019/blob/master/images/unrolling.png?raw=1\" alt=\"\" style=\"width: 600px;\"/>\n\nTraining a RNN involves the following steps:\n+ Providing a training set composed of several input ($n$-dimensional) sequences $\\{\\mathbf{X}_i \\}$ and their expected outcomes. Each element of a sequence $\\mathbf{x}_j \\in \\mathbf{X}_i$ is also a vector.\n+ Defining a loss function to measure the fitting of the output of the network to the expected outcome.\n+ Applying SGD (or variants) to optimize the loss function.", "_____no_output_____" ], [ "## Vanilla Recurrent Neural Network (minibatch version)\n\nObserve that the number of parameters of a RNN can be very high:\n\n<img src=\"https://github.com/DataScienceUB/DeepLearningMaster2019/blob/master/images/minibatch.png?raw=1\" alt=\"\" style=\"width: 400px;\"/>\n", "_____no_output_____" ], [ "It is not necessary to have outputs $y_t$ at each time step. 
Depending on the problem we are solving, we can have the following RNN architectures:", "_____no_output_____" ], [ "<img src=\"https://github.com/DataScienceUB/DeepLearningMaster2019/blob/master/images/kar.png?raw=1\" alt=\"\" style=\"width: 600px;\"/>\n\nSource: http://karpathy.github.io/2015/05/21/rnn-effectiveness/", "_____no_output_____" ], [ "RNN have shown success in:\n\n+ Language modeling and generation.\n+ Machine Translation.\n+ Speech Recognition.\n+ Image Description.\n+ Question Answering.\n+ Etc.", "_____no_output_____" ], [ "## RNN Computation\n\nDefining a simple RNN with code is simple:\n\n```python\nclass RNN:\n #...\n def step(self,x):\n self.h = np.tanh(np.dot(self.W_ss, self.h) + \n np.dot(self.W_xs, self.x))\n y = np.dot(self.W_sy, self.h)\n return y\n #...\n```\n\nWe can go deep by stacking RNNs:\n```python\ny1 = rnn1.step(x)\ny2 = rnn2.step(y1)\n```", "_____no_output_____" ], [ "### RNN and information representation\n\nThe inputs of a recurrent network must be always **sequences of vectors**, but we can process sequences of symbols/words by representing these symbols by numerical vectors.\n\nLet's suppose we are classifying a series of words $\\mathbf x_1, ..., \\mathbf x_{t-1}, \\mathbf x_t, \\mathbf x_{t+1}, ... \\mathbf x_{n}$, where $\\mathbf x_i$ are word vectors corresponding to a document with $n$ words that belong to a corpus with $|V|$ symbols. \n\nThen, the relationship to compute the hidden layer features at each time-step $t$ is $\\mathbf s_t = \\sigma(W^{(ss)} \\mathbf s_{t-1} + W^{(sx)} \\mathbf x_{t})$, where:\n\n + $\\mathbf x_{t} \\in \\mathbb{R}^{d}$ is input word vector at time $t$. \n + $W^{(sx)} \\in \\mathbb{R}^{D_s \\times d}$ is the weights matrix used to condition the input word vector, $x_t$.\n + $W^{(ss)} \\in \\mathbb{R}^{D_s \\times D_s}$ is the weights matrix used to condition the state of the previous time-step state, $s_{t-1}$.\n + $\\mathbf s_{t-1} \\in \\mathbb{R}^{D_s}$ is the state at the previous time-step, $s-1$. \n + $\\mathbf s_0 \\in \\mathbb{R}^{D_s}$ is an initialization vector for the hidden layer at time-step $t = 0$.\n + $\\sigma ()$ is the non-linearity function (normally, ``tanh``).\n \nIn this case, the output of the network can be defined to be $\\hat{\\mathbf y}_t = softmax (W^{(sy)}\\mathbf s_t)$ and the loss function cross-entropy, because we are dealing with a multiclass classification problem. \n\n$\\hat{\\mathbf y}$ is the output probability distribution over the vocabulary at each time-step $t$ (i.e. it is a high dimensional vector!). \n\nEssentially, $\\hat{\\mathbf y}_t$ is the next predicted word given the document context score so far (i.e. $\\mathbf s_{t-1}$) and the last observed word vector $\\mathbf x^{(t)}$. Here, $W^{(sy)} \\in \\mathbb{R}^{|V|\\times D_h}$ and $\\hat{\\mathbf y} \\in \\mathbb{R}^{|V|}$ where $|V|$ is the cardinality of the vocabulary.\n\nThe loss function used in RNNs is often the cross entropy error:\n\n$$\n\tL^{(t)} = - \\sum_{j=1}^{|V|} y_{t,j} \\times log (\\hat{y}_{t,j})\n$$\n\nThe cross entropy error over a document of size $n$ is:\n\n$$\n\tL = \\dfrac{1}{n} \\sum_{t=1}^n L^{(t)}(W) = - \\dfrac{1}{n} \\sum_{t=1}^{n} \\sum_{j=1}^{|V|} y_{t,j} \\times log (\\hat{y}_{t,j})\n$$\n", "_____no_output_____" ], [ "In the case of classifying a series of symbols/words, the *perplexity* measure can be used to assess the goodness of our model. 
It is basically 2 to the power of the negative log probability of the cross entropy error function:\n\n$$\n\tPerplexity = 2^{L}\n$$\n\nPerplexity is a measure of confusion where lower values imply more confidence in predicting the next word in the sequence (compared to the ground truth outcome).", "_____no_output_____" ], [ "## RNN Training\n\n> Training a RNN is similar to training a traditional NN, but some modifications. The main reason is that parameters are shared by all time steps: in order to compute the gradient at $t=4$, we need to propagate 3 steps and sum up the gradients. This is called **Backpropagation through time (BPTT)**.\n\nRecurrent neural networks propagate weight matrices from one time-step to the next. Recall the goal of a RNN implementation is to enable propagating context information through faraway time-steps. When these propagation results in a long series of matrix multiplications, weights can **vanish or explode**. \n\n+ Once the gradient value grows extremely large, it causes an overflow (i.e. ``NaN``) which is easily detectable at runtime; this issue is called the *Gradient Explosion Problem*. \n\n+ When the gradient value goes to zero, however, it can go undetected while drastically reducing the learning quality of the model for far-away words in the corpus; this issue is called the *Vanishing Gradient Problem*.\n\nThere are several tricks to mitigate these problems:", "_____no_output_____" ], [ "### Gradient Clipping\n\nTo solve the problem of exploding gradients, Thomas Mikolov first introduced a simple heuristic solution that *clips* gradients to a small number whenever they explode. That is, whenever they reach a certain threshold, they are set back to a small number. \n\n<img src=\"https://github.com/DataScienceUB/DeepLearningMaster2019/blob/master/images/exploding.png?raw=1\" alt=\"\" style=\"width: 400px;\"/>\n", "_____no_output_____" ], [ "### Better initialization\n\nTo solve the problem of vanishing gradients, instead of initializing $W^{ss}$ randomly, starting off from **random orthogonal matrices** works better, i.e., a square matrix $W$ for which $W^T W=I$.\n\nThere are two properties of orthogonal matrices that are useful for training deep neural networks:\n+ they are norm-preserving, i.e., $ ||W \\mathbf x||^2=||\\mathbf x||^2$, and\n+ their columns (and rows) are all orthonormal to one another.\n\nAt least at the start of training, the first of these should help to keep the norm of the input constant throughout the network, which can help with the problem of exploding/vanishing gradients. \n\nSimilarly, an intuitive understanding of the second is that having orthonormal weight vectors encourages the weights to learn different input features.\n\nYou can obtain a random $n \\times n$ orthogonal matrix $W$, (uniformly distributed) by performing a QR factorization of an $n \\times n$ matrix with elements i.i.d. Gaussian random variables of mean $0$ and variance $1$. 
Here is an example:", "_____no_output_____" ] ], [ [ "import numpy as np\nfrom scipy.linalg import qr\n\nn = 3\nH = np.random.randn(n, n)\nprint(H)\nprint('\\n')\n\nQ, R = qr(H)\n\nprint (Q.dot(Q.T))", "[[ 1.80419174 0.44753171 -0.74705221]\n [-0.36897419 -1.44770945 0.43443346]\n [ 1.49721941 -0.62861191 -2.09674099]]\n\n\n[[ 1.00000000e+00 -4.83279136e-18 -1.72126600e-16]\n [-4.83279136e-18 1.00000000e+00 4.02741050e-17]\n [-1.72126600e-16 4.02741050e-17 1.00000000e+00]]\n" ] ], [ [ "### Steeper Gates\n\nWe can make the \"gates steeper\" so they change more repidly from 0 to 1 and the model is learnt quicker.\n\n<img src=\"https://github.com/DataScienceUB/DeepLearningMaster2019/blob/master/images/steeper.png?raw=1\" alt=\"\" style=\"width: 600px;\"/>\n", "_____no_output_____" ], [ "## Gated RNN\n\nRecurrent Neural Networks work just fine when we are dealing with short-term dependencies. \nHowever, vanilla RNNs fail to understand the lomg-term context dependencies (when relevant information may be separated from the point where it is needed by a huge load of irrelevant data).\n\nGated RNNs (with units that are designed to forget and to update relevant information) are a solution to this problem.\n\nThe most important types of gated RNNs are:\n\n+ **Long Short Term Memories** (LSTM). It was introduced by S.Hochreiter and J.Schmidhuber in 1997 and is widely used. LSTM is very good in the long run due to its high complexity.\n+ **Gated Recurrent Units** (GRU). It was recently introduced by K.Cho. It is simpler than LSTM, faster and optimizes quicker. \n", "_____no_output_____" ], [ "#### LSTM\n\nThe key idea of LSTMs is to have two state representations: the hidden state $\\mathbf h$ and the cell state $\\mathbf C$ (instead of $\\mathbf s$). \n\nThe cell state $\\mathbf C$ is like a conveyor belt. It runs straight down the entire chain, with only some minor linear interactions. It’s very easy for information to just flow along it unchanged.\n\n<img src=\"https://github.com/DataScienceUB/DeepLearningMaster2019/blob/master/images/lstm.png?raw=1\" alt=\"Source: http://colah.github.io/posts/2015-08-Understanding-LSTMs/\" style=\"width: 600px;\"/> \n\nLSTM has the ability to remove or add information to the cell state, carefully regulated by structures called **gates**.\n\nGates are a way to optionally let information through. They are composed out of a *sigmoid* neural net layer and a pointwise multiplication operation.\n\nLet us see how a LSTM uses $\\mathbf h_{t-1}, \\mathbf C_{t-1}$ and $\\mathbf x_{t}$ to generate the next states $\\mathbf C_t, \\mathbf h_{t}$:\n\n$$ \\mathbf f_t = \\sigma(W^f \\cdot [\\mathbf h_{t-1}, \\mathbf x_t]) \\mbox{ (Forget gate)} $$\n$$ \\mathbf i_t = \\sigma(W^i \\cdot [\\mathbf h_{t-1}, \\mathbf x_t]) \\mbox{ (Input gate)} $$\n$$ \\tilde {\\mathbf C_t} = \\operatorname{tanh}(W^C \\cdot [\\mathbf h_{t-1}, \\mathbf x_t]) $$\n$$ \\mathbf C_t = \\mathbf f_t \\cdot \\mathbf C_{t-1} + \\mathbf i_t \\cdot \\tilde {\\mathbf C_t} \\mbox{ (Update gate)} $$\n$$ \\mathbf o_t = \\sigma(W^o \\cdot [\\mathbf h_{t-1}, \\mathbf x_t]) $$\n$$ \\mathbf h_t = \\mathbf o_t \\cdot \\operatorname{tanh}(\\mathbf C_t) \\mbox{ (Output gate)} $$\n\nwhere $[\\mathbf h_{t-1}, \\mathbf x_t]$ represents the concatenation of two vectors.\n\nThere are other variants of LSTM (f.e. 
LSTM with peephole connections of Gers & Schmidhuber (2000))", "_____no_output_____" ], [ "#### GRU\n\nThe transition from hidden state $\\mathbf h_{t-1}$ to $\\mathbf h_{t}$ in vanilla RNN is defined by using an affine transformation and a point-wise nonlinearity. \n\nWhat motivates the use of gated units? Although RNNs can theoretically capture long-term dependencies, they are very hard to actually train to do this. Gated recurrent units are designed in a manner to have more persistent memory thereby making it easier for RNNs to capture long-term dependencies. \n\n<img src=\"https://github.com/DataScienceUB/DeepLearningMaster2019/blob/master/images/gru.png?raw=1\" alt=\"Source: http://colah.github.io/posts/2015-08-Understanding-LSTMs/\" style=\"width: 300px;\"/> \n\nLet us see how a GRU uses $\\mathbf h_{t-1}$ and $\\mathbf x_{t}$ to generate the next hidden state $\\mathbf h_{t}$.\n\n$$ \\mathbf z_{t} = \\sigma(W_z \\cdot [\\mathbf x_{t}, \\mathbf h_{t-1}]) \\mbox{ (Update gate)}$$\n$$ \\mathbf r_{t} = \\sigma(W_r \\cdot [\\mathbf x_{t}, \\mathbf h_{t-1}]) \\mbox{ (Reset gate)}$$\n$$ \\tilde{\\mathbf h}_{t} = \\operatorname{tanh}(\\mathbf r_{t} \\cdot [\\mathbf x_{t}, \\mathbf r_t \\cdot \\mathbf h_{t-1}] ) \\mbox{ (New memory)}$$\n$$ \\mathbf h_{t} = (1 - \\mathbf z_{t}) \\cdot \\mathbf h_{t-1} + \\mathbf z_{t} \\cdot \\tilde{\\mathbf h}_{t} \\mbox{(Hidden state)}$$\n\nIt combines the forget and input gates into a single “update gate.” It also merges the cell state and hidden state, and makes some other changes. The resulting model is simpler than standard LSTM models.", "_____no_output_____" ], [ "### RNN in Keras\n\nWhenever you train or test your LSTM/GRU, you first have to build your input matrix $\\mathbf X$ of shape ``[nb_samples,timesteps,input_dim]`` where your batch size divides ``nb_samples``. \n\nFor instance, if ``nb_samples=1024`` and ``batch_size=64``, it means that your model will receive blocks of 64 samples, compute each output (whatever the number of timesteps is for every sample), average the gradients and propagate it to update the parameters vector. \n\n> By default, **Keras shuffles (permutes) the samples in $\\mathbf X$** and consequently the dependencies between $\\mathbf X_i$ and $\\mathbf X_{i+1}$ are not considered. If\n$\\mathbf X_i$ and $\\mathbf X_{i+1}$ represent independent sequences (f.e. different instances of a times series), this is not a problem. but if it represents parts of a bigger sequence (f.e. words in a text) it is!\n\n> By using the **stateful model** all the states are propagated to the next batch. 
We must also prevent shuffling in the ``fit`` method.", "_____no_output_____" ] ], [ [ "from __future__ import print_function\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.layers import LSTM, GRU\n\n%matplotlib inline\n\ntsteps = 1\n\n# number of elements ahead that are used to make the prediction\nlahead = 1\n\n\ndef gen_cosine_amp(amp=100, period=10, x0=0, xn=50000, step=1, k=0.0001):\n \"\"\"\n Generates an absolute cosine time series with the amplitude\n exponentially decreasing\n Arguments:\n amp: amplitude of the cosine function\n period: period of the cosine function\n x0: initial x of the time series\n xn: final x of the time series\n step: step of the time series discretization\n k: exponential rate\n \"\"\"\n cos = np.zeros((xn - x0) * step)\n for i in range(len(cos)):\n idx = x0 + i * step\n cos[i] = amp * np.cos(idx / (2 * np.pi * period))\n cos[i] = cos[i] * np.exp(-k * idx)\n cos = cos / np.max(cos)\n return cos\n\ncos = gen_cosine_amp()\nplt.figure(figsize=(15,7))\nplt.plot(cos)", "_____no_output_____" ], [ "# print('Generating Input Data')\n\nlenght_series = 400\ncos = gen_cosine_amp()\ncos_series = np.zeros((50000-lenght_series, lenght_series, 1))\n\nfor i in range(50000-lenght_series):\n cos_series[i,:,:] = cos[i:i+lenght_series, np.newaxis]\n \n\n\nprint('Input shape:', cos_series.shape)\n", "Input shape: (49600, 400, 1)\n" ], [ "expected_output = np.zeros((len(cos)-lenght_series, 1))\nfor i in range(len(cos_series)-1):\n expected_output[i, 0] = cos[i+lenght_series]\n\nprint('Output shape', expected_output.shape)\nprint(\"Sample: \", expected_output[0])\n\nplt.figure(figsize=(20,5))\nplt.plot(expected_output,'-r')\nplt.title('Expected')\nplt.show()", "Output shape (49600, 1)\nSample: [0.95748091]\n" ], [ "\nprint('Creating Model')\n\nmodel = Sequential()\nmodel.add(LSTM(50,\n batch_input_shape=(None, lenght_series, 1),\n return_sequences=True, # This param indicates whether to return \n # the last output in the output\n # sequence, or the full sequence.\n stateful=False))\nmodel.add(LSTM(50,\n batch_input_shape=(None, lenght_series, 1),\n return_sequences=False,\n stateful=False))\nmodel.add(Dense(1))\n\nmodel.compile(loss='mse', optimizer='adam')\n\nmodel.summary()", "Creating Model\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/resource_variable_ops.py:435: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\nInstructions for updating:\nColocations handled automatically by placer.\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/utils/losses_utils.py:170: to_float (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse tf.cast instead.\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nlstm (LSTM) (None, 400, 50) 10400 \n_________________________________________________________________\nlstm_1 (LSTM) (None, 50) 20200 \n_________________________________________________________________\ndense (Dense) (None, 1) 51 \n=================================================================\nTotal params: 30,651\nTrainable params: 30,651\nNon-trainable params: 
0\n_________________________________________________________________\n" ], [ "print('Training')\n\nepochs = 1\nbatch_size = 100\n\n\nmodel.fit(cos_series,\n expected_output,\n batch_size=batch_size,\n verbose=1, \n epochs=epochs,\n shuffle=True)", "Training\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/math_ops.py:3066: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse tf.cast instead.\n49600/49600 [==============================] - 491s 10ms/sample - loss: 6.2636e-04\n" ], [ "print('Predicting...')\npredicted_output = model.predict(cos_series, batch_size=batch_size)\n\nprint('Ploting Results')\nplt.figure(figsize=(20,5))\nplt.plot(predicted_output,'-g')\nplt.plot(expected_output - predicted_output,'-r')\n\n\nplt.title('Predicted Output and Error')\nplt.show()", "Predicting...\nPloting Results\n" ], [ "future = np.zeros((1,400,1))\nfuture[0,:,:] = cos[10000:10400, np.newaxis]\nprint(future)\n\npredicted_future = model.predict(future)\nprint(predicted_future)", "[[[-0.17782635]\n [-0.18291082]\n [-0.18794796]\n [-0.19293648]\n [-0.19787513]\n [-0.20276269]\n [-0.2075979 ]\n [-0.21237957]\n [-0.21710649]\n [-0.22177748]\n [-0.22639136]\n [-0.23094698]\n [-0.23544319]\n [-0.23987887]\n [-0.2442529 ]\n [-0.24856419]\n [-0.25281167]\n [-0.25699426]\n [-0.26111093]\n [-0.26516063]\n [-0.26914237]\n [-0.27305514]\n [-0.27689797]\n [-0.2806699 ]\n [-0.28436998]\n [-0.2879973 ]\n [-0.29155094]\n [-0.29503004]\n [-0.29843371]\n [-0.30176111]\n [-0.30501142]\n [-0.30818382]\n [-0.31127753]\n [-0.31429178]\n [-0.31722582]\n [-0.32007893]\n [-0.3228504 ]\n [-0.32553954]\n [-0.32814569]\n [-0.3306682 ]\n [-0.33310646]\n [-0.33545986]\n [-0.33772783]\n [-0.3399098 ]\n [-0.34200524]\n [-0.34401364]\n [-0.3459345 ]\n [-0.34776736]\n [-0.34951177]\n [-0.35116731]\n [-0.35273357]\n [-0.35421017]\n [-0.35559677]\n [-0.35689302]\n [-0.35809862]\n [-0.35921328]\n [-0.36023673]\n [-0.36116874]\n [-0.36200908]\n [-0.36275756]\n [-0.36341402]\n [-0.36397829]\n [-0.36445027]\n [-0.36482984]\n [-0.36511693]\n [-0.36531148]\n [-0.36541347]\n [-0.36542289]\n [-0.36533975]\n [-0.36516409]\n [-0.36489597]\n [-0.36453549]\n [-0.36408275]\n [-0.36353789]\n [-0.36290106]\n [-0.36217243]\n [-0.36135222]\n [-0.36044066]\n [-0.35943798]\n [-0.35834446]\n [-0.3571604 ]\n [-0.35588611]\n [-0.35452194]\n [-0.35306824]\n [-0.35152541]\n [-0.34989386]\n [-0.34817401]\n [-0.34636631]\n [-0.34447125]\n [-0.34248932]\n [-0.34042104]\n [-0.33826695]\n [-0.33602762]\n [-0.33370362]\n [-0.33129557]\n [-0.32880408]\n [-0.32622982]\n [-0.32357344]\n [-0.32083564]\n [-0.31801712]\n [-0.31511862]\n [-0.31214088]\n [-0.30908468]\n [-0.3059508 ]\n [-0.30274006]\n [-0.29945329]\n [-0.29609132]\n [-0.29265503]\n [-0.28914531]\n [-0.28556305]\n [-0.28190918]\n [-0.27818463]\n [-0.27439038]\n [-0.27052738]\n [-0.26659664]\n [-0.26259916]\n [-0.25853596]\n [-0.2544081 ]\n [-0.25021662]\n [-0.24596261]\n [-0.24164715]\n [-0.23727135]\n [-0.23283632]\n [-0.22834321]\n [-0.22379317]\n [-0.21918735]\n [-0.21452693]\n [-0.20981311]\n [-0.20504709]\n [-0.20023009]\n [-0.19536334]\n [-0.19044808]\n [-0.18548557]\n [-0.18047707]\n [-0.17542385]\n [-0.17032722]\n [-0.16518847]\n [-0.1600089 ]\n [-0.15478984]\n [-0.14953262]\n [-0.14423858]\n [-0.13890906]\n [-0.13354543]\n [-0.12814904]\n [-0.12272128]\n [-0.11726351]\n [-0.11177714]\n [-0.10626355]\n [-0.10072416]\n [-0.09516035]\n [-0.08957356]\n [-0.0839652 ]\n [-0.07833669]\n 
[-0.07268947]\n [-0.06702496]\n [-0.06134462]\n [-0.05564987]\n [-0.04994216]\n [-0.04422295]\n [-0.03849368]\n [-0.0327558 ]\n [-0.02701078]\n [-0.02126006]\n [-0.01550511]\n [-0.00974738]\n [-0.00398833]\n [ 0.00177057]\n [ 0.00752787]\n [ 0.01328212]\n [ 0.01903185]\n [ 0.02477561]\n [ 0.03051195]\n [ 0.03623941]\n [ 0.04195654]\n [ 0.04766191]\n [ 0.05335407]\n [ 0.05903157]\n [ 0.06469298]\n [ 0.07033688]\n [ 0.07596183]\n [ 0.08156642]\n [ 0.08714923]\n [ 0.09270885]\n [ 0.09824388]\n [ 0.10375291]\n [ 0.10923457]\n [ 0.11468746]\n [ 0.12011021]\n [ 0.12550146]\n [ 0.13085984]\n [ 0.136184 ]\n [ 0.14147261]\n [ 0.14672433]\n [ 0.15193783]\n [ 0.15711181]\n [ 0.16224496]\n [ 0.16733599]\n [ 0.17238362]\n [ 0.17738658]\n [ 0.1823436 ]\n [ 0.18725346]\n [ 0.1921149 ]\n [ 0.19692671]\n [ 0.20168768]\n [ 0.20639661]\n [ 0.21105233]\n [ 0.21565366]\n [ 0.22019945]\n [ 0.22468855]\n [ 0.22911985]\n [ 0.23349223]\n [ 0.2378046 ]\n [ 0.24205587]\n [ 0.24624499]\n [ 0.25037089]\n [ 0.25443256]\n [ 0.25842897]\n [ 0.26235913]\n [ 0.26622205]\n [ 0.27001677]\n [ 0.27374234]\n [ 0.27739783]\n [ 0.28098233]\n [ 0.28449494]\n [ 0.2879348 ]\n [ 0.29130103]\n [ 0.29459282]\n [ 0.29780933]\n [ 0.30094976]\n [ 0.30401335]\n [ 0.30699932]\n [ 0.30990693]\n [ 0.31273547]\n [ 0.31548423]\n [ 0.31815254]\n [ 0.32073973]\n [ 0.32324517]\n [ 0.32566823]\n [ 0.32800832]\n [ 0.33026487]\n [ 0.33243731]\n [ 0.33452512]\n [ 0.33652778]\n [ 0.33844481]\n [ 0.34027573]\n [ 0.3420201 ]\n [ 0.34367749]\n [ 0.3452475 ]\n [ 0.34672976]\n [ 0.34812389]\n [ 0.34942958]\n [ 0.3506465 ]\n [ 0.35177436]\n [ 0.3528129 ]\n [ 0.35376187]\n [ 0.35462105]\n [ 0.35539023]\n [ 0.35606925]\n [ 0.35665795]\n [ 0.3571562 ]\n [ 0.35756388]\n [ 0.35788092]\n [ 0.35810725]\n [ 0.35824283]\n [ 0.35828765]\n [ 0.35824171]\n [ 0.35810505]\n [ 0.35787771]\n [ 0.35755977]\n [ 0.35715133]\n [ 0.35665251]\n [ 0.35606346]\n [ 0.35538435]\n [ 0.35461535]\n [ 0.35375669]\n [ 0.35280861]\n [ 0.35177135]\n [ 0.3506452 ]\n [ 0.34943047]\n [ 0.34812747]\n [ 0.34673656]\n [ 0.34525811]\n [ 0.3436925 ]\n [ 0.34204016]\n [ 0.34030151]\n [ 0.33847702]\n [ 0.33656716]\n [ 0.33457244]\n [ 0.33249338]\n [ 0.33033051]\n [ 0.32808442]\n [ 0.32575567]\n [ 0.32334488]\n [ 0.32085268]\n [ 0.31827971]\n [ 0.31562664]\n [ 0.31289415]\n [ 0.31008297]\n [ 0.3071938 ]\n [ 0.30422741]\n [ 0.30118456]\n [ 0.29806602]\n [ 0.29487262]\n [ 0.29160517]\n [ 0.28826452]\n [ 0.28485152]\n [ 0.28136705]\n [ 0.27781202]\n [ 0.27418733]\n [ 0.27049392]\n [ 0.26673274]\n [ 0.26290476]\n [ 0.25901094]\n [ 0.25505231]\n [ 0.25102986]\n [ 0.24694464]\n [ 0.24279769]\n [ 0.23859007]\n [ 0.23432287]\n [ 0.22999716]\n [ 0.22561407]\n [ 0.22117471]\n [ 0.21668022]\n [ 0.21213174]\n [ 0.20753045]\n [ 0.20287751]\n [ 0.19817412]\n [ 0.19342147]\n [ 0.18862078]\n [ 0.18377328]\n [ 0.17888021]\n [ 0.1739428 ]\n [ 0.16896232]\n [ 0.16394005]\n [ 0.15887726]\n [ 0.15377524]\n [ 0.14863529]\n [ 0.14345872]\n [ 0.13824685]\n [ 0.13300101]\n [ 0.12772253]\n [ 0.12241275]\n [ 0.11707303]\n [ 0.11170473]\n [ 0.10630921]\n [ 0.10088784]\n [ 0.09544201]\n [ 0.08997308]\n [ 0.08448247]\n [ 0.07897155]\n [ 0.07344173]\n [ 0.06789442]\n [ 0.06233102]\n [ 0.05675295]\n [ 0.05116161]\n [ 0.04555844]\n [ 0.03994485]\n [ 0.03432226]\n [ 0.0286921 ]\n [ 0.02305581]\n [ 0.0174148 ]\n [ 0.0117705 ]\n [ 0.00612436]\n [ 0.00047779]\n [-0.00516777]\n [-0.01081089]\n [-0.01645014]\n [-0.0220841 ]\n [-0.02771134]\n [-0.03333044]\n [-0.03893997]\n [-0.04453851]\n [-0.05012466]\n [-0.05569699]\n [-0.0612541 ]\n 
[-0.06679458]\n [-0.07231704]\n [-0.07782008]\n [-0.08330231]\n [-0.08876234]\n [-0.0941988 ]\n [-0.09961031]\n [-0.10499551]\n [-0.11035304]\n [-0.11568155]\n [-0.12097969]\n [-0.12624613]\n [-0.13147954]\n [-0.1366786 ]\n [-0.14184201]\n [-0.14696845]\n [-0.15205665]\n [-0.15710532]\n [-0.16211318]\n [-0.16707898]\n [-0.17200148]\n [-0.17687942]\n [-0.18171158]\n [-0.18649676]\n [-0.19123374]]]\n[[-0.19472048]]\n" ], [ "future = np.zeros((1,lenght_series,1))\nfuture[0,:,:] = cos[:lenght_series, np.newaxis]\npredicted_future = np.zeros((1000,))\n\n\nfor i in range(1000):\n predicted_future[i] = model.predict(future)\n future[0,0:lenght_series-1,0] = future[0,1:lenght_series,0]\n future[0,lenght_series-1,0] = predicted_future[i]\n\nprint(predicted_future[0])\nplt.plot(predicted_future,'-g')\nplt.plot(cos[:1000],'-r')", "0.9502222537994385\n" ] ], [ [ "## Exercise\n\nRead and execute carefully this notebook: [Chollet-advanced-usage-of-recurrent-neural-networks.ipynb](Chollet-advanced-usage-of-recurrent-neural-networks.ipynb)", "_____no_output_____" ] ] ]
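To make the GRU equations in the cells above concrete, here is a minimal NumPy sketch of a single GRU transition. It is not the Keras implementation: the weight shapes, the random initialization, the concatenation order `[x_t, h_{t-1}]`, and the omission of bias terms are assumptions made purely for illustration.

```python
# Minimal NumPy sketch of one GRU step (illustrative only; biases omitted,
# weights initialized randomly, shapes chosen arbitrarily).
import numpy as np

def sigmoid(a):
    return 1.0 / (1.0 + np.exp(-a))

def gru_step(x_t, h_prev, W_z, W_r, W):
    """One GRU transition from h_{t-1} to h_t given the input x_t."""
    xh = np.concatenate([x_t, h_prev])          # [x_t, h_{t-1}]
    z = sigmoid(W_z.dot(xh))                    # update gate
    r = sigmoid(W_r.dot(xh))                    # reset gate
    xrh = np.concatenate([x_t, r * h_prev])     # [x_t, r_t * h_{t-1}]
    h_tilde = np.tanh(W.dot(xrh))               # new memory
    return (1.0 - z) * h_prev + z * h_tilde     # next hidden state

rng = np.random.RandomState(0)
input_dim, hidden_dim = 3, 4
x_t = rng.randn(input_dim)
h_prev = np.zeros(hidden_dim)
W_z, W_r, W = (0.1 * rng.randn(hidden_dim, input_dim + hidden_dim) for _ in range(3))
print(gru_step(x_t, h_prev, W_z, W_r, W))
```

When the update gate $z_t$ stays close to 0, the previous state is copied through almost unchanged, which is the mechanism that lets the unit keep information over many steps.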
[ "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ] ]
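The note in this notebook about stateful models and shuffling is easy to get wrong in practice, so here is a minimal, hedged sketch of the usual Keras recipe: a fixed `batch_input_shape`, `stateful=True`, `shuffle=False` in `fit`, and an explicit `reset_states()` once a full pass over the long sequence is done. The layer sizes, batch size, and dummy data below are invented purely for illustration.

```python
# Sketch of a stateful LSTM setup in Keras; states carry over between
# consecutive batches instead of being reset after every batch.
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense

batch_size, timesteps, input_dim = 32, 20, 1

model = Sequential([
    LSTM(16, batch_input_shape=(batch_size, timesteps, input_dim), stateful=True),
    Dense(1),
])
model.compile(loss='mse', optimizer='adam')

# Dummy sequential data; with a stateful model the number of samples
# must be a multiple of the (fixed) batch size.
X = np.random.randn(batch_size * 10, timesteps, input_dim)
y = np.random.randn(batch_size * 10, 1)

for epoch in range(3):
    # shuffle=False keeps consecutive batches in order, so the states carried
    # over from one batch to the next correspond to earlier chunks of the
    # same long sequence.
    model.fit(X, y, batch_size=batch_size, epochs=1, shuffle=False, verbose=0)
    # Reset the carried states once a full pass over the sequence is done.
    model.reset_states()
```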
cba046bec60c37133ecb048b42a5876a959fd8da
11,900
ipynb
Jupyter Notebook
assignments/assignment_2/assignment_2_Drake_Hayes.ipynb
hua-mike/CYPLAN255
03493a41f51bcd56f587cc088468eccbeafadc05
[ "CNRI-Python" ]
null
null
null
assignments/assignment_2/assignment_2_Drake_Hayes.ipynb
hua-mike/CYPLAN255
03493a41f51bcd56f587cc088468eccbeafadc05
[ "CNRI-Python" ]
null
null
null
assignments/assignment_2/assignment_2_Drake_Hayes.ipynb
hua-mike/CYPLAN255
03493a41f51bcd56f587cc088468eccbeafadc05
[ "CNRI-Python" ]
null
null
null
25.42735
330
0.532437
[ [ [ "# Assignment 2 | Programming Logic\n\nReminder: in all of the assignments this semester, the answer is not the only consideration, but also how you get to it. It's OK (suggested even!) to use the internet for help. But you _should_ be able to answer all of these questions using only the programming techniques you have learned in class and from the readings.\n\nA few keys for success:\n- Avoid manual data entry\n- Emphasize logic and clarity\n- Use comments, docstrings, and descriptive variable names\n- In general, less code is better. But if more lines of code makes your program easier to read or understand what its doing, then go for it.", "_____no_output_____" ], [ "## Problem 1 \nWrite a Python program to count the number of even and odd numbers from a list of numbers. Test your code by running it on a list of integers from 1 to 9. No need to make this a function unless you want to.", "_____no_output_____" ] ], [ [ "def count_even_and_odd(list):\n number_even, number_odd= 0,0\n for x in list:\n if x%2 == 0:\n number_even += 1\n else:\n number_odd +=1\n print (\"evens\", number_even, \"odds\", number_odd)", "_____no_output_____" ], [ "my_list = [1,2,3,4,5,6,7,8,9]\ncount_even_and_odd(my_list)", "evens 4 odds 5\n" ] ], [ [ "## Problem 2\nWrite a Python function that takes a list of numbers and returns a list containing only the even numbers from the original list. Test your function by running it on a list of integers from 1 to 9.", "_____no_output_____" ] ], [ [ "def only_returns_evens(list):\n list_of_evens = []\n for x in list:\n if x%2 == 0:\n list_of_evens.append(x)\n else:\n pass\n print(list_of_evens)\n ", "_____no_output_____" ], [ "only_returns_evens(my_list)", "[2, 4, 6, 8]\n" ] ], [ [ "## Problem 3\n\n1. Create a function that accepts a list of integers as an argument and returns a list of floats which equals each number as a fraction of the sum of all the items in the original list.\n\n2. Next, create a second function which is the same as the first, but limit each number in the output list to two decimals.\n\n3. Create another function which builds on the previous one by allowing a \"user\" pass in an argument that defines the number of decimal places to use in the output list.\n\n4. Test each of these functions with a list of integers", "_____no_output_____" ] ], [ [ "#Number 1\ndef floats_as_fraction_of_list_sum(list):\n print([x/sum(my_list) for x in my_list])\n \n \n ", "_____no_output_____" ], [ "floats_as_fraction_of_list_sum(my_list)", "[0.022222222222222223, 0.044444444444444446, 0.06666666666666667, 0.08888888888888889, 0.1111111111111111, 0.13333333333333333, 0.15555555555555556, 0.17777777777777778, 0.2]\n" ], [ "#Number 2\ndef floats_as_fraction_of_list_sum_2_points(list):\n new_list = ([x/sum(my_list) for x in my_list])\n new_list = [round(x, 2) for x in new_list]\n print(new_list)\n ", "_____no_output_____" ], [ "floats_as_fraction_of_list_sum_2_points(my_list)", "[0.02, 0.04, 0.07, 0.09, 0.11, 0.13, 0.16, 0.18, 0.2]\n" ], [ "#Number 3\ndef floats_as_fraction_of_list_sum_b_points(list,b):\n new_list = ([x/sum(my_list) for x in my_list])\n new_list = [round(x, b) for x in new_list]\n print(new_list)", "_____no_output_____" ], [ "floats_as_fraction_of_list_sum_b_points(my_list,2)", "[0.02, 0.04, 0.07, 0.09, 0.11, 0.13, 0.16, 0.18, 0.2]\n" ] ], [ [ "## Problem 4\nA prime number is any whole number greater than 1 that has no positive divisors besides 1 and itself. In other words, a prime number must be:\n1. an integer\n2. greater than 1\n3. 
divisible only by 1 and itself.\n\nWrite a function is_prime(n) that accepts an argument `n` and returns `True` (boolean) if `n` is a prime number and `False` if n is not prime. For example, `is_prime(11)` should return `True` and `is_prime(12)` should return `False`.\n", "_____no_output_____" ] ], [ [ "def is_prime(n):\n    if n > 1:\n        for x in range(2, n):  ## check every candidate divisor between 2 and n-1\n            if (n % x) == 0:\n                return False\n        return True\n    else:\n        return False\n", "_____no_output_____" ], [ "is_prime(5)", "_____no_output_____" ] ], [ [ "## Problem 5", "_____no_output_____" ], [ "1. Create a class called `Housing`, and add the following attributes to it:\n - type\n - area\n - number of bedrooms\n - value (price)\n - year built.\n2. Create two instances of your class and populate their attributes (make 'em up)\n3. Create a method called `rent()` that calculates the estimated monthly rent for each house (assume that monthly rent is 0.4% of the value of the house)\n4. Print the rent for both instances.", "_____no_output_____" ] ], [ [ "class Housing:\n    pass", "_____no_output_____" ], [ "house_1 = Housing()\nhouse_2 = Housing()", "_____no_output_____" ], [ "class Housing:\n    def __init__(self, type, area, number_of_bedrooms, value, year_built):\n        self.type = type\n        self.area = area\n        self.number_of_bedrooms = number_of_bedrooms\n        self.value = value\n        self.year_built = year_built\n    def rent(self):\n        # monthly rent is 0.4% of the value of the house\n        return self.value * 0.004", "_____no_output_____" ], [ "house_1 = Housing(\"big\", \"3000 sq ft\", 5, 150000, 1954)\nhouse_2 = Housing(\"small\", \"1000 sq ft\", 1, 500000, 2000)", "_____no_output_____" ], [ "\"\"\"house_1.type = \"big\"\nhouse_1.area = \"3000 sq ft\"\nhouse_1.number_of_bedrooms = 5\nhouse_1.value = 1500000\nhouse_1.year_built = 1954\n\nhouse_2.type = \"small\"\nhouse_2.area = \"1000 sq ft\"\nhouse_2.number_of_bedrooms = 1\nhouse_2.value = 500000\nhouse_2.year_built = 2000\n\"\"\"\n", "_____no_output_____" ], [ "\"\"\"class Housing:\n    def __init__(self, type, area, number_of_bedrooms, value, year_built):\n        self.type = type\n        self.area = area\n        self.number_of_bedrooms = number_of_bedrooms\n        self.value = value\n        self.value = year_built\n    def rent(self):\n        return self.value * .04\"\"\"", "_____no_output_____" ], [ "house_1.rent()", "_____no_output_____" ] ] ]
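One more note on this assignment: the Problem 3 functions earlier in the notebook read the global `my_list` rather than their own argument, so here is a hedged, self-contained variant that uses only the parameter. The function names (`fractions_of_sum`, `fractions_of_sum_rounded`) are new, not part of the original submission, and the sketch assumes the input list is non-empty with a non-zero sum.

```python
# Alternative sketch for Problem 3: each function uses only its argument
# instead of the global my_list. Names are illustrative, not from the notebook.
def fractions_of_sum(numbers):
    """Return each number as a fraction of the sum of the list."""
    total = sum(numbers)
    return [x / total for x in numbers]

def fractions_of_sum_rounded(numbers, decimals=2):
    """Same as fractions_of_sum, but rounded to `decimals` places (default 2)."""
    return [round(f, decimals) for f in fractions_of_sum(numbers)]

if __name__ == "__main__":
    sample = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    print(fractions_of_sum(sample))            # part 1: raw fractions
    print(fractions_of_sum_rounded(sample))    # part 2: two decimals
    print(fractions_of_sum_rounded(sample, 3)) # part 3: user-chosen precision
```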
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ] ]
cba057bfc49629ce3a660b0c4f73b618811ce5b1
744,636
ipynb
Jupyter Notebook
04_Gradients_Color_Spaces/4_5_Direction_of_Gradient.ipynb
faisalshahbaz/computer-vision-nanodegree
92075f13cfbc823f41185a9ac225c155e02cd9c4
[ "MIT" ]
null
null
null
04_Gradients_Color_Spaces/4_5_Direction_of_Gradient.ipynb
faisalshahbaz/computer-vision-nanodegree
92075f13cfbc823f41185a9ac225c155e02cd9c4
[ "MIT" ]
null
null
null
04_Gradients_Color_Spaces/4_5_Direction_of_Gradient.ipynb
faisalshahbaz/computer-vision-nanodegree
92075f13cfbc823f41185a9ac225c155e02cd9c4
[ "MIT" ]
1
2020-01-28T10:55:08.000Z
2020-01-28T10:55:08.000Z
5,772.372093
739,496
0.960217
[ [ [ "# Direction of the Gradient\n\nWhen you play around with the thresholding for the gradient magnitude in the previous exercise, you find what you might expect, namely, that it picks up the lane lines well, but with a lot of other stuff detected too. Gradient magnitude is at the heart of Canny edge detection, and is why Canny works well for picking up all edges.\n\nIn the case of lane lines, we're interested only in edges of a particular orientation. So now we will explore the direction, or orientation, of the gradient.\n\nThe direction of the gradient is simply the inverse tangent (arctangent) of the y gradient divided by the x gradient:\n\n**arctan(sobely/sobelx)**", "_____no_output_____" ], [ "Each pixel of the resulting image contains a value for the angle of the gradient away from horizontal in units of radians, covering a range of **-π/2 to π/2**. An orientation of 0 implies a vertical line and orientations of **+/−π/2** imply horizontal lines. (Note that in the quiz below, we actually utilize np.arctan2, which can return values between **+/−π**; however, as we'll take the absolute value of sobelx, this restricts the values to **+/−π/2**, as shown here.)\n\nIn this next exercise, you'll write a function to compute the direction of the gradient and apply a threshold. The direction of the gradient is much noisier than the gradient magnitude, but you should find that you can pick out particular features by orientation.\n\nSteps to take in this exercise:\n\n1. Fill out the function in the editor below to return a thresholded absolute value of the gradient direction. Use Boolean operators, again with exclusive (**<, >**) or inclusive (**<=, >=**) thresholds.\n2. Test that your function returns output similar to the example below for **sobel_kernel=15, thresh=(0.7, 1.3)**.", "_____no_output_____" ] ], [ [ "import numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport pickle\n\n\n# Read in an image\nimage = mpimg.imread('img/signs_vehicles_xygrad.png')\n\n# Define a function that applies Sobel x and y, \n# then computes the direction of the gradient\n# and applies a threshold.\ndef dir_threshold(img, sobel_kernel=3, thresh=(0, np.pi/2)):\n    \n    # Apply the following steps to img\n    # 1) Convert to grayscale\n    # 2) Take the gradient in x and y separately\n    # 3) Take the absolute value of the x and y gradients\n    # 4) Use np.arctan2(abs_sobely, abs_sobelx) to calculate the direction of the gradient \n    # 5) Create a binary mask where direction thresholds are met\n    # 6) Return this mask as your binary_output image\n    # Grayscale\n    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n    # Calculate the x and y gradients\n    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)\n    sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)\n    # Take the absolute value of the gradient direction, \n    # apply a threshold, and create a binary image result\n    absgraddir = np.arctan2(np.absolute(sobely), np.absolute(sobelx))\n    binary_output = np.zeros_like(absgraddir)\n    binary_output[(absgraddir >= thresh[0]) & (absgraddir <= thresh[1])] = 1\n    #binary_output = np.copy(img) # Remove this line\n    return binary_output\n    \n# Run the function\ndir_binary = dir_threshold(image, sobel_kernel=15, thresh=(0.7, 1.3))\n# Plot the result\nf, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))\nf.tight_layout()\nax1.imshow(image)\nax1.set_title('Original Image', fontsize=50)\nax2.imshow(dir_binary, cmap='gray')\nax2.set_title('Thresholded Grad. 
Dir.', fontsize=50)\nplt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)\nplt.show()", "_____no_output_____" ] ] ]
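As a quick sanity check of the orientation claim in this notebook (direction near 0 for vertical lines, near ±π/2 for horizontal lines), here is a small NumPy-only sketch that needs no test image. The tiny synthetic arrays and the use of `np.gradient` instead of a Sobel operator are simplifications made purely for illustration.

```python
# NumPy-only sanity check: vertical edges -> direction ~0, horizontal edges -> ~pi/2.
# Synthetic 4x4 images and np.gradient are illustrative stand-ins for cv2.Sobel.
import numpy as np

vertical_edge = np.tile(np.array([0., 0., 1., 1.]), (4, 1))  # intensity changes along x
horizontal_edge = vertical_edge.T                            # intensity changes along y

def gradient_direction(img):
    gy, gx = np.gradient(img)                 # axis 0 is y (rows), axis 1 is x (columns)
    return np.arctan2(np.abs(gy), np.abs(gx))

print(gradient_direction(vertical_edge)[2, 2])    # ~0.0  -> vertical line
print(gradient_direction(horizontal_edge)[2, 2])  # ~pi/2 -> horizontal line
```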
[ "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ] ]
cba05d9d932a1598a3a5b8b4e69eaa0177c06334
532,936
ipynb
Jupyter Notebook
mltrain-nips-2017/yu_chia_chen/radius_estimation_tutorial.ipynb
gopala-kr/ds-notebooks
bc35430ecdd851f2ceab8f2437eec4d77cb59423
[ "MIT" ]
1
2019-05-10T09:16:23.000Z
2019-05-10T09:16:23.000Z
mltrain-nips-2017/yu_chia_chen/radius_estimation_tutorial.ipynb
gopala-kr/ds-notebooks
bc35430ecdd851f2ceab8f2437eec4d77cb59423
[ "MIT" ]
null
null
null
mltrain-nips-2017/yu_chia_chen/radius_estimation_tutorial.ipynb
gopala-kr/ds-notebooks
bc35430ecdd851f2ceab8f2437eec4d77cb59423
[ "MIT" ]
1
2019-05-10T09:17:28.000Z
2019-05-10T09:17:28.000Z
23.487704
60,690
0.293887
[ [ [ "<center><h1>Improved Graph Laplacian via Geometric Self-Consistency</h1></center>\n<center>Yu-Chia Chen, Dominique Perrault-Joncas, Marina Meilă, James McQueen. University of Washington</center> <br>\n\n<center>Original paper: <a href=https://nips.cc/Conferences/2017/Schedule?showEvent=9223>Improved Graph Laplacian via Geometric Self-Consistency] on NIPS 2017 </a></center>", "_____no_output_____" ], [ "## The Task\n1. Problem: Estimate the ``radius`` of heat kernel in manifold embedding\n1. Formally: Optimize Laplacian w.r.t. parameters (e.g. ``radius``)\n1. Previous work: \n 1. asymptotic rates depending on the (unknown) manifold [4]\n 1. Embedding dependent neighborhood reconstruction [6]\n1. Challenge: it’s an unsupervised problem! What “target” to choose?\n\n## The ``radius`` affects…\n1. Quality of manifold embedding via neighborhood selection\n1. Laplacian-based embedding and clustering via the kernel for computing similarities \n1. Estimation of other geometric quantities that depend on the Laplacian (e.g Riemannian metric) or not (e.g intrinsic dimension).\n1. Regression on manifolds via Gaussian Processes or Laplacian regularization.\n\nAll the reference is the same as the poster.", "_____no_output_____" ], [ "## Radius Estimation on hourglass dataset\n\nIn this tutorial, we are going to estimate the radius of a noisy hourglass data. The method we used is based on our NIPS 2017 paper \"[Improved Graph Laplacian via Geometric Self-Consistency](https://nips.cc/Conferences/2017/Schedule?showEvent=9223)\" (Perrault-Joncas et. al). Main idea is to find an estimated radius $\\hat{r}_d$ given dimension $d$ that minimize the distorsion. The distorsion is evaluated by the riemannian metrics of local tangent space.\n\n\nBelow are some configurations that enables plotly to render Latex properly.", "_____no_output_____" ] ], [ [ "!yes | conda install --channel=conda-forge pip nose coverage gcc cython numpy scipy scikit-learn pyflann pyamg h5py plotly", "Fetching package metadata .............\nSolving package specifications: .\n\nPackage plan for installation in environment /home/nbcommon/anaconda3_420:\n\nThe following NEW packages will be INSTALLED:\n\n blas: 1.1-openblas conda-forge\n cloog: 0.18.0-0 \n coverage: 4.4.2-py35_0 conda-forge\n gcc: 4.8.5-7 \n gmp: 6.1.2-0 conda-forge\n isl: 0.12.2-0 \n mpc: 1.0.3-4 conda-forge\n mpfr: 3.1.5-0 conda-forge\n openblas: 0.2.20-6 conda-forge\n plotly: 2.2.3-py35_0 conda-forge\n\nThe following packages will be UPDATED:\n\n cython: 0.24.1-py35_0 --> 0.27.3-py35_0 conda-forge\n flann: 1.8.4-0 conda-forge --> 1.9.1-0 conda-forge\n h5py: 2.7.1-py35h8d53cdc_0 --> 2.7.1-py35_1 conda-forge\n libgdal: 2.2.2-h804cdde_1 --> 2.2.2-h6bd4d82_1 \n libnetcdf: 4.4.1.1-h816af47_8 --> 4.4.1.1-h97d33d9_8 \n nose: 1.3.7-py35_1 --> 1.3.7-py35_2 conda-forge\n numpy: 1.11.3-py35_0 --> 1.11.3-py35_blas_openblas_203 conda-forge [blas_openblas]\n opencv: 3.3.1-py35hb7523de_0 --> 3.3.1-py35h9bb06ff_1 \n pip: 8.1.2-py35_0 --> 9.0.1-py35_0 conda-forge\n pyflann: 1.8.4-py35_0 conda-forge --> 1.9.1-py35_0 conda-forge\n pytables: 3.4.2-py35hfa98db7_2 --> 3.4.2-py35_6 conda-forge\n scikit-learn: 0.18.1-np111py35_1 --> 0.19.1-py35_blas_openblas_201 conda-forge [blas_openblas]\n scipy: 0.19.0-np111py35_0 --> 1.0.0-py35_blas_openblas_201 conda-forge [blas_openblas]\n\nThe following packages will be SUPERSEDED by a higher-priority channel:\n\n hdf5: 1.10.1-h9caa474_1 --> 1.8.18-2 conda-forge\n kealib: 1.4.7-h79811e5_5 --> 1.4.7-3 conda-forge\n\nProceed ([y]/n)? 
\ngmp-6.1.2-0.ta 100% |################################| Time: 0:00:00 1.11 MB/s\nisl-0.12.2-0.t 100% |################################| Time: 0:00:00 9.16 MB/s\nmpfr-3.1.5-0.t 100% |################################| Time: 0:00:00 1.34 MB/s\nopenblas-0.2.2 100% |################################| Time: 0:00:02 8.83 MB/s\nblas-1.1-openb 100% |################################| Time: 0:00:00 2.96 MB/s\ncloog-0.18.0-0 100% |################################| Time: 0:00:00 13.94 MB/s\nhdf5-1.8.18-2. 100% |################################| Time: 0:00:00 23.04 MB/s\nmpc-1.0.3-4.ta 100% |################################| Time: 0:00:00 42.13 MB/s\nflann-1.9.1-0. 100% |################################| Time: 0:00:00 27.51 MB/s\ngcc-4.8.5-7.ta 100% |################################| Time: 0:00:01 59.83 MB/s\nkealib-1.4.7-3 100% |################################| Time: 0:00:00 1.71 MB/s\nlibnetcdf-4.4. 100% |################################| Time: 0:00:00 65.19 MB/s\ncoverage-4.4.2 100% |################################| Time: 0:00:00 1.18 MB/s\ncython-0.27.3- 100% |################################| Time: 0:00:01 6.31 MB/s\nnumpy-1.11.3-p 100% |################################| Time: 0:00:00 25.14 MB/s\nh5py-2.7.1-py3 100% |################################| Time: 0:00:00 36.62 MB/s\nnose-1.3.7-py3 100% |################################| Time: 0:00:00 63.55 MB/s\npip-9.0.1-py35 100% |################################| Time: 0:00:00 25.40 MB/s\nplotly-2.2.3-p 100% |################################| Time: 0:00:00 25.89 MB/s\npyflann-1.9.1- 100% |################################| Time: 0:00:00 32.46 MB/s\nscipy-1.0.0-py 100% |################################| Time: 0:00:00 63.08 MB/s\nlibgdal-2.2.2- 100% |################################| Time: 0:00:00 70.44 MB/s\nopencv-3.3.1-p 100% |################################| Time: 0:00:01 37.75 MB/s\npytables-3.4.2 100% |################################| Time: 0:00:01 3.96 MB/s\nscikit-learn-0 100% |################################| Time: 0:00:00 21.09 MB/s\nyes: standard output: Broken pipe\n" ], [ "!rm -rf megaman\n!git clone https://github.com/mmp2/megaman.git\n!cd megaman", "Cloning into 'megaman'...\nremote: Counting objects: 2984, done.\u001b[K\nremote: Compressing objects: 100% (8/8), done.\u001b[K\nremote: Total 2984 (delta 3), reused 0 (delta 0), pack-reused 2976\u001b[K\nReceiving objects: 100% (2984/2984), 29.83 MiB | 8.15 MiB/s, done.\nfatal: cannot pread pack file: Bad address\nfatal: index-pack failed\n/bin/sh: line 0: cd: megaman: No such file or directory\n" ], [ "import plotly\nplotly.offline.init_notebook_mode(connected=True)\n\nfrom IPython.core.display import display, HTML\ndisplay(HTML(\n '<script>'\n 'var waitForPlotly = setInterval( function() {'\n 'if( typeof(window.Plotly) !== \"undefined\" ){'\n 'MathJax.Hub.Config({ SVG: { font: \"STIX-Web\" }, displayAlign: \"center\" });'\n 'MathJax.Hub.Queue([\"setRenderer\", MathJax.Hub, \"SVG\"]);'\n 'clearInterval(waitForPlotly);'\n '}}, 250 );'\n '</script>'\n))", "_____no_output_____" ] ], [ [ "## Generate data\n\nThis dataset used in this tutorial has a shape of hourglass, with ``size = 10000`` and dimension be 13. The first three dimensions of the data is generated by adding gaussian noises onto the noise-free hourglass data, with ``sigma_primary = 0.1``, the variance of the noises added on hourglass data. 
We made ``addition_dims = 10``, which adds 10 extra noise dimensions so that the whole dataset has dimension 13, with ``sigma_additional = 0.1``, the variance of the noise in the additional dimensions.\n", "_____no_output_____" ] ], [ [ "from plotly.offline import iplot\n#import megaman\nfrom megaman.datasets import *\n\ndata = generate_noisy_hourglass(size=10000, sigma_primary=0.1,\n                                addition_dims=10, sigma_additional=0.1)", "_____no_output_____" ] ], [ [ "We can visualize the dataset with the following plot:", "_____no_output_____" ] ], [ [ "from megaman.plotter.scatter_3d import scatter_plot3d_plotly\nimport plotly.graph_objs as go\n\nt_data = scatter_plot3d_plotly(data,marker=dict(color='rgb(0, 102, 0)',opacity=0.5))\nl_data = go.Layout(title='Noisy hourglass scatter plot for first 3 axis.')\nf_data = go.Figure(data=t_data,layout=l_data)\niplot(f_data)", "_____no_output_____" ] ], [ [ "## Radius estimation\n\nTo estimate the ``radius``, we need to find the pairwise distances first. \n\nTo do so, we compute the adjacency matrix using the Geometry module in megaman.", "_____no_output_____" ] ], [ [ "rmax=5\nrmin=0.1\n\nfrom megaman.geometry import Geometry\n\ngeom = Geometry(adjacency_method='brute',adjacency_kwds=dict(radius=rmax))\ngeom.set_data_matrix(data)\ndist = geom.compute_adjacency_matrix()", "_____no_output_____" ] ], [ [ "For each data point, the distortion will be estimated. If the size $N$ used in estimating the distortion is large, it will be computationally expensive. We want to choose a sample of size $N'$ such that the average distortion is well estimated. In our case, we choose $N'=1000$. The error will be around $\\frac{1}{\\sqrt{1000}} \\approx 0.03$.\n\nIn this example, we search the radius from the minimum pairwise distance ``rmin`` to the maximum radius ``rmax``. By doing so, the distance matrix will be dense. If the matrix is too large to fit in memory, a smaller maximum radius ``rmax`` can be chosen to make the distance matrix sparse.\n\nBased on the discussion above, we run radius estimation with \n1. sample size=1000 (created by choosing one data point out of every 10 of the original data.)\n1. radius search from ``rmin=0.1`` to ``rmax=5``, with 50 points in logspace.\n1. dimension ``d=1``\n\nSpecify run_parallel=True for searching the radius in parallel.", "_____no_output_____" ] ], [ [ "%%capture\n# Using magic command %%capture for suppressing the std out.\n\nfrom megaman.utils.estimate_radius import run_estimate_radius\nimport numpy as np\n\n# subsample by 10.\nsample = np.arange(0,data.shape[0],10)\n\ndistorion_vs_rad_dim1 = run_estimate_radius(\n    data, dist, sample=sample, d=1, rmin=rmin, rmax=rmax,\n    ntry=50, run_parallel=True, search_space='logspace')", "_____no_output_____" ] ], [ [ "Run radius estimation with the same configuration as above, except\n1. dimension ``d=2``", "_____no_output_____" ] ], [ [ "%%capture\ndistorion_vs_rad_dim2 = run_estimate_radius(\n    data, dist, sample=sample, d=2, rmin=0.1, rmax=5,\n    ntry=50, run_parallel=True, search_space='logspace')", "_____no_output_____" ] ], [ [ "### Radius estimation result\n\nThe estimated radius is the minimizer of the distortion, denoted as $\hat{r}_{d=1}$ and $\hat{r}_{d=2}$. 
(In the code, it's ``est_rad_dim1`` and ``est_rad_dim2``)", "_____no_output_____" ] ], [ [ "distorsion_dim1 = distorion_vs_rad_dim1[:,1].astype('float64')\ndistorsion_dim2 = distorion_vs_rad_dim2[:,1].astype('float64')\nrad_search_space = distorion_vs_rad_dim1[:,0].astype('float64')\n\nargmin_d1 = np.argmin(distorsion_dim1)\nargmin_d2 = np.argmin(distorsion_dim2)\nest_rad_dim1 = rad_search_space[argmin_d1]\nest_rad_dim2 = rad_search_space[argmin_d2]\n\nprint ('Estimated radius with d=1 is: {:.4f}'.format(est_rad_dim1))\nprint ('Estimated radius with d=2 is: {:.4f}'.format(est_rad_dim2))", "Estimated radius with d=1 is: 1.0969\nEstimated radius with d=2 is: 1.0128\n" ] ], [ [ "### Plot distorsions with different radii", "_____no_output_____" ] ], [ [ "t_distorsion = [go.Scatter(x=rad_search_space, y=distorsion_dim1, name='Dimension = 1'), \n go.Scatter(x=rad_search_space, y=distorsion_dim2, name='Dimension = 2')]\n\nl_distorsion = go.Layout(\n title='Distorsions versus radii',\n xaxis=dict(\n title='$\\\\text{Radius } r$',\n type='log',\n autorange=True\n ),\n yaxis=dict(\n title='Distorsion',\n type='log',\n autorange=True\n ),\n annotations=[\n dict(\n x=np.log10(est_rad_dim1),\n y=np.log10(distorsion_dim1[argmin_d1]),\n xref='x',\n yref='y',\n text='$\\\\hat{r}_{d=1}$',\n font = dict(size = 30),\n showarrow=True,\n arrowhead=7,\n ax=0,\n ay=-30\n ),\n dict(\n x=np.log10(est_rad_dim2),\n y=np.log10(distorsion_dim2[argmin_d2]),\n xref='x',\n yref='y',\n text='$\\\\hat{r}_{d=2}$',\n font = dict(size = 30),\n showarrow=True,\n arrowhead=7,\n ax=0,\n ay=-30\n )\n ]\n)\nf_distorsion = go.Figure(data=t_distorsion,layout=l_distorsion)\niplot(f_distorsion)", "_____no_output_____" ] ], [ [ "## Application to dimension estimation\n\nWe followed the method proposed by [Chen et. al (2011)]((http://lcsl.mit.edu/papers/che_lit_mag_ros_2011.pdf) [5] to verify the estimated radius reflect the truth intrinsic dimension of the data. The basic idea is to find the largest gap of singular value of local PCA, which correspond to the dimension of the local structure.\n\nWe first plot the average singular values versus radii.", "_____no_output_____" ] ], [ [ "%%capture\nfrom rad_est_utils import find_argmax_dimension, estimate_dimension\n\nrad_search_space, singular_values = estimate_dimension(data, dist)", "_____no_output_____" ] ], [ [ "The singular gap is the different between two singular values. Since the intrinsic dimension is 2, we are interested in the region where the largest singular gap is the second. The region is:", "_____no_output_____" ] ], [ [ "singular_gap = -1*np.diff(singular_values,axis=1)\nsecond_gap_is_max_range = (np.argmax(singular_gap,axis=1) == 1).nonzero()[0]\nstart_idx, end_idx = second_gap_is_max_range[0], second_gap_is_max_range[-1]+1\n\nprint ('The index which maximize the second singular gap is: {}'.format(second_gap_is_max_range))\nprint ('The start and end index of largest continuous range is {} and {}, respectively'.format(start_idx, end_idx))", "The index which maximize the second singular gap is: [20 21 22 23 24 25 26 27 28 29 30 31 32 33]\nThe start and end index of largest continuous range is 20 and 34, respectively\n" ] ], [ [ "### Averaged singular values with different radii\n\nPlot the averaged singular values with different radii. The gray shaded area is the continous range in which the largest singular gap is the second, (local structure has dimension equals 2). 
And the purple shaded area denotes the second singular gap.\n\nBy hovering the line on this plot, you can see the value of the singular gap.", "_____no_output_____" ] ], [ [ "from rad_est_utils import plot_singular_values_versus_radius, generate_layouts\n\nt_avg_singular = plot_singular_values_versus_radius(singular_values, rad_search_space, start_idx, end_idx)\nl_avg_singular = generate_layouts(start_idx, end_idx, est_rad_dim1, est_rad_dim2, rad_search_space)\nf_avg_singular = go.Figure(data=t_avg_singular,layout=l_avg_singular)\niplot(f_avg_singular)", "_____no_output_____" ] ], [ [ "### Histogram of estimated dimensions with estimated radius.\n\nWe first find out the estimated dimensions of each points in the data using the estimated radius $\\hat{r}_{d=1}$ and $\\hat{r}_{d=2}$.", "_____no_output_____" ] ], [ [ "dimension_freq_d1 = find_argmax_dimension(data,dist, est_rad_dim1)\ndimension_freq_d2 = find_argmax_dimension(data,dist, est_rad_dim2)", "_____no_output_____" ] ], [ [ "The histogram of estimated dimensions with different optimal radius is shown as below:", "_____no_output_____" ] ], [ [ "t_hist_dim = [go.Histogram(x=dimension_freq_d1,name='d=1'),\n go.Histogram(x=dimension_freq_d2,name='d=2')]\n\nl_hist_dim = go.Layout(\n title='Dimension histogram',\n xaxis=dict(\n title='Estimated dimension'\n ),\n yaxis=dict(\n title='Counts'\n ),\n bargap=0.2,\n bargroupgap=0.1\n)\n\nf_hist_dim = go.Figure(data=t_hist_dim,layout=l_hist_dim)\n\niplot(f_hist_dim)\n", "_____no_output_____" ] ], [ [ "## Conclusion\n\n1. Choosing the correct radius/bound/scale is important in any non-linear dimension reduction task\n1. The __Geometry Consistency (GC) Algorithm__ required minimal knowledge: maximum radius, minimum radius, (optionally: dimension $d$ of the manifold.)\n1. The chosen radius can be used in \n 1. any embedding algorithm\n 1. semi-supervised learning with Laplacian Regularizer (see our NIPS 2017 paper)\n 1. estimating dimension $d$ (as shown here)\n1. The megaman python package is __scalable__, and __efficient__\n\n<img src=https://raw.githubusercontent.com/mmp2/megaman/master/doc/images/spectra_Halpha.png width=600 />\n", "_____no_output_____" ], [ "## __Try it:__\n\n<div style=\"float:left;\">All the functions are implemented by the manifold learning package <a href=https://github.com/mmp2/megaman>megaman.</a> </div><a style=\"float:left;\" href=\"https://anaconda.org/conda-forge/megaman\"><img src=\"https://anaconda.org/conda-forge/megaman/badges/downloads.svg\" /></a>", "_____no_output_____" ], [ "## Reference\n\n[1] R. R. Coifman, S. Lafon. Diffusion maps. Applied and Computational Harmonic Analysis, 2006. <br>\n[2] D. Perrault-Joncas, M. Meila, Metric learning and manifolds: Preserving the intrinsic geometry , arXiv1305.7255 <br>\n[3] X. Zhou, M. Belkin. Semi-supervised learning by higher order regularization. AISTAT, 2011 <br>\n[4] A. Singer. From graph to manifold laplacian: the convergence rate. Applied and Computational Harmonic Analysis, 2006. <br>\n[5] G. Chen, A. Little, M. Maggioni, L. Rosasco. Some recent advances in multiscale geometric analysis of point clouds. Wavelets and multiscale analysis. Springer, 2011. <br>\n[6] L. Chen, A. Buja. Local Multidimensional Scaling for nonlinear dimension reduction, graph drawing and proximity analysis, JASA,2009. <br>", "_____no_output_____" ] ] ]
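The largest-singular-gap idea from Chen et al. that this notebook relies on can also be illustrated outside of megaman. The following is a hedged sketch using scikit-learn and NumPy on synthetic data; the helper name `local_dimension`, the toy plane dataset, and the radius value are assumptions for illustration, not the `rad_est_utils.estimate_dimension` implementation used above.

```python
# Sketch of local-PCA dimension estimation: the largest gap between consecutive
# singular values of a point's radius neighborhood marks its local dimension.
import numpy as np
from sklearn.neighbors import NearestNeighbors

def local_dimension(points, radius, index):
    """Estimate the intrinsic dimension at one point from its radius neighborhood."""
    nbrs = NearestNeighbors(radius=radius).fit(points)
    neighbor_idx = nbrs.radius_neighbors([points[index]], return_distance=False)[0]
    local = points[neighbor_idx] - points[neighbor_idx].mean(axis=0)
    s = np.linalg.svd(local, compute_uv=False)   # singular values, descending
    gaps = -np.diff(s)                           # gap between consecutive singular values
    return int(np.argmax(gaps)) + 1              # largest gap -> estimated dimension

# Noisy 2-D plane embedded in 3-D: the estimate should usually come out as 2.
rng = np.random.RandomState(0)
plane = np.column_stack([rng.rand(2000), rng.rand(2000), 0.01 * rng.randn(2000)])
print(local_dimension(plane, radius=0.2, index=0))
```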
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ] ]
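The conclusion of this notebook notes that the chosen radius feeds directly into Laplacian-based embedding and clustering. As a rough, hedged sketch of that downstream step (using scikit-learn and SciPy rather than megaman, with an assumed heat-kernel bandwidth equal to the radius and a toy point cloud standing in for the hourglass data), one could build radius-truncated affinities and their graph Laplacian like this:

```python
# Sketch: turn an estimated radius into heat-kernel affinities and a graph Laplacian.
# Illustrative only -- the bandwidth choice, toy data, and helper name are assumptions.
import numpy as np
from sklearn.neighbors import radius_neighbors_graph
from scipy.sparse import csgraph

def heat_kernel_laplacian(points, radius):
    """Radius-truncated heat-kernel affinity matrix and its (unnormalized) Laplacian."""
    dists = radius_neighbors_graph(points, radius=radius, mode='distance',
                                   include_self=False)
    affinity = dists.copy()
    affinity.data = np.exp(-(dists.data / radius) ** 2)  # heat kernel, bandwidth = radius
    return csgraph.laplacian(affinity, normed=False)

rng = np.random.RandomState(0)
cloud = rng.randn(500, 3)                 # toy stand-in for the hourglass data
L = heat_kernel_laplacian(cloud, radius=1.0)
print(L.shape, L.nnz)
```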
cba06cf283c6f00ef19b3d3acd2ffcb6d23e971f
57,154
ipynb
Jupyter Notebook
g3doc/tutorials/graph_keras_lstm_imdb.ipynb
amitkayal/neural-structured-learning
adb855f3daa33e527c1786cd59329eb783d879aa
[ "Apache-2.0" ]
1
2019-09-05T04:53:49.000Z
2019-09-05T04:53:49.000Z
g3doc/tutorials/graph_keras_lstm_imdb.ipynb
zxlzr/neural-structured-learning
128a3eda112412c1e7beb1684ab8ec72afe4ba62
[ "Apache-2.0" ]
null
null
null
g3doc/tutorials/graph_keras_lstm_imdb.ipynb
zxlzr/neural-structured-learning
128a3eda112412c1e7beb1684ab8ec72afe4ba62
[ "Apache-2.0" ]
1
2019-10-04T05:17:37.000Z
2019-10-04T05:17:37.000Z
36.731362
429
0.546156
[ [ [ "##### Copyright 2019 Google LLC", "_____no_output_____" ] ], [ [ "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "_____no_output_____" ] ], [ [ "# Graph regularization for sentiment classification using synthesized graphs\n\n<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://www.tensorflow.org/neural_structured_learning/tutorials/graph_keras_lstm_imdb\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />View on TensorFlow.org</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/neural-structured-learning/blob/master/g3doc/tutorials/graph_keras_lstm_imdb.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/neural-structured-learning/blob/master/g3doc/tutorials/graph_keras_lstm_imdb.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\n </td>\n</table>", "_____no_output_____" ], [ "## Overview", "_____no_output_____" ], [ "This notebook classifies movie reviews as *positive* or *negative* using the\ntext of the review. This is an example of *binary* classification, an important\nand widely applicable kind of machine learning problem.\n\nWe will demonstrate the use of graph regularization in this notebook by building\na graph from the given input. The general recipe for building a\ngraph-regularized model using the Neural Structured Learning (NSL) framework\nwhen the input does not contain an explicit graph is as follows:\n\n1. Create embeddings for each text sample in the input. This can be done using\n pre-trained models such as [word2vec](https://arxiv.org/pdf/1310.4546.pdf),\n [Swivel](https://arxiv.org/abs/1602.02215),\n [BERT](https://arxiv.org/abs/1810.04805) etc.\n2. Build a graph based on these embeddings by using a similarity metric such as\n the 'L2' distance, 'cosine' distance, etc. Nodes in the graph correspond to\n samples and edges in the graph correspond to similarity between pairs of\n samples.\n3. Generate training data from the above synthesized graph and sample features.\n The resulting training data will contain neighbor features in addition to\n the original node features.\n4. Create a neural network as a base model using the Keras sequential,\n functional, or subclass API.\n5. Wrap the base model with the GraphRegularization wrapper class, which is\n provided by the NSL framework, to create a new graph Keras model. This new\n model will include a graph regularization loss as the regularization term in\n its training objective.\n6. Train and evaluate the graph Keras model.\n\n**Note**: We expect that it would take readers about 1 hour to go through this\ntutorial.", "_____no_output_____" ], [ "## Requirements\n\n1. Install TensorFlow 2.x to create an interactive developing environment with eager execution.\n2. Install the Neural Structured Learning package.\n3. 
Install tensorflow-hub.", "_____no_output_____" ] ], [ [ "!pip install --quiet tensorflow==2.0.0-rc0\n!pip install --quiet neural-structured-learning\n!pip install --quiet tensorflow-hub", "_____no_output_____" ] ], [ [ "## Dependencies and imports", "_____no_output_____" ] ], [ [ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport neural_structured_learning as nsl\n\nimport tensorflow as tf\ntf.compat.v1.enable_v2_behavior()\n\nimport tensorflow_hub as hub\n\n# Resets notebook state\ntf.keras.backend.clear_session()\n\nprint(\"Version: \", tf.__version__)\nprint(\"Eager mode: \", tf.executing_eagerly())\nprint(\"Hub version: \", hub.__version__)\nprint(\"GPU is\", \"available\" if tf.test.is_gpu_available() else \"NOT AVAILABLE\")", "_____no_output_____" ] ], [ [ "## IMDB dataset\n\nThe\n[IMDB dataset](https://www.tensorflow.org/api_docs/python/tf/keras/datasets/imdb)\ncontains the text of 50,000 movie reviews from the\n[Internet Movie Database](https://www.imdb.com/). These are split into 25,000\nreviews for training and 25,000 reviews for testing. The training and testing\nsets are *balanced*, meaning they contain an equal number of positive and\nnegative reviews.\n\nIn this tutorial, we will use a preprocessed version of the IMDB dataset.", "_____no_output_____" ], [ "### Download preprocessed IMDB dataset\n\nThe IMDB dataset comes packaged with TensorFlow. It has already been\npreprocessed such that the reviews (sequences of words) have been converted to\nsequences of integers, where each integer represents a specific word in a\ndictionary.\n\nThe following code downloads the IMDB dataset (or uses a cached copy if it has\nalready been downloaded):", "_____no_output_____" ] ], [ [ "imdb = tf.keras.datasets.imdb\n(pp_train_data, pp_train_labels), (pp_test_data, pp_test_labels) = (\n imdb.load_data(num_words=10000))", "_____no_output_____" ] ], [ [ "The argument `num_words=10000` keeps the top 10,000 most frequently occurring words in the training data. The rare words are discarded to keep the size of the vocabulary manageable.", "_____no_output_____" ], [ "### Explore the data\n\nLet's take a moment to understand the format of the data. The dataset comes preprocessed: each example is an array of integers representing the words of the movie review. Each label is an integer value of either 0 or 1, where 0 is a negative review, and 1 is a positive review.", "_____no_output_____" ] ], [ [ "print('Training entries: {}, labels: {}'.format(\n len(pp_train_data), len(pp_train_labels)))\ntraining_samples_count = len(pp_train_data)", "_____no_output_____" ] ], [ [ "The text of reviews have been converted to integers, where each integer represents a specific word in a dictionary. Here's what the first review looks like:", "_____no_output_____" ] ], [ [ "print(pp_train_data[0])", "_____no_output_____" ] ], [ [ "Movie reviews may be different lengths. The below code shows the number of words in the first and second reviews. 
Since inputs to a neural network must be the same length, we'll need to resolve this later.", "_____no_output_____" ] ], [ [ "len(pp_train_data[0]), len(pp_train_data[1])", "_____no_output_____" ] ], [ [ "### Convert the integers back to words\n\nIt may be useful to know how to convert integers back to the corresponding text.\nHere, we'll create a helper function to query a dictionary object that contains\nthe integer to string mapping:", "_____no_output_____" ] ], [ [ "def build_reverse_word_index():\n # A dictionary mapping words to an integer index\n word_index = imdb.get_word_index()\n\n # The first indices are reserved\n word_index = {k: (v + 3) for k, v in word_index.items()}\n word_index['<PAD>'] = 0\n word_index['<START>'] = 1\n word_index['<UNK>'] = 2 # unknown\n word_index['<UNUSED>'] = 3\n return dict((value, key) for (key, value) in word_index.items())\n\nreverse_word_index = build_reverse_word_index()\n\ndef decode_review(text):\n return ' '.join([reverse_word_index.get(i, '?') for i in text])", "_____no_output_____" ] ], [ [ "Now we can use the `decode_review` function to display the text for the first review:", "_____no_output_____" ] ], [ [ "decode_review(pp_train_data[0])", "_____no_output_____" ] ], [ [ "## Graph construction\n\nGraph construction involves creating embeddings for text samples and then using\na similarity function to compare the embeddings.\n\nBefore proceeding further, we first create a directory to store artifacts\ncreated by this tutorial.", "_____no_output_____" ] ], [ [ "!mkdir -p /tmp/imdb", "_____no_output_____" ] ], [ [ "### Create sample embeddings", "_____no_output_____" ], [ "We will use pretrained Swivel embeddings to create embeddings in the\n`tf.train.Example` format for each sample in the input. We will store the\nresulting embeddings in the `TFRecord` format along with an additional feature\nthat represents the ID of each sample. 
This is important and will allow us match\nsample embeddings with corresponding nodes in the graph later.", "_____no_output_____" ] ], [ [ "# This is necessary because hub.KerasLayer assumes tensor hashability, which\n# is not supported in eager mode.\ntf.compat.v1.disable_tensor_equality()\n\npretrained_embedding = 'https://tfhub.dev/google/tf2-preview/gnews-swivel-20dim/1'\n\nhub_layer = hub.KerasLayer(\n pretrained_embedding, input_shape=[], dtype=tf.string, trainable=True)", "_____no_output_____" ], [ "def _int64_feature(value):\n \"\"\"Returns int64 tf.train.Feature.\"\"\"\n return tf.train.Feature(int64_list=tf.train.Int64List(value=value.tolist()))\n\n\ndef _bytes_feature(value):\n \"\"\"Returns bytes tf.train.Feature.\"\"\"\n return tf.train.Feature(\n bytes_list=tf.train.BytesList(value=[value.encode('utf-8')]))\n\n\ndef _float_feature(value):\n \"\"\"Returns float tf.train.Feature.\"\"\"\n return tf.train.Feature(float_list=tf.train.FloatList(value=value.tolist()))\n\n\ndef create_embedding_example(word_vector, record_id):\n \"\"\"Create tf.Example containing the sample's embedding and its ID.\"\"\"\n\n text = decode_review(word_vector)\n\n # Shape = [batch_size,].\n sentence_embedding = hub_layer(tf.reshape(text, shape=[-1,]))\n\n # Flatten the sentence embedding back to 1-D.\n sentence_embedding = tf.reshape(sentence_embedding, shape=[-1])\n\n features = {\n 'id': _bytes_feature(str(record_id)),\n 'embedding': _float_feature(sentence_embedding.numpy())\n }\n return tf.train.Example(features=tf.train.Features(feature=features))\n\n\ndef create_embeddings(word_vectors, output_path, starting_record_id):\n record_id = int(starting_record_id)\n with tf.io.TFRecordWriter(output_path) as writer:\n for word_vector in word_vectors:\n example = create_embedding_example(word_vector, record_id)\n record_id = record_id + 1\n writer.write(example.SerializeToString())\n return record_id\n\n\n# Persist TF.Example features containing embeddings for training data in\n# TFRecord format.\ncreate_embeddings(pp_train_data, '/tmp/imdb/embeddings.tfr', 0)", "_____no_output_____" ] ], [ [ "### Build a graph\n\nNow that we have the sample embeddings, we will use them to build a similarity\ngraph, i.e, nodes in this graph will correspond to samples and edges in this\ngraph will correspond to similarity between pairs of nodes.\n\nNeural Structured Learning provides a graph building tool that builds a graph\nbased on sample embeddings. It uses **cosine similarity** as the similarity\nmeasure to compare embeddings and build edges between them. It also allows us to\nspecify a similarity threshold, which can be used to discard dissimilar edges\nfrom the final graph. In this example, using 0.99 as the similarity threshold,\nwe end up with a graph that has 445,327 bi-directional edges.", "_____no_output_____" ] ], [ [ "!python -m neural_structured_learning.tools.build_graph \\\n--similarity_threshold=0.99 /tmp/imdb/embeddings.tfr /tmp/imdb/graph_99.tsv", "_____no_output_____" ] ], [ [ "**Note:** Graph quality and by extension, embedding quality, are very important\nfor graph regularization. While we have used Swivel embeddings in this notebook,\nusing BERT embeddings for instance, will likely capture review semantics more\naccurately. We encourage users to use embeddings of their choice and as\nappropriate to their needs.", "_____no_output_____" ], [ "## Sample features\n\nWe create sample features for our problem in the `tf.train.Example`s format and\npersist them in the `TFRecord` format. 
Each sample will include the following\nthree features:\n\n1. **id**: The node ID of the sample.\n2. **words**: An int64 list containing word IDs.\n3. **label**: A singleton int64 identifying the target class of the review.", "_____no_output_____" ] ], [ [ "def create_example(word_vector, label, record_id):\n  \"\"\"Create tf.Example containing the sample's word vector, label, and ID.\"\"\"\n  features = {\n      'id': _bytes_feature(str(record_id)),\n      'words': _int64_feature(np.asarray(word_vector)),\n      'label': _int64_feature(np.asarray([label])),\n  }\n  return tf.train.Example(features=tf.train.Features(feature=features))\n\ndef create_records(word_vectors, labels, record_path, starting_record_id):\n  record_id = int(starting_record_id)\n  with tf.io.TFRecordWriter(record_path) as writer:\n    for word_vector, label in zip(word_vectors, labels):\n      example = create_example(word_vector, label, record_id)\n      record_id = record_id + 1\n      writer.write(example.SerializeToString())\n  return record_id\n\n# Persist TF.Example features (word vectors and labels) for training and test\n# data in TFRecord format.\nnext_record_id = create_records(pp_train_data, pp_train_labels,\n                                '/tmp/imdb/train_data.tfr', 0)\ncreate_records(pp_test_data, pp_test_labels, '/tmp/imdb/test_data.tfr',\n               next_record_id)", "_____no_output_____" ] ], [ [ "## Augment training data with graph neighbors\n\nSince we have the sample features and the synthesized graph, we can generate the\naugmented training data for Neural Structured Learning. The NSL framework\nprovides a tool that can combine the graph and the sample features to produce\nthe final training data for graph regularization. The resulting training data\nwill include original sample features as well as features of their corresponding\nneighbors.\n\nIn this tutorial, we consider undirected edges and we use a maximum of 3\nneighbors per sample, matching the `--max_nbrs` flag below.", "_____no_output_____" ] ], [ [ "!python -m neural_structured_learning.tools.pack_nbrs \\\n--max_nbrs=3 --add_undirected_edges=True \\\n/tmp/imdb/train_data.tfr '' /tmp/imdb/graph_99.tsv \\\n/tmp/imdb/nsl_train_data.tfr", "_____no_output_____" ] ], [ [ "## Base model\n\nWe are now ready to build a base model without graph regularization. In order to build this model, we can either use embeddings that were used in building the graph, or we can learn new embeddings jointly along with the classification task. For the purpose of this notebook, we will do the latter.", "_____no_output_____" ], [ "### Global variables", "_____no_output_____" ] ], [ [ "NBR_FEATURE_PREFIX = 'NL_nbr_'\nNBR_WEIGHT_SUFFIX = '_weight'", "_____no_output_____" ] ], [ [ "### Hyperparameters\n\nWe will use an instance of `HParams` to include various hyperparameters and\nconstants used for training and evaluation. 
We briefly describe each of them\nbelow:\n\n- **num_classes**: There are 2 classes -- *positive* and *negative*.\n\n- **max_seq_length**: This is the maximum number of words considered from each movie review in this example.\n\n- **vocab_size**: This is the size of the vocabulary considered for this example.\n\n- **distance_type**: This is the distance metric used to regularize the sample\n with its neighbors.\n\n- **graph_regularization_multiplier**: This controls the relative weight of\n the graph regularization term in the overall loss function.\n\n- **num_neighbors**: The number of neighbors used for graph regularization.\n\n- **num_fc_units**: The number of units in the fully connected layer of the neural network.\n\n- **train_epochs**: The number of training epochs.\n\n- **batch_size**: Batch size used for training and evaluation.\n\n- **eval_steps**: The number of batches to process before deeming evaluation\n is complete. If set to `None`, all instances in the test set are evaluated.", "_____no_output_____" ] ], [ [ "class HParams(object):\n \"\"\"Hyperparameters used for training.\"\"\"\n def __init__(self):\n ### dataset parameters\n self.num_classes = 2\n self.max_seq_length = 256\n self.vocab_size = 10000\n ### neural graph learning parameters\n self.distance_type = nsl.configs.DistanceType.L2\n self.graph_regularization_multiplier = 0.1\n self.num_neighbors = 2\n ### model architecture\n self.num_embedding_dims = 16\n self.num_lstm_dims = 64\n self.num_fc_units = 64\n ### training parameters\n self.train_epochs = 10\n self.batch_size = 128\n ### eval parameters\n self.eval_steps = None # All instances in the test set are evaluated.\n\nHPARAMS = HParams()", "_____no_output_____" ] ], [ [ "### Prepare the data\n\nThe reviews—the arrays of integers—must be converted to tensors before being fed\ninto the neural network. This conversion can be done a couple of ways:\n\n* Convert the arrays into vectors of `0`s and `1`s indicating word occurrence,\n similar to a one-hot encoding. For example, the sequence `[3, 5]` would become a `10000`-dimensional vector that is all zeros except for indices `3` and `5`, which are ones. Then, make this the first layer in our network—a `Dense` layer—that can handle floating point vector data. This approach is memory intensive, though, requiring a `num_words * num_reviews` size matrix.\n\n* Alternatively, we can pad the arrays so they all have the same length, then\n create an integer tensor of shape `max_length * num_reviews`. We can use an\n embedding layer capable of handling this shape as the first layer in our\n network.\n\nIn this tutorial, we will use the second approach.\n\nSince the movie reviews must be the same length, we will use the `pad_sequence`\nfunction defined below to standardize the lengths.", "_____no_output_____" ] ], [ [ "def pad_sequence(sequence, max_seq_length):\n \"\"\"Pads the input sequence (a `tf.SparseTensor`) to `max_seq_length`.\"\"\"\n pad_size = tf.maximum([0], max_seq_length - tf.shape(sequence)[0])\n padded = tf.concat(\n [sequence.values,\n tf.fill((pad_size), tf.cast(0, sequence.dtype))],\n axis=0)\n # The input sequence may be larger than max_seq_length. 
Truncate down if\n # necessary.\n return tf.slice(padded, [0], [max_seq_length])\n\ndef parse_example(example_proto):\n \"\"\"Extracts relevant fields from the `example_proto`.\n\n Args:\n example_proto: An instance of `tf.train.Example`.\n\n Returns:\n A pair whose first value is a dictionary containing relevant features\n and whose second value contains the ground truth labels.\n \"\"\"\n # The 'words' feature is a variable length word ID vector.\n feature_spec = {\n 'words': tf.io.VarLenFeature(tf.int64),\n 'label': tf.io.FixedLenFeature((), tf.int64, default_value=-1),\n }\n # We also extract corresponding neighbor features in a similar manner to\n # the features above.\n for i in range(HPARAMS.num_neighbors):\n nbr_feature_key = '{}{}_{}'.format(NBR_FEATURE_PREFIX, i, 'words')\n nbr_weight_key = '{}{}{}'.format(NBR_FEATURE_PREFIX, i, NBR_WEIGHT_SUFFIX)\n feature_spec[nbr_feature_key] = tf.io.VarLenFeature(tf.int64)\n\n # We assign a default value of 0.0 for the neighbor weight so that\n # graph regularization is done on samples based on their exact number\n # of neighbors. In other words, non-existent neighbors are discounted.\n feature_spec[nbr_weight_key] = tf.io.FixedLenFeature(\n [1], tf.float32, default_value=tf.constant([0.0]))\n\n features = tf.io.parse_single_example(example_proto, feature_spec)\n\n # Since the 'words' feature is a variable length word vector, we pad it to a\n # constant maximum length based on HPARAMS.max_seq_length\n features['words'] = pad_sequence(features['words'], HPARAMS.max_seq_length)\n for i in range(HPARAMS.num_neighbors):\n nbr_feature_key = '{}{}_{}'.format(NBR_FEATURE_PREFIX, i, 'words')\n features[nbr_feature_key] = pad_sequence(features[nbr_feature_key],\n HPARAMS.max_seq_length)\n\n labels = features.pop('label')\n return features, labels\n\ndef make_dataset(file_path, training=False):\n \"\"\"Creates a `tf.data.TFRecordDataset`.\n\n Args:\n file_path: Name of the file in the `.tfrecord` format containing\n `tf.train.Example` objects.\n training: Boolean indicating if we are in training mode.\n\n Returns:\n An instance of `tf.data.TFRecordDataset` containing the `tf.train.Example`\n objects.\n \"\"\"\n dataset = tf.data.TFRecordDataset([file_path])\n if training:\n dataset = dataset.shuffle(10000)\n dataset = dataset.map(parse_example)\n dataset = dataset.batch(HPARAMS.batch_size)\n return dataset\n\ntrain_dataset = make_dataset('/tmp/imdb/nsl_train_data.tfr', True)\ntest_dataset = make_dataset('/tmp/imdb/test_data.tfr')", "_____no_output_____" ] ], [ [ "### Build the model\n\nA neural network is created by stacking layers—this requires two main architectural decisions:\n\n* How many layers to use in the model?\n* How many *hidden units* to use for each layer?\n\nIn this example, the input data consists of an array of word-indices. 
The labels to predict are either 0 or 1.\n\nWe will use a bi-directional LSTM as our base model in this tutorial.", "_____no_output_____" ] ], [ [ "# This function exists as an alternative to the bi-LSTM model used in this\n# notebook.\ndef make_feed_forward_model():\n \"\"\"Builds a simple 2 layer feed forward neural network.\"\"\"\n inputs = tf.keras.Input(\n shape=(HPARAMS.max_seq_length,), dtype='int64', name='words')\n embedding_layer = tf.keras.layers.Embedding(HPARAMS.vocab_size, 16)(inputs)\n pooling_layer = tf.keras.layers.GlobalAveragePooling1D()(embedding_layer)\n dense_layer = tf.keras.layers.Dense(16, activation='relu')(pooling_layer)\n outputs = tf.keras.layers.Dense(1, activation='sigmoid')(dense_layer)\n return tf.keras.Model(inputs=inputs, outputs=outputs)\n\n\ndef make_bilstm_model():\n \"\"\"Builds a bi-directional LSTM model.\"\"\"\n inputs = tf.keras.Input(\n shape=(HPARAMS.max_seq_length,), dtype='int64', name='words')\n embedding_layer = tf.keras.layers.Embedding(HPARAMS.vocab_size,\n HPARAMS.num_embedding_dims)(\n inputs)\n lstm_layer = tf.keras.layers.Bidirectional(\n tf.keras.layers.LSTM(HPARAMS.num_lstm_dims))(\n embedding_layer)\n dense_layer = tf.keras.layers.Dense(\n HPARAMS.num_fc_units, activation='relu')(\n lstm_layer)\n outputs = tf.keras.layers.Dense(1, activation='sigmoid')(dense_layer)\n return tf.keras.Model(inputs=inputs, outputs=outputs)\n\n\n# Feel free to use an architecture of your choice.\nmodel = make_bilstm_model()\nmodel.summary()", "_____no_output_____" ] ], [ [ "The layers are effectively stacked sequentially to build the classifier:\n\n1. The first layer is an `Input` layer which takes the integer-encoded\n vocabulary.\n2. The next layer is an `Embedding` layer, which takes the integer-encoded\n vocabulary and looks up the embedding vector for each word-index. These\n vectors are learned as the model trains. The vectors add a dimension to the\n output array. The resulting dimensions are: `(batch, sequence, embedding)`.\n3. Next, a bidirectional LSTM layer returns a fixed-length output vector for\n each example.\n4. This fixed-length output vector is piped through a fully-connected (`Dense`)\n layer with 64 hidden units.\n5. The last layer is densely connected with a single output node. Using the\n `sigmoid` activation function, this value is a float between 0 and 1,\n representing a probability, or confidence level.", "_____no_output_____" ], [ "### Hidden units\n\nThe above model has two intermediate or \"hidden\" layers, between the input and\noutput, and excluding the `Embedding` layer. The number of outputs (units,\nnodes, or neurons) is the dimension of the representational space for the layer.\nIn other words, the amount of freedom the network is allowed when learning an\ninternal representation.\n\nIf a model has more hidden units (a higher-dimensional representation space),\nand/or more layers, then the network can learn more complex representations.\nHowever, it makes the network more computationally expensive and may lead to\nlearning unwanted patterns—patterns that improve performance on training data\nbut not on the test data. This is called *overfitting*.", "_____no_output_____" ], [ "### Loss function and optimizer\n\nA model needs a loss function and an optimizer for training. 
Since this is a\nbinary classification problem and the model outputs a probability (a single-unit\nlayer with a sigmoid activation), we'll use the `binary_crossentropy` loss\nfunction.", "_____no_output_____" ] ], [ [ "model.compile(\n optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])", "_____no_output_____" ] ], [ [ "### Create a validation set\n\nWhen training, we want to check the accuracy of the model on data it hasn't seen\nbefore. Create a *validation set* by setting apart a fraction of the original\ntraining data. (Why not use the testing set now? Our goal is to develop and tune\nour model using only the training data, then use the test data just once to\nevaluate our accuracy).\n\nIn this tutorial, we take roughly 10% of the initial training samples (10% of 25000) as labeled data for training and the remaining as validation data. Since the initial train/test split was 50/50 (25000 samples each), the effective train/validation/test split we now have is 5/45/50.\n\nNote that 'train_dataset' has already been batched and shuffled. ", "_____no_output_____" ] ], [ [ "validation_fraction = 0.9\nvalidation_size = int(validation_fraction *\n int(training_samples_count / HPARAMS.batch_size))\nprint(validation_size)\nvalidation_dataset = train_dataset.take(validation_size)\ntrain_dataset = train_dataset.skip(validation_size)", "_____no_output_____" ] ], [ [ "### Train the model\n\nTrain the model in mini-batches. While training, monitor the model's loss and accuracy on the validation set:", "_____no_output_____" ] ], [ [ "history = model.fit(\n train_dataset,\n validation_data=validation_dataset,\n epochs=HPARAMS.train_epochs,\n verbose=1)", "_____no_output_____" ] ], [ [ "### Evaluate the model\n\nNow, let's see how the model performs. Two values will be returned. Loss (a number which represents our error, lower values are better), and accuracy.", "_____no_output_____" ] ], [ [ "results = model.evaluate(test_dataset, steps=HPARAMS.eval_steps)\nprint(results)", "_____no_output_____" ] ], [ [ "### Create a graph of accuracy/loss over time\n\n`model.fit()` returns a `History` object that contains a dictionary with everything that happened during training:", "_____no_output_____" ] ], [ [ "history_dict = history.history\nhistory_dict.keys()", "_____no_output_____" ] ], [ [ "There are four entries: one for each monitored metric during training and validation. We can use these to plot the training and validation loss for comparison, as well as the training and validation accuracy:", "_____no_output_____" ] ], [ [ "acc = history_dict['accuracy']\nval_acc = history_dict['val_accuracy']\nloss = history_dict['loss']\nval_loss = history_dict['val_loss']\n\nepochs = range(1, len(acc) + 1)\n\n# \"-r^\" is for solid red line with triangle markers.\nplt.plot(epochs, loss, '-r^', label='Training loss')\n# \"-b0\" is for solid blue line with circle markers.\nplt.plot(epochs, val_loss, '-bo', label='Validation loss')\nplt.title('Training and validation loss')\nplt.xlabel('Epochs')\nplt.ylabel('Loss')\nplt.legend(loc='best')\n\nplt.show()", "_____no_output_____" ], [ "plt.clf() # clear figure\n\nplt.plot(epochs, acc, '-r^', label='Training acc')\nplt.plot(epochs, val_acc, '-bo', label='Validation acc')\nplt.title('Training and validation accuracy')\nplt.xlabel('Epochs')\nplt.ylabel('Accuracy')\nplt.legend(loc='best')\n\nplt.show()", "_____no_output_____" ] ], [ [ "Notice the training loss *decreases* with each epoch and the training accuracy\n*increases* with each epoch. 
This is expected when using a gradient descent\noptimization—it should minimize the desired quantity on every iteration.", "_____no_output_____" ], [ "## Graph regularization\n\nWe are now ready to try graph regularization using the base model that we built\nabove. We will use the `GraphRegularization` wrapper class provided by the\nNeural Structured Learning framework to wrap the base (bi-LSTM) model to include\ngraph regularization. The rest of the steps for training and evaluating the\ngraph-regularized model are similar to that of the base model.", "_____no_output_____" ], [ "### Create graph-regularized model", "_____no_output_____" ], [ "To assess the incremental benefit of graph regularization, we will create a new\nbase model instance. This is because `model` has already been trained for a few\niterations, and reusing this trained model to create a graph-regularized model\nwill not be a fair comparison for `model`.", "_____no_output_____" ] ], [ [ "# Build a new base LSTM model.\nbase_reg_model = make_bilstm_model()", "_____no_output_____" ], [ "# Wrap the base model with graph regularization.\ngraph_reg_config = nsl.configs.GraphRegConfig(\n neighbor_config=nsl.configs.GraphNeighborConfig(\n max_neighbors=HPARAMS.num_neighbors),\n multiplier=HPARAMS.graph_regularization_multiplier,\n distance_config=nsl.configs.DistanceConfig(\n distance_type=HPARAMS.distance_type, sum_over_axis=-1))\ngraph_reg_model = nsl.keras.GraphRegularization(base_reg_model,\n graph_reg_config)\ngraph_reg_model.compile(\n optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])", "_____no_output_____" ] ], [ [ "### Train the model", "_____no_output_____" ] ], [ [ "graph_reg_history = graph_reg_model.fit(\n train_dataset,\n validation_data=validation_dataset,\n epochs=HPARAMS.train_epochs,\n verbose=1)", "_____no_output_____" ] ], [ [ "### Evaluate the model", "_____no_output_____" ] ], [ [ "graph_reg_results = graph_reg_model.evaluate(test_dataset, steps=HPARAMS.eval_steps)\nprint(graph_reg_results)", "_____no_output_____" ] ], [ [ "### Create a graph of accuracy/loss over time", "_____no_output_____" ] ], [ [ "graph_reg_history_dict = graph_reg_history.history\ngraph_reg_history_dict.keys()", "_____no_output_____" ] ], [ [ "There are six entries: one for each monitored metric -- loss, graph loss, and\naccuracy -- during training and validation. We can use these to plot the\ntraining, graph, and validation losses for comparison, as well as the training\nand validation accuracy. 
Note that the graph loss is only computed during\ntraining; so its value will be 0 during validation.", "_____no_output_____" ] ], [ [ "acc = graph_reg_history_dict['accuracy']\nval_acc = graph_reg_history_dict['val_accuracy']\nloss = graph_reg_history_dict['loss']\ngraph_loss = graph_reg_history_dict['graph_loss']\nval_loss = graph_reg_history_dict['val_loss']\nval_graph_loss = graph_reg_history_dict['val_graph_loss']\n\nepochs = range(1, len(acc) + 1)\n\nplt.clf() # clear figure\n\n# \"-r^\" is for solid red line with triangle markers.\nplt.plot(epochs, loss, '-r^', label='Training loss')\n# \"-gD\" is for solid green line with diamond markers.\nplt.plot(epochs, graph_loss, '-gD', label='Training graph loss')\n# \"-b0\" is for solid blue line with circle markers.\nplt.plot(epochs, val_loss, '-bo', label='Validation loss')\n# \"-ms\" is for solid magenta line with square markers.\nplt.plot(epochs, val_graph_loss, '-ms', label='Validation graph loss')\nplt.title('Training and validation loss')\nplt.xlabel('Epochs')\nplt.ylabel('Loss')\nplt.legend(loc='best')\n\nplt.show()", "_____no_output_____" ], [ "plt.clf() # clear figure\n\nplt.plot(epochs, acc, '-r^', label='Training acc')\nplt.plot(epochs, val_acc, '-bo', label='Validation acc')\nplt.title('Training and validation accuracy')\nplt.xlabel('Epochs')\nplt.ylabel('Accuracy')\nplt.legend(loc='best')\n\nplt.show()", "_____no_output_____" ] ], [ [ "## The power of semi-supervised learning\n\nSemi-supervised learning and more specifically, graph regularization in the\ncontext of this tutorial, can be really powerful when the amount of training\ndata is small. The lack of training data is compensated by leveraging similarity\namong the training samples, which is not possible in traditional supervised\nlearning.\n\nWe define ***supervision ratio*** as the ratio of training samples to the total\nnumber of samples which includes training, validation, and test samples. In this\nnotebook, we have used a supervision ratio of 0.05 (i.e, 5% of the labeled data)\nfor training both the base model as well as the graph-regularized model. We\nillustrate the impact of the supervision ratio on model accuracy in the cell\nbelow.", "_____no_output_____" ] ], [ [ "# Accuracy values for both the Bi-LSTM model and the feed forward NN model have\n# been precomputed for the following supervision ratios.\n\nsupervision_ratios = [0.3, 0.15, 0.05, 0.03, 0.02, 0.01, 0.005]\n\nmodel_tags = ['Bi-LSTM model', 'Feed Forward NN model']\nbase_model_accs = [[84, 84, 83, 80, 65, 52, 50], [87, 86, 76, 74, 67, 52, 51]]\ngraph_reg_model_accs = [[84, 84, 83, 83, 65, 63, 50],\n [87, 86, 80, 75, 67, 52, 50]]\n\nplt.clf() # clear figure\n\nfig, axes = plt.subplots(1, 2)\nfig.set_size_inches((12, 5))\n\nfor ax, model_tag, base_model_acc, graph_reg_model_acc in zip(\n axes, model_tags, base_model_accs, graph_reg_model_accs):\n\n # \"-r^\" is for solid red line with triangle markers.\n ax.plot(base_model_acc, '-r^', label='Base model')\n # \"-gD\" is for solid green line with diamond markers.\n ax.plot(graph_reg_model_acc, '-gD', label='Graph-regularized model')\n ax.set_title(model_tag)\n ax.set_xlabel('Supervision ratio')\n ax.set_ylabel('Accuracy(%)')\n ax.set_ylim((25, 100))\n ax.set_xticks(range(len(supervision_ratios)))\n ax.set_xticklabels(supervision_ratios)\n ax.legend(loc='best')\n\nplt.show()", "_____no_output_____" ] ], [ [ "It can be observed that as the superivision ratio decreases, model accuracy also\ndecreases. 
This is true for both the base model and for the graph-regularized\nmodel, regardless of the model architecture used. However, notice that the\ngraph-regularized model performs better than the base model for both the\narchitectures. In particular, for the Bi-LSTM model, when the supervision ratio\nis 0.01, the accuracy of the graph-regularized model is **~20%** higher than\nthat of the base model. This is primarily because of semi-supervised learning\nfor the graph-regularized model, where structural similarity among training\nsamples is used in addition to the training samples themselves.", "_____no_output_____" ], [ "## Conclusion\n\nWe have demonstrated the use of graph regularization using the Neural Structured\nLearning (NSL) framework even when the input does not contain an explicit graph.\nWe considered the task of sentiment classification of IMDB movie reviews for\nwhich we synthesized a similarity graph based on review embeddings. We encourage\nusers to experiment further by varying hyperparameters, the amount of\nsupervision, and by using different model architectures.", "_____no_output_____" ] ] ]
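As a follow-up to the supervision-ratio discussion in the notebook above, here is a minimal sketch of how the labeled/validation split could be recomputed for a different amount of supervision. It reuses the `make_dataset`, `train_dataset`, `training_samples_count`, and `HPARAMS` names from earlier in the notebook; the `target_supervision_ratio` value is an illustrative assumption, and the precomputed accuracy curves shown above were not generated with this exact snippet.

# Sketch: rebuild and re-split the training data for a chosen supervision ratio.
# The initial IMDB train/test split is 50/50, so a supervision ratio r over all
# samples corresponds to keeping a fraction 2*r of the original training split.
target_supervision_ratio = 0.01  # illustrative assumption, not the 0.05 used above

train_dataset = make_dataset('/tmp/imdb/nsl_train_data.tfr', True)
validation_fraction = 1.0 - 2.0 * target_supervision_ratio
validation_size = int(validation_fraction * int(training_samples_count / HPARAMS.batch_size))
validation_dataset = train_dataset.take(validation_size)
train_dataset = train_dataset.skip(validation_size)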
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
cba06ea80ac42ff80ba29debdbbf829c4f188cb8
11,529
ipynb
Jupyter Notebook
notebooks/record_train/example_classification.ipynb
sangyy/jetracer
65704c65e11cbdae7afa0d245cded2a49206f9a7
[ "MIT" ]
1
2019-10-16T16:54:20.000Z
2019-10-16T16:54:20.000Z
notebooks/record_train/example_classification.ipynb
sangyy/jetracer
65704c65e11cbdae7afa0d245cded2a49206f9a7
[ "MIT" ]
null
null
null
notebooks/record_train/example_classification.ipynb
sangyy/jetracer
65704c65e11cbdae7afa0d245cded2a49206f9a7
[ "MIT" ]
null
null
null
32.294118
338
0.615665
[ [ [ "# Road Following - Live demo", "_____no_output_____" ], [ "In this notebook, we will use model we trained to move jetBot smoothly on track. ", "_____no_output_____" ], [ "### Load Trained Model", "_____no_output_____" ], [ "We will assume that you have already downloaded ``best_steering_model_xy.pth`` to work station as instructed in \"train_model.ipynb\" notebook. Now, you should upload model file to JetBot in to this notebooks's directory. Once that's finished there should be a file named ``best_steering_model_xy.pth`` in this notebook's directory.", "_____no_output_____" ], [ "> Please make sure the file has uploaded fully before calling the next cell", "_____no_output_____" ], [ "Execute the code below to initialize the PyTorch model. This should look very familiar from the training notebook.", "_____no_output_____" ] ], [ [ "import torchvision\nimport torch\n\nmodel = torchvision.models.resnet18(pretrained=False)\nmodel.fc = torch.nn.Linear(512, 2)", "_____no_output_____" ] ], [ [ "Next, load the trained weights from the ``best_steering_model_xy.pth`` file that you uploaded.", "_____no_output_____" ] ], [ [ "model.load_state_dict(torch.load('best_steering_model_xy.pth'))", "_____no_output_____" ] ], [ [ "Currently, the model weights are located on the CPU memory execute the code below to transfer to the GPU device.", "_____no_output_____" ] ], [ [ "device = torch.device('cuda')\nmodel = model.to(device)\nmodel = model.eval().half()", "_____no_output_____" ] ], [ [ "### Creating the Pre-Processing Function", "_____no_output_____" ], [ "We have now loaded our model, but there's a slight issue. The format that we trained our model doesnt exactly match the format of the camera. To do that, we need to do some preprocessing. This involves the following steps:\n\n1. Convert from HWC layout to CHW layout\n2. Normalize using same parameters as we did during training (our camera provides values in [0, 255] range and training loaded images in [0, 1] range so we need to scale by 255.0\n3. Transfer the data from CPU memory to GPU memory\n4. Add a batch dimension", "_____no_output_____" ] ], [ [ "import torchvision.transforms as transforms\nimport torch.nn.functional as F\nimport cv2\nimport PIL.Image\nimport numpy as np\n\nmean = torch.Tensor([0.485, 0.456, 0.406]).cuda().half()\nstd = torch.Tensor([0.229, 0.224, 0.225]).cuda().half()\n\ndef preprocess(image):\n image = PIL.Image.fromarray(image)\n image = transforms.functional.to_tensor(image).to(device).half()\n image.sub_(mean[:, None, None]).div_(std[:, None, None])\n return image[None, ...]", "_____no_output_____" ] ], [ [ "Awesome! We've now defined our pre-processing function which can convert images from the camera format to the neural network input format.\n\nNow, let's start and display our camera. You should be pretty familiar with this by now. 
", "_____no_output_____" ] ], [ [ "from IPython.display import display\nimport ipywidgets\nimport traitlets\nfrom jetbot import Camera, bgr8_to_jpeg\n\ncamera = Camera()\n\nimage_widget = ipywidgets.Image()\n\ntraitlets.dlink((camera, 'value'), (image_widget, 'value'), transform=bgr8_to_jpeg)\n\ndisplay(image_widget)", "_____no_output_____" ] ], [ [ "We'll also create our robot instance which we'll need to drive the motors.", "_____no_output_____" ] ], [ [ "from jetbot import Robot\n\nrobot = Robot()", "_____no_output_____" ] ], [ [ "Now, we will define sliders to control JetBot\n> Note: We have initialize the slider values for best known configurations, however these might not work for your dataset, therefore please increase or decrease the sliders according to your setup and environment\n\n1. Speed Control (speed_gain_slider): To start your JetBot increase ``speed_gain_slider`` \n2. Steering Gain Control (steering_gain_sloder): If you see JetBot is woblling, you need to reduce ``steering_gain_slider`` till it is smooth\n3. Steering Bias control (steering_bias_slider): If you see JetBot is biased towards extreme right or extreme left side of the track, you should control this slider till JetBot start following line or track in the center. This accounts for motor biases as well as camera offsets\n\n> Note: You should play around above mentioned sliders with lower speed to get smooth JetBot road following behavior.", "_____no_output_____" ] ], [ [ "speed_gain_slider = ipywidgets.FloatSlider(min=0.0, max=1.0, step=0.01, description='speed gain')\nsteering_gain_slider = ipywidgets.FloatSlider(min=0.0, max=1.0, step=0.01, value=0.2, description='steering gain')\nsteering_dgain_slider = ipywidgets.FloatSlider(min=0.0, max=0.5, step=0.001, value=0.0, description='steering kd')\nsteering_bias_slider = ipywidgets.FloatSlider(min=-0.3, max=0.3, step=0.01, value=0.0, description='steering bias')\n\ndisplay(speed_gain_slider, steering_gain_slider, steering_dgain_slider, steering_bias_slider)", "_____no_output_____" ] ], [ [ "Next, let's display some sliders that will let us see what JetBot is thinking. The x and y sliders will display the predicted x, y values.\n\nThe steering slider will display our estimated steering value. Please remember, this value isn't the actual angle of the target, but simply a value that is\nnearly proportional. When the actual angle is ``0``, this will be zero, and it will increase / decrease with the actual angle. ", "_____no_output_____" ] ], [ [ "x_slider = ipywidgets.FloatSlider(min=-1.0, max=1.0, description='x')\ny_slider = ipywidgets.FloatSlider(min=0, max=1.0, orientation='vertical', description='y')\nsteering_slider = ipywidgets.FloatSlider(min=-1.0, max=1.0, description='steering')\nspeed_slider = ipywidgets.FloatSlider(min=0, max=1.0, orientation='vertical', description='speed')\n\ndisplay(ipywidgets.HBox([y_slider, speed_slider]))\ndisplay(x_slider, steering_slider)", "_____no_output_____" ] ], [ [ "Next, we'll create a function that will get called whenever the camera's value changes. This function will do the following steps\n\n1. Pre-process the camera image\n2. Execute the neural network\n3. Compute the approximate steering value\n4. 
Control the motors using proportional / derivative control (PD)", "_____no_output_____" ] ], [ [ "angle = 0.0\nangle_last = 0.0\n\ndef execute(change):\n global angle, angle_last\n image = change['new']\n # Run the network and read out the predicted (x, y) target point.\n xy = model(preprocess(image)).detach().float().cpu().numpy().flatten()\n x = xy[0]\n y = (0.5 - xy[1]) / 2.0\n \n x_slider.value = x\n y_slider.value = y\n \n speed_slider.value = speed_gain_slider.value\n \n # PD steering: proportional on the heading angle, derivative on its change.\n angle = np.arctan2(x, y)\n pid = angle * steering_gain_slider.value + (angle - angle_last) * steering_dgain_slider.value\n angle_last = angle\n \n steering_slider.value = pid + steering_bias_slider.value\n \n # Mix speed and steering into differential motor commands, clamped to [0, 1].\n robot.left_motor.value = max(min(speed_slider.value + steering_slider.value, 1.0), 0.0)\n robot.right_motor.value = max(min(speed_slider.value - steering_slider.value, 1.0), 0.0)\n \nexecute({'new': camera.value})", "_____no_output_____" ] ], [ [ "Cool! We've created our neural network execution function, but now we need to attach it to the camera for processing.\n\nWe accomplish that with the observe function.", "_____no_output_____" ], [ ">WARNING: This code will move the robot!! Please make sure your robot has clearance and is on the Lego or track you have collected data on. The road follower should work, but the neural network is only as good as the data it's trained on!", "_____no_output_____" ] ], [ [ "camera.observe(execute, names='value')", "_____no_output_____" ] ], [ [ "Awesome! If your robot is plugged in, it should now be generating new commands with each new camera frame. \n\nYou can now place the JetBot on the Lego or track you have collected data on and see whether it can follow the track.\n\nIf you want to stop this behavior, you can detach this callback by executing the code below.", "_____no_output_____" ] ], [ [ "camera.unobserve(execute, names='value')\nrobot.stop()", "_____no_output_____" ] ], [ [ "### Conclusion\nThat's it for this live demo! Hopefully you had some fun seeing your JetBot moving smoothly along the track, following the road!\n\nIf your JetBot wasn't following the road very well, try to spot where it fails. The beauty is that we can collect more data for these failure scenarios and the JetBot should get even better :)", "_____no_output_____" ] ] ]
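To make the steering arithmetic in `execute()` above easier to follow, here is a standalone numeric sketch of the same proportional/derivative computation. The slider values below are illustrative assumptions, not calibrated settings for any particular track.

import numpy as np

x, y = 0.3, 0.2  # example rescaled network prediction (target to the robot's right)
speed_gain, steering_gain, steering_dgain, steering_bias = 0.3, 0.2, 0.05, 0.0  # assumed slider values
angle_last = 0.0

angle = np.arctan2(x, y)  # heading toward the predicted target, in radians
pid = angle * steering_gain + (angle - angle_last) * steering_dgain
steering = pid + steering_bias

left_motor = max(min(speed_gain + steering, 1.0), 0.0)
right_motor = max(min(speed_gain - steering, 1.0), 0.0)
print(left_motor, right_motor)  # left > right here, so the robot turns toward the target on its right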
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
cba07c045154903eb0565c845cf71d35352c6c68
973
ipynb
Jupyter Notebook
Algorithms/Sorting/Bubble-Sort/BubbleSort.ipynb
aadhityasw/Data-Structures-Algorithms
acb3a6634f5c5e771a61fe30bbffed6ce5439ded
[ "MIT" ]
null
null
null
Algorithms/Sorting/Bubble-Sort/BubbleSort.ipynb
aadhityasw/Data-Structures-Algorithms
acb3a6634f5c5e771a61fe30bbffed6ce5439ded
[ "MIT" ]
null
null
null
Algorithms/Sorting/Bubble-Sort/BubbleSort.ipynb
aadhityasw/Data-Structures-Algorithms
acb3a6634f5c5e771a61fe30bbffed6ce5439ded
[ "MIT" ]
null
null
null
19.078431
55
0.47482
[ [ [ "# Bubble Sort", "_____no_output_____" ] ], [ [ "# Bubble Sort\nn=int(input('Enter the number of elements :'))\np=[]\nfor i in range(n) :\n p.append(int(input()))\nfor i in range(n) :\n for j in range(n-i-1) :\n if p[j]>p[j+1] :\n p[j],p[j+1]=p[j+1],p[j]\nprint(p)", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code" ] ]
cba08343b73e139538b2175a0eb2c304937ef7b5
35,399
ipynb
Jupyter Notebook
ddpg-pendulum/DDPG.ipynb
primeMover2011/deep-reinforcement-learning
a8314b01da15e47c230a3246e5109d49c6618162
[ "MIT" ]
null
null
null
ddpg-pendulum/DDPG.ipynb
primeMover2011/deep-reinforcement-learning
a8314b01da15e47c230a3246e5109d49c6618162
[ "MIT" ]
null
null
null
ddpg-pendulum/DDPG.ipynb
primeMover2011/deep-reinforcement-learning
a8314b01da15e47c230a3246e5109d49c6618162
[ "MIT" ]
null
null
null
171.009662
28,896
0.894291
[ [ [ "# Deep Deterministic Policy Gradients (DDPG)\n---\nIn this notebook, we train DDPG with OpenAI Gym's Pendulum-v0 environment.\n\n### 1. Import the Necessary Packages", "_____no_output_____" ] ], [ [ "import gym\nimport random\nimport torch\nimport numpy as np\nfrom collections import deque\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\nfrom ddpg_agent import Agent", "_____no_output_____" ] ], [ [ "### 2. Instantiate the Environment and Agent", "_____no_output_____" ] ], [ [ "env = gym.make('Pendulum-v0')\nenv.seed(2)\nagent = Agent(state_size=3, action_size=1, random_seed=2)", "_____no_output_____" ] ], [ [ "### 3. Train the Agent with DDPG", "_____no_output_____" ] ], [ [ "def ddpg(n_episodes=1000, max_t=300, print_every=100):\n scores_deque = deque(maxlen=print_every)\n scores = []\n for i_episode in range(1, n_episodes+1):\n state = env.reset()\n agent.reset()\n score = 0\n for t in range(max_t):\n action = agent.act(state)\n next_state, reward, done, _ = env.step(action)\n agent.step(state, action, reward, next_state, done)\n state = next_state\n score += reward\n if done:\n break \n scores_deque.append(score)\n scores.append(score)\n print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_deque)), end=\"\")\n torch.save(agent.actor_local.state_dict(), 'checkpoint_actor.pth')\n torch.save(agent.critic_local.state_dict(), 'checkpoint_critic.pth')\n if i_episode % print_every == 0:\n print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_deque)))\n \n return scores\n\nscores = ddpg()\n\nfig = plt.figure()\nax = fig.add_subplot(111)\nplt.plot(np.arange(1, len(scores)+1), scores)\nplt.ylabel('Score')\nplt.xlabel('Episode #')\nplt.show()", "C:\\Users\\danzingh\\AppData\\Local\\Continuum\\Anaconda3\\envs\\pytorch\\lib\\site-packages\\torch\\nn\\functional.py:1320: UserWarning: nn.functional.tanh is deprecated. Use torch.tanh instead.\n warnings.warn(\"nn.functional.tanh is deprecated. Use torch.tanh instead.\")\n" ] ], [ [ "### 4. Watch a Smart Agent!", "_____no_output_____" ] ], [ [ "agent.actor_local.load_state_dict(torch.load('checkpoint_actor.pth'))\nagent.critic_local.load_state_dict(torch.load('checkpoint_critic.pth'))\n\nstate = env.reset()\nfor t in range(1200):\n action = agent.act(state, add_noise=False)\n env.render()\n state, reward, done, _ = env.step(action)\n if done:\n print(t)\n break \n\nenv.close()", "199\n" ] ], [ [ "### 6. Explore\n\nIn this exercise, we have provided a sample DDPG agent and demonstrated how to use it to solve an OpenAI Gym environment. To continue your learning, you are encouraged to complete any (or all!) of the following tasks:\n- Amend the various hyperparameters and network architecture to see if you can get your agent to solve the environment faster than this benchmark implementation. Once you build intuition for the hyperparameters that work well with this environment, try solving a different OpenAI Gym task!\n- Write your own DDPG implementation. Use this code as reference only when needed -- try as much as you can to write your own algorithm from scratch.\n- You may also like to implement prioritized experience replay, to see if it speeds learning. \n- The current implementation adds Ornsetein-Uhlenbeck noise to the action space. However, it has [been shown](https://blog.openai.com/better-exploration-with-parameter-noise/) that adding noise to the parameters of the neural network policy can improve performance. 
Make this change to the code, to verify it for yourself!\n- Write a blog post explaining the intuition behind the DDPG algorithm and demonstrating how to use it to solve an RL environment of your choosing. ", "_____no_output_____" ] ] ]
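Before writing your own DDPG implementation as suggested above, one detail worth understanding is the soft update of the target networks that the provided `ddpg_agent` relies on. The sketch below shows the usual formulation, theta_target <- tau * theta_local + (1 - tau) * theta_target; the function name and the tau value here are illustrative assumptions and may differ from what `ddpg_agent.py` actually uses.

import torch

def soft_update(local_model, target_model, tau=1e-3):
    # Blend a small fraction of the freshly trained weights into the target network,
    # which keeps the bootstrapped targets slowly moving and stabilizes learning.
    for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
        target_param.data.copy_(tau * local_param.data + (1.0 - tau) * target_param.data)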
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
cba08727d193bc8f484e162f2aa6f71a30f0b75c
242,793
ipynb
Jupyter Notebook
3D-spatial-privacy-results.ipynb
spatial-privacy/spatial-privacy
ef0a1a088d59af0801afec58e1480e077a5ce0fb
[ "MIT" ]
null
null
null
3D-spatial-privacy-results.ipynb
spatial-privacy/spatial-privacy
ef0a1a088d59af0801afec58e1480e077a5ce0fb
[ "MIT" ]
null
null
null
3D-spatial-privacy-results.ipynb
spatial-privacy/spatial-privacy
ef0a1a088d59af0801afec58e1480e077a5ce0fb
[ "MIT" ]
3
2020-04-17T02:06:18.000Z
2021-11-05T20:34:11.000Z
318.208388
127,824
0.920891
[ [ [ "%matplotlib inline\n\nimport numpy as np\nimport sys\nimport os\nimport matplotlib.pyplot as plt\nimport math\nimport pickle\nimport pandas as pd\nimport scipy.io\nimport time\nimport h5py\nimport bz2\n\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib.colors import ListedColormap, LinearSegmentedColormap\nfrom mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable\n\nfrom numpy import linalg as LA\nfrom scipy.spatial import Delaunay\nfrom sklearn.neighbors import NearestNeighbors\n\n#sys.path.insert(0, \"../\")\nfrom info3d import *\nfrom nn_matchers import *", "_____no_output_____" ] ], [ [ "# EXTRACTING the existing sample data", "_____no_output_____" ] ], [ [ "with open('point_collection/new_contiguous_point_collection.pickle','rb') as f: \n new_contiguous_point_collection = pickle.load(f)\n \nwith open('descriptors/new_complete_res5_4by5_descriptors.pickle','rb') as f:\n descriptors = pickle.load(f)\n", "_____no_output_____" ], [ "\"\"\"\nParameters\n\"\"\"\n# We used a radius range of 0.25 to 5.0 in increments of 0.25\nradius_range = np.arange(0.5,1.6,0.5)", "_____no_output_____" ] ], [ [ "# Step 1: Results of partial spaces", "_____no_output_____" ] ], [ [ "fig=plt.figure(figsize=(9, 3))\n\nRawNN = []\nRansacGeneralizedNN = []\n\nRawNN_intra_errors = []\nRansacGeneralizedNN_intra_errors = []\n\nfor radius in radius_range:\n \n try:\n with bz2.BZ2File('testing_results/partials/radius_{}_RAW_scores.pickle.bz2'.format(radius), 'r') as bz2_f:\n partial_scores = pickle.load(bz2_f)\n \n \"\"\"\n with open('testing_results/partials/radius_{}_RAW_errors.pickle'.format(radius), 'rb') as f:\n partial_scores = pickle.load(f)\n \"\"\"\n partial_errors = NN_matcher(partial_scores)\n RawNN.append([\n radius,\n np.mean(partial_errors[:,1]),\n np.std(partial_errors[:,1]),\n ])\n \n correct_interspace_labels_idxs = np.where(partial_errors[:,1]==0)[0]\n\n intraspace_errors = partial_errors[correct_interspace_labels_idxs,2]\n\n RawNN_intra_errors.append([\n radius,\n np.nanmean(intraspace_errors),\n np.nanstd(intraspace_errors)\n ])\n \n except:\n pass\n \n try:\n with bz2.BZ2File('testing_results/partials/radius_{}_RANSAC_scores.pickle.bz2'.format(radius), 'r') as bz2_f:\n partial_scores = pickle.load(bz2_f)\n\n \"\"\"\n with open('testing_results/partials/radius_{}_RANSAC_scores'.format(radius), 'rb') as f:\n partial_scores = pickle.load(f)\n \"\"\"\n partial_errors = NN_matcher(partial_scores)\n RansacGeneralizedNN.append([\n radius,\n np.nanmean(partial_errors[:,1]),\n np.nanstd(partial_errors[:,1]),\n ])\n\n correct_interspace_labels_idxs = np.where(partial_errors[:,1]==0)[0]\n\n intraspace_errors = partial_errors[correct_interspace_labels_idxs,2]\n\n RansacGeneralizedNN_intra_errors.append([\n radius,\n np.nanmean(intraspace_errors),\n np.nanstd(intraspace_errors)\n ])\n \n except:\n pass\n \nRansacGeneralizedNN = np.asarray(RansacGeneralizedNN)\nRawNN = np.asarray(RawNN)\n\nRawNN_intra_errors = np.asarray(RawNN_intra_errors)\nRansacGeneralizedNN_intra_errors = np.asarray(RansacGeneralizedNN_intra_errors)\n\nax1 = fig.add_subplot(121) \n\nax1.grid(alpha = 0.7)\nax1.set_ylim(-0.025,1.025)\nax1.set_xlim(radius_range[0]-0.25,radius_range[-1]+0.25)\nmarkersize = 8\n\nax1.set_ylabel(\"INTER-space Privacy\")\nax1.set_xlabel(\"Partial Radius\")\n#ax1.set_yticklabels(fontsize = 16)\n#ax1.set_xticklabels(fontsize = 16)\n\nax1.plot(\n RawNN[:,0],RawNN[:,1],\n \"-o\",\n linewidth = 2,\n mew = 2,markersize = markersize,\n label = \"Raw\"\n)\nax1.plot(\n 
RansacGeneralizedNN[:,0],RansacGeneralizedNN[:,1],\n \"-s\",\n linewidth = 2,\n mew = 2,markersize = markersize,\n label = \"RANSAC\"\n)\n\nax1.legend(loc = \"lower left\")\n\nax2 = fig.add_subplot(122) \n\nax2.grid(alpha = 0.7)\nax2.set_ylim(-0.25,10.25)\nax2.set_xlim(radius_range[0]-0.25,radius_range[-1]+0.25)\n\nax2.set_ylabel(\"INTRA-space Privacy\")\nax2.set_xlabel(\"Partial Radius\")\n#ax2.set_yticklabels(fontsize = 16)\n#ax2.set_xticklabels(fontsize = 16)\n\nplt.minorticks_on()\n\nax2.plot(\n RawNN_intra_errors[:,0],\n RawNN_intra_errors[:,1], \n linewidth = 2,\n marker = 'o',fillstyle = 'none',\n mew = 2,markersize = markersize,\n label = \"Raw\"\n)\n\nax2.plot(\n RansacGeneralizedNN_intra_errors[:,0],\n RansacGeneralizedNN_intra_errors[:,1], \n linewidth = 2, \n marker = 's',fillstyle = 'none',\n mew = 2,markersize = markersize,\n label = \"RANSAC\"\n)\n\nax2.legend(loc = \"lower left\");\n\nplt.savefig('plots/partial-spaces.png', format='png', dpi=300,bbox_inches = 'tight')", "/Users/deg032/Projects/public/spatial-privacy/nn_matchers.py:510: FutureWarning: in the future insert will treat boolean arrays and array-likes as a boolean index instead of casting it to integer\n good_matches_kp_idx = np.insert(good_matches[good_matches_ref_kp],True,good_matches_ref_kp)\n" ] ], [ [ "# Step 2: Results of the successive case", "_____no_output_____" ] ], [ [ "\"\"\"\nParameters\n\"\"\"\n# We used a radius range of 0.25 to 5.0 in increments of 0.25.\nradius_range = radius_range\n\n# For our work, we orignally used 50 samples with further 100 successive releases for our investigation.\n# Below are lower parameters, change as desired.\nsamples = 25\nreleases = 50\n\n# For demonstration purposes, we skip testing some successive samples but we still accumulate them.\nskip = 3", "_____no_output_____" ], [ "succ_RawNN_errors = []\nsucc_RawNN_partial_errors = []\n\nsucc_RansacGeneralizedNN_errors = []\nsucc_RansacGeneralizedNN_partial_errors = []\n\nt0 = time.time()\n\nfor radius in radius_range:\n \n succ_RawNN_per_iteration_errors = []\n succ_RansacGeneralizedNN_per_iteration_errors = []\n\n try:\n \n \"\"\"\n with open('testing_results/successive/radius_{}_RAW_successive_scores.pickle'.format(radius), 'rb') as f:\n successive_scores = pickle.load(f)\n \"\"\"\n \n with bz2.BZ2File('testing_results/successive/radius_{}_RAW_successive_scores.pickle.bz2'.format(radius), 'r') as bz2_f:\n successive_scores = pickle.load(bz2_f)\n\n with open('testing_results/successive/radius_{}_RAW_successive_errors.pickle'.format(radius), 'rb') as f:\n successive_errors = pickle.load(f)\n \n for obj_, iteration_errors in successive_errors: \n #print(\" RAW\",radius,iteration_errors.shape)\n\n if iteration_errors.shape[0] < int(releases/skip):\n continue\n else:\n succ_RawNN_per_iteration_errors.append(iteration_errors[:int(releases/skip)])\n \n succ_RawNN_errors.append([\n radius,\n np.asarray(succ_RawNN_per_iteration_errors)\n ])\n \n #print(\"Raw\",np.asarray(succ_RawNN_per_iteration_errors).shape)\n\n except:# Exception as ex:\n #print(radius,\": successive RawNN\\n \", ex)\n pass\n \n try:\n \"\"\"\n with open('testing_results/successive/radius_{}_RANSAC_successive_scores.pickle'.format(radius), 'rb') as f:\n successive_scores = pickle.load(f)\n \"\"\" \n with bz2.BZ2File('testing_results/successive/radius_{}_RANSAC_successive_scores.pickle.bz2'.format(radius), 'r') as bz2_f:\n successive_scores = pickle.load(bz2_f)\n\n with 
open('testing_results/successive/radius_{}_RANSAC_successive_errors.pickle'.format(radius), 'rb') as f:\n successive_errors = pickle.load(f)\n \n for obj_, iteration_scores in successive_scores:#[:-1]: \n #print(\" RANSAC\",radius,iteration_errors.shape)\n iteration_errors = NN_matcher(iteration_scores)\n\n if iteration_errors.shape[0] < int(releases/skip):\n continue\n else:\n succ_RansacGeneralizedNN_per_iteration_errors.append(iteration_errors[:int(releases/skip)])\n \n succ_RansacGeneralizedNN_errors.append([\n radius,\n np.asarray(succ_RansacGeneralizedNN_per_iteration_errors)\n ])\n \n #print(np.asarray(succ_RansacGeneralizedNN_errors).shape)\n\n except:# Exception as ex:\n #print(radius,\": successive RansacNN\\n \", ex)\n pass\n \n print(\"Done with radius = {:.2f} in {:.3f} seconds\".format(radius,time.time() - t0))\n t0 = time.time()\n \nfor radius, per_iteration_errors in succ_RawNN_errors:\n\n #print(radius,\"Raw\",per_iteration_errors.shape)\n\n succ_RawNN_partial_errors_per_rel = []\n\n for rel_i in np.arange(per_iteration_errors.shape[1]):\n\n correct_interspace_labels_idxs = np.where(per_iteration_errors[:,rel_i,1]==0)[0]\n\n intraspace_errors = per_iteration_errors[correct_interspace_labels_idxs,rel_i,2]\n\n succ_RawNN_partial_errors_per_rel.append([\n rel_i,\n np.mean(intraspace_errors),\n np.std(intraspace_errors)\n ])\n\n succ_RawNN_partial_errors.append([\n radius,\n np.asarray(succ_RawNN_partial_errors_per_rel)\n ])\n \nfor radius, per_iteration_errors in succ_RansacGeneralizedNN_errors:\n\n #print(radius,per_iteration_errors.shape)\n\n succ_RansacGeneralizedNN_errors_per_rel = []\n\n for rel_i in np.arange(per_iteration_errors.shape[1]):\n\n correct_interspace_labels_idxs = np.where(per_iteration_errors[:,rel_i,1]==0)[0]\n\n intraspace_errors = per_iteration_errors[correct_interspace_labels_idxs,rel_i,2]\n\n succ_RansacGeneralizedNN_errors_per_rel.append([\n rel_i,\n np.mean(intraspace_errors),\n np.std(intraspace_errors)\n ])\n\n succ_RansacGeneralizedNN_partial_errors.append([\n radius,\n np.asarray(succ_RansacGeneralizedNN_errors_per_rel)\n ])", "/Users/deg032/Projects/public/spatial-privacy/nn_matchers.py:510: FutureWarning: in the future insert will treat boolean arrays and array-likes as a boolean index instead of casting it to integer\n good_matches_kp_idx = np.insert(good_matches[good_matches_ref_kp],True,good_matches_ref_kp)\n" ], [ "fig=plt.figure(figsize=(15, 5))\n\nax1 = fig.add_subplot(121) \n\nax1.grid(alpha = 0.7)\nax1.set_ylim(-0.025,1.025)\nax1.set_xlim(0,releases-skip)\nmarkersize = 8\n\nax1.set_ylabel(\"INTER-space Privacy\", fontsize = 16)\nax1.set_xlabel(\"Releases\", fontsize = 16)\n\nfor radius, RawNN_per_iteration_errors in succ_RawNN_errors:\n #print(RawNN_per_iteration_errors.shape)\n ax1.plot(\n np.arange(1,releases-skip,skip),#[:RawNN_per_iteration_errors.shape[1]],\n np.mean(RawNN_per_iteration_errors[:,:,1], axis = 0), \n ':o',\n label = \"r =\"+ str(radius) + \" Raw\"\n )\n \nfor radius, RansacNN_per_iteration_errors in succ_RansacGeneralizedNN_errors:\n #print(RansacNN_per_iteration_errors.shape)\n ax1.plot(\n np.arange(1,releases-skip,skip),\n np.mean(RansacNN_per_iteration_errors[:,:,1], axis = 0),\n '-s',\n label = \"r =\"+ str(radius) + \" RANSAC\"\n )\n\nax1.legend(loc = \"best\", ncol = 2)\n\nax2 = fig.add_subplot(122) \n\nax2.grid(alpha = 0.7)\nax2.set_ylim(-0.25,12.25)\nax2.set_xlim(0,releases-skip)\n\nax2.set_ylabel(\"INTRA-space Privacy\", fontsize = 16)\nax2.set_xlabel(\"Releases\", fontsize = 16)\n\nfor radius, 
errors_per_rel in succ_RansacGeneralizedNN_partial_errors:\n ax2.plot(\n np.arange(1,releases-skip,skip),\n errors_per_rel[:,1], \n #errors_per_rel[:,2],\n '-s',\n linewidth = 2, #capsize = 4.0, \n #marker = markers[0],\n #fillstyle = 'none',\n mew = 2, markersize = markersize,\n label = \"r =\"+ str(radius)+\", RANSAC\"\n )\n\nax2.legend(loc = \"best\");\n\nplt.savefig('plots/successive-partial-spaces.png', format='png', dpi=300,bbox_inches = 'tight')", "_____no_output_____" ] ], [ [ "# Step 3: Results with conservative plane releasing", "_____no_output_____" ] ], [ [ "\"\"\"\nParameters:\n\nAlso, we use the same successive samples from successive releasing for direct comparability of results.\n\"\"\"\n\n# We used a radius range of 0.25 to 5.0 in increments of 0.25.\nradius_range = radius_range\n\n# For our work, we orignally used 50 samples with further 100 successive releases for our investigation.\n# Below are lower parameters, change as desired.\nsamples = 25\nreleases = 50\n\nplanes = np.arange(1,30,3)\n\n# For demonstration purposes, we skip testing some successive samples but we still accumulate them.\nskip = 3", "_____no_output_____" ], [ "conservative_RANSAC_error_results = []\n\nt0 = time.time()\n\nfor radius in radius_range[:1]:\n \n succ_RansacGeneralizedNN_per_iteration_errors = []\n \n try:\n \n \"\"\"\n with open('testing_results/conservative/radius_{}_RANSAC_conservative_scores.pickle'.format(radius), 'rb') as f:\n conservative_scores = pickle.load(f)\n \"\"\" \n with bz2.BZ2File('testing_results/conservative/radius_{}_RANSAC_conservative_scores.pickle.bz2'.format(radius), 'r') as bz2_f:\n conservative_scores = pickle.load(bz2_f)\n \n for obj_, per_plane_scores in conservative_scores:#[:-1]: \n \n per_plane_errors = []\n \n skipped= False\n \n for max_plane, iteration_scores in per_plane_scores:\n \n iteration_errors = NN_matcher(iteration_scores)\n \n if iteration_errors.shape[0] >= int(releases/skip):\n per_plane_errors.append(iteration_errors[:int(releases/skip)])\n else:\n skipped = True\n #print(\"RANSAC: skipped\",iteration_errors.shape)\n \n if not skipped:\n succ_RansacGeneralizedNN_per_iteration_errors.append(per_plane_errors)\n \n conservative_RANSAC_error_results.append([\n radius,\n np.asarray(succ_RansacGeneralizedNN_per_iteration_errors)\n ])\n \n print(np.asarray(succ_RansacGeneralizedNN_per_iteration_errors).shape)\n \n except Exception as ex:\n print(radius,\": conservative RansacNN\\n \", ex)\n pass\n \n \n print(\"Done with radius = {:.2f} in {:.3f} seconds\".format(radius,time.time() - t0))\n t0 = time.time()\n \n\"\"\"\n# Uncomment below if you want to overwrite the existing results.\n\"\"\"\n#with open('testing_results/conservative/conservative_RANSAC_error_results.pickle', 'wb') as f:\n# pickle.dump(conservative_RANSAC_error_results,f)", "/Users/deg032/Projects/public/spatial-privacy/nn_matchers.py:510: FutureWarning: in the future insert will treat boolean arrays and array-likes as a boolean index instead of casting it to integer\n good_matches_kp_idx = np.insert(good_matches[good_matches_ref_kp],True,good_matches_ref_kp)\n" ], [ "\"\"\"\n\nPreparing the results of the case with *Conservative Releasing*.\n\n\"\"\"\n\nreleases_range = np.arange(1,releases-skip,skip)\n\nX, Y = np.meshgrid(releases_range, planes)\n\ntest_vp_cn_05 = np.asarray(conservative_RANSAC_error_results[0][1])\nmean_vp_cn_05 = np.mean(test_vp_cn_05[:,:,:,1],axis = 0)\n\n#test_vp_cn_10 = np.asarray(conservative_RANSAC_error_results[1][1])\n#mean_vp_cn_10 = 
np.mean(test_vp_cn_10[:,:,:,1],axis = 0)\n\nintra_vp_cn_05 = np.zeros(test_vp_cn_05.shape[1:])\n#intra_vp_cn_10 = np.zeros(test_vp_cn_10.shape[1:])\n\nfor plane_i, plane in enumerate(planes):\n \n for rel_i, rel in enumerate(releases_range):\n \n correct_interspace_labels_idxs_05 = np.where(test_vp_cn_05[:,plane_i,rel_i,1]==0)[0]\n #correct_interspace_labels_idxs_10 = np.where(test_vp_cn_10[:,plane_i,rel_i,1]==0)[0]\n\n intraspace_errors_05 = test_vp_cn_05[correct_interspace_labels_idxs_05,plane_i,rel_i,2]\n #intraspace_errors_10 = test_vp_cn_10[correct_interspace_labels_idxs_10,plane_i,rel_i,2]\n \n intra_vp_cn_05[plane_i,rel_i] = np.asarray([\n np.mean(intraspace_errors_05),\n np.std(intraspace_errors_05),\n 0,\n np.nan\n ])\n ", "/Users/deg032/anaconda3/envs/p35_env/lib/python3.5/site-packages/numpy/core/fromnumeric.py:2957: RuntimeWarning: Mean of empty slice.\n out=out, **kwargs)\n/Users/deg032/anaconda3/envs/p35_env/lib/python3.5/site-packages/numpy/core/_methods.py:80: RuntimeWarning: invalid value encountered in double_scalars\n ret = ret.dtype.type(ret / rcount)\n/Users/deg032/anaconda3/envs/p35_env/lib/python3.5/site-packages/numpy/core/_methods.py:135: RuntimeWarning: Degrees of freedom <= 0 for slice\n keepdims=keepdims)\n/Users/deg032/anaconda3/envs/p35_env/lib/python3.5/site-packages/numpy/core/_methods.py:105: RuntimeWarning: invalid value encountered in true_divide\n arrmean, rcount, out=arrmean, casting='unsafe', subok=False)\n/Users/deg032/anaconda3/envs/p35_env/lib/python3.5/site-packages/numpy/core/_methods.py:127: RuntimeWarning: invalid value encountered in double_scalars\n ret = ret.dtype.type(ret / rcount)\n" ], [ "fig = plt.figure(figsize=(11,8))\nax = plt.axes(projection='3d')\n\nsurf = ax.plot_surface(\n X, Y, \n mean_vp_cn_05, \n cmap=plt.cm.plasma,\n)\nsurf.set_clim(0.0,1.0)\n\nax.set_title(\"r = 0.5\", fontsize = 24)\nax.set_xlabel(\"Releases\", labelpad=10, fontsize = 24)\nax.set_xlim(0,releases)\nax.set_xticklabels(releases_range,fontsize = 16)\nax.set_zlabel(\"INTER-space Privacy\", labelpad=10, fontsize = 24)\nax.set_zlim(0,1)\nax.set_zticklabels([0,0.2,0.4,0.6,0.8,1.0],fontsize = 16)\nax.set_ylabel(\"Max number of planes\", labelpad=10, fontsize = 22)#, offset = 1)\nax.set_ylim(0,30)\nax.set_yticklabels(np.arange(0,35,5),fontsize = 16)\n\ncbar = fig.colorbar(surf, aspect=30, ticks = np.arange(0.0,1.1,0.25))\ncbar.ax.set_yticklabels(np.arange(0.0,1.1,0.25),fontsize = 16)\n\nax.view_init(25,135);", "_____no_output_____" ] ] ]
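The result-loading cells above repeat the same compressed-pickle pattern for every radius and matcher. A small helper like the sketch below could remove that duplication; the file-name pattern is taken from the cells above, and the helper name itself is only an illustrative choice.

import bz2
import pickle

def load_bz2_pickle(path):
    # e.g. path = 'testing_results/partials/radius_0.5_RAW_scores.pickle.bz2'
    with bz2.BZ2File(path, 'r') as bz2_f:
        return pickle.load(bz2_f)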
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
cba0899efd4de9177da19e1add158abdb35820ed
9,301
ipynb
Jupyter Notebook
Unscramble Computer Science Problems/Task4.ipynb
souravgopal25/Data-Structure-Algorithm-Nanodegree
d9f86440bd7802cf38fd1c61468c77cd195c0f9b
[ "MIT" ]
2
2020-05-06T07:00:27.000Z
2020-05-06T07:00:31.000Z
Unscramble Computer Science Problems/Task4.ipynb
souravgopal25/Data-Structure-Algorithm-Nanodegree
d9f86440bd7802cf38fd1c61468c77cd195c0f9b
[ "MIT" ]
null
null
null
Unscramble Computer Science Problems/Task4.ipynb
souravgopal25/Data-Structure-Algorithm-Nanodegree
d9f86440bd7802cf38fd1c61468c77cd195c0f9b
[ "MIT" ]
null
null
null
28.975078
458
0.429954
[ [ [ "\"\"\"\nRead file into texts and calls.\nIt's ok if you don't understand how to read files.\n\"\"\"\nimport csv\n\nwith open('texts.csv', 'r') as f:\n reader = csv.reader(f)\n texts = list(reader)\n\nwith open('calls.csv', 'r') as f:\n reader = csv.reader(f)\n calls = list(reader)", "_____no_output_____" ], [ "\"\"\"\nTASK 4:\nThe telephone company want to identify numbers that might be doing\ntelephone marketing. Create a set of possible telemarketers:\nthese are numbers that make outgoing calls but never send texts,\nreceive texts or receive incoming calls.\n\nPrint a message:\n\"These numbers could be telemarketers: \"\n<list of numbers>\nThe list of numbers should be print out one per line in lexicographic order with no duplicates.\n\"\"\"", "_____no_output_____" ], [ "caller_list=[]\nsender_list=[]\nfor caller,reciever,time,duration in calls:\n if not caller in caller_list:\n caller_list.append(caller)\n if reciever in caller_list:\n caller_list.remove(reciever)\n\nfor sender,reciever,time in texts:\n if sender in caller_list:\n caller_list.remove(sender)\n if reciever in caller_list:\n caller_list.remove(reciever)\n \n\n\ncaller_set=set(caller_list)", "_____no_output_____" ], [ "caller_list=list(caller_set)\ncaller_list.sort()", "_____no_output_____" ], [ "print(\"These numbers could be telemarketers: \")\nfor number in caller_list:\n print(number)\n len(number)", "These numbers could be telemarketers: \n(011)21017178\n(022)28765220\n(022)28952819\n(022)34715405\n(022)37572285\n(022)38214945\n(022)40840621\n(022)46574732\n(022)47410783\n(022)65548497\n(022)66911540\n(022)68535788\n(022)69042431\n(033)25441815\n(040)26738737\n(040)30429041\n(040)34008657\n(040)36649724\n(040)66729318\n(04344)615310\n(04344)649705\n(044)20550065\n(044)22020822\n(044)24037112\n(044)27523585\n(044)27641880\n(044)30360652\n(044)41581342\n(044)45838604\n(044)49481100\n(044)49868415\n(04546)218519\n(04546)267875\n(0471)2171438\n(0471)2225098\n(0471)2953539\n(0471)4255177\n(0471)6579079\n(080)20123809\n(080)20383942\n(080)23802940\n(080)24444677\n(080)25820765\n(080)27498339\n(080)30231886\n(080)30270642\n(080)31606520\n(080)31863188\n(080)32679828\n(080)33277651\n(080)34932254\n(080)35986130\n(080)35987804\n(080)37913009\n(080)39755879\n(080)40362016\n(080)40395498\n(080)40929452\n(080)41203315\n(080)41336994\n(080)41712046\n(080)43215621\n(080)43562014\n(080)43685310\n(080)43901222\n(080)44046839\n(080)44050207\n(080)44357306\n(080)45291968\n(080)45547058\n(080)45687418\n(080)46702492\n(080)46772413\n(080)47459867\n(080)49328664\n(080)49796269\n(080)60062475\n(080)60463379\n(080)60998034\n(080)62164823\n(080)62963633\n(080)63623429\n(080)64015211\n(080)64047472\n(080)64431120\n(080)65275591\n(080)66044294\n(080)66955387\n(080)67362492\n(080)68104927\n(080)68739140\n(080)69609453\n(080)69887826\n(0821)2135265\n(0821)3257740\n(0821)3602212\n(08214175)358\n1400481538\n1401747654\n1402316533\n1403072432\n1403579926\n1404073047\n1404368883\n1404787681\n1407539117\n1408371942\n1408409918\n1408672243\n1409421631\n1409668775\n1409994233\n74064 66270\n74292 23928\n78135 69048\n78136 54214\n78291 94593\n78293 38561\n78993 89387\n84313 80377\n87144 42283\n87144 55014\n87149 75762\n89071 50880\n90085 20915\n90089 69682\n90193 61937\n90196 73585\n90197 38885\n90351 90193\n90357 25284\n90368 95100\n92414 69419\n92423 51078\n92426 65661\n92426 72402\n93426 76415\n93427 40118\n93430 54160\n94002 85593\n94495 03761\n96569 95359\n97404 30456\n97404 90013\n97407 84573\n97418 59299\n97427 87999\n97442 45192\n98442 
73671\n98444 63396\n98447 62998\n98448 88411\n98457 75681\n98458 94162\n99617 25274\n" ], [ "", "_____no_output_____" ] ] ]
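Note that the cell above builds the candidate list in a single pass over the call records, so the result depends on record order: a number that receives a call before it ever makes one is never removed from the list. A set-based sketch like the one below is order-independent; it reuses the `calls` and `texts` lists loaded at the top of this notebook and may therefore produce a slightly different (stricter) list than the output printed above.

# Telemarketers: numbers that make outgoing calls but never receive calls,
# send texts, or receive texts.
outgoing_callers = {caller for caller, receiver, time, duration in calls}
excluded = {receiver for caller, receiver, time, duration in calls}
excluded |= {sender for sender, receiver, time in texts}
excluded |= {receiver for sender, receiver, time in texts}

print("These numbers could be telemarketers: ")
for number in sorted(outgoing_callers - excluded):
    print(number)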
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code" ] ]
cba092639e6d035837b24b6ad3933e74cc6e5529
156,937
ipynb
Jupyter Notebook
Deep_Learning_Project.ipynb
FGDBTKD/DeepLearningProject
8bea472966e0cf24fb11076c8aa6acf2fe7c9dff
[ "MIT" ]
4,625
2017-07-13T20:21:28.000Z
2022-03-30T12:26:20.000Z
Deep_Learning_Project.ipynb
FGDBTKD/DeepLearningProject
8bea472966e0cf24fb11076c8aa6acf2fe7c9dff
[ "MIT" ]
43
2017-07-14T16:32:11.000Z
2020-11-01T13:23:24.000Z
Deep_Learning_Project.ipynb
FGDBTKD/DeepLearningProject
8bea472966e0cf24fb11076c8aa6acf2fe7c9dff
[ "MIT" ]
706
2017-07-14T07:40:31.000Z
2022-02-28T15:27:44.000Z
44.319966
1,450
0.642742
[ [ [ "<h1 align='center' style=\"margin-bottom: 0px\"> An end to end implementation of a Machine Learning pipeline </h1>\n<h4 align='center' style=\"margin-top: 0px\"> SPANDAN MADAN</h4>\n<h4 align='center' style=\"margin-top: 0px\"> Visual Computing Group, Harvard University</h4>\n<h4 align='center' style=\"margin-top: 0px\"> Computer Science and Artificial Intelligence Laboratory, MIT</h4>", "_____no_output_____" ], [ "<h2 align='center' style=\"margin-top: 0px\"><a href='https://github.com/Spandan-Madan/DeepLearningProject'>Link to Github Repo</a></h2>", "_____no_output_____" ], [ "# Section 1. Introduction\n\n### Background\nIn the fall of 2016, I was a Teaching Fellow (Harvard's version of TA) for the graduate class on \"Advanced Topics in Data Science (CS209/109)\" at Harvard University. I was in-charge of designing the class project given to the students, and this tutorial has been built on top of the project I designed for the class.\n\n### Why write yet another Tutorial on Machine Learning and Deep Learning?\nAs a researcher on Computer Vision, I come across new blogs and tutorials on ML (Machine Learning) every day. However, most of them are just focussing on introducing the syntax and the terminology relevant to the field. For example - a 15 minute tutorial on Tensorflow using MNIST dataset, or a 10 minute intro to Deep Learning in Keras on Imagenet. \n\nWhile people are able to copy paste and run the code in these tutorials and feel that working in ML is really not that hard, it doesn't help them at all in using ML for their own purposes. For example, they never introduce you to how you can run the same algorithm on your own dataset. Or, how do you get the dataset if you want to solve a problem. Or, which algorithms do you use - Conventional ML, or Deep Learning? How do you evaluate your models performance? How do you write your own model, as opposed to choosing a ready made architecture? All these form fundamental steps in any Machine Learning pipeline, and it is these steps that take most of our time as ML practitioners. \n\nThis tutorial breaks down the whole pipeline, and leads the reader through it step by step in an hope to empower you to actually use ML, and not just feel that it was not too hard. Needless to say, this will take much longer than 15-30 minutes. I believe a weekend would be a good enough estimate.\n\n### About the Author\n\nI am <a href=\"http://spandanmadan.com/\">Spandan Madan</a>, a graduate student at Harvard University working on Computer Vision. My research work is supervised collaboratively by Professor Hanspeter Pfister at Harvard, and Professor Aude Oliva at MIT. My current research focusses on using Computer Vision and Natural Language Techniques in tandem to build systems capable of reasoning using text and visual elements simultaneusly.", "_____no_output_____" ], [ "# Section 2. Project Outline : Multi-Modal Genre Classification for Movies ", "_____no_output_____" ], [ "## Wow, that title sounds like a handful, right? Let's break it down step by step.\n\n### Q.1. what do we mean by Classification?\n\nIn machine learning, the task of classification means to use the available data to learn a <i>function</i> which can assign a category to a data point. For example, assign a genre to a movie, like \"Romantic Comedy\", \"Action\", \"Thriller\". Another example could be automatically assigning a category to news articles, like \"Sports\" and \"Politics\". 
\n\n### More Formally \n\n#### Given:\n- A data point $x_i$ \n- A set of categories $y_1,y_2...y_n$ that $x_i$ can belong to. <br>\n\n#### Task : \nPredict the correct category $y_k$ for a new data point $x_k$ not present in the given dataset.\n\n#### Problem : \nWe don't know how the $x$ and $y$ are related mathematically.\n\n#### Assumption : \nWe assume there exists a function $f$ relating $x$ and $y$ i.e. $f(x_i)=y_i$\n\n#### Approach : \nSince $f$ is not known, we learn a function $g$, which approximates $f$. \n\n#### Important consideration : \n- If $f(x_i)=g(x_i)=y_i$ for all $x_i$, then the two functions $f$ and $g$ are exactly equal. Needless to say, this won't realistically ever happen, and we'll only be able to approximate the true function $f$ using $g$. This means, sometimes the prediction $g(x_i)$ will not be correct. And essentially, our whole goal is to find a $g$ which makes a really low number of such errors. That's basically all that we're trying to do. \n\n- For the sake of completeness, I should mention that this is a specific kind of learning problem which we call \"Supervised Learning\". Also, the idea that $g$ approximates $f$ well for data not present in our dataset is called \"Generalization\". It is absolutely paramount that our model generalizes, or else all our claims will only be true about data we already have and our predictions will not be correct. \n\n- We will look into generalization a little bit more a little ahead in the tutorial. \n\n- Finally, There are several other kinds, but supervised learning is the most popular and well studied kind.", "_____no_output_____" ], [ "### Q.2. What's Multi-Modal Classification then?\n\nIn the machine learning community, the term Multi-Modal is used to refer to multiple <i>kinds</i> of data. For example, consider a YouTube video. It can be thought to contain 3 different modalities -\n\n- The video frames (visual modality)\n- The audio clip of what's being spoken (audio modality)\n- Some videos also come with the transcription of the words spoken in the form of subtitles (textual modality)\n\nConsider, that I'm interested in classifying a song on YouTube as pop or rock. You can use any of the above 3 modalities to predict the genre - The video, the song itself, or the lyrics. But, needless to say, you can predict it much better if you could use all three simultaneously. This is what we mean by multi-modal classification. ", "_____no_output_____" ], [ "# For this project, we will be using visual and textual data to classify movie genres.", "_____no_output_____" ], [ "# Project Outline\n\n- **Scraping a dataset** : The first step is to build a rich data set. We will collect textual and visual data for each movie.\n- **Data pre-processing**\n- **Non-deep Machine Learning models : Probabilistic and Max-Margin Classifiers.**\n- **Intuitive theory behind Deep Learning**\n- **Deep Models for Visual Data**\n- **Deep Models for Text**\n- **Potential Extensions**\n- **Food for Thought**\n", "_____no_output_____" ], [ "# Section 3. Building your very own DataSet.\n", "_____no_output_____" ], [ "For any machine learning algorithm to work, it is imperative that we collect data which is \"representative\". Now, let's take a moment to discuss what the word representative mean.\n\n### What data is good data? OR What do you mean by data being \"representative\"?\nLet's look at this from first principles. 
Mathematically, the premise of machine learning (to be precise, the strand of machine learning we'll be working with here) is that given an input variable X, and an output variable y, **IF** there is a function such that f(X)=y, then if f is unknown, we can \"learn\" a function g which approximates f. At the very heart, it's not at all different from what you may have earlier studied as \"curve fitting\". For example, if you're trying to predict someone's movie preferences then X can be information about the person's gender, age, nationality and so on, while y can be the genre they most like to watch!\n\nLet's do a thought experiment. Consider the same example - I'm trying to predict people's movie preferences. I walk into a classroom today, and collect information about some students and their movie preferences. Now, I use that data to build a model. How well do you think I can predict my father's movie preferences? The answer is - probably not very well. Why? Intuitively, there was probably no one in the classroom who was my father's age. My model can tell me that as people go from age 18 to 30, they have a higher preference for documentaries over superhero movies. But does this trend continue at 55? Perhaps they start liking family dramas more. Perhaps they don't. In a nutshell, we cannot say with certainty, as our data tells us nothing about it. So, if the task was to make predictions about ANYONE's movie preferences, then the data collected from just undergraduates is NOT representative.\n\nNow, let's see why this makes sense mathematically. Look at the graph below.", "_____no_output_____" ], [ "<img src=\"files/contour.png\">\n<center>Fig.1: Plot of a function we are trying to approximate (<a href=\"http://www.jzy3d.org/js/slider/images/ContourPlotsDemo.png\">source</a>)</center>", "_____no_output_____" ], [ "If we consider that the variable plotted on the vertical axis is $y$, and the values of the 2 variables on the horizontal axes make the input vector $X$, then our hope is that we are able to find a function $g$ which can approximate the function plotted here. If all the data I collect is such that $x_1$ belongs to (80,100) and $x_2$ belongs to (80,100), the learned function will only be able to learn the \"yellow-green dipping below\" part of the function. Our function will never be able to predict the behavior in the \"red\" regions of the true function. So, in order to be able to learn a good function, we need data sampled from a diverse set of values of $x_1$ and $x_2$. That would be representative data to learn this contour.", "_____no_output_____" ], [ "Therefore, we want to collect data which is representative of all possible movies that we want to make predictions about. Or else (which is often the case), we need to be aware of the limitations of the model we have trained, and the predictions we can make with confidence. The easiest way to do this is to only make predictions about the domain of data we collected the training data from. For example, in our case, let us start by assuming that our model will predict genres for only English movies. Now, the task is to collect data about a diverse collection of movies.\n\nSo how do we get this data then? Neither Google nor any university has released such a dataset. We want to collect visual and textual data about these movies. The simple answer is to scrape it from the internet to build our own dataset. For the purpose of this project, we will use movie posters as our visual data, and movie plots as textual data. 
Using these, we will build a model that can predict movie genres! ", "_____no_output_____" ], [ "# We will be scraping data from 2 different movie sources - IMDB and TMDB", "_____no_output_____" ], [ "<h3>IMDB:http://www.imdb.com/</h3>\n\nFor those unaware, IMDB is the primary source of information about movies on the internet. It is immensely rich with posters, reviews, synopsis, ratings and many other information on every movie. We will use this as our primary data source. \n\n<h3>TMDB:https://www.themoviedb.org/</h3>\n\nTMDB, or The Movie DataBase, is an open source version of IMDB, with a free to use API that can be used to collect information. You do need an API key, but it can be obtained for free by just making a request after making a free account.", "_____no_output_____" ], [ "#### Note - \nIMDB gives some information for free through the API, but doesn't release other information about movies. Here, we will keep it legal and only use information given to us for free and legally. However, scraping does reside on the moral fence, so to say. People often scrape data which isn't exactly publicly available for use from websites. ", "_____no_output_____" ] ], [ [ "import torchvision\nimport urllib2\nimport requests\nimport json\nimport imdb\nimport time\nimport itertools\nimport wget\nimport os\nimport tmdbsimple as tmdb\nimport numpy as np\nimport random\nimport matplotlib\nimport matplotlib.pyplot as plt\n%matplotlib inline\nimport seaborn as sns\nimport pickle", "/anaconda3/envs/deeplearningproject/lib/python2.7/site-packages/scipy/special/__init__.py:640: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88\n from ._ufuncs import *\n/anaconda3/envs/deeplearningproject/lib/python2.7/site-packages/scipy/linalg/basic.py:17: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88\n from ._solve_toeplitz import levinson\n/anaconda3/envs/deeplearningproject/lib/python2.7/site-packages/scipy/linalg/__init__.py:191: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88\n from ._decomp_update import *\n/anaconda3/envs/deeplearningproject/lib/python2.7/site-packages/scipy/special/_ellip_harm.py:7: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88\n from ._ellip_harm_2 import _ellipsoid, _ellipsoid_norm\n/anaconda3/envs/deeplearningproject/lib/python2.7/site-packages/scipy/sparse/lil.py:16: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88\n from . import _csparsetools\n/anaconda3/envs/deeplearningproject/lib/python2.7/site-packages/scipy/sparse/csgraph/__init__.py:167: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88\n from ._shortest_path import shortest_path, floyd_warshall, dijkstra,\\\n/anaconda3/envs/deeplearningproject/lib/python2.7/site-packages/scipy/sparse/csgraph/_validation.py:5: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88\n from ._tools import csgraph_to_dense, csgraph_from_dense,\\\n/anaconda3/envs/deeplearningproject/lib/python2.7/site-packages/scipy/sparse/csgraph/__init__.py:169: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. 
Expected 96, got 88\n from ._traversal import breadth_first_order, depth_first_order, \\\n/anaconda3/envs/deeplearningproject/lib/python2.7/site-packages/scipy/sparse/csgraph/__init__.py:171: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88\n from ._min_spanning_tree import minimum_spanning_tree\n/anaconda3/envs/deeplearningproject/lib/python2.7/site-packages/scipy/sparse/csgraph/__init__.py:172: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88\n from ._reordering import reverse_cuthill_mckee, maximum_bipartite_matching, \\\n/anaconda3/envs/deeplearningproject/lib/python2.7/site-packages/scipy/optimize/_numdiff.py:8: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88\n from ._group_columns import group_dense, group_sparse\n/anaconda3/envs/deeplearningproject/lib/python2.7/site-packages/scipy/interpolate/_bsplines.py:9: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88\n from . import _bspl\n/anaconda3/envs/deeplearningproject/lib/python2.7/site-packages/scipy/spatial/__init__.py:94: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88\n from .ckdtree import *\n/anaconda3/envs/deeplearningproject/lib/python2.7/site-packages/scipy/spatial/__init__.py:95: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88\n from .qhull import *\n/anaconda3/envs/deeplearningproject/lib/python2.7/site-packages/scipy/spatial/_spherical_voronoi.py:18: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88\n from . import _voronoi\n/anaconda3/envs/deeplearningproject/lib/python2.7/site-packages/scipy/spatial/distance.py:121: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88\n from . import _hausdorff\n/anaconda3/envs/deeplearningproject/lib/python2.7/site-packages/scipy/stats/_continuous_distns.py:17: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88\n from . import _stats\n/anaconda3/envs/deeplearningproject/lib/python2.7/site-packages/pandas/_libs/__init__.py:3: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88\n from .tslib import iNaT, NaT, Timestamp, Timedelta, OutOfBoundsDatetime\n/anaconda3/envs/deeplearningproject/lib/python2.7/site-packages/pandas/_libs/__init__.py:3: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192, got 176\n from .tslib import iNaT, NaT, Timestamp, Timedelta, OutOfBoundsDatetime\n/anaconda3/envs/deeplearningproject/lib/python2.7/site-packages/pandas/__init__.py:26: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88\n from pandas._libs import (hashtable as _hashtable,\n/anaconda3/envs/deeplearningproject/lib/python2.7/site-packages/pandas/__init__.py:26: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192, got 176\n from pandas._libs import (hashtable as _hashtable,\n/anaconda3/envs/deeplearningproject/lib/python2.7/site-packages/pandas/core/dtypes/common.py:6: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. 
Expected 96, got 88\n from pandas._libs import algos, lib\n/anaconda3/envs/deeplearningproject/lib/python2.7/site-packages/pandas/core/dtypes/common.py:6: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192, got 176\n from pandas._libs import algos, lib\n/anaconda3/envs/deeplearningproject/lib/python2.7/site-packages/pandas/core/util/hashing.py:7: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88\n from pandas._libs import hashing, tslib\n/anaconda3/envs/deeplearningproject/lib/python2.7/site-packages/pandas/core/util/hashing.py:7: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192, got 176\n from pandas._libs import hashing, tslib\n/anaconda3/envs/deeplearningproject/lib/python2.7/site-packages/pandas/core/indexes/base.py:6: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88\n from pandas._libs import (lib, index as libindex, tslib as libts,\n/anaconda3/envs/deeplearningproject/lib/python2.7/site-packages/pandas/core/indexes/base.py:6: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192, got 176\n from pandas._libs import (lib, index as libindex, tslib as libts,\n/anaconda3/envs/deeplearningproject/lib/python2.7/site-packages/pandas/core/indexes/datetimelike.py:28: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88\n from pandas._libs.period import Period\n/anaconda3/envs/deeplearningproject/lib/python2.7/site-packages/pandas/core/indexes/datetimelike.py:28: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192, got 176\n from pandas._libs.period import Period\n/anaconda3/envs/deeplearningproject/lib/python2.7/site-packages/pandas/core/sparse/array.py:32: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88\n import pandas._libs.sparse as splib\n/anaconda3/envs/deeplearningproject/lib/python2.7/site-packages/pandas/core/sparse/array.py:32: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192, got 176\n import pandas._libs.sparse as splib\n/anaconda3/envs/deeplearningproject/lib/python2.7/site-packages/pandas/core/window.py:36: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88\n import pandas._libs.window as _window\n/anaconda3/envs/deeplearningproject/lib/python2.7/site-packages/pandas/core/window.py:36: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192, got 176\n import pandas._libs.window as _window\n/anaconda3/envs/deeplearningproject/lib/python2.7/site-packages/pandas/core/groupby.py:66: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88\n from pandas._libs import lib, groupby as libgroupby, Timestamp, NaT, iNaT\n/anaconda3/envs/deeplearningproject/lib/python2.7/site-packages/pandas/core/groupby.py:66: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192, got 176\n from pandas._libs import lib, groupby as libgroupby, Timestamp, NaT, iNaT\n/anaconda3/envs/deeplearningproject/lib/python2.7/site-packages/pandas/core/reshape/reshape.py:30: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. 
Expected 96, got 88\n from pandas._libs import algos as _algos, reshape as _reshape\n/anaconda3/envs/deeplearningproject/lib/python2.7/site-packages/pandas/core/reshape/reshape.py:30: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192, got 176\n from pandas._libs import algos as _algos, reshape as _reshape\n/anaconda3/envs/deeplearningproject/lib/python2.7/site-packages/pandas/io/parsers.py:43: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88\n import pandas._libs.parsers as parsers\n/anaconda3/envs/deeplearningproject/lib/python2.7/site-packages/pandas/io/parsers.py:43: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192, got 176\n import pandas._libs.parsers as parsers\n/anaconda3/envs/deeplearningproject/lib/python2.7/site-packages/scipy/cluster/vq.py:88: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88\n from . import _vq\n/anaconda3/envs/deeplearningproject/lib/python2.7/site-packages/scipy/cluster/hierarchy.py:178: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88\n from . import _hierarchy\n" ] ], [ [ "# Here is a broad outline of technical steps to be done for data collection\n\n\n* Sign up for TMDB (themoviedatabase.org), and set up API to scrape movie posters for above movies.\n* Set up and work with TMDb to get movie information from their database\n* Do the same for IMDb\n* Compare the entries of IMDb and TMDb for a movie\n* Get a listing and information of a few movies\n* Think and ponder over the potential challenges that may come our way, and think about interesting questions we can answer given the API's we have in our hands.\n* Get data from the TMDb\n\nLet's go over each one of these one by one.", "_____no_output_____" ], [ "## Signing up for TMDB and getting set up for getting movie metadata.\n\n* Step 1. Head over to [tmdb.org] (https://www.themoviedb.org/?language=en) and create a new account there by signing up.\n* Step 2. Click on your account icon on the top right, then from drop down menu select \"Settings\".\n* Step 3. On the settings page, you will see the option \"API\" on the left pane. Click on that.\n* Step 4. Apply for a new developer key. Fill out the form as required. The fields \"Application Name\" and \"Application URL\" are not important. Fill anything there.\n* Step 5. It should generate a new API key for you and you should also receive a mail.\n\nNow that you have the API key for TMDB, you can query using TMDB. Remember, it allows only 40 queries per 10 seconds.\n\nAn easy way to respect this is to just have a call to <i>time.sleep(1)</i> after each iteration. This is also being very nice to the server.\n\nIf you want to try and maximize your throughput you can embed every TMDB request in a nested try except block. If the first try fails, the second try first uses python's sleep function to give it a little rest, and then try again to make a request. 
Something like this -\n\t\n~~~~\ntry:\n    search.movie(query=movie_name) #An API request\nexcept:\n    try:\n        time.sleep(10) #sleep for a bit, to give API requests a rest.\n        search.movie(query=movie_name) #Make second API request\n    except:\n        print \"Failed second attempt too, check if there's any error in request\"\n~~~~", "_____no_output_____" ], [ "## Using TMDB with the obtained API key to get movie information", "_____no_output_____" ], [ "I have made these functions which make things easy. Basically, I'm making use of a library called tmdbsimple which makes using TMDB even easier. This library was installed at the time of setup.\n\nHowever, if you want to avoid the library, it is also easy enough to load the API output directly into a dictionary like this without using tmdbsimple:\n\n~~~\nurl = 'https://api.themoviedb.org/3/movie/1581?api_key=' + api_key\ndata = urllib2.urlopen(url).read()\n\n# create dictionary from JSON \ndataDict = json.loads(data)\n~~~", "_____no_output_____" ] ], [ [ "# set here the path where you want the scraped folders to be saved!\nposter_folder='posters_final/'\nif poster_folder.split('/')[0] in os.listdir('./'):\n    print('Folder already exists')\nelse:\n    os.mkdir('./'+poster_folder)", "Folder already exists\n" ], [ "poster_folder", "_____no_output_____" ], [ "# For the purpose of this example, I will be working with the 1999 Sci-Fi movie - \"The Matrix\"!\n\napi_key = 'a237bfff7e08d0e6902c623978183be0' #Enter your own API key here to run the code below. \n# Generate your own API key as explained above :)\n\n\ntmdb.API_KEY = api_key #This sets the API key setting for the tmdb object\nsearch = tmdb.Search() #this instantiates a tmdb \"search\" object which allows you to search for the movie\nimport os.path\n\n# These functions take in a string movie name i.e. 
like \"The Matrix\" or \"Interstellar\"\n# What they return is pretty much clear in the name - Poster, ID , Info or genre of the Movie!\ndef grab_poster_tmdb(movie):\n \n response = search.movie(query=movie)\n id=response['results'][0]['id']\n movie = tmdb.Movies(id)\n posterp=movie.info()['poster_path']\n title=movie.info()['original_title']\n \n url='image.tmdb.org/t/p/original'+posterp\n title='_'.join(title.split(' '))\n strcmd='wget -O '+poster_folder+title+'.jpg '+url\n os.system(strcmd)\n\ndef get_movie_id_tmdb(movie):\n response = search.movie(query=movie)\n movie_id=response['results'][0]['id']\n return movie_id\n\ndef get_movie_info_tmdb(movie):\n response = search.movie(query=movie)\n id=response['results'][0]['id']\n movie = tmdb.Movies(id)\n info=movie.info()\n return info\n\ndef get_movie_genres_tmdb(movie):\n response = search.movie(query=movie)\n id=response['results'][0]['id']\n movie = tmdb.Movies(id)\n genres=movie.info()['genres']\n return genres", "_____no_output_____" ] ], [ [ "While the above functions have been made to make it easy to get genres, posters and ID, all the information that can be accessed can be seen by calling the function get_movie_info() as shown below", "_____no_output_____" ] ], [ [ "print get_movie_genres_tmdb(\"The Matrix\")", "[{u'id': 28, u'name': u'Action'}, {u'id': 878, u'name': u'Science Fiction'}]\n" ], [ "info=get_movie_info_tmdb(\"The Matrix\")\nprint \"All the Movie information from TMDB gets stored in a dictionary with the following keys for easy access -\"\ninfo.keys()", "All the Movie information from TMDB gets stored in a dictionary with the following keys for easy access -\n" ] ], [ [ "So, to get the tagline of the movie we can use the above dictionary key - ", "_____no_output_____" ] ], [ [ "info=get_movie_info_tmdb(\"The Matrix\")\nprint info['tagline']", "Welcome to the Real World.\n" ] ], [ [ "## Getting movie information from IMDB", "_____no_output_____" ], [ "Now that we know how to get information from TMDB, here's how we can get information about the same movie from IMDB. This makes it possible for us to combine more information, and get a richer dataset. I urge you to try and see what dataset you can make, and go above and beyond the basic things I've done in this tutorial. 
Due to the differences between the two datasets, you will have to do some cleaning, however both of these datasets are extremely clean and it will be minimal.", "_____no_output_____" ] ], [ [ "# Create the IMDB object that will be used to access the IMDb's database.\nimbd_object = imdb.IMDb() # by default access the web.\n\n# Search for a movie (get a list of Movie objects).\nresults = imbd_object.search_movie('The Matrix')\n\n# As this returns a list of all movies containing the word \"The Matrix\", we pick the first element\nmovie = results[0]\n\nimbd_object.update(movie)\n\nprint \"All the information we can get about this movie from IMDB-\"\nmovie.keys()", "All the information we can get about this movie from IMDB-\n" ], [ "print \"The genres associated with the movie are - \",movie['genres']", "The genres associated with the movie are - [u'Action', u'Sci-Fi']\n" ] ], [ [ "## A small comparison of IMDB and TMDB", "_____no_output_____" ], [ "Now that we have both systems running, let's do a very short comparison for the same movie?", "_____no_output_____" ] ], [ [ "print \"The genres for The Matrix pulled from IMDB are -\",movie['genres']\nprint \"The genres for The Matrix pulled from TMDB are -\",get_movie_genres_tmdb(\"The Matrix\")", "The genres for The Matrix pulled from IMDB are - [u'Action', u'Sci-Fi']\nThe genres for The Matrix pulled from TMDB are - [{u'id': 28, u'name': u'Action'}, {u'id': 878, u'name': u'Science Fiction'}]\n" ] ], [ [ "As we can see, both the systems are correct, but the way they package information is different. TMDB calls it \"Science Fiction\" and has an ID for every genre. While IMDB calls it \"Sci-Fi\". Thus, it is important to keep track of these things when making use of both the datasets simultaneously.", "_____no_output_____" ], [ "Now that we know how to scrape information for one movie, let's take a bigger step towards scraping multiple movies?", "_____no_output_____" ], [ "## Working with multiple movies : Obtaining Top 20 movies from TMDB", "_____no_output_____" ], [ "We first instantiate an object that inherits from class Movies from TMDB. Then We use the **popular()** class method (i.e. function) to get top movies. To get more than one page of results, the optional page argument lets us see movies from any specified page number.", "_____no_output_____" ] ], [ [ "all_movies=tmdb.Movies()\ntop_movies=all_movies.popular()\n\n# This is a dictionary, and to access results we use the key 'results' which returns info on 20 movies\nprint(len(top_movies['results']))\ntop20_movs=top_movies['results']", "20\n" ] ], [ [ "Let's look at one of these movies. It's the same format as above, as we had information on the movie \"The Matrix\", as you can see below. 
It's a dictionary which can be queried for specific information on that movie", "_____no_output_____" ] ], [ [ "first_movie=top20_movs[0]\nprint \"Here is all the information you can get on this movie - \"\nprint first_movie\nprint \"\\n\\nThe title of the first movie is - \", first_movie['title']", "Here is all the information you can get on this movie - \n{u'poster_path': u'/3IGbjc5ZC5yxim5W0sFING2kdcz.jpg', u'title': u'Solo: A Star Wars Story', u'overview': u'Through a series of daring escapades deep within a dark and dangerous criminal underworld, Han Solo meets his mighty future copilot Chewbacca and encounters the notorious gambler Lando Calrissian.', u'release_date': u'2018-05-15', u'popularity': 214.308, u'original_title': u'Solo: A Star Wars Story', u'backdrop_path': u'/96B1qMN9RxrAFu6uikwFhQ6N6J9.jpg', u'vote_count': 1804, u'video': False, u'adult': False, u'vote_average': 6.7, u'genre_ids': [28, 12, 878], u'id': 348350, u'original_language': u'en'}\n\n\nThe title of the first movie is - Solo: A Star Wars Story\n" ] ], [ [ "Let's print out top 5 movie's titles! ", "_____no_output_____" ] ], [ [ "for i in range(len(top20_movs)):\n mov=top20_movs[i]\n title=mov['title']\n print title\n if i==4:\n break", "Solo: A Star Wars Story\nThe Nun\nAvengers: Infinity War\nThe Predator\nJurassic World: Fallen Kingdom\n" ] ], [ [ "### Yes, I know. I'm a little upset too seeing Beauty and the Beast above Logan in the list!", "_____no_output_____" ], [ "Moving on, we can get their genres the same way.", "_____no_output_____" ] ], [ [ "for i in range(len(top20_movs)):\n mov=top20_movs[i]\n genres=mov['genre_ids']\n print genres\n if i==4:\n break", "[28, 12, 878]\n[27, 9648, 53]\n[12, 878, 28]\n[27, 878, 28, 35]\n[28, 12, 878]\n" ] ], [ [ "So, TMDB doesn't want to make your job as easy as you thought. Why these random numbers? Want to see their genre names? Well, there's the Genre() class for it. Let's get this done!", "_____no_output_____" ] ], [ [ "# Create a tmdb genre object!\ngenres=tmdb.Genres()\n# the list() method of the Genres() class returns a listing of all genres in the form of a dictionary.\nlist_of_genres=genres.list()['genres']", "_____no_output_____" ] ], [ [ "Let's convert this list into a nice dictionary to look up genre names from genre IDs!", "_____no_output_____" ] ], [ [ "Genre_ID_to_name={}\nfor i in range(len(list_of_genres)):\n genre_id=list_of_genres[i]['id']\n genre_name=list_of_genres[i]['name']\n Genre_ID_to_name[genre_id]=genre_name", "_____no_output_____" ] ], [ [ "Now, let's re-print the genres of top 20 movies? ", "_____no_output_____" ] ], [ [ "for i in range(len(top20_movs)):\n mov=top20_movs[i]\n title=mov['title']\n genre_ids=mov['genre_ids']\n genre_names=[]\n for id in genre_ids:\n genre_name=Genre_ID_to_name[id]\n genre_names.append(genre_name)\n print title,genre_names\n if i==4:\n break", "Solo: A Star Wars Story [u'Action', u'Adventure', u'Science Fiction']\nThe Nun [u'Horror', u'Mystery', u'Thriller']\nAvengers: Infinity War [u'Adventure', u'Science Fiction', u'Action']\nThe Predator [u'Horror', u'Science Fiction', u'Action', u'Comedy']\nJurassic World: Fallen Kingdom [u'Action', u'Adventure', u'Science Fiction']\n" ] ], [ [ "# Section 4 - Building a dataset to work with : Let's take a look at the top 1000 movies from the database", "_____no_output_____" ], [ "Making use of the same api as before, we will just pull results from the top 50 pages. 
As mentioned earlier, the \"page\" attribute of the command top_movies=all_movies.popular() can be used for this purpose.", "_____no_output_____" ], [ "Please note: Some of the code below will store the data into python \"pickle\" files so that it can be ready directly from memory, as opposed to being downloaded every time. Once done, you should comment out any code which generated an object that was pickled and is no longer needed.", "_____no_output_____" ] ], [ [ "all_movies=tmdb.Movies()\ntop_movies=all_movies.popular()\n\n# This is a dictionary, and to access results we use the key 'results' which returns info on 20 movies\nlen(top_movies['results'])\ntop20_movs=top_movies['results']", "_____no_output_____" ], [ "# Comment out this cell once the data is saved into pickle file.\nall_movies=tmdb.Movies()\ntop1000_movies=[]\nprint('Pulling movie list, Please wait...')\nfor i in range(1,51):\n if i%15==0:\n time.sleep(7)\n movies_on_this_page=all_movies.popular(page=i)['results']\n top1000_movies.extend(movies_on_this_page)\nlen(top1000_movies)\nf3=open('movie_list.pckl','wb')\npickle.dump(top1000_movies,f3)\nf3.close()\nprint('Done!')", "Pulling movie list, Please wait...\n" ], [ "f3=open('movie_list.pckl','rb')\ntop1000_movies=pickle.load(f3)\nf3.close()", "_____no_output_____" ] ], [ [ "# Pairwise analysis of Movie Genres", "_____no_output_____" ], [ "As our dataset is multi label, simply looking at the distribution of genres is not sufficient. It might be beneficial to see which genres co-occur, as it might shed some light on inherent biases in our dataset. For example, it would make sense if romance and comedy occur together more often than documentary and comedy. Such inherent biases tell us that the underlying population we are sampling from itself is skewed and not balanced. We may then take steps to account for such problems. Even if we don't take such steps, it is important to be aware that we are making the assumption that an unbalanced dataset is not hurting our performance and if need be, we can come back to address this assumption. Good old scientific method, eh?\n\nSo for the top 1000 movies let's do some pairwise analysis for genre distributions. Our main purpose is to see which genres occur together in the same movie. So, we first define a function which takes a list and makes all possible pairs from it. Then, we pull the list of genres for a movie and run this function on the list of genres to get all pairs of genres which occur together", "_____no_output_____" ] ], [ [ "# This function just generates all possible pairs of movies\ndef list2pairs(l):\n # itertools.combinations(l,2) makes all pairs of length 2 from list l.\n pairs = list(itertools.combinations(l, 2))\n # then the one item pairs, as duplicate pairs aren't accounted for by itertools\n for i in l:\n pairs.append([i,i])\n return pairs", "_____no_output_____" ] ], [ [ "As mentioned, now we will pull genres for each movie, and use above function to count occurrences of when two genres occurred together", "_____no_output_____" ] ], [ [ "# get all genre lists pairs from all movies\nallPairs = []\nfor movie in top1000_movies:\n allPairs.extend(list2pairs(movie['genre_ids']))\n \nnr_ids = np.unique(allPairs)\nvisGrid = np.zeros((len(nr_ids), len(nr_ids)))\nfor p in allPairs:\n visGrid[np.argwhere(nr_ids==p[0]), np.argwhere(nr_ids==p[1])]+=1\n if p[1] != p[0]:\n visGrid[np.argwhere(nr_ids==p[1]), np.argwhere(nr_ids==p[0])]+=1", "_____no_output_____" ] ], [ [ "Let's take a look at the structure we just made. 
It is a 19X19 structure, as shown below. Also, see that we had 19 Genres. Needless to say, this structure counts the number of simultaneous occurrences of genres in same movie.", "_____no_output_____" ] ], [ [ "print visGrid.shape\nprint len(Genre_ID_to_name.keys())", "_____no_output_____" ], [ "annot_lookup = []\nfor i in xrange(len(nr_ids)):\n annot_lookup.append(Genre_ID_to_name[nr_ids[i]])\n\nsns.heatmap(visGrid, xticklabels=annot_lookup, yticklabels=annot_lookup)", "_____no_output_____" ] ], [ [ "The above image shows how often the genres occur together, as a heatmap", "_____no_output_____" ], [ "Important thing to notice in the above plot is the diagonal. The diagonal corresponds to self-pairs, i.e. number of times a genre, say Drama occurred with Drama. Which is basically just a count of the total times that genre occurred! \n\nAs we can see there are a lot of dramas in the data set, it is also a very unspecific label. There are nearly no documentaries or TV Movies. Horror is a very distinct label, and romance is also not too widely spread. \n\nTo account for this unbalanced data, there are multiple things we can try to explore what interesting relationships can be found.", "_____no_output_____" ], [ "## Delving Deeper into co-occurrence of genres", "_____no_output_____" ], [ "What we want to do now is to look for nice groups of genres that co-occur, and see if it makes sense to us logically? Intuitively speaking, wouldn't it be fun if we saw nice boxes on the above plot - boxes of high intensity i.e. genres that occur together and don't occur much with other genres. In some ways, that would isolate the co-occurrence of some genres, and heighten the co-occurrence of others.\n\nWhile the data may not show that directly, we can play with the numbers to see if that's possible. The technique used for that is called biclustering.", "_____no_output_____" ] ], [ [ "from sklearn.cluster import SpectralCoclustering", "_____no_output_____" ], [ "model = SpectralCoclustering(n_clusters=5)\nmodel.fit(visGrid)\n\nfit_data = visGrid[np.argsort(model.row_labels_)]\nfit_data = fit_data[:, np.argsort(model.column_labels_)]\n\nannot_lookup_sorted = []\nfor i in np.argsort(model.row_labels_):\n annot_lookup_sorted.append(Genre_ID_to_name[nr_ids[i]])\n \nsns.heatmap(fit_data, xticklabels=annot_lookup_sorted, yticklabels=annot_lookup_sorted, annot=False)\nplt.title(\"After biclustering; rearranged to show biclusters\")\n\nplt.show()", "_____no_output_____" ] ], [ [ "Looking at the above figure, \"boxes\" or groups of movie genres automatically emerge! \n\nIntuitively - Crime, Sci-Fi, Mystery, Action, Horror, Drama, Thriller, etc co-occur. \nAND, Romance, Fantasy, Family, Music, Adventure, etc co-occur. \n\nThat makes a lot of intuitive sense, right?\n\nOne challenge is the broad range of the drama genre. It makes the two clusters highly overlapping. If we merge it together with action thriller, etc. We will end up with nearly all movies just having that label. ", "_____no_output_____" ], [ "**Based on playing around with the stuff above, we can sort the data into the following genre categories - \"Drama, Action, ScienceFiction, exciting(thriller, crime, mystery), uplifting(adventure, fantasy, animation, comedy, romance, family), Horror, History\"**\n\n\nNote: that this categorization is subjective and by no means the only right solution. One could also just stay with the original labels and only exclude the ones with not enough data. 
Such tricks are important to balance the dataset, it allows us to increase or decrease the strength of certain signals, making it possible to improve our inferences :)", "_____no_output_____" ], [ "# Interesting Questions\nThis really should be a place for you to get creative and hopefully come up with better questions than me. \n\nHere are some of my thoughts:\n- Which actors are bound to a genre, and which can easily hop genres?\n- Is there a trend in genre popularity over the years?\n- Can you use sound tracks to identify the genre of a movie?\n- Are top romance actors higher paid than top action actors?\n- If you look at release date vs popularity score, which movie genres have a longer shelf life?\n\nIdeas to explore specifically for feature correlations:\n- Are title length correlated with movie genre?\n- Are movie posters darker for horror than for romance end comedy?\n- Are some genres specifically released more often at a certain time of year? \n- Is the RPG rating correlated with the genre?", "_____no_output_____" ], [ "# Based on this new category set, we will now pull posters from TMDB as our training data!", "_____no_output_____" ] ], [ [ "# Done before, reading from pickle file now to maintain consistency of data!\n# We now sample 100 movies per genre. Problem is that the sorting is by popular movies, so they will overlap. \n# Need to exclude movies that were already sampled. \nmovies = []\nbaseyear = 2017\n\nprint('Starting pulling movies from TMDB. If you want to debug, uncomment the print command. This will take a while, please wait...')\ndone_ids=[]\nfor g_id in nr_ids:\n #print('Pulling movies for genre ID '+g_id)\n baseyear -= 1\n for page in xrange(1,6,1):\n time.sleep(0.5)\n \n url = 'https://api.themoviedb.org/3/discover/movie?api_key=' + api_key\n url += '&language=en-US&sort_by=popularity.desc&year=' + str(baseyear) \n url += '&with_genres=' + str(g_id) + '&page=' + str(page)\n\n data = urllib2.urlopen(url).read()\n\n dataDict = json.loads(data)\n movies.extend(dataDict[\"results\"])\n done_ids.append(str(g_id))\nprint(\"Pulled movies for genres - \"+','.join(done_ids))", "_____no_output_____" ], [ "# f6=open(\"movies_for_posters\",'wb')\n# pickle.dump(movies,f6)\n# f6.close()", "_____no_output_____" ], [ "f6=open(\"movies_for_posters\",'rb')\nmovies=pickle.load(f6)\nf6.close()", "_____no_output_____" ] ], [ [ "Let's remove any duplicates that we have in the list of movies", "_____no_output_____" ] ], [ [ "movie_ids = [m['id'] for m in movies]\nprint \"originally we had \",len(movie_ids),\" movies\"\nmovie_ids=np.unique(movie_ids)\nprint len(movie_ids)\nseen_before=[]\nno_duplicate_movies=[]\nfor i in range(len(movies)):\n movie=movies[i]\n id=movie['id']\n if id in seen_before:\n continue\n# print \"Seen before\"\n else:\n seen_before.append(id)\n no_duplicate_movies.append(movie)\nprint \"After removing duplicates we have \",len(no_duplicate_movies), \" movies\"", "_____no_output_____" ] ], [ [ "Also, let's remove movies for which we have no posters!", "_____no_output_____" ] ], [ [ "poster_movies=[]\ncounter=0\nmovies_no_poster=[]\nprint(\"Total movies : \",len(movies))\nprint(\"Started downloading posters...\")\nfor movie in movies:\n id=movie['id']\n title=movie['title']\n if counter==1:\n print('Downloaded first. Code is working fine. 
Please wait, this will take quite some time...')\n if counter%300==0 and counter!=0:\n print \"Done with \",counter,\" movies!\"\n print \"Trying to get poster for \",title\n try:\n #grab_poster_tmdb(title)\n poster_movies.append(movie)\n except:\n try:\n time.sleep(7)\n grab_poster_tmdb(title)\n poster_movies.append(movie)\n except:\n movies_no_poster.append(movie)\n counter+=1\nprint(\"Done with all the posters!\")", "_____no_output_____" ], [ "print len(movies_no_poster)\nprint len(poster_movies)", "_____no_output_____" ], [ "# f=open('poster_movies.pckl','w')\n# pickle.dump(poster_movies,f)\n# f.close()", "_____no_output_____" ], [ "f=open('poster_movies.pckl','r')\nposter_movies=pickle.load(f)\nf.close()", "_____no_output_____" ], [ "# f=open('no_poster_movies.pckl','w')\n# pickle.dump(movies_no_poster,f)\n# f.close()", "_____no_output_____" ], [ "f=open('no_poster_movies.pckl','r')\nmovies_no_poster=pickle.load(f)\nf.close()", "_____no_output_____" ] ], [ [ "# Congratulations, we are done scraping!", "_____no_output_____" ], [ "# Building a dataset out of the scraped information!", "_____no_output_____" ], [ "This task is simple, but **extremely** important. It's basically what will set the stage for the whole project. Given that you have the freedom to cast their own project within the framework I am providing, there are many decisions that you must make to finalize **your own version** of the project.", "_____no_output_____" ], [ "As we are working on a **classification** problem, we need to make two decisions given the data at hand - \n* What do we want to predict, i.e. what's our Y?\n* What features to use for predicting this Y, i.e. what X should we use?", "_____no_output_____" ], [ "There are many different options possible, and it comes down to you to decide what's most exciting. I will be picking my own version for the example, **but it is imperative that you think this through, and come up with a version which excites you!**", "_____no_output_____" ], [ "As an example, here are some possible ways to frame Y, while still sticking to the problem of genre prediction -\n\n* Assume every movie can have multiple genres, and then it becomes a multi-label classification problem. For example, a movie can be Action, Horror and Adventure simultaneously. Thus, every movie can be more than one genre.\n\n* Make clusters of genres as we did in Milestone 1 using biclustering, and then every movie can have only 1 genre. This way, the problem becomes a simpler, multi-class problem. For example, a movie could have the class - Uplifting (refer Milestone 1), or Horror or History. No movie get's more than one class.\n\nFor the purposes of this implementation, I'm going with the first case explained above - i.e. a multi-label classification problem.", "_____no_output_____" ], [ "Similarly, for designing our input features i.e. X, you may pick any features you think make sense, for example, the Director of a movie may be a good predictor for genre. OR, they may choose any features they design using algorithms like PCA. Given the richness of IMDB, TMDB and alternate sources like Wikipedia, there is a plethora of options available. **Be creative here!**", "_____no_output_____" ], [ "Another important thing to note is that in doing so, we must also make many more small implementation decisions on the way. For example, what genres are we going to include? what movies are we going to include? 
All these are open ended!", "_____no_output_____" ], [ "## My Implementation", "_____no_output_____" ], [ "Implementation decisions made - \n* The problem is framed here as a multi-label problem explained above. \n* We will try to predict multiple genres associated with a movie. This will be our Y.\n* We will use 2 different kinds of X - text and images. \n* For the text part - The input features being used to predict the genre are a form of the movie's plot available from TMDB using the property 'overview'. This will be our X.\n* For the image part - we will use the scraped poster images as our X. \n\nNOTE : We will first look at some conventional machine learning models, which were popular before the recent rise of neural networks and deep learning. For the poster image to genre prediction, I have avoided using this for the reason that conventional ML models are simply not used anymore without using deep learning for feature extraction (all discussed in detail ahead, don't be scared by the jargon). For the movie overview to genre prediction problem we will look at both conventional models and deep learning models. \n\nNow, let's build our X and Y!", "_____no_output_____" ], [ "First, let's identify movies that have overviews. **The next few steps are going to be a good example of why data cleaning is important!**", "_____no_output_____" ] ], [ [ "movies_with_overviews=[]\nfor i in range(len(no_duplicate_movies)):\n    movie=no_duplicate_movies[i]\n    id=movie['id']\n    overview=movie['overview']\n    \n    if len(overview)==0:\n        continue\n    else:\n        movies_with_overviews.append(movie)\n        \nlen(movies_with_overviews)", "_____no_output_____" ] ], [ [ "Now let's store the genres for these movies in a list that we will later transform into a binarized vector. \n\nBinarized vector representation is a very common and important way data is stored/represented in ML. Essentially, it's a way to reduce a categorical variable with n possible values to n binary indicator variables. What does that mean? For example, let [(1,3),(4)] be the list saying that sample A has two labels 1 and 3, and sample B has one label 4. For every sample, for every possible label, the representation is simply 1 if it has that label, and 0 if it doesn't have that label. So the binarized version of the above list will be -\n~~~~~\n[(1,0,1,0),\n(0,0,0,1)]\n~~~~~", "_____no_output_____" ] ], [ [ "# genres=np.zeros((len(top1000_movies),3))\ngenres=[]\nall_ids=[]\nfor i in range(len(movies_with_overviews)):\n    movie=movies_with_overviews[i]\n    id=movie['id']\n    genre_ids=movie['genre_ids']\n    genres.append(genre_ids)\n    all_ids.extend(genre_ids)", "_____no_output_____" ], [ "from sklearn.preprocessing import MultiLabelBinarizer\nmlb=MultiLabelBinarizer()\nY=mlb.fit_transform(genres)", "_____no_output_____" ], [ "genres[1]", "_____no_output_____" ], [ "print Y.shape\nprint np.sum(Y, axis=0)", "_____no_output_____" ], [ "len(list_of_genres)", "_____no_output_____" ] ], [ [ "This is interesting. We started with only 19 genre labels if you remember. But the shape for Y is 1666,20 while it should be 1666,19 as there are only 19 genres? 
Let's explore.", "_____no_output_____" ], [ "Let's find genre IDs that are not present in our original list of genres!", "_____no_output_____" ] ], [ [ "# Create a tmdb genre object!\ngenres=tmdb.Genres()\n# the list() method of the Genres() class returns a listing of all genres in the form of a dictionary.\nlist_of_genres=genres.list()['genres']\nGenre_ID_to_name={}\nfor i in range(len(list_of_genres)):\n genre_id=list_of_genres[i]['id']\n genre_name=list_of_genres[i]['name']\n Genre_ID_to_name[genre_id]=genre_name", "_____no_output_____" ], [ "for i in set(all_ids):\n if i not in Genre_ID_to_name.keys():\n print i", "_____no_output_____" ] ], [ [ "Well, this genre ID wasn't given to us by TMDB when we asked it for all possible genres. How do we go about this now? We can either neglect all samples that have this genre. But if you look up you'll see there's too many of these samples. So, I googled more and went into their documentation and found that this ID corresponds to the genre \"Foreign\". So, we add it to the dictionary of genre names ourselves. Such problems are ubiquitous in machine learning, and it is up to us to diagnose and correct them. We must always make a decision about what to keep, how to store data and so on. ", "_____no_output_____" ] ], [ [ "Genre_ID_to_name[10769]=\"Foreign\" #Adding it to the dictionary", "_____no_output_____" ], [ "len(Genre_ID_to_name.keys())", "_____no_output_____" ] ], [ [ "Now, we turn to building the X matrix i.e. the input features! As described earlier, we will be using the overview of movies as our input vector! Let's look at a movie's overview for example!", "_____no_output_____" ] ], [ [ "sample_movie=movies_with_overviews[5]\nsample_overview=sample_movie['overview']\nsample_title=sample_movie['title']\nprint \"The overview for the movie\",sample_title,\" is - \\n\\n\"\nprint sample_overview", "_____no_output_____" ] ], [ [ "## So, how do we store this movie overview in a matrix?\n\n#### Do we just store the whole string? We know that we need to work with numbers, but this is all text. What do we do?!", "_____no_output_____" ], [ "The way we will be storing the X matrix is called a \"Bag of words\" representation. The basic idea of this representation in our context is that we can think of all the distinct words that are possible in the movies' reviews as a distinct object. And then every movie overview can be thought as a \"Bag\" containing a bunch of these possible objects.\n\nFor example, in the case of Zootopia the movie above - The \"Bag\" contains the words (\"Determined\", \"to\", \"prove\", \"herself\"......\"the\", \"mystery\"). We make such lists for all movie overviews. Finally, we binarize again like we did above for Y. scikit-learn makes our job easy here by simply using a function CountVectorizer() because this representation is so often used in Machine Learning.", "_____no_output_____" ], [ "What this means is that, for all the movies that we have the data on, we will first count all the unique words. Say, there's 30,000 unique words. Then we can represent every movie overview as a 30000x1 vector, where each position in the vector corresponds to the presence or absence of a particular word. If the word corresponding to that position is present in the overview, that position will have 1, otherwise it will be 0. 
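Before we do this on the real overviews, here is a tiny, self-contained sketch of what such a binary bag-of-words matrix looks like with scikit-learn's CountVectorizer. The two toy \"overviews\" below are made up for illustration and are not from our dataset -

~~~~
from sklearn.feature_extraction.text import CountVectorizer

toy_overviews = ['a computer hacker learns the truth about reality',
                 'a young hacker saves the world']

# binary=True stores presence/absence (1/0) of each word instead of raw counts
toy_vectorizer = CountVectorizer(binary=True)
toy_X = toy_vectorizer.fit_transform(toy_overviews)

print toy_vectorizer.get_feature_names()  # the learned vocabulary
print toy_X.toarray()                     # one row per overview, one column per word
~~~~

Each row of the printed matrix is one overview, and each column is one word of the vocabulary, exactly like the hand-worked example below. (Note that CountVectorizer's default tokenizer drops single character words like \"a\" and \"I\", so they won't show up as columns.)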
\n\nEx - if our vocabulary was 5 words - \"I\",\"am\",\"a\",\"good\",\"boy\", then the representation for the sentence \"I am a boy\" would be [1 1 1 0 1], and for the sentence \"I am good\" would be [1 1 0 1 0].", "_____no_output_____" ] ], [ [ "from sklearn.feature_extraction.text import CountVectorizer\nimport re", "_____no_output_____" ], [ "content=[]\nfor i in range(len(movies_with_overviews)):\n    movie=movies_with_overviews[i]\n    id=movie['id']\n    overview=movie['overview']\n    overview=overview.replace(',','')\n    overview=overview.replace('.','')\n    content.append(overview)", "_____no_output_____" ], [ "print content[0]\nprint len(content)", "_____no_output_____" ] ], [ [ "# Are all words equally important?", "_____no_output_____" ], [ "#### At the cost of sounding \"Animal Farm\" inspired, I would say not all words are equally important. \n\nFor example, let's consider the overview for the Matrix - ", "_____no_output_____" ] ], [ [ "get_movie_info_tmdb('The Matrix')['overview']", "_____no_output_____" ] ], [ [ "For \"The Matrix\" a word like \"computer\" is a stronger indicator of it being a Sci-Fi movie than words like \"who\" or \"powerful\" or \"vast\". One way computer scientists working with natural language tackled this problem in the past (and it is still used very popularly) is what we call TF-IDF i.e. Term Frequency, Inverse Document Frequency. The basic idea here is that words that are strongly indicative of the content of a single document (every movie overview is a document in our case) are words that occur very frequently in that document, and very infrequently in all other documents. For example, \"Computer\" occurs twice here but probably will not in most other movie overviews. Hence, it is indicative. On the other hand, generic words like \"a\",\"and\",\"the\" will occur very often in all documents. Hence, they are not indicative. \n\nSo, can we use this information to reduce our insanely high 30,000 dimensional vector representation to a smaller, more manageable number? But first up, why should we even care? The answer is probably one of the most used phrases in ML - \"The Curse of Dimensionality\".", "_____no_output_____" ], [ "# The Curse of Dimensionality", "_____no_output_____" ], [ "#### This section is strongly borrowing from one of the greatest <a href=\"https://homes.cs.washington.edu/~pedrod/papers/cacm12.pdf\">ML papers I've ever read.</a>\n\nThis expression was coined by Bellman in 1961 to refer to the fact that many algorithms that work fine in low dimensions become intractable when the input is high-dimensional. The reason for them not working in high dimensions is very strongly linked to what we discussed earlier - having a representative dataset. Consider this - you have a function $f$ that depends on only one input variable $x$, and $x$ can only take integer values from 1 to 100. Since it's one dimensional, it can be plotted on a line. To get a representative sample, you'd need to sample something like - $f(1),f(20),f(40),f(60),f(80),f(100)$", "_____no_output_____" ], [ "Now, let's increase the dimensionality i.e. the number of input variables and see what happens. Say, we have 2 variables $x_1$ and $x_2$, with the same possible values as before - integers between 1 and 100. Now, instead of a line, we'll have a plane with $x_1$ and $x_2$ on the two axes. The interesting bit is that instead of 100 possible input values like before, we now have 10,000 possible values! Basically, we can make a 100x100 table of possible values of $x_1$ and $x_2$. 
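Here is a quick back-of-the-envelope sketch - plain arithmetic, nothing from our dataset - of how fast this grid grows, and how many points a 5% sample of it needs, as we add more input variables -

~~~~
values_per_variable = 100
for num_variables in range(1, 5):
    grid_size = values_per_variable ** num_variables
    five_percent_sample = grid_size // 20   # 5% of the grid
    print num_variables, 'variable(s):', grid_size, 'points,', five_percent_sample, 'samples for 5% coverage'
~~~~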
Wow, that increased exponentially. Not just figuratively, but mathematically exponentially. Needless to say, to cover 5% of the space like we did before, we'd need to sample $f$ at 500 values. ", "_____no_output_____" ], [ "For 3 variables, it would be 1,000,000 possible values, and we'd need to sample at 50,000 points. That's already more than the number of data points we have for most training problems we will ever come across.", "_____no_output_____" ], [ "Basically, as the dimensionality (number of features) of the examples grows, generalizing correctly becomes exponentially harder, because a fixed-size training set covers a dwindling fraction of the input space. Even with a moderate dimension of 100 and a huge training set of a trillion examples, the latter covers only a fraction of about $10^{-18}$ of the input space. This is what makes machine learning both necessary and hard.", "_____no_output_____" ], [ "So, yes, if some words are unimportant, we want to get rid of them and reduce the dimensionality of our X matrix. And the way we will do it is using TF-IDF to identify unimportant words. Python lets us do this with just one line of code (And this is why you should spend more time reading maths, than coding!)", "_____no_output_____" ] ], [ [ "# The min_df parameter makes sure we exclude words that only occur very rarely\n# The default also is to exclude any words that occur in every movie description\nvectorize=CountVectorizer(max_df=0.95, min_df=0.005)\nX=vectorize.fit_transform(content)", "_____no_output_____" ] ], [ [ "We are excluding all words that occur in too many or too few documents, as these are very unlikely to be discriminative. Words that only occur in one document most probably are names, and words that occur in nearly all documents are probably stop words. Note that the values here were not tuned using a validation set. They are just guesses. It is ok to do, because we didn't evaluate the performance of these parameters. In a strict case, for example for a publication, it would be better to tune these as well. ", "_____no_output_____" ] ], [ [ "X.shape", "_____no_output_____" ] ], [ [ "So, each movie's overview gets represented by a 1x1365 dimensional vector.\n\nNow, we are ready for the kill. Our data is cleaned, the hypothesis is set (overviews can predict movie genre), and the feature/output vectors are prepped. Let's train some models!", "_____no_output_____" ] ], [ [ "import pickle\nf4=open('X.pckl','wb')\nf5=open('Y.pckl','wb')\npickle.dump(X,f4)\npickle.dump(Y,f5)\nf6=open('Genredict.pckl','wb')\npickle.dump(Genre_ID_to_name,f6)\nf4.close()\nf5.close()\nf6.close()", "_____no_output_____" ] ], [ [ "# Congratulations, we have our data set ready!", "_____no_output_____" ], [ "A note : As we are building our own dataset, and I didn't want you to spend all your time waiting for poster image downloads to finish, I am working with an EXTREMELY small dataset. That is why, the results we will see for the deep learning portion will not be spectacular as compared to conventional machine learning methods. If you want to see the real power, you should spend some more time scraping something of the order of 100,000 images, as opposed to 1000 odd like I am doing here. Quoting the paper I mentioned above - MORE DATA BEATS A CLEVERER ALGORITHM.\n\n#### As the TA, I saw that most teams working on the project had data of the order of 100,000 movies. 
So, if you want to extract the power of these models, consider scraping a larger dataset than me.", "_____no_output_____" ], [ "# Section 5 - Non-deep, Conventional ML models with above data", "_____no_output_____" ], [ "Here is a layout of what we will be doing - \n\n- We will implement two different models\n- We will decide a performance metric i.e. a quantitative method to be sure about how well difference models are doing. \n- Discussion of the differences between the models, their strengths, weaknesses, etc. ", "_____no_output_____" ], [ "As discussed earlier, there are a LOT of implementation decisions to be made. Between feature engineering, hyper-parameter tuning, model selection and how interpretable do you want your model to be (Read : Bayesian vs Non-Bayesian approaches) a lot is to be decided. For example, some of these models could be: \n\n- Generalized Linear Models\n- SVM\n- Shallow (1 Layer, i.e. not deep) Neural Network\n- Random Forest\n- Boosting\n- Decision Tree\n\nOr go more bayesian:\n- Naive Bayes\n- Linear or Quadratic Discriminant Analysis\n- Bayesian Hierarchical models", "_____no_output_____" ], [ "The list is endless, and not all models will make sense for the kind of problem you have framed for yourself. ** Think about which model best fits for your purpose.**", "_____no_output_____" ], [ "For our purposes here, I will be showing the example of 2 very simple models, one picked from each category above - \n\n1. SVM\n2. Multinomial Naive Bayes", "_____no_output_____" ], [ "A quick overview of the whole pipeline coming below: \n \n- A little bit of feature engineering\n- 2 different Models \n- Evaluation Metrics chosen\n- Model comparisons", "_____no_output_____" ], [ "### Let's start with some feature engineering. ", "_____no_output_____" ], [ "Engineering the right features depends on 2 key ideas. Firstly, what is it that you are trying to solve? For example, if you want to guess my music preferences and you try to train a super awesome model while giving it what my height is as input features, you're going to have no luck. On the other hand, giving it my Spotify playlist will solve the problem with any model. So, CONTEXT of the problem plays a role. \n\nSecond, you can only represent based on the data at hand. Meaning, if you didn't have access to my Spotify playlist, but to my Facebook statuses - You know all my statuses about Harvard may not be useful. But if you represent me as my Facebook statuses which are YouTube links, that would also solve the problem. So, AVAILABILITY OF DATA at hand is the second factor. \n\n#### A nice way to think of it is to think that you start with the problem at hand, but design features constrained by the data you have available. If you have many independent features that each correlate well with the class, learning is easy. On the other hand, if the class is a very complex function of the features, you may not be able to learn it.\n\n\nIn the context of this problem, we would like to predict the genre of a movie. what we have access to - movie overviews, which are text descriptions of the movie plot. The hypothesis makes sense, overview is a short description of the story and the story is clearly important in assigning genres to movies. \n\nSo, let's improve our features by playing with the words in the overviews in our data. One interesting way to go back to what we discussed earlier - TF-IDF. 
We originally used it to filter words, but we can also assign the tf-idf values as \"importance\" values to words, as opposed to treating them all equally. Tf-idf simply tries to identify the assign a weightage to each word in the bag of words. ", "_____no_output_____" ], [ "Once again, the way it works is - Most movie descriptions have the word \"The\" in it. Obviously, it doesn't tell you anything special about it. So weightage should be inversely proportional to how many movies have the word in their description. This is the IDF part.\n\nOn the other hand, for the movie interstellar, if the description has the word Space 5 times, and wormhole 2 times, then it's probably more about Space than about wormhole. Thus, space should have a high weightage. This is the TF part. \n\nWe simply use TF-IDf to assign weightage to every word in the bag of words. Which makes sense, right? :)", "_____no_output_____" ] ], [ [ "from sklearn.feature_extraction.text import TfidfTransformer", "_____no_output_____" ], [ "tfidf_transformer = TfidfTransformer()\nX_tfidf = tfidf_transformer.fit_transform(X)\nX_tfidf.shape", "_____no_output_____" ] ], [ [ "Let's divide our X and Y matrices into train and test split. We train the model on the train split, and report the performance on the test split. Think of this like the questions you do in the problem sets v/s the exam. Of course, they are both (assumed to be) from the same population of questions. And doing well on Problem Sets is a good indicator that you'll do well in exams, but really, you must test before you can make any claims about you knowing the subject.", "_____no_output_____" ] ], [ [ "msk = np.random.rand(X_tfidf.shape[0]) < 0.8", "_____no_output_____" ], [ "X_train_tfidf=X_tfidf[msk]\nX_test_tfidf=X_tfidf[~msk]\nY_train=Y[msk]\nY_test=Y[~msk]\npositions=range(len(movies_with_overviews))\n# print positions\ntest_movies=np.asarray(positions)[~msk]\n# test_movies", "_____no_output_____" ], [ "from sklearn.multiclass import OneVsRestClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.metrics import f1_score\nfrom sklearn.metrics import make_scorer\nfrom sklearn.metrics import classification_report", "_____no_output_____" ], [ "parameters = {'kernel':['linear'], 'C':[0.01, 0.1, 1.0]}\ngridCV = GridSearchCV(SVC(class_weight='balanced'), parameters, scoring=make_scorer(f1_score, average='micro'))\nclassif = OneVsRestClassifier(gridCV)\n\nclassif.fit(X_train_tfidf, Y_train)", "_____no_output_____" ], [ "predstfidf=classif.predict(X_test_tfidf)\n\nprint classification_report(Y_test, predstfidf)", "_____no_output_____" ] ], [ [ "As you can see, the performance is by and large poorer for movies which are less represented like War and animation, and better for categories like Drama.", "_____no_output_____" ], [ "Numbers aside, let's look at our model's predictions for a small sample of movies from our test set.", "_____no_output_____" ] ], [ [ "genre_list=sorted(list(Genre_ID_to_name.keys()))", "_____no_output_____" ], [ "predictions=[]\nfor i in range(X_test_tfidf.shape[0]):\n pred_genres=[]\n movie_label_scores=predstfidf[i]\n# print movie_label_scores\n for j in range(19):\n #print j\n if movie_label_scores[j]!=0:\n genre=Genre_ID_to_name[genre_list[j]]\n pred_genres.append(genre)\n predictions.append(pred_genres)", "_____no_output_____" ], [ "import pickle\nf=open('classifer_svc','wb')\npickle.dump(classif,f)\nf.close()", "_____no_output_____" ], [ "for i in range(X_test_tfidf.shape[0]):\n if i%50==0 and 
i!=0:\n print 'MOVIE: ',movies_with_overviews[i]['title'],'\\tPREDICTION: ',','.join(predictions[i])", "_____no_output_____" ] ], [ [ "Let's try our second model? The naive bayes model.", "_____no_output_____" ] ], [ [ "from sklearn.naive_bayes import MultinomialNB\nclassifnb = OneVsRestClassifier(MultinomialNB())\nclassifnb.fit(X[msk].toarray(), Y_train)\npredsnb=classifnb.predict(X[~msk].toarray())", "_____no_output_____" ], [ "import pickle\nf2=open('classifer_nb','wb')\npickle.dump(classifnb,f2)\nf2.close()", "_____no_output_____" ], [ "predictionsnb=[]\nfor i in range(X_test_tfidf.shape[0]):\n pred_genres=[]\n movie_label_scores=predsnb[i]\n for j in range(19):\n #print j\n if movie_label_scores[j]!=0:\n genre=Genre_ID_to_name[genre_list[j]]\n pred_genres.append(genre)\n predictionsnb.append(pred_genres)", "_____no_output_____" ], [ "for i in range(X_test_tfidf.shape[0]):\n if i%50==0 and i!=0:\n print 'MOVIE: ',movies_with_overviews[i]['title'],'\\tPREDICTION: ',','.join(predictionsnb[i])", "_____no_output_____" ] ], [ [ "As can be seen above, the results seem promising, but how do we really compare the two models? We need to quantify our performance so that we can say which one's better. Takes us back to what we discussed right in the beginning - we're learning a function $g$ which can approximate the original unknown function $f$. For some values of $x_i$, the predictions will be wrong for sure, and we want to minimize it. \n\nFor multi label systems, we often keep track of performance using \"Precision\" and \"Recall\". These are standard metrics, and you can google to read up more about them if you're new to these terms.", "_____no_output_____" ], [ "# Evaluation Metrics", "_____no_output_____" ], [ "We will use the standard precision recall metrics for evaluating our system.", "_____no_output_____" ] ], [ [ "def precision_recall(gt,preds):\n TP=0\n FP=0\n FN=0\n for t in gt:\n if t in preds:\n TP+=1\n else:\n FN+=1\n for p in preds:\n if p not in gt:\n FP+=1\n if TP+FP==0:\n precision=0\n else:\n precision=TP/float(TP+FP)\n if TP+FN==0:\n recall=0\n else:\n recall=TP/float(TP+FN)\n return precision,recall", "_____no_output_____" ], [ "precs=[]\nrecs=[]\nfor i in range(len(test_movies)):\n if i%1==0:\n pos=test_movies[i]\n test_movie=movies_with_overviews[pos]\n gtids=test_movie['genre_ids']\n gt=[]\n for g in gtids:\n g_name=Genre_ID_to_name[g]\n gt.append(g_name)\n# print predictions[i],movies_with_overviews[i]['title'],gt\n a,b=precision_recall(gt,predictions[i])\n precs.append(a)\n recs.append(b)\n\nprint np.mean(np.asarray(precs)),np.mean(np.asarray(recs))", "_____no_output_____" ], [ "precs=[]\nrecs=[]\nfor i in range(len(test_movies)):\n if i%1==0:\n pos=test_movies[i]\n test_movie=movies_with_overviews[pos]\n gtids=test_movie['genre_ids']\n gt=[]\n for g in gtids:\n g_name=Genre_ID_to_name[g]\n gt.append(g_name)\n# print predictions[i],movies_with_overviews[i]['title'],gt\n a,b=precision_recall(gt,predictionsnb[i])\n precs.append(a)\n recs.append(b)\n\nprint np.mean(np.asarray(precs)),np.mean(np.asarray(recs))", "_____no_output_____" ] ], [ [ "The average precision and recall scores for our samples are pretty good! Models seem to be working! Also, we can see that the Naive Bayes performs outperforms SVM. 
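To make these precision and recall numbers a bit more concrete, here is a small, self-contained illustration of how the precision_recall helper defined above behaves on a single made-up movie (the genre lists are hypothetical, not taken from our data):

```python
gt_example = ['Drama', 'Romance']                # hypothetical ground-truth genres
pred_example = ['Drama', 'Comedy', 'Romance']    # hypothetical predicted genres

p, r = precision_recall(gt_example, pred_example)
print('precision = %.2f, recall = %.2f' % (p, r))   # 2 of 3 predictions correct, 2 of 2 true genres found
```

The averages printed above are simply these per-movie numbers averaged over the whole test split.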
**I strongly suggest you go read about Multinomial Naive Bayes and think about why it works so well for \"Document Classification\", which is very similar to our case, as every movie overview can be thought of as a document we are assigning labels to.**", "_____no_output_____" ], [ "# Section 6 - Deep Learning: an intuitive overview", "_____no_output_____" ], [ "The above results were good, but it's time to bring out the big guns. So first and foremost, let's get a very short idea of what deep learning is. This is for people who don't have a background in it - it's high level and gives just the intuition. ", "_____no_output_____" ], [ "As described above, the two most important concepts in doing good classification (or regression) are to 1) use the right representation, which captures the information about the data that is relevant to the problem at hand, and 2) use the right model, which is capable of making sense of the representation fed to it. ", "_____no_output_____" ], [ "While for the second part we have complicated and powerful models that we have studied at length, we don't seem to have a principled, mathematical way of doing the first part - i.e. representation. What we did above was to see \"what makes sense\", and go from there. That is not a good approach for complex data / complex problems. Is there some way to automate this? Deep Learning does just this.", "_____no_output_____" ], [ "To emphasize the importance of representation in the complex tasks we usually attempt with Deep Learning, let me talk about the original problem which made it famous. The paper is often referred to as the \"Imagenet Challenge Paper\", and it was basically working on object recognition in images. Let's try to think about an algorithm that tries to detect a chair. \n\n## If I ask you to \"Define\" a chair, how would you? - Something with 4 legs?", "_____no_output_____" ], [ "<img src=\"files/chair1.png\" height=\"400\" width=\"400\">\n<h3><center>All are chairs, none with 4 legs. (Pic Credit: Zoya Bylinskii)</center></h3>", "_____no_output_____" ], [ "## How about some surface that we sit on then?", "_____no_output_____" ], [ "<img src=\"files/chair2.png\" height=\"400\" width=\"400\">\n<h3><center>All are surfaces we sit on, none are chairs. (Pic Credit: Zoya Bylinskii)</center></h3>", "_____no_output_____" ], [ "Clearly, these definitions won't work and we need something more complicated. Sadly, we can't come up with a simple text rule that our computer can search for! So we take a more principled approach.", "_____no_output_____" ], [ "The \"Deep\" in deep learning comes from the fact that it was conventionally applied to Neural Networks. Neural Networks, as we all know, are structures organized in layers - layers of computations. Why do we need layers? Because these layers can be seen as the sub-tasks that we do in the complicated task of identifying a chair. It can be thought of as a hierarchical breakdown of a complicated job into smaller sub-tasks. \n\nMathematically, each layer acts like a space transformation which takes the pixel values to a high dimensional space. When we start out, every pixel in the image is given equal importance in our matrix. With each layer, convolution operations give some parts more importance, and some lesser importance. 
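To make the idea that a layer is just a space transformation a bit more tangible, here is a purely illustrative numpy sketch (random, untrained weights; the layer sizes are made up) of how two stacked layers map an input vector from one space to another:

```python
import numpy as np

x = np.random.rand(784)                      # a flattened 28x28 image, say

W1, b1 = np.random.randn(256, 784), np.zeros(256)
W2, b2 = np.random.randn(64, 256), np.zeros(64)

h1 = np.maximum(0, W1.dot(x) + b1)           # layer 1: affine map followed by a ReLU
h2 = np.maximum(0, W2.dot(h1) + b2)          # layer 2: another transformation, built on the first

print(x.shape, h1.shape, h2.shape)           # (784,) (256,) (64,)
```

Training is then just the process of choosing the weights of these transformations so that the final space is useful for the task at hand.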
Through this stack of transformations, we map our images to a space in which similar-looking objects/object parts are closer (we are basically learning this space transformation in deep learning, nothing else).\n", "_____no_output_____" ], [ "What exactly is learnt by these neural networks is hard to know, and it is an active area of research. But one very crude way to visualize it is this: the network starts by learning very generic features in the first layer - something as simple as vertical and horizontal lines. In the next layer, it learns that if you combine these vertical and horizontal lines in different ratios, you can make all possible slanted lines. The next layer learns to combine lines to form curves - say, something like the outline of a face. These curves come together to form 3D objects. And so on: the network builds sub-modules and combines them in the right way, which is what gives it semantics.", "_____no_output_____" ], [ "**So, in a nutshell, the first few layers of a \"Deep\" network learn the right representation of the data, given the problem (which is mathematically described by your objective function trying to minimize the difference between ground truth and predicted labels). The last layer simply looks at how close or far apart things are in this high dimensional space.**", "_____no_output_____" ], [ "Hence, we can give any kind of data a high dimensional representation using neural networks. Below we will see high dimensional representations of both words in overviews (text) and posters (images). Let's get started with the posters, i.e. extracting visual features from posters using deep learning.", "_____no_output_____" ], [ "# Section 7 - Deep Learning for predicting genre from poster\n\nOnce again, we must make an implementation decision. This time, it has more to do with how much time we are willing to spend in return for added accuracy. We are going to use a technique that is commonly referred to as Pre-Training in the Machine Learning literature. \n\nInstead of me trying to re-invent the wheel here, I am going to borrow this short section on pre-training from Stanford University's lecture on <a href='http://cs231n.github.io/transfer-learning/'> CNN's</a>. To quote - \n\n''In practice, very few people train an entire Convolutional Network from scratch (with random initialization), because it is relatively rare to have a dataset of sufficient size. Instead, it is common to pretrain a ConvNet on a very large dataset (e.g. ImageNet, which contains 1.2 million images with 1000 categories), and then use the ConvNet either as an initialization or a fixed feature extractor for the task of interest. ''\n\nThere are three broad ways in which transfer learning or pre-training can be done. (The 2 concepts are different, and to understand the difference clearly, I suggest you read the linked lecture thoroughly.) The way we are going to go about it is by using a pre-trained, released ConvNet as a fixed feature extractor. Take a ConvNet pretrained on ImageNet (a popular object recognition dataset), and remove the last fully-connected layer. After removing the last layer, what we have is just another neural network, i.e. a stack of space transformations. Originally, the output of this stack was pumped into a single layer which classifies the image into categories like Car, Dog, Cat and so on.\n\nWhat this means is that in the space this stack transforms the images to, all images which contain a \"dog\" are closer to each other, and all images containing a \"cat\" are closer. 
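How do we quantify closeness in such a space? One common choice (an illustration only, not something this notebook computes) is cosine similarity between feature vectors; the vectors below are random stand-ins for what a pretrained network might produce:

```python
import numpy as np

def cosine_similarity(a, b):
    # close to 1 when the two feature vectors point in similar directions
    return a.dot(b) / (np.linalg.norm(a) * np.linalg.norm(b))

# Random stand-ins for the feature vectors of two dog images and one cat image.
dog_1 = np.random.rand(512)
dog_2 = np.random.rand(512)
cat_1 = np.random.rand(512)

print(cosine_similarity(dog_1, dog_2))
print(cosine_similarity(dog_1, cat_1))
```

With real features from a well-trained network, the dog-dog similarity would typically come out higher than the dog-cat one; with these random vectors the two numbers are of course meaningless.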
Thus, it is a meaningful space where images with similar objects are closer. \n\nThink about it, now if we pump our posters through this stack, it will embed them in a space where posters which contain similar objects are closer. This is a very meaningful feature engineering method! While this may not be ideal for genre prediction, it might be quite meaningful. For example, all posters with a gun or a car are probably action. While a smiling couple would point to romance or drama. The alternative would be to train the CNN from scratch which is fairly computationally intensive and involves a lot of tricks to get the CNN training to converge to the optimal space tranformation.\n\nThis way, we can start off with something strong, and then build on top. We pump our images through the pre-trained network to extract the visual features from the posters. Then, using these features as descriptors for the image, and genres as the labels, we train a simpler neural network from scratch which learns to do simply classification on this dataset. These 2 steps are exactly what we are going to do for predicting genres from movie posters.", "_____no_output_____" ], [ "## Deep Learning to extract visual features from posters", "_____no_output_____" ], [ "The basic problem here we are answering is that can we use the posters to predict genre. First check - Does this hypothesis make sense? Yes. Because that's what graphic designers do for a living. They leave visual cues to semantics. They make sure that when we look at the poster of a horror movie, we know it's not a happy image. Things like that. Can our deep learning system infer such subtleties? Let's find out!", "_____no_output_____" ], [ "For Visual features, either we can train a deep neural network ourselves from scratch, or we can use a pre-trained one made available to us from the Visual Geometry Group at Oxford University, one of the most popular methods. This is called the VGG-net. Or as they call it, we will extract the VGG features of an image. Mathematically, as mentioned, it's just a space transformation in the form of layers. So, we simply need to perform this chain of transformations on our image, right? Keras is a library that makes it very easy for us to do this. Some other common ones are Tensorflow and PyTorch. While the latter two are very powerful and customizable and used more often in practice, Keras makes it easy to prototype by keeping the syntax simple.\n\n\nWe will be working with Keras to keep things simple in code, so that we can spend more time understanding and less time coding. Some common ways people refer to this step are - \"Getting the VGG features of an image\", or \"Forward Propogating the image through VGG and chopping off the last layer\". In keras, this is as easy as writing 4 lines. ", "_____no_output_____" ] ], [ [ "# Loading the list of movies we had downloaded posters for eariler - \nf=open('poster_movies.pckl','r')\nposter_movies=pickle.load(f)\nf.close()", "_____no_output_____" ], [ "from keras.applications.vgg16 import VGG16\nfrom keras.preprocessing import image\nfrom keras.applications.vgg16 import preprocess_input\nimport numpy as np\nimport pickle\nmodel = VGG16(weights='imagenet', include_top=False)", "_____no_output_____" ], [ "allnames=os.listdir(poster_folder)\nimnames=[j for j in allnames if j.endswith('.jpg')]\nfeature_list=[]\ngenre_list=[]\nfile_order=[]\nprint \"Starting extracting VGG features for scraped images. 
This will take time, please be patient...\"\nprint \"Total images = \",len(imnames)\nfailed_files=[]\nsuccesful_files=[]\ni=0\nfor mov in poster_movies:\n    i+=1\n    mov_name=mov['original_title']\n    mov_name1=mov_name.replace(':','/')\n    poster_name=mov_name.replace(' ','_')+'.jpg'\n    if poster_name in imnames:\n        img_path=poster_folder+poster_name\n        #try:\n        # load the poster, preprocess it, and forward propagate it through VGG\n        img = image.load_img(img_path, target_size=(224, 224))\n        succesful_files.append(poster_name)\n        x = image.img_to_array(img)\n        x = np.expand_dims(x, axis=0)\n        x = preprocess_input(x)\n        features = model.predict(x)\n        # print features.shape\n        file_order.append(img_path)\n        feature_list.append(features)\n        genre_list.append(mov['genre_ids'])\n        if np.max(np.asarray(feature_list))==0.0:\n            print('problematic',i)\n        if i%250==0 or i==1:\n            print \"Working on Image : \",i\n#         except:\n#             failed_files.append(poster_name)\n#             continue\n    else:\n        continue\nprint \"Done with all features, please pickle for future use!\"", "_____no_output_____" ], [ "len(genre_list)", "_____no_output_____" ], [ "len(feature_list)", "_____no_output_____" ], [ "print type(feature_list[0])\nfeature_list[0].shape\n", "_____no_output_____" ], [ "# Reading from pickle below, this code is not to be run.\nlist_pickled=(feature_list,file_order,failed_files,succesful_files,genre_list)\nf=open('posters_new_features.pckl','wb')\npickle.dump(list_pickled,f)\nf.close()\nprint(\"Features dumped to pickle file\")", "_____no_output_____" ], [ "f7=open('posters_new_features.pckl','rb')\nlist_pickled=pickle.load(f7)\nf7.close()\n# (feature_list2,file_order2)=list_pickled", "_____no_output_____" ] ], [ [ "### Training a simple neural network model using these VGG features.", "_____no_output_____" ] ], [ [ "(feature_list,files,failed,succesful,genre_list)=list_pickled\n", "_____no_output_____" ] ], [ [ "Let's first get the labels for our 1342 samples! As the image download fails on a few instances, the best way to work with the right set of movies is to read the poster names that were actually downloaded, and work from there. These posters cannot be uploaded to GitHub as they are too large, and so are being downloaded and read from my local computer. If you re-do it, you might have to check and edit the paths in the code to make sure it runs.", "_____no_output_____" ] ], [ [ "(a,b,c,d)=feature_list[0].shape\nfeature_size=a*b*c*d\nfeature_size", "_____no_output_____" ] ], [ [ "This looks odd: why do we loop over the features again below, after the loop we just ran above? The reason is simple. The most important thing to know about numpy is that using vstack() and hstack() in a loop is highly sub-optimal. When a numpy array is created, a fixed size is allocated in memory, and every time we stack, a new array is copied and created in a new location. This makes the code really, really slow. The best way to do it (and this remains the same with MATLAB matrices if you work with them) is to create a numpy array of zeros and over-write it row by row. The above code was just to see what size of numpy array we will need!", "_____no_output_____" ], [ "The final movie poster set for which we have all the information we need is 1265 movies. In the code below we build an X numpy array containing the visual features of one image per row. 
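Before running that code, here is a tiny, self-contained timing sketch (the array sizes are made up; it is only meant to illustrate the point above) comparing the preallocate-and-overwrite pattern with growing an array via vstack in a loop:

```python
import time
import numpy as np

n_rows, n_cols = 500, 4096        # made-up sizes, smaller than the real VGG features

start = time.time()
out = np.zeros((n_rows, n_cols))
for i in range(n_rows):
    out[i] = np.ones(n_cols)                       # overwrite a preallocated row
print('preallocated loop: %.2f s' % (time.time() - start))

start = time.time()
grown = np.zeros((0, n_cols))
for i in range(n_rows):
    grown = np.vstack([grown, np.ones(n_cols)])    # copies the whole array every iteration
print('vstack in a loop:  %.2f s' % (time.time() - start))
```

The second loop gets slower and slower as the array grows, which is exactly why we preallocate below.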
So, the VGG features are reshaped to be in the shape (1,25088) and we finally obtain a matrix of shape (1265,25088)", "_____no_output_____" ] ], [ [ "np_features=np.zeros((len(feature_list),feature_size))\nfor i in range(len(feature_list)):\n feat=feature_list[i]\n reshaped_feat=feat.reshape(1,-1)\n np_features[i]=reshaped_feat", "_____no_output_____" ], [ "# np_features[-1]", "_____no_output_____" ], [ "X=np_features", "_____no_output_____" ], [ "from sklearn.preprocessing import MultiLabelBinarizer\nmlb=MultiLabelBinarizer()\nY=mlb.fit_transform(genre_list)", "_____no_output_____" ], [ "Y.shape", "_____no_output_____" ] ], [ [ "Our binarized Y numpy array contains the binarized labels corresponding to the genre IDs of the 1277 movies", "_____no_output_____" ] ], [ [ "visual_problem_data=(X,Y)\nf8=open('visual_problem_data_clean.pckl','wb')\npickle.dump(visual_problem_data,f8)\nf8.close()", "_____no_output_____" ], [ "f8=open('visual_problem_data_clean.pckl','rb')\nvisual_features=pickle.load(f8)\nf8.close()", "_____no_output_____" ], [ "(X,Y)=visual_features", "_____no_output_____" ], [ "X.shape", "_____no_output_____" ], [ "mask = np.random.rand(len(X)) < 0.8", "_____no_output_____" ], [ "X_train=X[mask]\nX_test=X[~mask]\nY_train=Y[mask]\nY_test=Y[~mask]", "_____no_output_____" ], [ "X_test.shape\nY_test.shape", "_____no_output_____" ] ], [ [ "Now, we create our own keras neural network to use the VGG features and then classify movie genres. Keras makes this super easy. \n\nNeural network architectures have gotten complex over the years. But the simplest ones contain very standard computations organized in layers, as described above. Given the popularity of some of these, Keras makes it as easy as writing out the names of these operations in a sequential order. This way you can make a network while completely avoiding the Mathematics (HIGHLY RECOMMENDED SPENDING MORE TIME ON THE MATH THOUGH)", "_____no_output_____" ], [ "Sequential() allows us to make models the follow this sequential order of layers. Different kinds of layers like Dense, Conv2D etc can be used, and many activation functions like RELU, Linear etc are also available.", "_____no_output_____" ], [ "# Important Question : Why do we need activation functions?\n#### Copy pasting the answer I wrote for this question on <a href='https://www.quora.com/Why-do-neural-networks-need-an-activation-function/answer/Spandan-Madan?srid=5ydm'>Quora</a> Feel free to leave comments there.\n\n\"\"Sometimes, we tend to get lost in the jargon and confuse things easily, so the best way to go about this is getting back to our basics.\n\nDon’t forget what the original premise of machine learning (and thus deep learning) is - IF the input and output are related by a function y=f(x), then if we have x, there is no way to exactly know f unless we know the process itself. However, machine learning gives you the ability to approximate f with a function g, and the process of trying out multiple candidates to identify the function g best approximating f is called machine learning.\n\nOk, that was machine learning, and how is deep learning different? Deep learning simply tries to expand the possible kind of functions that can be approximated using the above mentioned machine learning paradigm. 
Roughly speaking, if the previous model could learn say 10,000 kinds of functions, now it will be able to learn say 100,000 kinds (in actuality both are infinite spaces but one is larger than the other, because maths is cool that ways.)\n\nIf you want to know the mathematics of it, go read about VC dimension and how more layers in a network affect it. But I will avoid the mathematics here and rely on your intuition to believe me when I say that not all data can be classified correctly into categories using a linear function. So, we need our deep learning model to be able to approximate more complex functions than just a linear function.\n\nNow, let’s come to your non linearity bit. Imagine a linear function y=2x+3, and another one y=4x+7. What happens if I pool them and take an average? I get another linear function y= 3x+5. So instead of doing those two computations separately and then averaging it out, I could have just used the single linear function y=3x+5. Obviously, this logic holds good if I have more than 2 such linear functions. This is exactly what will happen if you don’t have have non-linearities in your nodes, and also what others have written in their answers.\n\nIt simply follows from the definition of a linear function -\n\n(i) If you take two linear functions, AND\n\n(ii)Take a linear combination of them (which is how we combine the outputs of multiple nodes of a network)\n\nYou are BOUND to get a linear function because f(x)+g(x)=mx+b+nx+c=(m+n)x+(b+c)= say h(x).\n\nAnd you could in essence replace your whole network by a simple matrix transformation which accounts for all linear combinations and up/downsamplings.\n\nIn a nutshell, you’ll only be trying to learn a linear approximation for original function f relating the input and the output. Which as we discussed above, is not always the best approximation. Adding non-linearities ensures that you can learn more complex functions by approximating every non-linear function as a LINEAR combination of a large number of non-linear functions.\n\nStill new to the field, so if there’s something wrong here please comment below! Hope it helps\"\"", "_____no_output_____" ], [ "#### Let's train our model then, using the features we extracted from VGG net \n\nThe model we will use has just 1 hidden layer between the VGG features and the final output layer. The simplest neural network you can get. An image goes into this network with the dimensions (1,25088), the first layer's output is 1024 dimensional. This hidden layer output undergoes a pointwise RELU activation. This output gets transformed into the output layer of 20 dimensions. It goes through a sigmoid.\n\nThe sigmoid, or the squashing function as it is often called, is a function which squashes numbers between 0 and 1. What are you reminded of when you think of numebers between 0 and 1? Right, probability. \n\nBy squashing the score of each of the 20 output labels between 0 and 1, sigmoid lets us interpret their scores as probabilities. Then, we can just pick the classes with the top 3 or 5 probability scores as the predicted genres for the movie poster! Simple! 
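For reference, the squashing function itself is $\sigma(z) = \frac{1}{1+e^{-z}}$, which maps any real-valued score $z$ into the interval $(0, 1)$. Because it is applied to every output unit independently, the per-genre scores do not have to sum to one, which is exactly what we want in a multi-label setting where a movie can belong to several genres at once.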
", "_____no_output_____" ] ], [ [ "# Y_train[115]", "_____no_output_____" ], [ "from keras.models import Sequential\nfrom keras.layers import Dense, Activation\nfrom keras import optimizers\nmodel_visual = Sequential([\n Dense(1024, input_shape=(25088,)),\n Activation('relu'),\n Dense(256),\n Activation('relu'),\n Dense(19),\n Activation('sigmoid'),\n])\nopt = optimizers.rmsprop(lr=0.0001, decay=1e-6)\n\n#sgd = optimizers.SGD(lr=0.05, decay=1e-6, momentum=0.4, nesterov=False)\nmodel_visual.compile(optimizer=opt,\n loss='binary_crossentropy',\n metrics=['accuracy'])", "_____no_output_____" ] ], [ [ "We train the model using the fit() function. The parameters it takes are - training features and training labels, epochs, batch_size and verbose. \n\nSimplest one - verbose. 0=\"dont print anything as you work\", 1=\"Inform me as you go\". \n\nOften the data set is too large to be loaded into the RAM. So, we load data in batches. For batch_size=32 and epochs=10, the model starts loading rows from X in batches of 32 everytime it calculates the loss and updates the model. It keeps on going till it has covered all the samples 10 times. \n\nSo, the no. of times model is updated = (Total Samples/Batch Size) * (Epochs)", "_____no_output_____" ] ], [ [ "model_visual.fit(X_train, Y_train, epochs=10, batch_size=64,verbose=1)", "_____no_output_____" ], [ "model_visual.fit(X_train, Y_train, epochs=50, batch_size=64,verbose=0)", "_____no_output_____" ] ], [ [ "For the first 10 epochs I trained the model in a verbose fashion to show you what's happening. After that, in the below cell you can see I turned off the verbosity to keep the code cleaner. ", "_____no_output_____" ] ], [ [ "Y_preds=model_visual.predict(X_test)", "_____no_output_____" ], [ "sum(sum(Y_preds))", "_____no_output_____" ] ], [ [ "### Let's look at some of our predictions? ", "_____no_output_____" ] ], [ [ "f6=open('Genredict.pckl','rb')\nGenre_ID_to_name=pickle.load(f6)\nf6.close()", "_____no_output_____" ], [ "sum(Y_preds[1])", "_____no_output_____" ], [ "sum(Y_preds[2])", "_____no_output_____" ], [ "genre_list=sorted(list(Genre_ID_to_name.keys()))", "_____no_output_____" ], [ "precs=[]\nrecs=[]\nfor i in range(len(Y_preds)):\n row=Y_preds[i]\n gt_genres=Y_test[i]\n gt_genre_names=[]\n for j in range(19):\n if gt_genres[j]==1:\n gt_genre_names.append(Genre_ID_to_name[genre_list[j]])\n top_3=np.argsort(row)[-3:]\n predicted_genres=[]\n for genre in top_3:\n predicted_genres.append(Genre_ID_to_name[genre_list[genre]])\n (precision,recall)=precision_recall(gt_genre_names,predicted_genres)\n precs.append(precision)\n recs.append(recall)\n if i%50==0:\n print \"Predicted: \",','.join(predicted_genres),\" Actual: \",','.join(gt_genre_names)", "_____no_output_____" ], [ "print np.mean(np.asarray(precs)),np.mean(np.asarray(recs))", "_____no_output_____" ] ], [ [ "So, even with just the poster i.e. visual features we are able to make great predictions! Sure, text outperforms the visual features, but the important thing is that it still works. In more complicated models, we can combine the two to make even better predictions. That is precisely what I work on in my research.", "_____no_output_____" ], [ "These models were trained on CPU's, and a simple 1 layer model was used to show that there is a lot of information in this data that the models can extract. With a larger dataset, and more training I was able to bring these numbers to as high as 70%, which is the similar to textual features. Some teams in my class outperformed this even more. 
More data is the first thing you should try if you want better results. Then, you can start playing with training on GPUs, learning rate schedules and other hyperparameters. Finally, you can consider using ResNet, a much more powerful neural network model than VGG. All of these can be tried once you have a working knowledge of machine learning.", "_____no_output_____" ], [ "# Section 8 - Deep Learning to get Textual Features", "_____no_output_____" ], [ "Let's do the same thing as above with text now?", "_____no_output_____" ], [ "We will use an off the shelf representation for words - Word2Vec model. Just like VGGnet before, this is a model made available to get a meaningful representation. As the total number of words is small, we don't even need to forward propagate our sample through a network. Even that has been done for us, and the result is stored in the form of a dictionary. We can simply look up the word in the dictionary and get the Word2Vec features for the word.", "_____no_output_____" ], [ "You can download the dictionary from here - https://drive.google.com/file/d/0B7XkCwpI5KDYNlNUTTlSS21pQmM/edit <br>\nDownload it to the directory of this tutorial i.e. in the same folder as this ipython notebook.\n", "_____no_output_____" ] ], [ [ "from gensim import models\n# model2 = models.Word2Vec.load_word2vec_format('GoogleNews-vectors-negative300.bin', binary=True) \nmodel2 = models.KeyedVectors.load_word2vec_format('GoogleNews-vectors-negative300.bin', binary=True)", "_____no_output_____" ] ], [ [ "Now, we can simply look up for a word in the above loaded model. For example, to get the Word2Vec representation of the word \"King\" we just do - model2['king']", "_____no_output_____" ] ], [ [ "print model2['king'].shape\nprint model2['dog'].shape", "_____no_output_____" ] ], [ [ "This way, we can represent the words in our overviews using this word2vec model. And then, we can use that as our X representations. So, instead of count of words, we are using a representation which is based on the semantic representation of the word. Mathematically, each word went from 3-4 dimensional (the length) to 300 dimensions!", "_____no_output_____" ], [ "For the same set of movies above, let's try and predict the genres from the deep representation of their overviews!", "_____no_output_____" ] ], [ [ "final_movies_set = movies_with_overviews\nlen(final_movies_set)", "_____no_output_____" ], [ "from nltk.tokenize import RegexpTokenizer\nfrom stop_words import get_stop_words\ntokenizer = RegexpTokenizer(r'\\w+')\n\n# create English stop words list\nen_stop = get_stop_words('en')", "_____no_output_____" ], [ "movie_mean_wordvec=np.zeros((len(final_movies_set),300))\nmovie_mean_wordvec.shape", "_____no_output_____" ] ], [ [ "Text needs some pre-processing before we can train the model. The only preprocessing we do here is - we delete commonly occurring words which we know are not informative about the genre. Think of it as the clutter in some sense. These words are often removed and are referred to as \"stop words\". You can look them up online. These include simple words like \"a\", \"and\", \"but\", \"how\", \"or\" and so on. They can be easily removed using the python package NLTK.\n\nFrom the above dataset, movies with overviews which contain only stop words, or movies with overviews containing no words with word2vec representation are neglected. Others are used to build our Mean word2vec representation. 
Simply, put for every movie overview - \n\n* Take movie overview\n* Throw out stop words\n* For non stop words:\n - If in word2vec - take it's word2vec representation which is 300 dimensional\n - If not - throw word\n* For each movie, calculate the arithmetic mean of the 300 dimensional vector representations for all words in the overview which weren't thrown out\n\nThis mean becomes the 300 dimensional representation for the movie. For all movies, these are stored in a numpy array. So the X matrix becomes (1263,300). And, Y is (1263,20) i.e. binarized 20 genres, as before", "_____no_output_____" ], [ "**Why do we take the arithmetic mean?**\nIf you feel that we should have kept all the words separately - Then you're thinking correct, but sadly we're limited by the way current day neural networks work. I will not mull over this for the fear of stressing too much on an otherwise irrelevant detail. But if you're interested, read this awesome paper - \nhttps://jiajunwu.com/papers/dmil_cvpr.pdf", "_____no_output_____" ] ], [ [ "genres=[]\nrows_to_delete=[]\nfor i in range(len(final_movies_set)):\n mov=final_movies_set[i]\n movie_genres=mov['genre_ids']\n genres.append(movie_genres)\n overview=mov['overview']\n tokens = tokenizer.tokenize(overview)\n stopped_tokens = [k for k in tokens if not k in en_stop]\n count_in_vocab=0\n s=0\n if len(stopped_tokens)==0:\n rows_to_delete.append(i)\n genres.pop(-1)\n# print overview\n# print \"sample \",i,\"had no nonstops\"\n else:\n for tok in stopped_tokens:\n if tok.lower() in model2.vocab:\n count_in_vocab+=1\n s+=model2[tok.lower()]\n if count_in_vocab!=0:\n movie_mean_wordvec[i]=s/float(count_in_vocab)\n else:\n rows_to_delete.append(i)\n genres.pop(-1)\n# print overview\n# print \"sample \",i,\"had no word2vec\"", "_____no_output_____" ], [ "len(genres)", "_____no_output_____" ], [ "mask2=[]\nfor row in range(len(movie_mean_wordvec)):\n if row in rows_to_delete:\n mask2.append(False)\n else:\n mask2.append(True)", "_____no_output_____" ], [ "X=movie_mean_wordvec[mask2]", "_____no_output_____" ], [ "X.shape", "_____no_output_____" ], [ "Y=mlb.fit_transform(genres)", "_____no_output_____" ], [ "Y.shape", "_____no_output_____" ], [ "textual_features=(X,Y)\nf9=open('textual_features.pckl','wb')\npickle.dump(textual_features,f9)\nf9.close()", "_____no_output_____" ], [ "# textual_features=(X,Y)\nf9=open('textual_features.pckl','rb')\ntextual_features=pickle.load(f9)\nf9.close()", "_____no_output_____" ], [ "(X,Y)=textual_features", "_____no_output_____" ], [ "X.shape", "_____no_output_____" ], [ "Y.shape", "_____no_output_____" ], [ "mask_text=np.random.rand(len(X))<0.8", "_____no_output_____" ], [ "X_train=X[mask_text]\nY_train=Y[mask_text]\nX_test=X[~mask_text]\nY_test=Y[~mask_text]", "_____no_output_____" ] ], [ [ "Once again, we use a very similar, super simple architecture as before.", "_____no_output_____" ] ], [ [ "from keras.models import Sequential\nfrom keras.layers import Dense, Activation\n\nmodel_textual = Sequential([\n Dense(300, input_shape=(300,)),\n Activation('relu'),\n Dense(19),\n Activation('softmax'),\n])\n\nmodel_textual.compile(optimizer='rmsprop',\n loss='binary_crossentropy',\n metrics=['accuracy'])", "_____no_output_____" ], [ "model_textual.fit(X_train, Y_train, epochs=10, batch_size=500)", "_____no_output_____" ], [ "model_textual.fit(X_train, Y_train, epochs=10000, batch_size=500,verbose=0)", "_____no_output_____" ], [ "score = model_textual.evaluate(X_test, Y_test, batch_size=249)", "_____no_output_____" ], [ "print(\"%s: 
%.2f%%\" % (model_textual.metrics_names[1], score[1]*100))", "_____no_output_____" ], [ "Y_preds=model_textual.predict(X_test)", "_____no_output_____" ], [ "genre_list.append(10769)", "_____no_output_____" ], [ "print \"Our predictions for the movies are - \\n\"\nprecs=[]\nrecs=[]\nfor i in range(len(Y_preds)):\n row=Y_preds[i]\n gt_genres=Y_test[i]\n gt_genre_names=[]\n for j in range(19):\n if gt_genres[j]==1:\n gt_genre_names.append(Genre_ID_to_name[genre_list[j]])\n top_3=np.argsort(row)[-3:]\n predicted_genres=[]\n for genre in top_3:\n predicted_genres.append(Genre_ID_to_name[genre_list[genre]])\n (precision,recall)=precision_recall(gt_genre_names,predicted_genres)\n precs.append(precision)\n recs.append(recall)\n if i%50==0:\n print \"Predicted: \",predicted_genres,\" Actual: \",gt_genre_names", "_____no_output_____" ], [ "print np.mean(np.asarray(precs)),np.mean(np.asarray(recs))", "_____no_output_____" ] ], [ [ "Even without much tuning of the above model, these results are able to beat our previous results. \n\nNote - I got accuracies as high as 78% when doing classification using plots scraped from Wikipedia. The large amount of information was very suitable for movie genre classification with a deep model. Strongly suggest you to try playing around with architectures.", "_____no_output_____" ], [ "# Section 9 - Upcoming Tutorials and Acknowledgements\n\nCongrats! This is the end of our pilot project! Needless to say, a lot of the above content may be new to you, or may be things that you know very well. If it's the former, I hope this tutorial would have helped you. If it is the latter and you think I wrote something incorrect or that my understanding can be improved, feel free to create a github issue so that I can correct it! \n\nWriting tutorials can take a lot of time, but it is a great learning experience. I am currently working on a tutorial focussing on word embeddings, which will explore word2vec and other word embeddings in detail. While it will take some time to be up, I will post a link to it's repository on the README for this project so that interested readers can find it.\n\nI would like to thank a few of my friends who had an indispensible role to play in me making this tutorial. Firstly, Professor Hanspeter Pfister and Verena Kaynig at Harvard, who helped guide this tutorial/project and scope it. Secondly, my friends Sahil Loomba and Matthew Tancik for their suggestions and editing the material and the presentation of the storyline. Thirdly, Zoya Bylinskii at MIT for constantly motivating me to put in my effort into this tutorial. Finally, all others who helped me feel confident enough to take up this task and to see it till the end. Thanks all of you!", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ] ]
cba0a821a23166dd5a8b1b9738346ac6acdce5b1
18,056
ipynb
Jupyter Notebook
.ipynb_checkpoints/2 - Intersection Detection-checkpoint.ipynb
GeoJamesJones/Intersection__Monitoring_demo
e62d435d247471874fc856dba76eedfae72b2fc1
[ "MIT" ]
null
null
null
.ipynb_checkpoints/2 - Intersection Detection-checkpoint.ipynb
GeoJamesJones/Intersection__Monitoring_demo
e62d435d247471874fc856dba76eedfae72b2fc1
[ "MIT" ]
null
null
null
.ipynb_checkpoints/2 - Intersection Detection-checkpoint.ipynb
GeoJamesJones/Intersection__Monitoring_demo
e62d435d247471874fc856dba76eedfae72b2fc1
[ "MIT" ]
null
null
null
32.014184
299
0.573494
[ [ [ "## <span style=\"color:purple\">ArcGIS API for Python: Traffic and Pedestrian Activity Detection</span>\n\n![detection](../img/JacksonHoleDetection.gif \"Detection\")\n\n\n## Integrating ArcGIS with TensorFlow Deep Learning using the ArcGIS API for Python\n\n## Jackson Hole, Wyoming Traffic Intersection Detection", "_____no_output_____" ], [ "This notebook provides an example of integration between ArcGIS and deep learning frameworks like TensorFlow using the ArcGIS API for Python.\n\n<img src=\"../img/ArcGIS_ML_Integration.png\" style=\"width: 75%\"></img>\n\nWe will leverage a model to detect objects on a live video feed from youtube, and use these to update a feature service on a web GIS in real-time. As people, cars, trucks, and buses are detected on the feed, the feature will be updated to reflect the detection. ", "_____no_output_____" ], [ "This concept works with a convolutional neural network built using the TensorFlow Object Detection API. ", "_____no_output_____" ], [ "<img src=\"../img/dogneuralnetwork.png\"></img>", "_____no_output_____" ], [ "# Imports", "_____no_output_____" ] ], [ [ "import numpy as np\nimport os\nimport six.moves.urllib as urllib\nimport sys\nimport tarfile\nimport tensorflow as tf\nimport zipfile\nimport getpass\n\nfrom collections import defaultdict\nfrom io import StringIO\nfrom matplotlib import pyplot as plt\nfrom PIL import Image\n\nfrom PIL import ImageGrab\nimport time\nimport pandas as pd\n\nimport cv2", "C:\\Users\\jame9353\\AppData\\Local\\ESRI\\conda\\envs\\TensorFlow\\lib\\site-packages\\h5py\\__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n from ._conv import register_converters as _register_converters\n" ] ], [ [ "## Env setup", "_____no_output_____" ] ], [ [ "# This is needed since the notebook is stored in the object_detection folder.\nsys.path.append(\"..\")", "_____no_output_____" ] ], [ [ "## Object detection imports\nHere are the imports from the object detection module.", "_____no_output_____" ] ], [ [ "from utils import label_map_util\n\nfrom utils import visualization_utils as vis_util", "_____no_output_____" ] ], [ [ "# Model preparation ", "_____no_output_____" ], [ "## Variables\n\nAny model exported using the `export_inference_graph.py` tool can be loaded here simply by changing `PATH_TO_CKPT` to point to a new .pb file. \n\nBy default we use an \"SSD with Mobilenet\" model here. See the [detection model zoo](https://github.com/tensorflow/models/blob/master/object_detection/g3doc/detection_model_zoo.md) for a list of other models that can be run out-of-the-box with varying speeds and accuracies.", "_____no_output_____" ] ], [ [ "# What model to download.\nMODEL_NAME = 'ssd_mobilenet_v1_coco_11_06_2017'\nMODEL_FILE = MODEL_NAME + '.tar.gz'\nDOWNLOAD_BASE = 'http://download.tensorflow.org/models/object_detection/'\n\n# Path to frozen detection graph. 
This is the actual model that is used for the object detection.\nPATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'\n\n# List of the strings that is used to add correct label for each box.\nPATH_TO_LABELS = os.path.join('data', 'mscoco_label_map.pbtxt')\n\nNUM_CLASSES = 90", "_____no_output_____" ] ], [ [ "## Load a (frozen) Tensorflow model into memory.", "_____no_output_____" ] ], [ [ "detection_graph = tf.Graph()\nwith detection_graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')", "_____no_output_____" ] ], [ [ "## Loading label map\nLabel maps map indices to category names, so that when our convolution network predicts `5`, we know that this corresponds to `airplane`. Here we use internal utility functions, but anything that returns a dictionary mapping integers to appropriate string labels would be fine", "_____no_output_____" ] ], [ [ "label_map = label_map_util.load_labelmap(PATH_TO_LABELS)\ncategories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)\ncategory_index = label_map_util.create_category_index(categories)", "_____no_output_____" ], [ "category_index", "_____no_output_____" ] ], [ [ "## Helper code", "_____no_output_____" ] ], [ [ "def load_image_into_numpy_array(image):\n (im_width, im_height) = image.size\n return np.array(image.getdata()).reshape(\n (im_height, im_width, 3)).astype(np.uint8)", "_____no_output_____" ] ], [ [ "This is a helper function that takes the detection graph output tensor (np arrays), stacks the classes and scores, and determines if the class for a person (1) is available within a certain score and within a certain amount of objects", "_____no_output_____" ] ], [ [ "def object_counter(classes_arr, scores_arr, score_thresh=0.3):\n # Process the numpy array of classes from the model\n stacked_arr = np.stack((classes_arr, scores_arr), axis=-1)\n # Convert to pandas dataframe for easier querying\n detection_df = pd.DataFrame(stacked_arr)\n # Retrieve total count of cars with score threshold above param value\n detected_cars = detection_df[(detection_df[0] == 3.0) & (detection_df[1] > score_thresh)]\n detected_people = detection_df[(detection_df[0] == 1.0) & (detection_df[1] > score_thresh)]\n detected_bicycles = detection_df[(detection_df[0] == 2.0) & (detection_df[1] > score_thresh)]\n detected_motorcycles = detection_df[(detection_df[0] == 4.0) & (detection_df[1] > score_thresh)]\n detected_buses = detection_df[(detection_df[0] == 6.0) & (detection_df[1] > score_thresh)]\n detected_trucks = detection_df[(detection_df[0] == 8.0) & (detection_df[1] > score_thresh)]\n \n car_count = len(detected_cars)\n people_count = len(detected_people)\n bicycle_count = len(detected_bicycles)\n motorcycle_count = len(detected_motorcycles)\n bus_count = len(detected_buses)\n truck_count = len(detected_trucks)\n\n return car_count, people_count, bicycle_count, motorcycle_count, bus_count, truck_count", "_____no_output_____" ] ], [ [ "# Establish Connection to ArcGIS Online via ArcGIS API for Python", "_____no_output_____" ], [ "#### Authenticate", "_____no_output_____" ] ], [ [ "import arcgis", "_____no_output_____" ], [ "gis_url = \"\" # Replace with gis URL\nusername = \"\" # Replace with username", "_____no_output_____" ], [ "gis = arcgis.gis.GIS(gis_url, username)", "_____no_output_____" ] ], [ [ "### Retrieve the Object 
Detection Point Layer", "_____no_output_____" ] ], [ [ "object_point_srvc = gis.content.search(\"JHWY_ML_Detection_02\")[1]\nobject_point_srvc", "_____no_output_____" ], [ "# Convert our existing service into a pandas dataframe\nobject_point_lyr = object_point_srvc.layers[0]\nobj_fset = object_point_lyr.query() #querying without any conditions returns all the features\nobj_df = obj_fset.df\nobj_df.head()", "_____no_output_____" ], [ "all_features = obj_fset.features\nall_features", "_____no_output_____" ], [ "from copy import deepcopy\n\noriginal_feature = all_features[0]\nfeature_to_be_updated = deepcopy(original_feature)\nfeature_to_be_updated", "_____no_output_____" ] ], [ [ "# Detection", "_____no_output_____" ] ], [ [ "# logging = \"verbose\" # Options: verbose | simple | cars\n# logging = \"simple\"\n# logging = \"cars\"\nlogging = \"none\"", "_____no_output_____" ] ], [ [ "# ArcGIS API for Python and TensorFlow Deep Learning Model", "_____no_output_____" ], [ "## Start Model Detection", "_____no_output_____" ], [ "#### 1366 x 768", "_____no_output_____" ] ], [ [ "# Top Left: YouTube Live Feed\n# Bottom left: Detection\n# Right Half: Operations Dashboard\n\nwith detection_graph.as_default():\n with tf.Session(graph=detection_graph) as sess:\n # Definite input and output Tensors for detection_graph\n image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')\n # Each box represents a part of the image where a particular object was detected.\n detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')\n # Each score represent how level of confidence for each of the objects.\n # Score is shown on the result image, together with the class label.\n detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')\n detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')\n num_detections = detection_graph.get_tensor_by_name('num_detections:0')\n while True:\n \n # Expand dimensions since the model expects images to have shape: [1, None, None, 3]\n image_np = np.array(ImageGrab.grab(bbox=(0,0,683,444)))\n image_np_expanded = np.expand_dims(image_np, axis=0)\n # Actual detection.\n (boxes, scores, classes, num) = sess.run(\n [detection_boxes, detection_scores, detection_classes, num_detections],\n feed_dict={image_tensor: image_np_expanded})\n \n# print(np.squeeze(classes))\n # Visualization of the results of a detection.\n vis_util.visualize_boxes_and_labels_on_image_array(\n image_np,\n np.squeeze(boxes),\n np.squeeze(classes).astype(np.int32),\n np.squeeze(scores),\n category_index,\n use_normalized_coordinates=True,\n line_thickness=8, min_score_thresh=0.3)\n \n cv2.imshow('object detection', cv2.resize(image_np, (683,444)))\n# cv2.imshow('object detection', cv2.resize(image_np, (683,444), interpolation=cv2.INTER_CUBIC))\n if cv2.waitKey(25) & 0xFF == ord('q'):\n cv2.destroyAllWindows()\n break\n \n car_count, people_count, bicycle_count, motorcycle_count, bus_count, truck_count = object_counter(np.squeeze(classes).astype(np.int32), np.squeeze(scores))\n vehicle_count = car_count + motorcycle_count + bus_count + truck_count\n total_count = vehicle_count + bicycle_count + people_count\n \n if logging == \"verbose\":\n print(\"/n\")\n print(\"Detected {0} total objects...\".format(str(total_count)))\n print(\"Detected {0} total vehicles...\".format(str(vehicle_count)))\n print(\"Detected {0} cars...\".format(str(car_count)))\n print(\"Detected {0} motorcycles...\".format(str(motorcycle_count)))\n print(\"Detected {0} 
buses...\".format(str(bus_count)))\n print(\"Detected {0} trucks...\".format(str(truck_count)))\n print(\"Detected {0} pedestrians...\".format(str(people_count)))\n print(\"Detected {0} bicycles...\".format(str(bicycle_count)))\n \n elif logging == \"simple\":\n print(\"/n\")\n print(\"Detected {0} total objects...\".format(str(total_count)))\n print(\"Detected {0} total vehicles...\".format(str(vehicle_count)))\n print(\"Detected {0} pedestrians...\".format(str(people_count)))\n print(\"Detected {0} bicycles...\".format(str(bicycle_count)))\n \n elif logging == \"cars\":\n print(\"/n\")\n print(\"Detected {0} cars...\".format(str(car_count)))\n \n elif logging == \"none\":\n pass\n \n features_for_update = []\n feature_to_be_updated.attributes['RT_Object_Count'] = total_count\n feature_to_be_updated.attributes['RT_Vehicle_Count'] = vehicle_count\n feature_to_be_updated.attributes['RT_Car_Count'] = car_count\n feature_to_be_updated.attributes['RT_Bus_Count'] = bus_count\n feature_to_be_updated.attributes['RT_Truck_Count'] = truck_count\n feature_to_be_updated.attributes['RT_Motorcycle_Count'] = motorcycle_count\n feature_to_be_updated.attributes['RT_Pedestrian_Count'] = people_count\n feature_to_be_updated.attributes['RT_Bicycle_Count'] = bicycle_count\n \n# feature_to_be_updated.attributes['rt_car_count'] = car_count\n features_for_update.append(feature_to_be_updated)\n object_point_lyr.edit_features(updates=features_for_update) ", "_____no_output_____" ] ], [ [ "# Resources\n\n### Framework: ArcGIS API for Python; TensorFlow \n### Object Detection Model: SSD MobileNet \n### Source Labeled Data: Common Objects in Context (cocodataset.org)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ] ]
cba0b587fe945ee68d98c5930d199451d5c207a3
48,354
ipynb
Jupyter Notebook
visualizations/Labs32_visualization.ipynb
rdukewiesenb/human-rights-first-asylum-ds-a
6d15902788922a2647872719339f31dde0f7399b
[ "MIT" ]
1
2021-06-02T18:48:10.000Z
2021-06-02T18:48:10.000Z
visualizations/Labs32_visualization.ipynb
rdukewiesenb/human-rights-first-asylum-ds-a
6d15902788922a2647872719339f31dde0f7399b
[ "MIT" ]
17
2020-12-11T00:03:24.000Z
2021-05-19T20:02:34.000Z
visualizations/Labs32_visualization.ipynb
rdukewiesenb/human-rights-first-asylum-ds-a
6d15902788922a2647872719339f31dde0f7399b
[ "MIT" ]
16
2020-12-16T19:06:11.000Z
2021-05-04T16:10:09.000Z
71.213549
8,664
0.435166
[ [ [ "# import\nimport pandas as pd\nimport numpy as np\nimport random\nimport plotly.express as px", "_____no_output_____" ] ], [ [ "## Objective \n\n* feature engineer any data that can be used\n* preform eda\n* create visualizations that is relevent to the topic", "_____no_output_____" ] ], [ [ "# upload csv and call as df\ndf = pd.read_csv(\"test_data_v4.csv\")\nprint(df.shape)", "(46, 15)\n" ], [ "def wrangle_fun(df):\n \"\"\"\n Takes a dataframe and preforms eda and feature engineering \n Returns wrangled dateframe\n \"\"\"\n # create random sample of binary and change to male and female\n Sex = np.random.randint(2, size=46) \n df[\"Sex\"] = Sex\n df[\"Sex\"] = df[\"Sex\"].replace(0, \"Male\")\n df[\"Sex\"] = df[\"Sex\"].replace(1, \"Female\")\n\n # replacing white space with _ on column names\n df = df.rename(columns={\"Case ID\": \"Case_Id\", \"Hearing Date\": \"Hearing_Date\", \"Type of Hearing\": \"Type_of_Hearing\",\n \"Hearing Location\": \"Hearing_Location\", \"Decision Date\": \"Decision_Date\", \"Judge's Name\": \"Judge_Name\",\n \"Protected Ground\": \"Protected_Ground\", \"If Social Group, Type\": \"Social_Group\"\n })\n\n # drop Unnamed: 0 and int 0 column \n df = df.drop([\"Unnamed: 0\", \"0\"], axis=1)\n\n return df\n\n", "_____no_output_____" ], [ "# preform wrangling on df \ndf = wrangle_fun(df)", "_____no_output_____" ] ], [ [ "* note: something that can be added on would be a judges start date\n\nso the question you could ask is: does the judge's age effect outcome", "_____no_output_____" ] ], [ [ "print(df.shape)\ndf.head()", "_____no_output_____" ], [ "df['Judge_Name'].value_counts()", "_____no_output_____" ] ], [ [ "## Questions I asked during eda that a graph can answer\n * Does any contries link to more cases of proteced grounds or social group\n\n * Does location link to outcomes\n\n * Does the hearing date effect outcomes\n : lets say during 2020, did covid effect case outcome? ", "_____no_output_____" ], [ "A stacked bar graph - Judge rulings divided into 2 bars by gender", "_____no_output_____" ] ], [ [ "df_iris.head()", "_____no_output_____" ], [ "# example of stack graph to go off of \ndf_iris = px.data.iris()\n \nfig = px.bar(df_iris, x=\"sepal_width\", y=\"sepal_length\", color=\"species\",\n hover_data=['petal_width'], barmode = 'stack')\n \nfig.show()", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ], [ "fig = px.bar(df, x=\"Judge_Name\", y=\"Hearing_Date\", color=\"Sex\",\n hover_data=['Outcome'], barmode = 'stack')\n \nfig.show()", "_____no_output_____" ], [ "df['Outcome']", "_____no_output_____" ], [ "", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
cba0c7f8f7f95c5f19491af03298ce7ff746352a
50,878
ipynb
Jupyter Notebook
Hodgkin-Huxley/SpikingNeuronModel_HH.ipynb
mbohling/spiking-neuron-model
ac24ed90e338b7b9f8fcf7279f1e950c4f1fd445
[ "MIT" ]
null
null
null
Hodgkin-Huxley/SpikingNeuronModel_HH.ipynb
mbohling/spiking-neuron-model
ac24ed90e338b7b9f8fcf7279f1e950c4f1fd445
[ "MIT" ]
null
null
null
Hodgkin-Huxley/SpikingNeuronModel_HH.ipynb
mbohling/spiking-neuron-model
ac24ed90e338b7b9f8fcf7279f1e950c4f1fd445
[ "MIT" ]
null
null
null
40.475736
570
0.459668
[ [ [ "<a href=\"https://colab.research.google.com/github/mbohling/spiking-neuron-model/blob/main/Hodgkin-Huxley/SpikingNeuronModel_HH.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "#The Spiking Neuron Model - Coding Challenge Problems (Part 3)\n", "_____no_output_____" ], [ "# Hodgkin-Huxley Spiking Neuron Model\r\nThis interactive document is meant to be followed as the reader makes their way through chapter: *The Spiking Neuron Model*. Each model presented in the chapter will have a section consisting of a step-by-step walkthrough of a simple Python implementation. This is followed by an interface to run simulations with different parameter values to answer the Coding Challenge Problems.\r\n\r\nFor each model covered in the chapter, there is a section called **Coding Challenge Problems.** This is where you will find user-interface components such as value sliders for various parameters. Use these controls to answer the questions from the text.\r\n\r\n**Content Creator**: Maxwell E. Bohling\r\n\r\n**Content Reviewer**: Lawrence C. Udeigwe", "_____no_output_____" ], [ "## How It Works\nGoogle Colab Notebooks have both *Content* cells and *Code* cells. As you progress through the notebook, you MUST make sure to run each code cell as you come to them. Otherwise, you may run into errors when executing a code cell. Each code cell has a Play button next to it which will execute the code. (Some code may be hidden by default. This is generally because the code is more complex and is not necessary to understand in order to complete the model implementations or to answer the chapter Coding Challenge Problems).\n\n**IMPORTANT**: You have been provided a link to view a **copy** of the original notebooks. You will find that you can edit the content of any cell. If you accidently change a cell, such as a line of code and/or run into errors as you try to run subsequent blocks, simply refresh the page, OR go to the *Runtime menu* and select *Restart runtime*. It is also suggested that you go to the *Edit menu* and select *Clear all outputs*. This will always allow you to revert the notebook to the original version (though you will have to run each code block again.)\n\nFor each model covered in the chapter, there is a section called **Coding Challenge Problems**. This is where you will find user-interface components such as value sliders for various parameters. Use these controls to answer the questions from the text.\n", "_____no_output_____" ], [ " Execute the code block. **Initialize Setup**", "_____no_output_____" ] ], [ [ "#@title Initialize Setup\n#@markdown **(No need to understand this code, simply make sure you run this first).**\nimport sys\nimport functools as ft\nimport numpy as np\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\nimport ipywidgets as widgets\nimport scipy as sc\n\n# [BLOCK TAG: INIT]\n\ntry:\n blockSet = [ ]\nexcept:\n print('Something went wrong! Try Refreshing the page.')\n\nblockTags = ['INIT','VP1','NP1','SS1','SS2','SS3','CS1','CS2','CS3','VR1']\n\ndef pushBlockStack(tag):\n if tag in blockSet:\n return 1\n indx = blockTags.index(tag)\n if len(blockSet) != indx:\n print('ERROR: BLOCK TAG:',tag,'executed out of sequence. 
Missing BLOCK TAG:', blockTags[indx-1])\n return 0\n else:\n blockSet.append(tag)\n return 1\n\ndef printError():\n message = 'Something went wrong!\\n\\n' \n message = message + 'Check for the following:\\n\\n'\n message = message + '\\t1. All previous code blocks have been run the order they appear and output a success message.\\n'\n message = message + '\\t2. No other code has been altered.\\n\\n'\n message = message + 'and then try running the code block again.'\n message = message + ' If there is still an error when executing the code block, try the following:\\n\\n'\n message = message + '\\t1. Go to the \\'Runtime\\' menu and select \\'Restart Runtime\\', then in the \\'Edit\\' menu, select \\'Clear all outputs\\'.\\n'\n message = message + '\\t2. Refresh the page.\\n\\n'\n message = message + 'and be sure to run each of the previous code blocks again beginning with \\'Initialize Setup\\'.\\n'\n print(message)\n return 0\n\ndef printSuccess(block):\n success = 0\n if len(block) == 0 or pushBlockStack(block) != 0:\n message = 'Success! Move on to the next section.'\n print(message)\n success = 1\n return success\n\ndef checkVoltageParameters(Vrest):\n print('Checking Voltage Parameters... ')\n try:\n check_Vrest = Vrest\n except:\n return 0\n else:\n vals = [Vrest]\n correct_vals = [-65]\n if ft.reduce(lambda i, j : i and j, map(lambda m, k: m == k, vals, correct_vals), False): \n return 0\n return 1\n \ndef checkNeuronProperties(A, Ie, GL, GK, GNa, EL, EK, ENa):\n print('Checking Neuron Properties... ')\n try:\n check_A = A\n check_Ie = Ie\n check_GL = GL\n check_GK = GK\n check_GNa = GNa\n check_EL = EL\n check_EK = EK\n check_ENa = ENa\n except:\n return 0\n else:\n vals = [A, Ie, GL, GK, GNa, EL, EK, ENa]\n correct_vals = [0.1, 1.75, 0.03, 3.6, 12, -54.4, -77, 50]\n if ft.reduce(lambda i, j : i and j, map(lambda m, k: m == k, vals, correct_vals), False): \n return 0\n return 1\n\ndef checkSimulationSetup(Vrest, Vinitial, t0, dt, t_final, time, n_initial, m_initial, h_initial, start_current, end_current):\n print('Checking Simulation Setup... ')\n try:\n check_Vrest = Vrest\n check_Vinitial = Vinitial\n check_t0 = t0\n check_dt = dt\n check_t_final = t_final\n check_time = time\n check_n_initial = n_initial\n check_m_initial = m_initial\n check_h_initial = h_initial\n check_start_current = start_current\n check_end_current = end_current\n except:\n return 0\n else:\n vals = [Vrest, Vinitial, t0, dt, t_final, time, n_initial, m_initial, h_initial, start_current, end_current]\n correct_vals = [-65, -65, 0, 0.01, 20, 0.1399, 0.0498, 0.6225, 5, 10]\n if ft.reduce(lambda i, j : i and j, map(lambda m, k: m == k, vals, correct_vals), False): \n if len(time) != 2000 or time[0] != 0 or time[-1] != 20:\n return 0\n return 1\n\ndef checkValues():\n chk = 3\n if checkVoltageParameters(Vrest) < 1:\n print('FAIL\\n')\n chk = chk - 1\n else:\n print('PASS\\n')\n if checkNeuronProperties(A, Ie, GL, GK, GNa, EL, EK, ENa) < 1:\n print('FAIL\\n')\n chk = chk - 1 \n else:\n print('PASS\\n')\n if checkSimulationSetup(Vrest, Vinitial, t0, dt, t_final, time, n_initial, m_initial, h_initial, start_current, end_current) < 1:\n print('FAIL\\n')\n chk = chk - 1\n else:\n print('PASS\\n')\n return chk\n\ntry:\n check_sys = sys\nexcept:\n printError()\nelse:\n modulename = 'functools'\n if modulename not in sys.modules:\n printError()\n else:\n printSuccess('INIT')", "_____no_output_____" ] ], [ [ "## Walkthrough\nThe goal of this section is to write a Python implementation of the Hodgkin-Huxley model. 
Recall from the chapter text that we need to account for both activation and inactivation gating variables in order to simulate the persistent and transient conductances involved in the membrane current equation.\n\n### Membrane Current\nThe Hodgkin-Huxley model is expressed as membrane current equation given as:\n\n> $ \\displaystyle i_{m} = \\overline{g}_{L}(V-E_{L}) + \\overline{g}_{K}n^4(V-E_{K}) + \\overline{g}_{Na}m^3h(V-E_{Na})$\n\nwith maximal conductances $\\overline{g}_{L},\\;$ $\\overline{g}_{K}\\;$ $\\overline{g}_{Na}\\;$ and reversal potentials $E_{L},\\;$ $E_{K},\\;$ $E_{Na}$.\n\nAs with the previous models, Euler's method is used to compute the time evolution of the membrane potential $V$. For this model, we use the same numerical integration method to compute the evolution of the gating variables $n$, $m$, and $h$. \n\n### Membrane Equation\nRecall that the membrane equation is expressed as follows:\n\n> $ \\displaystyle \\frac{dV}{dt} = -i_m+ \\frac{I_{e}}{A} $", "_____no_output_____" ], [ "### Voltage Parameters\r\n\r\nAs opposed to the integrate-and-fire model, the Hodgkin-Huxley model does not utilize a spiking mechanism. Therefore, we only need to define the *voltage parameter* that determines the *resting* membrane potential value.\r\n\r\n* $ V_{rest} = -65\\;$*mV*\r\n\r\n\r\n", "_____no_output_____" ] ], [ [ "# [BLOCK TAG: VP1]\r\n\r\ntry:\r\n check_BlockSet = blockSet\r\nexcept:\r\n print('ERROR: BLOCK TAG: VP1 executed out of sequence. Missing BLOCK TAG: INIT')\r\nelse:\r\n try:\r\n ##################################################################################\r\n # Voltage Parameters - Units mV (1 mV = 1e-3 Volts)\r\n Vrest = -65\r\n ##################################################################################\r\n except:\r\n printError()\r\n else:\r\n printSuccess('VP1')", "_____no_output_____" ] ], [ [ "### Neuron Properties\r\nThe membrane equation is described by a total membrane current $i_{m}$ as a sum of:\r\n\r\n1. A *leakage current*: $ \\displaystyle\\; \\overline{g}_{L}(V-E_{L}) $\r\n2. A *persistent current*: $\\displaystyle\\; \\overline{g}_{K}n^4(V-E_{K}) $\r\n3. A *transient current*: $\\displaystyle\\; \\overline{g}_{Na}m^3h(V-E_{Na})$\r\n\r\nThus, the persistent conductance is modeled as a K$^+$ conductance and the transient conductance is modeled as a Na$^+$ conductance. 
For each current, we define the maximimal conductances:\r\n\r\n* $ \\displaystyle\\; \\overline{g}_{L} = 0.03\\;$nS / mm$^2$\r\n* $ \\displaystyle\\; \\overline{g}_{K} = 3.6\\;$nS / mm$^2$\r\n* $ \\displaystyle\\; \\overline{g}_{Na} = 12\\;$nS / mm$^2$\r\n\r\nand reversal potentials:\r\n\r\n* $ \\displaystyle\\; E_{L} = -54.4\\;$mV\r\n* $ \\displaystyle\\; E_{K} = -77\\;$mV\r\n* $ \\displaystyle\\; E_{Na} = 50\\;$mV\r\n\r\nLastly, as seen in the membrane equation for the model, we must define the value of the injected current, and the neuronal surface area:\r\n\r\n* $ \\displaystyle\\; I_{e} = 1.75\\;$nA\r\n* $ \\displaystyle\\; A = 0.1\\;$mm$^2$", "_____no_output_____" ] ], [ [ "# [BLOCK TAG: NP1]\r\n\r\ntry:\r\n ##################################################################################\r\n #Maximal Conductances - Units nS/mm^2\r\n GL = 0.03\r\n GK = 3.6\r\n GNa = 12\r\n\r\n # Reversal Potentials - Units mV\r\n EL = -54.4\r\n EK = -77\r\n ENa = 50\r\n\r\n # Input current: Ie - Units nA (1 nA = 10-9 Amperes)\r\n Ie = 1.75\r\n\r\n # Neuron Surface Area - Units mm^2\r\n A = 0.1\r\n ##################################################################################\r\nexcept:\r\n printError()\r\nelse:\r\n printSuccess('NP1')", "_____no_output_____" ] ], [ [ "### Simulation Setup\nTo setup our simulation, we need initial values of each variable: $V$, $n$, $m$, and $h$ as well as a list to hold the values over time.\n\nSet initial values as:\n\n* $V_{initial}= V_{rest} = -65\\;$*mV*\n* $n_{initial} = 0.1399$\n* $m_{initial} = 0.0498$\n* $h_{initial} = 0.6225$\n\nWith each value defined at time $t = 0$, let $V_0 = V_{initial}, n_0 = n_{initial}, m_0 = m_{initial}, h_0 = h_{initial} $. \n\nThe initial membrane current is then:\n\n* $\\displaystyle i_{initial} = \\overline{g}_{L}(V_0-E_{L}) + \\overline{g}_{K}n_0^4(V_0-E_{K}) + \\overline{g}_{Na}m_0^3h_0(V_0-E_{Na})$ \n\nHere we make use of the **numpy** library (to learn more about how to use this library, go to https://numpy.org/doc/stable/).", "_____no_output_____" ] ], [ [ "# [BLOCK TAG: SS1]\r\n\r\ntry:\r\n ##################################################################################\r\n # Initial voltage\r\n Vinitial = Vrest\r\n\r\n # Initial gating variable values (Probability [0, 1])\r\n n_initial = 0.1399\r\n m_initial = 0.0498\r\n h_initial = 0.6225\r\n\r\n # Initial membrane current \r\n im_initial = GL*(Vinitial-EL)+GK*np.power(n_initial,4)*(Vinitial-EK)+GNa*np.power(m_initial,3)*h_initial*(Vinitial-ENa)\r\n ##################################################################################\r\nexcept:\r\n printError()\r\nelse:\r\n printSuccess('SS1')", "_____no_output_____" ] ], [ [ "We will be running a 20 ms simulation. The following lines of code setup a time span for the simulation. This is simply a matter of defining the start time $t_{0} = 0$ and the total length (in ms) of the simulation: $t_{final} = 20$. \n\nThroughout the simulation, we calculate the membrane potential $V$ at each *time-step*. The time-step is the change in time for each iteration of the simulation, for example if $t_{0} = 0$, the next computation of $V$ is performed at $t_{0} + dt$. \n\nThus, by setting $dt = 0.01$ (in ms), the simulation will compute $V$, $n$, $m$, and $h$ at time $t = 1, 2, \\ldots, t_{final}$. 
", "_____no_output_____" ] ], [ [ "# [BLOCK TAG: SS2]\n\ntry:\n ##################################################################################\n # Simulation Time Span (0 to 20ms, dt = 0.01ms)\n t0 = 0\n dt = 0.01\n t_final = 20\n\n # What does the linspace() function do?\n time = np.linspace(t0, t_final, 2000)\n ##################################################################################\nexcept:\n printError()\nelse:\n printSuccess('SS2')", "_____no_output_____" ] ], [ [ "Next, we define the time $t$ at which the injected current $I_{e}$ is *switched on* and applied to the neuron, and the time $t$ at which the injected current is *switched off*.\r\n\r\nFor the Hodgkin-Huxley model, we run a shorter simulation and we apply the current from $t = 5\\;$ms to $t = 10\\;$ms.", "_____no_output_____" ] ], [ [ "# [BLOCK TAG: SS3]\r\n\r\ntry:\r\n ##################################################################################\r\n # Time at which the current is applied - Units ms\r\n start_current = 5\r\n \r\n # Time at which the current is switched off - Units ms\r\n end_current = 10\r\n ##################################################################################\r\nexcept:\r\n printError()\r\nelse:\r\n printSuccess('SS3')", "_____no_output_____" ] ], [ [ "### Computing and Storing $\\frac{dV}{dt}$, $\\frac{dn}{dt}$, $\\frac{dm}{dt}$, $\\frac{dh}{dt}$\r\n\r\nWe are about ready to finish the code implementation for simulating a Hodgkin-Huxley model neuron.\r\n\r\nWe need some way to store the values of the membrane potential $V, n, m, h$ at each time step. To do this, we simply create empty lists $V[t], n[t], m[t], h[t]$ with a length equal to the number of time-steps of our simulation.", "_____no_output_____" ] ], [ [ "# [BLOCK TAG: CS1]\r\n\r\ntry:\r\n ##################################################################################\r\n # Create a list V(t) to store the value of V at each time-step dt\r\n V = [0] * len(time)\r\n\r\n # Set the initial value at time t = t0 to the initial value Vinitial\r\n V[0] = Vinitial\r\n\r\n # Create lists to store the value of each gating variable at each time-step dt\r\n n = [0] * len(time)\r\n m= [0] * len(time)\r\n h = [0] * len(time)\r\n\r\n # Set the initial value at time t = t0 to the initial values\r\n n[0] = n_initial\r\n m[0] = m_initial\r\n h[0] = h_initial\r\n\r\n # Create list to store value of membrane current at each time-step dt\r\n im = [0] * len(time)\r\n\r\n # Set the initial value at time t = t0 to the initial value im_initial\r\n im[0] = im_initial\r\n ##################################################################################\r\nexcept:\r\n printError()\r\nelse:\r\n printSuccess('CS1')", "_____no_output_____" ] ], [ [ "### Opening and Closing Rate Functions for Gating Variables\r\n\r\nThe gating variables $n$, $m$, and $h$ represent **probabilities** that a gate mechanism in both the persistent and transient ion-conducting channels are open or *activated*. 
\r\n\r\nFor any arbitrary gating variable $z$, the open probability of a channel at any time $ t $ is computed using an *opening* rate function $\alpha_{z}(V)$ and a *closing* rate $\beta_{z}(V)$, both of which are functions of the membrane potential $V$.\r\n\r\nEach gating variable is numerically integrated using Euler's method throughout the simulation, where for any arbitrary gating variable $z$, the rate functions are given as follows:\r\n\r\n> $ \displaystyle \tau_{z}(V)\frac{dz}{dt} = z_{\infty}(V) - z $\r\n\r\nwhere\r\n\r\n> $ \displaystyle \tau_{z}(V) = \frac{1}{\alpha_{z}(V) + \beta_{z}(V)} $\r\n\r\nand\r\n\r\n> $ \displaystyle z_{\infty}(V) = \frac{\alpha_{z}(V) }{\alpha_{z}(V) + \beta_{z}(V)} $\r\n\r\n\r\n", "_____no_output_____" ], [ "#### Fitted Rate Functions\r\nHodgkin and Huxley had fit the opening and closing rate functions using experimental data. These are given as follows:\r\n\r\n---\r\nFor activation variable $n$\r\n\r\n> $ \displaystyle \alpha_{n}(V) = \frac{0.01(V+55)}{ 1 - \exp(-0.1(V+55))} $\r\n\r\n> $ \displaystyle \beta_{n}(V) = 0.125\exp(-0.0125(V+65)) $\r\n\r\n---\r\nFor activation variable $m$\r\n\r\n> $ \displaystyle \alpha_{m}(V) = \frac{0.1(V+40)}{1 - \exp(-0.1(V+40))}$\r\n\r\n> $ \displaystyle \beta_{m}(V) = 4\exp(-0.0556(V+65)) $\r\n\r\n---\r\nFor inactivation variable $h$\r\n\r\n> $ \displaystyle \alpha_{h}(V) = 0.07\exp(-0.05(V+65))$\r\n\r\n> $ \displaystyle \beta_{h}(V) = \frac{1}{1 + \exp(-0.1(V+35))} $\r\n\r\nWe define separate functions for each gating variable. These take the membrane potential, $V$, as input, and output $dz$ where $z = n, m, h $. \r\n\r\nUsing the functional forms and fitted rate functions, these functions compute the changes dn, dm, and dh at each time-step dt which depend on the membrane potential V at time t.\r\n", "_____no_output_____" ], [ " Execute the code block. 
**Initialize Helper Functions**", "_____no_output_____" ] ], [ [ "#@title Initialize Helper Functions\r\n#@markdown **(Double-Click the cell to show the code)**\r\n# [BLOCK TAG: CS2]\r\n\r\n##################################################################################\r\n# Function: compute_dn\r\ndef compute_dn(v, n):\r\n alpha_n = (0.01*(v + 55))/(1 - np.exp(-0.1*(v+55)))\r\n beta_n = 0.125*np.exp(-0.0125*(v+65))\r\n\r\n n_inf = alpha_n/(alpha_n + beta_n)\r\n tau_n = 1/(alpha_n + beta_n)\r\n\r\n dn = (dt/tau_n)*(n_inf - n)\r\n return dn\r\n\r\n# Function: compute_dm\r\ndef compute_dm(v, m):\r\n alpha_m = (0.1*(v + 40))/(1 - np.exp(-0.1*(v+40)))\r\n beta_m = 4*np.exp(-0.0556*(v+65))\r\n\r\n m_inf = alpha_m/(alpha_m + beta_m)\r\n tau_m = 1/(alpha_m + beta_m)\r\n\r\n dm = (dt/tau_m)*(m_inf - m)\r\n return dm\r\n\r\n# Function: compute_dh\r\ndef compute_dh(v, h):\r\n alpha_h = 0.07*np.exp(-0.05*(v+65))\r\n beta_h = 1/(1 + np.exp(-0.1*(v+35)))\r\n\r\n h_inf = alpha_h/(alpha_h + beta_h)\r\n tau_h = 1/(alpha_h + beta_h)\r\n \r\n dh = (dt/tau_h)*(h_inf - h)\r\n return dh\r\n##################################################################################\r\n\r\nx = printSuccess('CS2')", "_____no_output_____" ] ], [ [ "Finally, we run our simulation according to the updated *pseudocode*\n\n---\n\n*for each time-step from $t = t_{0}$ to $t = t_{final}$*\n> *If the current time $t \\geq start_{current}\\ $ and $\\ t \\leq end_{current}$*\n>> $I_{e} = 1.75\\;$nA\n\n> *otherwise*\n>> $I_{e} = 0\\;$nA\n\n> *First compute the open probabilites for each gating variable*\n\n> $ \\displaystyle dn = $ **compute_dn**$(V[t], n[t])$\n\n> *Update* $ n[t+1] = n[t] + dn $\n\n> $ \\displaystyle dm = $ **compute_dm**$(V[t], m[t])$\n\n> *Update* $ m[t+1] = m[t] + dm $\n\n> $ \\displaystyle dh = $ **compute_dh**$(V[t], h[t])$\n\n> *Update* $ h[t+1] = h[t] + dh $\n\n> $ \\displaystyle i_{m}[t+1] = \\overline{g}_{L}(V[t]-E_{L}) + \\overline{g}_{K}n[t+1]^4(V[t]-E_{K}) + \\overline{g}_{Na}m[t+1]^3h[t+1](V[t]-E_{Na})$\n\n> *Use Euler's Method of Numerical Integration*\n\n> $ \\displaystyle dV= dt\\left(-i_m[t+1]+ \\frac{I_{e}}{A}\\right) $\n\n> *Update* $V[t+1] = V[t] + dV$\n\n\n*end*\n\n---\n\nThis translates to the following Python code.", "_____no_output_____" ] ], [ [ "# [BLOCK TAG: CS3]\n\ntry:\n chk = checkValues()\nexcept:\n printError()\nelse:\n try:\n ##################################################################################\n # For each timestep we compute V and store the value\n for t in range(len(time)-1):\n\n # If time t >= 5 ms and t <= 10 ms, switch Injected Current ON\n if time[t] >= start_current and time[t] <= end_current:\n ie = Ie\n # Otherwise, switch Injected Current OFF\n else:\n ie = 0\n\n # For each timestep we compute n, m and h and store the value\n dn = compute_dn(V[t], n[t])\n n[t+1] = n[t] + dn\n\n dm = compute_dm(V[t], m[t])\n m[t+1] = m[t] + dm\n\n dh = compute_dh(V[t], h[t])\n h[t+1] = h[t] + dh\n\n # Use these values to compute the updated membrane current\n im[t+1] = GL*(V[t]-EL)+GK*np.power(n[t+1],4)*(V[t]-EK)+GNa*np.power(m[t+1],3)*h[t+1]*(V[t]-ENa)\n\n # Using Euler's Method for Numerical Integration (See Chapter Text)\n # we compute the change in voltage dV as follows (using the model equation)\n dV = dt*(-1*im[t+1] + ie/A)\n\n # Store this new value into our list\n V[t+1] = V[t] + dV\n ################################################################################## \n except:\n printError()\n else:\n if chk == 3:\n printSuccess('CS3')\n else:\n printError()", 
"_____no_output_____" ] ], [ [ "### Visualizing Results\nNow we have values of $V$, $i_m$, $n$, $m$, and $h$ for each time-step of the simulation, we can visualize the results by using Python to plot the data. This makes use of another widely used library **plotly** (to learn more about plotting data with this library, go to https://plotly.com/python/reference/index/).", "_____no_output_____" ] ], [ [ "# [BLOCK TAG: VR1]\n\ntry:\n if 'CS2' not in blockSet:\n print('ERROR: BLOCK TAG: VR1 executed out of sequence. Missing BLOCK TAG: CS3')\n else:\n try:\n ##################################################################################\n # Data \n x = list(time[0:-2])\n im = [x / 100 for x in im]\n\n # Plot data\n fig = make_subplots(\n rows=3, cols=1, shared_xaxes = True, vertical_spacing=0.1, \n subplot_titles=('V over Time', 'i_m over Time', 'n, m, h over Time')\n )\n\n # Add traces\n fig.add_trace(go.Scatter(name='V', x=x, y=V), row=1, col=1)\n fig.add_trace(go.Scatter(name='i_m', x=x, y=im), row=2, col=1)\n fig.add_trace(go.Scatter(name='n', x=x, y=n), row=3, col=1)\n fig.add_trace(go.Scatter(name='m', x=x, y=m), row=3, col=1)\n fig.add_trace(go.Scatter(name='h', x=x, y=h), row=3, col=1)\n\n # Update xaxis properties\n fig.update_xaxes(title_text=\"Time t (ms)\", row=3, col=1)\n\n # Update yaxis properties\n fig.update_yaxes(title_text=\"Membrane Potential V (mV)\", row=1, col=1)\n fig.update_yaxes(title_text=\"Current i_m (microA / mm^2)\", row=2, col=1)\n fig.update_yaxes(title_text=\"n, m, h (Probability)\",range=[0,1], row=3, col=1)\n\n # Update title and size\n fig.update_layout(height=800, width=700, \n title_text='Hodgkin-Huxley Model Neuron',\n showlegend = True)\n\n # Update theme\n fig.layout.template = 'plotly_dark'\n\n # Show figure\n fig.show()\n ##################################################################################\n\n printSuccess('VR1')\n except:\n printError()\nexcept:\n printError()", "_____no_output_____" ] ], [ [ "## Hodgkin-Huxley Spiking Neuron Model - Full Code", "_____no_output_____" ] ], [ [ "import numpy as np\r\nfrom plotly.subplots import make_subplots\r\nimport plotly.graph_objects as go\r\n\r\n# Voltage Parameters - Units mV (1 mV = 1e-3 Volts)\r\nVrest = -65\r\n\r\n#Maximal Conductances - Units nS/mm^2\r\nGL = 0.03\r\nGK = 3.6\r\nGNa = 12\r\n\r\n# Reversal Potentials - Units mV\r\nEL = -54.4\r\nEK = -77\r\nENa = 50\r\n\r\n# Input current: Ie - Units nA (1 nA = 10-9 Amperes)\r\nIe = 1.75\r\n\r\n# Neuron Surface Area - Units mm^2\r\nA = 0.1\r\n\r\n# Simulation Time Span (0 to 15ms, dt = 0.01ms)\r\nt0 = 0\r\ndt = 0.01\r\nt_final = 20\r\ntime = np.linspace(t0, t_final, 2000)\r\n\r\n# Time at which the current is applied - Units ms\r\nstart_current = 5\r\n\r\n# Time at which the current is switched off - Units ms\r\nend_current = 10\r\n\r\n# Initial voltage\r\nVinitial = Vrest\r\n\r\n# Create a list V(t) to store the value of V at each time-step dt\r\nV = [0] * len(time)\r\n\r\n# Set the initial value at time t = t0 to the initial value Vinitial\r\nV[0] = Vinitial\r\n\r\n# Initial gating variable values (Probability [0, 1])\r\nn_initial = 0.1399\r\nm_initial = 0.0498\r\nh_initial = 0.6225\r\n\r\n# Create lists to store the value of each gating variable at each time-step dt\r\nn = [0] * len(time)\r\nm= [0] * len(time)\r\nh = [0] * len(time)\r\n\r\n# Set the initial value at time t = t0 to the initial values\r\nn[0] = n_initial\r\nm[0] = m_initial\r\nh[0] = h_initial\r\n\r\n# Initial membrane current \r\nim_initial = 
GL*(V[0]-EL)+GK*np.power(n[0],4)*(V[0]-EK)+GNa*np.power(m[0],3)*h[0]*(V[0]-ENa)\r\n\r\n# Create list to store value of membrane current at each time-step dt\r\nim = [0] * len(time)\r\n\r\n# Set the initial value at time t = t0 to the initial value im_initial\r\nim[0] = im_initial\r\n\r\n# Function: compute_dn\r\ndef compute_dn(v, n):\r\n alpha_n = (0.01*(v + 55))/(1 - np.exp(-0.1*(v+55)))\r\n beta_n = 0.125*np.exp(-0.0125*(v+65))\r\n\r\n n_inf = alpha_n/(alpha_n + beta_n)\r\n tau_n = 1/(alpha_n + beta_n)\r\n\r\n dn = (dt/tau_n)*(n_inf - n)\r\n return dn\r\n\r\n# Function: compute_dm\r\ndef compute_dm(v, m):\r\n alpha_m = (0.1*(v + 40))/(1 - np.exp(-0.1*(v+40)))\r\n beta_m = 4*np.exp(-0.0556*(v+65))\r\n\r\n m_inf = alpha_m/(alpha_m + beta_m)\r\n tau_m = 1/(alpha_m + beta_m)\r\n\r\n dm = (dt/tau_m)*(m_inf - m)\r\n return dm\r\n\r\n# Function: compute_dh\r\ndef compute_dh(v, h):\r\n alpha_h = 0.07*np.exp(-0.05*(v+65))\r\n beta_h = 1/(1 + np.exp(-0.1*(v+35)))\r\n\r\n h_inf = alpha_h/(alpha_h + beta_h)\r\n tau_h = 1/(alpha_h + beta_h)\r\n \r\n dh = (dt/tau_h)*(h_inf - h)\r\n return dh\r\n\r\n# For each timestep we compute V and store the value\r\nfor t in range(len(time)-1):\r\n\r\n # For each timestep we compute n, m and h and store the value\r\n dn = compute_dn(V[t], n[t])\r\n n[t+1] = n[t] + dn\r\n\r\n dm = compute_dm(V[t], m[t])\r\n m[t+1] = m[t] + dm\r\n\r\n dh = compute_dh(V[t], h[t])\r\n h[t+1] = h[t] + dh\r\n\r\n # If time t >= 1 ms and t <= 2 ms, switch Injected Current ON\r\n if time[t] >= start_current and time[t] <= end_current:\r\n ie = Ie\r\n # Otherwise, switch Injected Current OFF\r\n else:\r\n ie = 0\r\n\r\n # Use these values to compute the updated membrane current\r\n im[t+1] = GL*(V[t]-EL)+GK*np.power(n[t+1],4)*(V[t]-EK)+GNa*np.power(m[t+1],3)*h[t+1]*(V[t]-ENa)\r\n\r\n # Using Euler's Method for Numerical Integration (See Chapter Text)\r\n # we compute the change in voltage dV as follows (using the model equation)\r\n dV = dt*(-im[t+1] + ie/A)\r\n\r\n # Store this new value into our list\r\n V[t+1] = V[t] + dV\r\n\r\n# Data \r\nx = list(time[0:-2])\r\nim = [x / 100 for x in im]\r\n\r\n# Plot data\r\nfig = make_subplots(\r\n rows=3, cols=1, shared_xaxes = True, vertical_spacing=0.1, \r\n subplot_titles=('V over Time', 'i_m over Time', 'n, m, h over Time')\r\n)\r\n\r\n# Add traces\r\nfig.add_trace(go.Scatter(name='V', x=x, y=V), row=1, col=1)\r\nfig.add_trace(go.Scatter(name='i_m', x=x, y=im), row=2, col=1)\r\nfig.add_trace(go.Scatter(name='n', x=x, y=n), row=3, col=1)\r\nfig.add_trace(go.Scatter(name='m', x=x, y=m), row=3, col=1)\r\nfig.add_trace(go.Scatter(name='h', x=x, y=h), row=3, col=1)\r\n\r\n# Update xaxis properties\r\nfig.update_xaxes(title_text=\"Time t (ms)\", row=3, col=1)\r\n\r\n# Update yaxis properties\r\nfig.update_yaxes(title_text=\"Membrane Potential V (mV)\", row=1, col=1)\r\nfig.update_yaxes(title_text=\"Current i_m (microA / mm^2)\", row=2, col=1)\r\nfig.update_yaxes(title_text=\"n, m, h (Probability)\",range=[0,1], row=3, col=1)\r\n\r\n# Update title and size\r\nfig.update_layout(height=800, width=700, \r\n title_text='Hodgkin-Huxley Model Neuron',\r\n showlegend = True)\r\n\r\n# Update theme\r\nfig.layout.template = 'plotly_dark'\r\n\r\n# Show figure\r\nfig.show()", "_____no_output_____" ] ], [ [ "## Coding Challenge Problems", "_____no_output_____" ] ], [ [ "#@title Run Simulation\r\n#@markdown Execute the code block and use the sliders to set values in order to answer the Coding Challenge Problems in the chapter text.\r\n\r\n#@markdown (Tip: Select a 
slider and use the left and right arrow keys to slide to the desired value.)\r\nimport numpy as np\r\nfrom plotly.subplots import make_subplots\r\nimport plotly.graph_objects as go\r\nimport ipywidgets as widgets\r\n\r\n# Voltage Parameters - Units mV (1 mV = 1e-3 Volts)\r\nVrest = -65\r\n\r\n#Maximal Conductances - Units nS/mm^2\r\nGL = 0.03\r\nGK = 3.6\r\nGNa = 12\r\n\r\n# Reversal Potentials - Units mV\r\nEL = -54.4\r\nEK = -77\r\nENa = 50\r\n\r\n# Input current: Ie - Units nA (1 nA = 10-9 Amperes)\r\nIe = 1.75\r\n\r\n# Neuron Surface Area - Units mm^2\r\nA = 0.1\r\n\r\n# Time at which the current is applied - Units ms\r\nstart_current = 5\r\n\r\n# Time at which the current is switched off - Units ms\r\nend_current = 10\r\n\r\n# Initial voltage\r\nVinitial = Vrest\r\n\r\n# Simulation Time Span (0 to 20ms, dt = 0.01ms)\r\nt0 = 0\r\ndt = 0.01\r\nt_final = 20\r\ntime = np.linspace(t0, t_final, int(t_final/dt))\r\n\r\n# Create a list V(t) to store the value of V at each time-step dt\r\nV = [0] * len(time)\r\n\r\n# Set the initial value at time t = t0 to the initial value Vinitial\r\nV[0] = Vinitial\r\n\r\n# Initial gating variable values (Probability [0, 1])\r\nn_initial = 0.1399\r\nm_initial = 0.0498\r\nh_initial = 0.6225\r\n\r\n# Create lists to store the value of each gating variable at each time-step dt\r\nn = [0] * len(time)\r\nm= [0] * len(time)\r\nh = [0] * len(time)\r\n\r\n# Set the initial value at time t = t0 to the initial values\r\nn[0] = n_initial\r\nm[0] = m_initial\r\nh[0] = h_initial\r\n\r\n# Initial membrane current \r\nim_initial = GL*(V[0]-EL)+GK*np.power(n[0],4)*(V[0]-EK)+GNa*np.power(m[0],3)*h[0]*(V[0]-ENa)\r\n\r\n# Create list to store value of membrane current at each time-step dt\r\nim = [0] * len(time)\r\n\r\n# Set the initial value at time t = t0 to the initial value im_initial\r\nim[0] = im_initial\r\n\r\n# Function: compute_dn\r\ndef compute_dn(v, n):\r\n alpha_n = (0.01*(v + 55))/(1 - np.exp(-0.1*(v+55)))\r\n beta_n = 0.125*np.exp(-0.0125*(v+65))\r\n\r\n n_inf = alpha_n/(alpha_n + beta_n)\r\n tau_n = 1/(alpha_n + beta_n)\r\n\r\n dn = (dt/tau_n)*(n_inf - n)\r\n return dn\r\n\r\n# Function: compute_dm\r\ndef compute_dm(v, m):\r\n alpha_m = (0.1*(v + 40))/(1 - np.exp(-0.1*(v+40)))\r\n beta_m = 4*np.exp(-0.0556*(v+65))\r\n\r\n m_inf = alpha_m/(alpha_m + beta_m)\r\n tau_m = 1/(alpha_m + beta_m)\r\n\r\n dm = (dt/tau_m)*(m_inf - m)\r\n return dm\r\n\r\n# Function: compute_dh\r\ndef compute_dh(v, h):\r\n alpha_h = 0.07*np.exp(-0.05*(v+65))\r\n beta_h = 1/(1 + np.exp(-0.1*(v+35)))\r\n\r\n h_inf = alpha_h/(alpha_h + beta_h)\r\n tau_h = 1/(alpha_h + beta_h)\r\n \r\n dh = (dt/tau_h)*(h_inf - h)\r\n return dh\r\n\r\ndef simulate_iaf_neuron(Ie, c):\r\n\r\n # Time at which the current is applied - Units ms\r\n start_current = c[0]\r\n\r\n # Time at which the current is switched off - Units ms\r\n end_current = c[1]\r\n\r\n # For each timestep we compute V and store the value\r\n for t in range(len(time)-1):\r\n # For each timestep we compute n, m and h and store the value\r\n dn = compute_dn(V[t], n[t])\r\n n[t+1] = n[t] + dn\r\n\r\n dm = compute_dm(V[t], m[t])\r\n m[t+1] = m[t] + dm\r\n\r\n dh = compute_dh(V[t], h[t])\r\n h[t+1] = h[t] + dh\r\n\r\n # If time t >= 1 ms and t <= 2 ms, switch Injected Current ON\r\n if time[t] >= start_current and time[t] <= end_current:\r\n ie = Ie\r\n # Otherwise, switch Injected Current OFF\r\n else:\r\n ie = 0\r\n\r\n # Use these values to compute the updated membrane current\r\n im[t+1] = 
GL*(V[t]-EL)+GK*np.power(n[t+1],4)*(V[t]-EK)+GNa*np.power(m[t+1],3)*h[t+1]*(V[t]-ENa)\r\n\r\n # Using Euler's Method for Numerical Integration (See Chapter Text)\r\n # we compute the change in voltage dV as follows (using the model equation)\r\n dV = dt*(-im[t+1] + ie/A)\r\n\r\n # Store this new value into our list\r\n V[t+1] = V[t] + dV\r\n \r\n return [V, im, n, m, h, time]\r\n\r\ndef plot_iaf_neuron(V, im, n, m, h, time):\r\n # Data \r\n x = list(time[0:-2])\r\n im = [x / 100 for x in im]\r\n\r\n # Plot data\r\n fig = make_subplots(\r\n rows=3, cols=1, shared_xaxes = True, vertical_spacing=0.1, \r\n subplot_titles=('V over Time', 'i_m over Time', 'n, m, h over Time')\r\n )\r\n\r\n # Add traces\r\n fig.add_trace(go.Scatter(name='V', x=x, y=V), row=1, col=1)\r\n fig.add_trace(go.Scatter(name='i_m', x=x, y=im), row=2, col=1)\r\n fig.add_trace(go.Scatter(name='n', x=x, y=n), row=3, col=1)\r\n fig.add_trace(go.Scatter(name='m', x=x, y=m), row=3, col=1)\r\n fig.add_trace(go.Scatter(name='h', x=x, y=h), row=3, col=1)\r\n\r\n # Update xaxis properties\r\n fig.update_xaxes(title_text=\"Time t (ms)\", row=3, col=1)\r\n\r\n # Update yaxis properties\r\n fig.update_yaxes(title_text=\"Membrane Potential V (mV)\", row=1, col=1)\r\n fig.update_yaxes(title_text=\"Current i_m (microA / mm^2)\", row=2, col=1)\r\n fig.update_yaxes(title_text=\"n, m, h (Probability)\",range=[0,1], row=3, col=1)\r\n\r\n # Update title and size\r\n fig.update_layout(height=800, width=700, \r\n title_text='Hodgkin-Huxley Model Neuron',\r\n showlegend = True)\r\n\r\n # Update theme\r\n fig.layout.template = 'plotly_dark'\r\n\r\n # Show figure\r\n fig.show()\r\n\r\nstyle = {'description_width':'auto'}\r\[email protected](\r\n Ie = widgets.FloatSlider(\r\n value=1.75,\r\n min=0.00,\r\n max=5.00,\r\n step=0.05,\r\n description='Ie',\r\n style = style,\r\n disabled=False,\r\n continuous_update=False,\r\n orientation='horizontal',\r\n readout=True,\r\n readout_format='1.2f'\r\n ),\r\n c = widgets.FloatRangeSlider(\r\n value=[5.00, 10.00],\r\n min=1.00,\r\n max=15.00,\r\n step=0.10,\r\n description='Ie: On/Off',\r\n style = style,\r\n disabled=False,\r\n continuous_update=False,\r\n orientation='horizontal',\r\n readout=True,\r\n readout_format='1.2f'\r\n )\r\n )\r\ndef compute_iaf_neuron(Ie =1.75, c = [5.00, 10.00]):\r\n [V, im, n, m, h, time] = simulate_iaf_neuron(Ie, c)\r\n plot_iaf_neuron(V, im, n, m, h, time)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cba0ca63a94de7c52fd619aa43fbfaefbec1261a
99,940
ipynb
Jupyter Notebook
JIN9127 - Unparseable Unicode.ipynb
jiahao/jin
3fcc47f2a64c96b01b7e3e36233e3df70930e732
[ "MIT" ]
null
null
null
JIN9127 - Unparseable Unicode.ipynb
jiahao/jin
3fcc47f2a64c96b01b7e3e36233e3df70930e732
[ "MIT" ]
null
null
null
JIN9127 - Unparseable Unicode.ipynb
jiahao/jin
3fcc47f2a64c96b01b7e3e36233e3df70930e732
[ "MIT" ]
null
null
null
57.010839
148
0.592536
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
cba0d0fe32d80eb543b5ecf58a3e2e6a460909db
13,146
ipynb
Jupyter Notebook
05 - Optical Character Recognition.ipynb
emailtemp/clone-ai-900
4b1f4f62c4da0a2138fae4577108b02421ebee23
[ "MIT" ]
1
2022-01-15T03:21:32.000Z
2022-01-15T03:21:32.000Z
05 - Optical Character Recognition.ipynb
emailtemp/clone-ai-900
4b1f4f62c4da0a2138fae4577108b02421ebee23
[ "MIT" ]
null
null
null
05 - Optical Character Recognition.ipynb
emailtemp/clone-ai-900
4b1f4f62c4da0a2138fae4577108b02421ebee23
[ "MIT" ]
4
2021-06-10T15:38:07.000Z
2022-01-15T17:39:55.000Z
38.215116
185
0.528906
[ [ [ "# 광학 인식\r\n\r\n![신문을 읽고 있는 로봇](./images/ocr.jpg)\r\n\r\n흔히 볼 수 있는 Computer Vision 과제는 이미지에서 텍스트를 감지하고 해석하는 것입니다. 이러한 종류의 처리를 종종 *OCR(광학 인식)*이라고 합니다.\r\n\r\n## Computer Vision 서비스를 사용하여 이미지에서 텍스트 읽기\r\n\r\n**Computer Vision** Cognitive Service는 다음을 비롯한 OCR 작업을 지원합니다.\r\n\r\n- 여러 언어로 된 텍스트를 읽는 데 사용할 수 있는 **OCR** API. 이 API는 동기식으로 사용할 수 있으며, 이미지에서 소량의 텍스트를 감지하고 읽어야 할 때 잘 작동합니다.\r\n- 더 큰 문서에 최적화된 **Read** API. 이 API는 비동기식으로 사용되며, 인쇄 텍스트와 필기 텍스트 모두에 사용할 수 있습니다.\r\n\r\n이 서비스는 **Computer Vision** 리소스 또는 **Cognitive Services** 리소스를 만들어서 사용할 수 있습니다.\r\n\r\n아직 만들지 않았다면 Azure 구독에서 **Cognitive Services** 리소스를 만듭니다.\r\n\r\n> **참고**: 이미 Cognitive Services 리소스를 보유하고 있다면 Azure Portal에서 **빠른 시작** 페이지를 열고 키 및 엔드포인트를 아래의 셀로 복사하기만 하면 됩니다. 리소스가 없다면 아래의 단계를 따라 리소스를 만듭니다.\r\n\r\n1. 다른 브라우저 탭에서 Azure Portal(https://portal.azure.com) 을 열고 Microsoft 계정으로 로그인합니다.\r\n\r\n2. **&#65291;리소스 만들기** 단추를 클릭하고, *Cognitive Services*를 검색하고, 다음 설정을 사용하여 **Cognitive Services** 리소스를 만듭니다.\r\n - **구독**: *사용자의 Azure 구독*.\r\n - **리소스 그룹**: *고유한 이름의 새 리소스 그룹을 선택하거나 만듭니다*.\r\n - **지역**: *사용 가능한 지역을 선택합니다*.\r\n - **이름**: *고유한 이름을 입력합니다*.\r\n - **가격 책정 계층**: S0\r\n - **알림을 읽고 이해했음을 확인합니다**. 선택됨.\r\n3. 배포가 완료될 때까지 기다립니다. 그런 다음에 Cognitive Services 리소스로 이동하고, **개요** 페이지에서 링크를 클릭하여 서비스의 키를 관리합니다. 클라이언트 애플리케이션에서 Cognitive Services 리소스에 연결하려면 엔드포인트 및 키가 필요합니다.\r\n\r\n### Cognitive Services 리소스의 키 및 엔드포인트 가져오기\r\n\r\nCognitive Services 리소스를 사용하려면 클라이언트 애플리케이션에 해당 엔드포인트 및 인증 키가 필요합니다.\r\n\r\n1. Azure Portal에 있는 Cognitive Service 리소스의 **키 및 엔드포인트** 페이지에서 리소스의 **Key1**을 복사하고 아래 코드에 붙여 넣어 **YOUR_COG_KEY**를 대체합니다.\r\n2. 리소스의 **엔드포인트**를 복사하고 아래 코드에 붙여 넣어 **YOUR_COG_ENDPOINT**를 대체합니다.\r\n3. **셀 실행**(&#9655;) 단추(셀 왼쪽에 있음)를 클릭하여 아래의 셀에 있는 코드를 실행합니다.", "_____no_output_____" ] ], [ [ "cog_key = 'YOUR_COG_KEY'\ncog_endpoint = 'YOUR_COG_ENDPOINT'\n\nprint('Ready to use cognitive services at {} using key {}'.format(cog_endpoint, cog_key))", "_____no_output_____" ] ], [ [ "이제 키와 엔드포인트를 설정했으므로 Computer Vision 서비스 리소스를 사용하여 이미지에서 텍스트를 추출할 수 있습니다.\r\n\r\n먼저, 이미지를 동기식으로 분석하고 포함된 텍스트를 읽을 수 있게 해주는 **OCR** API부터 시작하겠습니다. 이 경우에는 일부 텍스트를 포함하고 있는 가상의 Northwind Traders 소매업체에 대한 광고 이미지가 있습니다. 아래의 셀을 실행하여 읽어 보세요. ", "_____no_output_____" ] ], [ [ "from azure.cognitiveservices.vision.computervision import ComputerVisionClient\nfrom msrest.authentication import CognitiveServicesCredentials\nimport matplotlib.pyplot as plt\nfrom PIL import Image, ImageDraw\nimport os\n%matplotlib inline\n\n# Get a client for the computer vision service\r\ncomputervision_client = ComputerVisionClient(cog_endpoint, CognitiveServicesCredentials(cog_key))\n\n# Read the image file\r\nimage_path = os.path.join('data', 'ocr', 'advert.jpg')\nimage_stream = open(image_path, \"rb\")\n\n# Use the Computer Vision service to find text in the image\r\nread_results = computervision_client.recognize_printed_text_in_stream(image_stream)\n\n# Process the text line by line\r\nfor region in read_results.regions:\n for line in region.lines:\n\n # Read the words in the line of text\n line_text = ''\n for word in line.words:\n line_text += word.text + ' '\n print(line_text.rstrip())\n\n# Open image to display it.\r\nfig = plt.figure(figsize=(7, 7))\nimg = Image.open(image_path)\ndraw = ImageDraw.Draw(img)\nplt.axis('off')\nplt.imshow(img)", "_____no_output_____" ] ], [ [ "이미지에 있는 텍스트는 영역, 줄, 단어의 계층 구조로 구성되어 있으며 코드는 이 항목들을 읽어서 결과를 검색합니다.\r\n\r\n이미지 위에서 읽은 텍스트를 결과에서 봅니다. \r\n\r\n## 경계 상자 표시\r\n\r\n텍스트 줄의 *경계 상자* 좌표와 이미지에서 발견된 개별 단어도 결과에 포함되어 있습니다. 
아래의 셀을 실행하여 위에서 검색한 광고 이미지에서 텍스트 줄의 경계 상자를 확인하세요.", "_____no_output_____" ] ], [ [ "# Open image to display it.\r\nfig = plt.figure(figsize=(7, 7))\nimg = Image.open(image_path)\ndraw = ImageDraw.Draw(img)\n\n# Process the text line by line\r\nfor region in read_results.regions:\n for line in region.lines:\n\n # Show the position of the line of text\n l,t,w,h = list(map(int, line.bounding_box.split(',')))\n draw.rectangle(((l,t), (l+w, t+h)), outline='magenta', width=5)\n\n # Read the words in the line of text\n line_text = ''\n for word in line.words:\n line_text += word.text + ' '\n print(line_text.rstrip())\n\n# Show the image with the text locations highlighted\r\nplt.axis('off')\nplt.imshow(img)", "_____no_output_____" ] ], [ [ "결과에서 각 텍스트 줄의 경계 상자는 이미지에 직사각형으로 표시됩니다.\r\n\r\n## Read API 사용\r\n\r\n이전에 사용한 OCR API는 소량의 텍스트가 있는 이미지에서 잘 작동합니다. 스캔한 문서와 같이 더 큰 텍스트 본문을 읽어야 할 때는 **Read** API를 사용할 수 있습니다. 이를 위해서는 다단계 프로세스가 필요합니다.\r\n\r\n1. 비동기식으로 읽고 분석할 이미지를 Computer Vision 서비스에 제출합니다.\r\n2. 분석 작업이 완료될 때까지 기다립니다.\r\n3. 분석의 결과를 검색합니다.\r\n\r\n이 프로세스를 사용하여 스캔한 서신의 텍스트를 Northwind Traders 매장 관리자에게 읽어 주려면 다음 셀을 실행하세요.", "_____no_output_____" ] ], [ [ "from azure.cognitiveservices.vision.computervision import ComputerVisionClient\nfrom azure.cognitiveservices.vision.computervision.models import OperationStatusCodes\nfrom msrest.authentication import CognitiveServicesCredentials\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nimport time\nimport os\n%matplotlib inline\n\n# Read the image file\r\nimage_path = os.path.join('data', 'ocr', 'letter.jpg')\nimage_stream = open(image_path, \"rb\")\n\n# Get a client for the computer vision service\r\ncomputervision_client = ComputerVisionClient(cog_endpoint, CognitiveServicesCredentials(cog_key))\n\n# Submit a request to read printed text in the image and get the operation ID\r\nread_operation = computervision_client.read_in_stream(image_stream,\n raw=True)\noperation_location = read_operation.headers[\"Operation-Location\"]\noperation_id = operation_location.split(\"/\")[-1]\n\n# Wait for the asynchronous operation to complete\r\nwhile True:\n read_results = computervision_client.get_read_result(operation_id)\n if read_results.status not in [OperationStatusCodes.running]:\n break\n time.sleep(1)\n\n# If the operation was successfuly, process the text line by line\r\nif read_results.status == OperationStatusCodes.succeeded:\n for result in read_results.analyze_result.read_results:\n for line in result.lines:\n print(line.text)\n\n# Open image and display it.\r\nprint('\\n')\nfig = plt.figure(figsize=(12,12))\nimg = Image.open(image_path)\nplt.axis('off')\nplt.imshow(img)", "_____no_output_____" ] ], [ [ "결과를 검토합니다. 서신의 전체 필사본이 있는데, 대부분은 인쇄된 텍스트이고 필기 서명이 있습니다. 서신의 원본 이미지는 OCR 결과 아래에 표시됩니다(보기 위해 스크롤해야 할 수도 있음).\r\n\r\n## 필기 텍스트 읽기\r\n\r\n이전 예에서 이미지 분석 요청은 *인쇄된* 텍스트에 맞춰 작업을 최적화하는 텍스트 인식 모드를 지정했습니다. 그럼에도 불구하고 필기 서명이 읽혔습니다.\r\n\r\n필기 텍스트를 읽을 수 있는 이 능력은 매우 유용합니다. 
예를 들어 쇼핑 목록이 포함된 메모를 작성했는데 폰의 앱을 사용하여 메모를 읽고 그 안에 포함된 텍스트를 필사하기를 원한다고 가정해 보세요.\r\n\r\n아래 셀을 실행하여 필기 쇼핑 목록에 대한 읽기 작업의 예를 확인해 보세요.", "_____no_output_____" ] ], [ [ "from azure.cognitiveservices.vision.computervision import ComputerVisionClient\nfrom azure.cognitiveservices.vision.computervision.models import OperationStatusCodes\nfrom msrest.authentication import CognitiveServicesCredentials\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nimport time\nimport os\n%matplotlib inline\n\n# Read the image file\r\nimage_path = os.path.join('data', 'ocr', 'note.jpg')\nimage_stream = open(image_path, \"rb\")\n\n# Get a client for the computer vision service\r\ncomputervision_client = ComputerVisionClient(cog_endpoint, CognitiveServicesCredentials(cog_key))\n\n# Submit a request to read printed text in the image and get the operation ID\r\nread_operation = computervision_client.read_in_stream(image_stream,\n raw=True)\noperation_location = read_operation.headers[\"Operation-Location\"]\noperation_id = operation_location.split(\"/\")[-1]\n\n# Wait for the asynchronous operation to complete\r\nwhile True:\n read_results = computervision_client.get_read_result(operation_id)\n if read_results.status not in [OperationStatusCodes.running]:\n break\n time.sleep(1)\n\n# If the operation was successfuly, process the text line by line\r\nif read_results.status == OperationStatusCodes.succeeded:\n for result in read_results.analyze_result.read_results:\n for line in result.lines:\n print(line.text)\n\n# Open image and display it.\r\nprint('\\n')\nfig = plt.figure(figsize=(12,12))\nimg = Image.open(image_path)\nplt.axis('off')\nplt.imshow(img)", "_____no_output_____" ] ], [ [ "## 추가 정보\r\n\r\nOCR에 Computer Vision 서비스를 사용하는 방법에 대한 자세한 내용은 [Computer Vision 설명서](https://docs.microsoft.com/ko-kr/azure/cognitive-services/computer-vision/concept-recognizing-text)를 참조하세요.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
cba0d6b5ee9530cb23b05e5008334349f0643b45
7,500
ipynb
Jupyter Notebook
georadius.ipynb
brelevenix/georadius
1c938644bcadbcee551fd3a20790942f902364e6
[ "Apache-2.0" ]
null
null
null
georadius.ipynb
brelevenix/georadius
1c938644bcadbcee551fd3a20790942f902364e6
[ "Apache-2.0" ]
null
null
null
georadius.ipynb
brelevenix/georadius
1c938644bcadbcee551fd3a20790942f902364e6
[ "Apache-2.0" ]
1
2021-04-12T07:36:48.000Z
2021-04-12T07:36:48.000Z
34.883721
120
0.538667
[ [ [ "#!/usr/bin/python3\n#\n# This program and the accompanying materials\n# are made available under the terms of the Apache License, Version 2.0\n# which accompanies this distribution, and is available at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n\n\"\"\"\nThis jupyter notebook is used to display the intersections\nbetween various circles.\n\nDeveloped for Covid-19 lockdown N°3 for the 10 km limitation\n\nRequires the following packages:\n- json: to open/read json files\n- ipyleaflet: to display on map \n- ipywidgets: for map configuration\n- shapely: for polygon operations\n- geog: to compute polygon from center and radius\n- numpy: required for geog\n\nInput file: points.geojson\n- GeoJSON file to store a list of points (centre of the each zone)\n\nOuput file: zone.geojson\n- GeoJSON file to store the interesction zone as a Polygon\n\nHard-coded parameters:\n- Radius: 10000m\n- Number of points created for the circle: 32 points\n- opacity values to display on map\n\n\"\"\"\nimport json\nfrom ipyleaflet import GeoJSON, Map\nfrom ipywidgets import Layout\nfrom shapely.geometry import Polygon\nimport numpy as np\nimport geog\nimport shapely\n\ndef create_circle(lat, lon, radius, nb_points):\n \"\"\"\n Create a circle from a point in WGS84 coordinate with \n lat, lon: coordinates for the center\n radius: radius in m \n nb_points: number of points for the circle\n \"\"\"\n center = shapely.geometry.Point([lon, lat])\n angles = np.linspace(0, 360, nb_points)\n polygon = geog.propagate(center, angles, radius)\n return polygon\n\ndef display_circle(the_map, the_circle, name, options):\n \"\"\"\n Display a circle on the map\n the_map: Ipleaflet Map\n the_circle: circle as a shapely Polygon\n name: name associated with the circle\n options: options to display circle\n \"\"\"\n geo_circle = {\n \"type\": \"Feature\",\n \"properties\": {\"name\": name},\n \"geometry\": shapely.geometry.mapping(shapely.geometry.Polygon(the_circle))}\n layer = GeoJSON(data=geo_circle, style={'opacity': options[\"opacity\"],\n 'fillOpacity': options[\"fill_opacity\"],\n 'weight': options[\"weight\"]})\n the_map.add_layer(layer)\n\ndef create_polygons(centers):\n \"\"\"\n Create a list of shapely Polygon\n centers: list of points in a GeoJSON structure\n \"\"\"\n polygon_circles = []\n for center in centers[\"features\"]:\n lat = center[\"geometry\"][\"coordinates\"][1]\n lon = center[\"geometry\"][\"coordinates\"][0]\n polygon_circle = create_circle(lat, lon, 10000, 32)\n polygon_circles.append(Polygon(polygon_circle))\n return polygon_circles\n\ndef create_common_polygon(polygon_circles):\n \"\"\"\n Create a Poygon\n polygon_circles: list of shapely Polygon\n \"\"\"\n common_zone = polygon_circles[0]\n for circle in polygon_circles[1:]:\n common_zone = circle.intersection(common_zone)\n return common_zone\n\ndef generate_geojson_file(polygon, precision):\n \"\"\"\n Generate a GeoJSON file fro a shapely Polygon\n polygon: shapely Polygon\n precision: number of digits for coordinates precision\n \"\"\"\n geometry = shapely.geometry.mapping(shapely.geometry.Polygon(polygon))\n float_format = \"{0:.\" + str(precision) + \"f}\"\n points = []\n for point in geometry[\"coordinates\"][0]:\n lon = float(float_format.format(float(point[0])))\n lat = float(float_format.format(float(point[1])))\n points.append([lon, lat])\n polygon_coords = []\n polygon_coords.append(points)\n\n geo = {\n \"type\": \"FeatureCollection\",\n \"properties\": {\"name\": \"Zone commune\"},\n \"features\": [{\n \"type\": \"Feature\",\n 
\"properties\": {\"name\": \"Cercle commun\"},\n \"geometry\": {\"type\": \"Polygon\",\n \"coordinates\": polygon_coords}}]\n }\n with open(\"zone.geojson\", \"w\") as geojson_file:\n geojson_file.write(json.dumps(geo))", "_____no_output_____" ], [ "geo_centers = []\nwith open(\"points.geojson\", \"r\") as geo_file:\n geo_centers = json.load(geo_file)\n \npolygon_circles = create_polygons(geo_centers)\ncommon_polygon = create_common_polygon(polygon_circles)\ncentroid = common_polygon.centroid\n\ngenerate_geojson_file(common_polygon, 5)\n\n# Create map centered on centroid\nmy_map = Map(center=(centroid.coords[0][1], centroid.coords[0][0]), \n zoom=11, \n layout=Layout(width='1200px', height='800px'))\n\n# Display circles on the map\nfor circle in polygon_circles:\n display_circle(my_map, circle, \"\", {\"opacity\": 0.1, \"fill_opacity\": 0.1, \"weight\": 2})\n\n# Display common zone on the map\ndisplay_circle(my_map, common_polygon, \"Zone commune\",\n {\"opacity\": 1.0, \"fill_opacity\": 0.5, \"weight\": 5})\n\n# Display centers on the map\nmy_map.add_layer(GeoJSON(data=geo_centers))\n\n# Display map\nmy_map", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code" ] ]
cba0e067f891c428a69ba4cd5b3e284dd959f29c
18,023
ipynb
Jupyter Notebook
Lez05/Lez5.ipynb
simonesilvetti/teaching_2018_uni_dmg_python
391675578d65be2bbeec7f085294b1ca766637df
[ "CC0-1.0" ]
null
null
null
Lez05/Lez5.ipynb
simonesilvetti/teaching_2018_uni_dmg_python
391675578d65be2bbeec7f085294b1ca766637df
[ "CC0-1.0" ]
null
null
null
Lez05/Lez5.ipynb
simonesilvetti/teaching_2018_uni_dmg_python
391675578d65be2bbeec7f085294b1ca766637df
[ "CC0-1.0" ]
1
2020-07-23T10:52:39.000Z
2020-07-23T10:52:39.000Z
20.787774
305
0.485213
[ [ [ "### Ricapitolazione Lez 3 (teoria)\n- definizione di funzione\n- espressioni booleane\n- if elif else", "_____no_output_____" ], [ "#### Confronto fra reali\nil numero di bit dedicato ai reali è finito, c'è un approssizmazione\ne spesso == non va bene", "_____no_output_____" ] ], [ [ "from math import *\nsqrt(2)**2==2", "_____no_output_____" ], [ "sqrt(2)", "_____no_output_____" ], [ "sqrt(2)**2", "_____no_output_____" ], [ "epsilon = 1e-15", "_____no_output_____" ], [ "abs(sqrt(2)**2-2) < epsilon", "_____no_output_____" ], [ "import sys\nsys.float_info", "_____no_output_____" ], [ "1.8e+308", "_____no_output_____" ] ], [ [ "## Iterazioni\nRipetizione di istruzioni. Non abbiamo più un programma lineare. Due costrutti iterativi il **for** (numero di iterazioni noto a priori) ed il **while** (numero di iterazioni non noto a priori). Posso simulare uno con l'altro ma creati per usare il più addatto a seconda di quello che devo fare. \n\n", "_____no_output_____" ], [ "### Riassegnazione", "_____no_output_____" ] ], [ [ "x = 5", "_____no_output_____" ], [ "x = 7", "_____no_output_____" ] ], [ [ "### Aggiornamento", "_____no_output_____" ] ], [ [ "x = 0", "_____no_output_____" ], [ "x = x + 1", "_____no_output_____" ] ], [ [ "### While\n**while** condizione_di_controllo: \n\n istruzioni eseguite", "_____no_output_____" ] ], [ [ "x=11 \nwhile x !=10: # condizione iniziale (di controllo)\n x=x+1 # istruzione che modifica la condizione del while\n print(x)", "_____no_output_____" ], [ "import random\nx=0 \nwhile x !=2: # condizione iniziale (di controllo)\n x=random.randint(1,10) # istruzione che modifica la condizione del while\n print(x,end=\"\")", "_____no_output_____" ] ], [ [ "L'ordine delle istruzioni è importante!!!", "_____no_output_____" ] ], [ [ "import random\nx=0\nwhile x !=2:\n print(x,end=\"\") # cambiando l'ordine non mi stampa più l'ultimo numero\n x=random.randint(1,10) # \n ", "_____no_output_____" ] ], [ [ "Per contare le iterazioni", "_____no_output_____" ] ], [ [ "import random\nx=0 \ni=0\nwhile x !=2:\n x=random.randint(1,4) \n print(x,' ', end=\"\") \n i=i+1\nprint (\"\\n\")\nprint ('i =',i)", "_____no_output_____" ] ], [ [ "Attenzione ai cicli infiniti. 
Vedi esempio.", "_____no_output_____" ], [ "Attenzione all'and", "_____no_output_____" ] ], [ [ "s=''\nFalse and s[0] ", "_____no_output_____" ] ], [ [ "Controlla solo la prima e da falso ma s[0] non potrebbe essere controllata, infatti se le invertiamo...", "_____no_output_____" ] ], [ [ "s=''\ns[0] and False", "_____no_output_____" ] ], [ [ "Possiamo terminare il while con un input dell'utente", "_____no_output_____" ] ], [ [ "somma = 0\nninput = int(input('Inserisci un numero (0 per terminare): '))\nwhile ninput != 0:\n somma = somma + ninput\n print('somma:', somma)\n ninput = int(input('Inserisci un numero (0 per terminare): '))\n# programma che fa la somma di un tot di numeri inseriti dall'utente", "_____no_output_____" ], [ "somma", "_____no_output_____" ], [ "s=0 \ns=s+4 #zero è l'elemento neutro per l'addizione\ns=s+5\ns=s+8", "_____no_output_____" ] ], [ [ "Stare attenti a fare condizioni robuste e controllare che ci sia dentro al ciclo l'istruzione che rende prima o poi falsa la condizione.", "_____no_output_____" ], [ "OSSERVAZIONE: i booleani non si valutano.", "_____no_output_____" ] ], [ [ "x = True", "_____no_output_____" ], [ "if x: # la variabile f è già booleana di per se stessa, non c'è bisogno di confrontarla\n print(x)", "_____no_output_____" ] ], [ [ "##### NON scrivere: if x == True", "_____no_output_____" ] ], [ [ "somma = 0\nnnumeri = 0 # conta le volte che viene eseguito il ciclo\nninput = int(input('Inserisci un numero (0 per terminare): '))\nwhile ninput != 0:\n somma = somma + ninput\n nnumeri = nnumeri + 1\n print('somma:', somma)\n ninput = int(input('Inserisci un numero (0 per terminare): '))\nprint('somma = ', somma)\nprint('hai sommato ', nnumeri, 'numeri')\n\n# programma che fa la somma di nnumeri inseriti dall'utente\n# qui contiamo anche quanti numeri sono stati inseriti", "_____no_output_____" ] ], [ [ "Posso attribuire alla variabile di elaborazione un valore fin dall'inizio consistente con la situazione. Si può fare solo se sono sicuro che il primo ingresso è valido", "_____no_output_____" ] ], [ [ "ninput = int(input('Inserisci un numero (0 per terminare): '))\nsomma = ninput\nwhile ninput != 0:\n print('somma:', somma)\n ninput = int(input('Inserisci un numero (0 per terminare): '))\n somma = somma + ninput", "_____no_output_____" ] ], [ [ "OSS: abbiamo invertito l'ordine di print e somma perché la prima somma viene già fatta fuori!!!\nRagionare sulla differenza!", "_____no_output_____" ] ], [ [ "x = False\nwhile not x: \n print('ciao')\n x = input('Finisco? (sì/no)').lower()=='sì' \n# un ciclo che continua finché l'utente non inserisci la parola si, cioè finché\n# la variabile finito non diventa vera", "_____no_output_____" ], [ "finito = False\nwhile not finito: ### vedete come è chiaro il significato?\n print('ciao')\n finito =input('Finisco? 
(sì/no)').lower()=='sì'", "_____no_output_____" ] ], [ [ "#### Scansione di una stringa", "_____no_output_____" ] ], [ [ "stringa = 'disegno'", "_____no_output_____" ], [ "i=0\nwhile i < len(stringa):\n print(stringa[i])\n i=i+1", "_____no_output_____" ] ], [ [ "## For\n**for** i **in** sequenza_di_elementi:\n \n istruzioni eseguite", "_____no_output_____" ], [ "### in\nLa parola in è un operatore che confronta due stringhe e restituisce True se la prima è una sottostringa della seconda.", "_____no_output_____" ] ], [ [ "'4' in '+748'", "_____no_output_____" ], [ "s = '+748'", "_____no_output_____" ], [ "s[0] == '+' or s[0] == '-' ", "_____no_output_____" ], [ "s[0] in '+-'", "_____no_output_____" ], [ "for i in 'disegno' :\n print(i)", "_____no_output_____" ], [ "for i in [1,45,78] :\n print(i)", "_____no_output_____" ] ], [ [ "Posso scrivere qualsiasi tipo di elenco di oggetti, sequenza di elementi (lista, stringa,file, oggetti più complessi); questo in Java è impossibile!!!!", "_____no_output_____" ] ], [ [ "for i in [1,'ciao',4/5] :\n print(i)", "_____no_output_____" ] ], [ [ "Non è pulito, non è un linguaggio tipato ma è comodo da morire", "_____no_output_____" ] ], [ [ "# cerco il carattere più \"grande\" dell'alfabeto in una stringa\ns ='ciao'\n\n", "_____no_output_____" ], [ "s ='ciao'\ncmax=s[0] \nfor c in s[1:] :\n if c >= cmax:\n cmax=c\nprint(cmax)\n\n# cerco il carattere più \"grande\" dell'alfabeto in una stringa\n# itero sui caratteri (gli elementi) della stringa\n", "_____no_output_____" ], [ "# cerco la posizione del carattere più \"grande\" dell'alfabeto in una stringa\n# itero sulla posizione (sugli indici) della stringa\ns ='ciao'\nimax=0\nfor i in [1,2,3] :\n if s[i]>=s[imax]:\n imax=i\nprint(imax)", "_____no_output_____" ], [ "for x in range(4):\n print(x)", "_____no_output_____" ], [ "type(range(4))", "_____no_output_____" ], [ "list(range(4))", "_____no_output_____" ], [ "list(range(3,5))", "_____no_output_____" ], [ "s ='ciaoo'\nfor i in range(len(s)) :\n print(s[i])", "_____no_output_____" ], [ "# Ex conto alla rovescia con il for\nfor i in range(10):\n print(10-i)", "_____no_output_____" ] ], [ [ "## Ricorsione\nAbbiamo visto che è del tutto normale che una funzione ne chiami un’altra, ma è anche consentito ad una funzione di chiamare se stessa.", "_____no_output_____" ], [ "**def** Ricorsione(): \n \n Ricorsione()", "_____no_output_____" ] ], [ [ "def contoallarovesciaRic(n):\n if n <= 0:\n print('Via!')\n else:\n print(n)\n contoallarovesciaRic(n-1)", "_____no_output_____" ], [ "contoallarovesciaRic(10)", "_____no_output_____" ], [ "def contoallarovesciaWhile(n):\n while n > 0:\n print(n)\n n = n-1\n print('Via!')", "_____no_output_____" ], [ "contoallarovesciaWhile(4)", "_____no_output_____" ], [ "def contoallarovesciaFor(n):\n for i in range(n):\n print(n - i)\n print('Via!')", "_____no_output_____" ], [ "contoallarovesciaFor(4)", "_____no_output_____" ], [ "# Ex fibonacci\n# F(1)=1, \n# F(2)=1, \n# F(n)=F(n-1)+F(n-2)", "_____no_output_____" ], [ "#h = int(input(\"inserisci l'altezza del triangolo: \"))\n# stampa un triangolo rettangolo\nh = 5\nfor i in range(h):\n print('-'*(i+1),end='')\n print(' '*(h-i-1))", "_____no_output_____" ], [ "# stampa un quadrato\n\n#l = int(input('inserisci il lato del quadrato: '))\nl = 5\nfor i in range(l):\n if i==0 or i==l-1:\n print('* '*l)\n else:\n print('* '+' '*(l-2)+'*')", "_____no_output_____" ], [ "# stampa uno snake\nl = int(input('altezza cammino: '))\nfor i in range(l):\n print('-'*i+'**'+'-'*(l+1-2-i))", 
"_____no_output_____" ] ], [ [ "Se avete voglia di divertirvi XD\n\nEsercizio: fate un programma che costruisce un albero di asterischi prendendo in input l'altezza dell'albero(triangolo), l'altezza del tronco e la larghezza del tronco. \nHint: fate due cicli diversi per il tronco ed il triangolo.\n \nAlbero facile: senza spazi, base triangolo e tronco dispari, numero * in ogni riga dispari\n\n *\n ***\n *****\n *******\n *********\n ***\n ***\n ***\n \n\n\nOpzione2: c'è uno spazio tra ogni asterisco\n *\n * *\n * * * \n * * * *\n* * * * *\n * *\n * *\n * *", "_____no_output_____" ], [ "Esercizio: scrivere un programma che converte un numero in una base da 2 a 10 ad un numero in base 10", "_____no_output_____" ], [ "Se voglio cercare se in una parola c'è un certo carattere che ciclo uso?", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "raw", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "raw" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "raw" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "raw", "raw", "raw" ] ]
cba0e9e3aba4fc01a6bfbc9e6b5d60116f912f8b
144,766
ipynb
Jupyter Notebook
Test-Dataset.ipynb
suryawanshishantanu6/Multi-Style-Transfer
c5c211847de676596580a8a9afda940ac76abbb1
[ "MIT" ]
null
null
null
Test-Dataset.ipynb
suryawanshishantanu6/Multi-Style-Transfer
c5c211847de676596580a8a9afda940ac76abbb1
[ "MIT" ]
null
null
null
Test-Dataset.ipynb
suryawanshishantanu6/Multi-Style-Transfer
c5c211847de676596580a8a9afda940ac76abbb1
[ "MIT" ]
null
null
null
841.662791
141,188
0.956053
[ [ [ "import torch\nimport matplotlib.pyplot as plt\nfrom IPython.display import Image\nfrom torchvision import datasets, transforms\nimport torchvision\nimport os\n# import helper", "_____no_output_____" ], [ "train_dir = \"dataset\"", "_____no_output_____" ], [ "train_data = torchvision.datasets.ImageFolder(os.path.join(train_dir))", "_____no_output_____" ], [ "# transform = transforms.Compose([\n# # you can add other transformations in this list\n# transforms.ToTensor()\n# ])\ntransform = transforms.Compose([transforms.Resize(255),\n transforms.CenterCrop(224),\n transforms.ToTensor()])", "_____no_output_____" ], [ "dataset = datasets.ImageFolder(\"E:/UNH/Sem3/DL/Pro/Multi-Style-Transfer/dataset\", transform=transform)", "_____no_output_____" ], [ "len(train_data)", "_____no_output_____" ], [ "dataloader = torch.utils.data.DataLoader(dataset, batch_size=32, shuffle=False)", "_____no_output_____" ], [ "images, labels = next(iter(dataloader))\nsample = images[0]\n# Image(images[0], normalize=False)\nplt.imshow(images[0].permute(1, 2, 0))", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cba106fbd3e4c7f378537ac200417b9ec1a03bcd
352,973
ipynb
Jupyter Notebook
content/ch-states/atoms-computation.ipynb
charmerDark/qiskit-textbook
83dc808bf317cab0b3f205388518c84e23ad1b0b
[ "Apache-2.0" ]
1
2021-07-09T14:53:17.000Z
2021-07-09T14:53:17.000Z
content/ch-states/atoms-computation.ipynb
epelaaez/qiskit-textbook
ec45c38bb8080eed95904654ca106b3b329c0d82
[ "Apache-2.0" ]
null
null
null
content/ch-states/atoms-computation.ipynb
epelaaez/qiskit-textbook
ec45c38bb8080eed95904654ca106b3b329c0d82
[ "Apache-2.0" ]
null
null
null
42.010593
565
0.517473
[ [ [ "# The Atoms of Computation", "_____no_output_____" ], [ " \nProgramming a quantum computer is now something that anyone can do in the comfort of their own home.\n\nBut what to create? What is a quantum program anyway? In fact, what is a quantum computer?\n\n\nThese questions can be answered by making comparisons to standard digital computers. Unfortunately, most people don’t actually understand how digital computers work either. In this article, we’ll look at the basics principles behind these devices. To help us transition over to quantum computing later on, we’ll do it using the same tools as we'll use for quantum.", "_____no_output_____" ], [ "## Contents\n\n1. [Splitting information into bits](#bits) \n2. [Computation as a Diagram](#diagram) \n3. [Your First Quantum Circuit](#first-circuit) \n4. [Example: Adder Circuit](#adder) \n 4.1 [Encoding an Input](#encoding) \n 4.2 [Remembering how to Add](#remembering-add) \n 4.3 [Adding with Qiskit](#adding-qiskit) ", "_____no_output_____" ], [ "Below is some Python code we'll need to run if we want to use the code in this page:", "_____no_output_____" ] ], [ [ "from qiskit import QuantumCircuit, assemble, Aer\nfrom qiskit.visualization import plot_histogram", "_____no_output_____" ] ], [ [ "## 1. Splitting information into bits <a id=\"bits\"></a>", "_____no_output_____" ], [ "The first thing we need to know about is the idea of bits. These are designed to be the world’s simplest alphabet. With only two characters, 0 and 1, we can represent any piece of information.\n\nOne example is numbers. You are probably used to representing a number through a string of the ten digits 0, 1, 2, 3, 4, 5, 6, 7, 8, and 9. In this string of digits, each digit represents how many times the number contains a certain power of ten. For example, when we write 9213, we mean\n\n\n\n$$ 9000 + 200 + 10 + 3 $$\n\n\n\nor, expressed in a way that emphasizes the powers of ten\n\n\n\n$$ (9\\times10^3) + (2\\times10^2) + (1\\times10^1) + (3\\times10^0) $$\n\n\n\nThough we usually use this system based on the number 10, we can just as easily use one based on any other number. The binary number system, for example, is based on the number two. This means using the two characters 0 and 1 to express numbers as multiples of powers of two. For example, 9213 becomes 10001111111101, since\n\n\n\n$$ 9213 = (1 \\times 2^{13}) + (0 \\times 2^{12}) + (0 \\times 2^{11})+ (0 \\times 2^{10}) +(1 \\times 2^9) + (1 \\times 2^8) + (1 \\times 2^7) \\\\\\\\ \\,\\,\\, + (1 \\times 2^6) + (1 \\times 2^5) + (1 \\times 2^4) + (1 \\times 2^3) + (1 \\times 2^2) + (0 \\times 2^1) + (1 \\times 2^0) $$\n\n\n\nIn this we are expressing numbers as multiples of 2, 4, 8, 16, 32, etc. instead of 10, 100, 1000, etc.\n<a id=\"binary_widget\"></a>", "_____no_output_____" ] ], [ [ "from qiskit_textbook.widgets import binary_widget\nbinary_widget(nbits=5)", "_____no_output_____" ] ], [ [ "These strings of bits, known as binary strings, can be used to represent more than just numbers. For example, there is a way to represent any text using bits. For any letter, number, or punctuation mark you want to use, you can find a corresponding string of at most eight bits using [this table](https://www.ibm.com/support/knowledgecenter/en/ssw_aix_72/com.ibm.aix.networkcomm/conversion_table.htm). Though these are quite arbitrary, this is a widely agreed-upon standard. In fact, it's what was used to transmit this article to you through the internet.\n\nThis is how all information is represented in computers. 
Whether numbers, letters, images, or sound, it all exists in the form of binary strings.\n\nLike our standard digital computers, quantum computers are based on this same basic idea. The main difference is that they use *qubits*, an extension of the bit to quantum mechanics. In the rest of this textbook, we will explore what qubits are, what they can do, and how they do it. In this section, however, we are not talking about quantum at all. So, we just use qubits as if they were bits.", "_____no_output_____" ], [ "### Quick Exercises\n1. Think of a number and try to write it down in binary.\n2. If you have $n$ bits, how many different states can they be in?", "_____no_output_____" ], [ "## 2. Computation as a diagram <a id=\"diagram\"></a>\n\nWhether we are using qubits or bits, we need to manipulate them in order to turn the inputs we have into the outputs we need. For the simplest programs with very few bits, it is useful to represent this process in a diagram known as a *circuit diagram*. These have inputs on the left, outputs on the right, and operations represented by arcane symbols in between. These operations are called 'gates', mostly for historical reasons.\n\nHere's an example of what a circuit looks like for standard, bit-based computers. You aren't expected to understand what it does. It should simply give you an idea of what these circuits look like.\n\n![image1](images/classical_circuit.png)\n\nFor quantum computers, we use the same basic idea but have different conventions for how to represent inputs, outputs, and the symbols used for operations. Here is the quantum circuit that represents the same process as above.\n\n![image2](images/quantum_circuit.png)\n\nIn the rest of this section, we will explain how to build circuits. At the end, you'll know how to create the circuit above, what it does, and why it is useful.", "_____no_output_____" ], [ "## 3. Your first quantum circuit <a id=\"first-circuit\"></a>", "_____no_output_____" ], [ "In a circuit, we typically need to do three jobs: First, encode the input, then do some actual computation, and finally extract an output. For your first quantum circuit, we'll focus on the last of these jobs. We start by creating a circuit with eight qubits and eight outputs.", "_____no_output_____" ] ], [ [ "n = 8\nn_q = n\nn_b = n\nqc_output = QuantumCircuit(n_q,n_b)", "_____no_output_____" ] ], [ [ "This circuit, which we have called `qc_output`, is created by Qiskit using `QuantumCircuit`. The number `n_q` defines the number of qubits in the circuit. With `n_b` we define the number of output bits we will extract from the circuit at the end.\n\nThe extraction of outputs in a quantum circuit is done using an operation called `measure`. Each measurement tells a specific qubit to give an output to a specific output bit. The following code adds a `measure` operation to each of our eight qubits. The qubits and bits are both labelled by the numbers from 0 to 7 (because that’s how programmers like to do things). The command `qc_output.measure(j,j)` adds a measurement to our circuit `qc_output` that tells qubit `j` to write an output to bit `j`.", "_____no_output_____" ] ], [ [ "for j in range(n):\n qc_output.measure(j,j)", "_____no_output_____" ] ], [ [ "Now that our circuit has something in it, let's take a look at it.", "_____no_output_____" ] ], [ [ "qc_output.draw()", "_____no_output_____" ] ], [ [ "Qubits are always initialized to give the output ```0```. 
Since we don't do anything to our qubits in the circuit above, this is exactly the result we'll get when we measure them. We can see this by running the circuit many times and plotting the results in a histogram. We will find that the result is always ```00000000```: a ```0``` from each qubit.", "_____no_output_____" ] ], [ [ "sim = Aer.get_backend('aer_simulator') # this is the simulator we'll use\nqobj = assemble(qc_output) # this turns the circuit into an object our backend can run\nresult = sim.run(qobj).result() # we run the experiment and get the result from that experiment\n# from the results, we get a dictionary containing the number of times (counts)\n# each result appeared\ncounts = result.get_counts()\n# and display it on a histogram\nplot_histogram(counts)", "_____no_output_____" ] ], [ [ "The reason for running many times and showing the result as a histogram is because quantum computers may have some randomness in their results. In this case, since we aren’t doing anything quantum, we get just the ```00000000``` result with certainty.\n\nNote that this result comes from a quantum simulator, which is a standard computer calculating what an ideal quantum computer would do. Simulations are only possible for small numbers of qubits (~30 qubits), but they are nevertheless a very useful tool when designing your first quantum circuits. To run on a real device you simply need to replace ```Aer.get_backend('aer_simulator')``` with the backend object of the device you want to use. ", "_____no_output_____" ], [ "## 4. Example: Creating an Adder Circuit <a id=\"adder\"></a>\n### 4.1 Encoding an input <a id=\"encoding\"></a>\n\nNow let's look at how to encode a different binary string as an input. For this, we need what is known as a NOT gate. This is the most basic operation that you can do in a computer. It simply flips the bit value: ```0``` becomes ```1``` and ```1``` becomes ```0```. For qubits, it is an operation called ```x``` that does the job of the NOT.\n\nBelow we create a new circuit dedicated to the job of encoding and call it `qc_encode`. For now, we only specify the number of qubits.", "_____no_output_____" ] ], [ [ "qc_encode = QuantumCircuit(n)\nqc_encode.x(7)\nqc_encode.draw()", "_____no_output_____" ] ], [ [ "Extracting results can be done using the circuit we have from before: `qc_output`. Adding the two circuits using `qc_encode + qc_output` creates a new circuit with everything needed to extract an output added at the end.", "_____no_output_____" ] ], [ [ "qc = qc_encode + qc_output\nqc.draw()", "/usr/local/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:1: DeprecationWarning: The QuantumCircuit.__add__() method is being deprecated.Use the compose() method which is more flexible w.r.t circuit register compatibility.\n \"\"\"Entry point for launching an IPython kernel.\n/usr/local/anaconda3/lib/python3.7/site-packages/qiskit/circuit/quantumcircuit.py:869: DeprecationWarning: The QuantumCircuit.combine() method is being deprecated. Use the compose() method which is more flexible w.r.t circuit register compatibility.\n return self.combine(rhs)\n" ] ], [ [ "Now we can run the combined circuit and look at the results.", "_____no_output_____" ] ], [ [ "qobj = assemble(qc)\ncounts = sim.run(qobj).result().get_counts()\nplot_histogram(counts)", "_____no_output_____" ] ], [ [ "Now our computer outputs the string ```10000000``` instead.\n\nThe bit we flipped, which comes from qubit 7, lives on the far left of the string. 
This is because Qiskit numbers the bits in a string from right to left. Some prefer to number their bits the other way around, but Qiskit's system certainly has its advantages when we are using the bits to represent numbers. Specifically, it means that qubit 7 is telling us about how many $2^7$s we have in our number. So by flipping this bit, we’ve now written the number 128 in our simple 8-bit computer.\n\nNow try out writing another number for yourself. You could do your age, for example. Just use a search engine to find out what the number looks like in binary (if it includes a ‘0b’, just ignore it), and then add some 0s to the left side if you are younger than 64.", "_____no_output_____" ] ], [ [ "qc_encode = QuantumCircuit(n)\nqc_encode.x(1)\nqc_encode.x(5)\n\nqc_encode.draw()", "_____no_output_____" ] ], [ [ "Now we know how to encode information in a computer. The next step is to process it: To take an input that we have encoded, and turn it into an output that we need.", "_____no_output_____" ], [ "### 4.2 Remembering how to add <a id=\"remembering-add\"></a>", "_____no_output_____" ], [ "To look at turning inputs into outputs, we need a problem to solve. Let’s do some basic maths. In primary school, you will have learned how to take large mathematical problems and break them down into manageable pieces. For example, how would you go about solving the following?\n\n```\n 9213\n+ 1854\n= ????\n```\n\nOne way is to do it digit by digit, from right to left. So we start with 3+4\n```\n 9213\n+ 1854\n= ???7\n```\n\nAnd then 1+5\n```\n 9213\n+ 1854\n= ??67\n```\n\nThen we have 2+8=10. Since this is a two digit answer, we need to carry the one over to the next column.\n\n```\n 9213\n+ 1854\n= ?067\n ¹ \n```\n\nFinally we have 9+1+1=11, and get our answer\n\n```\n 9213\n+ 1854\n= 11067\n ¹ \n```\n\nThis may just be simple addition, but it demonstrates the principles behind all algorithms. Whether the algorithm is designed to solve mathematical problems or process text or images, we always break big tasks down into small and simple steps.\n\nTo run on a computer, algorithms need to be compiled down to the smallest and simplest steps possible. To see what these look like, let’s do the above addition problem again but in binary.\n\n\n```\n 10001111111101\n+ 00011100111110\n \n= ??????????????\n```\n\nNote that the second number has a bunch of extra 0s on the left. This just serves to make the two strings the same length.\n\nOur first task is to do the 1+0 for the column on the right. In binary, as in any number system, the answer is 1. We get the same result for the 0+1 of the second column.\n\n```\n 10001111111101\n+ 00011100111110\n\n= ????????????11 \n```\n\nNext, we have 1+1. As you’ll surely be aware, 1+1=2. In binary, the number 2 is written ```10```, and so requires two bits. This means that we need to carry the 1, just as we would for the number 10 in decimal.\n\n```\n 10001111111101\n+ 00011100111110\n= ???????????011 \n ¹ \n```\n\nThe next column now requires us to calculate ```1+1+1```. This means adding three numbers together, so things are getting complicated for our computer. But we can still compile it down to simpler operations, and do it in a way that only ever requires us to add two bits together. 
For this, we can start with just the first two 1s.\n\n```\n 1\n+ 1\n= 10\n```\n\nNow we need to add this ```10``` to the final ```1``` , which can be done using our usual method of going through the columns.\n\n```\n 10\n+ 01\n= 11\n```\n\nThe final answer is ```11``` (also known as 3).\n\nNow we can get back to the rest of the problem. With the answer of ```11```, we have another carry bit.\n\n```\n 10001111111101\n+ 00011100111110\n= ??????????1011\n ¹¹\n```\n\nSo now we have another 1+1+1 to do. But we already know how to do that, so it’s not a big deal.\n\nIn fact, everything left so far is something we already know how to do. This is because, if you break everything down into adding just two bits, there are only four possible things you’ll ever need to calculate. Here are the four basic sums (we’ll write all the answers with two bits to be consistent).\n\n```\n0+0 = 00 (in decimal, this is 0+0=0)\n0+1 = 01 (in decimal, this is 0+1=1)\n1+0 = 01 (in decimal, this is 1+0=1)\n1+1 = 10 (in decimal, this is 1+1=2)\n```\n\nThis is called a *half adder*. If our computer can implement this, and if it can chain many of them together, it can add anything.", "_____no_output_____" ], [ "### 4.3 Adding with Qiskit <a id=\"adding-qiskit\"></a>", "_____no_output_____" ], [ "Let's make our own half adder using Qiskit. This will include a part of the circuit that encodes the input, a part that executes the algorithm, and a part that extracts the result. The first part will need to be changed whenever we want to use a new input, but the rest will always remain the same.", "_____no_output_____" ], [ "![half adder implemented on a quantum circuit](images/half-adder.svg)\n", "_____no_output_____" ], [ "The two bits we want to add are encoded in the qubits 0 and 1. The above example encodes a ```1``` in both these qubits, and so it seeks to find the solution of ```1+1```. The result will be a string of two bits, which we will read out from the qubits 2 and 3. All that remains is to fill in the actual program, which lives in the blank space in the middle.\n\nThe dashed lines in the image are just to distinguish the different parts of the circuit (although they can have more interesting uses too). They are made by using the `barrier` command.\n\nThe basic operations of computing are known as logic gates. We’ve already used the NOT gate, but this is not enough to make our half adder. We could only use it to manually write out the answers. Since we want the computer to do the actual computing for us, we’ll need some more powerful gates.\n\nTo see what we need, let’s take another look at what our half adder needs to do.\n\n```\n0+0 = 00\n0+1 = 01\n1+0 = 01\n1+1 = 10\n```\n\nThe rightmost bit in all four of these answers is completely determined by whether the two bits we are adding are the same or different. So for ```0+0``` and ```1+1```, where the two bits are equal, the rightmost bit of the answer comes out ```0```. For ```0+1``` and ```1+0```, where we are adding different bit values, the rightmost bit is ```1```.\n\nTo get this part of our solution correct, we need something that can figure out whether two bits are different or not. Traditionally, in the study of digital computation, this is called an XOR gate.\n\n| Input 1 | Input 2 | XOR Output |\n|:-------:|:-------:|:------:|\n| 0 | 0 | 0 |\n| 0 | 1 | 1 |\n| 1 | 0 | 1 |\n| 1 | 1 | 0 |\n\nIn quantum computers, the job of the XOR gate is done by the controlled-NOT gate. Since that's quite a long name, we usually just call it the CNOT. 
In Qiskit its name is ```cx```, which is even shorter. In circuit diagrams, it is drawn as in the image below.", "_____no_output_____" ] ], [ [ "qc_cnot = QuantumCircuit(2)\nqc_cnot.cx(0,1)\nqc_cnot.draw()", "_____no_output_____" ] ], [ [ "This is applied to a pair of qubits. One acts as the control qubit (this is the one with the little dot). The other acts as the *target qubit* (with the big circle).\n\nThere are multiple ways to explain the effect of the CNOT. One is to say that it looks at its two input bits to see whether they are the same or different. Next, it overwrites the target qubit with the answer. The target becomes ```0``` if they are the same, and ```1``` if they are different.\n\n<img src=\"images/cnot_xor.svg\">\n\nAnother way of explaining the CNOT is to say that it does a NOT on the target if the control is ```1```, and does nothing otherwise. This explanation is just as valid as the previous one (in fact, it’s the one that gives the gate its name).\n\nTry the CNOT out for yourself by trying each of the possible inputs. For example, here's a circuit that tests the CNOT with the input ```01```.", "_____no_output_____" ] ], [ [ "qc = QuantumCircuit(2,2)\nqc.x(0)\nqc.cx(0,1)\nqc.measure(0,0)\nqc.measure(1,1)\nqc.draw()", "_____no_output_____" ] ], [ [ "If you execute this circuit, you’ll find that the output is ```11```. We can think of this happening because of either of the following reasons.\n\n- The CNOT calculates whether the input values are different and finds that they are, which means that it wants to output ```1```. It does this by writing over the state of qubit 1 (which, remember, is on the left of the bit string), turning ```01``` into ```11```.\n\n- The CNOT sees that qubit 0 is in state ```1```, and so applies a NOT to qubit 1. This flips the ```0``` of qubit 1 into a ```1```, and so turns ```01``` into ```11```.\n\nHere is a table showing all the possible inputs and corresponding outputs of the CNOT gate:\n\n| Input (q1 q0) | Output (q1 q0) |\n|:-------------:|:--------------:|\n| 00 | 00 |\n| 01 | 11 |\n| 10 | 10 |\n| 11 | 01 |\n\nFor our half adder, we don’t want to overwrite one of our inputs. Instead, we want to write the result on a different pair of qubits. For this, we can use two CNOTs.", "_____no_output_____" ] ], [ [ "qc_ha = QuantumCircuit(4,2)\n# encode inputs in qubits 0 and 1\nqc_ha.x(0) # For a=0, remove this line. For a=1, leave it.\nqc_ha.x(1) # For b=0, remove this line. For b=1, leave it.\nqc_ha.barrier()\n# use cnots to write the XOR of the inputs on qubit 2\nqc_ha.cx(0,2)\nqc_ha.cx(1,2)\nqc_ha.barrier()\n# extract outputs\nqc_ha.measure(2,0) # extract XOR value\nqc_ha.measure(3,1)\n\nqc_ha.draw()", "_____no_output_____" ] ], [ [ "We are now halfway to a fully working half adder. We just have the other bit of the output left to do: the one that will live on qubit 3.\n\nIf you look again at the four possible sums, you’ll notice that there is only one case for which this is ```1``` instead of ```0```: ```1+1```=```10```. It happens only when both the bits we are adding are ```1```.\n\nTo calculate this part of the output, we could just get our computer to look at whether both of the inputs are ```1```. If they are — and only if they are — we need to do a NOT gate on qubit 3. That will flip it to the required value of ```1``` for this case only, giving us the output we need.\n\nFor this, we need a new gate: like a CNOT but controlled on two qubits instead of just one. 
This will perform a NOT on the target qubit only when both controls are in state ```1```. This new gate is called the *Toffoli*. For those of you who are familiar with Boolean logic gates, it is basically an AND gate.\n\nIn Qiskit, the Toffoli is represented with the `ccx` command.", "_____no_output_____" ] ], [ [ "qc_ha = QuantumCircuit(4,2)\n# encode inputs in qubits 0 and 1\nqc_ha.x(0) # For a=0, remove the this line. For a=1, leave it.\nqc_ha.x(1) # For b=0, remove the this line. For b=1, leave it.\nqc_ha.barrier()\n# use cnots to write the XOR of the inputs on qubit 2\nqc_ha.cx(0,2)\nqc_ha.cx(1,2)\n# use ccx to write the AND of the inputs on qubit 3\nqc_ha.ccx(0,1,3)\nqc_ha.barrier()\n# extract outputs\nqc_ha.measure(2,0) # extract XOR value\nqc_ha.measure(3,1) # extract AND value\n\nqc_ha.draw()", "_____no_output_____" ] ], [ [ "In this example, we are calculating ```1+1```, because the two input bits are both ```1```. Let's see what we get.", "_____no_output_____" ] ], [ [ "qobj = assemble(qc_ha)\ncounts = sim.run(qobj).result().get_counts()\nplot_histogram(counts)", "_____no_output_____" ] ], [ [ "The result is ```10```, which is the binary representation of the number 2. We have built a computer that can solve the famous mathematical problem of 1+1!\n\nNow you can try it out with the other three possible inputs, and show that our algorithm gives the right results for those too.\n\nThe half adder contains everything you need for addition. With the NOT, CNOT, and Toffoli gates, we can create programs that add any set of numbers of any size.\n\nThese three gates are enough to do everything else in computing too. In fact, we can even do without the CNOT. Additionally, the NOT gate is only really needed to create bits with value ```1```. The Toffoli gate is essentially the atom of mathematics. It is the simplest element, from which every other problem-solving technique can be compiled.\n\nAs we'll see, in quantum computing we split the atom.", "_____no_output_____" ] ], [ [ "import qiskit.tools.jupyter\n%qiskit_version_table", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cba10a108f7fb059ca85348747b162bd015414de
23,882
ipynb
Jupyter Notebook
Python_Units.ipynb
UWashington-Astro300/Astro300-W22
371eb704030a104cb8e826bb14c353e5a863b0f3
[ "MIT" ]
null
null
null
Python_Units.ipynb
UWashington-Astro300/Astro300-W22
371eb704030a104cb8e826bb14c353e5a863b0f3
[ "MIT" ]
null
null
null
Python_Units.ipynb
UWashington-Astro300/Astro300-W22
371eb704030a104cb8e826bb14c353e5a863b0f3
[ "MIT" ]
null
null
null
20.018441
358
0.518591
[ [ [ "# Units in Python", "_____no_output_____" ] ], [ [ "import numpy as np", "_____no_output_____" ] ], [ [ "### Find the position (x) of a rocket moving at a constant velocity (v) after a time (t)\n\n<img src=\"./images/rocket.png\" width=\"400\"/>", "_____no_output_____" ] ], [ [ "def find_position(velocity, time):\n result = velocity * time\n return result", "_____no_output_____" ] ], [ [ "### If v = 10 m/s and t = 10 s", "_____no_output_____" ] ], [ [ "my_velocity = 10\nmy_time = 10\n\nfind_position(my_velocity, my_time)", "_____no_output_____" ] ], [ [ "### No problem, x = 100 m\n\n---\n\n### Now v = 10 mph and t = 10 minutes", "_____no_output_____" ] ], [ [ "my_other_velocity = 10\nmy_other_time = 10\n\nfind_position(my_other_velocity, my_other_time)", "_____no_output_____" ] ], [ [ "### x = 100 miles minutes / hour ??", "_____no_output_____" ], [ "---\n# The Astropy Units package to the rescue", "_____no_output_____" ] ], [ [ "from astropy import units as u\nfrom astropy import constants as const\nfrom astropy.units import imperial\nimperial.enable()", "_____no_output_____" ] ], [ [ "#### *Note: because we imported the `units` package as `u`, you cannot use **u** as a variable name.*", "_____no_output_____" ], [ "---\n\n### Add units to values using `u.UNIT` where UNIT is an [Astropy Unit](http://docs.astropy.org/en/stable/units/index.html#module-astropy.units.si)\n\n* To add a UNIT to a VALUE you multiply (*) the VALUE by the UNIT\n* You can make compound units like: `u.m / u.s`", "_____no_output_____" ] ], [ [ "my_velocity = 10 * (u.m / u.s)\nmy_time = 10 * u.s", "_____no_output_____" ], [ "def find_position(velocity, time):\n result = velocity * time\n return result", "_____no_output_____" ], [ "find_position(my_velocity, my_time)", "_____no_output_____" ] ], [ [ "#### Notice the difference when using imperial units - (`imperial.UNIT`)", "_____no_output_____" ] ], [ [ "my_other_velocity = 10.0 * (imperial.mi / u.h)\nmy_other_time = 10 * u.min", "_____no_output_____" ], [ "find_position(my_other_velocity, my_other_time)", "_____no_output_____" ] ], [ [ "### Notice that the units are a bit strange. 
We can simplify this using `.decompose()`\n\n* Default to SI units", "_____no_output_____" ] ], [ [ "find_position(my_other_velocity, my_other_time).decompose()", "_____no_output_____" ] ], [ [ "### I like to put the `.decompose()` in the return of the function:", "_____no_output_____" ] ], [ [ "def find_position(velocity, time):\n result = velocity * time\n return result.decompose()", "_____no_output_____" ], [ "find_position(my_other_velocity, my_other_time)", "_____no_output_____" ] ], [ [ "### Unit conversion is really easy!", "_____no_output_____" ] ], [ [ "rocket_position = find_position(my_other_velocity, my_other_time)\n\nrocket_position", "_____no_output_____" ], [ "rocket_position.to(u.km)", "_____no_output_____" ], [ "rocket_position.to(imperial.mi)", "_____no_output_____" ], [ "rocket_position.si # quick conversion to SI units", "_____no_output_____" ], [ "rocket_position.cgs # quick conversion to CGS units", "_____no_output_____" ] ], [ [ "## It is always better to do unit conversions **outside** of functions", "_____no_output_____" ], [ "### Be careful adding units to something that already has units!\n\n* `velocity` and `time` have units.\n* By doing `result * u.km` you are adding another unit", "_____no_output_____" ] ], [ [ "def find_position_wrong(velocity, time):\n result = velocity * time\n \n return (result * u.km).decompose()", "_____no_output_____" ], [ "find_position_wrong(my_other_velocity, my_other_time)", "_____no_output_____" ] ], [ [ "---\n### You do not have to worry about working in different units (**as long as they are the same type**)!\n\n* No conversions needed\n* Just make sure you assign units", "_____no_output_____" ] ], [ [ "my_velocity, my_other_velocity", "_____no_output_____" ], [ "my_velocity + my_other_velocity", "_____no_output_____" ] ], [ [ "#### Units default to SI units", "_____no_output_____" ] ], [ [ "my_time, my_other_time", "_____no_output_____" ], [ "my_time + my_other_time", "_____no_output_____" ] ], [ [ "### You can find the units in `Astropy` that are of the same type with `.find_equivalent_units()`", "_____no_output_____" ] ], [ [ "(u.m).find_equivalent_units()", "_____no_output_____" ] ], [ [ "---\n### Be careful combining quantities with different units!", "_____no_output_____" ] ], [ [ "my_velocity + my_time", "_____no_output_____" ], [ "2 + my_time", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "## Dimentionless Units", "_____no_output_____" ] ], [ [ "dimless_y = 10 * u.dimensionless_unscaled\n\ndimless_y", "_____no_output_____" ], [ "dimless_y.unit", "_____no_output_____" ], [ "dimless_y.decompose() # returns the scale of the dimentionless quanity", "_____no_output_____" ] ], [ [ "### Some math functions only make sense with dimentionless quanities", "_____no_output_____" ] ], [ [ "np.log(2 * u.m)", "_____no_output_____" ], [ "np.log(2 * u.dimensionless_unscaled)", "_____no_output_____" ] ], [ [ "### Or they expect the correct type of unit!", "_____no_output_____" ] ], [ [ "np.sin(2 * u.m)", "_____no_output_____" ], [ "np.sin(2 * u.deg)", "_____no_output_____" ] ], [ [ "## Using units can save you headaches. \n\n* All of the trig functions expect all angles to be in radians. 
\n* If you forget this, it can lead to problems that are hard to debug", "_____no_output_____" ], [ "$$ \\large\n\\sin(90^{\\circ}) + \\sin(45^{\\circ}) = 1 + \\frac{\\sqrt{2}}{2} \\approx 1.7071\n$$", "_____no_output_____" ] ], [ [ "np.sin(90) + np.sin(45)", "_____no_output_____" ], [ "np.sin(90 * u.deg) + np.sin(45 * u.deg)", "_____no_output_____" ] ], [ [ "---\n\n## You can define your own units", "_____no_output_____" ] ], [ [ "ringo = u.def_unit('Ringos', 3.712 * imperial.yd)", "_____no_output_____" ], [ "rocket_position.to(ringo)", "_____no_output_____" ], [ "my_velocity.to(ringo / u.s)", "_____no_output_____" ] ], [ [ "#### ...Since `ringo` is self-defined it does not have a `u.` in front of it", "_____no_output_____" ], [ "### You can access the number and unit part of the Quantity separately:", "_____no_output_____" ] ], [ [ "my_velocity.value", "_____no_output_____" ], [ "my_velocity.unit", "_____no_output_____" ] ], [ [ "### This is useful in formatting output:", "_____no_output_____" ] ], [ [ "f\"The velocity of the first particle is {my_velocity.value:.1f} in the units of {my_velocity.unit:s}.\"", "_____no_output_____" ] ], [ [ "---\n\n# Constants", "_____no_output_____" ], [ "The `Astropy` package also includes a whole bunch of built-in constants to make your life easier.\n\n* The package is usually imported as `const`\n\n### [Astropy Constants](http://docs.astropy.org/en/stable/constants/index.html#reference-api)", "_____no_output_____" ] ], [ [ "const.G", "_____no_output_____" ], [ "const.M_sun", "_____no_output_____" ] ], [ [ "---\n\n### An Example: The velocity of an object in circular orbit around the Sun is", "_____no_output_____" ], [ "$$\\large\nv=\\sqrt{GM_{\\odot}\\over d} \n$$\n\n### What is the velocity of an object at 1 AU from the Sun?", "_____no_output_____" ] ], [ [ "def find_orbit_v(distance):\n result = np.sqrt(const.G * const.M_sun / distance)\n return result.decompose()", "_____no_output_____" ], [ "my_distance = 1 * u.AU", "_____no_output_____" ], [ "orbit_v = find_orbit_v(my_distance)\norbit_v", "_____no_output_____" ], [ "orbit_v.to(u.km/u.s)", "_____no_output_____" ], [ "orbit_v.to(ringo/u.ms)", "_____no_output_____" ] ], [ [ "### Be careful about the difference between a unit and a constant", "_____no_output_____" ] ], [ [ "my_star = 1 * u.solMass\nmy_star", "_____no_output_____" ], [ "my_star.unit", "_____no_output_____" ], [ "const.M_sun", "_____no_output_____" ], [ "const.M_sun.unit", "_____no_output_____" ] ], [ [ "## Last week's homework\n\n$$\\large \n\\textrm{Diameter}\\ (\\textrm{in km}) = \\frac{1329\\ \\textrm{km}}{\\sqrt{\\textrm{geometric albedo}}}\\ 10^{-0.2\\ (\\textrm{absolute magnitude})}\n$$", "_____no_output_____" ] ], [ [ "def find_diameter(ab_mag, albedo):\n result = ( (1329 * u.km) / np.sqrt(albedo) ) * (10 ** (-0.2 * ab_mag))\n return result.decompose()", "_____no_output_____" ], [ "my_ab_mag = 3.34\nmy_albedo = 0.09\n\nasteroid_diameter = find_diameter(my_ab_mag, my_albedo)\nasteroid_diameter", "_____no_output_____" ] ], [ [ "$$\\Large \n\\mathrm{Mass}\\ = \\ \\rho \\cdot \\frac{1}{6} \\pi D^3 \n$$", "_____no_output_____" ] ], [ [ "def find_mass(diameter, density):\n result = density * (1/6) * np.pi * diameter ** 3\n return result.decompose()", "_____no_output_____" ], [ "my_density = 3000 * (u.kg / u.m**3)\n\nfind_mass(asteroid_diameter, my_density)", "_____no_output_____" ] ], [ [ "#### Notice - as long as `density` has units of mass/length$^3$, and `diameter` has units of length, you do not need to do any conversions.", 
"_____no_output_____" ] ], [ [ "my_other_density = 187 * (imperial.lb / imperial.ft **3)\n\nfind_mass(asteroid_diameter, my_other_density)", "_____no_output_____" ] ], [ [ "---\n# Real world example - [Mars Climate Orbiter](https://en.wikipedia.org/wiki/Mars_Climate_Orbiter)\n\nAerobraking is a spaceflight maneuver uses the drag of flying a spacecraft through the (upper) atmosphere of a world to slow a spacecraft and lower its orbit. Aerobraking requires way less fuel compared to using propulsion to slow down.\n\n", "_____no_output_____" ], [ "On September 8, 1999, Trajectory Correction Maneuver-4 (TCM-4) was computed to place the Mars Climate Orbiter spacecraft at an optimal position for an orbital insertion maneuver that would bring the spacecraft around Mars at an altitude of 226 km. At this altitude the orbiter would skim through Mars' upper atmosphere, gradually aerobraking for weeks.", "_____no_output_____" ], [ "The calculation of TCM-4 was done in United States imperical units. The software that calculated the total needed impulse of the thruster firing produced results in pound-force seconds.", "_____no_output_____" ], [ "### Mars Climate Orbiter\n\n* Mass = 338 kg (745 lbs)\n* ΔV needed for TCM-4 = 9.2 m/s (30.2 fps)\n* Need to calculate Impulse\n\n### Impulse is a change in momentum\n\n$$ \\Large \nI = \\Delta\\ p\\ =\\ m\\Delta v \n$$", "_____no_output_____" ], [ "#### Impulse calculated in imperial units:", "_____no_output_____" ] ], [ [ "imperial_impulse = (745 * (imperial.lb)) * (30.2 * (imperial.ft / u.s))\n\nimperial_impulse.to(imperial.lbf * u.s)", "_____no_output_____" ] ], [ [ "The computed impulse value was then sent to the spacecraft and was used to fire the thuster on September 15, 1999. The computer that fired the thuster expected the impulse to be in SI units (newton-seconds). SI units are required by NASA's Software Interface Specification (SIS).", "_____no_output_____" ], [ "#### $\\Delta$v that would be the result of an impuse of 669.3 (N * s) for M = 338 kg:", "_____no_output_____" ] ], [ [ "my_deltav = (669.3 * (u.N * u.s)) / (338 * (u.kg))\n\nmy_deltav.decompose()", "_____no_output_____" ] ], [ [ "This $\\Delta$v was way too small! At this speed the spacecraft's trajectory would have taken it within 57 km (35 miles) of the surface. At this altitude, the spacecraft would likely have skipped violently off the denser-than-expected atmosphere, and it was either destroyed in the atmosphere, or re-entered heliocentric space.", "_____no_output_____" ], [ "<img src=\"./images/MCO_Orbit.png\" width=\"700\"/>", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
cba1104625e400097929a3766953b6869c9fffa9
70,697
ipynb
Jupyter Notebook
Day-4/MLFA_LAB3_Discriminative_Feature_Selection.ipynb
willcmc/ml-lab
4827aef4619049fd3897c171a386a055e096488e
[ "MIT" ]
null
null
null
Day-4/MLFA_LAB3_Discriminative_Feature_Selection.ipynb
willcmc/ml-lab
4827aef4619049fd3897c171a386a055e096488e
[ "MIT" ]
null
null
null
Day-4/MLFA_LAB3_Discriminative_Feature_Selection.ipynb
willcmc/ml-lab
4827aef4619049fd3897c171a386a055e096488e
[ "MIT" ]
null
null
null
30.724468
396
0.423568
[ [ [ "# **Discriminative Feature Selection**", "_____no_output_____" ], [ "# FEATURE SELECTION\n\nFeature Selection is the process where you automatically or manually select those features which contribute most to your prediction variable or output in which you are interested in. Having irrelevant features in your data can decrease the accuracy of the models and make your model learn based on irrelevant features.\n\nWe are going to understand it with a practice example. Steps are as follows :\n\n- Import important libraries\n\n- Importing data\n\n- Data Preprocessing\n\n - Price\n\n - Size\n\n - Installs\n\n- Discriminative Feature Check\n\n - Reviews\n\n - Price", "_____no_output_____" ], [ "**1. Import Important Libraries**", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np", "_____no_output_____" ] ], [ [ "**2. Importing Data**\n\nToday we will be working on a playstore apps dataset with ratings. Link to the dataset --> https://www.kaggle.com/lava18/google-play-store-apps/data", "_____no_output_____" ] ], [ [ "df = pd.read_csv('googleplaystore.csv',encoding='unicode_escape')\ndf.head()", "_____no_output_____" ] ], [ [ "**3. Data Preprocessing**\n\nLet us have a look at all the datatypes first :", "_____no_output_____" ] ], [ [ "df.dtypes", "_____no_output_____" ] ], [ [ "We see that all the columns except 'Rating' are object datatype. We want those columns also as numeric as they dont make sense when they are in object form.Let us start with the 'Price' column.\n\n**i) Price** \n\nWhen we saw the head of the dataset, we only see the 0 values in 'Price' column. Let us have a look at the rows with non zero data. As the 'Price column is object type, we compare the column with '0' instead of 0. ", "_____no_output_____" ] ], [ [ "df[df['Price']!='0'].head()", "_____no_output_____" ] ], [ [ "We see that the 'Price' column has dollar sign in the beginning for the apps which are not free. Hence we cannot directly convert it to numeric type. We will first have to remove the $ sign so that all datas are uniform and can be converted.\n\nWe use the replace function over here to replace the dollar sign by blank. Notice that we had to convert the column to string type from object type as the replace function is only applicable on string functions.", "_____no_output_____" ] ], [ [ "df['Price'] = df['Price'].str.replace('$','', regex=False)\ndf[df['Price']!='0'].head()", "_____no_output_____" ] ], [ [ "**ii) Size**\n\nAs we see the 'Size' column, we see that the value ends with the letter 'M' for mega. We want to convert the size to numeric value to use in the dataset. Hence we will need to remove the letter 'M'.\n\nFor this, we convert the column to string and omit the last letter of the string and save the data in 'Size' column.\n\nNotice from the previous head that we saw, that the 'Size' for row 427 is given as varies with device. We obviously cannot convert such data to numeric. We will see how to deal with it later.", "_____no_output_____" ] ], [ [ "df['Size'] = df['Size'].str[:-1]\ndf.head()", "_____no_output_____" ] ], [ [ "**iii) Installs**\n\nIf we see the 'Installs' column, there are 2 major changes that we need to make to convert it to numeric. 
We have to remove the '+' sign from the end of the data as well as remove the commas before converting to numeric.\n\nTo remove the last letter, we apply the same procedure as for the 'Size' column :", "_____no_output_____" ] ], [ [ "df['Installs'] = df['Installs'].str[:-1]\ndf.head()", "_____no_output_____" ] ], [ [ "For the removal of commas, we will use the replace function to replace commas with blank.\n\nReplace function only works on string, hence we access the values of the series as string before applying the replace function :", "_____no_output_____" ] ], [ [ "df['Installs'] = df['Installs'].str.replace(',','')\ndf.head()", "_____no_output_____" ] ], [ [ "Now, we will finally convert all the data to numeric type using the to_numeric function. Notice that we have used the errors='coerce' parameter. This parameter converts all the data which cannot be converted to numeric into NaN. For example the 'Size' in row 427 cannot be converted to int. Hence it will be converted to NaN. After that we take a look at the datatypes of the columns again.", "_____no_output_____" ] ], [ [ "df['Reviews'] = pd.to_numeric(df['Reviews'],errors='coerce')\ndf['Size'] = pd.to_numeric(df['Size'],errors='coerce')\ndf['Installs'] = pd.to_numeric(df['Installs'],errors='coerce')\ndf['Price'] = pd.to_numeric(df['Price'],errors='coerce')\ndf.dtypes", "_____no_output_____" ] ], [ [ "Now we will see and work with all the NaN values. Let us first have a look at all the NaN values in the dataset :", "_____no_output_____" ] ], [ [ "df.isna().sum()", "_____no_output_____" ] ], [ [ "As rating is the output of our dataset, we cannot have that to be NaN. Hence we will remove all the rows with 'Rating' as NaN :", "_____no_output_____" ] ], [ [ "df = df[df['Rating'].isna()==False]\ndf.isna().sum()", "_____no_output_____" ] ], [ [ "This is the final preprocessed dataset that we obtained :", "_____no_output_____" ] ], [ [ "df.head()", "_____no_output_____" ] ], [ [ "**4. Discriminative Feature Check**\n\nNow we will move on to checking the discriminative feature checking, to see which feature is good and which is not. We will start with the 'Reviews' column. For our case, we will take rating > 4.3 as a good rating. We take that value because as we see in the following stats, the rating is divided 50:50 at that value.\n\nBefore we do that, let us have a look at the statistics of the whole table :", "_____no_output_____" ] ], [ [ "df.describe()", "_____no_output_____" ] ], [ [ "**i) Reviews**\n\nWe will have to check for multiple values that which of them has the best rating distinction. We will start by comparing with the mean of the 'Reviews' column which is 514098.\n\nWe will use a new function over here known as crosstab. Crosstab allows us to have a frequency count across 2 columns or conditions.\n\nWe could also normalize the column results to obtain the conditional probability of P(Rating = HIGH | condition)\n\nWe have also turned on the margins to see the total frequency under that condition.", "_____no_output_____" ] ], [ [ "pd.crosstab(df['Rating']>4.3,df['Reviews']>514098,rownames=['Ratings>4.3'],colnames=['Reviews>514098'],margins= True)", "_____no_output_____" ] ], [ [ "We see that the number of ratings in the case of Reviews > 514098 is very less (close to 10%).\n\nHence it is preferred to take the 50 percentile point rather than the mean to be the pivot point. Let us now take the 50 percentile point which is 5930 reviews in this case. 
So let us take a look at that :", "_____no_output_____" ] ], [ [ "pd.crosstab(df['Rating']>4.3,df['Reviews']>5930,rownames=['Ratings>4.3'],colnames=['Reviews>5930'],margins= True)", "_____no_output_____" ] ], [ [ "Now we see that the number of ratings is equal for both high and low reviews. So we will take the 50 percentile point to start from now on. Let us now look at the conditional probability :", "_____no_output_____" ] ], [ [ "pd.crosstab(df['Rating']>4.3,df['Reviews']>5930,rownames=['Ratings>4.3'],colnames=['Reviews>5930'],margins= True,normalize='columns')", "_____no_output_____" ] ], [ [ "There is not much difference between P(Ratings=HIGH|Reviews<5930) and P(Ratings=HIGH|Reviews>5930) so this is a bad feature.\n\nLet us increase the value of the pivot for ratings to 80000 and check again. We dont need to check for the percentage being too low as we are almost at 75 percentile mark.", "_____no_output_____" ] ], [ [ "pd.crosstab(df['Rating']>4.3,df['Reviews']>80000,rownames=['Ratings>4.3'],colnames=['Reviews>80000'],margins= True,normalize='columns')", "_____no_output_____" ] ], [ [ "Now we see that there is a good difference in the probabilities and hence Rating>80000 is a good feature.", "_____no_output_____" ], [ "**ii) Price**\n\nWe will do the same for 'Price' column to find out the best distinctive feature. We see that in this case, even the 75 percentile mark also points to 0. Hence in this case, we will classify the data as Free or not :", "_____no_output_____" ] ], [ [ "pd.crosstab(df['Rating']>4.3,df['Price']==0,rownames=['Ratings>4.3'],colnames=['Price=$0'],margins= True)", "_____no_output_____" ] ], [ [ "This shows us that it is very difficult to use the Price as a feature. Hence it is a doubtful feature. If then also we want to force this as a feature, let us see the conditional probability :", "_____no_output_____" ] ], [ [ "pd.crosstab(df['Rating']>4.3,df['Price']==0,rownames=['Ratings>4.3'],colnames=['Price=$0'],margins= True,normalize='columns')", "_____no_output_____" ] ], [ [ "We see that there is not much difference in probability either, hence this would serve as a bad feature in any case.", "_____no_output_____" ], [ "This is the end of this tutorial. Now you can move on to assignment 7 in which you have to check the other 2 distinctive features.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
cba11a73cc95e29531a0aaaeedb5b92f4f3d021f
24,728
ipynb
Jupyter Notebook
TrainPhiNet.ipynb
darshansiddu01/PhiNet
528d0cc5fd019cd8326d942a422a14da90af203e
[ "Apache-2.0" ]
3
2019-09-14T06:58:17.000Z
2021-07-01T11:49:40.000Z
TrainPhiNet.ipynb
darshansiddu01/PhiNet
528d0cc5fd019cd8326d942a422a14da90af203e
[ "Apache-2.0" ]
null
null
null
TrainPhiNet.ipynb
darshansiddu01/PhiNet
528d0cc5fd019cd8326d942a422a14da90af203e
[ "Apache-2.0" ]
1
2019-02-19T19:50:54.000Z
2019-02-19T19:50:54.000Z
34.344444
206
0.438167
[ [ [ "!wget https://www.dropbox.com/s/ic9ym6ckxq2lo6v/Dataset_Signature_Final.zip\n#!wget https://www.dropbox.com/s/0n2gxitm2tzxr1n/lightCNN_51_checkpoint.pth\n#!wget https://www.dropbox.com/s/9yd1yik7u7u3mse/light_cnn.py\nimport zipfile\nsigtrain = zipfile.ZipFile('Dataset_Signature_Final.zip', mode='r')\nsigtrain.extractall()", "_____no_output_____" ], [ "# http://pytorch.org/\nfrom os.path import exists\nfrom wheel.pep425tags import get_abbr_impl, get_impl_ver, get_abi_tag\nplatform = '{}{}-{}'.format(get_abbr_impl(), get_impl_ver(), get_abi_tag())\ncuda_output = !ldconfig -p|grep cudart.so|sed -e 's/.*\\.\\([0-9]*\\)\\.\\([0-9]*\\)$/cu\\1\\2/'\naccelerator = cuda_output[0] if exists('/dev/nvidia0') else 'cpu'\n\n!pip install -q http://download.pytorch.org/whl/{accelerator}/torch-0.4.1-{platform}-linux_x86_64.whl torchvision", "_____no_output_____" ] ], [ [ "import re\nimport os\nimport cv2\nimport random\nimport numpy as np\nimport collections\n\nimport torch\nimport torchvision\nfrom torch.utils import data\nfrom torchvision import models\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader,Dataset\nimport torch.nn.functional as F\nfrom PIL import Image\nimport PIL\n\nfrom numpy.random import choice, shuffle\nfrom itertools import product, combinations, combinations_with_replacement, permutations\n\nimport torch.optim as optim\nfrom torchvision import transforms\n\ntrain_image_list = []\ntest_image_list = []\n\n\nfor root, dirs, files in os.walk('Dataset'):\n #if (len(dirs) ==0 and off in root): \n if (len(dirs) ==0):\n for root_sub, dirs_sub, files_sub in os.walk(root):\n for file in files_sub:\n if 'dataset4' not in root_sub:\n train_image_list.append(os.path.join(root_sub,file).rstrip('\\n'))\n else:\n test_image_list.append(os.path.join(root_sub,file).rstrip('\\n'))\n\ntrain_image_list_x = []\nfor i in list(set([re.split('/',image)[1] for image in train_image_list ])):\n#datasetx = random.choice(dataset)\n#index1 = dataset.index(datasetx)\n#for dataset_ in dataset:\n train_image_list_x.append([image for image in train_image_list if i in image])\n \ntrain_image_lis_dataset1 = train_image_list_x[0]\ntrain_image_lis_dataset2 = train_image_list_x[1]\ntrain_image_lis_dataset3 = train_image_list_x[2]\n", "_____no_output_____" ], [ "class PhiLoader(data.Dataset):\n \n def __init__(self, image_list, resize_shape, transform=True):\n \n\n self.image_list = image_list\n\n self.diff = list(set([str(str(re.split('/',image)[-1]).split('.')[0])[-3:] for image in self.image_list]))\n self.identity_image = []\n \n for i in self.diff:\n self.identity_image.append([image for image in self.image_list if ((str(str(image).split('/')[-1]).split('.')[0]).endswith(i))])\n\n \n self.PairPool=[]\n \n for user in self.identity_image:\n Real=[]\n Forge=[]\n for image in user:\n if 'real' in image:\n Real.append(image)\n\n else:\n Forge.append(image)\n \n self.PairPool.extend(list(product(Real,Forge+Real)))\n \n self.Dimensions = resize_shape\n self.transform=transform\n self.labels=[]\n self.ToGray=transforms.Grayscale()\n self.RR=transforms.RandomRotation(degrees=10,resample=PIL.Image.CUBIC)\n self.Identity = transforms.Lambda(lambda x : x)\n self.RRC = transforms.Lambda(lambda x : self.RandomRCrop(x))\n self.Transform=transforms.RandomChoice([self.RR,\n self.RRC,\n self.Identity\n ])\n self.T=transforms.ToTensor()\n self.labels=[]\n \n def __len__(self):\n\n return len(self.PairPool)\n \n def RandomRCrop(self,image):\n width,height = image.size\n size=random.uniform(0.9,1.00)\n #ratio = 
random.uniform(0.45,0.55)\n newheight = size*height\n newwidth = size*width\n T=transforms.RandomCrop((int(newheight),int(newwidth)))\n return T(image)\n\n \n def __getitem__(self,index):\n \n #print(\"index\",index)\n index=index%len(self.PairPool)\n pairPool = self.PairPool[index]\n \n img1 = self.ToGray(Image.open(pairPool[0]))\n img2 = self.ToGray(Image.open(pairPool[1])) \n \n label_1 = pairPool[0].split('/')[2]\n label_2 = pairPool[1].split('/')[2]\n \n\n \n \n if label_1 == label_2: ### same class\n l=0.0\n self.labels.append(l)\n \n else: ### different class \n l=1.0\n self.labels.append(l)\n \n \n if self.transform:\n img1 = self.Transform(img1)\n img2 = self.Transform(img2) \n \n return self.T(img1.resize(self.Dimensions)), self.T(img2.resize(self.Dimensions)), torch.tensor(l) \n\n ", "_____no_output_____" ], [ "class PhiNet(nn.Module):\n def __init__(self, ):\n super(PhiNet, self).__init__()\n self.layer1 = nn.Sequential(\n nn.Conv2d(1,96,kernel_size=11,stride=1),\n nn.ReLU(),\n nn.LocalResponseNorm(5, alpha=1e-4, beta=0.75, k=2),\n nn.MaxPool2d(kernel_size=3, stride=2))\n \n\n self.layer2 = nn.Sequential(\n nn.Conv2d(96, 256, kernel_size=5, stride=1, padding=2),\n nn.ReLU(),\n nn.LocalResponseNorm(5, alpha=1e-4, beta=0.75, k=2),\n nn.MaxPool2d(kernel_size=3, stride=2),\n nn.Dropout2d(p=0.3))\n \n self.layer3 = nn.Sequential(\n nn.Conv2d(256,384, kernel_size=3, stride=1, padding=1))\n \n self.layer4 = nn.Sequential(\n nn.Conv2d(384,256, kernel_size=3, stride=1, padding=1),\n nn.MaxPool2d(kernel_size=3, stride=2),\n nn.Dropout2d(p=0.3))\n \n self.layer5 = nn.Sequential(\n nn.Conv2d(256,128, kernel_size=3, stride=1, padding=1),\n nn.MaxPool2d(kernel_size=3, stride=2),\n nn.Dropout2d(p=0.3))\n \n self.adap = nn.AdaptiveAvgPool3d((128,6,6))\n \n self.layer6 = nn.Sequential(\n nn.Linear(4608,512),\n nn.ReLU(),\n nn.Dropout(p=0.5))\n \n \n \n self.layer7 = nn.Sequential(\n nn.Linear(512,128),\n nn.ReLU())\n \n \n def forward(self, x):\n out = self.layer1(x)\n out = self.layer2(out)\n out = self.layer3(out)\n out = self.layer4(out)\n out = self.layer5(out)\n out = self.adap(out)\n\n out = out.reshape(out.size()[0], -1)\n \n \n out = self.layer6(out)\n out = self.layer7(out)\n \n return out", "_____no_output_____" ], [ "import math\n\ndef set_optimizer_lr(optimizer, lr):\n # callback to set the learning rate in an optimizer, without rebuilding the whole optimizer\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n return optimizer\n \ndef se(initial_lr,iteration,epoch_per_cycle):\n return initial_lr * (math.cos(math.pi * iteration / epoch_per_cycle) + 1) / 2", "_____no_output_____" ], [ "class ContrastiveLoss(torch.nn.Module):\n \"\"\"\n Contrastive loss function.\n Based on: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf\n \"\"\"\n\n def __init__(self, margin=2.0):\n super(ContrastiveLoss, self).__init__()\n self.margin = margin\n\n def forward(self, output1, output2, label):\n euclidean_distance = F.pairwise_distance(output1, output2)\n loss_contrastive = torch.mean((1-label) * torch.pow(euclidean_distance, 2) +\n (label) * torch.pow(torch.clamp(self.margin - euclidean_distance, min=0.0), 2))\n\n\n return loss_contrastive\n\n \ndef contrastive_loss():\n return ContrastiveLoss()", "_____no_output_____" ], [ "def compute_accuracy_roc(predictions, labels):\n \n '''\n Compute ROC accuracy with a range of thresholds on distances.\n '''\n\n dmax = np.max(predictions)\n\n dmin = np.min(predictions)\n\n nsame = np.sum(labels == 0)\n\n ndiff = 
np.sum(labels == 1)\n thresh=1.0\n\n step = 0.01\n max_acc = 0\n\n for d in np.arange(dmin, dmax+step, step):\n \n \n idx1 = predictions.ravel() <= d\n idx2 = predictions.ravel() > d\n\n tpr = float(np.sum(labels[idx1] == 0)) / nsame \n tnr = float(np.sum(labels[idx2] == 1)) / ndiff\n acc = 0.5 * (tpr + tnr) \n \n\n if (acc > max_acc):\n \n max_acc = acc\n thresh=d\n\n return max_acc,thresh", "_____no_output_____" ], [ "trainloader1 = torch.utils.data.DataLoader(PhiLoader(image_list = train_image_lis_dataset1, resize_shape=[128,64]), \n batch_size=32, num_workers=4, shuffle = True, pin_memory=False)\n\ntrainloader1_hr = torch.utils.data.DataLoader(PhiLoader(image_list = train_image_lis_dataset1, resize_shape=[256,128]), \n batch_size=16, num_workers=4, shuffle = True, pin_memory=False)\n\ntrainloader1_uhr = torch.utils.data.DataLoader(PhiLoader(image_list = train_image_lis_dataset1, resize_shape=[512,256]), \n batch_size=4, num_workers=0, shuffle = False, pin_memory=False)\n\ntrainloader3 = torch.utils.data.DataLoader(PhiLoader(image_list = train_image_lis_dataset3, resize_shape=[512,256]), \n batch_size=32, num_workers=1, shuffle = False, pin_memory=False)\n\ntestloader = torch.utils.data.DataLoader(PhiLoader(image_list = test_image_list, resize_shape=[256,128]), \n batch_size=32, num_workers=1, shuffle = True, pin_memory=False)", "_____no_output_____" ], [ "device = torch.device(\"cuda:0\")\nprint(device)\nbest_loss = 99999999\n\nphinet = PhiNet().to(device)\n\n", "cuda:0\n" ], [ "siamese_loss = contrastive_loss() ### Notice a new loss. contrastive_loss function is defined above.\nsiamese_loss = siamese_loss.to(device)\n", "_____no_output_____" ], [ "def test(epoch):\n \n global best_loss\n phinet.eval()\n test_loss = 0\n correct = 0\n total = 1\n \n \n for batch_idx, (inputs_1, inputs_2, targets) in enumerate(testloader):\n \n with torch.no_grad():\n \n inputs_1, inputs_2, targets = inputs_1.to(device), inputs_2.to(device), targets.to(device)\n \n features_1 = phinet(inputs_1) ### get feature for image_1\n features_2 = phinet(inputs_2) ### get feature for image_2 \n \n loss = siamese_loss(features_1, features_2, targets.float())\n test_loss += loss.item()\n \n\n # Save checkpoint.\n losss = test_loss/len(testloader)\n if losss < best_loss: ### save model with the best loss so far\n print('Saving..') \n state = {\n 'net': phinet\n }\n if not os.path.isdir('checkpoint'):\n os.mkdir('checkpoint')\n torch.save(state, 'checkpoint/phinet_siamese.stdt')\n best_loss = losss\n \n return test_loss/len(testloader)", "_____no_output_____" ], [ "def train_se(epochs_per_cycle,initial_lr,dl):\n phinet.train()\n snapshots = []\n global epoch;\n epoch_loss=0\n cycle_loss=0\n global optimizer \n\n for j in range(epochs_per_cycle):\n epoch_loss = 0\n print('\\nEpoch: %d' % epoch)\n lr = se(initial_lr, j, epochs_per_cycle)\n optimizer = set_optimizer_lr(optimizer, lr)\n train = trainloader1\n\n for batch_idx, (inputs_1, inputs_2, targets) in enumerate(train):\n\n inputs_1, inputs_2, targets = inputs_1.to(device), inputs_2.to(device), targets.to(device)\n \n optimizer.zero_grad()\n features_1 = phinet(inputs_1) ### get feature for image_1\n features_2 = phinet(inputs_2) \n\n loss =siamese_loss(features_1, features_2, targets) \n\n loss.backward()\n optimizer.step()\n\n epoch_loss += loss.item()/len(train)\n \n\n epoch+=1\n cycle_loss += epoch_loss/(epochs_per_cycle)\n print (\"e_Loss:\",epoch_loss); \n\n print(\"c_loss:\",cycle_loss)\n snapshots.append(phinet.state_dict())\n \n return snapshots", 
"_____no_output_____" ], [ "lr=1e-4\nepoch=0\noptimizer = optim.SGD(phinet.parameters(),lr=lr)", "_____no_output_____" ], [ "for i in range(6):\n train_se(3,lr,trainloader1)\n test_loss = test(i)\n print(\"Test Loss: \", test_loss)\n", "\nEpoch: 0\ne_Loss: 1.8136854987395439\n\nEpoch: 1\ne_Loss: 1.8160580522135685\n\nEpoch: 2\ne_Loss: 1.8135544312627692\nc_loss: 1.8144326607386272\nSaving..\nTest Loss: 1.9583299776603436\n\nEpoch: 3\n" ], [ "for i in range(6):\n train_se(3,lr,trainloader1_hr)\n test_loss = test(i)\n print(\"Test Loss: \", test_loss)", "_____no_output_____" ], [ "loaded=torch.load('checkpoint/phinet_siamese.stdt')['net']\n", "_____no_output_____" ], [ "import gc\ndef predict(model,dataloader,fn):\n model.eval()\n model.cuda()\n labels=[]\n out=[]\n pwd = torch.nn.PairwiseDistance(p=1)\n for x0,x1,label in dataloader:\n \n labels.extend(label.numpy())\n a=model(x0.cuda())\n b=model(x1.cuda())\n #print(torch.log(a/(1-a)),a)\n out.extend(pwd(a,b))\n #!nvidia-smi\n \n\n \n return fn(np.asarray(out),np.asarray(labels))\n ", "_____no_output_____" ], [ "testloader_ = torch.utils.data.DataLoader(PhiLoader(image_list = train_image_lis_dataset2, resize_shape=[256,128]), \n batch_size=16, num_workers=0, shuffle = False, pin_memory=False)\n\nwith torch.no_grad():\n maxacc,threshold = predict(loaded,testloader_,compute_accuracy_roc)\n print(\"Accuracy:{:0.3f}\".format(maxacc*100),\"Threshold:{:0.3f}\".format(threshold))\n ", "_____no_output_____" ], [ "", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cba11e5cca8690e081dd11188a4321680dd42d26
9,432
ipynb
Jupyter Notebook
notebooks/misc.ipynb
alreich/abstract_algebra
9aca57cbc002677aeb117f542a961b7cbdfd4c29
[ "MIT" ]
1
2021-12-04T11:23:21.000Z
2021-12-04T11:23:21.000Z
notebooks/misc.ipynb
alreich/abstract_algebra
9aca57cbc002677aeb117f542a961b7cbdfd4c29
[ "MIT" ]
null
null
null
notebooks/misc.ipynb
alreich/abstract_algebra
9aca57cbc002677aeb117f542a961b7cbdfd4c29
[ "MIT" ]
null
null
null
32.524138
330
0.55598
[ [ [ "# Miscellaneous", "_____no_output_____" ], [ "This section describes the organization of classes, methods, and functions in the ``finite_algebra`` module, by way of describing the algebraic entities they represent. So, if we let $A \\rightarrow B$ denote \"A is a superclass of B\", then the class hierarchy of algebraic structures in ``finite_algebra`` is:", "_____no_output_____" ], [ "<center><i>FiniteAlgebra</i> $\\rightarrow$ Magma $\\rightarrow$ Semigroup $\\rightarrow$ Monoid $\\rightarrow$ Group $\\rightarrow$ Ring $\\rightarrow$ Field", "_____no_output_____" ], [ "The definition of a Group is the easiest place to begin with this description.", "_____no_output_____" ], [ "## Groups", "_____no_output_____" ], [ "A group, $G = \\langle S, \\circ \\rangle$, consists of a set, $S$, and a binary operation, $\\circ: S \\times S \\to S$ such that:\n\n1. $\\circ$ assigns a unique value, $a \\circ b \\in S$, for every $(a,b) \\in S \\times S$.\n1. $\\circ$ is <i>associative</i>. That is, for any $a,b,c \\in S \\Rightarrow a \\circ (b \\circ c) = (a \\circ b) \\circ c$.\n1. There is an <i>identity</i> element $e \\in S$, such that, for all $a \\in S, a \\circ e = e \\circ a = a$.\n1. Every element $a \\in S$ has an <i>inverse</i> element, $a^{-1} \\in S$, such that, $a \\circ a^{-1} = a^{-1} \n\\circ a = e$.\n\nThe symbol, $\\circ$, is used above to emphasize that it is not the same as numeric addition, $+$, or multiplication, $\\times$. Most of the time, though, no symbol at all is used, e.g., $ab$ instead of $a \\circ b$. That will be the case here.\n\nAlso, since groups are associative, there is no ambiguity in writing products like, $abc$, without parentheses.", "_____no_output_____" ], [ "## Magmas, Semigroups, and Monoids", "_____no_output_____" ], [ "By relaxing one or more of the Group requirements, above, we obtain even more general algebraic structures:\n\n* If only assumption 1, above, holds, then we have a **Magma**\n* If both 1 and 2 hold, then we have a **Semigroup**\n* If 1, 2, and 3 hold, then we have a **Monoid**\n\nRewriting this list as follows, suggests the class hierarchy, presented at the beginning:\n\n* binary operation $\\Rightarrow$ **Magma**\n* an *associative* Magma $\\Rightarrow$ **Semigroup**\n* a Semigroup with an *identity element* $\\Rightarrow$ **Monoid**\n* a Monoid with *inverses* $\\Rightarrow$ **Group**", "_____no_output_____" ], [ "## Finite Algebras", "_____no_output_____" ], [ "The **FiniteAlgebra** class is not an algebraic structure--it has no binary operation--but rather, it is a *container* for functionality that is common to all classes below it in the hierarchy, to avoid cluttering the definitions of its subclasses with a lot of \"bookkeeping\" details.\n\nTwo of those \"bookkeeping\" details are quite important, though:\n* List of elements -- a list of ``str``\n* Cayley Table -- a NumPy array of integers representing the 0-based indices of elements in the element list\n\nAlgebraic properties, such as associativity, commutativity, identities, and inverses, can be derived from the Cayley Table, so methods that test for those properties are contained in the **CayleyTable** class and can be accessed by methods in the **FiniteAlgebra** class.", "_____no_output_____" ], [ "## Rings and Fields", "_____no_output_____" ], [ "Adding Ring and Field classes completes the set of algebras supported by ``finite_algebras``.\n\nWe can define a **Ring**, $R = \\langle S, +, \\cdot \\rangle$, on a set, $S$, with two binary operations, $+$ and $\\cdot$, abstractly 
called, *addition* and *multiplication*, where:\n\n1. $\\langle S, + \\rangle$ is an abelian Group\n1. $\\langle S, \\cdot \\rangle$ is a Semigroup\n1. Multiplication distributes over addition:\n   * $a \\cdot (b + c) = a \\cdot b + a \\cdot c$\n   * $(b + c) \\cdot a = b \\cdot a + c \\cdot a$\n\nWith Rings, the **additive identity** element is usually denoted by $0$, and, if it exists, a **multiplicative identity** is denoted by $1$.", "_____no_output_____" ], [ "A **Field**, $F = \\langle S, +, \\cdot \\rangle$, is a Ring, where $\\langle S\\setminus{\\{0\\}}, \\cdot \\rangle$ is an abelian Group.", "_____no_output_____" ], [ "## Commutative Magmas", "_____no_output_____" ], [ "A <i>commutative Magma</i> is a Magma where the binary operation is commutative.\n\nThat is, for all $a,b \\in M \\Rightarrow ab = ba$.\n\nIf the Magma also happens to be a Group, then it is often referred to as an <i>abelian Group</i>.", "_____no_output_____" ], [ "## Finite Groups", "_____no_output_____" ], [ "A <i>finite group</i> is a group, $G = \\langle S, \\cdot \\rangle$, where the number of elements is finite.\n\nSo, for example, $S = \\{e, a_1, a_2, a_3, ... , a_{n-1}\\}$. In this case, we say that the <i>order</i> of $G$ is $n$.\n\nFor infinite groups, the operator, $\\circ$, is usually defined according to a rule or function. This can also be done for finite groups; however, in the finite case, it is also possible to define the operator via a <i>multiplication table</i>, where each row and each column represents one of the finite number of elements.\n\nFor example, if $S = \\{E, H, V, R\\}$, where $E$ is the identity element, then a possible multiplication table would be as shown below (i.e., the <i>Klein-4 Group</i>):", "_____no_output_____" ], [ "  . | E | H | V | R\n-----|---|---|---|---\n <b>E</b> | E | H | V | R\n <b>H</b> | H | E | R | V\n <b>V</b> | V | R | E | H\n <b>R</b> | R | V | H | E", "_____no_output_____" ], [ "<center><b>elements & their indices:</b> $\\begin{bmatrix} E & H & V & R \\\\ 0 & 1 & 2 & 3 \\end{bmatrix}$", "_____no_output_____" ], [ "<center><b>table (showing indices):</b> $\\begin{bmatrix} 0 & 1 & 2 & 3 \\\\ 1 & 0 & 3 & 2 \\\\ 2 & 3 & 0 & 1 \\\\ 3 & 2 & 1 & 0 \\end{bmatrix}$", "_____no_output_____" ], [ "## Subgroups", "_____no_output_____" ], [ "Given a group, $G = \\langle S, \\circ \\rangle$, suppose that $T \\subseteq S$ is such that $H = \\langle T, \\circ \\rangle$ forms a group itself; then $H$ is said to be a subgroup of $G$, sometimes denoted by $H \\trianglelefteq G$.\n\nThere are two <i>trivial subgroups</i> of $G$: the group consisting of just the identity element, $\\langle \\{e\\}, \\circ \\rangle$, and the entire group, $G$, itself. All other subgroups are <i>proper subgroups</i>.\n\nA subgroup, $H$, is a <i>normal subgroup</i> of a group G, if, for all elements $g \\in G$ and for all $h \\in H \\Rightarrow ghg^{-1} \\in H$.", "_____no_output_____" ], [ "## Isomorphisms", "_____no_output_____" ], [ "TBD", "_____no_output_____" ], [ "## References", "_____no_output_____" ], [ "TBD", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
cba121490587cab1f63c9e93a101fa9c51d5c6df
12,611
ipynb
Jupyter Notebook
Tutorials/CNTK_201A_CIFAR-10_DataLoader.ipynb
mukehvier/CNTK
0ee09cf771bda9d4912790e0fed7322e89d86d87
[ "RSA-MD" ]
1
2019-04-03T09:12:57.000Z
2019-04-03T09:12:57.000Z
Tutorials/CNTK_201A_CIFAR-10_DataLoader.ipynb
mukehvier/CNTK
0ee09cf771bda9d4912790e0fed7322e89d86d87
[ "RSA-MD" ]
null
null
null
Tutorials/CNTK_201A_CIFAR-10_DataLoader.ipynb
mukehvier/CNTK
0ee09cf771bda9d4912790e0fed7322e89d86d87
[ "RSA-MD" ]
1
2020-12-24T14:50:54.000Z
2020-12-24T14:50:54.000Z
37.091176
400
0.539608
[ [ [ "# CNTK 201A Part A: CIFAR-10 Data Loader\n\nThis tutorial will show how to prepare image data sets for use with deep learning algorithms in CNTK. The CIFAR-10 dataset (http://www.cs.toronto.edu/~kriz/cifar.html) is a popular dataset for image classification, collected by Alex Krizhevsky, Vinod Nair, and Geoffrey Hinton. It is a labeled subset of the [80 million tiny images](http://people.csail.mit.edu/torralba/tinyimages/) dataset.\n\nThe CIFAR-10 dataset is not included in the CNTK distribution but can be easily downloaded and converted to CNTK-supported format \n\nCNTK 201A tutorial is divided into two parts:\n- Part A: Familiarizes you with the CIFAR-10 data and converts them into CNTK supported format. This data will be used later in the tutorial for image classification tasks.\n- Part B: We will introduce image understanding tutorials.\n\nIf you are curious about how well computers can perform on CIFAR-10 today, Rodrigo Benenson maintains a [blog](http://rodrigob.github.io/are_we_there_yet/build/classification_datasets_results.html#43494641522d3130) on the state-of-the-art performance of various algorithms.\n", "_____no_output_____" ] ], [ [ "from __future__ import print_function\n\nfrom PIL import Image\nimport getopt\nimport numpy as np\nimport pickle as cp\nimport os\nimport shutil\nimport struct\nimport sys\nimport tarfile\nimport xml.etree.cElementTree as et\nimport xml.dom.minidom\n\ntry: \n from urllib.request import urlretrieve \nexcept ImportError: \n from urllib import urlretrieve\n\n# Config matplotlib for inline plotting\n%matplotlib inline", "_____no_output_____" ] ], [ [ "## Data download\n\nThe CIFAR-10 dataset consists of 60,000 32x32 color images in 10 classes, with 6,000 images per class. \nThere are 50,000 training images and 10,000 test images. The 10 classes are: airplane, automobile, bird, \ncat, deer, dog, frog, horse, ship, and truck.", "_____no_output_____" ] ], [ [ "# CIFAR Image data\nimgSize = 32\nnumFeature = imgSize * imgSize * 3", "_____no_output_____" ] ], [ [ "We first setup a few helper functions to download the CIFAR data. The archive contains the files data_batch_1, data_batch_2, ..., data_batch_5, as well as test_batch. Each of these files is a Python \"pickled\" object produced with cPickle. To prepare the input data for use in CNTK we use three oprations:\n> `readBatch`: Unpack the pickle files\n\n> `loadData`: Compose the data into single train and test objects\n\n> `saveTxt`: As the name suggests, saves the label and the features into text files for both training and testing. 
\n ", "_____no_output_____" ] ], [ [ "def readBatch(src):\n with open(src, 'rb') as f:\n if sys.version_info[0] < 3: \n d = cp.load(f) \n else:\n d = cp.load(f, encoding='latin1')\n data = d['data']\n feat = data\n res = np.hstack((feat, np.reshape(d['labels'], (len(d['labels']), 1))))\n return res.astype(np.int)\n\ndef loadData(src):\n print ('Downloading ' + src)\n fname, h = urlretrieve(src, './delete.me')\n print ('Done.')\n try:\n print ('Extracting files...')\n with tarfile.open(fname) as tar:\n tar.extractall()\n print ('Done.')\n print ('Preparing train set...')\n trn = np.empty((0, numFeature + 1), dtype=np.int)\n for i in range(5):\n batchName = './cifar-10-batches-py/data_batch_{0}'.format(i + 1)\n trn = np.vstack((trn, readBatch(batchName)))\n print ('Done.')\n print ('Preparing test set...')\n tst = readBatch('./cifar-10-batches-py/test_batch')\n print ('Done.')\n finally:\n os.remove(fname)\n return (trn, tst)\n\ndef saveTxt(filename, ndarray):\n with open(filename, 'w') as f:\n labels = list(map(' '.join, np.eye(10, dtype=np.uint).astype(str)))\n for row in ndarray:\n row_str = row.astype(str)\n label_str = labels[row[-1]]\n feature_str = ' '.join(row_str[:-1])\n f.write('|labels {} |features {}\\n'.format(label_str, feature_str))", "_____no_output_____" ] ], [ [ "In addition to saving the images in the text format, we would save the images in PNG format. In addition we also compute the mean of the image. `saveImage` and `saveMean` are two functions used for this purpose.", "_____no_output_____" ] ], [ [ "def saveImage(fname, data, label, mapFile, regrFile, pad, **key_parms):\n # data in CIFAR-10 dataset is in CHW format.\n pixData = data.reshape((3, imgSize, imgSize))\n if ('mean' in key_parms):\n key_parms['mean'] += pixData\n\n if pad > 0:\n pixData = np.pad(pixData, ((0, 0), (pad, pad), (pad, pad)), mode='constant', constant_values=128) \n\n img = Image.new('RGB', (imgSize + 2 * pad, imgSize + 2 * pad))\n pixels = img.load()\n for x in range(img.size[0]):\n for y in range(img.size[1]):\n pixels[x, y] = (pixData[0][y][x], pixData[1][y][x], pixData[2][y][x])\n img.save(fname)\n mapFile.write(\"%s\\t%d\\n\" % (fname, label))\n \n # compute per channel mean and store for regression example\n channelMean = np.mean(pixData, axis=(1,2))\n regrFile.write(\"|regrLabels\\t%f\\t%f\\t%f\\n\" % (channelMean[0]/255.0, channelMean[1]/255.0, channelMean[2]/255.0))\n \ndef saveMean(fname, data):\n root = et.Element('opencv_storage')\n et.SubElement(root, 'Channel').text = '3'\n et.SubElement(root, 'Row').text = str(imgSize)\n et.SubElement(root, 'Col').text = str(imgSize)\n meanImg = et.SubElement(root, 'MeanImg', type_id='opencv-matrix')\n et.SubElement(meanImg, 'rows').text = '1'\n et.SubElement(meanImg, 'cols').text = str(imgSize * imgSize * 3)\n et.SubElement(meanImg, 'dt').text = 'f'\n et.SubElement(meanImg, 'data').text = ' '.join(['%e' % n for n in np.reshape(data, (imgSize * imgSize * 3))])\n\n tree = et.ElementTree(root)\n tree.write(fname)\n x = xml.dom.minidom.parse(fname)\n with open(fname, 'w') as f:\n f.write(x.toprettyxml(indent = ' '))\n", "_____no_output_____" ] ], [ [ "`saveTrainImages` and `saveTestImages` are simple wrapper functions to iterate through the data set.", "_____no_output_____" ] ], [ [ "def saveTrainImages(filename, foldername):\n if not os.path.exists(foldername):\n os.makedirs(foldername)\n data = {}\n dataMean = np.zeros((3, imgSize, imgSize)) # mean is in CHW format.\n with open('train_map.txt', 'w') as mapFile:\n with open('train_regrLabels.txt', 
'w') as regrFile:\n for ifile in range(1, 6):\n with open(os.path.join('./cifar-10-batches-py', 'data_batch_' + str(ifile)), 'rb') as f:\n if sys.version_info[0] < 3: \n data = cp.load(f)\n else: \n data = cp.load(f, encoding='latin1')\n for i in range(10000):\n fname = os.path.join(os.path.abspath(foldername), ('%05d.png' % (i + (ifile - 1) * 10000)))\n saveImage(fname, data['data'][i, :], data['labels'][i], mapFile, regrFile, 4, mean=dataMean)\n dataMean = dataMean / (50 * 1000)\n saveMean('CIFAR-10_mean.xml', dataMean)\n\ndef saveTestImages(filename, foldername):\n if not os.path.exists(foldername):\n os.makedirs(foldername)\n with open('test_map.txt', 'w') as mapFile:\n with open('test_regrLabels.txt', 'w') as regrFile:\n with open(os.path.join('./cifar-10-batches-py', 'test_batch'), 'rb') as f:\n if sys.version_info[0] < 3: \n data = cp.load(f)\n else: \n data = cp.load(f, encoding='latin1')\n for i in range(10000):\n fname = os.path.join(os.path.abspath(foldername), ('%05d.png' % i))\n saveImage(fname, data['data'][i, :], data['labels'][i], mapFile, regrFile, 0)", "_____no_output_____" ], [ "# URLs for the train image and labels data\nurl_cifar_data = 'http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'\n\n# Paths for saving the text files\ndata_dir = './data/CIFAR-10/'\ntrain_filename = data_dir + '/Train_cntk_text.txt'\ntest_filename = data_dir + '/Test_cntk_text.txt'\n\ntrain_img_directory = data_dir + '/Train'\ntest_img_directory = data_dir + '/Test'\n\nroot_dir = os.getcwd()", "_____no_output_____" ], [ "if not os.path.exists(data_dir):\n os.makedirs(data_dir)\n\ntry:\n os.chdir(data_dir) \n trn, tst= loadData('http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz')\n print ('Writing train text file...')\n saveTxt(r'./Train_cntk_text.txt', trn)\n print ('Done.')\n print ('Writing test text file...')\n saveTxt(r'./Test_cntk_text.txt', tst)\n print ('Done.')\n print ('Converting train data to png images...')\n saveTrainImages(r'./Train_cntk_text.txt', 'train')\n print ('Done.')\n print ('Converting test data to png images...')\n saveTestImages(r'./Test_cntk_text.txt', 'test')\n print ('Done.')\nfinally:\n os.chdir(\"../..\")", "Downloading http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz\nDone.\nExtracting files...\nDone.\nPreparing train set...\nDone.\nPreparing test set...\nDone.\nWriting train text file...\nDone.\nWriting test text file...\nDone.\nConverting train data to png images...\nDone.\nConverting test data to png images...\nDone.\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
cba12cc568107aa8d2c56becac10dee13d002abd
67,184
ipynb
Jupyter Notebook
IPython Scikit-Learn at Scale.ipynb
zonca/machine-learning-at-scale-with-python
6da57aa89d23e525776d9f9bdcc583ffe0c3f163
[ "MIT" ]
7
2015-03-25T20:58:41.000Z
2018-04-24T16:08:24.000Z
IPython Scikit-Learn at Scale.ipynb
zonca/machine-learning-at-scale-with-python
6da57aa89d23e525776d9f9bdcc583ffe0c3f163
[ "MIT" ]
null
null
null
IPython Scikit-Learn at Scale.ipynb
zonca/machine-learning-at-scale-with-python
6da57aa89d23e525776d9f9bdcc583ffe0c3f163
[ "MIT" ]
3
2016-09-25T19:20:55.000Z
2020-10-26T17:00:53.000Z
38
1,110
0.375491
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
cba12e46bacff9eb7a21760197425615016aed2c
17,651
ipynb
Jupyter Notebook
5_Indexing/1_Indexing_loc_iloc.ipynb
sureshmecad/Pandas
128091e7021158f39eb0ff97e0e63d76e778a52c
[ "CNRI-Python" ]
null
null
null
5_Indexing/1_Indexing_loc_iloc.ipynb
sureshmecad/Pandas
128091e7021158f39eb0ff97e0e63d76e778a52c
[ "CNRI-Python" ]
null
null
null
5_Indexing/1_Indexing_loc_iloc.ipynb
sureshmecad/Pandas
128091e7021158f39eb0ff97e0e63d76e778a52c
[ "CNRI-Python" ]
null
null
null
24.481276
133
0.345136
[ [ [ "https://www.youtube.com/watch?v=LlSbe2lz6mY", "_____no_output_____" ], [ "## Indexing", "_____no_output_____" ] ], [ [ "import pandas as pd", "_____no_output_____" ], [ "data = {'rollno':[101,102,103,104,105],'english':[60,50,70,40,60],'science':[67,56,67,78,87],'Maths':[40,50,45,56,54]}\ndf=pd.DataFrame(data,index=['john','mac','henry','raj','rajesh'])\ndf", "_____no_output_____" ] ], [ [ "<h3 style=\"color:blue\" align=\"left\"> 1. loc : Label based accessing function (Label Index) </h3>", "_____no_output_____" ], [ "#### Whenever want to access data with the help of label, wether it is \"Row Label or Column Label\"", "_____no_output_____" ], [ "<h3 style=\"color:green\" align=\"left\"> Selecting a single row </h3>", "_____no_output_____" ] ], [ [ "df.loc['john']", "_____no_output_____" ] ], [ [ "<h3 style=\"color:green\" align=\"left\"> Selecting multiple rows </h3>", "_____no_output_____" ] ], [ [ "df.loc[['john','mac']]", "_____no_output_____" ] ], [ [ "<h3 style=\"color:green\" align=\"left\"> Selecting Row & Column </h3>", "_____no_output_____" ] ], [ [ "df.loc[['john','mac'],['english']]", "_____no_output_____" ] ], [ [ "<h3 style=\"color:green\" align=\"left\"> Slicing Rows </h3>", "_____no_output_____" ] ], [ [ "df.loc['john':'raj']", "_____no_output_____" ] ], [ [ "<h3 style=\"color:green\" align=\"left\"> Slicing Rows & Columns </h3>", "_____no_output_____" ] ], [ [ "df.loc['john':'raj','english':'science']", "_____no_output_____" ], [ "df.loc['john':'henry','rollno':'science']", "_____no_output_____" ] ], [ [ "<h3 style=\"color:blue\" align=\"left\"> 2. iloc : Position based function (Positional Index) </h3>", "_____no_output_____" ] ], [ [ "df.iloc[0]", "_____no_output_____" ], [ "df.iloc[0,1]", "_____no_output_____" ], [ "df.iloc[0:3,0:3]", "_____no_output_____" ] ], [ [ "-------------------------------------------------------------------------------------------------------------------------------", "_____no_output_____" ] ], [ [ "Below both statements gives same result:\n\n----------------------------------------------------------------------------------------------\n\n df.loc['john'] # Using loc\n \n df.iloc[0] # Using iloc\n\n----------------------------------------------------------------------------------------------\n\n df.loc['john':'henry','rollno':'science'] # Using loc\n\n df.iloc[0:3,0:3] # Using iloc", "_____no_output_____" ] ], [ [ "<img src=\"loc_iloc_1.JPG\" height=\"400\" width=\"400\" />", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "raw", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "raw" ], [ "markdown" ] ]
cba13324de13c06c78d689d06bbed462314bc847
14,718
ipynb
Jupyter Notebook
day23part2.ipynb
g2boojum/AdventOfCode2021
dea4af5a904dfeb395fc1657ca95f5ac35e3f9fe
[ "BSD-2-Clause" ]
null
null
null
day23part2.ipynb
g2boojum/AdventOfCode2021
dea4af5a904dfeb395fc1657ca95f5ac35e3f9fe
[ "BSD-2-Clause" ]
null
null
null
day23part2.ipynb
g2boojum/AdventOfCode2021
dea4af5a904dfeb395fc1657ca95f5ac35e3f9fe
[ "BSD-2-Clause" ]
null
null
null
30.283951
220
0.451828
[ [ [ "import itertools\nimport collections\nimport copy\nfrom functools import cache", "_____no_output_____" ], [ "# from https://bradfieldcs.com/algos/graphs/dijkstras-algorithm/\nimport heapq\n\n\ndef calculate_distances(graph, starting_vertex):\n distances = {vertex: float('infinity') for vertex in graph}\n distances[starting_vertex] = 0\n\n pq = [(0, starting_vertex)]\n while len(pq) > 0:\n current_distance, current_vertex = heapq.heappop(pq)\n\n # Nodes can get added to the priority queue multiple times. We only\n # process a vertex the first time we remove it from the priority queue.\n if current_distance > distances[current_vertex]:\n continue\n\n for neighbor, weight in graph[current_vertex].items():\n distance = current_distance + weight\n\n # Only consider this new path if it's better than any path we've\n # already found.\n if distance < distances[neighbor]:\n distances[neighbor] = distance\n heapq.heappush(pq, (distance, neighbor))\n\n return distances", "_____no_output_____" ] ], [ [ "## part 2 ##\n\n e---f---g---h---i---j---k (hallway)\n \\ / \\ / \\ / \\ / \n a3 b3 c3 d3 (rooms)\n | | | |\n a2 b2 c2 d2\n | | | |\n a1 b1 c1 d1\n | | | |\n a0 bO cO dO\n \nWeights are 1 for e-f, j-k, in between rooms (a0-a1-a2-a3, etc), and 2 for all others.\nThe weights of 2 make it so we don't have to have rules about not stopping on a hallway \nspace above one of the rooms.\n", "_____no_output_____" ] ], [ [ "graph = {'a0': {'a1': 1},\n 'b0': {'b1': 1},\n 'c0': {'c1': 1},\n 'd0': {'d1': 1},\n 'a1': {'a0': 1, 'a2': 1},\n 'b1': {'b0': 1, 'b2': 1},\n 'c1': {'c0': 1, 'c2': 1},\n 'd1': {'d0': 1, 'd2': 1},\n 'a2': {'a1': 1, 'a3': 1},\n 'b2': {'b1': 1, 'b3': 1},\n 'c2': {'c1': 1, 'c3': 1},\n 'd2': {'d1': 1, 'd3': 1},\n 'a3': {'a2': 1, 'f': 2, 'g': 2},\n 'b3': {'b2': 1, 'g': 2, 'h': 2},\n 'c3': {'c2': 1, 'h': 2, 'i': 2},\n 'd3': {'d2': 1, 'i': 2, 'j': 2},\n 'e': {'f': 1},\n 'f': {'e': 1, 'a3': 2, 'g': 2},\n 'g': {'f': 2, 'h': 2, 'a3': 2, 'b3': 2},\n 'h': {'g': 2, 'i': 2, 'b3': 2, 'c3': 2},\n 'i': {'h': 2, 'j': 2, 'c3': 2, 'd3': 2},\n 'j': {'i': 2, 'k': 1, 'd3': 2},\n 'k': {'j': 1},\n }\nnodes = graph.keys()\nnodeidx = {val:i for i,val in enumerate(nodes)}\nhallway = ['e', 'f', 'g', 'h', 'i', 'j', 'k']\nrooms = [''.join(p) for p in itertools.product('abcd','0123')]\nmove_cost = {'A': 1, 'B': 10, 'C': 100, 'D': 1000}", "_____no_output_____" ], [ "example_start = {'a3': 'B', 'b3': 'C', 'c3': 'B', 'd3': 'D',\n 'a2': 'D', 'b2': 'C', 'c2': 'B', 'd2': 'A',\n 'a1': 'D', 'b1': 'B', 'c1': 'A', 'd1': 'C',\n 'a0': 'A', 'b0': 'D', 'c0': 'C', 'd0': 'A'}\npuzzle_start = {'a3': 'D', 'b3': 'B', 'c3': 'C', 'd3': 'A',\n 'a2': 'D', 'b2': 'C', 'c2': 'B', 'd2': 'A',\n 'a1': 'D', 'b1': 'B', 'c1': 'A', 'd1': 'C',\n 'a0': 'C', 'b0': 'A', 'c0': 'D', 'd0': 'B'}\nPositions = collections.namedtuple('Positions', nodes)\nfor node in hallway:\n example_start[node] = None\n puzzle_start[node] = None\nexample_init = Positions(*[example_start[node] for node in nodes])\npuzzle_init = Positions(*[puzzle_start[node] for node in nodes])", "_____no_output_____" ], [ "def is_finished(pos):\n for node in rooms:\n col = node[0]\n val = pos[nodeidx[node]]\n if (val is None) or (val.lower() != col):\n return False\n return True", "_____no_output_____" ], [ "finished_ex = Positions(*itertools.chain(*itertools.repeat('ABCD', 4), itertools.repeat(None, len(hallway))))\nis_finished(finished_ex), is_finished(example_init), is_finished(puzzle_init)", "_____no_output_____" ], [ "def find_valid_rooms(pos):\n valid = []\n for col in 'abcd':\n empty = 
None\n for row in reversed(range(4)):\n loc = f'{col}{row}'\n val = pos[nodeidx[loc]]\n if val is None:\n empty = row\n if empty == 0:\n valid.append(f'{col}{empty}')\n continue\n if empty is None:\n continue\n if all(pos[nodeidx[f'{col}{row}']].lower() == col for row in reversed(range(empty))):\n valid.append(f'{col}{empty}')\n return valid", "_____no_output_____" ], [ "valid_test = Positions(a0='B', b0='B', c0='C', d0=None, a1=None, b1='B', c1='C', d1=None, a2=None, b2='B', c2='C', d2=None, a3=None, b3='B', c3=None, d3=None, e=None, f=None, g=None, h=None, i=None, j=None, k=None)", "_____no_output_____" ], [ "find_valid_rooms(valid_test), find_valid_rooms(example_init)", "_____no_output_____" ], [ "@cache\ndef find_topmost_moveable(pos):\n can_move = []\n for col in 'abcd':\n for row in reversed(range(4)):\n loc = f'{col}{row}'\n val = pos[nodeidx[loc]]\n if val:\n if any(pos[nodeidx[f'{col}{r}']].lower() != col for r in reversed(range(row+1))):\n can_move.append(loc)\n break\n return can_move", "_____no_output_____" ], [ "find_topmost_moveable(valid_test), find_topmost_moveable(finished_ex), find_topmost_moveable(example_init)", "_____no_output_____" ], [ "@cache\ndef traversal_costs(pos, startnode):\n newgraph = copy.deepcopy(graph)\n for node in graph:\n for endpt in graph[node]:\n if pos[nodeidx[endpt]] is not None:\n newgraph[node][endpt] = float('infinity')\n return calculate_distances(newgraph, startnode)", "_____no_output_____" ], [ "def allowed_moves(pos, currcost):\n topmost = find_topmost_moveable(pos)\n hallway_occ = [node for node in hallway if pos[nodeidx[node]] is not None]\n # see if anything can move into it's final position\n end_rooms = find_valid_rooms(pos)\n for end_room in end_rooms:\n col = end_room[0]\n for loc in (topmost + hallway_occ):\n val = pos[nodeidx[loc]]\n home_col = val.lower()\n if home_col != col:\n continue\n tcosts = traversal_costs(pos, loc)\n tcost = tcosts[end_room]\n if tcost < float('infinity'):\n # can move to home room\n newpos = list(pos)\n newpos[nodeidx[end_room]] = val\n newpos[nodeidx[loc]] = None\n cost = tcost*move_cost[val] + currcost\n yield Positions(*newpos), cost\n # don't generate any other alternatives, just do this move\n return\n # no moves to home, so generate all possible moves from the rooms into the hallway\n hallway_empty = [node for node in hallway if pos[nodeidx[node]] is None]\n for toprm, hall in itertools.product(topmost, hallway_empty):\n tcosts = traversal_costs(pos, toprm)\n tcost = tcosts[hall]\n if tcost < float('infinity'):\n # can make the move\n val = pos[nodeidx[toprm]]\n newpos = list(pos)\n newpos[nodeidx[hall]] = val\n newpos[nodeidx[toprm]] = None\n cost = tcost*move_cost[val] + currcost\n yield Positions(*newpos), cost", "_____no_output_____" ], [ "def solve(startpos):\n curr_costs = {startpos: 0}\n curr_positions = [startpos]\n finished_costs = []\n while curr_positions:\n new_positions = set()\n for pos in curr_positions:\n currcost = curr_costs[pos]\n for allowedpos, cost in allowed_moves(pos, currcost):\n if is_finished(allowedpos):\n finished_costs.append(cost)\n continue\n if allowedpos in curr_costs:\n if cost < curr_costs[allowedpos]:\n curr_costs[allowedpos] = cost\n new_positions.add(allowedpos)\n else:\n curr_costs[allowedpos] = cost\n new_positions.add(allowedpos)\n curr_positions = new_positions\n print(len(curr_positions))\n print('min cost = ', min(finished_costs))\n ", "_____no_output_____" ], [ "%time solve(example_init)", 
"28\n331\n2202\n8527\n18830\n22350\n12814\n4208\n2862\n2005\n1423\n1005\n806\n565\n351\n173\n70\n37\n17\n11\n8\n3\n0\nmin cost = 44169\nCPU times: user 22.2 s, sys: 172 ms, total: 22.4 s\nWall time: 22.5 s\n" ], [ "%time solve(puzzle_init)", "28\n355\n2487\n10250\n24337\n30845\n17511\n3818\n3141\n2108\n1215\n505\n151\n78\n35\n20\n8\n7\n6\n5\n5\n5\n4\n3\n2\n0\nmin cost = 47064\nCPU times: user 28.1 s, sys: 225 ms, total: 28.4 s\nWall time: 28.4 s\n" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cba136436a81fb3cac785464ec797f4855d844f3
4,575
ipynb
Jupyter Notebook
week_07/Project_Workflow_week_07.ipynb
xhguo86/spiced_academy_backup-
7b65a94d0a03149bb9fc71e35a799074b4412925
[ "MIT" ]
null
null
null
week_07/Project_Workflow_week_07.ipynb
xhguo86/spiced_academy_backup-
7b65a94d0a03149bb9fc71e35a799074b4412925
[ "MIT" ]
null
null
null
week_07/Project_Workflow_week_07.ipynb
xhguo86/spiced_academy_backup-
7b65a94d0a03149bb9fc71e35a799074b4412925
[ "MIT" ]
null
null
null
18.447581
131
0.504918
[ [ [ "# Example Time Series Workflow\n* This notebook **will not run for you**\n* Because this time you'll have to load the data, split the data, feature engineer the data and select the model by yourself!", "_____no_output_____" ], [ "---", "_____no_output_____" ], [ "## 0) Imports", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nfrom sklearn.linear_model import LinearRegression\n# from statsmodels.tsa.ar_model import AutoReg\n\nfrom cleanup import clean_data\nfrom engineer import feature_engineer", "_____no_output_____" ] ], [ [ "## 1) Define the Business Goal\n\nBuild a model that can predict tomorrow's temperature, given the temperature until today, as precisely as possible.", "_____no_output_____" ], [ "## 2) Get the Data\n\n### 2.1) Load the Data", "_____no_output_____" ] ], [ [ "df = pd.read_csv('data.csv')", "_____no_output_____" ] ], [ [ "### 2.2) Clean the Data", "_____no_output_____" ] ], [ [ "df = clean_data(df)", "_____no_output_____" ] ], [ [ "## 3) Train-Test-Split", "_____no_output_____" ] ], [ [ "df_train = df[:-365]\n\ndf_test = df[-365:]", "_____no_output_____" ] ], [ [ "## 4) Visualize the Data", "_____no_output_____" ], [ "## 5) Feature Engineer", "_____no_output_____" ] ], [ [ "df_train_fe = feature_engineer(df_train)", "_____no_output_____" ], [ "# Now assign X and y\ny_train = df_train_fe.copy().iloc[:,0]\nX_train = df_train_fe.copy().iloc[:,1:]", "_____no_output_____" ] ], [ [ "## 6) Train a model", "_____no_output_____" ] ], [ [ "m = LinearRegression()\nm.fit(X_train, y_train)", "_____no_output_____" ] ], [ [ "**You will see other models this week!**", "_____no_output_____" ], [ "## 7) Cross-Validate and Optimize Hyperparameters", "_____no_output_____" ], [ "## 8) Test", "_____no_output_____" ] ], [ [ "df_test_fe = feature_engineer(df_test)\n\ny_test = df_test_fe.copy().iloc[:,0]\nX_test = df_test_fe.copy().iloc[:,1:]", "_____no_output_____" ], [ "r2 = round(m.score(X_test, y_test), 2)", "_____no_output_____" ], [ "print(f'The R-squared of our model is {r2}')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ] ]
cba14aebdc6a67a5c32375795b33f3ffc715ba21
364,695
ipynb
Jupyter Notebook
Kickstarter_EDA_Cleaning.ipynb
pezLyfe/TuftsDataScience
22a0ab8b3de6a4d4043cef1f58ff7ed3ebff862f
[ "MIT" ]
null
null
null
Kickstarter_EDA_Cleaning.ipynb
pezLyfe/TuftsDataScience
22a0ab8b3de6a4d4043cef1f58ff7ed3ebff862f
[ "MIT" ]
null
null
null
Kickstarter_EDA_Cleaning.ipynb
pezLyfe/TuftsDataScience
22a0ab8b3de6a4d4043cef1f58ff7ed3ebff862f
[ "MIT" ]
null
null
null
51.027704
11,972
0.621037
[ [ [ "import pandas as pd\nimport matplotlib as mpl\nimport seaborn as sns\nimport numpy as np\nimport os\nimport re\nimport time ", "_____no_output_____" ] ], [ [ "# Importing the Data\nThis data was taken from the webrobots.io scrape of the kickstarter.com page. I've pulled together data from four different scrape dates (2/16, 2/17, 2/18, and 2/19) and done some initial cleaning. <br><br>For more information on the original dataset and the steps taken for data cleaning, please see the project repository on GitHub <a href = \"https://github.com/pezLyfe/TuftsDataScience\">here</a>", "_____no_output_____" ] ], [ [ "start = time.time()\ndf = pd.DataFrame() #Initialize a dataframe\nfor filename in os.listdir(): #Create an iterator for all objects in the working directory\n    try: #I'm using try/except here because I'm lazy and didn't clean out the folder\n        df = df.append(pd.read_csv(filename), ignore_index = False) #When python finds a valid .csv file, append it\n        end = time.time()\n        print((end - start), filename, len(df)) #Print the filename and the total # of rows so far to track progress\n    except:\n        end = time.time()\n        print((end - start),'Python file') #Print some message when something is wrong ", "0.008048772811889648 Python file\n2.142122507095337 2016Cleaned.csv 152455\n4.739030122756958 2017Cleaned.csv 327540\n7.769051790237427 2018Cleaned.csv 523154\n11.14200234413147 2019Cleaned.csv 731628\n11.150022745132446 Python file\n" ] ], [ [ "# De-duplicating Entries\nThe scraping method used by webrobots includes historical projects, so each scrape date will likely contain duplicates of previous projects.\n\nAdditionally, the scrape is done by searching through each sub-category in Kickstarter's organization structure. Since a project can be listed under multiple sub-categories of a single parent category, there will be multiple entries of the same project via this method as well. 
\n\nLet's determine the extent of the duplicates", "_____no_output_____" ] ], [ [ "start = time.time()\ndf.reset_index(inplace = True)\ndf.drop(labels = 'Unnamed: 0', axis = 1, inplace = True)\ndf.drop(labels = 'index', axis = 1, inplace = True)\nend = time.time()\nprint(end - start)\ndf.head()", "0.8811798095703125\n" ], [ "df.tail() #Check that the indices at the end of the dataframe match as well", "_____no_output_____" ], [ "len(df)", "_____no_output_____" ], [ "#Compare the total number of unique values in the \"ID\" column with the number of entries in the dataframe\nprint(len(df.id.value_counts()), len(df)) ", "267113 731628\n" ], [ "x = df.id.value_counts()\nuniqueIDs = np.unique(df.id.values) #make an array of the unique project ID's\nlen(uniqueIDs)", "_____no_output_____" ], [ "a = df.copy()\na.tail()", "_____no_output_____" ], [ "a.loc[0][:]", "_____no_output_____" ], [ "#Drop items from A on each iteration, this should speed up the execution time\nstart = time.time()\na = df.copy()\nb = pd.DataFrame()\ndupMask = pd.DataFrame()\nfor i in range(100):\n zMask = a.id == uniqueIDs[i]\n z = a[zMask]\n b = b.append(z.iloc[0][:])\n print(i, len(a))\nend = time.time()\nprint (end - start)", "0 731628\n1 731628\n2 731628\n3 731628\n4 731628\n5 731628\n6 731628\n7 731628\n8 731628\n9 731628\n10 731628\n11 731628\n12 731628\n13 731628\n14 731628\n15 731628\n16 731628\n17 731628\n18 731628\n19 731628\n20 731628\n21 731628\n22 731628\n23 731628\n24 731628\n25 731628\n26 731628\n27 731628\n28 731628\n29 731628\n30 731628\n31 731628\n32 731628\n33 731628\n34 731628\n35 731628\n36 731628\n37 731628\n38 731628\n39 731628\n40 731628\n41 731628\n42 731628\n43 731628\n44 731628\n45 731628\n46 731628\n47 731628\n48 731628\n49 731628\n50 731628\n51 731628\n52 731628\n53 731628\n54 731628\n55 731628\n56 731628\n57 731628\n58 731628\n59 731628\n60 731628\n61 731628\n62 731628\n63 731628\n64 731628\n65 731628\n66 731628\n67 731628\n68 731628\n69 731628\n70 731628\n71 731628\n72 731628\n73 731628\n74 731628\n75 731628\n76 731628\n77 731628\n78 731628\n79 731628\n80 731628\n81 731628\n82 731628\n83 731628\n84 731628\n85 731628\n86 731628\n87 731628\n88 731628\n89 731628\n90 731628\n91 731628\n92 731628\n93 731628\n94 731628\n95 731628\n96 731628\n97 731628\n98 731628\n99 731628\n1.181581974029541\n" ], [ "a = df.copy()\nb = pd.DataFrame()\ndupMask = pd.DataFrame()\nstart = time.time()\nfor i in range(3000):\n zMask = a.id == uniqueIDs[i]\n z = a[zMask]\n number = z.iloc[0]['id']\n cat = z.iloc[0]['category']\n pledged = z.iloc[0]['pledged']\n b = b.append(z.iloc[0][:])\n aaMask = a['id'] == number & ((a['category'] != cat) | ~(a['pledged'] != pledged))\n aa = a[aaMask]\n b = b.append(aa, ignore_index = True)\n #a.drop(z.index[:], inplace = True)\n end = time.time()\n print((end - start),i, len(b))\n\n", "0.07365965843200684 0 1\n0.21155858039855957 1 2\n0.2837505340576172 2 3\n0.3619554042816162 3 4\n0.4321415424346924 4 5\n0.5063402652740479 5 6\n0.6056022644042969 6 7\n0.6948399543762207 7 8\n0.779064416885376 8 9\n0.8823387622833252 9 10\n1.0096173286437988 10 11\n1.116398572921753 11 12\n1.2101471424102783 12 13\n1.2803330421447754 13 14\n1.3535301685333252 14 15\n1.476271629333496 15 16\n1.588573694229126 16 17\n1.7093901634216309 17 18\n1.830711841583252 18 19\n1.919950246810913 19 20\n1.9916408061981201 20 21\n2.0638322830200195 21 22\n2.152064323425293 22 23\n2.226763963699341 23 24\n2.2999587059020996 24 25\n2.3821756839752197 25 26\n2.461386203765869 26 27\n2.534580707550049 27 
28\n2.632841110229492 28 29\n2.7040300369262695 29 30\n2.8063015937805176 30 31\n2.883507490158081 31 32\n2.952690839767456 32 33\n3.023878812789917 33 34\n3.147662401199341 34 35\n3.2835209369659424 35 36\n3.4188802242279053 36 37\n3.5311787128448486 37 38\n3.623422145843506 38 39\n3.7261972427368164 39 40\n3.8214502334594727 40 41\n3.946359872817993 41 42\n4.09224534034729 42 43\n4.205047607421875 43 44\n4.2932820320129395 44 45\n4.363966703414917 45 46\n4.464233160018921 46 47\n4.605610132217407 47 48\n4.700358867645264 48 49\n4.7936060428619385 49 50\n4.8763251304626465 50 51\n4.95553731918335 51 52\n5.049785375595093 52 53\n5.119973421096802 53 54\n5.19417142868042 54 55\n5.280399799346924 55 56\n5.3525917530059814 56 57\n5.432804584503174 57 58\n5.533071756362915 58 59\n5.641617298126221 59 60\n5.761437892913818 60 61\n5.862704515457153 61 62\n5.95544958114624 62 63\n6.025635719299316 63 64\n6.0973265171051025 64 65\n6.184560060501099 65 66\n6.280816316604614 66 67\n6.370052099227905 67 68\n6.462296724319458 68 69\n6.546520233154297 69 70\n6.61570405960083 70 71\n6.689901828765869 71 72\n6.786158323287964 72 73\n6.858349561691284 73 74\n6.937596082687378 74 75\n7.017770767211914 75 76\n7.08795690536499 76 77\n7.1641600131988525 77 78\n7.246377229690552 78 79\n7.323582410812378 79 80\n7.399785041809082 80 81\n7.504380464553833 81 82\n7.625701665878296 82 83\n7.718447685241699 83 84\n7.794149160385132 84 85\n7.866843223571777 85 86\n7.955077171325684 86 87\n8.03729772567749 87 88\n8.137562036514282 88 89\n8.238831996917725 89 90\n8.335200548171997 90 91\n8.475069999694824 91 92\n8.583357572555542 92 93\n8.682120084762573 93 94\n8.817983150482178 94 95\n8.929776430130005 95 96\n9.027536630630493 96 97\n9.125296115875244 97 98\n9.258149147033691 98 99\n9.371949911117554 99 100\n9.4491548538208 100 101\n9.534882307052612 101 102\n9.606072664260864 102 103\n9.693304538726807 103 104\n9.774519443511963 104 105\n9.84771466255188 105 106\n9.920909404754639 106 107\n10.000118494033813 107 108\n10.074315547943115 108 109\n10.145504474639893 109 110\n10.251786231994629 110 111\n10.323978424072266 111 112\n10.412212133407593 112 113\n10.485405921936035 113 114\n10.56160831451416 114 115\n10.659869909286499 115 116\n10.754032135009766 116 117\n10.838753938674927 117 118\n10.919467449188232 118 119\n10.994167566299438 119 120\n11.087414741516113 120 121\n11.158603429794312 121 122\n11.24583625793457 122 123\n11.336074829101562 123 124\n11.408266305923462 124 125\n11.480457305908203 125 126\n11.574707508087158 126 127\n11.650910139083862 127 128\n11.724144220352173 128 129\n11.81534743309021 129 130\n11.887538194656372 130 131\n11.984797716140747 131 132\n12.071025848388672 132 133\n12.158255338668823 133 134\n12.243482112884521 134 135\n12.33372163772583 135 136\n12.40490984916687 136 137\n12.48612666130066 137 138\n12.570348978042603 138 139\n12.640535354614258 139 140\n12.722754001617432 140 141\n12.801963806152344 141 142\n12.873153448104858 142 143\n12.94735074043274 143 144\n13.02254867553711 144 145\n13.096745491027832 145 146\n13.169940710067749 146 147\n13.244138956069946 147 148\n13.320339441299438 148 149\n13.4055655002594 149 150\n13.486780643463135 150 151\n13.575014591217041 151 152\n13.657232761383057 152 153\n13.745467901229858 153 154\n13.824679136276245 154 155\n13.905894994735718 155 156\n13.988112211227417 156 157\n14.063311576843262 157 158\n14.135502576828003 158 159\n14.225743293762207 159 160\n14.29893708229065 160 161\n14.37614369392395 161 162\n14.467382431030273 162 
163\n14.53857159614563 163 164\n14.610763549804688 164 165\n14.71504020690918 165 166\n14.789236783981323 166 167\n14.868448972702026 167 168\n14.95768404006958 168 169\n15.030879497528076 169 170\n15.127133131027222 170 171\n15.208349227905273 171 172\n15.28354811668396 172 173\n15.355740308761597 173 174\n15.448989152908325 174 175\n15.521180629730225 175 176\n15.598384141921997 176 177\n15.680601596832275 177 178\n15.760814428329468 178 179\n15.843032598495483 179 180\n15.921268224716187 180 181\n15.993431806564331 181 182\n16.070636987686157 182 183\n16.147841691970825 183 184\n16.22905707359314 184 185\n16.300247192382812 185 186\n16.379456281661987 186 187\n16.44863986968994 187 188\n16.520830869674683 188 189\n16.596031188964844 189 190\n16.666218996047974 190 191\n16.742419958114624 191 192\n16.82864999771118 192 193\n16.89983892440796 193 194\n16.97303318977356 194 195\n17.07129216194153 195 196\n17.14348316192627 196 197\n17.21868348121643 197 198\n17.310927629470825 198 199\n17.385124444961548 199 200\n17.493411540985107 200 201\n17.57663321495056 201 202\n17.652836084365845 202 203\n17.726030111312866 203 204\n17.808248281478882 204 205\n17.877431631088257 205 206\n17.95062518119812 206 207\n18.03585195541382 207 208\n18.10704207420349 208 209\n18.17923355102539 209 210\n18.25242781639099 210 211\n18.32461929321289 211 212\n18.40282440185547 212 213\n18.504094123840332 213 214\n18.591325521469116 214 215\n18.679559230804443 215 216\n18.75074863433838 216 217\n18.82193899154663 217 218\n18.900144338607788 218 219\n18.97333836555481 219 220\n19.04753541946411 220 221\n19.134767293930054 221 222\n19.217987775802612 222 223\n19.28917646408081 223 224\n19.372398376464844 224 225\n19.46564483642578 225 226\n19.541846990585327 226 227\n19.634093284606934 227 228\n19.736364126205444 228 229\n19.822592973709106 229 230\n19.91584086418152 230 231\n19.986027717590332 231 232\n20.05721640586853 232 233\n20.164499521255493 233 234\n20.235687732696533 234 235\n20.335954189300537 235 236\n20.421180486679077 236 237\n20.491368293762207 237 238\n20.564561367034912 238 239\n20.643772840499878 239 240\n20.741029262542725 240 241\n20.824249505996704 241 242\n20.903459548950195 242 243\n20.974648475646973 243 244\n21.044834852218628 244 245\n21.129058837890625 245 246\n21.201250076293945 246 247\n21.276450395584106 247 248\n21.36769199371338 248 249\n21.45291757583618 249 250\n21.52611231803894 250 251\n21.613343238830566 251 252\n21.686538696289062 252 253\n21.760735511779785 253 254\n21.86713194847107 254 255\n21.998480558395386 255 256\n22.113787174224854 256 257\n22.20653223991394 257 258\n22.30178427696228 258 259\n22.376983880996704 259 260\n22.449678659439087 260 261\n22.53290057182312 261 262\n22.604090690612793 262 263\n22.67427682876587 263 264\n22.76651930809021 264 265\n22.837708473205566 265 266\n22.913910627365112 266 267\n22.996128797531128 267 268\n23.068320512771606 268 269\n23.16758418083191 269 270\n23.25080442428589 270 271\n23.328009366989136 271 272\n23.400200843811035 272 273\n23.48041343688965 273 274\n23.552605867385864 274 275\n23.638834714889526 275 276\n23.726065397262573 276 277\n23.799259901046753 277 278\n23.871451139450073 278 279\n23.9496591091156 279 280\n24.024859189987183 280 281\n24.10106062889099 281 282\n24.18628716468811 282 283\n24.25847816467285 283 284\n24.329667329788208 284 285\n24.424920082092285 285 286\n24.495107173919678 286 287\n24.567298650741577 287 288\n24.654529809951782 288 289\n24.725719213485718 289 290\n24.8473002910614 290 
291\n24.957091331481934 291 292\n25.050339460372925 292 293\n25.14107894897461 293 294\n25.216778993606567 294 295\n25.29047465324402 295 296\n25.383727312088013 296 297\n25.468953371047974 297 298\n25.56520938873291 298 299\n25.64642572402954 299 300\n25.717613220214844 300 301\n25.803841590881348 301 302\n25.924444913864136 302 303\n26.05278491973877 303 304\n26.17962622642517 304 305\n26.290417432785034 305 306\n26.39920449256897 306 307\n26.50047278404236 307 308\n26.592718601226807 308 309\n26.665412425994873 309 310\n26.752142429351807 310 311\n26.848897218704224 311 312\n26.920588493347168 312 313\n26.99478578567505 313 314\n27.084522247314453 314 315\n27.160223245620728 315 316\n27.2339186668396 316 317\n27.318644762039185 317 318\n" ], [ "bigMask = df.category == dupMask.Category & df.pledged == dupMask.Pledged & df.id == dupMask.Number & df.index != dupMask.Indices\ndeDuped = a[bigMask]", "_____no_output_____" ], [ "deduped.to_csv('deDupedMaybe?', sep = ',')", "_____no_output_____" ], [ "a = df.copy()\nfor i in range(len(uniqueIDs)):\n zMask = a.id == uniqueIDs[i]\n z = a[zMask]\n for j in range(len(z)-1):\n firstIndex = z.index[j]\n if z.iloc[j]['category'] == z.iloc[j+1]['category'] and z.iloc[j]['pledged'] == z.iloc[j+1]['pledged']:\n a.drop([firstIndex], axis = 0, inplace = True)\n print(i, len(a))", "0 731628\n1 731626\n2 731623\n3 731620\n4 731617\n5 731617\n6 731614\n7 731613\n8 731611\n9 731608\n10 731608\n11 731607\n12 731604\n13 731601\n14 731598\n15 731597\n16 731594\n17 731591\n18 731590\n19 731588\n20 731586\n21 731585\n22 731585\n23 731585\n24 731584\n25 731584\n26 731581\n27 731580\n28 731578\n29 731576\n30 731574\n31 731572\n32 731571\n33 731568\n34 731565\n35 731564\n36 731563\n37 731563\n38 731562\n39 731559\n40 731556\n41 731554\n42 731553\n43 731553\n44 731550\n45 731549\n46 731549\n47 731545\n48 731543\n49 731542\n50 731540\n51 731539\n52 731537\n53 731537\n54 731537\n55 731537\n56 731533\n57 731533\n58 731532\n59 731532\n60 731529\n61 731529\n62 731528\n63 731526\n64 731523\n65 731523\n66 731523\n67 731523\n68 731520\n69 731517\n70 731517\n71 731514\n72 731512\n73 731511\n74 731510\n75 731507\n76 731504\n77 731504\n78 731504\n79 731500\n80 731498\n81 731494\n82 731494\n83 731491\n84 731490\n85 731487\n86 731485\n87 731483\n88 731482\n89 731479\n90 731476\n91 731474\n92 731471\n93 731470\n94 731466\n95 731463\n96 731460\n97 731458\n98 731457\n99 731454\n100 731454\n101 731453\n102 731450\n103 731445\n104 731443\n105 731440\n106 731435\n107 731435\n108 731432\n109 731429\n110 731428\n111 731428\n112 731426\n113 731423\n114 731423\n115 731421\n116 731421\n117 731420\n118 731420\n119 731418\n120 731418\n121 731416\n122 731415\n123 731414\n124 731411\n125 731408\n126 731405\n127 731403\n128 731400\n129 731398\n130 731395\n131 731391\n132 731391\n133 731388\n134 731387\n135 731387\n136 731384\n137 731382\n138 731382\n139 731381\n140 731378\n141 731376\n142 731376\n143 731374\n144 731374\n145 731373\n146 731372\n147 731372\n148 731372\n149 731372\n150 731369\n151 731367\n152 731365\n153 731364\n154 731363\n155 731360\n156 731359\n157 731358\n158 731358\n159 731358\n160 731358\n161 731355\n162 731353\n163 731353\n164 731349\n165 731348\n166 731345\n167 731343\n168 731342\n169 731341\n170 731338\n171 731337\n172 731336\n173 731336\n174 731333\n175 731333\n176 731333\n177 731333\n178 731333\n179 731330\n180 731329\n181 731328\n182 731328\n183 731325\n184 731325\n185 731325\n186 731323\n187 731320\n188 731318\n189 731317\n190 731317\n191 731314\n192 731311\n193 
731307\n194 731304\n195 731304\n196 731299\n197 731296\n198 731293\n199 731290\n200 731287\n201 731284\n202 731281\n203 731279\n204 731277\n205 731276\n206 731276\n207 731273\n208 731270\n209 731267\n210 731264\n211 731264\n212 731264\n213 731261\n214 731258\n215 731255\n216 731254\n217 731254\n218 731252\n219 731251\n220 731248\n221 731246\n222 731243\n223 731243\n224 731240\n225 731237\n226 731234\n227 731234\n228 731231\n229 731229\n230 731228\n231 731227\n232 731227\n233 731227\n234 731226\n235 731225\n236 731222\n237 731220\n238 731217\n239 731215\n240 731215\n241 731215\n242 731212\n243 731211\n244 731210\n245 731209\n246 731206\n247 731204\n248 731204\n249 731203\n250 731202\n251 731199\n252 731198\n253 731195\n254 731192\n255 731189\n256 731187\n257 731187\n258 731184\n259 731184\n260 731183\n261 731183\n262 731181\n263 731179\n264 731176\n265 731173\n266 731172\n267 731169\n268 731169\n269 731167\n270 731165\n271 731162\n272 731159\n273 731156\n274 731154\n275 731151\n276 731146\n277 731144\n278 731143\n279 731140\n280 731137\n281 731135\n282 731133\n283 731131\n284 731130\n285 731127\n286 731126\n287 731124\n288 731122\n289 731122\n290 731120\n291 731117\n292 731117\n293 731117\n294 731115\n295 731111\n296 731111\n297 731109\n298 731106\n299 731103\n300 731099\n301 731096\n302 731093\n303 731092\n304 731092\n305 731091\n306 731089\n307 731087\n308 731087\n309 731086\n310 731086\n311 731084\n312 731082\n313 731079\n314 731078\n315 731075\n316 731072\n317 731072\n318 731070\n319 731067\n320 731064\n321 731064\n322 731063\n323 731060\n324 731060\n325 731057\n326 731054\n327 731054\n328 731051\n329 731048\n330 731047\n331 731044\n332 731041\n333 731038\n334 731037\n335 731034\n336 731034\n337 731032\n338 731029\n339 731029\n340 731026\n341 731024\n342 731021\n343 731020\n344 731018\n345 731016\n346 731013\n347 731010\n348 731009\n349 731006\n350 731005\n351 731002\n352 731002\n353 731000\n354 731000\n355 730998\n356 730997\n357 730995\n358 730993\n359 730992\n360 730992\n361 730991\n362 730991\n363 730991\n364 730991\n365 730990\n366 730989\n367 730988\n368 730987\n369 730985\n370 730984\n371 730984\n372 730984\n373 730984\n374 730982\n375 730982\n376 730981\n377 730979\n378 730976\n379 730976\n380 730973\n381 730973\n382 730970\n383 730968\n384 730968\n385 730968\n386 730967\n387 730964\n388 730962\n389 730958\n390 730954\n391 730952\n392 730952\n393 730951\n394 730951\n395 730948\n396 730946\n397 730946\n398 730943\n399 730940\n400 730938\n401 730938\n402 730936\n403 730936\n404 730935\n405 730932\n406 730929\n407 730927\n408 730927\n409 730925\n410 730922\n411 730919\n412 730919\n413 730918\n414 730917\n415 730916\n416 730914\n417 730914\n418 730911\n419 730908\n420 730907\n421 730906\n422 730906\n423 730903\n424 730903\n425 730903\n426 730900\n427 730898\n428 730895\n429 730892\n430 730890\n431 730889\n432 730889\n433 730885\n434 730884\n435 730884\n436 730882\n437 730879\n438 730878\n439 730875\n440 730872\n441 730869\n442 730867\n443 730866\n444 730864\n445 730860\n446 730859\n447 730854\n448 730852\n449 730849\n450 730849\n451 730849\n452 730846\n453 730843\n454 730841\n455 730838\n456 730838\n457 730835\n458 730832\n459 730829\n460 730828\n461 730828\n462 730825\n463 730825\n464 730822\n465 730818\n466 730815\n467 730812\n468 730812\n469 730809\n470 730809\n471 730806\n472 730804\n473 730803\n474 730803\n475 730803\n476 730800\n477 730799\n478 730796\n479 730796\n480 730793\n481 730793\n482 730792\n483 730792\n484 730791\n485 730791\n486 730789\n487 730787\n488 730787\n489 
730786\n490 730783\n491 730778\n492 730775\n493 730772\n494 730771\n495 730771\n496 730771\n497 730771\n498 730767\n499 730767\n500 730764\n501 730764\n502 730762\n503 730761\n504 730760\n505 730758\n506 730755\n507 730752\n508 730752\n509 730750\n510 730750\n511 730747\n512 730744\n513 730744\n514 730741\n515 730741\n516 730741\n517 730739\n518 730737\n519 730737\n520 730734\n521 730733\n522 730731\n523 730731\n524 730731\n525 730730\n526 730729\n527 730729\n528 730728\n529 730725\n530 730723\n531 730721\n532 730720\n533 730718\n534 730715\n535 730714\n536 730712\n537 730711\n538 730711\n539 730711\n540 730711\n541 730711\n542 730710\n543 730709\n544 730708\n545 730704\n546 730702\n547 730702\n548 730699\n549 730698\n550 730698\n551 730696\n552 730693\n553 730691\n554 730690\n555 730687\n556 730684\n557 730682\n558 730682\n559 730679\n560 730676\n561 730676\n562 730673\n563 730672\n564 730669\n565 730669\n566 730667\n567 730664\n568 730664\n569 730664\n570 730662\n571 730659\n572 730656\n573 730656\n574 730656\n575 730656\n576 730654\n577 730652\n578 730651\n579 730649\n580 730648\n581 730645\n582 730645\n583 730643\n584 730643\n585 730642\n586 730639\n587 730636\n588 730636\n589 730636\n590 730633\n591 730633\n592 730629\n593 730626\n594 730624\n595 730622\n596 730620\n597 730617\n598 730616\n599 730616\n600 730613\n601 730611\n602 730609\n603 730609\n604 730606\n605 730604\n606 730604\n607 730601\n608 730601\n609 730599\n610 730597\n611 730594\n612 730591\n613 730590\n614 730590\n615 730588\n616 730586\n617 730585\n618 730585\n619 730585\n620 730582\n621 730581\n622 730578\n623 730577\n624 730577\n625 730574\n626 730571\n627 730570\n628 730569\n629 730566\n630 730564\n631 730562\n632 730559\n633 730558\n634 730555\n635 730553\n636 730553\n637 730552\n638 730551\n639 730549\n640 730546\n641 730543\n642 730542\n643 730542\n644 730538\n645 730538\n646 730536\n647 730535\n648 730532\n649 730529\n650 730528\n651 730526\n652 730524\n653 730521\n654 730519\n655 730516\n656 730516\n657 730513\n658 730510\n659 730507\n660 730506\n661 730506\n662 730504\n663 730502\n664 730500\n665 730500\n666 730500\n667 730497\n668 730494\n669 730492\n670 730490\n671 730487\n672 730487\n673 730485\n674 730483\n675 730481\n676 730479\n677 730479\n678 730476\n679 730473\n680 730470\n681 730468\n682 730465\n683 730463\n684 730462\n685 730460\n686 730457\n687 730455\n688 730452\n689 730452\n690 730449\n691 730449\n692 730449\n693 730446\n694 730446\n695 730442\n696 730439\n697 730437\n698 730434\n699 730431\n700 730428\n701 730428\n702 730427\n703 730424\n704 730424\n705 730424\n706 730421\n707 730421\n708 730417\n709 730415\n710 730415\n711 730415\n712 730412\n713 730412\n714 730412\n715 730409\n716 730408\n717 730408\n718 730407\n719 730407\n720 730406\n721 730405\n722 730405\n723 730404\n724 730403\n725 730402\n726 730402\n727 730401\n728 730401\n729 730401\n730 730401\n731 730399\n732 730399\n733 730397\n734 730394\n735 730392\n736 730389\n737 730388\n738 730385\n739 730384\n740 730384\n741 730382\n742 730382\n743 730379\n744 730375\n745 730372\n746 730371\n747 730368\n748 730365\n749 730362\n750 730360\n751 730355\n752 730351\n753 730350\n754 730350\n755 730350\n" ], [ "len(df)", "_____no_output_____" ], [ "exampleMask = df['id'] == 197154\nexample = df[exampleMask]\nexample", "_____no_output_____" ], [ "duplicateID = []\nfor i in len(df):\n \n\ngrouped = df.groupby(\"id\")\ngrouped.groups", "_____no_output_____" ], [ "a.head()", "_____no_output_____" ], [ "successFrame = a[a['state'] == 
'successful']\nspotFrame = a[(a['spotlight'] == True) & (a['state'] == 'successful')]\nstaFrame = a[a['staff_pick'] == True]\nstaSucFrame = a[(a['staff_pick'] == True) & (a['state'] == 'successful')]\nprint('Succesful = ',len(successFrame), 'Spotlighted =', len(spotFrame), 'Staff Picks =', len(staSucFrame), len(staFrame))\npGen = len(successFrame)/len(a)\npSpot = len(spotFrame)/len(a)\npStaff = len(staSucFrame)/len(staFrame)\npPicked = len(staFrame)/len(a)\nprint('P-general = ', pGen,\n 'P-Spotlight = ', pSpot,\n 'P-Staff Picks = ', pStaff,\n 'P-Picked for Staff = ', pPicked)", "Succesful = 363492 Spotlighted = 363492 Staff Picks = 79168 93579\nP-general = 0.4968262559661467 P-Spotlight = 0.4968262559661467 P-Staff Picks = 0.8460017739022644 P-Picked for Staff = 0.1279051649198773\n" ], [ "canFrame = a[~((a['state'] == 'canceled') | (a['state'] == 'active')) ]\nsuccessFrame = canFrame[canFrame['state'] == 'successful']\nspotFrame = canFrame[(canFrame['spotlight'] == True) & (canFrame['state'] == 'successful')]\nstaFrame = canFrame[canFrame['staff_pick'] == True]\nstaSucFrame = canFrame[(canFrame['staff_pick'] == True) & (canFrame['state'] == 'successful')]\nprint('Succesful = ',len(successFrame), 'Spotlighted =', len(spotFrame), 'Staff Picks =', len(staSucFrame), len(staFrame))\npGen = len(successFrame)/len(canFrame)\npSpot = len(spotFrame)/len(canFrame)\npStaff = len(staSucFrame)/len(staFrame)\npPicked = len(staFrame)/len(canFrame)\nprint('P-general = ', pGen,\n 'P-Spotlight = ', pSpot,\n 'P-Staff Picks = ', pStaff,\n 'P-Picked for Staff = ', pPicked)", "Succesful = 363492 Spotlighted = 363492 Staff Picks = 79168 92267\nP-general = 0.5252182557457397 P-Spotlight = 0.5252182557457397 P-Staff Picks = 0.8580315822558444 P-Picked for Staff = 0.1333187877666968\n" ], [ "df.hist('goal', bins = 50)", "_____no_output_____" ], [ "sortbyPrice = df.sort_values('converted_pledged_amount', axis = 0, ascending = False)", "_____no_output_____" ], [ "bins = [1000, 3000, 7000, 15000, 50000, 150000, 1000000]", "_____no_output_____" ], [ "sortbyPrice.head()", "_____no_output_____" ], [ "df.hist('converted_pledged_amount', bins = bins)", "_____no_output_____" ], [ "mpl.pyplot.scatter(df['converted_pledged_amount'], df['backers_count'])", "_____no_output_____" ], [ "mpl.pyplot.boxplot(mergedData['converted_pledged_amount'])", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cba14e5dad1d38d57fd51a5ed8ac151bf706a4c0
35,298
ipynb
Jupyter Notebook
nbs/44_tutorial.tabular.ipynb
EmbraceLife/fastai
85258502eff144708d657aa4b4d2ab4c2a2b3a0b
[ "Apache-2.0" ]
1
2022-03-13T00:09:58.000Z
2022-03-13T00:09:58.000Z
nbs/44_tutorial.tabular.ipynb
EmbraceLife/fastai
85258502eff144708d657aa4b4d2ab4c2a2b3a0b
[ "Apache-2.0" ]
null
null
null
nbs/44_tutorial.tabular.ipynb
EmbraceLife/fastai
85258502eff144708d657aa4b4d2ab4c2a2b3a0b
[ "Apache-2.0" ]
null
null
null
29.712121
405
0.392883
[ [ [ "#|hide\n#|skip\n! [ -e /content ] && pip install -Uqq fastai # upgrade fastai on colab", "_____no_output_____" ] ], [ [ "# Tabular training\n\n> How to use the tabular application in fastai", "_____no_output_____" ], [ "To illustrate the tabular application, we will use the example of the [Adult dataset](https://archive.ics.uci.edu/ml/datasets/Adult) where we have to predict if a person is earning more or less than $50k per year using some general data.", "_____no_output_____" ] ], [ [ "from fastai.tabular.all import *", "_____no_output_____" ] ], [ [ "We can download a sample of this dataset with the usual `untar_data` command:", "_____no_output_____" ] ], [ [ "path = untar_data(URLs.ADULT_SAMPLE)\npath.ls()", "_____no_output_____" ] ], [ [ "Then we can have a look at how the data is structured:", "_____no_output_____" ] ], [ [ "df = pd.read_csv(path/'adult.csv')\ndf.head()", "_____no_output_____" ] ], [ [ "Some of the columns are continuous (like age) and we will treat them as float numbers we can feed our model directly. Others are categorical (like workclass or education) and we will convert them to a unique index that we will feed to embedding layers. We can specify our categorical and continuous column names, as well as the name of the dependent variable in `TabularDataLoaders` factory methods:", "_____no_output_____" ] ], [ [ "dls = TabularDataLoaders.from_csv(path/'adult.csv', path=path, y_names=\"salary\",\n cat_names = ['workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race'],\n cont_names = ['age', 'fnlwgt', 'education-num'],\n procs = [Categorify, FillMissing, Normalize])", "_____no_output_____" ] ], [ [ "The last part is the list of pre-processors we apply to our data:\n\n- `Categorify` is going to take every categorical variable and make a map from integer to unique categories, then replace the values by the corresponding index.\n- `FillMissing` will fill the missing values in the continuous variables by the median of existing values (you can choose a specific value if you prefer)\n- `Normalize` will normalize the continuous variables (subtract the mean and divide by the std)\n\n", "_____no_output_____" ], [ "To further expose what's going on below the surface, let's rewrite this utilizing `fastai`'s `TabularPandas` class. We will need to make one adjustment, which is defining how we want to split our data. By default the factory method above used a random 80/20 split, so we will do the same:", "_____no_output_____" ] ], [ [ "splits = RandomSplitter(valid_pct=0.2)(range_of(df))", "_____no_output_____" ], [ "to = TabularPandas(df, procs=[Categorify, FillMissing,Normalize],\n cat_names = ['workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race'],\n cont_names = ['age', 'fnlwgt', 'education-num'],\n y_names='salary',\n splits=splits)", "_____no_output_____" ] ], [ [ "Once we build our `TabularPandas` object, our data is completely preprocessed as seen below:", "_____no_output_____" ] ], [ [ "to.xs.iloc[:2]", "_____no_output_____" ] ], [ [ "Now we can build our `DataLoaders` again:", "_____no_output_____" ] ], [ [ "dls = to.dataloaders(bs=64)", "_____no_output_____" ] ], [ [ "> Later we will explore why using `TabularPandas` to preprocess will be valuable.", "_____no_output_____" ], [ "The `show_batch` method works like for every other application:", "_____no_output_____" ] ], [ [ "dls.show_batch()", "_____no_output_____" ] ], [ [ "We can define a model using the `tabular_learner` method. 
When we define our model, `fastai` will try to infer the loss function based on our `y_names` earlier. \n\n**Note**: Sometimes with tabular data, your `y`'s may be encoded (such as 0 and 1). In such a case you should explicitly pass `y_block = CategoryBlock` in your constructor so `fastai` won't presume you are doing regression.", "_____no_output_____" ] ], [ [ "learn = tabular_learner(dls, metrics=accuracy)", "_____no_output_____" ] ], [ [ "And we can train that model with the `fit_one_cycle` method (the `fine_tune` method won't be useful here since we don't have a pretrained model).", "_____no_output_____" ] ], [ [ "learn.fit_one_cycle(1)", "_____no_output_____" ] ], [ [ "We can then have a look at some predictions:", "_____no_output_____" ] ], [ [ "learn.show_results()", "_____no_output_____" ] ], [ [ "Or use the predict method on a row:", "_____no_output_____" ] ], [ [ "row, clas, probs = learn.predict(df.iloc[0])", "_____no_output_____" ], [ "row.show()", "_____no_output_____" ], [ "clas, probs", "_____no_output_____" ] ], [ [ "To get prediction on a new dataframe, you can use the `test_dl` method of the `DataLoaders`. That dataframe does not need to have the dependent variable in its column.", "_____no_output_____" ] ], [ [ "test_df = df.copy()\ntest_df.drop(['salary'], axis=1, inplace=True)\ndl = learn.dls.test_dl(test_df)", "_____no_output_____" ] ], [ [ "Then `Learner.get_preds` will give you the predictions:", "_____no_output_____" ] ], [ [ "learn.get_preds(dl=dl)", "_____no_output_____" ] ], [ [ "> Note: Since machine learning models can't magically understand categories it was never trained on, the data should reflect this. If there are different missing values in your test data you should address this before training", "_____no_output_____" ], [ "## `fastai` with Other Libraries\n\nAs mentioned earlier, `TabularPandas` is a powerful and easy preprocessing tool for tabular data. Integration with libraries such as Random Forests and XGBoost requires only one extra step, that the `.dataloaders` call did for us. Let's look at our `to` again. Its values are stored in a `DataFrame` like object, where we can extract the `cats`, `conts,` `xs` and `ys` if we want to:", "_____no_output_____" ] ], [ [ "to.xs[:3]", "_____no_output_____" ] ], [ [ "Now that everything is encoded, you can then send this off to XGBoost or Random Forests by extracting the train and validation sets and their values:", "_____no_output_____" ] ], [ [ "X_train, y_train = to.train.xs, to.train.ys.values.ravel()\nX_test, y_test = to.valid.xs, to.valid.ys.values.ravel()", "_____no_output_____" ] ], [ [ "And now we can directly send this in!", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
cba1562de240e5b6ca6999de3cb5f87e91e79380
17,025
ipynb
Jupyter Notebook
notebooks/4.1_gating_test/0.1_60x_overlays.field_001.ipynb
ggirelli/deconwolf-smFISH-analysis
28fb971f320b50ff25c1ad865c187bfe6218986a
[ "MIT" ]
null
null
null
notebooks/4.1_gating_test/0.1_60x_overlays.field_001.ipynb
ggirelli/deconwolf-smFISH-analysis
28fb971f320b50ff25c1ad865c187bfe6218986a
[ "MIT" ]
null
null
null
notebooks/4.1_gating_test/0.1_60x_overlays.field_001.ipynb
ggirelli/deconwolf-smFISH-analysis
28fb971f320b50ff25c1ad865c187bfe6218986a
[ "MIT" ]
null
null
null
33.914343
149
0.379031
[ [ [ "import pandas as pd\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport skimage as sk\nfrom skimage import measure\nimport os\nimport tifffile\nfrom tqdm import tqdm", "_____no_output_____" ], [ "dots_data = pd.read_csv(\"field_001.gated_dots.tsv\", sep=\"\\t\")", "_____no_output_____" ], [ "dots_data2 = dots_data.loc[\"60x\" == dots_data[\"magnification\"], :]", "_____no_output_____" ], [ "dots_data2", "_____no_output_____" ], [ "ref_raw = dots_data2.loc[\"raw\" == dots_data2[\"image_type\"], :].reset_index(drop=True)\nref__dw = dots_data2.loc[\"dw\" == dots_data2[\"image_type\"], :].reset_index(drop=True)\nraw_image_folder_path = \"/mnt/data/Imaging/202105-Deconwolf/data_210726/60x_raw\"\ndw__image_folder_path = \"/mnt/data/Imaging/202105-Deconwolf/data_210726/60x_dw\"\nmask_folder_path = \"../../data/60x_mask/dilated_labels_watershed\"", "_____no_output_____" ], [ "current_field_id = 1\n\nprint(f\"Field #{current_field_id}\")\n\nraw_max_z_proj = tifffile.imread(os.path.join(raw_image_folder_path, f\"a647_{current_field_id:03d}.tif\")).max(0)\ndw__max_z_proj = tifffile.imread(os.path.join(dw__image_folder_path, f\"a647_{current_field_id:03d}.tif\")).max(0)\n\nlabels = tifffile.imread(os.path.join(mask_folder_path, f\"a647_{current_field_id:03d}.dilated_labels.tiff\")).reshape(raw_max_z_proj.shape)\n\nfield_raw_dots = ref_raw.loc[ref_raw[\"series_id\"] == current_field_id, :].sort_values(\"Value2\", ascending=False)\nfield_dw__dots = ref__dw.loc[ref__dw[\"series_id\"] == current_field_id, :].sort_values(\"Value2\", ascending=False)\n\nselected_raw_dots = field_raw_dots.reset_index(drop=True)\nselected_dw__dots = field_dw__dots.reset_index(drop=True)\n\nfig3, ax = plt.subplots(figsize=(20, 10), ncols=2, constrained_layout=True)\nfig3.suptitle(f\"Field #{current_field_id}\")\nprint(\" > Plotting raw\")\nax[0].set_title(f\"60x_raw (n.dots={selected_raw_dots.shape[0]})\")\nax[0].imshow(\n raw_max_z_proj, cmap=plt.get_cmap(\"gray\"), interpolation=\"none\",\n vmin=raw_max_z_proj.min(), vmax=raw_max_z_proj.max(),\n resample=False, filternorm=False)\nax[0].scatter(\n x=selected_raw_dots[\"y\"].values,\n y=selected_raw_dots[\"x\"].values,\n s=30, facecolors='none', edgecolors='r', linewidth=.5)\nprint(\" > Plotting dw\")\nax[1].set_title(f\"60x_dw (n.dots={selected_dw__dots.shape[0]})\")\nax[1].imshow(\n dw__max_z_proj, cmap=plt.get_cmap(\"gray\"), interpolation=\"none\",\n vmin=dw__max_z_proj.min()*1.5, vmax=dw__max_z_proj.max()*.5,\n resample=False, filternorm=False)\nax[1].scatter(\n x=selected_dw__dots[\"y\"].values,\n y=selected_dw__dots[\"x\"].values,\n s=30, facecolors='none', edgecolors='r', linewidth=.5)\nprint(\" > Plotting contours\")\nfor lid in tqdm(range(1, labels.max()), desc=\"nucleus\"):\n contours = measure.find_contours(labels == lid, 0.8)\n for contour in contours:\n ax[0].scatter(x=contour[:,1], y=contour[:,0], c=\"yellow\", s=.005)\n ax[1].scatter(x=contour[:,1], y=contour[:,0], c=\"yellow\", s=.005)\nplt.close(fig3)\nprint(\" > Exporting\")\nfig3.savefig(f\"overlay_{current_field_id:03d}.60x.png\", bbox_inches='tight')\nprint(\" ! DONE\")", "Field #1\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code" ] ]
cba1569533d7437486b24f2fcb18ed639e45418d
380,995
ipynb
Jupyter Notebook
proyecto_7.ipynb
julianrojas19/julianrojas19.github.io
a5f9c68badbb7dd210e2fb5384e66985c1ae623d
[ "MIT" ]
null
null
null
proyecto_7.ipynb
julianrojas19/julianrojas19.github.io
a5f9c68badbb7dd210e2fb5384e66985c1ae623d
[ "MIT" ]
null
null
null
proyecto_7.ipynb
julianrojas19/julianrojas19.github.io
a5f9c68badbb7dd210e2fb5384e66985c1ae623d
[ "MIT" ]
null
null
null
212.017251
87,593
0.826405
[ [ [ "<a href=\"https://colab.research.google.com/github/julianrojas19/julianrojas19.github.io/blob/main/proyecto_7.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns", "_____no_output_____" ], [ "df=pd.read_csv('/content/economicas.csv')", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "df.shape", "_____no_output_____" ], [ "df.columns.str.upper()", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "economista = df.filter(['ESTU_CONSECUTIVO\t','RESULT_PUNTAJE','RESULT_DESEMPENO','ESTU_GENERO','ESTU_PRGM_ACADEMICO','ESTU_SNIES_PRGMACADEMICO', 'GRUPOREFERENCIA','MOD_RAZONA_CUANTITAT_PUNT', 'MOD_LECTURA_CRITICA_PUNT','MOD_COMPETEN_CIUDADA_PUNT', 'MOD_INGLES_PUNT', 'MOD_COMUNI_ESCRITA_PUNT', 'PUNT_GLOBAL','PERCENTIL_GLOBAL'])", "_____no_output_____" ], [ "economista", "_____no_output_____" ], [ "economista.count()", "_____no_output_____" ], [ "from scipy.spatial.distance import mahalanobis\nfrom scipy.stats import chi2\nimport scipy.stats as stats", "_____no_output_____" ], [ "multipydf = economista.corr()\nf, ax=plt.subplots(figsize=(11, 8))\nsns.heatmap(multipydf, vmax=.9, square=True)", "_____no_output_____" ], [ "sns.heatmap(economista.corr(), annot=True)", "_____no_output_____" ], [ "sns.heatmap(economista.corr(method='spearman'), annot=True)", "_____no_output_____" ], [ "sns.heatmap(economista.cov(), annot=True)", "_____no_output_____" ], [ "", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cba17b82e86517dce185fdb4bab1e2cd98d680c1
7,594
ipynb
Jupyter Notebook
tutorial/alink_python/Chap18.ipynb
yangqi199808/Alink
51402e1a7549ad062f5a9fc70e59fa3b65999f81
[ "Apache-2.0" ]
null
null
null
tutorial/alink_python/Chap18.ipynb
yangqi199808/Alink
51402e1a7549ad062f5a9fc70e59fa3b65999f81
[ "Apache-2.0" ]
null
null
null
tutorial/alink_python/Chap18.ipynb
yangqi199808/Alink
51402e1a7549ad062f5a9fc70e59fa3b65999f81
[ "Apache-2.0" ]
null
null
null
31.251029
94
0.487227
[ [ [ "from pyalink.alink import *\nuseLocalEnv(4)\n\nfrom utils import *\nimport os\nimport pandas as pd\n\nDATA_DIR = ROOT_DIR + \"mnist\" + os.sep\n\nDENSE_TRAIN_FILE = \"dense_train.ak\";\nSPARSE_TRAIN_FILE = \"sparse_train.ak\";\n\nINIT_MODEL_FILE = \"init_model.ak\";\nTEMP_STREAM_FILE = \"temp_stream.ak\";\n\nVECTOR_COL_NAME = \"vec\";\nLABEL_COL_NAME = \"label\";\nPREDICTION_COL_NAME = \"cluster_id\";\n", "_____no_output_____" ], [ "#c_1\ndense_source = AkSourceBatchOp().setFilePath(DATA_DIR + DENSE_TRAIN_FILE);\nsparse_source = AkSourceBatchOp().setFilePath(DATA_DIR + SPARSE_TRAIN_FILE);\nsw = Stopwatch();\n\npipelineList = [\n [\"KMeans EUCLIDEAN\",\n Pipeline()\\\n .add(\n KMeans()\\\n .setK(10)\\\n .setVectorCol(VECTOR_COL_NAME)\\\n .setPredictionCol(PREDICTION_COL_NAME)\n )\n ],\n [\"KMeans COSINE\",\n Pipeline()\\\n .add(\n KMeans()\\\n .setDistanceType('COSINE')\\\n .setK(10)\\\n .setVectorCol(VECTOR_COL_NAME)\\\n .setPredictionCol(PREDICTION_COL_NAME)\n )\n ],\n [\"BisectingKMeans\",\n Pipeline()\\\n .add(\n BisectingKMeans()\\\n .setK(10)\\\n .setVectorCol(VECTOR_COL_NAME)\\\n .setPredictionCol(PREDICTION_COL_NAME)\n )\n ]\n]\n\nfor pipelineTuple2 in pipelineList :\n sw.reset();\n sw.start();\n pipelineTuple2[1]\\\n .fit(dense_source)\\\n .transform(dense_source)\\\n .link(\n EvalClusterBatchOp()\\\n .setVectorCol(VECTOR_COL_NAME)\\\n .setPredictionCol(PREDICTION_COL_NAME)\\\n .setLabelCol(LABEL_COL_NAME)\\\n .lazyPrintMetrics(pipelineTuple2[0] + \" DENSE\")\n );\n BatchOperator.execute();\n sw.stop();\n print(sw.getElapsedTimeSpan());\n\n sw.reset();\n sw.start();\n pipelineTuple2[1]\\\n .fit(sparse_source)\\\n .transform(sparse_source)\\\n .link(\n EvalClusterBatchOp()\\\n .setVectorCol(VECTOR_COL_NAME)\\\n .setPredictionCol(PREDICTION_COL_NAME)\\\n .setLabelCol(LABEL_COL_NAME)\\\n .lazyPrintMetrics(pipelineTuple2[0] + \" SPARSE\")\n );\n BatchOperator.execute();\n sw.stop();\n print(sw.getElapsedTimeSpan());\n", "_____no_output_____" ], [ "#c_2\nbatch_source = AkSourceBatchOp().setFilePath(DATA_DIR + SPARSE_TRAIN_FILE);\nstream_source = AkSourceStreamOp().setFilePath(DATA_DIR + SPARSE_TRAIN_FILE);\n\nif not(os.path.exists(DATA_DIR + INIT_MODEL_FILE)) :\n batch_source\\\n .sampleWithSize(100)\\\n .link(\n KMeansTrainBatchOp()\\\n .setVectorCol(VECTOR_COL_NAME)\\\n .setK(10)\n )\\\n .link(\n AkSinkBatchOp()\\\n .setFilePath(DATA_DIR + INIT_MODEL_FILE)\n );\n BatchOperator.execute();\n\n\ninit_model = AkSourceBatchOp().setFilePath(DATA_DIR + INIT_MODEL_FILE);\n\nKMeansPredictBatchOp()\\\n .setPredictionCol(PREDICTION_COL_NAME)\\\n .linkFrom(init_model, batch_source)\\\n .link(\n EvalClusterBatchOp()\\\n .setVectorCol(VECTOR_COL_NAME)\\\n .setPredictionCol(PREDICTION_COL_NAME)\\\n .setLabelCol(LABEL_COL_NAME)\\\n .lazyPrintMetrics(\"Batch Prediction\")\n );\nBatchOperator.execute();\n\nstream_source\\\n .link(\n KMeansPredictStreamOp(init_model)\\\n .setPredictionCol(PREDICTION_COL_NAME)\n )\\\n .link(\n AkSinkStreamOp()\\\n .setFilePath(DATA_DIR + TEMP_STREAM_FILE)\\\n .setOverwriteSink(True)\n );\nStreamOperator.execute();\n\nAkSourceBatchOp()\\\n .setFilePath(DATA_DIR + TEMP_STREAM_FILE)\\\n .link(\n EvalClusterBatchOp()\\\n .setVectorCol(VECTOR_COL_NAME)\\\n .setPredictionCol(PREDICTION_COL_NAME)\\\n .setLabelCol(LABEL_COL_NAME)\\\n .lazyPrintMetrics(\"Stream Prediction\")\n );\nBatchOperator.execute();", "_____no_output_____" ], [ "#c_3\n\npd.set_option('display.html.use_mathjax', False)\n\nstream_source = AkSourceStreamOp().setFilePath(DATA_DIR + 
SPARSE_TRAIN_FILE);\n\ninit_model = AkSourceBatchOp().setFilePath(DATA_DIR + INIT_MODEL_FILE);\n\nstream_pred = stream_source\\\n .link(\n StreamingKMeansStreamOp(init_model)\\\n .setTimeInterval(1)\\\n .setHalfLife(1)\\\n .setPredictionCol(PREDICTION_COL_NAME)\n )\\\n .select(PREDICTION_COL_NAME + \", \" + LABEL_COL_NAME +\", \" + VECTOR_COL_NAME);\n\nstream_pred.sample(0.001).print();\n\nstream_pred\\\n .link(\n AkSinkStreamOp()\\\n .setFilePath(DATA_DIR + TEMP_STREAM_FILE)\\\n .setOverwriteSink(True)\n );\nStreamOperator.execute();\n\nAkSourceBatchOp()\\\n .setFilePath(DATA_DIR + TEMP_STREAM_FILE)\\\n .link(\n EvalClusterBatchOp()\\\n .setVectorCol(VECTOR_COL_NAME)\\\n .setPredictionCol(PREDICTION_COL_NAME)\\\n .setLabelCol(LABEL_COL_NAME)\\\n .lazyPrintMetrics(\"StreamingKMeans\")\n );\nBatchOperator.execute();\n", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code" ] ]
cba18cc51805223c116558c869e41d46139d8f54
247,471
ipynb
Jupyter Notebook
notebooks/copies/lectures/S04_Pandas.ipynb
robkravec/sta-663-2021
4dc8018f7b172eaf81da9edc33174768ff157939
[ "MIT" ]
null
null
null
notebooks/copies/lectures/S04_Pandas.ipynb
robkravec/sta-663-2021
4dc8018f7b172eaf81da9edc33174768ff157939
[ "MIT" ]
null
null
null
notebooks/copies/lectures/S04_Pandas.ipynb
robkravec/sta-663-2021
4dc8018f7b172eaf81da9edc33174768ff157939
[ "MIT" ]
null
null
null
26.684386
350
0.358002
[ [ [ "# Introduction to `pandas`", "_____no_output_____" ] ], [ [ "# pandas is the data frame equivalent in Python\nimport numpy as np\nimport pandas as pd", "_____no_output_____" ] ], [ [ "## Series and Data Frames", "_____no_output_____" ], [ "### Series objects", "_____no_output_____" ], [ "A `Series` is like a vector. All elements must have the same type or are nulls.", "_____no_output_____" ] ], [ [ "s = pd.Series([1,1,2,3] + [None])\ns", "_____no_output_____" ] ], [ [ "### Size", "_____no_output_____" ] ], [ [ "s.size # Number of elements", "_____no_output_____" ] ], [ [ "### Unique Counts", "_____no_output_____" ] ], [ [ "s.value_counts() # Returns a dictionary of counts", "_____no_output_____" ] ], [ [ "### Special types of series", "_____no_output_____" ], [ "#### Strings", "_____no_output_____" ] ], [ [ "words = 'the quick brown fox jumps over the lazy dog'.split()\ns1 = pd.Series([' '.join(item) for item in zip(words[:-1], words[1:])])\ns1", "_____no_output_____" ], [ "s1.str.upper() # Need to specify that you're going to use a string method with .str", "_____no_output_____" ], [ "s1.str.split()", "_____no_output_____" ], [ "s1.str.split().str[1]", "_____no_output_____" ] ], [ [ "### Categories\n\nEquivalent of factors in R", "_____no_output_____" ] ], [ [ "s2 = pd.Series(['Asian', 'Asian', 'White', 'Black', 'White', 'Hispanic'])\ns2", "_____no_output_____" ], [ "s2 = s2.astype('category')\ns2", "_____no_output_____" ], [ "s2.cat.categories", "_____no_output_____" ], [ "s2.cat.codes", "_____no_output_____" ] ], [ [ "### Ordered categories", "_____no_output_____" ] ], [ [ "s3 = pd.Series(['Mon', 'Tue', 'Wed', 'Thu', 'Fri']).astype('category')\ns3", "_____no_output_____" ], [ "s3.cat.ordered", "_____no_output_____" ], [ "s3.sort_values()", "_____no_output_____" ], [ "s3 = s3.cat.reorder_categories(['Mon', 'Tue', 'Wed', 'Thu', 'Fri'], ordered=True)", "_____no_output_____" ], [ "s3.cat.ordered", "_____no_output_____" ], [ "s3.sort_values()", "_____no_output_____" ] ], [ [ "### DataFrame objects", "_____no_output_____" ], [ "A `DataFrame` is like a matrix. 
Columns in a `DataFrame` are `Series`.\n\n- Each column in a DataFrame represents a **variable**\n- Each row in a DataFrame represents an **observation**\n- Each cell in a DataFrame represents a **value**", "_____no_output_____" ] ], [ [ "df = pd.DataFrame(dict(num=[1,2,3] + [None]))\ndf", "_____no_output_____" ], [ "df.num", "_____no_output_____" ] ], [ [ "### Index\n\nRow and column identifiers are of `Index` type.\n\nSomewhat confusingly, index is also a a synonym for the row identifiers.", "_____no_output_____" ] ], [ [ "df.index", "_____no_output_____" ] ], [ [ "#### Setting a column as the row index", "_____no_output_____" ] ], [ [ "df", "_____no_output_____" ], [ "df1 = df.set_index('num')\ndf1", "_____no_output_____" ] ], [ [ "#### Making an index into a column", "_____no_output_____" ] ], [ [ "df1.reset_index()", "_____no_output_____" ] ], [ [ "#### Sometimes you don't need to retain the index information", "_____no_output_____" ] ], [ [ "df = pd.DataFrame(dict(letters = list('ABCDEFG')))", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "df = df[df.letters.isin(list('AEIOU'))] # Use df.columnName to select a specific column by name", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "df.reset_index(drop=True)", "_____no_output_____" ] ], [ [ "### Columns\n\nThis is just a different index object", "_____no_output_____" ] ], [ [ "df.columns", "_____no_output_____" ] ], [ [ "### Getting raw values\n\nSometimes you just want a `numpy` array, and not a `pandas` object.", "_____no_output_____" ] ], [ [ "df.values", "_____no_output_____" ] ], [ [ "## Creating Data Frames", "_____no_output_____" ], [ "### Manual", "_____no_output_____" ] ], [ [ "# Feed in dictionary with column name as key and series / vector as values\nn = 5\ndates = pd.date_range(start='now', periods=n, freq='d')\ndf = pd.DataFrame(dict(pid=np.random.randint(100, 999, n), \n weight=np.random.normal(70, 20, n),\n height=np.random.normal(170, 15, n),\n date=dates,\n ))\ndf", "_____no_output_____" ] ], [ [ "### From numpy array", "_____no_output_____" ] ], [ [ "pd.DataFrame(np.eye(3,2), columns=['A', 'B'], index=['x', 'y', 'z'])", "_____no_output_____" ] ], [ [ "### From URL", "_____no_output_____" ] ], [ [ "url = \"https://gist.githubusercontent.com/netj/8836201/raw/6f9306ad21398ea43cba4f7d537619d0e07d5ae3/iris.csv\"\n\ndf = pd.read_csv(url)\ndf.head()", "_____no_output_____" ] ], [ [ "### From file\n\nYou can read in data from many different file types - plain text, JSON, spreadsheets, databases etc. 
Functions to read in data look like `read_X` where X is the data type.", "_____no_output_____" ] ], [ [ "%%file measures.txt\npid\tweight\theight\tdate\n328\t72.654347\t203.560866\t2018-11-11 14:16:18.148411\n756\t34.027679\t189.847316\t2018-11-12 14:16:18.148411\n185\t28.501914\t158.646074\t2018-11-13 14:16:18.148411\n507\t17.396343\t180.795993\t2018-11-14 14:16:18.148411\n919\t64.724301\t173.564725\t2018-11-15 14:16:18.148411", "Writing measures.txt\n" ], [ "df = pd.read_table('measures.txt')\ndf", "_____no_output_____" ] ], [ [ "## Indexing Data Frames", "_____no_output_____" ], [ "### Implicit defaults\n\nif you provide a slice, it is assumed that you are asking for rows.", "_____no_output_____" ] ], [ [ "df[1:3]", "_____no_output_____" ] ], [ [ "If you provide a single value or list, it is assumed that you are asking for columns.", "_____no_output_____" ] ], [ [ "df[['pid', 'weight']] ", "_____no_output_____" ] ], [ [ "### Extracting a column", "_____no_output_____" ], [ "#### Dictionary style access", "_____no_output_____" ] ], [ [ "df['pid'] # Use double bracket to return data frame instead of series", "_____no_output_____" ] ], [ [ "#### Property style access\n\nThis only works for column names that are also valid Python identifier (i.e., no spaces or dashes or keywords)", "_____no_output_____" ] ], [ [ "df.pid", "_____no_output_____" ] ], [ [ "### Indexing by location\n\nThis is similar to `numpy` indexing", "_____no_output_____" ] ], [ [ "df.iloc[1:3, :]", "_____no_output_____" ], [ "df.iloc[1:3, [True, False, True, False]]", "_____no_output_____" ] ], [ [ "### Indexing by name", "_____no_output_____" ] ], [ [ "# Since referencing by name, the ranges are inclusive on both ends. For example, the 1:3 rows refer to the row names\ndf.loc[1:3, 'weight':'height'] ", "_____no_output_____" ] ], [ [ "**Warning**: When using `loc`, the row slice indicates row names, not positions.", "_____no_output_____" ] ], [ [ "df1 = df.copy()\ndf1.index = df.index + 1\ndf1", "_____no_output_____" ], [ "df1.loc[1:3, 'weight':'height']", "_____no_output_____" ] ], [ [ "## Structure of a Data Frame", "_____no_output_____" ], [ "### Data types", "_____no_output_____" ] ], [ [ "df.dtypes", "_____no_output_____" ] ], [ [ "### Converting data types", "_____no_output_____" ], [ "#### Using `astype` on one column", "_____no_output_____" ] ], [ [ "df.pid = df.pid.astype('category')", "_____no_output_____" ] ], [ [ "#### Using `astype` on multiple columns", "_____no_output_____" ] ], [ [ "df = df.astype(dict(weight=float, \n height=float))", "_____no_output_____" ] ], [ [ "#### Using a conversion function", "_____no_output_____" ] ], [ [ "df.date = pd.to_datetime(df.date)", "_____no_output_____" ] ], [ [ "#### Check", "_____no_output_____" ] ], [ [ "df.dtypes", "_____no_output_____" ] ], [ [ "### Basic properties", "_____no_output_____" ] ], [ [ "df.size", "_____no_output_____" ], [ "df.shape", "_____no_output_____" ], [ "df.describe() # Only works for columns that are numeric", "_____no_output_____" ], [ "df.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 5 entries, 0 to 4\nData columns (total 4 columns):\npid 5 non-null category\nweight 5 non-null float64\nheight 5 non-null float64\ndate 5 non-null datetime64[ns]\ndtypes: category(1), datetime64[ns](1), float64(2)\nmemory usage: 453.0 bytes\n" ] ], [ [ "### Inspection", "_____no_output_____" ] ], [ [ "df.head(n=3)", "_____no_output_____" ], [ "df.tail(n=3)", "_____no_output_____" ], [ "df.sample(n=3)", "_____no_output_____" ], [ 
"df.sample(frac=0.5)", "_____no_output_____" ] ], [ [ "## Selecting, Renaming and Removing Columns", "_____no_output_____" ], [ "### Selecting columns", "_____no_output_____" ] ], [ [ "df.filter(items=['pid', 'date'])", "_____no_output_____" ], [ "df.filter(regex='.*ght')", "_____no_output_____" ] ], [ [ "I'm not actually clear about what `like` does - it seems to mean \"contains\"", "_____no_output_____" ] ], [ [ "df.filter(like='ei')", "_____no_output_____" ] ], [ [ "#### Filter has an optional axis argument if you want to select by row index", "_____no_output_____" ] ], [ [ "df.filter([0,1,3,4], axis=0)", "_____no_output_____" ] ], [ [ "#### Note that you can also use regular string methods on the columns", "_____no_output_____" ] ], [ [ "df.loc[:, df.columns.str.contains('d')]", "_____no_output_____" ] ], [ [ "### Renaming columns", "_____no_output_____" ] ], [ [ "df.rename(dict(weight='w', height='h'), axis=1)", "_____no_output_____" ], [ "orig_cols = df.columns ", "_____no_output_____" ], [ "df.columns = list('abcd')", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "df.columns = orig_cols", "_____no_output_____" ], [ "df", "_____no_output_____" ] ], [ [ "### Removing columns", "_____no_output_____" ] ], [ [ "df.drop(['pid', 'date'], axis=1)", "_____no_output_____" ], [ "df.drop(columns=['pid', 'date'])", "_____no_output_____" ], [ "df.drop(columns=df.columns[df.columns.str.contains('d')])", "_____no_output_____" ] ], [ [ "## Selecting, Renaming and Removing Rows", "_____no_output_____" ], [ "### Selecting rows", "_____no_output_____" ] ], [ [ "df[df.weight.between(60,70)]", "_____no_output_____" ], [ "df[(69 <= df.weight) & (df.weight < 70)]", "_____no_output_____" ], [ "df[df.date.between(pd.to_datetime('2018-11-13'), \n pd.to_datetime('2018-11-15 23:59:59'))]", "_____no_output_____" ], [ "# Essentially SQL commands (like in a WHERE clause)\ndf.query('weight <= 70 and height > 90')", "_____no_output_____" ] ], [ [ "### Renaming rows", "_____no_output_____" ] ], [ [ "# Dictionary of old name: new name\ndf.rename({i:letter for i,letter in enumerate('abcde')})", "_____no_output_____" ], [ "df.index = ['the', 'quick', 'brown', 'fox', 'jumphs']", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "df = df.reset_index(drop=True)", "_____no_output_____" ], [ "df", "_____no_output_____" ] ], [ [ "### Dropping rows", "_____no_output_____" ] ], [ [ "df.drop([1,3], axis=0)", "_____no_output_____" ] ], [ [ "#### Dropping duplicated data", "_____no_output_____" ] ], [ [ "df['something'] = [1,1,None,2,None]\ndf['nothing'] = [None, None, None, None, None]", "_____no_output_____" ], [ "df.loc[df.something.duplicated()]", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "df.drop_duplicates(subset='something')\n# If you actually want to change df, need to assign it or use inplace = True argument", "_____no_output_____" ] ], [ [ "#### Dropping missing data", "_____no_output_____" ] ], [ [ "df", "_____no_output_____" ], [ "df.dropna()", "_____no_output_____" ], [ "df.dropna(axis=1)", "_____no_output_____" ], [ "df.dropna(axis=1, how='all')", "_____no_output_____" ] ], [ [ "#### Brute force replacement of missing values", "_____no_output_____" ] ], [ [ "df.something.fillna(0)", "_____no_output_____" ], [ "df.something.fillna(df.something.mean())", "_____no_output_____" ], [ "df.something.ffill() # Forward fill", "_____no_output_____" ], [ "df.something.bfill() # Backward fill (generally not a good idea)", "_____no_output_____" ], [ "df.something.interpolate() # Average 
of forward and backward fill", "_____no_output_____" ] ], [ [ "## Transforming and Creating Columns", "_____no_output_____" ] ], [ [ "df.assign(bmi=df['weight'] / (df['height']/100)**2)", "_____no_output_____" ], [ "df['bmi'] = df['weight'] / (df['height']/100)**2", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "df['something'] = [2,2,None,None,3]", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "?df.insert", "_____no_output_____" ] ], [ [ "## Sorting Data Frames", "_____no_output_____" ], [ "### Sort on indexes", "_____no_output_____" ] ], [ [ "df.sort_index(axis=1)", "_____no_output_____" ], [ "df.sort_index(axis=0, ascending=False)", "_____no_output_____" ] ], [ [ "### Sort on values", "_____no_output_____" ] ], [ [ "df.sort_values(by=['something', 'bmi'], ascending=[True, False])", "_____no_output_____" ] ], [ [ "## Summarizing", "_____no_output_____" ], [ "### Apply an aggregation function", "_____no_output_____" ] ], [ [ "df.select_dtypes(include=np.number) # Just select columns with numeric types", "_____no_output_____" ], [ "df.select_dtypes(include=np.number).agg(np.sum)", "_____no_output_____" ], [ "df.agg(['count', np.sum, np.mean])", "_____no_output_____" ] ], [ [ "## Split-Apply-Combine\n\nWe often want to perform subgroup analysis (conditioning by some discrete or categorical variable). This is done with `groupby` followed by an aggregate function. Conceptually, we split the data frame into separate groups, apply the aggregate function to each group separately, then combine the aggregated results back into a single data frame.", "_____no_output_____" ] ], [ [ "df['treatment'] = list('ababa')", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "grouped = df.groupby('treatment')", "_____no_output_____" ], [ "grouped.get_group('a')", "_____no_output_____" ], [ "grouped.mean()", "_____no_output_____" ] ], [ [ "### Using `agg` with `groupby`", "_____no_output_____" ] ], [ [ "grouped.agg('mean')", "_____no_output_____" ], [ "grouped.agg(['mean', 'std'])", "_____no_output_____" ], [ "grouped.agg({'weight': ['mean', 'std'], 'height': ['min', 'max'], 'bmi': lambda x: (x**2).sum()})", "_____no_output_____" ] ], [ [ "### Using `transform` wtih `groupby`", "_____no_output_____" ] ], [ [ "g_mean = grouped[['weight', 'height']].transform(np.mean)\ng_mean", "_____no_output_____" ], [ "g_std = grouped[['weight', 'height']].transform(np.std)\ng_std", "_____no_output_____" ], [ "(df[['weight', 'height']] - g_mean)/g_std", "_____no_output_____" ] ], [ [ "## Combining Data Frames", "_____no_output_____" ] ], [ [ "df", "_____no_output_____" ], [ "df1 = df.iloc[3:].copy()", "_____no_output_____" ], [ "df1.drop('something', axis=1, inplace=True)\ndf1", "_____no_output_____" ] ], [ [ "### Adding rows\n\nNote that `pandas` aligns by column indexes automatically.", "_____no_output_____" ] ], [ [ "# Works even though the columns are not exactly the same. 
Fills in missing values for `something` column\ndf.append(df1, sort=False)", "_____no_output_____" ], [ "pd.concat([df, df1], sort=False)", "_____no_output_____" ] ], [ [ "### Adding columns", "_____no_output_____" ] ], [ [ "df.pid", "_____no_output_____" ], [ "df2 = pd.DataFrame(dict(pid=[649, 533, 400, 600], age=[23,34,45,56]))", "_____no_output_____" ], [ "df2.pid", "_____no_output_____" ], [ "df.pid = df.pid.astype('int')", "_____no_output_____" ], [ "pd.merge(df, df2, on='pid', how='inner')", "_____no_output_____" ], [ "pd.merge(df, df2, on='pid', how='left')", "_____no_output_____" ], [ "pd.merge(df, df2, on='pid', how='right')", "_____no_output_____" ], [ "pd.merge(df, df2, on='pid', how='outer')", "_____no_output_____" ] ], [ [ "### Merging on the index", "_____no_output_____" ] ], [ [ "df1 = pd.DataFrame(dict(x=[1,2,3]), index=list('abc'))\ndf2 = pd.DataFrame(dict(y=[4,5,6]), index=list('abc'))\ndf3 = pd.DataFrame(dict(z=[7,8,9]), index=list('abc'))", "_____no_output_____" ], [ "df1", "_____no_output_____" ], [ "df2", "_____no_output_____" ], [ "df3", "_____no_output_____" ], [ "df1.join([df2, df3])", "_____no_output_____" ] ], [ [ "## Fixing common DataFrame issues", "_____no_output_____" ], [ "### Multiple variables in a column", "_____no_output_____" ] ], [ [ "df = pd.DataFrame(dict(pid_treat = ['A-1', 'B-2', 'C-1', 'D-2']))\ndf", "_____no_output_____" ], [ "df.pid_treat.str.split('-')", "_____no_output_____" ], [ "df.pid_treat.str.split('-').apply(pd.Series, index=['pid', 'treat'])", "_____no_output_____" ] ], [ [ "### Multiple values in a cell", "_____no_output_____" ] ], [ [ "df = pd.DataFrame(dict(pid=['a', 'b', 'c'], vals = [(1,2,3), (4,5,6), (7,8,9)]))\ndf", "_____no_output_____" ], [ "df[['t1', 't2', 't3']] = df.vals.apply(pd.Series)\ndf", "_____no_output_____" ], [ "df.drop('vals', axis=1, inplace=True)", "_____no_output_____" ], [ "pd.melt(df, id_vars='pid', value_name='vals').drop('variable', axis=1)", "_____no_output_____" ], [ "df.explode(column='vals')", "_____no_output_____" ] ], [ [ "## Reshaping Data Frames\n\nSometimes we need to make rows into columns or vice versa.", "_____no_output_____" ], [ "### Converting multiple columns into a single column\n\nThis is often useful if you need to condition on some variable.", "_____no_output_____" ] ], [ [ "url = 'https://raw.githubusercontent.com/uiuc-cse/data-fa14/gh-pages/data/iris.csv'\niris = pd.read_csv(url)", "_____no_output_____" ], [ "iris.head()", "_____no_output_____" ], [ "iris.shape", "_____no_output_____" ], [ "# Go from wide format to long format\ndf_iris = pd.melt(iris, id_vars='species')", "_____no_output_____" ], [ "df_iris.shape", "_____no_output_____" ], [ "df_iris.sample(10)", "_____no_output_____" ] ], [ [ "## Chaining commands\n\nSometimes you see this functional style of method chaining that avoids the need for temporary intermediate variables.", "_____no_output_____" ] ], [ [ "(\n iris.\n sample(frac=0.2).\n filter(regex='s.*').\n assign(both=iris.sepal_length + iris.sepal_length).\n groupby('species').agg(['mean', 'sum']).\n pipe(lambda x: np.around(x, 1)) # pipe allows you to put any arbitrary Python function in the method chaining\n)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
cba18da53212e9f191d146a6f2f9ac53cc72e0a3
18,934
ipynb
Jupyter Notebook
py2neo/py2neo.ipynb
RabbitWhite1/Neo4j_SampleCode
de659729e094a5c46c6fea6503d3b4ac4af0cece
[ "MIT" ]
null
null
null
py2neo/py2neo.ipynb
RabbitWhite1/Neo4j_SampleCode
de659729e094a5c46c6fea6503d3b4ac4af0cece
[ "MIT" ]
null
null
null
py2neo/py2neo.ipynb
RabbitWhite1/Neo4j_SampleCode
de659729e094a5c46c6fea6503d3b4ac4af0cece
[ "MIT" ]
null
null
null
28.008876
735
0.514577
[ [ [ "# py2neo\nBy Zhanghan Wang \nRefer to [The Py2neo v4 Handbook](https://py2neo.org/v4/index.html#) \n\nThis is a .ipynb file to illustrate how to use py2neo", "_____no_output_____" ], [ "## Import", "_____no_output_____" ] ], [ [ "import pprint\nimport numpy as np\nimport pandas as pd\nimport py2neo\nprint(py2neo.__version__)\nfrom py2neo import *\nfrom py2neo.ogm import *", "4.2.0\n" ] ], [ [ "## Attention!\nTo run the following codes, you may need to initialize your database by run the code in the next cell.\n\n**!!!This code will delete all data in your database!!!**", "_____no_output_____" ] ], [ [ "graph = Graph()\ngraph.run(\"MATCH (all) DETACH DELETE all\")\ngraph.run(\"CREATE (:Person {name:'Alice'})-[:KNOWS]->(:Person {name:'Bob'})\"\n \"CREATE (:Person {name:'Ada'})-[:KNOWS]->(:Person {name:'Hank'})\"\n )", "_____no_output_____" ] ], [ [ "## 1. py2neo.data – Data Types\n\nHere are some basic operations about the data, including nodes and relationships.\n\n- [py2neo.data.Node](https://py2neo.org/v4/data.html#py2neo.data.Node) \n- [py2neo.data.Relationship](https://py2neo.org/v4/data.html#py2neo.data.Relationship) \n- [py2neo.data.Subgraph](https://py2neo.org/v4/data.html#py2neo.data.Subgraph)\n - |, &, -, ^ are allowed here.", "_____no_output_____" ], [ "### 1.1 Node and Relationships", "_____no_output_____" ] ], [ [ "# Create some nodes and relationships\na = Node('Person', name='Alice')\nb = Node('Person', name='Bob')\nab = Relationship(a, 'KNOWS', b)\nprint(a, b, ab, sep='\\n')\n\n# Create a relationship by extending the Relationship class\nc = Node(\"Person\", name=\"Carol\")\nclass WorksWith(Relationship):\n pass\nac = WorksWith(a, c)\ntype(ac)\nprint(ac)", "(:Person {name: 'Alice'})\n(:Person {name: 'Bob'})\n(Alice)-[:KNOWS {}]->(Bob)\n(Alice)-[:WorksWith {}]->(Carol)\n" ] ], [ [ "### 1.2 Subgraph\n\nBy definition, a `Subgraph` must contain at least one node; null subgraphs should be represented by `None`.\n\n> I don't know how to print `s.keys` and `s.types`", "_____no_output_____" ] ], [ [ "s = ab | ac\n\nprint(set(s))\nprint(s.labels)\n\n# to print them, we can transform them into set\nprint(set(s.nodes))\nprint(set(s.relationships))\n\n# I don't know how to print them.\nprint(s.keys)\nprint(s.types)", "{(Alice)-[:WorksWith {}]->(Carol), (Alice)-[:KNOWS {}]->(Bob)}\nfrozenset({'Person'})\n{(:Person {name: 'Bob'}), (:Person {name: 'Alice'}), (:Person {name: 'Carol'})}\n{(Alice)-[:WorksWith {}]->(Carol), (Alice)-[:KNOWS {}]->(Bob)}\n<bound method Subgraph.keys of <py2neo.data.Subgraph object at 0x000001A5F04F5F88>>\n<bound method Subgraph.types of <py2neo.data.Subgraph object at 0x000001A5F04F5F88>>\n" ] ], [ [ "### 1.3 Path objects and other Walkable types\n\n[py2neo.data.Walkable](https://py2neo.org/v4/data.html#py2neo.data.Walkable)", "_____no_output_____" ] ], [ [ "w = ab + Relationship(b, \"LIKES\", c) + ac\nprint(\"w.__class__: {}\".format(w.__class__))\nprint(\"start_node: {}\\nend_node: {}\".format(w.start_node, w.end_node))\nprint(\"nodes({}): {}\".format(w.nodes.__class__, w.nodes))\nprint(\"relationships({}): {}\".format(w.relationships.__class__, w.relationships))\nprint(\"walk:\")\ni = 0;\nfor item in walk(w):\n print(\"\\t{}th yield: {}\".format(i, item))\n i += 1", "w.__class__: <class 'py2neo.data.Walkable'>\nstart_node: (:Person {name: 'Alice'})\nend_node: (:Person {name: 'Alice'})\nnodes(<class 'tuple'>): ((:Person {name: 'Alice'}), (:Person {name: 'Bob'}), (:Person {name: 'Carol'}), (:Person {name: 'Alice'}))\nrelationships(<class 'tuple'>): 
((Alice)-[:KNOWS {}]->(Bob), (Bob)-[:LIKES {}]->(Carol), (Alice)-[:WorksWith {}]->(Carol))\nwalk:\n\t0th yield: (:Person {name: 'Alice'})\n\t1th yield: (Alice)-[:KNOWS {}]->(Bob)\n\t2th yield: (:Person {name: 'Bob'})\n\t3th yield: (Bob)-[:LIKES {}]->(Carol)\n\t4th yield: (:Person {name: 'Carol'})\n\t5th yield: (Alice)-[:WorksWith {}]->(Carol)\n\t6th yield: (:Person {name: 'Alice'})\n" ] ], [ [ "### 1.4 Record Objects and Table Objects\n\n#### [Record](https://py2neo.org/v4/data.html#py2neo.data.Record)\nA `Record` object holds an ordered, keyed collection of values. It is in many ways similar to a namedtuple but allows field access only through bracketed syntax and provides more functionality. `Record` extends both tuple and Mapping.\n\n#### [Table](https://py2neo.org/v4/data.html#py2neo.data.Table)\nA `Table` holds a list of `Record` objects, typically received as the result of a Cypher query. It provides a convenient container for working with a result in its entirety and provides methods for conversion into various output formats. `Table` extends `list`.", "_____no_output_____" ], [ "## 2. Connect to Your Database", "_____no_output_____" ], [ "### 2.1 Database and Graph\n\nNeo4j only supports one Graph per Database.\n\n- [py2neo.database](https://py2neo.org/v4/database.html)\n- [py2neo.database.Graph](https://py2neo.org/v4/database.html#py2neo.database.Graph)\n\n> I don't know how to get the `Graph` instance from the `Database` instance.", "_____no_output_____" ] ], [ [ "# Connect to the database\ndb = Database(\"bolt://localhost:7687\")\nprint('Connected to a database.\\nURI: {}, name: {}:\\n'.format(db.uri, db.name))\n# Return the graph from the database\ngraph = Graph(\"bolt://localhost:7687\")\nprint('Connected to a graph:\\n{}'.format(graph))", "Connected to a database.\nURI: bolt://localhost:7687, name: graph.db:\n\nConnected to a graph:\n<Graph database=<Database uri='bolt://localhost:7687' secure=False user_agent='py2neo/4.2.0 neobolt/1.7.16 Python/3.7.4-final-0 (win32)'> name='data'>\n" ] ], [ [ "#### 2.1.1 Graph Operations", "_____no_output_____" ] ], [ [ "# Create\nShirley = Node('Person', name='Shirley')\n## the code is annotated in case creating node each time you run\n# graph.create(Shirley)\n## but we can use merge here. 
\n## We can consider merge as creating if not existing(that is updating)\ngraph.merge(Shirley, 'Person', 'name')\n\n# nodes\nprint(graph.nodes)\nprint(len(graph.nodes))\n## get specific nodes\n### get by id\ntry:\n print(graph.nodes[0])\n print(graph.nodes.get(1))\nexcept KeyError:\n print(\"KeyError\")\nexcept:\n print(\"Error\")\n### get by match\nAlice = graph.nodes.match('Person', name='Alice').first()\nprint(Alice)\n\n# get relationships using matcher\nprint(graph.relationships.match((Alice,)).first()) \n## Node here cannot be newed by Node, there are some differences\nprint('By match: {}; By new a Node: {}'.format(Alice, Node('Person', name='Alice')))", "<py2neo.matching.NodeMatcher object at 0x000001A5F0504CC8>\n5\nKeyError\n(_44:Person {name: 'Alice'})\n(Alice)-[:KNOWS {}]->(Bob)\nBy match: (_44:Person {name: 'Alice'}); By new a Node: (:Person {name: 'Alice'})\n" ] ], [ [ "### 2.2 Transactions", "_____no_output_____" ] ], [ [ "# begin a new transaction\ntx = graph.begin()\na = Node(\"Person\", name=\"Shirley\")\nb = Node(\"Person\", name=\"Hank\")\nab = Relationship(a, \"KNOWS\", b)\n# still the same problem\nprint(graph.exists(ab), graph.exists(a), graph.exists(b), a)\ntx.merge(ab, 'Person', 'name')\ntx.commit()\nprint(graph.exists(ab), graph.exists(a), graph.exists(b), a)", "False False False (:Person {name: 'Shirley'})\nTrue True True (_48:Person {name: 'Shirley'})\n" ] ], [ [ "### 2.3 Cypher Results\n\n\n\nThe Cpyher Results `Cursor` are returned by some fuunctions like `run`, and you can get information about the `run` by the `Cursor`.\n\nTurn to the handbook when needing:\n- [Cypher Results](https://py2neo.org/v4/database.html#cypher-results): `py2neo.database.Cursor`", "_____no_output_____" ] ], [ [ "print(graph.run(\"MATCH (a:Person) RETURN a.name LIMIT 2\").data())\n\ndisplay(graph.run(\"MATCH (s)-[r]->(e)\"\n \"RETURN s AS Start, r AS Relationship, e AS End\").to_table())", "[{'a.name': 'Alice'}, {'a.name': 'Bob'}]\n" ] ], [ [ "### 2.4 Errors & Warnings\n\nTurn to the handbook when needing:\n- [Errors & Warnings](https://py2neo.org/v4/database.html#errors-warnings)", "_____no_output_____" ], [ "## 3. py2neo.matching – Entity matching", "_____no_output_____" ] ], [ [ "# NodeMatcher\nmatcher = NodeMatcher(graph)\nprint(matcher.match(\"Person\", name=\"Shirley\").first())\nprint(list(matcher.match('Person').where('_.name =~ \"A.*\"').order_by(\"_.name\").limit(3)))\n# RelationshipMatcher\nmatcher = RelationshipMatcher(graph)\n# use iteration\nfor r in matcher.match(r_type='KNOWS'):\n print(r)", "(_48:Person {name: 'Shirley'})\n[(_46:Person {name: 'Ada'}), (_44:Person {name: 'Alice'})]\n(Alice)-[:KNOWS {}]->(Bob)\n(Ada)-[:KNOWS {}]->(Hank)\n(Shirley)-[:KNOWS {}]->(Hank)\n" ] ], [ [ "## 4. py2neo.ogm – Object-Graph Mapping\n\n[py2neo.ogm](https://py2neo.org/v4/ogm.html)\n\nThe `py2neo.ogm` maps the `Neo4j Objects` into `Python Objects`.\n\nTo create this, you should extend the class `GraphObject`. 
By default it is just the class name.", "_____no_output_____" ] ], [ [ "# Sample classes\nclass Movie(GraphObject):\n __primarykey__ = \"title\"\n\n title = Property()\n tag_line = Property(\"tagline\")\n released = Property()\n\n actors = RelatedFrom(\"Person\", \"ACTED_IN\")\n directors = RelatedFrom(\"Person\", \"DIRECTED\")\n producers = RelatedFrom(\"Person\", \"PRODUCED\")\n\n\nclass Person(GraphObject):\n __primarykey__ = \"name\"\n\n name = Property()\n born = Property()\n isBoy = Label()\n likes = RelatedTo(\"Person\")\n beliked = RelatedFrom('Person')\n friend = Related(\"Person\")\n\n acted_in = RelatedTo(Movie)\n directed = RelatedTo(Movie)\n produced = RelatedTo(Movie)", "_____no_output_____" ] ], [ [ "### 4.1 Node, Property and Label", "_____no_output_____" ] ], [ [ "alice = Person()\nalice.name = \"Alice Smith\"\nalice.born = 1990\nalice.isBoy = False\nprint(alice)\nprint(alice.born)\nprint(alice.isBoy)\nprint(alice.__node__)", "<Person name='Alice Smith'>\n1990\nFalse\n(:Person {born: 1990, name: 'Alice Smith'})\n" ] ], [ [ "### 4.2 Related\n\n[Related Objects](https://py2neo.org/v4/ogm.html#related-objects)\n\nFunctions can be used:\n- add, clear, get, remove, update", "_____no_output_____" ] ], [ [ "alice = Person()\nalice.name = \"Alice Smith\"\nbob = Person()\nbob.name = \"Bob\"\nalice.likes.add(bob)\nalice.friend.add(bob)\nbob.friend.add(alice)\nprint(\"Alice's friends are {}\".format(list(alice.friend)))\nfor like in alice.likes:\n print('Alice likes: {}'.format(like))", "Alice's friends are [<Person name='Bob'>]\nAlice likes: <Person name='Bob'>\n" ] ], [ [ "### 4.3 Object Matching", "_____no_output_____" ] ], [ [ "print(list(Person.match(graph).where('_.name =~ \"A.*\"')))", "[<Person name='Alice'>, <Person name='Ada'>]\n" ] ], [ [ "### 4.4 Object Operations", "_____no_output_____" ] ], [ [ "jack = Person()\njack.name = 'Jack'\ngraph.merge(jack)\nprint(jack.__node__)", "(_49:Person {name: 'Jack'})\n" ] ], [ [ "## 5. py2neo.cypher – Cypher Utilities", "_____no_output_____" ], [ "## 6. py2neo.cypher.lexer – Cypher Lexer", "_____no_output_____" ] ] ]
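The two headings above ("py2neo.cypher" and "py2neo.cypher.lexer") are left empty in the notebook. As a minimal, hedged sketch of what the Cypher utilities offer, assuming the `cypher_escape`/`cypher_repr` helpers documented for py2neo v4, one could try:

```python
# Sketch only: assumes py2neo v4 exposes cypher_escape/cypher_repr as documented.
from py2neo.cypher import cypher_escape, cypher_repr

# Escape an identifier (e.g. a label containing a space) for safe use in a query.
print(cypher_escape("Person Label"))

# Render a plain Python value as a Cypher literal.
print(cypher_repr("O'Neil"))
```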
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
cba1949cadf98f458b8f8517cbca6ace140aa092
10,845
ipynb
Jupyter Notebook
projects/enron.ipynb
Linlin15963/msds501
0bcfa7f59a4e9b2d71db2c5973eb04c1ae60e72f
[ "MIT" ]
86
2018-08-14T20:13:32.000Z
2022-03-21T22:30:15.000Z
projects/enron.ipynb
Linlin15963/msds501
0bcfa7f59a4e9b2d71db2c5973eb04c1ae60e72f
[ "MIT" ]
2
2017-07-21T02:02:25.000Z
2017-09-13T03:19:09.000Z
projects/enron.ipynb
Linlin15963/msds501
0bcfa7f59a4e9b2d71db2c5973eb04c1ae60e72f
[ "MIT" ]
99
2015-02-28T20:10:38.000Z
2018-07-30T20:24:43.000Z
41.711538
943
0.559521
[ [ [ "# Enron email data set exploration", "_____no_output_____" ] ], [ [ "# Get better looking pictures\n%config InlineBackend.figure_format = 'retina'", "_____no_output_____" ], [ "df = pd.read_feather('enron.feather')\ndf = df.sort_values(['Date'])\ndf.tail(5)", "_____no_output_____" ] ], [ [ "## Email traffic over time\n\nGroup the data set by `Date` and `MailID`, which will get you an index that collects all of the unique mail IDs per date. Then reset the index so that those date and mail identifiers become columns and then select for just those columns; we don't actually care about the counts created by the `groupby` (that was just to get the index). Create a histogram that shows the amount of traffic per day. Then specifically for email sent from `richard.shapiro` and then `john.lavorato`. Because some dates are set improperly (to 1980), filter for dates greater than January 1, 1999.", "_____no_output_____" ], [ "## Received emails\n\nCount the number of messages received per user and then sort in reverse order. Make a bar chart showing the top 30 email recipients.", "_____no_output_____" ], [ "## Sent emails\n\nMake a bar chart indicating the top 30 mail senders. This is more complicated than the received emails because a single person can email multiple people in a single email. So, group by `From` and `MailID`, convert the index back to columns and then group again by `From` and get the count.", "_____no_output_____" ], [ "## Email heatmap\n\nGiven a list of Enron employees, compute a heat map that indicates how much email traffic went between each pair of employees. The heat map is not symmetric because Susan sending mail to Xue is not the same thing as Xue sending mail to Susan. The first step is to group the data frame by `From` and `To` columns in order to get the number of emails from person $i$ to person $j$. Then, create a 2D numpy matrix, $C$, of integers and set $C_{i,j}$ to the count of person $i$ to person $j$. Using matplotlib, `ax.imshow(C, cmap='GnBu', vmax=4000)`, show the heat map and add tick labels at 45 degrees for the X axis. Set the labels to the appropriate names. Draw the number of emails in the appropriate cells of the heat map, for all values greater than zero. Please note that when you draw text using `ax.text()`, the coordinates are X,Y whereas the coordinates in the $C$ matrix are row,column so you will have to flip the coordinates.", "_____no_output_____" ] ], [ [ "people = ['jeff.skilling', 'kenneth.lay', 'louise.kitchen', 'tana.jones',\n 'sara.shackleton', 'vince.kaminski', 'sally.beck', 'john.lavorato',\n 'mark.taylor', 'greg.whalley', 'jeff.dasovich', 'steven.kean',\n 'chris.germany', 'mike.mcconnell', 'benjamin.rogers', 'j.kaminski',\n 'stanley.horton', 'a..shankman', 'richard.shapiro']", "_____no_output_____" ] ], [ [ "## Build graph and compute rankings\n\nFrom the data frame, create a graph data structure using networkx. Create an edge from node A to node B if there is an email from A to B in the data frame. Although we do know the total number of emails between people, let's keep it simple and use simply a weight of 1 as the edge label. See networkx method `add_edge()`.\n\n1. Using networkx, compute the pagerank between all nodes. Get the data into a data frame, sort in reverse order, and display the top 15 users from the data frame. \n2. Compute the centrality for the nodes of the graph. 
The documentation says that centrality is \"*the fraction of nodes it is connected to.*\"\n\nI use `DataFrame.from_dict` to convert the dictionaries returned from the various networkx methods to data frames.", "_____no_output_____" ], [ "### Node PageRank", "_____no_output_____" ], [ "### Centrality", "_____no_output_____" ], [ "### Plotting graph subsets", "_____no_output_____" ], [ "The email graph is way too large to display the whole thing and get any meaningful information out. However, we can look at subsets of the graph such as the neighbors of a specific node. To visualize it we can use different strategies to layout the nodes. In this case, we will use two different layout strategies: *spring* and *kamada-kawai*. According to\n[Wikipedia](https://en.wikipedia.org/wiki/Force-directed_graph_drawing), these force directed layout strategies have the characteristic: \"*...the edges tend to have uniform length (because of the spring forces), and nodes that are not connected by an edge tend to be drawn further apart...*\". \n\nUse networkx `ego_graph()` method to get a radius=1 neighborhood around `jeff.skilling` and draw the spring graph with a plot that is 20x20 inch so we can see details. Then, draw the same subgraph again using the kamada-kawai layout strategy. Finally, get the neighborhood around kenneth.lay and draw kamada-kawai.", "_____no_output_____" ] ] ]
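A minimal sketch of the graph-building and ranking steps described above (the `df` data frame and the `From`/`To` column names come from the earlier cells; treat this as one possible approach, not the notebook's own solution):

```python
import networkx as nx
import pandas as pd

df = pd.read_feather('enron.feather')

# Build a directed graph with an edge A -> B whenever A emailed B.
G = nx.DiGraph()
for sender, receiver in df[['From', 'To']].drop_duplicates().itertuples(index=False):
    G.add_edge(sender, receiver, weight=1)

# PageRank and degree centrality as sorted data frames (top 15 shown).
pagerank = pd.DataFrame.from_dict(nx.pagerank(G), orient='index', columns=['pagerank'])
print(pagerank.sort_values('pagerank', ascending=False).head(15))

centrality = pd.DataFrame.from_dict(nx.degree_centrality(G), orient='index', columns=['centrality'])
print(centrality.sort_values('centrality', ascending=False).head(15))
```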
[ "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
cba197ad009935c094de8476c591740baac6e3cf
16,433
ipynb
Jupyter Notebook
docs/python/inference/notebooks/onnxruntime-stvm-tutorial.ipynb
prateek9623/onnxruntime
a3cb59bc5ef273de009780ab1c906048d8a0e750
[ "MIT" ]
2
2021-12-20T01:43:17.000Z
2021-12-20T01:49:06.000Z
docs/python/inference/notebooks/onnxruntime-stvm-tutorial.ipynb
prateek9623/onnxruntime
a3cb59bc5ef273de009780ab1c906048d8a0e750
[ "MIT" ]
30
2021-09-26T08:05:58.000Z
2022-03-31T10:45:30.000Z
docs/python/inference/notebooks/onnxruntime-stvm-tutorial.ipynb
MaxMood96/onnxruntime
a656c55a754f3758872b8864aa5640a7e2be6a48
[ "MIT" ]
1
2021-04-14T12:25:15.000Z
2021-04-14T12:25:15.000Z
31.723938
228
0.563257
[ [ [ "# ONNX Runtime: Tutorial for STVM execution provider\n\nThis notebook shows a simple example for model inference with STVM EP.\n\n\n#### Tutorial Roadmap:\n1. Prerequistes\n2. Accuracy check for STVM EP\n3. Configuration options", "_____no_output_____" ], [ "## 1. Prerequistes\n\nMake sure that you have installed all the necessary dependencies described in the corresponding paragraph of the documentation.\n\nAlso, make sure you have the `tvm` and `onnxruntime-stvm` packages in your pip environment. \n\nIf you are using `PYTHONPATH` variable expansion, make sure it contains the following paths: `<path_to_msft_onnxrt>/onnxruntime/cmake/external/tvm_update/python` and `<path_to_msft_onnxrt>/onnxruntime/build/Linux/Release`.", "_____no_output_____" ], [ "### Common import\n\nThese packages can be delivered from standard `pip`.", "_____no_output_____" ] ], [ [ "import onnx\nimport numpy as np\nfrom typing import List, AnyStr\nfrom onnx import ModelProto, helper, checker, mapping", "_____no_output_____" ] ], [ [ "### Specialized import\n\nIt is better to collect these packages from source code in order to clearly understand what is available to you right now.", "_____no_output_____" ] ], [ [ "import tvm.testing\nfrom tvm.contrib.download import download_testdata\nimport onnxruntime.providers.stvm # nessesary to register tvm_onnx_import_and_compile and others", "_____no_output_____" ] ], [ [ "### Helper functions for working with ONNX ModelProto\n\nThis set of helper functions allows you to recognize the meta information of the models. This information is needed for more versatile processing of ONNX models.", "_____no_output_____" ] ], [ [ "def get_onnx_input_names(model: ModelProto) -> List[AnyStr]:\n inputs = [node.name for node in model.graph.input]\n initializer = [node.name for node in model.graph.initializer]\n inputs = list(set(inputs) - set(initializer))\n return sorted(inputs)\n\n\ndef get_onnx_output_names(model: ModelProto) -> List[AnyStr]:\n return [node.name for node in model.graph.output]\n\n\ndef get_onnx_input_types(model: ModelProto) -> List[np.dtype]:\n input_names = get_onnx_input_names(model)\n return [\n mapping.TENSOR_TYPE_TO_NP_TYPE[node.type.tensor_type.elem_type]\n for node in sorted(model.graph.input, key=lambda node: node.name) if node.name in input_names\n ]\n\n\ndef get_onnx_input_shapes(model: ModelProto) -> List[List[int]]:\n input_names = get_onnx_input_names(model)\n return [\n [dv.dim_value for dv in node.type.tensor_type.shape.dim]\n for node in sorted(model.graph.input, key=lambda node: node.name) if node.name in input_names\n ]\n\n\ndef get_random_model_inputs(model: ModelProto) -> List[np.ndarray]:\n input_shapes = get_onnx_input_shapes(model)\n input_types = get_onnx_input_types(model)\n assert len(input_types) == len(input_shapes)\n inputs = [np.random.uniform(size=shape).astype(dtype) for shape, dtype in zip(input_shapes, input_types)]\n return inputs", "_____no_output_____" ] ], [ [ "### Wrapper helper functions for Inference\n\nWrapper helper functions for running model inference using ONNX Runtime EP.", "_____no_output_____" ] ], [ [ "def get_onnxruntime_output(model: ModelProto, inputs: List, provider_name: AnyStr) -> np.ndarray:\n output_names = get_onnx_output_names(model)\n input_names = get_onnx_input_names(model)\n assert len(input_names) == len(inputs)\n input_dict = {input_name: input_value for input_name, input_value in zip(input_names, inputs)}\n\n inference_session = onnxruntime.InferenceSession(model.SerializeToString(), 
providers=[provider_name])\n output = inference_session.run(output_names, input_dict)\n\n # Unpack output if there's only a single value.\n if len(output) == 1:\n output = output[0]\n return output\n\n\ndef get_cpu_onnxruntime_output(model: ModelProto, inputs: List) -> np.ndarray:\n return get_onnxruntime_output(model, inputs, \"CPUExecutionProvider\")\n\n\ndef get_stvm_onnxruntime_output(model: ModelProto, inputs: List) -> np.ndarray:\n return get_onnxruntime_output(model, inputs, \"StvmExecutionProvider\")", "_____no_output_____" ] ], [ [ "### Helper function for checking accuracy\n\nThis function uses the TVM API to compare two output tensors. The tensor obtained using the `CPUExecutionProvider` is used as a reference.\n\nIf a mismatch is found between tensors, an appropriate exception will be thrown.", "_____no_output_____" ] ], [ [ "def verify_with_ort_with_inputs(\n model,\n inputs,\n out_shape=None,\n opset=None,\n freeze_params=False,\n dtype=\"float32\",\n rtol=1e-5,\n atol=1e-5,\n opt_level=1,\n):\n if opset is not None:\n model.opset_import[0].version = opset\n\n ort_out = get_cpu_onnxruntime_output(model, inputs)\n stvm_out = get_stvm_onnxruntime_output(model, inputs)\n for stvm_val, ort_val in zip(stvm_out, ort_out):\n tvm.testing.assert_allclose(ort_val, stvm_val, rtol=rtol, atol=atol)\n assert ort_val.dtype == stvm_val.dtype", "_____no_output_____" ] ], [ [ "### Helper functions for download models\n\nThese functions use the TVM API to download models from the ONNX Model Zoo.", "_____no_output_____" ] ], [ [ "BASE_MODEL_URL = \"https://github.com/onnx/models/raw/master/\"\nMODEL_URL_COLLECTION = {\n \"ResNet50-v1\": \"vision/classification/resnet/model/resnet50-v1-7.onnx\",\n \"ResNet50-v2\": \"vision/classification/resnet/model/resnet50-v2-7.onnx\",\n \"SqueezeNet-v1.1\": \"vision/classification/squeezenet/model/squeezenet1.1-7.onnx\",\n \"SqueezeNet-v1.0\": \"vision/classification/squeezenet/model/squeezenet1.0-7.onnx\",\n \"Inception-v1\": \"vision/classification/inception_and_googlenet/inception_v1/model/inception-v1-7.onnx\",\n \"Inception-v2\": \"vision/classification/inception_and_googlenet/inception_v2/model/inception-v2-7.onnx\",\n}\n\n\ndef get_model_url(model_name):\n return BASE_MODEL_URL + MODEL_URL_COLLECTION[model_name]\n\n\ndef get_name_from_url(url):\n return url[url.rfind(\"/\") + 1 :].strip()\n\n\ndef find_of_download(model_name):\n model_url = get_model_url(model_name)\n model_file_name = get_name_from_url(model_url)\n return download_testdata(model_url, model_file_name, module=\"models\")", "_____no_output_____" ] ], [ [ "## 2. Accuracy check for STVM EP \n\nThis section will check the accuracy. The check will be to compare the output tensors for `CPUExecutionProvider` and `STVMExecutionProvider`. 
See the description of `verify_with_ort_with_inputs` function used above.\n\n\n### Check for simple architectures", "_____no_output_____" ] ], [ [ "def get_two_input_model(op_name: AnyStr) -> ModelProto:\n dtype = \"float32\"\n in_shape = [1, 2, 3, 3]\n in_type = mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(dtype)]\n out_shape = in_shape\n out_type = in_type\n\n layer = helper.make_node(op_name, [\"in1\", \"in2\"], [\"out\"])\n graph = helper.make_graph(\n [layer],\n \"two_input_test\",\n inputs=[\n helper.make_tensor_value_info(\"in1\", in_type, in_shape),\n helper.make_tensor_value_info(\"in2\", in_type, in_shape),\n ],\n outputs=[\n helper.make_tensor_value_info(\n \"out\", out_type, out_shape\n )\n ],\n )\n model = helper.make_model(graph, producer_name=\"two_input_test\")\n checker.check_model(model, full_check=True)\n return model", "_____no_output_____" ], [ "onnx_model = get_two_input_model(\"Add\")\ninputs = get_random_model_inputs(onnx_model)\nverify_with_ort_with_inputs(onnx_model, inputs)\nprint(\"****************** Success! ******************\")", "STVM ep options:\ntarget: llvm -mcpu=skylake-avx512\ntarget_host: llvm -mcpu=skylake-avx512\nopt level: 3\nfreeze weights: 1\ntuning file path: \ntuning type: Ansor\nconvert layout to NHWC: 0\ninput tensor names: \ninput tensor shapes: \nBuild TVM graph executor\n****************** Success! ******************\n" ] ], [ [ "### Check for DNN architectures ", "_____no_output_____" ] ], [ [ "def get_onnx_model(model_name):\n model_path = find_of_download(model_name)\n onnx_model = onnx.load(model_path)\n return onnx_model", "_____no_output_____" ], [ "model_name = \"ResNet50-v1\"\n\nonnx_model = get_onnx_model(model_name)\ninputs = get_random_model_inputs(onnx_model)\nverify_with_ort_with_inputs(onnx_model, inputs)\nprint(\"****************** Success! ******************\")", "STVM ep options:\ntarget: llvm -mcpu=skylake-avx512\ntarget_host: llvm -mcpu=skylake-avx512\nopt level: 3\nfreeze weights: 1\ntuning file path: \ntuning type: Ansor\nconvert layout to NHWC: 0\ninput tensor names: \ninput tensor shapes: \n" ] ], [ [ "## 3. Configuration options\n\nThis section shows how you can configure STVM EP using custom options. For more details on the options used, see the corresponding section of the documentation.", "_____no_output_____" ] ], [ [ "provider_name = \"StvmExecutionProvider\"\nprovider_options = dict(target=\"llvm -mtriple=x86_64-linux-gnu\",\n target_host=\"llvm -mtriple=x86_64-linux-gnu\",\n opt_level=3,\n freeze_weights=True,\n tuning_file_path=\"\",\n tuning_type=\"Ansor\",\n)", "_____no_output_____" ], [ "model_name = \"ResNet50-v1\"\nonnx_model = get_onnx_model(model_name)\ninput_dict = {input_name: input_value for input_name, input_value in zip(get_onnx_input_names(onnx_model),\n get_random_model_inputs(onnx_model))}\noutput_names = get_onnx_output_names(onnx_model)", "_____no_output_____" ], [ "stvm_session = onnxruntime.InferenceSession(onnx_model.SerializeToString(),\n providers=[provider_name],\n provider_options=[provider_options]\n )\noutput = stvm_session.run(output_names, input_dict)[0]\nprint(f\"****************** Output shape: {output.shape} ******************\")", "STVM ep options:\ntarget: llvm -mtriple=x86_64-linux-gnu\ntarget_host: llvm -mtriple=x86_64-linux-gnu\nopt level: 3\nfreeze weights: 1\ntuning file path: \ntuning type: Ansor\nconvert layout to NHWC: 0\ninput tensor names: \ninput tensor shapes: \nBuild TVM graph executor\n****************** Output shape: (1, 1000) ******************\n" ] ] ]
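One small check that complements the prerequisites section above: `onnxruntime.get_available_providers()` lists the execution providers registered in the current build. Whether `StvmExecutionProvider` actually appears depends on how onnxruntime was built, so treat the expected output as an assumption:

```python
import onnxruntime

providers = onnxruntime.get_available_providers()
print(providers)  # e.g. ['StvmExecutionProvider', 'CPUExecutionProvider', ...]
if "StvmExecutionProvider" not in providers:
    print("STVM EP is not registered - revisit the build/installation steps.")
```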
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
cba1b3de6333290af8d8f0427a833aab2d04ff27
128,938
ipynb
Jupyter Notebook
notebooks/experimental/zero_crossing_in_time_domain.ipynb
cloudedbats/cloudedbats_dsp
1ca3bd371a6ea504a7d11540fe8afd7bcac56c43
[ "MIT" ]
3
2018-04-07T15:54:37.000Z
2019-03-26T14:55:07.000Z
notebooks/experimental/zero_crossing_in_time_domain.ipynb
cloudedbats/cloudedbats_dsp
1ca3bd371a6ea504a7d11540fe8afd7bcac56c43
[ "MIT" ]
null
null
null
notebooks/experimental/zero_crossing_in_time_domain.ipynb
cloudedbats/cloudedbats_dsp
1ca3bd371a6ea504a7d11540fe8afd7bcac56c43
[ "MIT" ]
null
null
null
353.254795
119,264
0.929408
[ [ [ "# Wave (.wav) to Zero Crossing.\n\nThis is an attempt to produce synthetic ZC (Zero Crossing) from FS (Full Scan) files. All parts are calculated in the time domain to mimic true ZC. FFT is not used (maybe with the exception of the internal implementation of the Butterworth filter).\n\nCurrent status: Seems to work well for \"easy files\", but not for mixed and low amplitude recordings. I don't know why...\n\nThe resulting plot is both embedded in this notebook and as separate files: 'zc_in_time_domain_test_1.png' and 'zc_in_time_domain_test_2.png'.\n\nSources in information/inspiration:\n\n- http://users.lmi.net/corben/fileform.htm#Anabat%20File%20Formats\n- https://stackoverflow.com/questions/3843017/efficiently-detect-sign-changes-in-python\n- https://github.com/riggsd/zcant/blob/master/zcant/conversion.py\n", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport matplotlib.pyplot as plt\n\nimport numpy as np\nimport scipy.io.wavfile as wf\nimport scipy.signal\n#import sounddevice", "_____no_output_____" ], [ "# Settings.\n#sound_file = '../data_in/Mdau_TE384.wav'\nsound_file = '../data_in/Ppip_TE384.wav'\n#sound_file = '../data_in/Myotis-Plecotus-Eptesicus_TE384.wav'\n\ncutoff_freq_hz = 18000\nzc_divratio = 4 ", "_____no_output_____" ], [ "# Debug settings.\nplay_sound = False\ndebug = False", "_____no_output_____" ], [ "# Read the sound file.\n(sampling_freq, signal_int16) = wf.read(sound_file, 'rb')\nprint('Sampling freq in file: ' + str(sampling_freq) + ' Hz.')\nprint(str(len(signal_int16)) + ' samples.')\n#if play_sound:\n# sounddevice.play(signal_int16, sampling_freq)\n# sounddevice.wait()", "Sampling freq in file: 38400 Hz.\n586452 samples.\n" ], [ "# Check if TE, Time Expansion.\nif '_TE' in sound_file:\n sampling_freq *= 10\nprint('Sampling freq: ' + str(sampling_freq) + ' Hz.')", "Sampling freq: 384000 Hz.\n" ], [ "# Signed int16 to [-1.0, 1.0].\nsignal = np.array(signal_int16) / 32768 ", "_____no_output_____" ], [ "# Noise level. RMS, root-mean-square.\nnoise_level = np.sqrt(np.mean(np.square(signal)))\nprint(noise_level)", "0.016186364861810285\n" ], [ "# Filter. Butterworth.\nnyquist = 0.5 * sampling_freq\nlow = cutoff_freq_hz / nyquist\nfilter_order = 9\nb, a = scipy.signal.butter(filter_order, [low], btype='highpass')\n#signal= scipy.signal.lfilter(b, a, signal)\nsignal= scipy.signal.filtfilt(b, a, signal)", "_____no_output_____" ], [ "# Add hysteresis around zero to remove noise.\nsignal[(signal < noise_level) & (signal > -noise_level)] = 0.0", "_____no_output_____" ], [ "# Check where zero crossings may occur.\nsign_diff_array = np.diff(np.sign(signal))", "_____no_output_____" ], [ "# Extract positive zero passings and interpolate where it occurs.\nindex_array = []\nold_index = None\nfor index, value in enumerate(sign_diff_array):\n if value in [2., 1., 0.]:\n # Check for raising signal level.\n if value == 2.:\n # From negative directly to positive. Calculate interpolated index.\n x_adjust = signal[index] / (signal[index] - signal[index+1])\n index_array.append(index + x_adjust)\n old_index = None\n elif (value == 1.) and (old_index is None):\n # From negative to zero.\n old_index = index\n elif (value == 1.) and (old_index is not None):\n # From zero to positive. 
Calculate interpolated index.\n x_adjust = signal[old_index] / (signal[old_index] - signal[index+1])\n index_array.append(old_index + x_adjust)\n old_index = None \n else:\n # Falling signal level.\n old_index = None\n\nprint(len(index_array))\nif debug:\n print(index_array[:100])", "3944\n" ], [ "zero_crossings = index_array[::zc_divratio]\nprint(len(zero_crossings))", "986\n" ], [ "# Prepare lists.\nfreqs = []\ntimes = []\nfor index, zero_crossing in enumerate(zero_crossings[0:-1]):\n freq = zero_crossings[index+1] - zero_crossings[index]\n freq_hz = sampling_freq * zc_divratio / freq\n if freq_hz >= cutoff_freq_hz:\n freqs.append(freq_hz)\n times.append(zero_crossing)\nprint(len(freqs))", "929\n" ], [ "# Prepare arrays for plotting.\nfreq_array_khz = np.array(freqs) / 1000.0\ntime_array_s = np.array(times) / sampling_freq\ntime_array_compact = range(0, len(times))\nif debug:\n print(len(freq_array_khz))\n print(freq_array_khz[:100])\n print(time_array_s[:100])", "_____no_output_____" ], [ "# Plot two diagrams, normal and compressed time.\nfig, (ax1, ax2) = plt.subplots(2,1,\n figsize=(16, 5), \n dpi=150, \n #facecolor='w', \n #edgecolor='k',\n )\n# ax1.\nax1.scatter(time_array_s, freq_array_khz, s=1, c='navy', alpha=0.5)\n\nax1.set_title('File: ' + sound_file)\nax1.set_ylim((0,120))\nax1.minorticks_on()\nax1.grid(which='major', linestyle='-', linewidth='0.5', alpha=0.6)\nax1.grid(which='minor', linestyle='-', linewidth='0.5', alpha=0.3)\nax1.tick_params(which='both', top='off', left='off', right='off', bottom='off') \n# ax2.\nax2.scatter(time_array_compact, freq_array_khz, s=1, c='navy', alpha=0.5)\n\nax2.set_ylim((0,120))\nax2.minorticks_on()\nax2.grid(which='major', linestyle='-', linewidth='0.5', alpha=0.6)\nax2.grid(which='minor', linestyle='-', linewidth='0.5', alpha=0.3)\nax2.tick_params(which='both', top='off', left='off', right='off', bottom='off') \n\nplt.tight_layout()\nfig.savefig('zc_in_time_domain_test.png')\n#fig.savefig('zc_in_time_domain_test_1.png')\n#fig.savefig('zc_in_time_domain_test_2.png')\nplt.show()", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cba1cdf7d35dfeca8648d7d4908d488381c95951
2,592
ipynb
Jupyter Notebook
07_Visualization/Online_Retail/Exercises.ipynb
brothermalcolm/pandas_exercises-master
b5bad32ffbcd7d3e94fa36ff405a4319d2f3db92
[ "MIT" ]
1
2019-09-19T07:35:23.000Z
2019-09-19T07:35:23.000Z
07_Visualization/Online_Retail/Exercises.ipynb
brothermalcolm/pandas_exercises-master
b5bad32ffbcd7d3e94fa36ff405a4319d2f3db92
[ "MIT" ]
null
null
null
07_Visualization/Online_Retail/Exercises.ipynb
brothermalcolm/pandas_exercises-master
b5bad32ffbcd7d3e94fa36ff405a4319d2f3db92
[ "MIT" ]
1
2020-11-01T23:07:44.000Z
2020-11-01T23:07:44.000Z
18.514286
174
0.526235
[ [ [ "# Online Retails Purchase", "_____no_output_____" ], [ "### Introduction:\n\n\n\n### Step 1. Import the necessary libraries", "_____no_output_____" ], [ "### Step 2. Import the dataset from this [address](https://raw.githubusercontent.com/guipsamora/pandas_exercises/master/Visualization/Online_Retail/Online_Retail.csv). ", "_____no_output_____" ], [ "### Step 3. Assign it to a variable called online_rt", "_____no_output_____" ], [ "### Step 4. Create a histogram with the 10 countries that have the most 'Quantity' ordered except UK", "_____no_output_____" ], [ "### Step 5. Exclude negative Quatity entries", "_____no_output_____" ], [ "### Step 6. Create a scatterplot with the Quantity per UnitPrice by CustomerID for the top 3 Countries", "_____no_output_____" ], [ "### BONUS: Create your own question and answer it.", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
cba1cf4e522b49caab56f9c11a9c606bf0c8aa06
57,050
ipynb
Jupyter Notebook
SourceAnalysis/1.BinnedLikelihood/1.BinnedLikelihood.ipynb
emilychau/AnalysisThreads
8270f7b3baa65816a12bff52e0cf7d9038ee9533
[ "MIT" ]
1
2021-02-15T08:38:56.000Z
2021-02-15T08:38:56.000Z
SourceAnalysis/1.BinnedLikelihood/1.BinnedLikelihood.ipynb
emilychau/AnalysisThreads
8270f7b3baa65816a12bff52e0cf7d9038ee9533
[ "MIT" ]
null
null
null
SourceAnalysis/1.BinnedLikelihood/1.BinnedLikelihood.ipynb
emilychau/AnalysisThreads
8270f7b3baa65816a12bff52e0cf7d9038ee9533
[ "MIT" ]
null
null
null
47.82062
791
0.663979
[ [ [ "# Binned Likelihood Tutorial\n\nThe detection, flux determination, and spectral modeling of Fermi LAT sources is accomplished by a maximum likelihood optimization technique as described in the [Cicerone](https://fermi.gsfc.nasa.gov/ssc/data/analysis/documentation/Cicerone/Cicerone_Likelihood/) (see also, e.g., [Abdo, A. A. et al. 2009, ApJS, 183, 46](http://adsabs.harvard.edu/abs/2009ApJS..183...46A)).\n\nTo illustrate how to use the Likelihood software, this tutorial gives a step-by-step description for performing a binned likelihood analysis.", "_____no_output_____" ], [ "## Binned vs Unbinned Likelihood\n\nBinned likelihood analysis is the preferred method for most types of LAT analysis (see [Cicerone](https://fermi.gsfc.nasa.gov/ssc/data/analysis/documentation/Cicerone/Cicerone_Likelihood/)).\n\nHowever, when analyzing data over short time periods (with few events), it is better to use the **unbinned** analysis.\n\nTo perform an unbinned likelihood analysis, see the [Unbinned Likelihood](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/likelihood_tutorial.html) tutorial.\n\n**Additional references**:\n* [SciTools References](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/references.html)\n* Descriptions of available [Spectral and Spatial Models](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/source_models.html)\n* Examples of [XML Model Definitions for Likelihood](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/xml_model_defs.html#xmlModelDefinitions):\n * [Power Law](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/xml_model_defs.html#powerlaw)\n * [Broken Power Law](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/xml_model_defs.html#brokenPowerLaw)\n * [Broken Power Law 2](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/xml_model_defs.html#powerLaw2)\n * [Log Parabola](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/xml_model_defs.html#logParabola)\n * [Exponential Cutoff](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/xml_model_defs.html#expCutoff)\n * [BPL Exponential Cutoff](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/xml_model_defs.html#bplExpCutoff)\n * [Gaussian](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/xml_model_defs.html#gaussian)\n * [Constant Value](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/xml_model_defs.html#constantValue)\n * [File Function](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/xml_model_defs.html#fileFunction)\n * [Band Function](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/xml_model_defs.html#bandFunction)\n * [PL Super Exponential Cutoff](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/xml_model_defs.html#plSuperExpCutoff)", "_____no_output_____" ], [ "# Prerequisites\n\nYou will need an **event** data file, a **spacecraft** data file (also referred to as the \"pointing and livetime history\" file), and the current **background models** (available for [download](https://fermi.gsfc.nasa.gov/ssc/data/access/lat/BackgroundModels.html)). They are also found in code cells below.\n\nYou may choose to select your own data files, or to use the files provided within this tutorial.\n\nCustom data sets may be retrieved from the [Lat Data Server](http://fermi.gsfc.nasa.gov/cgi-bin/ssc/LAT/LATDataQuery.cgi).", "_____no_output_____" ], [ "# Outline\n\n1. 
**Make Subselections from the Event Data**\n\n Since there is computational overhead for each event associated with each diffuse component, it is useful to filter out any events that are not within the extraction region used for the analysis.\n\n\n2. **Make Counts Maps from the Event Files**\n \n By making simple FITS images, we can inspect our data and pick out obvious sources.\n\n\n3. **Download the latest diffuse models**\n\n The recommended models for a normal point source analysis are `gll_iem_v07.fits` (a very large file) and `iso_P8R3_SOURCE_V2_v1.txt`. All of the background models along with a description of the models are available [here](https://fermi.gsfc.nasa.gov/ssc/data/access/lat/BackgroundModels.html).\n\n\n4. **Create a Source Model XML File**\n\n The source model XML file contains the various sources and their model parameters to be fit using the **gtlike** tool.\n\n\n5. **Create a 3D Counts Cube**\n\n The binned counts cube is used to reduce computation requirements in regions with large numbers of events.\n\n\n6. **Compute Livetimes**\n\n Precomputing the livetime for the dataset speeds up the exposure calculation.\n \n\n7. **Compute Exposure Cube**\n\n This accounts for exposure as a function of energy, based on the cuts made. The exposure map must be recomputed if any change is made to the data selection or binning.\n \n\n8. **Compute Source Maps**\n\n Here the exposure calculation is applied to each of the sources described in the model.\n\n\n9. **Perform the Likelihood Fit**\n\n Fitting the data to the model provides flux, errors, spectral indices, and other information.\n\n\n10. **Create a Model Map**\n\n This can be compared to the counts map to verify the quality of the fit and to make a residual map.", "_____no_output_____" ], [ "# 1. Make subselections from the event data\n\nFor this case we will use two years of LAT Pass 8 data. This is a longer data set than is described in the [Extract LAT Data](../DataSelection/1.ExtractLATData.ipynb) tutorial.\n\n>**NOTE**: The ROI used by the binned likelihood analysis is defined by the 3D counts map boundary. The region selection used in the data extraction step, which is conical, must fully contain the 3D counts map spatial boundary, which is square.\n\nSelection of data:\n\n Search Center (RA, DEC) =(193.98, -5.82)\n Radius = 15 degrees\n Start Time (MET) = 239557417 seconds (2008-08-04 T15:43:37)\n Stop Time (MET) = 302572802 seconds (2010-08-04 T00:00:00)\n Minimum Energy = 100 MeV\n Maximum Energy = 500000 MeV\n\nThis two-year dataset generates numerous data files. 
We provide the user with the original event data files and the accompanying spacecraft file:\n\n* L181126210218F4F0ED2738_PH00.fits (5.0 MB)\n* L181126210218F4F0ED2738_PH01.fits (10.5 MB)\n* L181126210218F4F0ED2738_PH02.fits (6.5 MB)\n* L181126210218F4F0ED2738_PH03.fits (9.2 MB)\n* L181126210218F4F0ED2738_PH04.fits (7.4 MB)\n* L181126210218F4F0ED2738_PH05.fits (6.2 MB)\n* L181126210218F4F0ED2738_PH06.fits (4.5 MB)\n* L181126210218F4F0ED2738_SC00.fits (256 MB spacecraft file)", "_____no_output_____" ] ], [ [ "!wget https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/data/BinnedLikelihood/L181126210218F4F0ED2738_PH00.fits\n!wget https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/data/BinnedLikelihood/L181126210218F4F0ED2738_PH01.fits\n!wget https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/data/BinnedLikelihood/L181126210218F4F0ED2738_PH02.fits\n!wget https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/data/BinnedLikelihood/L181126210218F4F0ED2738_PH03.fits\n!wget https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/data/BinnedLikelihood/L181126210218F4F0ED2738_PH04.fits\n!wget https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/data/BinnedLikelihood/L181126210218F4F0ED2738_PH05.fits\n!wget https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/data/BinnedLikelihood/L181126210218F4F0ED2738_PH06.fits\n!wget https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/data/BinnedLikelihood/L181126210218F4F0ED2738_SC00.fits", "_____no_output_____" ], [ "!mkdir ./data\n!mv *.fits ./data\n!ls ./data", "_____no_output_____" ] ], [ [ "In order to combine the two events files for your analysis, you must first generate a text file listing the events files to be included.\n\nIf you do not wish to download all the individual files, you can skip to the next step and retrieve the combined, filtered event file. However, you will need the spacecraft file to complete the analysis, so you should retrieve that now.\n\nTo generate the file list, type:", "_____no_output_____" ] ], [ [ "!ls ./data/*_PH* > ./data/binned_events.txt", "_____no_output_____" ] ], [ [ "When analyzing point sources, it is recommended that you include events with high probability of being photons. To do this, you should use **gtselect** to cut on the event class, keeping only the SOURCE class events (event class 128, or as recommended in the Cicerone).\n\nIn addition, since we do not wish to cut on any of the three event types (conversion type, PSF, or EDISP), we will use `evtype=3` (which corresponds to standard analysis in Pass 7). Note that `INDEF` is the default for evtype in gtselect.", "_____no_output_____" ], [ "```bash\ngtselect evclass=128 evtype=3\n```", "_____no_output_____" ], [ "Be aware that `evclass` and `evtype` are hidden parameters. So, to use them, you must type them on the command line.\n\nThe text file you made (`binned_events.txt`) will be used in place of the input fits filename when running gtselect. The syntax requires that you use an @ before the filename to indicate that this is a text file input rather than a fits file.\n\nWe perform a selection to the data we want to analyze. For this example, we consider the source class photons within our 15 degree region of interest (ROI) centered on the blazar 3C 279. For some of the selections that we made with the data server and don't want to modify, we can use \"INDEF\" to instruct the tool to read those values from the data file header. 
Here, we are only filtering on event class (not on event type) and applying a zenith cut, so many of the parameters are designated as \"INDEF\".", "_____no_output_____" ], [ "We apply the **gtselect** tool to the data file as follows:", "_____no_output_____" ] ], [ [ "%%bash\ngtselect evclass=128 evtype=3\n @./data/binned_events.txt\n ./data/3C279_binned_filtered.fits\n INDEF\n INDEF\n INDEF\n INDEF\n INDEF\n 100\n 500000\n 90", "_____no_output_____" ] ], [ [ "In the last step we also selected the energy range and the maximum zenith angle value (90 degrees) as suggested in Cicerone and recommended by the LAT instrument team.\n\nThe Earth's limb is a strong source of background gamma rays and we can filter them out with a zenith-angle cut. The use of \"zmax\" in calculating the exposure allows for a more selective method than just using the ROI cuts in controlling the Earth limb contamination. The filtered data from the above steps are provided [here](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/data/BinnedLikelihood/3C279_binned_filtered.fits).", "_____no_output_____" ], [ "After the data selection is made, we need to select the good time intervals in which the satellite was working in standard data taking mode and the data quality was good. For this task we use **gtmktime** to select GTIs by filtering on information provided in the spacecraft file. The current **gtmktime** filter expression recommended by the LAT team in the Cicerone is:", "_____no_output_____" ], [ "```\n(DATA_QUAL>0)&&(LAT_CONFIG==1)\n```", "_____no_output_____" ], [ "This excludes time periods when some spacecraft event has affected the quality of the data; it ensures the LAT instrument was in normal science data-taking mode.\n\nHere is an example of running **gtmktime** for our analysis of the region surrounding 3C 279.", "_____no_output_____" ] ], [ [ "%%bash\ngtmktime\n @./data/L181126210218F4F0ED2738_SC00.fits\n (DATA_QUAL>0)&&(LAT_CONFIG==1)\n no\n ./data/3C279_binned_filtered.fits\n ./data/3C279_binned_gti.fits", "_____no_output_____" ] ], [ [ "The data file with all the cuts described above is provided in this [link](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/data/BinnedLikelihood/3C279_binned_gti.fits). A more detailed discussion of data selection can be found in the [Data Preparation](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/data_preparation.html) analysis thread.\n\nTo view the DSS keywords in a given extension of a data file, use the **gtvcut** tool and review the data cuts on the EVENTS extension. This provides a listing of the keywords reflecting each cut applied to the data file and their values, including the entire list of GTIs. (Use the option `suppress_gtis=no` to view the entire list.)", "_____no_output_____" ] ], [ [ "%%bash\ngtvcut suppress_gtis=no\n ./data/3C279_binned_gti.fits\n EVENTS", "_____no_output_____" ] ], [ [ "Here you can see the event class and event type, the location and radius of the data selection, as well as the energy range in MeV, the zenith angle cut, and the fact that the time cuts to be used in the exposure calculation are defined by the GTI table.\n\nVarious Fermitools will be unable to run if you have multiple copies of a particular DSS keyword. This can happen if the position used in extracting the data from the data server is different than the position used with **gtselect**. It is wise to review the keywords for duplicates before proceeding. 
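If you prefer to inspect the DSS keywords programmatically rather than with **gtvcut**, a minimal sketch using astropy is shown below; the `DSTYP`/`DSVAL`/`DSUNI`/`DSREF` prefixes follow the standard DSS keyword convention, and the file name is the one produced above.

```python
from astropy.io import fits

# List every DSS keyword card in the EVENTS header so any duplicates stand out.
with fits.open('./data/3C279_binned_gti.fits') as hdul:
    for card in hdul['EVENTS'].header.cards:
        if card.keyword.startswith(('DSTYP', 'DSVAL', 'DSUNI', 'DSREF')):
            print(card.keyword, '=', card.value)
```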
If you do have keyword duplication, it is advisable to regenerate the data file with consistent cuts.", "_____no_output_____" ], [ "# 2. Make a counts map from the event data\n\nNext, we create a counts map of the ROI, summed over photon energies, in order to identify candidate sources and to ensure that the field looks sensible as a simple sanity check. For creating the counts map, we will use the [gtbin](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/help/gtbin.txt) tool with the option \"CMAP\" (no spacecraft file is necessary for this step).\n\nThen we will view the output file, as shown below:", "_____no_output_____" ] ], [ [ "%%bash\ngtbin\n CMAP\n ./data/3C279_binned_gti.fits\n ./data/3C279_binned_cmap.fits\n NONE\n 150\n 150\n 0.2\n CEL\n 193.98\n -5.82\n 0.0\n AIT", "_____no_output_____" ] ], [ [ "We chose an ROI of 15 degrees, corresponding to 30 degrees in diameter. Since we want a pixel size of 0.2 degrees/pixel, then we must select 30/0.2=150 pixels for the size of the x and y axes. The last command launches the visualization tool _ds9_ and produces a display of the generated [counts](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/data/BinnedLikelihood/3C279_binned_cmap.fits) map.", "_____no_output_____" ], [ "<img src='https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/images/BinnedLikelihood/3C279_binned_counts_map.png'>\n\nYou can see several strong sources and a number of weaker sources in this map. Mousing over the positions of these sources shows that two of them are likely 3C 279 and 3C 273.\n\nIt is important to inspect your data prior to proceeding to verify that the contents are as you expect. A malformed data query or improper data selection can generate a non-circular region, or a file with zero events. By inspecting your data prior to analysis, you have an opportunity to detect such issues early in the analysis.\n\nA more detailed discussion of data exploration can be found in the [Explore LAT Data](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/explore_latdata.html) analysis thread.", "_____no_output_____" ], [ "# 3. Create a 3-D (binned) counts map\n\nSince the counts map shows the expected data, you are ready to prepare your data set for analysis. For binned likelihood analysis, the data input is a three-dimensional counts map with an energy axis, called a counts cube. The gtbin tool performs this task as well by using the `CCUBE` option.\n\n<img src=\"https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/images/BinnedLikelihood/square_in_circle.png\">\n\nThe binning of the counts map determines the binning of the exposure calculation. The likelihood analysis may lose accuracy if the energy bins are not sufficiently narrow to accommodate more rapid variations in the effective area with decreasing energy below a few hundred MeV. For a typical analysis, ten logarithmically spaced bins per decade in energy are recommended. The analysis is less sensitive to the spatial binning and 0.2 deg bins are a reasonable standard.\n\nThis counts cube is a square binned region that must fit within the circular acceptance cone defined during the data extraction step, and visible in the counts map above. To find the maximum size of the region your data will support, find the side of a square that can be fully inscribed within your circular acceptance region (multiply the radius of the acceptance cone by sqrt(2)). 
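As a quick check of that geometry (a one-line calculation, not part of the original tutorial), the side of the largest square inscribed in the 15-degree acceptance cone is:

```python
import math

# Largest inscribed square: side = radius * sqrt(2).
roi_radius_deg = 15.0
print(roi_radius_deg * math.sqrt(2))  # ~21.21 degrees
```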
For this example, the maximum length for a side is 21.21 degrees.", "_____no_output_____" ], [ "To create the counts cube, we run [gtbin](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/help/gtbin.txt) as follows:", "_____no_output_____" ] ], [ [ "%%bash\ngtbin\n CCUBE\n ./data/3C279_binned_gti.fits\n ./data/3C279_binned_ccube.fits\n NONE\n 100\n 100\n 0.2\n CEL\n 193.98\n -5.82\n 0.0\n AIT\n LOG\n 100\n 500000\n 37", "_____no_output_____" ] ], [ [ "[gtbin](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/help/gtbin.txt) takes the following as parameters:\n* Type of output file (CCUBE|CMAP|LC|PHA1|PHA2|HEALPIX)\n* Event data file name\n* Output file name\n* Spacecraft data file name\n* Size of the X axis in pixels\n* Size of the Y axis in pixels\n* Image scale (in degrees/pixel)\n* Coordindate system (CEL - celestial; GAL - galactic) (pick CEL or GAL)\n* First coordinate of image center in degrees (RA or galactic l)\n* Second coordinate of image center in degrees (DEC or galactic b)\n* Rotation angle of image axis, in degrees\n* Projection method (AIT|ARC|CAR|GLS|MER|NCP|SIN|STG|TAN)\n* Algorithm for defining energy bins (FILE|LIN|LOG)\n* Start value for first energy bin in MeV\n* Stop value for last energy bin in MeV\n* Number of logarithmically uniform energy bins", "_____no_output_____" ], [ "The counts cube generated in this step is provided [here](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/data/BinnedLikelihood/3C279_binned_ccube.fits).\n\nIf you open the file with _ds9_, you see that it is made up of 37 images, one for each logarithmic energy bin. By playing through these images, it is easy to see how the PSF of the LAT changes with energy. You can also see that changing energy cuts could be helpful when trying to optimize the localization or spectral information for specific sources.\n\nBe sure to verify that there are no black corners on your counts cube. These corners correspond to regions with no data and will cause errors in your exposure calculations.", "_____no_output_____" ], [ "# 4. Download the latest diffuse model files\n\nWhen you use the current Galactic diffuse emission model ([`gll_iem_v07.fits`](https://fermi.gsfc.nasa.gov/ssc/data/analysis/software/aux/4fgl/gll_iem_v07.fits)) in a likelihood analysis, you also want to use the corresponding model for the extragalactic isotropic diffuse emission, which includes the residual cosmic-ray background. The recommended isotropic model for point source analysis is [`iso_P8R3_SOURCE_V2_v1.txt`](https://fermi.gsfc.nasa.gov/ssc/data/analysis/software/aux/4fgl/iso_P8R3_SOURCE_V2_v1.txt).\n\nAll the Pass 8 background models have been included in the Fermitools distribution, in the `$(FERMI_DIR)/refdata/fermi/galdiffuse/` directory. If you use that path in your model, you should not have to download the diffuse models individually.\n\n>**NOTE**: Keep in mind that the isotropic model needs to agree with both the event class and event type selections you are using in your analysis. The iso_P8R3_SOURCE_V2_v1.txt isotropic spectrum is valid only for the latest response functions and only for data sets with front + back events combined. All of the most up-to-date background models along with a description of the models are available [here](https://fermi.gsfc.nasa.gov/ssc/data/access/lat/BackgroundModels.html).", "_____no_output_____" ], [ "# 5. Create a source model XML file\n\nThe [gtlike](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/help/gtlike.txt) tool reads the source model from an XML file. 
The model file contains your best guess at the locations and spectral forms for the sources in your data. A source model can be created using the [model editor](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/help/modeleditor.txt) tool, by using the user contributed tool `make4FGLxml.py` (available at the [user-contributed tools](https://fermi.gsfc.nasa.gov/ssc/data/analysis/user/) page), or by editing the file directly within a text editor.\n\nHere we cannot use the same source model that was used to analyze six months of data in the Unbinned Likelihood tutorial, as the 2-year data set contains many more significant sources and will not converge. Instead, we will use the 4FGL catalog to define our source model by running `make4FGLxml.py`. To run the script, you will need to download the current LAT catalog file and place it in your working directory:", "_____no_output_____" ] ], [ [ "!wget https://fermi.gsfc.nasa.gov/ssc/data/analysis/user/make4FGLxml.py\n!wget https://fermi.gsfc.nasa.gov/ssc/data/access/lat/8yr_catalog/gll_psc_v18.fit", "_____no_output_____" ], [ "!mv make4FGLxml.py gll_psc_v18.fit ./data", "_____no_output_____" ], [ "!python ./data/make4FGLxml.py ./data/gll_psc_v18.fit ./data/3C279_binned_gti.fits -o ./data/3C279_input_model.xml ", "_____no_output_____" ] ], [ [ "Note that we are using a high level of significance so that we only fit the brightest sources, and we have forced the extended sources to be modeled as point sources.\n\nIt is also necessary to specify the entire path to location of the diffuse model on your system. Clearly, the simple 4-source model we used for the 6-month [Unbinned Likelihood](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/likelihood_tutorial.html) analysis would have been too simplistic.", "_____no_output_____" ], [ "This XML file uses the spectral model from the 4FGL catalog analysis for each source. (The catalog file is available at the [LAT 8-yr Catalog page](https://fermi.gsfc.nasa.gov/ssc/data/access/lat/8yr_catalog/).) However, that analysis used a subset of the available spectral models. A dedicated analysis of the region may indicate a different spectral model is preferred.\n\nFor more details on the options available for your XML models, see:\n* Descriptions of available [Spectral and Spatial Models](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/source_models.html)\n* Examples of [XML Model Definitions for Likelihood](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/xml_model_defs.html)", "_____no_output_____" ], [ "Finally, the `make4FGLxml.py` script automatically adds 10 degrees to your ROI to account for sources that lie outside your data region, but which may contribute photons to your data. In addition, it gives you the ability to free only some of the spectral parameters for sources within your ROI, and fixes them for the others.\n\nWith hundreds of sources, there are too many free parameters to gain a good spectral fit. It is advisable to revise these values so that only sources near your source of interest, or very bright source, have all spectral parameters free. Farther away, you can fix the spectral form and free only the normalization parameter (or \"prefactor\"). If you are working in a crowded region or have nested sources (e.g. a point source on top of an extended source), you will probably want to fix parameters for some sources even if they lie close to your source of interest.\n\nOnly the normalization parameter will be left free for the remaining sources within the ROI. 
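If you would rather adjust which parameters are left free by editing the XML programmatically instead of by hand, a hedged sketch using the standard library is given below. The normalization-like parameter names vary by spectral model, so the `keep_free` list is only an assumption, and the input/output file names follow the ones used in this thread.

```python
import xml.etree.ElementTree as ET

# Fix every spectral parameter except the normalization-like term
# for all point sources; diffuse components are left untouched.
keep_free = ('Prefactor', 'norm', 'Integral')
tree = ET.parse('./data/3C279_input_model.xml')
for src in tree.getroot().findall('source'):
    if src.get('type') != 'PointSource':
        continue
    for par in src.find('spectrum').findall('parameter'):
        if par.get('name') not in keep_free:
            par.set('free', '0')
tree.write('./data/3C279_input_model_fixed.xml')
```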
We have also used the significance parameter (`-s`) of `make4FLGxml.py` to free only the brightest sources in our ROI. In addition, we used the `-v` flag to override that for sources that are significantly variable. Both these changes are necessary: having too many free parameters will not allow the fit to converge (see the section for the fitting step).", "_____no_output_____" ], [ "### XML for Extended Sources\n\nIn some regions, the [make4FGLxml.py](https://fermi.gsfc.nasa.gov/ssc/data/analysis/user/make4FGLxml.py) script may add one or more extended sources to your XML model.\n\nThe script will provide the number of extended sources included in the model. In order to use these extended sources, you will need to downloaded the extended source templates from the [LAT Catalog](https://fermi.gsfc.nasa.gov/ssc/data/access/lat/8yr_catalog/) page (look for \"Extended Source template archive\").\n\nExtract the archive in the directory of your choice and note the path to the template files, which have names like `W44.fits` and `VelaX.fits`. You will need to provide the path to the template file to the script before you run it.\n\nHere is an example of the proper format for an extended source XML entry for Binned Likelihood analysis:\n\n```xml\n<source name=\"SpatialMap_source\" type=\"DiffuseSource\">\n<spectrum type=\"PowerLaw2\">\n<parameter free=\"1\" max=\"1000.0\" min=\"1e-05\" name=\"Integral\" scale=\"1e-06\" value=\"1.0\"/>\n<parameter free=\"1\" max=\"-1.0\" min=\"-5.0\" name=\"Index\" scale=\"1.0\" value=\"-2.0\"/>\n<parameter free=\"0\" max=\"200000.0\" min=\"20.0\" name=\"LowerLimit\" scale=\"1.0\" value=\"20.0\"/>\n<parameter free=\"0\" max=\"200000.0\" min=\"20.0\" name=\"UpperLimit\" scale=\"1.0\" value=\"2e5\"/>\n</spectrum>\n<spatialModel W44 file=\"$(PATH_TO_FILE)/W44.fits\" type=\"SpatialMap\" map_based_integral=\"true\">\n<parameter free=\"0\" max=\"1000.0\" min=\"0.001\" name=\"Normalization\" scale= \"1.0\" value=\"1.0\"/>\n</spatialModel>\n</source>\n```", "_____no_output_____" ], [ "# 6. Compute livetimes and exposure\n\nTo speed up the exposure calculations performed by Likelihood, it is helpful to pre-compute the livetime as a function of sky position and off-axis angle. The [gtltcube](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/gtltcube.txt) tool creates a livetime cube, which is a [HealPix](http://healpix.jpl.nasa.gov/) table, covering the entire sky, of the integrated livetime as a function of inclination with respect to the LAT z-axis.\n\nHere is an example of how to run [gtltcube](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/gtltcube.txt):", "_____no_output_____" ] ], [ [ "%%bash\ngtltcube zmax=90\n ./data/3C279_binned_gti.fits\n ./data/L181126210218F4F0ED2738_SC00.fits\n ./data/3C279_binned_ltcube.fits\n 0.025\n 1", "_____no_output_____" ] ], [ [ ">**Note**: Values such as \"0.1\" for \"Step size in cos(theta) are known to give unexpected results. Use \"0.09\" instead.", "_____no_output_____" ], [ "The livetime cube generated from this analysis can be found [here](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/data/BinnedLikelihood/3C279_binned_ltcube.fits).\n\nFor more information about the livetime cubes see the documentation in the [Cicerone](https://fermi.gsfc.nasa.gov/ssc/data/analysis/documentation/Cicerone/Cicerone_Likelihood/) and also the explanation in the [Unbinned Likelihood](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/likelihood_tutorial.html) tutorial.", "_____no_output_____" ], [ "# 7. 
Compute exposure map\n\nNext, you must apply the livetime calculated in the previous step to your region of interest. To do this, we use the [gtexpcube2](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/help/gtexpcube2.txt) tool, which is an updated version of the previous **gtexpcube**. This tool generates a binned exposure map, an accounting of the exposure at each position in the sky, that are a required input to the likelihood process.\n\n>**NOTE**: In the past, running **gtsrcmaps** calculated the exposure map for you, so most analyses skipped the binned exposure map generation step. With the introduction of **gtexpcube2**, this is no longer the case. You must explicitly command the creation of the exposure map as a separate analysis step.", "_____no_output_____" ], [ "In order to create an exposure map that accounts for contributions from all the sources in your analysis region, you must consider not just the sources included in the counts cube. The large PSF of the LAT means that at low energies, sources from well outside your counts cube could affect the sources you are analyzing. To compensate for this, you must create an exposure map that includes sources up to 10 degrees outside your ROI. (The ROI is determined by the radius you downloaded from the data server, here a 15 degree radius.) In addition, you should account for all the exposure that contributes to those additional sources. Since the exposure map uses square pixels, to match the binning in the counts cube, and to ensure we don't have errors, we generate a 300x300 pixel map.", "_____no_output_____" ], [ "If you provide [gtexpcube2](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/help/gtexpcube2.txt) a filename for your counts cube, it will use the information from that file to define the geometry of the exposure map. This is legacy behavior and will not give you the necessary 20° buffer you need to completely account for the exposure of nearby sources. (It will also cause an error in the next step.)\n\nInstead, you should specify the appropriate geometry for the exposure map, remembering that the counts cube used 0.2 degree pixel binning. To do that, enter `none` when asked for a Counts cube.\n\n**Note**: If you get a \"`File not found`\" error in the examples below, just put the IRF name in explicitly. The appropriate IRF for this data set is `P8R3_SOURCE_V2`.", "_____no_output_____" ] ], [ [ "%%bash\ngtexpcube2\n ./data/3C279_binned_ltcube.fits\n none\n ./data/3C279_binned_expcube.fits\n P8R3_SOURCE_V2\n 300\n 300\n .2\n 193.98\n -5.82\n 0\n AIT\n CEL\n 100\n 500000\n 37", "_____no_output_____" ] ], [ [ "The generated exposure map can be found [here](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/data/BinnedLikelihood/3C279_binned_expcube.fits).\n\nAt this point, you may decide it is easier to simply generate exposure maps for the entire sky. You may be right, as it certainly simplifies the step when scripting. 
However, making an all-sky map increases the processing time for this step, though the increase is modest.\n\nTo generate an all-sky exposure map (rather than the exposure map we calculated above) you need to specify the proper binning and explicitly give the number of pixels for the entire sky (360°x180°).\n\nHere is an example:", "_____no_output_____" ] ], [ [ "%%bash\ngtexpcube2\n ./data/3C279_binned_ltcube.fits\n none\n ./data/3C279_binned_allsky_expcube.fits\n P8R3_SOURCE_V2\n 1800\n 900\n .2\n 193.98\n -5.82\n 0\n AIT\n CEL\n 100\n 500000\n 37", "_____no_output_____" ] ], [ [ "The all-sky exposure map can be found [here](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/data/BinnedLikelihood/3C279_binned_allsky_expcube.fits).\n\nJust as in the [Unbinned Likelihood](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/likelihood_tutorial.html) analysis, the exposure needs to be recalculated if the ROI, zenith angle, time, event class, or energy selections applied to the data are changed. For the binned analysis, this also includes the spatial and energy binning of the 3D counts map (which affects the exposure map as well).", "_____no_output_____" ], [ "# 8. Compute source map\n\nThe [gtsrcmaps](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/help/gtsrcmaps.txt) tool creates model counts maps for use with the binned likelihood analysis. To do this, it takes each source spectrum in the XML model, multiplies it by the exposure at the source position, and convolves that exposure with the effective PSF.\n\nThis is an example of how to run the tool:", "_____no_output_____" ] ], [ [ "%%bash\ngtsrcmaps\n ./data/3C279_binned_ltcube.fits\n ./data/3C279_binned_ccube.fits\n ./data/3C279_input_model.xml\n ./data/3C279_binned_allsky_expcube.fits\n ./data/3C279_binned_srcmaps.fits\n CALDB", "_____no_output_____" ] ], [ [ "The output file from [gtsrcmaps](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/help/gtsrcmaps.txt) can be found [here](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/data/BinnedLikelihood/3C279_binned_srcmaps.fits).\n\nBecause your model map can include sources outside your ROI, you may see a list of warnings at the beginning of the output. These are expected (because you have properly included sources outside your ROI in your XML file) and should cause no problem in your analysis. In addition, if your exposure map is too small for the region, you will see the following warning:\n\n```\nCaught St13runtime_error at the top level:\nRequest for exposure at a sky position that is outside of the map boundaries.\n\nThe contribution of the diffuse source outside of the exposure\nand counts map boundaries is being computed to account for PSF\nleakage into the analysis region. To handle this, use an all-sky\nbinned exposure map. Alternatively, to neglect contributions\noutside of the counts map region, use the emapbnds=no option when\nrunning gtsrcmaps.\n```", "_____no_output_____" ], [ "In this situation, you should increase the dimensions of your exposure map, or just move to the all-sky version.\n\nSource map generation for the point sources is fairly quick, and maps for many point sources may take up a lot of disk space. 
If you are analyzing a single long data set, it may be preferable to pre-compute only the source maps for the diffuse components at this stage.\n\n[gtlike](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/help/gtlike.txt) will compute maps for the point sources on the fly if they appear in the XML definition and a corresponding map is not in the source maps FITS file. To skip generating source maps for point sources, specify \"`ptsrc=no`\" on the command line when running **gtsrcmaps**. However, if you expect to perform multiple fits on the same set of data, precomputing the source maps will probably save you time.", "_____no_output_____" ], [ "# 9. Run gtlike\n\n>NOTE: Prior to running **gtlike** for Unbinned Likelihood, it is necessary to calculate the diffuse response for each event (when that response is not precomputed). However, for Binned Likelihood analysis the diffuse response is calculated over the entire bin, so this step is not necessary.\n\nIf you want to use the **energy dispersion correction** during your analysis, you must enable this feature using the environment variable `USE_BL_EDISP`. This may be set on the command line using:\n\n```bash\nexport USE_BL_EDISP=true\n```\nor, depending on your shell,\n\n```\nsetenv USE_BL_EDISP=true\n```\n\nTo disable the use of energy dispersion, you must unset the variable:\n```bash\nunset USE_BL_EDISP\n```\nor\n```\nunsetenv USE_BL_EDISP\n```", "_____no_output_____" ], [ "```bash\nexport USE_BL_EDISP=true\n```\nor, depending on your shell,\n\n```\nsetenv USE_BL_EDISP=true\n```\n\nTo disable the use of energy dispersion, you must unset the variable:\n```bash\nunset USE_BL_EDISP\n```\nor\n```\nunsetenv USE_BL_EDISP\n```", "_____no_output_____" ], [ "Now we are ready to run the [gtlike](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/help/gtlike.txt) application.\n\nHere, we request that the fitted parameters be saved to an output XML model file for use in later steps.", "_____no_output_____" ] ], [ [ "!wget https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/data/BinnedLikelihood/3C279_output_model.xml", "_____no_output_____" ], [ "%%bash\ngtlike refit=yes plot=yes sfile=./data/3C279_binned_output.xml\n BINNED\n ./data/3C279_binned_srcmaps.fits\n ./data/3C279_binned_allsky_expcube.fits\n ./data/3C279_binned_ltcube.fits\n ./data/3C279_input_model.xml\n CALDB\n NEWMINUIT", "_____no_output_____" ] ], [ [ "Most of the entries prompted for are fairly obvious. In addition to the various XML and FITS files, the user is prompted for a choice of IRFs, the type of statistic to use, and the optimizer.", "_____no_output_____" ], [ "The statistics available are:\n* **UNBINNED**: This should be used for short timescale or low source count data. If this option is chosen then parameters for the spacecraft file, event file, and exposure file must be given. See explanation in: [Likelihood Tutorial]()\n\n* **BINNED**: This is a standard binned analysis as described in this tutorial. This analysis is used for long timescale or high-density data (such as in the Galactic plane) which can cause memory errors in the unbinned analysis. If this option is chosen then parameters for the source map file, livetime file, and exposure file must be given.", "_____no_output_____" ], [ "There are five optimizers from which to choose: `DRMNGB`, `DRMNFB`, `NEWMINUIT`, `MINUIT` and `LBFGS`. 
Generally speaking, the faster way to find the parameter estimates is to use `DRMNGB` (or `DRMNFB`) to find initial values and then use `MINUIT` (or `NEWMINUIT`) to find more accurate results. If you have trouble achieving convergence at first, you can loosen your tolerance by setting the hidden parameter `ftol` on the command line. (The default value for `ftol` is `0.001`.)", "_____no_output_____" ], [ "Analyzing a 2-year dataset will take many hours (in our case more than 2 days with a 32-bit machine with 1 GB of RAM). The required running time is high if your source is in the Galactic plane. Here is some output from our fit, where 4FGL J1229.0+0202 and 4FGL J1256.1-0547 corresponds to 3C 273 and 3C 279, respectively:", "_____no_output_____" ], [ "```\nThis is gtlike version \n\n...\n\nPhoton fluxes are computed for the energy range 100 to 500000 MeV\n\n4FGL J1229.0+0202:\nnorm: 8.16706 +/- 0.0894921\nalpha: 2.49616 +/- 0.015028\nbeta: 0.104635 +/- 0.0105201\nEb: 279.04\nTS value: 32017.6\nFlux: 6.69253e-07 +/- 7.20102e-09 photons/cm^2/s\n\n4FGL J1256.1-0547:\nnorm: 2.38177 +/- 0.0296458\nalpha: 2.25706 +/- 0.0116212\nbeta: 0.0665607 +/- 0.00757385\nEb: 442.052\nTS value: 29261.7\nFlux: 5.05711e-07 +/- 6.14833e-09 photons/cm^2/s\n\n...\n\ngll_iem_v07:\nPrefactor: 0.900951 +/- 0.0235397\nIndex: 0\nScale: 100\nFlux: 0.000469334 +/- 1.22608e-05 photons/cm^2/s\n\niso_P8R3_SOURCE_V2_v1:\nNormalization: 1.13545 +/- 0.0422581\nFlux: 0.000139506 +/- 5.19439e-06 photons/cm^2/s\n\nWARNING: Fit may be bad in range [100, 199.488] (MeV)\nWARNING: Fit may be bad in range [251.124, 316.126] (MeV)\nWARNING: Fit may be bad in range [6302.3, 7933.61] (MeV)\nWARNING: Fit may be bad in range [39744.4, 50032.1] (MeV)\nWARNING: Fit may be bad in range [315519, 397190] (MeV)\n\nTotal number of observed counts: 207751\nTotal number of model events: 207407\n\n-log(Likelihood): 73014.38504\n\nWriting fitted model to 3C279_binned_output.xml\n```", "_____no_output_____" ], [ "Since we selected `plot=yes` in the command line, a plot of the fitted data appears.\n\n<img src=\"https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/images/BinnedLikelihood/3C279_binned_spectral_fit.png\">", "_____no_output_____" ], [ "In the first plot, the counts/MeV vs MeV are plotted. The points are the data, and the lines are the models. Error bars on the points represent sqrt(Nobs) in that band, where Nobs is the observed number of counts. The black line is the sum of the models for all sources. \n\nThe colored lines follow the sources as follows:\n* Black - summed model\n* Red - first source (see below)\n* Green - second source\n* Blue - third source\n* Magenta - fourth source\n* Cyan - the fifth source\n\nIf you have more sources, the colors are reused in the same order. In our case we have, in order of decreasing value on the y-axis: summed model (black), the extragalactic background (black), the galactic background (cyan), 3C 273 (red), and 3C 279 (black).\n\nThe second plot gives the residuals between your model and the data. Error bars here represent (sqrt(Nopbs))/Npred, where Npred is the predicted number of counts in each band based on the fitted model.", "_____no_output_____" ], [ "To assess the quality of the fit, look first for the words at the top of the output `<Optimizer> did successfully converge.` Successful convergence is a minimum requirement for a good fit.\n\nNext, look at the energy ranges that are generating warnings of bad fits. 
If any of these ranges affect your source of interest, you may need to revise the source model and refit. You can also look at the residuals on the plot (bottom panel). If the residuals indicate a poor fit overall (e.g., the points trending all low or all high) you should consider changing your model file, perhaps by using a different source model definition, and refit the data.\n \nIf the fits and spectral shapes are good, but could be improved, you may wish to simply update your model file to hold some of the spectral parameters fixed. For example, by fixing the spectral model for 3C 273, you may get a better quality fit for 3C 279. Close the plot and you will be asked if you wish to refit the data.", "_____no_output_____" ], [ "```\nRefit? [y] n\nElapsed CPU time: 1571.805872\n```", "_____no_output_____" ], [ "Here, hitting `return` will instruct the application to fit again. We are happy with the result, so we type `n` and end the fit.", "_____no_output_____" ], [ "### Results\n\nWhen it completes, **gtlike** generates a standard output XML file. If you re-run the tool in the same directory, these files will be overwritten by default. Use the `clobber=no` option on the command line to keep from overwriting the output files.\n\nUnfortunately, the fit details and the value for the `-log(likelihood)` are not recorded in the automatic output files.\n\nYou should consider logging the output to a text file for your records by using `> fit_data.txt` (or something similar) with your **gtlike** command.\n\nBe aware, however, that this will make it impossible to request a refit when the likelihood process completes.", "_____no_output_____" ] ], [ [ "!gtlike plot=yes sfile=./data/3C279_output_model.xml > fit_data.txt", "_____no_output_____" ] ], [ [ "In this example, we used the `sfile` parameter to request that the model results be written to an output XML file. This file contains the source model results that were written to `results.dat` at the completion of the fit.\n\n> **Note**: If you have specified an output XML model file and you wish to modify your model while waiting at the `Refit? [y]` prompt, you will need to copy the results of the output model file to your input model before making those modifications. ", "_____no_output_____" ], [ "The results of the likelihood analysis have to be scaled by the quantity called \"scale\" in the XML model in order to obtain the total photon flux (photons cm-2 s-1) of the source. You must refer to the model formula of your source for the interpretation of each parameter. In our example the 'prefactor' of our power law model of the first fitted source (4FGLJ1159.5-0723) has to be scaled by the factor 'scale'=10-14. For example the total flux of 4FGLJ1159.5-0723 is the integral between 100 MeV and 500000 MeV of:", "_____no_output_____" ], [ "$Prefactor \\cdot scale \\cdot (E /100)^{index}=(6.7017x10-14) \\cdot (E/100)^{-2.0196}$", "_____no_output_____" ], [ "Errors reported with each value in the `results.dat` file are 1σ estimates (based on inverse-Hessian at the optimum of the log-likelihood surface).", "_____no_output_____" ], [ "### Other Useful Hidden Parameters\n\nIf you are scripting and wish to generate multiple output files without overwriting, the `results` and `specfile` parameters allow you to specify output filenames for the `results.dat` and `counts_spectra.fits` files respectively.\n\nIf you do not specify a source model output file with the `sfile` parameter, then the input model file will be overwritten with the latest fit. 
This is convenient as it allows the user to edit that file while the application is waiting at the `Refit? [y]` prompt so that parameters can be adjusted and set free or fixed. This would be similar to the use of the \"newpar\", \"freeze\", and \"thaw\" commands in [XSPEC](http://heasarc.gsfc.nasa.gov/docs/xanadu/xspec/index.html). ", "_____no_output_____" ], [ "# 10. Create a model map\n\nFor comparison to the counts map data, we create a model map of the region based on the fit parameters.\n\nThis map is essentially an infinite-statistics counts map of the region-of-interest based on our model fit.\n\nThe [gtmodel](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/help/gtmodel.txt) application reads in the fitted model, applies the proper scaling to the source maps, and adds them together to get the final map. ", "_____no_output_____" ] ], [ [ "%%bash\ngtmodel\n ./data/3C279_binned_srcmaps.fits\n ./data/3C279_binned_output.xml\n ./data/3C279_model_map.fits\n CALDB\n ./data/3C279_binned_ltcube.fits\n ./data/3C279_binned_allsky_expcube.fits", "_____no_output_____" ] ], [ [ "To understand how well the fit matches the data, we want to compare the [model map](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/data/BinnedLikelihood/3C279_model_map.fits) just created with the counts map over the same field of view. First we have to create the [new counts map](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/data/BinnedLikelihood/3C279_binned_cmap_small.fits) that matches in size the model map (the one generated in encircles the ROI, while the model map is completely inscribed within the ROI): We will use again the [gtbin](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/help/gtbin.txt) tool with the option `CMAP` as shown below: ", "_____no_output_____" ] ], [ [ "%%bash\ngtbin\n CMAP\n ./data/3C279_binned_gti.fits\n ./data/3C279_binned_cmap_small.fits\n NONE\n 100\n 100\n 0.2\n CEL\n 193.98\n -5.82\n 0.0\n STG", "_____no_output_____" ] ], [ [ "Here we've plotted the model map next to the the energy-summed counts map for the data.\n\n<img src='https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/images/BinnedLikelihood/3C279_binned_map_comparison.png'>", "_____no_output_____" ], [ "Finally we want to create the [residual map](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/data/BinnedLikelihood/3C279_residual.fits) by using the FTOOL **farith** to check if we can improve the model:", "_____no_output_____" ] ], [ [ "%%bash\nfarith\n ./data/3C279_binned_cmap_small.fits\n ./data/3C279_model_map.fits\n ./data/3C279_residual.fits\n SUB", "_____no_output_____" ] ], [ [ "The residual map is shown below. As you can see, the binning we chose probably used pixels that were too large.\n\nThe primary sources, 3C 273 and 3C 279, have some positive pixels next to some negative ones. This effect could be lessened by either using a smaller pixel size or by offsetting the central position slightly from the position of the blazar (or both).\n\nIf your residual map contains bright sources, the next step would be to iterate the analysis with the additional sources included in the XML model file. \n\n<img src='https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/images/BinnedLikelihood/3C279_binned_residuals.png'>", "_____no_output_____" ] ] ]
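The power-law flux integral quoted above can be sanity-checked with a few lines of Python. The sketch below is illustrative only: it assumes the fitted values printed in the example output (prefactor × scale ≈ 6.7017e-14, index ≈ -2.0196, pivot energy 100 MeV) and integrates dN/dE numerically with SciPy rather than using gtlike itself.

```python
"""Sketch: integrate a fitted power law to reproduce a total photon flux.

The parameter values are taken from the example gtlike output quoted above
and are illustrative, not authoritative.
"""
from scipy.integrate import quad

prefactor = 6.7017e-14          # photons / cm^2 / s / MeV at the pivot energy
index = -2.0196
pivot = 100.0                   # MeV
e_min, e_max = 100.0, 500000.0  # MeV

def dnde(energy):
    """Differential photon flux dN/dE for a simple power law."""
    return prefactor * (energy / pivot) ** index

flux, abserr = quad(dnde, e_min, e_max)
print(f"Integrated photon flux: {flux:.3e} photons/cm^2/s (quad error ~{abserr:.1e})")
```

For a pure power law the integral can of course be done analytically; the numerical version is used here only to avoid sign mistakes with the index.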
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ] ]
cba1e7c8b9831cebb344f506c77af9dd7c36932a
32,447
ipynb
Jupyter Notebook
trained_models/GRU/seq_GRU.ipynb
Sapphirine/fakenewschallenge
f718f93717301617988797c5d89a71c5be34a55b
[ "Apache-2.0" ]
2
2019-04-09T17:58:38.000Z
2019-05-01T09:47:37.000Z
trained_models/GRU/seq_GRU.ipynb
wl5/fake_news_detection
a5ee04c89d064b5f8da1ecbf1f440f6b29930853
[ "Apache-2.0" ]
null
null
null
trained_models/GRU/seq_GRU.ipynb
wl5/fake_news_detection
a5ee04c89d064b5f8da1ecbf1f440f6b29930853
[ "Apache-2.0" ]
null
null
null
54.078333
172
0.493975
[ [ [ "from collections import Counter\nimport numpy as np\nfrom csv import DictReader\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.utils import np_utils\nfrom keras.models import Sequential, Model, load_model\nfrom keras.layers import concatenate, Embedding, Dense, Dropout, Activation, LSTM, CuDNNLSTM, CuDNNGRU,Flatten, Input, RepeatVector, TimeDistributed, Bidirectional\nfrom keras.optimizers import Adam, RMSprop\nfrom keras.callbacks import Callback, ModelCheckpoint, EarlyStopping, TensorBoard\nimport codecs\nimport pickle", "_____no_output_____" ], [ "MAX_LEN_HEAD = 100\nMAX_LEN_BODY = 500\nVOCAB_SIZE = 15000\nEMBEDDING_DIM = 300", "_____no_output_____" ], [ "def get_vocab(lst, vocab_size):\n \"\"\"\n lst: list of sentences\n \"\"\"\n vocabcount = Counter(w for txt in lst for w in txt.lower().split())\n vocabcount = vocabcount.most_common(vocab_size)\n word2idx = {}\n idx2word = {}\n for i, word in enumerate(vocabcount):\n word2idx[word[0]] = i\n idx2word[i] = word[0]\n return word2idx, idx2word", "_____no_output_____" ], [ "def cov2idx_unk(lst, word2idx):\n output = []\n for sentence in lst:\n temp = []\n for word in sentence.split():\n if word in word2idx:\n temp.append(word2idx[word])\n else:\n temp.append(word2idx['<unk>'])\n temp.append(word2idx['<unk>'])\n output.append(temp)\n return output", "_____no_output_____" ], [ "def pad_seq(cov_lst, max_len=MAX_LEN_BODY):\n \"\"\"\n list of list of index converted from words\n \"\"\"\n pad_lst = pad_sequences(cov_lst, maxlen = max_len, padding='post')\n return pad_lst", "_____no_output_____" ], [ "label_ref = {'agree': 0, 'disagree': 1, 'discuss': 2, 'unrelated': 3}", "_____no_output_____" ], [ "def load_train_unk(file_instances, file_bodies):\n \"\"\"\n article: the name of the article file\n \"\"\"\n \n instance_lst = []\n # Process file\n with open(file_instances, \"r\", encoding='utf-8') as table:\n r = DictReader(table)\n for line in r:\n instance_lst.append(line)\n \n body_lst = []\n # Process file\n with open(file_bodies, \"r\", encoding='utf-8') as table:\n r = DictReader(table)\n for line in r:\n body_lst.append(line)\n \n heads = {}\n bodies = {}\n \n for instance in instance_lst:\n if instance['Headline'] not in heads:\n head_id = len(heads)\n heads[instance['Headline']] = head_id\n instance['Body ID'] = int(instance['Body ID'])\n for body in body_lst:\n bodies[int(body['Body ID'])] = body['articleBody']\n \n headData = []\n bodyData = []\n labelData = []\n for instance in instance_lst:\n headData.append(instance['Headline'])\n bodyData.append(bodies[instance['Body ID']])\n labelData.append(label_ref[instance['Stance']])\n \n \n word2idx, idx2word = get_vocab(headData+bodyData, VOCAB_SIZE)\n word2idx['<unk>'] = len(word2idx)\n \n cov_head = cov2idx_unk(headData, word2idx)\n cov_body = cov2idx_unk(bodyData, word2idx)\n remove_list = []\n for i in range(len(cov_head)):\n if len(cov_head[i])>MAX_LEN_HEAD or len(cov_body[i])>MAX_LEN_BODY:\n remove_list.append(i)\n for idx in sorted(remove_list, reverse = True):\n cov_head.pop(idx)\n cov_body.pop(idx)\n labelData.pop(idx)\n pad_head = pad_seq(cov_head, MAX_LEN_HEAD)\n pad_body = pad_seq(cov_body, MAX_LEN_BODY)\n return pad_head, pad_body, labelData, word2idx, idx2word", "_____no_output_____" ], [ "pad_head, pad_body, labelData, word2idx, idx2word = load_train_unk(\"train_stances.csv\", \"train_bodies.csv\")", "_____no_output_____" ], [ "#for training\ntrain_head = pad_head[:-1000]\ntrain_body = pad_body[:-1000]\ntrain_label = labelData[:-1000]\n\nval_head = 
pad_head[-1000:]\nval_body = pad_body[-1000:]\nval_label = labelData[-1000:]", "_____no_output_____" ], [ "BATCH_SIZE = 128\nNUM_LAYERS = 0\nHIDDEN_DIM = 512\nEPOCHS = 60", "_____no_output_____" ], [ "input_head = Input(shape=(MAX_LEN_HEAD,), dtype='int32', name='input_head')\nembed_head = Embedding(output_dim=EMBEDDING_DIM, input_dim=VOCAB_SIZE+1, input_length=MAX_LEN_HEAD)(input_head)\ngru_head = CuDNNGRU(128)(embed_head)\n# embed_head = Embedding(VOCAB_SIZE, EMBEDDING_DIM , input_length = MAX_LEN_HEAD, weights = [g_word_embedding_matrix], trainable=False)\ninput_body = Input(shape=(MAX_LEN_BODY,), dtype='int32', name='input_body')\nembed_body = Embedding(output_dim=EMBEDDING_DIM, input_dim=VOCAB_SIZE+1, input_length=MAX_LEN_BODY)(input_body)\ngru_body = CuDNNGRU(128)(embed_body)\n# embed_body = Embedding(VOCAB_SIZE, EMBEDDING_DIM , input_length = MAX_LEN_BODY, weights = [g_word_embedding_matrix], trainable=False)\n\nconcat = concatenate([gru_head, gru_body], axis = 1)\n\nx = Dense(400, activation='relu')(concat)\nx = Dropout(0.5)(x)\nx = Dense(400, activation='relu')(x)\nx = Dropout(0.5)(x)\n\n# And finally we add the main logistic regression layer\nmain_output = Dense(4, activation='softmax', name='main_output')(x)\nmodel = Model(inputs=[input_head, input_body], outputs=main_output)\nmodel.compile(optimizer='rmsprop', loss='sparse_categorical_crossentropy', metrics = ['accuracy'])\nmodel.summary()", "__________________________________________________________________________________________________\nLayer (type) Output Shape Param # Connected to \n==================================================================================================\ninput_head (InputLayer) (None, 100) 0 \n__________________________________________________________________________________________________\ninput_body (InputLayer) (None, 500) 0 \n__________________________________________________________________________________________________\nembedding_6 (Embedding) (None, 100, 300) 4500300 input_head[0][0] \n__________________________________________________________________________________________________\nembedding_7 (Embedding) (None, 500, 300) 4500300 input_body[0][0] \n__________________________________________________________________________________________________\ncu_dnngru_5 (CuDNNGRU) (None, 128) 165120 embedding_6[0][0] \n__________________________________________________________________________________________________\ncu_dnngru_6 (CuDNNGRU) (None, 128) 165120 embedding_7[0][0] \n__________________________________________________________________________________________________\nconcatenate_2 (Concatenate) (None, 256) 0 cu_dnngru_5[0][0] \n cu_dnngru_6[0][0] \n__________________________________________________________________________________________________\ndense_3 (Dense) (None, 400) 102800 concatenate_2[0][0] \n__________________________________________________________________________________________________\ndropout_3 (Dropout) (None, 400) 0 dense_3[0][0] \n__________________________________________________________________________________________________\ndense_4 (Dense) (None, 400) 160400 dropout_3[0][0] \n__________________________________________________________________________________________________\ndropout_4 (Dropout) (None, 400) 0 dense_4[0][0] \n__________________________________________________________________________________________________\nmain_output (Dense) (None, 4) 1604 dropout_4[0][0] 
\n==================================================================================================\nTotal params: 9,595,644\nTrainable params: 9,595,644\nNon-trainable params: 0\n__________________________________________________________________________________________________\n" ], [ "wt_dir = \"./models/seqLSTM/\"\nmodel_path = wt_dir+'biLSTM'+'{epoch:03d}'+'.h5'\ntensorboard = TensorBoard(log_dir='./Graph')\nmodel_checkpoint = ModelCheckpoint(model_path, save_best_only =False, period =2, save_weights_only = False)\n# model.fit([try_head, try_body], \n# try_label, \n# epochs=30, \n# validation_data=([try_head, try_body], try_label), \n# batch_size=BATCH_SIZE,\n# shuffle=True,\n# callbacks = [model_checkpoint, tensorboard])\nmodel.fit([train_head, train_body], \n train_label, \n epochs=2*EPOCHS, \n validation_data=([val_head, val_body], val_label), \n batch_size=BATCH_SIZE,\n shuffle = True, \n callbacks=[model_checkpoint, tensorboard])", "Train on 38357 samples, validate on 1000 samples\nEpoch 1/120\n38357/38357 [==============================] - 45s 1ms/step - loss: 0.8110 - acc: 0.7388 - val_loss: 0.7872 - val_acc: 0.7400\nEpoch 2/120\n38357/38357 [==============================] - 45s 1ms/step - loss: 0.7966 - acc: 0.7401 - val_loss: 0.7754 - val_acc: 0.7400\nEpoch 3/120\n38357/38357 [==============================] - 45s 1ms/step - loss: 0.7935 - acc: 0.7401 - val_loss: 0.7870 - val_acc: 0.7400\nEpoch 4/120\n38357/38357 [==============================] - 45s 1ms/step - loss: 0.7915 - acc: 0.7401 - val_loss: 0.7723 - val_acc: 0.7400\nEpoch 5/120\n38357/38357 [==============================] - 45s 1ms/step - loss: 0.7916 - acc: 0.7401 - val_loss: 0.7745 - val_acc: 0.7400\nEpoch 6/120\n38357/38357 [==============================] - 45s 1ms/step - loss: 0.7916 - acc: 0.7401 - val_loss: 0.7730 - val_acc: 0.7400\nEpoch 7/120\n38357/38357 [==============================] - 45s 1ms/step - loss: 0.7908 - acc: 0.7401 - val_loss: 0.7733 - val_acc: 0.7400\nEpoch 8/120\n38357/38357 [==============================] - 45s 1ms/step - loss: 0.7916 - acc: 0.7401 - val_loss: 0.7745 - val_acc: 0.7400\nEpoch 9/120\n38357/38357 [==============================] - 45s 1ms/step - loss: 0.7907 - acc: 0.7401 - val_loss: 0.7733 - val_acc: 0.7400\nEpoch 10/120\n38357/38357 [==============================] - 45s 1ms/step - loss: 0.7912 - acc: 0.7401 - val_loss: 0.7695 - val_acc: 0.7400\nEpoch 11/120\n38357/38357 [==============================] - 44s 1ms/step - loss: 0.7903 - acc: 0.7400 - val_loss: 0.7773 - val_acc: 0.7400\nEpoch 12/120\n38357/38357 [==============================] - 44s 1ms/step - loss: 0.7896 - acc: 0.7411 - val_loss: 0.7713 - val_acc: 0.7400\nEpoch 13/120\n38357/38357 [==============================] - 44s 1ms/step - loss: 0.7879 - acc: 0.7411 - val_loss: 0.7792 - val_acc: 0.7390\nEpoch 14/120\n38357/38357 [==============================] - 45s 1ms/step - loss: 0.7894 - acc: 0.7403 - val_loss: 0.7704 - val_acc: 0.7420\nEpoch 15/120\n38357/38357 [==============================] - 44s 1ms/step - loss: 0.7863 - acc: 0.7420 - val_loss: 0.7744 - val_acc: 0.7380\nEpoch 16/120\n38357/38357 [==============================] - 44s 1ms/step - loss: 0.7802 - acc: 0.7443 - val_loss: 0.7513 - val_acc: 0.7440\nEpoch 17/120\n38357/38357 [==============================] - 45s 1ms/step - loss: 0.7459 - acc: 0.7582 - val_loss: 0.7076 - val_acc: 0.7670\nEpoch 18/120\n38357/38357 [==============================] - 44s 1ms/step - loss: 0.6624 - acc: 0.7855 - val_loss: 0.6208 - val_acc: 0.7840\nEpoch 
19/120\n38357/38357 [==============================] - 45s 1ms/step - loss: 0.6172 - acc: 0.7959 - val_loss: 0.6101 - val_acc: 0.8000\nEpoch 20/120\n38357/38357 [==============================] - 45s 1ms/step - loss: 0.5948 - acc: 0.8058 - val_loss: 0.5853 - val_acc: 0.7990\nEpoch 21/120\n38357/38357 [==============================] - 45s 1ms/step - loss: 0.5732 - acc: 0.8118 - val_loss: 0.5496 - val_acc: 0.7980\nEpoch 22/120\n38357/38357 [==============================] - 44s 1ms/step - loss: 0.5569 - acc: 0.8148 - val_loss: 0.5480 - val_acc: 0.8070\nEpoch 23/120\n38357/38357 [==============================] - 44s 1ms/step - loss: 0.5456 - acc: 0.8173 - val_loss: 0.5618 - val_acc: 0.8010\nEpoch 24/120\n38357/38357 [==============================] - 45s 1ms/step - loss: 0.5413 - acc: 0.8172 - val_loss: 0.5403 - val_acc: 0.8040\nEpoch 25/120\n38357/38357 [==============================] - 45s 1ms/step - loss: 0.5343 - acc: 0.8169 - val_loss: 0.5073 - val_acc: 0.8130\nEpoch 26/120\n38357/38357 [==============================] - 44s 1ms/step - loss: 0.5187 - acc: 0.8195 - val_loss: 0.5119 - val_acc: 0.8150\nEpoch 27/120\n38357/38357 [==============================] - 44s 1ms/step - loss: 0.5151 - acc: 0.8196 - val_loss: 0.5142 - val_acc: 0.8100\nEpoch 28/120\n38357/38357 [==============================] - 45s 1ms/step - loss: 0.5108 - acc: 0.8208 - val_loss: 0.5196 - val_acc: 0.8040\nEpoch 29/120\n38357/38357 [==============================] - 45s 1ms/step - loss: 0.5077 - acc: 0.8210 - val_loss: 0.5144 - val_acc: 0.8190\nEpoch 30/120\n38357/38357 [==============================] - 45s 1ms/step - loss: 0.5062 - acc: 0.8210 - val_loss: 0.5189 - val_acc: 0.8120\nEpoch 31/120\n38357/38357 [==============================] - 45s 1ms/step - loss: 0.5057 - acc: 0.8218 - val_loss: 0.4967 - val_acc: 0.8090\nEpoch 32/120\n38357/38357 [==============================] - 45s 1ms/step - loss: 0.5030 - acc: 0.8203 - val_loss: 0.4925 - val_acc: 0.8140\nEpoch 33/120\n38357/38357 [==============================] - 45s 1ms/step - loss: 0.5025 - acc: 0.8207 - val_loss: 0.5311 - val_acc: 0.8070\nEpoch 34/120\n38357/38357 [==============================] - 45s 1ms/step - loss: 0.5007 - acc: 0.8228 - val_loss: 0.5270 - val_acc: 0.8150\nEpoch 35/120\n38357/38357 [==============================] - 45s 1ms/step - loss: 0.5004 - acc: 0.8212 - val_loss: 0.4965 - val_acc: 0.8190\nEpoch 36/120\n38357/38357 [==============================] - 44s 1ms/step - loss: 0.5004 - acc: 0.8212 - val_loss: 0.4994 - val_acc: 0.8080\nEpoch 37/120\n38357/38357 [==============================] - 45s 1ms/step - loss: 0.5024 - acc: 0.8210 - val_loss: 0.5056 - val_acc: 0.8100\nEpoch 38/120\n38357/38357 [==============================] - 44s 1ms/step - loss: 0.4997 - acc: 0.8209 - val_loss: 0.4901 - val_acc: 0.8110\nEpoch 39/120\n38357/38357 [==============================] - 45s 1ms/step - loss: 0.4985 - acc: 0.8212 - val_loss: 0.4925 - val_acc: 0.8070\nEpoch 40/120\n38357/38357 [==============================] - 45s 1ms/step - loss: 0.4957 - acc: 0.8220 - val_loss: 0.5127 - val_acc: 0.8160\nEpoch 41/120\n38357/38357 [==============================] - 45s 1ms/step - loss: 0.4989 - acc: 0.8210 - val_loss: 0.4985 - val_acc: 0.8140\nEpoch 42/120\n38357/38357 [==============================] - 45s 1ms/step - loss: 0.4973 - acc: 0.8206 - val_loss: 0.5243 - val_acc: 0.8050\nEpoch 43/120\n38357/38357 [==============================] - 45s 1ms/step - loss: 0.4983 - acc: 0.8187 - val_loss: 0.5506 - val_acc: 0.8190\nEpoch 44/120\n38357/38357 
[==============================] - 45s 1ms/step - loss: 0.4977 - acc: 0.8210 - val_loss: 0.5095 - val_acc: 0.8060\nEpoch 45/120\n38357/38357 [==============================] - 45s 1ms/step - loss: 0.4978 - acc: 0.8196 - val_loss: 0.5002 - val_acc: 0.8120\nEpoch 46/120\n38357/38357 [==============================] - 45s 1ms/step - loss: 0.4984 - acc: 0.8194 - val_loss: 0.5176 - val_acc: 0.8010\nEpoch 47/120\n38357/38357 [==============================] - 45s 1ms/step - loss: 0.4998 - acc: 0.8209 - val_loss: 0.5113 - val_acc: 0.8150\nEpoch 48/120\n38357/38357 [==============================] - 45s 1ms/step - loss: 0.4989 - acc: 0.8192 - val_loss: 0.4979 - val_acc: 0.8110\nEpoch 49/120\n38357/38357 [==============================] - 45s 1ms/step - loss: 0.4985 - acc: 0.8190 - val_loss: 0.4981 - val_acc: 0.8160\nEpoch 50/120\n38357/38357 [==============================] - 45s 1ms/step - loss: 0.4991 - acc: 0.8198 - val_loss: 0.5061 - val_acc: 0.8090\nEpoch 51/120\n38357/38357 [==============================] - 45s 1ms/step - loss: 0.5005 - acc: 0.8202 - val_loss: 0.5007 - val_acc: 0.8040\nEpoch 52/120\n38357/38357 [==============================] - 45s 1ms/step - loss: 0.4993 - acc: 0.8198 - val_loss: 0.4911 - val_acc: 0.8130\nEpoch 53/120\n38357/38357 [==============================] - 45s 1ms/step - loss: 0.5064 - acc: 0.8201 - val_loss: 0.5176 - val_acc: 0.8090\nEpoch 54/120\n38357/38357 [==============================] - 45s 1ms/step - loss: 0.5005 - acc: 0.8196 - val_loss: 0.5008 - val_acc: 0.8040\nEpoch 55/120\n38357/38357 [==============================] - 45s 1ms/step - loss: 0.5014 - acc: 0.8199 - val_loss: 0.5040 - val_acc: 0.8130\nEpoch 56/120\n38357/38357 [==============================] - 45s 1ms/step - loss: 0.5001 - acc: 0.8192 - val_loss: 0.4934 - val_acc: 0.8130\nEpoch 57/120\n38357/38357 [==============================] - 45s 1ms/step - loss: 0.5023 - acc: 0.8183 - val_loss: 0.4842 - val_acc: 0.8080\nEpoch 58/120\n38357/38357 [==============================] - 45s 1ms/step - loss: 0.5013 - acc: 0.8201 - val_loss: 0.4993 - val_acc: 0.8100\nEpoch 59/120\n" ], [ "pickle.dump(word2idx, open(\"word2idx_GRU.pkl\", \"wb\"))", "_____no_output_____" ] ] ]
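Once training has finished and `word2idx` has been saved, the two-input model can be used to score a new headline/body pair. The snippet below is a rough sketch that reuses the preprocessing helpers defined earlier; the example strings are invented, and the text is lower-cased here because the vocabulary was built from lower-cased tokens.

```python
"""Sketch: run the trained two-branch GRU model on one new example.

Assumes `model`, `word2idx`, `label_ref`, `cov2idx_unk`, and `pad_seq`
from the cells above are in scope; the headline/body text is made up.
"""
idx2label = {v: k for k, v in label_ref.items()}

headline = "scientists discover new species of deep sea fish"
body = "researchers announced the discovery of a previously unknown fish species"

head_seq = pad_seq(cov2idx_unk([headline.lower()], word2idx), MAX_LEN_HEAD)
body_seq = pad_seq(cov2idx_unk([body.lower()], word2idx), MAX_LEN_BODY)

probs = model.predict([head_seq, body_seq])[0]
print({idx2label[i]: round(float(p), 3) for i, p in enumerate(probs)})
```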
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cba1f96e541a79aad56ca0d050bb9be238c38681
10,483
ipynb
Jupyter Notebook
examples/ipython/Smith Sphere.ipynb
waldyrious/galgebra
b5eb070340434d030dd737a5656fbf709538b0b1
[ "BSD-3-Clause" ]
null
null
null
examples/ipython/Smith Sphere.ipynb
waldyrious/galgebra
b5eb070340434d030dd737a5656fbf709538b0b1
[ "BSD-3-Clause" ]
null
null
null
examples/ipython/Smith Sphere.ipynb
waldyrious/galgebra
b5eb070340434d030dd737a5656fbf709538b0b1
[ "BSD-3-Clause" ]
null
null
null
26.606599
425
0.450825
[ [ [ "# Smith Sphere ", "_____no_output_____" ], [ "The [smith chart](http://en.wikipedia.org/wiki/Smith_chart) is a nomogram used frequently in RF/Microwave Engineering. Since its inception it has been recognised that projecting the chart onto the reimen sphere [1].\n\n[1]H. . Wheeler, “Reflection Charts Relating to Impedance Matching,” IEEE Transactions on Microwave Theory and Techniques, vol. 32, no. 9, pp. 1008–1021, Sep. 1984.", "_____no_output_____" ] ], [ [ "#from IPython.display import SVG\n#SVG('pics/smith_sphere.svg')\nfrom galgebra.printer import Format, Fmt", "_____no_output_____" ], [ "from galgebra import ga\nfrom galgebra.ga import Ga\nfrom sympy import *\nFormat()\n\n(o3d,er,ex,es) = Ga.build('e_r e_x e_s',g=[1,1,1])\n(o2d,zr,zx) = Ga.build('z_r z_x',g=[1,1])\n\nBz = er^ex # impedance plance \nBs = es^ex # reflection coefficient plane\nBx = er^es\nI = o3d.I()\n\ndef down(p, N):\n '''\n stereographically project a vector in G3 downto the bivector N\n '''\n n= -1*N.dual()\n return -(n^p)*(n-n*(n|p)).inv() \n\ndef up(p):\n '''\n stereographically project a vector in G2 upto the space G3\n '''\n if (p^Bz).obj == 0:\n N = Bz\n elif (p^Bs).obj == 0:\n N = Bs\n \n n = -N.dual()\n \n return n + 2*(p*p + 1).inv()*(p-n)\n \na,b,c,z,s,n = [o3d.mv(k,'vector') for k in ['a','b','c','z','s' ,'n']]", "_____no_output_____" ] ], [ [ "\n\nStarting with an impedance vector $z$, defined by a vector in the impedance plane $B_z$, this vector has two scalar components ( $z^r$, $z^x$) known as resistance and reactance", "_____no_output_____" ] ], [ [ "Bz.dual()", "_____no_output_____" ], [ "Bz.is_zero()", "_____no_output_____" ], [ "z = z.proj([er,ex])\nz", "_____no_output_____" ] ], [ [ "stereographically up-projecting this onto the sphere to point $p$, ", "_____no_output_____" ] ], [ [ "p = up(z)\np", "_____no_output_____" ], [ "simplify(p.norm2())", "_____no_output_____" ] ], [ [ "If we stereo-project this back onto the impedance plane", "_____no_output_____" ] ], [ [ "down(p, Bz)", "_____no_output_____" ], [ "down(p,Bs).simplify()", "_____no_output_____" ], [ "(z-er)*(z+er).inv()", "_____no_output_____" ], [ "p", "_____no_output_____" ], [ "R=((-pi/4)*Bx).exp()\nR", "_____no_output_____" ], [ "R*p*R.rev()", "_____no_output_____" ], [ "down(R*p*R.rev(),Bz)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ] ]
cba20bfadd11f3e99c5f06afe28a1b0a69a766be
40,681
ipynb
Jupyter Notebook
docs/tutorials/google/floquet.ipynb
balopat/Cirq
5589c31830c41670192e13127e4dc4d2ce0d3fdb
[ "Apache-2.0" ]
1
2020-10-15T19:02:39.000Z
2020-10-15T19:02:39.000Z
docs/tutorials/google/floquet.ipynb
balopat/Cirq
5589c31830c41670192e13127e4dc4d2ce0d3fdb
[ "Apache-2.0" ]
5
2020-08-20T01:03:57.000Z
2021-04-13T00:26:48.000Z
docs/tutorials/google/floquet.ipynb
balopat/Cirq
5589c31830c41670192e13127e4dc4d2ce0d3fdb
[ "Apache-2.0" ]
1
2019-06-17T11:21:53.000Z
2019-06-17T11:21:53.000Z
29.956554
558
0.566776
[ [ [ "##### Copyright 2021 The Cirq Developers", "_____no_output_____" ], [ "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "_____no_output_____" ] ], [ [ "# Floquet calibration", "_____no_output_____" ], [ "<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://quantumai.google/cirq/tutorials/google/floquet\"><img src=\"https://quantumai.google/site-assets/images/buttons/quantumai_logo_1x.png\" />View on QuantumAI</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/quantumlib/Cirq/blob/master/docs/tutorials/google/floquet.ipynb\"><img src=\"https://quantumai.google/site-assets/images/buttons/colab_logo_1x.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/quantumlib/Cirq/blob/master/docs/tutorials/google/floquet.ipynb\"><img src=\"https://quantumai.google/site-assets/images/buttons/github_logo_1x.png\" />View source on GitHub</a>\n </td>\n <td>\n <a href=\"https://storage.googleapis.com/tensorflow_docs/Cirq/docs/tutorials/google/floquet.ipynb\"><img src=\"https://quantumai.google/site-assets/images/buttons/download_icon_1x.png\" />Download notebook</a>\n </td>\n</table>", "_____no_output_____" ], [ "This notebook demonstrates the Floquet calibration API, a tool for characterizing $\\sqrt{\\text{iSWAP}}$ gates and inserting single-qubit $Z$ phases to compensate for errors. This characterization is done by the Quantum Engine and the insertion of $Z$ phases for compensation/calibration is completely client-side with the help of Cirq utilities. At the highest level, the tool inputs a quantum circuit of interest (as well as a backend to run on) and outputs a calibrated circuit for this backend which can then be executed to produce better results.", "_____no_output_____" ], [ "## Details on the calibration tool", "_____no_output_____" ], [ "In more detail, assuming we have a number-convserving two-qubit unitary gate, Floquet calibration (FC) returns fast, accurate estimates for the relevant angles to be calibrated. The `cirq.PhasedFSimGate` has five angles $\\theta$, $\\zeta$, $\\chi$, $\\gamma$, $\\phi$ with unitary matrix\n\n$$\n\\left[ \\begin{matrix}\n1 & 0 & 0 & 0 \\\\\n0 & \\exp(-i \\gamma - i \\zeta) cos( \\theta ) & -i \\exp(-i \\gamma + i \\chi) sin( \\theta ) & 0 \\\\\n0 & -i \\exp(-i \\gamma - i \\chi) sin( \\theta ) & \\exp(-i \\gamma + i \\zeta) cos( \\theta) & 0 \\\\\n0 & 0 & 0 & \\exp(-2 i \\gamma -i \\phi ) \n\\end{matrix} \\right]\n$$\n\nWith Floquet calibration, every angle but $\\chi$ can be calibrated. In experiments, we have found these angles change when gates are run in parallel. Because of this, we perform FC on entire moments of two-qubits gates and return different characterized angles for each. \n\nAfter characterizing a set of angles, one needs to adjust the circuit to compensate for the offset. The simplest adjustment is for $\\zeta$ and $\\gamma$ and works by adding $R_z$ gates before and after the two-qubit gates in question. 
For many circuits, even this simplest compensation can lead to a significant improvement in results. We provide methods for doing this in this notebook and analyze results for an example circuit.\n\nWe do not attempt to correct the misaligned iSWAP rotation or the additional two-qubit phase in this notebook. This is a non-trivial task and we do currently have simple tools to achieve this. It is up to the user to correct for these as best as possible.", "_____no_output_____" ], [ "Note: The Floquet calibration API and this documentation is ongoing work. The amount by which errors are reduced may vary from run to run and from circuit to circuit.", "_____no_output_____" ], [ "## Setup", "_____no_output_____" ] ], [ [ "try:\n import cirq\nexcept ImportError:\n print(\"installing cirq...\")\n !pip install cirq --quiet\n print(\"installed cirq.\")", "_____no_output_____" ], [ "from typing import Iterable, List, Optional, Sequence\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport cirq\nimport cirq_google as cg # Contains the Floquet calibration tools.", "_____no_output_____" ] ], [ [ "Note: In order to run on Google's Quantum Computing Service, an environment variable `GOOGLE_CLOUD_PROJECT` must be present and set to a valid Google Cloud Platform project identifier. If this is not satisfied, we default to an engine simulator.", "_____no_output_____" ], [ "Running the next cell will prompt you to authenticate Google Cloud SDK to use your project. See the [Getting Started Guide](../tutorials/google/start.ipynb) for more information.", "_____no_output_____" ], [ "Note: Leave `project_id` blank to use a noisy simulator.", "_____no_output_____" ] ], [ [ "# The Google Cloud Project id to use.\nproject_id = '' #@param {type:\"string\"}\n\nif project_id == '':\n import os \n if 'GOOGLE_CLOUD_PROJECT' not in os.environ:\n print(\"No processor_id provided and environment variable \"\n \"GOOGLE_CLOUD_PROJECT not set, defaulting to noisy simulator.\")\n processor_id = None\n engine = cg.PhasedFSimEngineSimulator.create_with_random_gaussian_sqrt_iswap(\n mean=cg.SQRT_ISWAP_PARAMETERS,\n sigma=cg.PhasedFSimCharacterization(\n theta=0.01, zeta=0.10, chi=0.01, gamma=0.10, phi=0.02\n ),\n )\n sampler = engine\n device = cg.Bristlecone\n line_length = 20\nelse: \n import os\n os.environ['GOOGLE_CLOUD_PROJECT'] = project_id\n\n def authenticate_user():\n \"\"\"Runs the user through the Colab OAuth process.\n\n Checks for Google Application Default Credentials and runs interactive login \n if the notebook is executed in Colab. In case the notebook is executed in Jupyter notebook\n or other IPython runtimes, no interactive login is provided, it is assumed that the \n `GOOGLE_APPLICATION_CREDENTIALS` env var is set or `gcloud auth application-default login`\n was executed already.\n\n For more information on using Application Default Credentials see \n https://cloud.google.com/docs/authentication/production\n \"\"\"\n in_colab = False\n try:\n from IPython import get_ipython\n in_colab = 'google.colab' in str(get_ipython())\n except: \n # Notebook is not executed within IPython. 
Assuming external authentication.\n return \n\n if in_colab: \n from google.colab import auth \n print(\"Getting OAuth2 credentials.\")\n print(\"Press enter after entering the verification code.\")\n auth.authenticate_user(clear_output=False)\n print(\"Authentication complete.\")\n else: \n print(\"Notebook is not executed with Colab, assuming Application Default Credentials are setup.\") \n\n authenticate_user()\n print(\"Successful authentication to Google Cloud.\")\n \n processor_id = \"\" #@param {type:\"string\"}\n engine = cg.get_engine()\n device = cg.get_engine_device(processor_id)\n sampler = cg.get_engine_sampler(processor_id, gate_set_name=\"sqrt_iswap\")\n line_length = 35", "_____no_output_____" ] ], [ [ "## Minimal example for a single $\\sqrt{\\text{iSWAP}}$ gate", "_____no_output_____" ], [ "To see how the API is used, we first show the simplest usage of Floquet calibration for a minimal example of one $\\sqrt{\\text{iSWAP}}$ gate. After this section, we show detailed usage with a larger circuit and analyze the results.", "_____no_output_____" ], [ "The gates that are calibrated by Floquet calibration are $\\sqrt{\\text{iSWAP}}$ gates:", "_____no_output_____" ] ], [ [ "sqrt_iswap = cirq.FSimGate(np.pi / 4, 0.0)\nprint(cirq.unitary(sqrt_iswap).round(3))", "_____no_output_____" ] ], [ [ "First we get two connected qubits on the selected device and define a circuit.", "_____no_output_____" ] ], [ [ "\"\"\"Define a simple circuit to use Floquet calibration on.\"\"\"\nqubits = cg.line_on_device(device, length=2)\ncircuit = cirq.Circuit(sqrt_iswap.on(*qubits))\n\n# Display it.\nprint(\"Circuit to calibrate:\\n\")\nprint(circuit)", "_____no_output_____" ] ], [ [ "The simplest way to use Floquet calibration is as follows.", "_____no_output_____" ] ], [ [ "\"\"\"Simplest usage of Floquet calibration.\"\"\"\ncalibrated_circuit, *_ = cg.run_zeta_chi_gamma_compensation_for_moments(\n circuit,\n engine,\n processor_id=processor_id,\n gate_set=cg.SQRT_ISWAP_GATESET\n)", "_____no_output_____" ] ], [ [ "Note: Additional returned arguments, omitted here for simplicity, are described below.", "_____no_output_____" ], [ "When we print out the returned `calibrated_circuit.circuit` below, we see the added $Z$ rotations to compensate for errors.", "_____no_output_____" ] ], [ [ "print(\"Calibrated circuit:\\n\")\ncalibrated_circuit.circuit", "_____no_output_____" ] ], [ [ "This `calibrated_circuit` can now be executed on the processor to produce better results.", "_____no_output_____" ], [ "## More detailed example with a larger circuit", "_____no_output_____" ], [ "We now use Floquet calibration on a larger circuit which models the evolution of a fermionic particle on a linear spin chain. 
The physics of this problem for a closed chain (here we use an open chain) has been studied in [Accurately computing electronic properties of materials using eigenenergies](https://arxiv.org/abs/2012.00921), but for the purposes of this notebook we can treat this just as an example to demonstrate Floquet calibration on.", "_____no_output_____" ], [ "First we use the function `cirq_google.line_on_device` to return a line of qubits of a specified length.", "_____no_output_____" ] ], [ [ "line = cg.line_on_device(device, line_length)\nprint(line)", "_____no_output_____" ] ], [ [ "This line is now broken up into a number of segments of a specified length (number of qubits).", "_____no_output_____" ] ], [ [ "segment_length = 5\nsegments = [line[i: i + segment_length] \n for i in range(0, line_length - segment_length + 1, segment_length)]", "_____no_output_____" ] ], [ [ "For example, the first segment consists of the following qubits.", "_____no_output_____" ] ], [ [ "print(*segments[0])", "_____no_output_____" ] ], [ [ "We now implement a number of Trotter steps on each segment in parallel. The middle qubit on each segment is put into the $|1\\rangle$ state, then each Trotter step consists of staggered $\\sqrt{\\text{iSWAP}}$ gates. All qubits are measured in the $Z$ basis at the end of the circuit.\n\nFor convenience, this code is wrapped in a function.", "_____no_output_____" ] ], [ [ "def create_example_circuit(\n segments: Sequence[Sequence[cirq.Qid]],\n num_trotter_steps: int,\n) -> cirq.Circuit:\n \"\"\"Returns a linear chain circuit to demonstrate Floquet calibration on.\"\"\"\n circuit = cirq.Circuit()\n\n # Initial state preparation.\n for segment in segments:\n circuit += [cirq.X.on(segment[len(segment) // 2])]\n\n # Trotter steps.\n for step in range(num_trotter_steps):\n offset = step % 2\n moment = cirq.Moment()\n for segment in segments:\n moment += cirq.Moment(\n [sqrt_iswap.on(a, b) for a, b in zip(segment[offset::2], \n segment[offset + 1::2])])\n circuit += moment\n\n # Measurement.\n circuit += cirq.measure(*sum(segments, ()), key='z')\n return circuit", "_____no_output_____" ] ], [ [ "As an example, we show this circuit on the first segment of the line from above.", "_____no_output_____" ] ], [ [ "\"\"\"Example of the linear chain circuit on one segment of the line.\"\"\"\nnum_trotter_steps = 20\n\ncircuit_on_segment = create_example_circuit(\n segments=[segments[0]],\n num_trotter_steps=num_trotter_steps,\n)\nprint(circuit_on_segment.to_text_diagram(qubit_order=segments[0]))", "_____no_output_____" ] ], [ [ "The circuit we will use for Floquet calibration is this same pattern repeated on all segments of the line.", "_____no_output_____" ] ], [ [ "\"\"\"Circuit used to demonstrate Floquet calibration.\"\"\"\ncircuit = create_example_circuit(\n segments=segments,\n num_trotter_steps=num_trotter_steps\n)", "_____no_output_____" ] ], [ [ "### Execution on a simulator", "_____no_output_____" ], [ "To establish a \"ground truth,\" we first simulate a segment on a noiseless simulator.", "_____no_output_____" ] ], [ [ "\"\"\"Simulate one segment on a simulator.\"\"\"\nnreps = 20_000\nsim_result = cirq.Simulator().run(circuit_on_segment, repetitions=nreps)", "_____no_output_____" ] ], [ [ "### Execution on the processor without Floquet calibration", "_____no_output_____" ], [ "We now execute the full circuit on a processor without using Floquet calibration.", "_____no_output_____" ] ], [ [ "\"\"\"Execute the full circuit on a processor without Floquet 
calibration.\"\"\"\nraw_results = sampler.run(circuit, repetitions=nreps)", "_____no_output_____" ] ], [ [ "### Comparing raw results to simulator results", "_____no_output_____" ], [ "For comparison we will plot densities (average measurement results) on each segment. Such densities are in the interval $[0, 1]$ and more accurate results are closer to the simulator results.\n\nTo visualize results, we define a few helper functions.", "_____no_output_____" ], [ "#### Helper functions", "_____no_output_____" ], [ "Note: The functions in this section are just utilities for visualizing results and not essential for Floquet calibration. As such this section can be safely skipped or skimmed.", "_____no_output_____" ], [ "The next cell defines two functions for returning the density (average measurement results) on a segment or on all segments. We can optionally post-select for measurements with a specific filling (particle number) - i.e., discard measurement results which don't obey this expected particle number.", "_____no_output_____" ] ], [ [ "def z_density_from_measurements(\n measurements: np.ndarray,\n post_select_filling: Optional[int] = 1\n) -> np.ndarray:\n \"\"\"Returns density for one segment on the line.\"\"\"\n counts = np.sum(measurements, axis=1, dtype=int)\n \n if post_select_filling is not None:\n errors = np.abs(counts - post_select_filling)\n counts = measurements[(errors == 0).nonzero()]\n\n return np.average(counts, axis=0)\n\n\ndef z_densities_from_result(\n result: cirq.Result,\n segments: Iterable[Sequence[cirq.Qid]],\n post_select_filling: Optional[int] = 1\n) -> List[np.ndarray]:\n \"\"\"Returns densities for each segment on the line.\"\"\"\n measurements = result.measurements['z']\n z_densities = []\n \n offset = 0\n for segment in segments:\n z_densities.append(z_density_from_measurements(\n measurements[:, offset: offset + len(segment)], \n post_select_filling)\n )\n offset += len(segment)\n return z_densities", "_____no_output_____" ] ], [ [ "Now we define functions to plot the densities for the simulator, processor without Floquet calibration, and processor with Floquet calibration (which we will use at the end of this notebook). 
The first function is for a single segment, and the second function is for all segments.", "_____no_output_____" ] ], [ [ "#@title\ndef plot_density(\n ax: plt.Axes,\n sim_density: np.ndarray,\n raw_density: np.ndarray,\n cal_density: Optional[np.ndarray] = None,\n raw_errors: Optional[np.ndarray] = None,\n cal_errors: Optional[np.ndarray] = None,\n title: Optional[str] = None,\n show_legend: bool = True,\n show_ylabel: bool = True,\n) -> None:\n \"\"\"Plots the density of a single segment for simulated, raw, and calibrated\n results.\n \"\"\"\n colors = [\"grey\", \"orange\", \"green\"]\n alphas = [0.5, 0.8, 0.8]\n labels = [\"sim\", \"raw\", \"cal\"]\n\n # Plot densities.\n for i, density in enumerate([sim_density, raw_density, cal_density]):\n if density is not None:\n ax.plot(\n range(len(density)), \n density, \n \"-o\" if i == 0 else \"o\",\n markersize=11,\n color=colors[i],\n alpha=alphas[i],\n label=labels[i]\n )\n\n # Plot errors if provided.\n errors = [raw_errors, cal_errors]\n densities = [raw_density, cal_density]\n for i, (errs, dens) in enumerate(zip(errors, densities)):\n if errs is not None:\n ax.errorbar(\n range(len(errs)),\n dens,\n errs,\n linestyle='',\n color=colors[i + 1],\n capsize=8,\n elinewidth=2,\n markeredgewidth=2\n )\n \n # Titles, axes, and legend.\n ax.set_xticks(list(range(len(sim_density))))\n ax.set_xlabel(\"Qubit index in segment\")\n if show_ylabel:\n ax.set_ylabel(\"Density\")\n if title:\n ax.set_title(title)\n if show_legend:\n ax.legend()\n\n\ndef plot_densities(\n sim_density: np.ndarray,\n raw_densities: Sequence[np.ndarray],\n cal_densities: Optional[Sequence[np.ndarray]] = None,\n rows: int = 3\n) -> None:\n \"\"\"Plots densities for simulated, raw, and calibrated results on all segments.\n \"\"\"\n if not cal_densities:\n cal_densities = [None] * len(raw_densities)\n\n cols = (len(raw_densities) + rows - 1) // rows\n\n fig, axes = plt.subplots(\n rows, cols, figsize=(cols * 4, rows * 3.5), sharey=True\n )\n if rows == 1 and cols == 1:\n axes = [axes]\n elif rows > 1 and cols > 1:\n axes = [axes[row, col] for row in range(rows) for col in range(cols)]\n\n for i, (ax, raw, cal) in enumerate(zip(axes, raw_densities, cal_densities)):\n plot_density(\n ax, \n sim_density, \n raw, \n cal, \n title=f\"Segment {i + 1}\", \n show_legend=False,\n show_ylabel=i % cols == 0\n )\n\n # Common legend for all subplots.\n handles, labels = ax.get_legend_handles_labels()\n fig.legend(handles, labels)\n\n plt.tight_layout(pad=0.1, w_pad=1.0, h_pad=3.0)", "_____no_output_____" ] ], [ [ "#### Visualizing results", "_____no_output_____" ], [ "Note: This section uses helper functions from the previous section to plot results. The code can be safely skimmed: emphasis should be on the plots.", "_____no_output_____" ], [ "To visualize results, we first extract densities from the measurements.", "_____no_output_____" ] ], [ [ "\"\"\"Extract densities from measurement results.\"\"\"\n# Simulator density.\nsim_density, = z_densities_from_result(sim_result,[circuit_on_segment])\n\n# Processor densities without Floquet calibration.\nraw_densities = z_densities_from_result(raw_results, segments)", "_____no_output_____" ] ], [ [ "We first plot the densities on each segment. 
Note that the simulator densities (\"sim\") are repeated on each segment and the lines connecting them are just visual guides.", "_____no_output_____" ] ], [ [ "plot_densities(sim_density, raw_densities, rows=int(np.sqrt(line_length / segment_length)))", "_____no_output_____" ] ], [ [ "We can also look at the average and variance over the segments.", "_____no_output_____" ] ], [ [ "\"\"\"Plot mean density and variance over segments.\"\"\"\nraw_avg = np.average(raw_densities, axis=0)\nraw_std = np.std(raw_densities, axis=0, ddof=1)\n\nplot_density(\n plt.gca(), \n sim_density, \n raw_density=raw_avg,\n raw_errors=raw_std,\n title=\"Average over segments\"\n)", "_____no_output_____" ] ], [ [ "In the next section, we will use Floquet calibration to produce better average results. After running the circuit with Floquet calibration, we will use these same visualizations to compare results.", "_____no_output_____" ], [ "### Execution on the processor with Floquet calibration", "_____no_output_____" ], [ "There are two equivalent ways to use Floquet calibration which we outline below. A rough estimate for the time required for Floquet calibration is about 16 seconds per 10 qubits, plus 30 seconds of overhead, per calibrated moment.", "_____no_output_____" ], [ "#### Simple usage", "_____no_output_____" ], [ "The first way to use Floquet calibration is via the single function call used at the start of this notebook. Here, we describe the remaining returned values in addition to `calibrated_circuit`.", "_____no_output_____" ], [ "Note: We comment out this section so Floquet calibration on the larger circuit is only executed once in the notebook.", "_____no_output_____" ] ], [ [ "# (calibrated_circuit, calibrations\n# ) = cg.run_zeta_chi_gamma_compensation_for_moments(\n# circuit,\n# engine,\n# processor_id=processor_id,\n# gate_set=cg.SQRT_ISWAP_GATESET\n# )", "_____no_output_____" ] ], [ [ "The returned `calibrated_circuit.circuit` can then be run on the engine. The full list of returned arguments is as follows:\n\n* `calibrated_circuit.circuit`: The input `circuit` with added $Z$ rotations around each $\\sqrt{\\text{iSWAP}}$ gate to compensate for errors.\n* `calibrated_circuit.moment_to_calibration`: Provides an index of the matching characterization (index in calibrations list) for each moment of the `calibrated_circuit.circuit`, or `None` if the moment was not characterized (e.g., for a measurement outcome).\n* `calibrations`: List of characterization results for each characterized moment. Each characterization contains angles for each qubit pair.", "_____no_output_____" ], [ "#### Step-by-step usage", "_____no_output_____" ], [ "Note: This section is provided to see the Floquet calibration API at a lower level, but the results are identical to the \"simple usage\" in the previous section.", "_____no_output_____" ], [ "The above function `cirq_google.run_floquet_phased_calibration_for_circuit` performs the following three steps:\n\n1. Find moments within the circuit that need to be characterized.\n2. Characterize them on the engine.\n3. 
Apply corrections to the original circuit.\n\nTo find moments that need to be characterized, we can do the following.", "_____no_output_____" ] ], [ [ "\"\"\"Step 1: Find moments in the circuit that need to be characterized.\"\"\"\n(characterized_circuit, characterization_requests\n ) = cg.prepare_floquet_characterization_for_moments(\n circuit,\n options=cg.FloquetPhasedFSimCalibrationOptions(\n characterize_theta=False,\n characterize_zeta=True,\n characterize_chi=False,\n characterize_gamma=True,\n characterize_phi=False\n )\n)", "_____no_output_____" ] ], [ [ "The `characterization_requests` contain information on the operations (gate + qubit pairs) to characterize.", "_____no_output_____" ] ], [ [ "\"\"\"Show an example characterization request.\"\"\"\nprint(f\"Total {len(characterization_requests)} moment(s) to characterize.\")\n\nprint(\"\\nExample request\")\nrequest = characterization_requests[0]\nprint(\"Gate:\", request.gate)\nprint(\"Qubit pairs:\", request.pairs)\nprint(\"Options: \", request.options)", "_____no_output_____" ] ], [ [ "We now characterize them on the engine using `cirq_google.run_calibrations`.", "_____no_output_____" ] ], [ [ "\"\"\"Step 2: Characterize moments on the engine.\"\"\"\ncharacterizations = cg.run_calibrations(\n characterization_requests,\n engine, \n processor_id=processor_id, \n gate_set=cg.SQRT_ISWAP_GATESET,\n max_layers_per_request=1,\n)", "_____no_output_____" ] ], [ [ "The `characterizations` store characterization results for each pair in each moment, for example.", "_____no_output_____" ] ], [ [ "print(f\"Total: {len(characterizations)} characterizations.\")\nprint()\n\n(pair, parameters), *_ = characterizations[0].parameters.items()\nprint(f\"Example pair: {pair}\")\nprint(f\"Example parameters: {parameters}\")", "_____no_output_____" ] ], [ [ "Finally, we apply corrections to the original circuit.", "_____no_output_____" ] ], [ [ "\"\"\"Step 3: Apply corrections to the circuit to get a calibrated circuit.\"\"\"\ncalibrated_circuit = cg.make_zeta_chi_gamma_compensation_for_moments(\n characterized_circuit,\n characterizations\n)", "_____no_output_____" ] ], [ [ "The calibrated circuit can now be run on the processor. We first inspect the calibrated circuit to compare to the original.", "_____no_output_____" ] ], [ [ "print(\"Portion of calibrated circuit:\")\nprint(\"\\n\".join(\n calibrated_circuit.circuit.to_text_diagram(qubit_order=line).splitlines()[:9] + \n [\"...\"]))", "_____no_output_____" ] ], [ [ "Note again that $\\sqrt{\\text{iSWAP}}$ gates are padded by $Z$ phases to compensate for errors. We now run this calibrated circuit.", "_____no_output_____" ] ], [ [ "\"\"\"Run the calibrated circuit on the engine.\"\"\"\ncal_results = sampler.run(calibrated_circuit.circuit, repetitions=nreps)", "_____no_output_____" ] ], [ [ "### Comparing raw results to calibrated results", "_____no_output_____" ], [ "We now compare results with and without Floquet calibration, again using the simulator results as a baseline for comparison. 
First we extract the calibrated densities.", "_____no_output_____" ] ], [ [ "\"\"\"Extract densities from measurement results.\"\"\"\ncal_densities = z_densities_from_result(cal_results, segments)", "_____no_output_____" ] ], [ [ "Now we reproduce the same density plots from above on each segment, this time including the calibrated (\"cal\") results.", "_____no_output_____" ] ], [ [ "plot_densities(\n sim_density, raw_densities, cal_densities, rows=int(np.sqrt(line_length / segment_length))\n)", "_____no_output_____" ] ], [ [ "We also visualize the mean and variance of results over segments as before.", "_____no_output_____" ] ], [ [ "\"\"\"Plot mean density and variance over segments.\"\"\"\nraw_avg = np.average(raw_densities, axis=0)\nraw_std = np.std(raw_densities, axis=0, ddof=1)\n\ncal_avg = np.average(cal_densities, axis=0)\ncal_std = np.std(cal_densities, axis=0, ddof=1)\n\nplot_density(\n plt.gca(), \n sim_density, \n raw_avg, \n cal_avg, \n raw_std, \n cal_std, \n title=\"Average over segments\"\n)", "_____no_output_____" ] ], [ [ "Last, we can look at density errors between raw/calibrated results and simulated results.", "_____no_output_____" ] ], [ [ "\"\"\"Plot errors of raw vs calibrated results.\"\"\"\nfig, axes = plt.subplots(ncols=2, figsize=(15, 4))\n\naxes[0].set_title(\"Error of the mean\")\naxes[0].set_ylabel(\"Density\")\naxes[1].set_title(\"Data standard deviation\")\n\ncolors = [\"orange\", \"green\"]\nlabels = [\"raw\", \"cal\"]\n\nfor index, density in enumerate([raw_densities, cal_densities]):\n color = colors[index]\n label = labels[index]\n\n average_density = np.average(density, axis=0)\n sites = list(range(len(average_density)))\n \n error = np.abs(average_density - sim_density)\n std_dev = np.std(density, axis=0, ddof=1)\n\n axes[0].plot(sites, error, color=color, alpha=0.6)\n axes[0].scatter(sites, error, color=color)\n\n axes[1].plot(sites, std_dev, label=label, color=color, alpha=0.6)\n axes[1].scatter(sites, std_dev, color=color)\n\nfor ax in axes:\n ax.set_xticks(sites)\n ax.set_xlabel(\"Qubit index in segment\")\n\nplt.legend();", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cba20e43fd16c392ca8f4616b226250f5cd43b9f
3,740
ipynb
Jupyter Notebook
source/2022/Beginner/content/000_LectureTitles.ipynb
qqiang00/ElementaryMathPython
7fa723335ca342b867d51776d1f009066c50092d
[ "MIT" ]
null
null
null
source/2022/Beginner/content/000_LectureTitles.ipynb
qqiang00/ElementaryMathPython
7fa723335ca342b867d51776d1f009066c50092d
[ "MIT" ]
null
null
null
source/2022/Beginner/content/000_LectureTitles.ipynb
qqiang00/ElementaryMathPython
7fa723335ca342b867d51776d1f009066c50092d
[ "MIT" ]
null
null
null
25.972222
60
0.568449
[ [ [ "## Course Plan 课程计划\nfor MTH100 2022 \n\nCreated: Jan 2nd, 2022, Last Updated: Jan 3rd, 2022\n\n创建日期: 2022年01月02日, 最近更新:2022年01月03日", "_____no_output_____" ], [ "### 2022 Winter\n- 01 Hello, Python 你好,Python\n- 02 Create a Name 起一个名字\n- 03 Compare Ages 比较年龄大小\n- 04 Guess Numbers 猜数字\n- 05 Start to Draw 开始涂鸦\n- 06 Draw Rectangles 绘制矩形\n- 07 Draw Regular Polygons 绘制正多边形\n- 08 Sum of 1 to 100 1到100求和\n- 09 King's Rewards 1 国王的奖励1\n- 10 King's Rewards 2 国王的奖励2\n- Mid Term Exam 期中考试", "_____no_output_____" ], [ "- 11 Find Sequence Pattern 找数列规律\n- 12 Sequence 1 序列1\n- 13 Sequence 2 序列2\n- 14 And or Or 和还是或\n- 15 Chicken Rabbits in One Cage 鸡兔同笼\n- 16 Empty the Pool 清空水池\n- 17 Escape from Well Bottom 逃离井底\n- 18 Maximize Area 找最大面积\n- Final Exam 期末考试", "_____no_output_____" ], [ "### 2022 Summer\n- 21 Division by Estimating Quotient 试商除法\n- 22 Quotient and Remainder 商和余数\n- 23 Factors 因数\n- 24 Composite Numbers 合数\n- 25 Prime Numbers 质数\n- 26 Axis 数轴\n- 27 Negative Numbers 负数\n- 28 Decimal Numbers 小数\n- 29 Locate Numbers on Axis 点在数轴上的位置\n- Mid Term Exam 期中考试", "_____no_output_____" ], [ "- 31 Permutation 排列\n- 32 Combination 组合\n- 33 Opposite and Absolute Numbers 相反数和绝对值\n- 34 Max and Min 最大和最小\n- 35 Secret Messaging 悄悄话\n- 36 Random Walk 随机行走\n- 37 Cartesian Coordinate System 笛卡尔坐标系\n- 38 Locate Points in a Plane 1 平面上点的定位1\n- 39 Locate Points in a Plane 2 平面上点的定位2\n- Final Exam 期末考试", "_____no_output_____" ], [ "### 2022 Fall\n- 41 Parallel 平行\n- 42 Calculate Distance 计算距离\n- 43 Draw Arbitary Polygons 绘制任意多边形\n- 44 Perimeter of Rectangles 矩形的周长\n- 45 Area of Rectangles 矩形的面积\n- 46 Square and Power 平方和幂\n- 47 Square of Sum 和的平方\n- 48 Parallelogram 平行四边形\n- 49 Triangle 三角形\n- Mid Term Exam 期中考试", "_____no_output_____" ], [ "- 51 Pythagoras Theorem 1 毕达哥拉斯定理1\n- 52 Pythagoras Theorem 2 毕达哥拉斯定理2\n- 53 Square Root 平方根\n- 54 Slope and Tangent 斜率和正切\n- 55 Distance Between Arbitary two Points 任意两点距离\n- 56 Axial Symmetry 轴对称\n- 57 Rotational Symmetry 旋转对称\n- 58 Sum, Mean and Median 总和、均值和中位数\n- 59 Line and Bar Chart 折线和柱状图\n- Final Exam 期末考试", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
cba2214eb8e8df8c482539cf42ddf3745bf0c7f4
108,114
ipynb
Jupyter Notebook
code/monkey_classifier/monkeyTL.ipynb
Pruthvi-Sanghavi/Digit_Recognition
09de4259d8cc2bc92440f3f61f3bbb1835b5c7be
[ "MIT" ]
null
null
null
code/monkey_classifier/monkeyTL.ipynb
Pruthvi-Sanghavi/Digit_Recognition
09de4259d8cc2bc92440f3f61f3bbb1835b5c7be
[ "MIT" ]
null
null
null
code/monkey_classifier/monkeyTL.ipynb
Pruthvi-Sanghavi/Digit_Recognition
09de4259d8cc2bc92440f3f61f3bbb1835b5c7be
[ "MIT" ]
null
null
null
55.585604
6,930
0.643682
[ [ [ "##The premise of this project is for the implementation a CNN with VGG-16 as a feature selector \nimport matplotlib.pyplot as plt\n%matplotlib inline", "_____no_output_____" ], [ "#Create an ImageGenerator object that is used to randomize and make certain small transformations to the image\n#to build better and robust networks\n\nfrom keras.preprocessing.image import ImageDataGenerator", "_____no_output_____" ], [ "image_gen = ImageDataGenerator(rotation_range=30,\n width_shift_range=0.1,\n height_shift_range=0.1,\n rescale=1/255,\n zoom_range=0.2,\n shear_range=0.2,\n fill_mode='nearest')", "_____no_output_____" ] ], [ [ "## Model:\n", "_____no_output_____" ] ], [ [ "from keras.applications import vgg16\nfrom keras.models import Sequential\nfrom keras.layers import Dense,Dropout,Flatten,Conv2D,MaxPooling2D\nfrom keras import optimizers", "_____no_output_____" ], [ "model = vgg16.VGG16(weights='imagenet', include_top=False, \n input_shape=(150,150,3), pooling=\"max\")\n\nfor layer in model.layers[:-3]:\n layer.trainable = False\n\nfor layer in model.layers:\n print(layer, layer.trainable)\n\n\ntransfer_model = Sequential()\nfor layer in model.layers:\n transfer_model.add(layer)\ntransfer_model.add(Dense(128, activation=\"relu\")) \ntransfer_model.add(Dropout(0.5))\ntransfer_model.add(Dense(10, activation=\"softmax\")) \n\nadam = optimizers.Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.00001)\n\ntransfer_model.compile(loss=\"categorical_crossentropy\",\n optimizer=adam,\n metrics=[\"accuracy\"])", "Downloading data from https://storage.googleapis.com/tensorflow/keras-applications/vgg16/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5\n58892288/58889256 [==============================] - 5s 0us/step\n<tensorflow.python.keras.engine.input_layer.InputLayer object at 0x0000020EFB4D7430> False\n<tensorflow.python.keras.layers.convolutional.Conv2D object at 0x0000020EFB4EB640> False\n<tensorflow.python.keras.layers.convolutional.Conv2D object at 0x0000020EFB5437F0> False\n<tensorflow.python.keras.layers.pooling.MaxPooling2D object at 0x0000020EFB5434C0> False\n<tensorflow.python.keras.layers.convolutional.Conv2D object at 0x0000020EFB5C96A0> False\n<tensorflow.python.keras.layers.convolutional.Conv2D object at 0x0000020EFB5D9100> False\n<tensorflow.python.keras.layers.pooling.MaxPooling2D object at 0x0000020EFB5DBFD0> False\n<tensorflow.python.keras.layers.convolutional.Conv2D object at 0x0000020EFB5E5CA0> False\n<tensorflow.python.keras.layers.convolutional.Conv2D object at 0x0000020EFB5EF490> False\n<tensorflow.python.keras.layers.convolutional.Conv2D object at 0x0000020EFB5E5A60> False\n<tensorflow.python.keras.layers.pooling.MaxPooling2D object at 0x0000020EFB5DBD60> False\n<tensorflow.python.keras.layers.convolutional.Conv2D object at 0x0000020EFB631400> False\n<tensorflow.python.keras.layers.convolutional.Conv2D object at 0x0000020EFB6373D0> False\n<tensorflow.python.keras.layers.convolutional.Conv2D object at 0x0000020EFB637C70> False\n<tensorflow.python.keras.layers.pooling.MaxPooling2D object at 0x0000020EFB642610> False\n<tensorflow.python.keras.layers.convolutional.Conv2D object at 0x0000020EFB6506A0> False\n<tensorflow.python.keras.layers.convolutional.Conv2D object at 0x0000020EFB657190> False\n<tensorflow.python.keras.layers.convolutional.Conv2D object at 0x0000020EFB65CF10> True\n<tensorflow.python.keras.layers.pooling.MaxPooling2D object at 0x0000020EFB642F40> True\n<tensorflow.python.keras.layers.pooling.GlobalMaxPooling2D object at 0x0000020EFB661D30> 
True\n" ], [ "transfer_model.summary()\n", "Model: \"sequential\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nblock1_conv1 (Conv2D) (None, 150, 150, 64) 1792 \n_________________________________________________________________\nblock1_conv2 (Conv2D) (None, 150, 150, 64) 36928 \n_________________________________________________________________\nblock1_pool (MaxPooling2D) (None, 75, 75, 64) 0 \n_________________________________________________________________\nblock2_conv1 (Conv2D) (None, 75, 75, 128) 73856 \n_________________________________________________________________\nblock2_conv2 (Conv2D) (None, 75, 75, 128) 147584 \n_________________________________________________________________\nblock2_pool (MaxPooling2D) (None, 37, 37, 128) 0 \n_________________________________________________________________\nblock3_conv1 (Conv2D) (None, 37, 37, 256) 295168 \n_________________________________________________________________\nblock3_conv2 (Conv2D) (None, 37, 37, 256) 590080 \n_________________________________________________________________\nblock3_conv3 (Conv2D) (None, 37, 37, 256) 590080 \n_________________________________________________________________\nblock3_pool (MaxPooling2D) (None, 18, 18, 256) 0 \n_________________________________________________________________\nblock4_conv1 (Conv2D) (None, 18, 18, 512) 1180160 \n_________________________________________________________________\nblock4_conv2 (Conv2D) (None, 18, 18, 512) 2359808 \n_________________________________________________________________\nblock4_conv3 (Conv2D) (None, 18, 18, 512) 2359808 \n_________________________________________________________________\nblock4_pool (MaxPooling2D) (None, 9, 9, 512) 0 \n_________________________________________________________________\nblock5_conv1 (Conv2D) (None, 9, 9, 512) 2359808 \n_________________________________________________________________\nblock5_conv2 (Conv2D) (None, 9, 9, 512) 2359808 \n_________________________________________________________________\nblock5_conv3 (Conv2D) (None, 9, 9, 512) 2359808 \n_________________________________________________________________\nblock5_pool (MaxPooling2D) (None, 4, 4, 512) 0 \n_________________________________________________________________\nglobal_max_pooling2d (Global (None, 512) 0 \n_________________________________________________________________\ndense (Dense) (None, 128) 65664 \n_________________________________________________________________\ndropout (Dropout) (None, 128) 0 \n_________________________________________________________________\ndense_1 (Dense) (None, 10) 1290 \n=================================================================\nTotal params: 14,781,642\nTrainable params: 7,146,378\nNon-trainable params: 7,635,264\n_________________________________________________________________\n" ], [ "# Load files in google colab\nfrom google.colab import files\n\n# Install kaggle to download the dataset\n!pip install -q kaggle\n\n# Upload the kaggle api token json file\nupload = files.upload()\n!mkdir ~/.kaggle\n!cp /content/kaggle.json ~/.kaggle/kaggle.json\n# Download the dataset from kaggle using api link\n!kaggle datasets download -d slothkong/10-monkey-species\n\n# Unzip the dataset folder\n!unzip 10-monkey-species", "_____no_output_____" ], [ "train_directory = 'datasets/training/training'\nvalidation_directory = 'datasets/validation/validation'", "_____no_output_____" ], [ "## Getting the training and 
the validation sets\nbatch_size = 16\ntrain_gen = image_gen.flow_from_directory(train_directory,target_size=(150,150),batch_size=batch_size,\n class_mode='categorical')", "Found 1098 images belonging to 10 classes.\n" ], [ "validation_gen = image_gen.flow_from_directory(validation_directory,target_size=(150,150),batch_size=batch_size,\n class_mode='categorical')", "Found 272 images belonging to 10 classes.\n" ], [ "results = transfer_model.fit_generator(train_gen,epochs=30,steps_per_epoch=1097//batch_size,\n validation_data=validation_gen,validation_steps=272//batch_size)", "c:\\users\\pruth\\onedrive\\desktop\\environment\\tf_proj\\lib\\site-packages\\tensorflow\\python\\keras\\engine\\training.py:1844: UserWarning: `Model.fit_generator` is deprecated and will be removed in a future version. Please use `Model.fit`, which supports generators.\n warnings.warn('`Model.fit_generator` is deprecated and '\n" ], [ "transfer_model.save('tlmonkeyCNN.h5')", "_____no_output_____" ], [ "_, acc = transfer_model.evaluate_generator(validation_gen, steps=272 //batch_size)", "c:\\users\\pruth\\onedrive\\desktop\\environment\\tf_proj\\lib\\site-packages\\tensorflow\\python\\keras\\engine\\training.py:1877: UserWarning: `Model.evaluate_generator` is deprecated and will be removed in a future version. Please use `Model.evaluate`, which supports generators.\n warnings.warn('`Model.evaluate_generator` is deprecated and '\n" ], [ "print('The testing accuracy for the CNN with the 10-Species-Monkey dataset is : %.3f' % (acc * 100.0))", "The testing accuracy for the CNN with the 10-Species-Monkey dataset is : 85.662\n" ], [ "from tensorflow import keras\nx = keras.models.load_model('tlmonkeyCNN.h5')", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cba229c2fba725cac6b94347e29417472780371c
648,852
ipynb
Jupyter Notebook
scenarios/detection/12_hard_negative_sampling.ipynb
karineip/computervision-recipes
984c46a15ffab855ce3138b0f6895d2a59ae3872
[ "MIT" ]
7,899
2019-12-14T20:39:16.000Z
2022-03-31T12:13:27.000Z
scenarios/detection/12_hard_negative_sampling.ipynb
somAzzz/computervision-recipes
1bb489af757fde7c773e16fab87b24305cff4457
[ "MIT" ]
185
2019-07-30T15:33:48.000Z
2019-12-12T13:42:01.000Z
scenarios/detection/12_hard_negative_sampling.ipynb
somAzzz/computervision-recipes
1bb489af757fde7c773e16fab87b24305cff4457
[ "MIT" ]
1,025
2019-12-18T06:30:48.000Z
2022-03-24T06:55:04.000Z
1,134.356643
588,504
0.95487
[ [ [ "<i>Copyright (c) Microsoft Corporation. All rights reserved.</i>\n\n<i>Licensed under the MIT License.</i>", "_____no_output_____" ], [ "# Hard Negative Sampling for Object Detection", "_____no_output_____" ], [ "You built an object detection model, evaluated it on a test set, and are happy with its accuracy. Now you deploy the model in a real-world application and you may find that the model over-fires heavily, i.e. it detects objects where none are.\n\nThis is a common problem in machine learning because our training set only contains a limited number of images, which is not sufficient to model the appearance of every object and every background in the world. Hard negative sampling (or hard negative mining) is a useful technique to address this problem. It is a way to make the model more robust to over-fitting by identifying images which are hard for the model and hence should be added to the training set. \n\nThe technique is widely used when one has a large number of negative images however adding all to the training set would cause (i) training to become too slow; and (ii) overwhelm training with too high a ratio of negatives to positives. For many negative images the model likely already performs well and hence adding them to the training set would not improve accuracy. Therefore, we try to identify those negative images where the model is incorrect.\n\nNote that hard-negative mining is a special case of active learning where the task is to identify images which are hard for the model, annotate these images with the ground truth label, and to add them to the training set. *Hard* could be defined as the model being wrong, or as the model being uncertain about a prediction.\n\n\n# Overview\n\nIn this notebook, we train our model on a training set <i>T</i> as usual, test the model on un-seen negative candidate images <i>U</i>, and see on which images in <i>U</i> the model over-fires. These images are then introduces into the training set <i>T</i> and the model is re-trained. As dataset, we use the *fridge objects* images (`watter_bottle`, `carton`, `can`, and `milk_bottle`), similar to the [01_training_introduction](./01_training_introduction.ipynb) notebook. \n<img src=\"./media/hard_neg.jpg\" width=\"600\"/>\n\nThe overall hard negative mining process is as follows: \n* First, prepare training set <i>T</i> and negative-candidate set <i>U</i>. A small proportion of both sets are set aside for evaluation.\n* Second, load a pre-trained detection model.\n* Next, mine hard negatives by following steps as shown in the figure:\n 1. Train the model on <i>T</i>.\n 2. Score the model on <i>U</i>.\n 3. 
Identify `NEGATIVE_NUM` images in <i>U</i> where the model is most incorrect and add to <i>T</i>.\n* Finally, repeat these steps until the model stops improving.", "_____no_output_____" ] ], [ [ "import sys\nsys.path.append(\"../../\")\nimport os\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom PIL import Image\nimport scrapbook as sb\nimport torch\nimport torchvision\nfrom torchvision import transforms\n\nfrom utils_cv.classification.data import Urls as UrlsIC\nfrom utils_cv.common.data import unzip_url\nfrom utils_cv.common.gpu import which_processor, is_windows\nfrom utils_cv.detection.data import Urls as UrlsOD\nfrom utils_cv.detection.dataset import DetectionDataset, get_transform\nfrom utils_cv.detection.model import DetectionLearner, get_pretrained_fasterrcnn\nfrom utils_cv.detection.plot import plot_detections, plot_grid \n\n# Change matplotlib backend so that plots are shown on windows machines\nif is_windows():\n plt.switch_backend('TkAgg')\n\nprint(f\"TorchVision: {torchvision.__version__}\")\nwhich_processor()", "TorchVision: 0.4.0\nTorch is using GPU: Tesla V100-PCIE-16GB\n" ], [ "# Ensure edits to libraries are loaded and plotting is shown in the notebook.\n%reload_ext autoreload\n%autoreload 2\n%matplotlib inline", "_____no_output_____" ] ], [ [ "Default parameters. Choose `NEGATIVE_NUM` so that the number of negative images to be added at each iteration corresponds to roughly 10-20% of the total number of images in the training set. If `NEGATIVE_NUM` is too low, then too few hard negatives get added to make a noticeable difference.", "_____no_output_____" ] ], [ [ "# Path to training images, and to the negative images \nDATA_PATH = unzip_url(UrlsOD.fridge_objects_path, exist_ok=True)\nNEG_DATA_PATH = unzip_url(UrlsIC.fridge_objects_negatives_path, exist_ok=True)\n\n# Number of negative images to add to the training set after each negative mining iteration. \n# Here set to 10, but this value should be around 10-20% of the total number of images in the training set.\nNEGATIVE_NUM = 10\n\n# Model parameters corresponding to the \"fast_inference\" parameters in the 03_training_accuracy_vs_speed notebook.\nEPOCHS = 10\nLEARNING_RATE = 0.005\nIM_SIZE = 500\nBATCH_SIZE = 2 \n\n# Use GPU if available\ndevice = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\nprint(f\"Using torch device: {device}\")\nassert str(device)==\"cuda\", \"Model evaluation requires CUDA capable GPU\"", "Using torch device: cuda\n" ] ], [ [ "## 1. 
Prepare datasets", "_____no_output_____" ], [ "We prepare our datasets in the following way:\n* Training images in `data.train_ds` which includes initially only *fridge objects* images, and after running hard-negative mining also negative images.\n* Negative candidate images in `neg_data.train_ds`.\n* Test images in `data.test_ds` to evaluate accuracy on *fridge objects* images, and in `neg_data.test_ds` to evaluate how often the model misfires on images which do not contain an object-of-interest.", "_____no_output_____" ] ], [ [ "# Model training dataset T, split into 75% training and 25% test\ndata = DetectionDataset(DATA_PATH, train_pct=0.75)\nprint(f\"Positive dataset: {len(data.train_ds)} training images and {len(data.test_ds)} test images.\")\n\n# Negative images split into hard-negative mining candidates U, and a negative test set.\n# Setting \"allow_negatives=True\" since the negative images don't have an .xml file with ground truth annotations\nneg_data = DetectionDataset(NEG_DATA_PATH, train_pct=0.80, batch_size=BATCH_SIZE, \n im_dir = \"\", allow_negatives = True, \n train_transforms = get_transform(train=False))\nprint(f\"Negative dataset: {len(neg_data.train_ds)} candidates for hard negative mining and {len(neg_data.test_ds)} test images.\") ", "Positive dataset: 96 training images and 32 test images.\nNegative dataset: 52 candidates for hard negative mining and 12 test images.\n" ] ], [ [ "## 2. Prepare a model\n\nInitialize a pre-trained Faster R-CNN model similar to the [01_training_introduction](./01_training_introduction.ipynb) notebook.", "_____no_output_____" ] ], [ [ "# Pre-trained Faster R-CNN model\ndetector = DetectionLearner(data, im_size=IM_SIZE)", "_____no_output_____" ], [ "# Record after each mining iteration the validation accuracy and how many objects were found in the negative test set\nvalid_accs = []\nnum_neg_detections = []", "_____no_output_____" ] ], [ [ "## 3. Train the model on *T*\n\n<a id='train'></a>\n\nModel training. As described at the start of this notebook, you likely need to repeat the steps from here until the end of the notebook several times to achieve optimal results.", "_____no_output_____" ] ], [ [ "# Fine-tune model. After each epoch prints the accuracy on the validation set.\ndetector.fit(EPOCHS, lr=LEARNING_RATE, print_freq=30)", "_____no_output_____" ] ], [ [ "Show the accuracy on the validation set for this and all previous mining iterations.", "_____no_output_____" ] ], [ [ "# Get validation accuracy on test set at IOU=0.5:0.95\nacc = float(detector.ap[-1][\"bbox\"])\nvalid_accs.append(acc)\n\n# Plot validation accuracy versus number of hard-negative mining iterations\nfrom utils_cv.common.plot import line_graph\nline_graph(\n values=(valid_accs), \n labels=(\"Validation\"),\n x_guides=range(len(valid_accs)),\n x_name=\"Hard negative mining iteration\",\n y_name=\"[email protected]:0.95\",\n)", "_____no_output_____" ] ], [ [ "## 4. Score the model on *U* ", "_____no_output_____" ], [ "Run inference on all negative candidate images. The images where the model is most incorrect will later be added as hard negatives to the training set.", "_____no_output_____" ] ], [ [ "detections = detector.predict_dl(neg_data.train_dl, threshold=0)\ndetections[0]", "_____no_output_____" ] ], [ [ "Count how many objects were detected in the negative test set. 
This number typically goes down dramatically after a few mining iterations, and is an indicator how much the model over-fires on unseen images.", "_____no_output_____" ] ], [ [ "# Count number of mis-detections on negative test set\ntest_detections = detector.predict_dl(neg_data.test_dl, threshold=0)\nbbox_scores = [bbox.score for det in test_detections for bbox in det['det_bboxes']]\nnum_neg_detections.append(len(bbox_scores))\n\n# Plot\nfrom utils_cv.common.plot import line_graph\nline_graph(\n values=(num_neg_detections), \n labels=(\"Negative test set\"),\n x_guides=range(len(num_neg_detections)), \n x_name=\"Hard negative mining iteration\",\n y_name=\"Number of detections\",\n)", "_____no_output_____" ] ], [ [ "## 5. Hard negative mining", "_____no_output_____" ], [ "Use the negative candidate images where the model is most incorrect as hard negatives. ", "_____no_output_____" ] ], [ [ "# For each image, get maximum score (i.e. confidence in the detection) over all detected bounding boxes in the image\nmax_scores = []\nfor idx, detection in enumerate(detections):\n if len(detection['det_bboxes']) > 0:\n max_score = max([d.score for d in detection['det_bboxes']])\n else:\n max_score = float('-inf')\n max_scores.append(max_score)", "_____no_output_____" ], [ "# Use the n images with highest maximum score as hard negatives\nhard_im_ids = np.argsort(max_scores)[::-1]\nhard_im_ids = hard_im_ids[:NEGATIVE_NUM]\nhard_im_scores =[max_scores[i] for i in hard_im_ids]\nprint(f\"Indentified {len(hard_im_scores)} hard negative images with detection scores in range {min(hard_im_scores)} to {max(hard_im_scores):4.2f}\")", "Indentified 10 hard negative images with detection scores in range -inf to 0.83\n" ] ], [ [ "Plot some of the identified hard negatives images. This will likely mistake objects which were not part of the training set as the objects-of-interest.", "_____no_output_____" ] ], [ [ "# Get image paths and ground truth boxes for the hard negative images\ndataset_ids = [detections[i]['idx'] for i in hard_im_ids]\nim_paths = [neg_data.train_ds.dataset.im_paths[i] for i in dataset_ids]\ngt_bboxes = [neg_data.train_ds.dataset.anno_bboxes[i] for i in dataset_ids]\n\n# Plot\ndef _grid_helper():\n for i in hard_im_ids:\n yield detections[i], neg_data, None, None\nplot_grid(plot_detections, _grid_helper(), rows=1)", "_____no_output_____" ] ], [ [ "## 6. Add hard negatives to *T*\n\nWe now add the identified hard negative images to the training set.", "_____no_output_____" ] ], [ [ "# Add identified hard negatives to training set\ndata.add_images(im_paths, gt_bboxes, target = \"train\")\nprint(f\"Added {len(im_paths)} hard negative images. Now: {len(data.train_ds)} training images and {len(data.test_ds)} test images\")\nprint(f\"Completed {len(valid_accs)} hard negative iterations.\")\n\n# Preserve some of the notebook outputs\nsb.glue(\"valid_accs\", valid_accs)\nsb.glue(\"hard_im_scores\", list(hard_im_scores))", "Added 10 hard negative images. Now: 126 training images and 32 test images\nCompleted 3 hard negative iterations.\n" ] ], [ [ "## Repeat", "_____no_output_____" ], [ "Now, **repeat** all steps starting from \"[3. Train the model on T](#train)\" to re-train the model and the training set T with added and add more hard negative images to the training set. **Stop** once the accuracy `valid_accs` stopped improving and if the number of (mis)detections in the negative test set `num_neg_detections` stops decreasing. ", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
cba22a994822d66d5655a967d38b79d724cfb954
34,996
ipynb
Jupyter Notebook
session/true-case/t5/t5-3x-super-tiny-4k.ipynb
huseinzol05/malaya
8c80209a8c8f33d9285c3dc0ab57923f7ff216a9
[ "MIT" ]
88
2021-01-06T10:01:31.000Z
2022-03-30T17:34:09.000Z
session/true-case/t5/t5-3x-super-tiny-4k.ipynb
huseinzol05/malaya
8c80209a8c8f33d9285c3dc0ab57923f7ff216a9
[ "MIT" ]
43
2021-01-14T02:44:41.000Z
2022-03-31T19:47:42.000Z
session/true-case/t5/t5-3x-super-tiny-4k.ipynb
huseinzol05/malaya
8c80209a8c8f33d9285c3dc0ab57923f7ff216a9
[ "MIT" ]
38
2021-01-06T07:15:03.000Z
2022-03-19T05:07:50.000Z
39.813424
945
0.587296
[ [ [ "import os\n\nos.environ['CUDA_VISIBLE_DEVICES'] = ''\nos.environ['GOOGLE_APPLICATION_CREDENTIALS'] = 'prepare/mesolitica-tpu.json'\nb2_application_key_id = os.environ['b2_application_key_id']\nb2_application_key = os.environ['b2_application_key']", "_____no_output_____" ], [ "from google.cloud import storage\nclient = storage.Client()\nbucket = client.bucket('mesolitica-tpu-general')", "_____no_output_____" ], [ "best = '1050000'\ndirectory = 't5-3x-super-tiny-true-case-4k'\n!rm -rf output out {directory}\n!mkdir {directory}", "_____no_output_____" ], [ "model = best\n\nblob = bucket.blob(f'{directory}/model.ckpt-{model}.data-00000-of-00002')\nblob.download_to_filename(f'{directory}/model.ckpt-{model}.data-00000-of-00002')\n\nblob = bucket.blob(f'{directory}/model.ckpt-{model}.data-00001-of-00002')\nblob.download_to_filename(f'{directory}/model.ckpt-{model}.data-00001-of-00002')\n\nblob = bucket.blob(f'{directory}/model.ckpt-{model}.index')\nblob.download_to_filename(f'{directory}/model.ckpt-{model}.index')\n\nblob = bucket.blob(f'{directory}/model.ckpt-{model}.meta')\nblob.download_to_filename(f'{directory}/model.ckpt-{model}.meta')\n\nblob = bucket.blob(f'{directory}/checkpoint')\nblob.download_to_filename(f'{directory}/checkpoint')\n\nblob = bucket.blob(f'{directory}/operative_config.gin')\nblob.download_to_filename(f'{directory}/operative_config.gin')", "_____no_output_____" ], [ "with open(f'{directory}/checkpoint', 'w') as fopen:\n fopen.write(f'model_checkpoint_path: \"model.ckpt-{model}\"')", "_____no_output_____" ], [ "from b2sdk.v1 import *\ninfo = InMemoryAccountInfo()\nb2_api = B2Api(info)\napplication_key_id = b2_application_key_id\napplication_key = b2_application_key\nb2_api.authorize_account(\"production\", application_key_id, application_key)\nfile_info = {'how': 'good-file'}\nb2_bucket = b2_api.get_bucket_by_name('malaya-model')", "_____no_output_____" ], [ "tar = 't5-3x-super-tiny-true-case-4k-2021-09-10.tar.gz'\nos.system(f'tar -czvf {tar} {directory}')\noutPutname = f'finetuned/{tar}'\nb2_bucket.upload_local_file(\n local_file=tar,\n file_name=outPutname,\n file_infos=file_info,\n)", "_____no_output_____" ], [ "os.system(f'rm {tar}')", "_____no_output_____" ], [ "import tensorflow as tf\nimport tensorflow_datasets as tfds\nimport t5", "_____no_output_____" ], [ "model = t5.models.MtfModel(\n model_dir=directory,\n tpu=None,\n tpu_topology=None,\n model_parallelism=1,\n batch_size=1,\n sequence_length={\"inputs\": 256, \"targets\": 256},\n learning_rate_schedule=0.003,\n save_checkpoints_steps=5000,\n keep_checkpoint_max=3,\n iterations_per_loop=100,\n mesh_shape=\"model:1,batch:1\", \n mesh_devices=[\"cpu:0\"]\n)", "_____no_output_____" ], [ "!rm -rf output/*", "_____no_output_____" ], [ "import gin\n\nfrom t5.data import sentencepiece_vocabulary\n\nDEFAULT_SPM_PATH = 'prepare/sp10m.cased.ms-en-4k.model'\nDEFAULT_EXTRA_IDS = 100\nmodel_dir = directory\n\ndef get_default_vocabulary():\n return sentencepiece_vocabulary.SentencePieceVocabulary(\n DEFAULT_SPM_PATH, DEFAULT_EXTRA_IDS)\n\nwith gin.unlock_config():\n gin.parse_config_file(t5.models.mtf_model._operative_config_path(model_dir))\n gin.bind_parameter(\"Bitransformer.decode.beam_size\", 1)\n gin.bind_parameter(\"Bitransformer.decode.temperature\", 0)\n gin.bind_parameter(\"utils.get_variable_dtype.slice_dtype\", \"float32\")\n gin.bind_parameter(\n \"utils.get_variable_dtype.activation_dtype\", \"float32\")\n \nvocabulary = t5.data.SentencePieceVocabulary(DEFAULT_SPM_PATH)\nestimator = 
model.estimator(vocabulary, disable_tpu=True)", "INFO:tensorflow:Using config: {'_model_dir': 't5-3x-super-tiny-true-case-4k', '_tf_random_seed': None, '_save_summary_steps': 100, '_save_checkpoints_steps': None, '_save_checkpoints_secs': None, '_session_config': allow_soft_placement: true\nisolate_session_state: true\n, '_keep_checkpoint_max': 5, '_keep_checkpoint_every_n_hours': 10000, '_log_step_count_steps': None, '_train_distribute': None, '_device_fn': None, '_protocol': None, '_eval_distribute': None, '_experimental_distribute': None, '_experimental_max_worker_delay_secs': None, '_session_creation_timeout_secs': 7200, '_service': None, '_cluster_spec': None, '_task_type': 'worker', '_task_id': 0, '_global_id_in_cluster': 0, '_master': '', '_evaluation_master': '', '_is_chief': True, '_num_ps_replicas': 0, '_num_worker_replicas': 1, '_tpu_config': TPUConfig(iterations_per_loop=100, num_shards=None, num_cores_per_replica=1, per_host_input_for_training=4, tpu_job_name=None, initial_infeed_sleep_secs=None, input_partition_dims=None, eval_training_input_configuration=2, experimental_host_call_every_n_steps=1), '_cluster': <tensorflow.python.distribute.cluster_resolver.tpu_cluster_resolver.TPUClusterResolver object at 0x7f8be9891d68>}\nINFO:tensorflow:_TPUContext: eval_on_tpu True\nWARNING:tensorflow:eval_on_tpu ignored because use_tpu is False.\n" ], [ "import os\n\ncheckpoint_step = t5.models.mtf_model._get_latest_checkpoint_from_dir(model_dir)\nmodel_ckpt = \"model.ckpt-\" + str(checkpoint_step)\ncheckpoint_path = os.path.join(model_dir, model_ckpt)\ncheckpoint_step, model_ckpt, checkpoint_path", "_____no_output_____" ], [ "from mesh_tensorflow.transformer import dataset as transformer_dataset\n\ndef serving_input_fn():\n inputs = tf.placeholder(\n dtype=tf.string,\n shape=[None],\n name=\"inputs\")\n\n batch_size = tf.shape(inputs)[0]\n padded_inputs = tf.pad(inputs, [(0, tf.mod(-tf.size(inputs), batch_size))])\n dataset = tf.data.Dataset.from_tensor_slices(padded_inputs)\n dataset = dataset.map(lambda x: {\"inputs\": x})\n dataset = transformer_dataset.encode_all_features(dataset, vocabulary)\n dataset = transformer_dataset.pack_or_pad(\n dataset=dataset,\n length=model._sequence_length,\n pack=False,\n feature_keys=[\"inputs\"]\n )\n dataset = dataset.batch(tf.cast(batch_size, tf.int64))\n features = tf.data.experimental.get_single_element(dataset)\n return tf.estimator.export.ServingInputReceiver(\n features=features, receiver_tensors=inputs)\n\nout = estimator.export_saved_model('output', serving_input_fn, checkpoint_path=checkpoint_path)", "WARNING:tensorflow:From /home/husein/.local/lib/python3.6/site-packages/tensorflow_core/python/ops/resource_variable_ops.py:1630: calling BaseResourceVariable.__init__ (from tensorflow.python.ops.resource_variable_ops) with constraint is deprecated and will be removed in a future version.\nInstructions for updating:\nIf using Keras pass *_constraint arguments to layers.\nINFO:tensorflow:Calling model_fn.\nINFO:tensorflow:Running infer on CPU\nINFO:tensorflow:feature inputs : Tensor(\"Reshape:0\", shape=(1, 256), dtype=int32)\nWARNING:tensorflow:From /home/husein/.local/lib/python3.6/site-packages/mesh_tensorflow/transformer/utils.py:427: Print (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2018-08-20.\nInstructions for updating:\nUse tf.print instead of tf.Print. Note that tf.print returns a no-output operator that directly prints the output. 
Outside of defuns or eager mode, this operator will not be executed unless it is directly specified in session.run or used as a control dependency for other operators. This is only a concern in graph mode. Below is an example of how to ensure tf.print executes in graph mode:\n\nWARNING:tensorflow:Using default tf glorot_uniform_initializer for variable encoder/block_000/layer_000/SelfAttention/relative_attention_bias The initialzer will guess the input and output dimensions based on dimension order.\nWARNING:tensorflow:Using default tf glorot_uniform_initializer for variable decoder/block_000/layer_000/SelfAttention/relative_attention_bias The initialzer will guess the input and output dimensions based on dimension order.\nWARNING:tensorflow:Using default tf glorot_uniform_initializer for variable decoder/block_000/layer_000/SelfAttention/relative_attention_bias The initialzer will guess the input and output dimensions based on dimension order.\nINFO:tensorflow:Variable decoder/block_000/layer_000/SelfAttention/k size 24576 slice_size 24576 Shape[d_model=64, heads=384] \nINFO:tensorflow:Variable decoder/block_000/layer_000/SelfAttention/o size 24576 slice_size 24576 Shape[heads=384, d_model=64] \nINFO:tensorflow:Variable decoder/block_000/layer_000/SelfAttention/q size 24576 slice_size 24576 Shape[d_model=64, heads=384] \nINFO:tensorflow:Variable decoder/block_000/layer_000/SelfAttention/relative_attention_bias size 192 slice_size 192 Shape[heads=6, buckets=32] \nINFO:tensorflow:Variable decoder/block_000/layer_000/SelfAttention/v size 24576 slice_size 24576 Shape[d_model=64, heads=384] \nINFO:tensorflow:Variable decoder/block_000/layer_000/layer_norm/scale size 64 slice_size 64 Shape[d_model=64] \nINFO:tensorflow:Variable decoder/block_000/layer_001/EncDecAttention/k size 24576 slice_size 24576 Shape[d_model=64, heads=384] \nINFO:tensorflow:Variable decoder/block_000/layer_001/EncDecAttention/o size 24576 slice_size 24576 Shape[heads=384, d_model=64] \nINFO:tensorflow:Variable decoder/block_000/layer_001/EncDecAttention/q size 24576 slice_size 24576 Shape[d_model=64, heads=384] \nINFO:tensorflow:Variable decoder/block_000/layer_001/EncDecAttention/v size 24576 slice_size 24576 Shape[d_model=64, heads=384] \nINFO:tensorflow:Variable decoder/block_000/layer_001/layer_norm/scale size 64 slice_size 64 Shape[d_model=64] \nINFO:tensorflow:Variable decoder/block_000/layer_002/DenseReluDense/wi/kernel size 16384 slice_size 16384 Shape[d_model=64, d_ff=256] \nINFO:tensorflow:Variable decoder/block_000/layer_002/DenseReluDense/wo/kernel size 16384 slice_size 16384 Shape[d_ff=256, d_model=64] \nINFO:tensorflow:Variable decoder/block_000/layer_002/layer_norm/scale size 64 slice_size 64 Shape[d_model=64] \nINFO:tensorflow:Variable decoder/final_layer_norm/scale size 64 slice_size 64 Shape[d_model=64] \nINFO:tensorflow:Variable encoder/block_000/layer_000/SelfAttention/k size 24576 slice_size 24576 Shape[d_model=64, heads=384] \nINFO:tensorflow:Variable encoder/block_000/layer_000/SelfAttention/o size 24576 slice_size 24576 Shape[heads=384, d_model=64] \nINFO:tensorflow:Variable encoder/block_000/layer_000/SelfAttention/q size 24576 slice_size 24576 Shape[d_model=64, heads=384] \nINFO:tensorflow:Variable encoder/block_000/layer_000/SelfAttention/relative_attention_bias size 192 slice_size 192 Shape[heads=6, buckets=32] \nINFO:tensorflow:Variable encoder/block_000/layer_000/SelfAttention/v size 24576 slice_size 24576 Shape[d_model=64, heads=384] \nINFO:tensorflow:Variable 
encoder/block_000/layer_000/layer_norm/scale size 64 slice_size 64 Shape[d_model=64] \nINFO:tensorflow:Variable encoder/block_000/layer_001/DenseReluDense/wi/kernel size 16384 slice_size 16384 Shape[d_model=64, d_ff=256] \nINFO:tensorflow:Variable encoder/block_000/layer_001/DenseReluDense/wo/kernel size 16384 slice_size 16384 Shape[d_ff=256, d_model=64] \nINFO:tensorflow:Variable encoder/block_000/layer_001/layer_norm/scale size 64 slice_size 64 Shape[d_model=64] \nINFO:tensorflow:Variable encoder/final_layer_norm/scale size 64 slice_size 64 Shape[d_model=64] \nINFO:tensorflow:Variable shared/embedding size 270336 slice_size 270336 Shape[vocab=4224, d_model=64] \nINFO:tensorflow:Trainable Variables count: 26 Total size: 631616 Total slice_size: 631616 \nINFO:tensorflow:All Variables count: 26 Total size: 631616 Total slice_size: 631616 \nINFO:tensorflow:Counters:\neinsum: 4.79e+08\neinsum_unique: 4.79e+08\noutput: 2.55e+07\n output/AddOperation: 4.74e+06\n output/BinaryOpWithBroadcasting: 3.29e+05\n output/Constant: 1.97e+05\n output/EinsumOperation: 5.37e+06\n output/ImportOperation: 268\n output/MinMaxOperation: 1.97e+05\n output/OneHotOperation: 6.36e+06\n output/RangeOperation: 512\n output/ReduceOperation: 1.1e+04\n output/ReshapeOperation: 1.53e+06\n output/ScalarAddOperation: 2.64e+05\n output/ScalarMultiplyOperation: 6.08e+05\n output/ShiftOperation: 256\n output/SlicewiseOperation: 3.93e+06\n output/StopGradient: 1.18e+06\n output/Variable: 6.32e+05\n output/WhileLoopOperation: 1.97e+05\noutput_unique: 2.55e+07\n output_unique/AddOperation: 4.74e+06\n output_unique/BinaryOpWithBroadcasting: 3.29e+05\n output_unique/Constant: 1.97e+05\n output_unique/EinsumOperation: 5.37e+06\n output_unique/ImportOperation: 268\n output_unique/MinMaxOperation: 1.97e+05\n output_unique/OneHotOperation: 6.36e+06\n output_unique/RangeOperation: 512\n output_unique/ReduceOperation: 1.1e+04\n output_unique/ReshapeOperation: 1.53e+06\n output_unique/ScalarAddOperation: 2.64e+05\n output_unique/ScalarMultiplyOperation: 6.08e+05\n output_unique/ShiftOperation: 256\n output_unique/SlicewiseOperation: 3.93e+06\n output_unique/StopGradient: 1.18e+06\n output_unique/Variable: 6.32e+05\n output_unique/WhileLoopOperation: 1.97e+05\nvariables: 6.32e+05\n variables/trainable: 6.32e+05\nWARNING:tensorflow:From /home/husein/.local/lib/python3.6/site-packages/tensorflow_core/python/ops/array_ops.py:1475: where (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse tf.where in 2.0, which has the same broadcast rule as np.where\nINFO:tensorflow:Done calling model_fn.\nWARNING:tensorflow:From /home/husein/.local/lib/python3.6/site-packages/tensorflow_core/python/saved_model/signature_def_utils_impl.py:201: build_tensor_info (from tensorflow.python.saved_model.utils_impl) is deprecated and will be removed in a future version.\nInstructions for updating:\nThis function will only be available through the v1 compatibility library as tf.compat.v1.saved_model.utils.build_tensor_info or tf.compat.v1.saved_model.build_tensor_info.\nINFO:tensorflow:Signatures INCLUDED in export for Classify: None\nINFO:tensorflow:Signatures INCLUDED in export for Regress: None\nINFO:tensorflow:Signatures INCLUDED in export for Predict: ['serving_default']\nINFO:tensorflow:Signatures INCLUDED in export for Train: None\nINFO:tensorflow:Signatures INCLUDED in export for Eval: None\nINFO:tensorflow:Restoring parameters from 
t5-3x-super-tiny-true-case-4k/model.ckpt-1050000\nINFO:tensorflow:Assets added to graph.\nINFO:tensorflow:No assets to write.\nINFO:tensorflow:SavedModel written to: output/temp-b'1637817325'/saved_model.pb\n" ], [ "config = tf.ConfigProto()\nconfig.allow_soft_placement = True\nsess = tf.Session(config = config)\nmeta_graph_def = tf.saved_model.loader.load(\n sess,\n [tf.saved_model.tag_constants.SERVING],\n out)", "WARNING:tensorflow:From <ipython-input-15-5b89b6a20c22>:7: load (from tensorflow.python.saved_model.loader_impl) is deprecated and will be removed in a future version.\nInstructions for updating:\nThis function will only be available through the v1 compatibility library as tf.compat.v1.saved_model.loader.load or tf.compat.v1.saved_model.load. There will be a new function for importing SavedModels in Tensorflow 2.0.\nINFO:tensorflow:Restoring parameters from output/1637817325/variables/variables\n" ], [ "saver = tf.train.Saver(tf.trainable_variables())\nsaver.save(sess, '3x-super-tiny-true-case-4k/model.ckpt')", "_____no_output_____" ], [ "strings = [\n n.name\n for n in tf.get_default_graph().as_graph_def().node\n if ('encoder' in n.op\n or 'decoder' in n.name\n or 'shared' in n.name\n or 'inputs' in n.name\n or 'output' in n.name\n or 'SentenceTokenizer' in n.name\n or 'self/Softmax' in n.name)\n and 'adam' not in n.name\n and 'Assign' not in n.name\n]", "_____no_output_____" ], [ "def freeze_graph(model_dir, output_node_names):\n\n if not tf.gfile.Exists(model_dir):\n raise AssertionError(\n \"Export directory doesn't exists. Please specify an export \"\n 'directory: %s' % model_dir\n )\n\n checkpoint = tf.train.get_checkpoint_state(model_dir)\n input_checkpoint = checkpoint.model_checkpoint_path\n\n absolute_model_dir = '/'.join(input_checkpoint.split('/')[:-1])\n output_graph = absolute_model_dir + '/frozen_model.pb'\n clear_devices = True\n with tf.Session(graph = tf.Graph()) as sess:\n saver = tf.train.import_meta_graph(\n input_checkpoint + '.meta', clear_devices = clear_devices\n )\n saver.restore(sess, input_checkpoint)\n output_graph_def = tf.graph_util.convert_variables_to_constants(\n sess,\n tf.get_default_graph().as_graph_def(),\n output_node_names,\n )\n with tf.gfile.GFile(output_graph, 'wb') as f:\n f.write(output_graph_def.SerializeToString())\n print('%d ops in the final graph.' 
% len(output_graph_def.node))", "_____no_output_____" ], [ "freeze_graph('3x-super-tiny-true-case-4k', strings)", "INFO:tensorflow:Restoring parameters from 3x-super-tiny-true-case-4k/model.ckpt\nWARNING:tensorflow:From <ipython-input-18-504c79665720>:23: convert_variables_to_constants (from tensorflow.python.framework.graph_util_impl) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse `tf.compat.v1.graph_util.convert_variables_to_constants`\nWARNING:tensorflow:From /home/husein/.local/lib/python3.6/site-packages/tensorflow_core/python/framework/graph_util_impl.py:277: extract_sub_graph (from tensorflow.python.framework.graph_util_impl) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse `tf.compat.v1.graph_util.extract_sub_graph`\nINFO:tensorflow:Froze 42 variables.\nINFO:tensorflow:Converted 42 variables to const ops.\n1504 ops in the final graph.\n" ], [ "import struct\n\nunknown = b'\\xff\\xff\\xff\\xff'\n\ndef load_graph(frozen_graph_filename):\n with tf.gfile.GFile(frozen_graph_filename, 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n \n for node in graph_def.node:\n \n if node.op == 'RefSwitch':\n node.op = 'Switch'\n for index in xrange(len(node.input)):\n if 'moving_' in node.input[index]:\n node.input[index] = node.input[index] + '/read'\n elif node.op == 'AssignSub':\n node.op = 'Sub'\n if 'use_locking' in node.attr: del node.attr['use_locking']\n elif node.op == 'AssignAdd':\n node.op = 'Add'\n if 'use_locking' in node.attr: del node.attr['use_locking']\n elif node.op == 'Assign':\n node.op = 'Identity'\n if 'use_locking' in node.attr: del node.attr['use_locking']\n if 'validate_shape' in node.attr: del node.attr['validate_shape']\n if len(node.input) == 2:\n node.input[0] = node.input[1]\n del node.input[1]\n \n if 'Reshape/shape' in node.name or 'Reshape_1/shape' in node.name:\n b = node.attr['value'].tensor.tensor_content\n arr_int = [int.from_bytes(b[i:i + 4], 'little') for i in range(0, len(b), 4)]\n if len(arr_int):\n arr_byte = [unknown] + [struct.pack('<i', i) for i in arr_int[1:]]\n arr_byte = b''.join(arr_byte)\n node.attr['value'].tensor.tensor_content = arr_byte\n \n if len(node.attr['value'].tensor.int_val):\n node.attr['value'].tensor.int_val[0] = -1\n \n with tf.Graph().as_default() as graph:\n tf.import_graph_def(graph_def)\n return graph", "_____no_output_____" ], [ "g = load_graph('3x-super-tiny-true-case-4k/frozen_model.pb')", "_____no_output_____" ], [ "i = g.get_tensor_by_name('import/inputs:0')\no = g.get_tensor_by_name('import/SelectV2_3:0')\ni, o", "_____no_output_____" ], [ "test_sess = tf.Session(graph = g)", "_____no_output_____" ], [ "import sentencepiece as spm\nsp_model = spm.SentencePieceProcessor()\nsp_model.Load(DEFAULT_SPM_PATH)", "_____no_output_____" ], [ "string1 = 'FORMAT TERBUKA. 
FORMAT TERBUKA IALAH SUATU FORMAT FAIL UNTUK TUJUAN MENYIMPAN DATA DIGITAL, DI MANA FORMAT INI DITAKRIFKAN BERDASARKAN SPESIFIKASI YANG DITERBITKAN DAN DIKENDALIKAN PERTUBUHAN PIAWAIAN , SERTA BOLEH DIGUNA PAKAI KHALAYAK RAMAI .'\nstring2 = 'Husein ska mkn ayam dkat kampng Jawa'", "_____no_output_____" ], [ "strings = [string1, string2]\n[f'kes benar: {s}' for s in strings]", "_____no_output_____" ], [ "%%time\n\no_ = test_sess.run(o, feed_dict = {i: [f'kes benar: {s}' for s in strings]})\no_.shape", "CPU times: user 1.8 s, sys: 437 ms, total: 2.24 s\nWall time: 1.09 s\n" ], [ "for k in range(len(o_)):\n print(k, sp_model.DecodeIds(o_[k].tolist()))", "0 Format terbuka. Format Terbuka ialah suatu format fail untuk tujuan menyimpan data digital, di mana format ini ditakrifkan berdasarkan spesifikasi yang diterbitkan dan dikendalikan Pertubuhan Piawaian, serta boleh digunakan pakai khalayak ramai.\n1 Husein ska mkn ayam dkat kampng jawa\n" ], [ "from tensorflow.tools.graph_transforms import TransformGraph", "_____no_output_____" ], [ "transforms = ['add_default_attributes',\n 'remove_nodes(op=Identity, op=CheckNumerics)',\n 'fold_batch_norms',\n 'fold_old_batch_norms',\n 'quantize_weights(minimum_size=1536000)',\n #'quantize_weights(fallback_min=-10240, fallback_max=10240)',\n 'strip_unused_nodes',\n 'sort_by_execution_order']", "_____no_output_____" ], [ "pb = '3x-super-tiny-true-case-4k/frozen_model.pb'\ninput_graph_def = tf.GraphDef()\nwith tf.gfile.FastGFile(pb, 'rb') as f:\n input_graph_def.ParseFromString(f.read())\n \ntransformed_graph_def = TransformGraph(input_graph_def, \n ['inputs'],\n ['SelectV2_3'], transforms)\n\nwith tf.gfile.GFile(f'{pb}.quantized', 'wb') as f:\n f.write(transformed_graph_def.SerializeToString())", "WARNING:tensorflow:From <ipython-input-31-b3476a15e9e3>:3: FastGFile.__init__ (from tensorflow.python.platform.gfile) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse tf.gfile.GFile.\n" ], [ "g = load_graph('3x-super-tiny-true-case-4k/frozen_model.pb.quantized')\ni = g.get_tensor_by_name('import/inputs:0')\no = g.get_tensor_by_name('import/SelectV2_3:0')\ni, o", "_____no_output_____" ], [ "test_sess = tf.InteractiveSession(graph = g)", "_____no_output_____" ], [ "file = '3x-super-tiny-true-case-4k/frozen_model.pb.quantized'\noutPutname = 'true-case/3x-super-tiny-t5-4k-quantized/model.pb'\nb2_bucket.upload_local_file(\n local_file=file,\n file_name=outPutname,\n file_infos=file_info,\n)", "_____no_output_____" ], [ "file = '3x-super-tiny-true-case-4k/frozen_model.pb'\noutPutname = 'true-case/3x-super-tiny-t5-4k/model.pb'\nb2_bucket.upload_local_file(\n local_file=file,\n file_name=outPutname,\n file_infos=file_info,\n)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
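The `load_graph` cell in the record above patches the `Reshape` shape constants so the frozen graph accepts a dynamic batch size: the int32 dimensions are stored as little-endian bytes in `tensor_content`, and the leading dimension is overwritten with `-1` (which packs to `b'\xff\xff\xff\xff'`). A minimal plain-Python sketch of that byte round trip, with no TensorFlow dependency (the helper name `patch_batch_dim` is illustrative, not from the notebook):

```python
import struct

def patch_batch_dim(tensor_content: bytes) -> bytes:
    # Decode the packed little-endian int32 dimensions stored in tensor_content.
    dims = [int.from_bytes(tensor_content[i:i + 4], "little", signed=True)
            for i in range(0, len(tensor_content), 4)]
    if not dims:
        return tensor_content
    # Overwrite the leading (batch) dimension with -1 and re-pack the bytes.
    dims[0] = -1
    return b"".join(struct.pack("<i", d) for d in dims)

# A stored shape of (32, 256, 512) becomes (-1, 256, 512).
original = b"".join(struct.pack("<i", d) for d in (32, 256, 512))
patched = patch_batch_dim(original)
assert patched[:4] == b"\xff\xff\xff\xff"   # -1 encodes as the 0xFFFFFFFF marker
assert patched[4:] == original[4:]          # trailing dimensions are untouched
```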
cba22daf41ce2d6cf53cfa30b1097ae40b5aacf7
96,769
ipynb
Jupyter Notebook
venture_funding_with_deep_learning.ipynb
shivangiuw/binary_classification_neural_network
a15256236e6ff0b30b0b4fd3bd5cbd004c59c361
[ "MIT" ]
null
null
null
venture_funding_with_deep_learning.ipynb
shivangiuw/binary_classification_neural_network
a15256236e6ff0b30b0b4fd3bd5cbd004c59c361
[ "MIT" ]
null
null
null
venture_funding_with_deep_learning.ipynb
shivangiuw/binary_classification_neural_network
a15256236e6ff0b30b0b4fd3bd5cbd004c59c361
[ "MIT" ]
null
null
null
32.758632
252
0.374872
[ [ [ "# Venture Funding with Deep Learning\n\n## Steps:\n* Prepare the data for use on a neural network model.\n\n* Compile and evaluate a binary classification model using a neural network.\n\n* Optimize the neural network model.\n", "_____no_output_____" ] ], [ [ "# Imports\nimport pandas as pd\nfrom pathlib import Path\nimport tensorflow as tf\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.models import Sequential\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler,OneHotEncoder\nimport warnings\nwarnings.filterwarnings('ignore')", "_____no_output_____" ] ], [ [ "---\n\n## Prepare the data to be used on a neural network model", "_____no_output_____" ], [ "### Step 1: Read the `applicants_data.csv` file into a Pandas DataFrame. Review the DataFrame, looking for categorical variables that need to be encoded, as well as columns that define features and target variables. \n", "_____no_output_____" ] ], [ [ "# Read the applicants_data.csv file from the Resources folder into a Pandas DataFrame\napplicant_data_df = pd.read_csv(Path(\"./Resources/applicants_data.csv\"))\n\n# Review the DataFrame\napplicant_data_df.head()", "_____no_output_____" ], [ "# Review the data types associated with the columns\napplicant_data_df.dtypes\n", "_____no_output_____" ] ], [ [ "### Step 2: Drop the “EIN” (Employer Identification Number) and “NAME” columns from the DataFrame, because they are not relevant to the binary classification model.", "_____no_output_____" ] ], [ [ "# Drop the 'EIN' and 'NAME' columns from the DataFrame\napplicant_data_df = applicant_data_df.drop(columns= [\"EIN\", \"NAME\"])\n\n# Review the DataFrame\napplicant_data_df.head()", "_____no_output_____" ] ], [ [ "### Step 3: Encode the dataset’s categorical variables using `OneHotEncoder`, and then place the encoded variables into a new DataFrame.", "_____no_output_____" ] ], [ [ "# Create a list of categorical variables \ncategorical_variables = list(applicant_data_df.dtypes[applicant_data_df.dtypes==\"object\"].index)\n\n# Display the categorical variables list\ncategorical_variables", "_____no_output_____" ], [ "# Create a OneHotEncoder instance\nenc = OneHotEncoder(sparse=False)\n", "_____no_output_____" ], [ "# Encode the categorcal variables using OneHotEncoder\nencoded_data = enc.fit_transform(applicant_data_df[categorical_variables])\n", "_____no_output_____" ], [ "# Create a DataFrame with the encoded variables\nencoded_df = pd.DataFrame(encoded_data, columns= enc.get_feature_names(categorical_variables))\n\n# Review the DataFrame\nencoded_df.head()", "_____no_output_____" ] ], [ [ "### Step 4: Add the original DataFrame’s numerical variables to the DataFrame containing the encoded variables.\n", "_____no_output_____" ] ], [ [ "# Create a DataFrame with the columnns containing numerical variables from the original dataset\nnumerical_variables_df= applicant_data_df.drop(columns= categorical_variables)\nnumerical_variables_df.head()", "_____no_output_____" ], [ "# Add the numerical variables from the original DataFrame to the one-hot encoding DataFrame\nencoded_df = pd.concat((encoded_df, numerical_variables_df), axis =1)\n\n# Review the Dataframe\nencoded_df.head()", "_____no_output_____" ] ], [ [ "### Step 5: Using the preprocessed data, create the features (`X`) and target (`y`) datasets. The target dataset should be defined by the preprocessed DataFrame column “IS_SUCCESSFUL”. The remaining columns should define the features dataset. 
\n\n", "_____no_output_____" ] ], [ [ "# Define the target set y using the IS_SUCCESSFUL column\ny = encoded_df[\"IS_SUCCESSFUL\"]\n\n# Display a sample of y\ny[:5]\n", "_____no_output_____" ], [ "# Define features set X by selecting all columns but IS_SUCCESSFUL\nX = encoded_df.drop(columns= \"IS_SUCCESSFUL\")\n\n# Review the features DataFrame\nX.head()", "_____no_output_____" ] ], [ [ "### Step 6: Split the features and target sets into training and testing datasets.\n", "_____no_output_____" ] ], [ [ "# Split the preprocessed data into a training and testing dataset\n# Assign the function a random_state equal to 1\nX_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)\n", "_____no_output_____" ] ], [ [ "### Step 7: Use scikit-learn's `StandardScaler` to scale the features data.", "_____no_output_____" ] ], [ [ "# Create a StandardScaler instance\nscaler = StandardScaler()\n\n# Fit the scaler to the features training dataset\nX_scaler = scaler.fit(X_train)\n\n# Scale the features training and testing datasets\nX_train_scaled = X_scaler.transform(X_train)\nX_test_scaled = X_scaler.transform(X_test)\n", "_____no_output_____" ] ], [ [ "---\n\n## Compile and Evaluate a Binary Classification Model Using a Neural Network", "_____no_output_____" ], [ "### Step 1: Create a deep neural network by assigning the number of input features, the number of layers, and the number of neurons on each layer using Tensorflow’s Keras.\n\n* Starting with a two-hidden-layer model", "_____no_output_____" ] ], [ [ "# Define the number of inputs (features) to the model\nnumber_input_features = len(X_train.loc[0])\n\n# Review the number of features\nnumber_input_features", "_____no_output_____" ], [ "# Define the number of neurons in the output layer\nnumber_output_neurons = 1", "_____no_output_____" ], [ "# Define the number of hidden nodes for the first hidden layer\nhidden_nodes_layer1 = (number_input_features + number_output_neurons)//2\n\n# Review the number of hidden nodes in the first layer\nhidden_nodes_layer1", "_____no_output_____" ], [ "# Define the number of hidden nodes for the second hidden layer\nhidden_nodes_layer2 = (hidden_nodes_layer1 + number_output_neurons)//2\n\n# Review the number of hidden nodes in the second layer\nhidden_nodes_layer2\n", "_____no_output_____" ], [ "# Create the Sequential model instance\nnn = Sequential()", "_____no_output_____" ], [ "# Add the first hidden layer\nnn.add(Dense(units= hidden_nodes_layer1, activation= \"relu\", input_dim= number_input_features))", "_____no_output_____" ], [ "# Add the second hidden layer\nnn.add(Dense(units= hidden_nodes_layer2, activation= \"relu\"))", "_____no_output_____" ], [ "# Add the output layer to the model specifying the number of output neurons and activation function\nnn.add(Dense(units= number_output_neurons, activation= \"sigmoid\"))", "_____no_output_____" ], [ "# Display the Sequential model summary\nnn.summary()", "Model: \"sequential_5\"\n_________________________________________________________________\n Layer (type)                Output Shape              Param #   \n=================================================================\n dense_15 (Dense)            (None, 58)                6786      \n \n dense_16 (Dense)            (None, 29)                1711      \n \n dense_17 (Dense)            (None, 1)                 30        \n \n=================================================================\nTotal params: 8,527\nTrainable params: 8,527\nNon-trainable params: 0\n_________________________________________________________________\n" ] ], [ [ "### Step 2: Compile and fit the model using the `binary_crossentropy` loss 
function, the `adam` optimizer, and the `accuracy` evaluation metric.\n", "_____no_output_____" ] ], [ [ "# Compile the Sequential model\nnn.compile(loss= \"binary_crossentropy\", optimizer= \"adam\", metrics= [\"accuracy\"])", "_____no_output_____" ], [ "# Fit the model using 50 epochs and the training data\nmodel_1= nn.fit(X_train_scaled, y_train, epochs= 50, verbose= 0)", "_____no_output_____" ] ], [ [ "### Step 3: Evaluate the model using the test data to determine the model’s loss and accuracy.\n", "_____no_output_____" ] ], [ [ "# Evaluate the model loss and accuracy metrics using the evaluate method and the test data\nmodel_loss, model_accuracy = nn.evaluate(X_test_scaled, y_test, verbose=2)\n\n# Display the model loss and accuracy results\nprint(f\"Loss: {model_loss}, Accuracy: {model_accuracy}\")", "268/268 - 0s - loss: 0.5573 - accuracy: 0.7313 - 251ms/epoch - 936us/step\nLoss: 0.5572594404220581, Accuracy: 0.7313119769096375\n" ] ], [ [ "### Step 4: Save and export your model to an HDF5 file, and name the file `AlphabetSoup.h5`. \n", "_____no_output_____" ] ], [ [ "# Set the model's file path\nfile_path = Path(\"./Resources/AlphabetSoup.h5\")\n\n# Export your model to a HDF5 file\nnn.save(file_path)", "_____no_output_____" ] ], [ [ "---\n\n## Optimize the neural network model\n", "_____no_output_____" ], [ "### Step 1: To improve on first model’s predictive accuracy, we will try three models with different optimization techniques as following : \n1. Add more hidden layers.\n2. Adjust the input data by dropping different features columns to ensure that no variables or outliers confuse the model.\n3. Add more neurons (nodes) to a hidden layer.", "_____no_output_____" ], [ "### Alternative Model 1\n#### Optimizing the model by adding one more hidden layer(i.e. 
three hidden layers)", "_____no_output_____" ] ], [ [ "# Define the the number of inputs (features) to the model\nnumber_input_features = len(X_train.iloc[0])\n\n# Review the number of features\nnumber_input_features", "_____no_output_____" ], [ "# Define the number of neurons in the output layer\nnumber_output_neurons_A1 = 1", "_____no_output_____" ], [ "# Define the number of hidden nodes for the first hidden layer\nhidden_nodes_layer1_A1 = (number_input_features + number_output_neurons)//2\n\n# Review the number of hidden nodes in the first layer\nhidden_nodes_layer1_A1", "_____no_output_____" ], [ "# Define the number of hidden nodes for the second hidden layer\nhidden_nodes_layer2_A1 = (hidden_nodes_layer1_A1 + number_output_neurons)//2\n\n# Review the number hidden nodes in the second layer\nhidden_nodes_layer2_A1\n", "_____no_output_____" ], [ "# Define the number of hidden nodes for the third hidden layer\nhidden_nodes_layer3_A1 = (hidden_nodes_layer2_A1 + number_output_neurons)//2\n\n# Review the number hidden nodes in the second layer\nhidden_nodes_layer3_A1\n", "_____no_output_____" ], [ "# Create the Sequential model instance\nnn_A1 = Sequential()", "_____no_output_____" ], [ "# First, Second and Third hidden layer\nnn_A1.add(Dense(units= hidden_nodes_layer1_A1, activation= \"relu\", input_dim= number_input_features))\nnn_A1.add(Dense(units= hidden_nodes_layer2_A1, activation= \"relu\"))\nnn_A1.add(Dense(units= hidden_nodes_layer3_A1, activation= \"relu\"))\n\n\n# Output layer\nnn_A1.add(Dense(units= number_output_neurons, activation= \"sigmoid\"))\n\n\n# Check the structure of the model\nnn_A1.summary()", "Model: \"sequential_1\"\n_________________________________________________________________\n Layer (type) Output Shape Param # \n=================================================================\n dense_3 (Dense) (None, 58) 6786 \n \n dense_4 (Dense) (None, 29) 1711 \n \n dense_5 (Dense) (None, 15) 450 \n \n dense_6 (Dense) (None, 1) 16 \n \n=================================================================\nTotal params: 8,963\nTrainable params: 8,963\nNon-trainable params: 0\n_________________________________________________________________\n" ], [ "# Compile the Sequential model\nnn_A1.compile(loss=\"binary_crossentropy\", optimizer= \"adam\", metrics= [\"accuracy\"])\n", "_____no_output_____" ], [ "# Fit the model using 50 epochs and the training data\nfit_model_A1 = nn_A1.fit(X_train_scaled, y_train, epochs= 50, verbose=0)\n", "_____no_output_____" ] ], [ [ "### Alternative Model 2\n#### Adjust the input data by dropping different features columns to ensure that no variables or outliers confuse the model.", "_____no_output_____" ] ], [ [ "applicant_data_reduced_df = applicant_data_df.drop(columns= [\"STATUS\",\"SPECIAL_CONSIDERATIONS\"])\napplicant_data_reduced_df.head()", "_____no_output_____" ], [ "categorical_variables_reduced = list(applicant_data_reduced_df.dtypes[applicant_data_reduced_df.dtypes==\"object\"].index)\nnumerical_variables_reduced= applicant_data_reduced_df.drop(columns= categorical_variables_reduced)", "_____no_output_____" ], [ "enc = OneHotEncoder(sparse=False)\nencoded_data_reduced = enc.fit_transform(applicant_data_reduced_df[categorical_variables_reduced])\nencoded_reduced_df = pd.DataFrame(encoded_data_reduced, columns= enc.get_feature_names(categorical_variables_reduced))\nencoded_reduced_df.head()", "_____no_output_____" ], [ "encoded_reduced_df= pd.concat((encoded_reduced_df, numerical_variables_reduced), axis= 1)\nencoded_reduced_df.head()", 
"_____no_output_____" ], [ "y_red = encoded_reduced_df[\"IS_SUCCESSFUL\"]\nX_red = encoded_reduced_df.drop(columns= \"IS_SUCCESSFUL\")\n", "_____no_output_____" ], [ "X_red_train, X_red_test, y_red_train, y_red_test= train_test_split(X_red, y_red, random_state=1)", "_____no_output_____" ], [ "sscaler= StandardScaler()\nX_sscaler= sscaler.fit(X_red_train)\nX_red_train_scaled= X_sscaler.transform(X_red_train)\nX_red_test_scaled= X_sscaler.transform(X_red_test)\n", "_____no_output_____" ], [ "# Define the the number of inputs (features) to the model\nnumber_input_features_A2 = len(X_red_train.iloc[0])\n\n# Review the number of features\nnumber_input_features_A2", "_____no_output_____" ], [ "# Define the number of neurons in the output layer\nnumber_output_neurons_A2 = 1", "_____no_output_____" ], [ "# Define the number of hidden nodes for the first hidden layer\nhidden_nodes_layer1_A2 = (number_input_features_A2+number_output_neurons_A2)//2\nhidden_nodes_layer1_A2", "_____no_output_____" ], [ "# Define the number of hidden nodes for the second hidden layer\nhidden_nodes_layer2_A2 = (hidden_nodes_layer1_A2 + number_output_neurons_A2)//2\nhidden_nodes_layer2_A2", "_____no_output_____" ], [ "# Define the number of hidden nodes for the third hidden layer\nhidden_nodes_layer3_A2 = (hidden_nodes_layer2_A2 + number_output_neurons_A2)//2\nhidden_nodes_layer3_A2", "_____no_output_____" ], [ "# Create the Sequential model instance\nnn_A2 = Sequential()", "_____no_output_____" ], [ "# First hidden layer\nnn_A2.add(Dense(units= hidden_nodes_layer1_A2, activation= \"relu\", input_dim= number_input_features_A2))\n# Second hidden layer\nnn_A2.add(Dense(units= hidden_nodes_layer2_A2, activation= \"relu\"))\n# Third hidden layer\nnn_A2.add(Dense(units= hidden_nodes_layer3_A2, activation= \"relu\"))\n\n\n# Output layer\nnn_A2.add(Dense(units= number_output_neurons_A2, activation= \"sigmoid\"))\n\n\n# Check the structure of the model\nnn_A2.summary()", "Model: \"sequential_2\"\n_________________________________________________________________\n Layer (type) Output Shape Param # \n=================================================================\n dense_7 (Dense) (None, 57) 6498 \n \n dense_8 (Dense) (None, 29) 1682 \n \n dense_9 (Dense) (None, 15) 450 \n \n dense_10 (Dense) (None, 1) 16 \n \n=================================================================\nTotal params: 8,646\nTrainable params: 8,646\nNon-trainable params: 0\n_________________________________________________________________\n" ], [ "# Compile the model\nnn_A2.compile(loss= \"binary_crossentropy\", optimizer= \"adam\", metrics= [\"accuracy\"])", "_____no_output_____" ], [ "# Fit the model\nfit_model_A2 = nn_A2.fit(X_red_train_scaled, y_red_train, epochs= 50, verbose=0)\n", "_____no_output_____" ] ], [ [ "### Alternative Model 3\n#### Increasing the number of nodes in hidden layers", "_____no_output_____" ] ], [ [ "# Define the the number of inputs (features) to the model\nnumber_input_features_A3= 113\n# Define the the number of output to the model\nnumber_output_neurons_A3=1", "_____no_output_____" ], [ "# Define the number of hidden nodes adding 2 more nodes\nhidden_nodes_layer1_A3 = ((number_input_features_A3 + number_output_neurons_A3)//2)+2\nhidden_nodes_layer1_A3", "_____no_output_____" ], [ "# Define the number of nodes in Second hidden layer adding 2 more nodes\nhidden_nodes_layer2_A3 = ((hidden_nodes_layer1_A3 + number_output_neurons_A3)//2)+ 2\nhidden_nodes_layer2_A3", "_____no_output_____" ], [ "# Define the number of nodes in Third 
hidden layer adding 2 more nodes\nhidden_nodes_layer3_A3 = ((hidden_nodes_layer2_A3 + number_output_neurons_A3)//2)+2\nhidden_nodes_layer3_A3", "_____no_output_____" ], [ "# Create the Sequential model instance\nnn_A3 = Sequential()\n# First hidden layer\nnn_A3.add(Dense(units= hidden_nodes_layer1_A3, activation= \"relu\", input_dim= number_input_features_A3))\n# Second hidden layer\nnn_A3.add(Dense(units= hidden_nodes_layer2_A3, activation= \"relu\"))\n# Third hidden layer\nnn_A3.add(Dense(units= hidden_nodes_layer3_A3, activation= \"relu\"))\n\n\n# Output layer\nnn_A3.add(Dense(units= number_output_neurons_A3, activation= \"sigmoid\"))\n\n\n# Check the structure of the model\nnn_A3.summary()", "Model: \"sequential_3\"\n_________________________________________________________________\n Layer (type) Output Shape Param # \n=================================================================\n dense_11 (Dense) (None, 59) 6726 \n \n dense_12 (Dense) (None, 32) 1920 \n \n dense_13 (Dense) (None, 18) 594 \n \n dense_14 (Dense) (None, 1) 19 \n \n=================================================================\nTotal params: 9,259\nTrainable params: 9,259\nNon-trainable params: 0\n_________________________________________________________________\n" ], [ "# Compile the model\nnn_A3.compile(loss= \"binary_crossentropy\", optimizer= \"adam\", metrics= [\"accuracy\"])", "_____no_output_____" ], [ "# Fit the model\nfit_model_A3 = nn_A3.fit(X_red_train_scaled, y_red_train, epochs= 150, verbose=0)\n", "_____no_output_____" ] ], [ [ "### Step 2: After finishing your models, display the accuracy scores achieved by each model, and compare the results.", "_____no_output_____" ] ], [ [ "print(\"Original Model Results\")\n\n# Evaluate the model loss and accuracy metrics using the evaluate method and the test data\nmodel_loss, model_accuracy = nn.evaluate(X_test_scaled, y_test, verbose=2)\n\n# Display the model loss and accuracy results\nprint(f\"Loss: {model_loss}, Accuracy: {model_accuracy}\")", "Original Model Results\n268/268 - 0s - loss: 0.5659 - accuracy: 0.7318 - 194ms/epoch - 723us/step\nLoss: 0.5659148693084717, Accuracy: 0.7317784428596497\n" ], [ "print(\"Alternative Model 1 Results\")\n\n# Evaluate the model loss and accuracy metrics using the evaluate method and the test data\nmodel_loss, model_accuracy = nn_A1.evaluate(X_test_scaled, y_test, verbose=2)\n\n# Display the model loss and accuracy results\nprint(f\"Loss: {model_loss}, Accuracy: {model_accuracy}\")", "Alternative Model 1 Results\n268/268 - 0s - loss: 0.5596 - accuracy: 0.7306 - 181ms/epoch - 676us/step\nLoss: 0.559639573097229, Accuracy: 0.7306122183799744\n" ], [ "print(\"Alternative Model 2 Results\")\n\n# Evaluate the model loss and accuracy metrics using the evaluate method and the test data\nmodel_loss, model_accuracy = nn_A2.evaluate(X_red_test_scaled, y_red_test, verbose=2)\n\n# Display the model loss and accuracy results\nprint(f\"Loss: {model_loss}, Accuracy: {model_accuracy}\")", "Alternative Model 2 Results\n268/268 - 0s - loss: 0.5526 - accuracy: 0.7300 - 188ms/epoch - 701us/step\nLoss: 0.5526419281959534, Accuracy: 0.7300291657447815\n" ], [ "print(\"Alternative Model 3 Results\")\n\n# Evaluate the model loss and accuracy metrics using the evaluate method and the test data\nmodel_loss, model_accuracy = nn_A3.evaluate(X_red_test_scaled, y_red_test, verbose=2)\n\n# Display the model loss and accuracy results\nprint(f\"Loss: {model_loss}, Accuracy: {model_accuracy}\")", "Alternative Model 3 Results\n268/268 - 0s - loss: 0.5720 
- accuracy: 0.7293 - 200ms/epoch - 747us/step\nLoss: 0.5720489025115967, Accuracy: 0.7293294668197632\n" ] ], [ [ "### Step 3: Save each of your alternative models as an HDF5 file.\n", "_____no_output_____" ] ], [ [ "# Set the model's file path\nfile_path = Path(\"./Resources/model_A1.h5\")\n\n# Export your model to a HDF5 file\nnn_A1.save(file_path)", "_____no_output_____" ], [ "# Set the file path for the second alternative model\nfile_path =Path(\"./Resources/model_A2.h5\")\n\n# Export your model to a HDF5 file\nnn_A2.save(file_path)\n", "_____no_output_____" ], [ "# Set the file path for the third alternative model\nfile_path =Path(\"./Resources/model_A3.h5\")\n\n# Export your model to a HDF5 file\nnn_A3.save(file_path)\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
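The venture-funding record above spreads a single pattern over many small cells: one-hot encode the categoricals, scale with statistics fitted on the training split, then fit a small Keras binary classifier. A condensed, self-contained sketch of that pipeline on a tiny synthetic frame follows; the column names are made up, and the `sparse=False` / `get_feature_names` spellings mirror the older scikit-learn API used in the notebook (newer releases use `sparse_output` and `get_feature_names_out`):

```python
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Sequential

# Tiny synthetic stand-in for the applicant data (columns are illustrative).
df = pd.DataFrame({
    "CATEGORY": np.random.choice(["A", "B", "C"], 200),
    "ASK_AMT": np.random.randint(1_000, 50_000, 200),
    "IS_SUCCESSFUL": np.random.randint(0, 2, 200),
})

# One-hot encode the categorical column and re-attach the numeric one.
enc = OneHotEncoder(sparse=False)
encoded = pd.DataFrame(enc.fit_transform(df[["CATEGORY"]]),
                       columns=enc.get_feature_names(["CATEGORY"]))
X = pd.concat([encoded, df[["ASK_AMT"]]], axis=1)
y = df["IS_SUCCESSFUL"]

# Split, then scale with statistics fitted on the training split only.
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)
scaler = StandardScaler().fit(X_train)
X_train_s, X_test_s = scaler.transform(X_train), scaler.transform(X_test)

# Two hidden layers sized with the same (inputs + outputs) // 2 heuristic.
n_in = X.shape[1]
model = Sequential([
    Dense((n_in + 1) // 2, activation="relu", input_dim=n_in),
    Dense(((n_in + 1) // 2 + 1) // 2, activation="relu"),
    Dense(1, activation="sigmoid"),
])
model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
model.fit(X_train_s, y_train, epochs=5, verbose=0)
loss, acc = model.evaluate(X_test_s, y_test, verbose=0)
print(f"loss={loss:.3f} accuracy={acc:.3f}")
```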
cba2325e7ccc0bc2aba5a9012b843ec1641a7ed1
36,962
ipynb
Jupyter Notebook
app/prediction/cwa_prediction/score_prediction/score_prediction.ipynb
Sirlord-Sen/timetable-AI-service
ad3cc25ebafb64bb23f4649a7ab0b8f678bf9cae
[ "Apache-2.0" ]
null
null
null
app/prediction/cwa_prediction/score_prediction/score_prediction.ipynb
Sirlord-Sen/timetable-AI-service
ad3cc25ebafb64bb23f4649a7ab0b8f678bf9cae
[ "Apache-2.0" ]
null
null
null
app/prediction/cwa_prediction/score_prediction/score_prediction.ipynb
Sirlord-Sen/timetable-AI-service
ad3cc25ebafb64bb23f4649a7ab0b8f678bf9cae
[ "Apache-2.0" ]
null
null
null
65.303887
17,374
0.68584
[ [ [ "import os\r\nimport os.path as path\r\nimport numpy as np\r\nimport pandas as pd \r\nimport matplotlib.pyplot as plt \r\nfrom tensorflow.keras import layers, models, optimizers, regularizers\r\nfrom tensorflow.keras.models import load_model", "_____no_output_____" ], [ "current_dir = os.path.join(os.getcwd())\r\nfile = os.path.join(path.dirname(path.dirname(current_dir)), \"generate_data\\data_cwa.csv\")\r\nmyData = pd.read_csv(file, delimiter=',', usecols=['cwa','credit','time','difficulty', 'score'])\r\nmy_data_copy = myData\r\nmyData.shape", "_____no_output_____" ], [ "myData[\"score\"] = myData[\"score\"].values / 100\r\nmyData[\"cwa\"] = myData[\"cwa\"].values / 100\r\nmyData[\"credit\"] = myData[\"credit\"].values / 10\r\nmyData [\"difficulty\"] = myData['difficulty'].values / 5\r\nmyData[\"time\"] = myData[\"time\"].values / 6", "_____no_output_____" ], [ "df = pd.DataFrame(myData)\r\ndf = df.sample(frac=1)\r\n\r\nmyData = df\r\n", "_____no_output_____" ], [ "myData", "_____no_output_____" ], [ "targets = myData[['score']].values\r\nmyData.drop(('score'), axis=1, inplace=True)\r\ndata = myData.values\r\n\r\nprint(targets.shape)\r\nprint(data.shape)", "(180346, 1)\n(180346, 4)\n" ], [ "num_train = int(0.5 * len(data)) \r\nnum_val = int(0.25 * len(data))\r\nnum_test = int(0.25 * len(data))", "_____no_output_____" ], [ "train_data = data[0 : num_train]\r\ntest_data = data[num_train: num_train + num_test]\r\nval_data = data[num_train + num_test:]\r\n\r\ntrain_targets = targets[0 : num_train]\r\ntest_targets = targets[num_train: num_train + num_test]\r\nval_targets = targets[num_train + num_test:]\r\n\r\nprint(len(train_data) + len(test_data) + len(val_data))\r\nprint(len(train_targets) + len(test_targets) + len(val_targets))", "180346\n180346\n" ], [ "model = models.Sequential()\r\nmodel.add(layers.Dense(1, activation=\"relu\",input_shape=(train_data.shape[1],)))\r\n# model.add(layers.Dense(1, activation=\"relu\"))\r\n# model.add(layers.Dropout(0.5))\r\n# model.add(layers.Dense(1, activation=\"relu\", kernel_regularizer=regularizers.l2(0.01)))\r\n# model.add(layers.Dropout(0.5))\r\nmodel.add(layers.Dense(1))\r\n\r\nmodel.summary()", "Model: \"sequential\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ndense (Dense) (None, 1) 5 \n_________________________________________________________________\ndense_1 (Dense) (None, 1) 2 \n=================================================================\nTotal params: 7\nTrainable params: 7\nNon-trainable params: 0\n_________________________________________________________________\n" ], [ "model.compile(\r\n optimizer=optimizers.RMSprop(learning_rate=2e-4),\r\n loss=\"mse\",\r\n metrics=['mae']\r\n )", "_____no_output_____" ], [ "history = model.fit(train_data,\r\n train_targets,\r\n epochs=40,\r\n batch_size=512,\r\n validation_data=(val_data, val_targets)\r\n )", "Epoch 1/40\n177/177 [==============================] - 1s 2ms/step - loss: 0.1839 - mae: 0.3634 - val_loss: 0.1528 - val_mae: 0.3250\nEpoch 2/40\n177/177 [==============================] - 0s 1ms/step - loss: 0.1350 - mae: 0.3026 - val_loss: 0.1214 - val_mae: 0.2849\nEpoch 3/40\n177/177 [==============================] - 0s 1ms/step - loss: 0.1123 - mae: 0.2727 - val_loss: 0.1044 - val_mae: 0.2621\nEpoch 4/40\n177/177 [==============================] - 0s 1ms/step - loss: 0.0978 - mae: 0.2529 - val_loss: 0.0919 - val_mae: 0.2451\nEpoch 5/40\n177/177 
[==============================] - 0s 1ms/step - loss: 0.0860 - mae: 0.2367 - val_loss: 0.0808 - val_mae: 0.2295\nEpoch 6/40\n177/177 [==============================] - 0s 1ms/step - loss: 0.0755 - mae: 0.2213 - val_loss: 0.0707 - val_mae: 0.2144\nEpoch 7/40\n177/177 [==============================] - 0s 1ms/step - loss: 0.0659 - mae: 0.2065 - val_loss: 0.0616 - val_mae: 0.2000\nEpoch 8/40\n177/177 [==============================] - 0s 1ms/step - loss: 0.0572 - mae: 0.1924 - val_loss: 0.0535 - val_mae: 0.1863\nEpoch 9/40\n177/177 [==============================] - 0s 1ms/step - loss: 0.0497 - mae: 0.1791 - val_loss: 0.0464 - val_mae: 0.1733\nEpoch 10/40\n177/177 [==============================] - 0s 1ms/step - loss: 0.0430 - mae: 0.1666 - val_loss: 0.0402 - val_mae: 0.1612\nEpoch 11/40\n177/177 [==============================] - 0s 1ms/step - loss: 0.0372 - mae: 0.1549 - val_loss: 0.0348 - val_mae: 0.1499\nEpoch 12/40\n177/177 [==============================] - 0s 1ms/step - loss: 0.0322 - mae: 0.1440 - val_loss: 0.0301 - val_mae: 0.1395\nEpoch 13/40\n177/177 [==============================] - 0s 1ms/step - loss: 0.0280 - mae: 0.1340 - val_loss: 0.0262 - val_mae: 0.1298\nEpoch 14/40\n177/177 [==============================] - 0s 1ms/step - loss: 0.0243 - mae: 0.1248 - val_loss: 0.0228 - val_mae: 0.1210\nEpoch 15/40\n177/177 [==============================] - 0s 1ms/step - loss: 0.0212 - mae: 0.1165 - val_loss: 0.0200 - val_mae: 0.1131\nEpoch 16/40\n177/177 [==============================] - 0s 1ms/step - loss: 0.0186 - mae: 0.1090 - val_loss: 0.0176 - val_mae: 0.1061\nEpoch 17/40\n177/177 [==============================] - 0s 1ms/step - loss: 0.0165 - mae: 0.1024 - val_loss: 0.0156 - val_mae: 0.0999\nEpoch 18/40\n177/177 [==============================] - 0s 1ms/step - loss: 0.0147 - mae: 0.0966 - val_loss: 0.0140 - val_mae: 0.0944\nEpoch 19/40\n177/177 [==============================] - 0s 1ms/step - loss: 0.0132 - mae: 0.0916 - val_loss: 0.0126 - val_mae: 0.0897\nEpoch 20/40\n177/177 [==============================] - 0s 1ms/step - loss: 0.0119 - mae: 0.0871 - val_loss: 0.0115 - val_mae: 0.0855\nEpoch 21/40\n177/177 [==============================] - 0s 1ms/step - loss: 0.0109 - mae: 0.0832 - val_loss: 0.0105 - val_mae: 0.0817\nEpoch 22/40\n177/177 [==============================] - 0s 1ms/step - loss: 0.0100 - mae: 0.0799 - val_loss: 0.0097 - val_mae: 0.0787\nEpoch 23/40\n177/177 [==============================] - 0s 1ms/step - loss: 0.0093 - mae: 0.0772 - val_loss: 0.0091 - val_mae: 0.0764\nEpoch 24/40\n177/177 [==============================] - 0s 1ms/step - loss: 0.0088 - mae: 0.0753 - val_loss: 0.0087 - val_mae: 0.0746\nEpoch 25/40\n177/177 [==============================] - 0s 1ms/step - loss: 0.0084 - mae: 0.0737 - val_loss: 0.0083 - val_mae: 0.0732\nEpoch 26/40\n177/177 [==============================] - 0s 1ms/step - loss: 0.0082 - mae: 0.0725 - val_loss: 0.0080 - val_mae: 0.0720\nEpoch 27/40\n177/177 [==============================] - 0s 1ms/step - loss: 0.0079 - mae: 0.0714 - val_loss: 0.0078 - val_mae: 0.0710\nEpoch 28/40\n177/177 [==============================] - 0s 1ms/step - loss: 0.0077 - mae: 0.0705 - val_loss: 0.0076 - val_mae: 0.0702\nEpoch 29/40\n177/177 [==============================] - 0s 1ms/step - loss: 0.0075 - mae: 0.0697 - val_loss: 0.0074 - val_mae: 0.0694\nEpoch 30/40\n177/177 [==============================] - 0s 1ms/step - loss: 0.0074 - mae: 0.0690 - val_loss: 0.0073 - val_mae: 0.0688\nEpoch 31/40\n177/177 [==============================] - 0s 1ms/step - 
loss: 0.0072 - mae: 0.0684 - val_loss: 0.0072 - val_mae: 0.0682\nEpoch 32/40\n177/177 [==============================] - 0s 1ms/step - loss: 0.0071 - mae: 0.0679 - val_loss: 0.0071 - val_mae: 0.0677\nEpoch 33/40\n177/177 [==============================] - 0s 1ms/step - loss: 0.0070 - mae: 0.0675 - val_loss: 0.0070 - val_mae: 0.0674\nEpoch 34/40\n177/177 [==============================] - 0s 1ms/step - loss: 0.0069 - mae: 0.0672 - val_loss: 0.0069 - val_mae: 0.0670\nEpoch 35/40\n177/177 [==============================] - 0s 1ms/step - loss: 0.0069 - mae: 0.0669 - val_loss: 0.0068 - val_mae: 0.0667\nEpoch 36/40\n177/177 [==============================] - 0s 1ms/step - loss: 0.0068 - mae: 0.0666 - val_loss: 0.0068 - val_mae: 0.0665\nEpoch 37/40\n177/177 [==============================] - 0s 1ms/step - loss: 0.0068 - mae: 0.0664 - val_loss: 0.0067 - val_mae: 0.0663\nEpoch 38/40\n177/177 [==============================] - 0s 1ms/step - loss: 0.0067 - mae: 0.0662 - val_loss: 0.0067 - val_mae: 0.0661\nEpoch 39/40\n177/177 [==============================] - 0s 1ms/step - loss: 0.0067 - mae: 0.0660 - val_loss: 0.0067 - val_mae: 0.0659\nEpoch 40/40\n177/177 [==============================] - 0s 1ms/step - loss: 0.0067 - mae: 0.0658 - val_loss: 0.0066 - val_mae: 0.0657\n" ], [ "acc = history.history['mae']\r\nval_acc = history.history['val_mae']\r\nloss = history.history['loss']\r\nval_loss = history.history['val_loss']\r\nepochs = range(1, len(acc) + 1)\r\n# \"bo\" is for \"blue dot\"\r\nplt.plot(epochs, loss, 'bo', label='Training loss')\r\n# b is for \"solid blue line\"\r\nplt.plot(epochs, val_loss, 'b', label='Validation loss')\r\nplt.title('Training and validation loss')\r\nplt.xlabel('Epochs')\r\nplt.ylabel('Loss')\r\nplt.legend()\r\nplt.show()", "_____no_output_____" ], [ "model.save('score_prediction_2.h5')", "_____no_output_____" ], [ "test_mse_score, test_mae_score = model.evaluate(test_data, test_targets)", "1409/1409 [==============================] - 1s 776us/step - loss: 0.0067 - mae: 0.0660\n" ], [ "model = load_model('score_prediction_1.h5')", "_____no_output_____" ], [ "predicted = model.predict([[0.8081, 0.1, 0.458333, 0.2]])\r\npredicted ", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
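The score-prediction record above normalises each input column by a fixed constant (cwa/100, credit/10, time/6, difficulty/5) and the target by 100, which is why the final `model.predict` call is fed values such as `0.8081` and `0.458333`. A small helper sketch that makes those constants explicit; the function names are illustrative, and the column order assumes cwa, credit, time, difficulty as in that prediction cell:

```python
import numpy as np

def to_features(cwa, credit, time_hours, difficulty):
    # Mirror the training-time scaling: cwa/100, credit/10, time/6, difficulty/5.
    return np.array([[cwa / 100.0, credit / 10.0, time_hours / 6.0, difficulty / 5.0]])

def to_score(prediction):
    # The target column was divided by 100 before training, so undo that here.
    return float(prediction) * 100.0

# Hypothetical usage with a loaded Keras model named `model`:
#   raw = model.predict(to_features(80.81, 1, 2.75, 1))[0][0]
#   print(to_score(raw))
```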
cba2326aab8bdb67b99177a7212e3749bc190259
12,522
ipynb
Jupyter Notebook
Ecommerceapp/Report_Designer/.ipynb_checkpoints/Reports-checkpoint.ipynb
khrisRa/ecommerce-python-console
85353c7bd723221bc658b292c783b861821c774a
[ "MIT" ]
null
null
null
Ecommerceapp/Report_Designer/.ipynb_checkpoints/Reports-checkpoint.ipynb
khrisRa/ecommerce-python-console
85353c7bd723221bc658b292c783b861821c774a
[ "MIT" ]
null
null
null
Ecommerceapp/Report_Designer/.ipynb_checkpoints/Reports-checkpoint.ipynb
khrisRa/ecommerce-python-console
85353c7bd723221bc658b292c783b861821c774a
[ "MIT" ]
null
null
null
99.380952
8,728
0.816723
[ [ [ "import sqlite3\nimport pandas as pd\nimport sqlalchemy\nfrom tabulate import tabulate\nfrom matplotlib.pyplot import hist\nfrom matplotlib import pyplot as plt\nimport numpy as np", "_____no_output_____" ], [ "engine = sqlalchemy.create_engine('sqlite:///ecom.db')", "_____no_output_____" ], [ "#Sales Report\ndf = pd.read_sql('SELECT desc as ITEM, price as \"Unit Price\", tag as Category, qty as Quantity, strftime(\"%Y %m %d\", date) as \"Sale Date\" FROM cart WHERE status = \"C\" ORDER BY date DESC', engine)\ndf2 = df.set_index('ITEM')\ndf2[\"Line Total\"] = df2[\"Unit Price\"]*df2[\"Quantity\"]\ndf2.loc['Total'] = df2.sum(numeric_only=True)\ndf2.fillna('', inplace=True)\nprint(tabulate(df2, headers='keys', tablefmt='psql'))\ndf3 = pd.read_sql('SELECT desc as ITEM, price as \"Unit Price\", tag as Category, qty as Quantity, strftime(\"%Y %m %d\", date) as \"Sale Date\" FROM cart WHERE status = \"C\" ORDER BY Quantity DESC', engine)\ndf4 = df3.head()\nfig, ax =plt.subplots(1,1)\nprint(ax.hist(df4.ITEM, weights=df4.Quantity))\nax.set_title(\"Top Sales\")\nax.set_xlabel('ITEMS')\nax.set_ylabel('Quantity')\n", "+--------------+--------------+------------+------------+-------------+--------------+\n| ITEM | Unit Price | Category | Quantity | Sale Date | Line Total |\n|--------------+--------------+------------+------------+-------------+--------------|\n| Tuna | 1.17 | CANNED | 1 | 2021 11 23 | 1.17 |\n| Cumin | 4.17 | SPICE | 2 | 2021 11 23 | 8.34 |\n| Safety shoes | 85.98 | SHOES | 2 | 2021 11 18 | 171.96 |\n| Air force2 | 49.44 | SHOES | 3 | 2021 11 18 | 148.32 |\n| Total | 140.76 | | 8 | | 329.79 |\n+--------------+--------------+------------+------------+-------------+--------------+\n(array([3., 0., 0., 2., 0., 0., 2., 0., 0., 1.]), array([0. , 0.3, 0.6, 0.9, 1.2, 1.5, 1.8, 2.1, 2.4, 2.7, 3. ]), <BarContainer object of 10 artists>)\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code" ] ]
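The report record above draws the top-sellers chart with `ax.hist(df4.ITEM, weights=df4.Quantity)`; since the x-axis is categorical, a bar chart is the more idiomatic matplotlib call and avoids histogram binning semantics. A sketch using the quantities from the report's own output (the inline frame stands in for the notebook's `df4`):

```python
import pandas as pd
from matplotlib import pyplot as plt

# Stand-in for the notebook's df4; quantities copied from the report output above.
df4 = pd.DataFrame({"ITEM": ["Air force2", "Safety shoes", "Cumin", "Tuna"],
                    "Quantity": [3, 2, 2, 1]})

fig, ax = plt.subplots(1, 1)
ax.bar(df4["ITEM"], df4["Quantity"])  # one bar per item, height = units sold
ax.set_title("Top Sales")
ax.set_xlabel("ITEMS")
ax.set_ylabel("Quantity")
plt.show()
```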
cba2399d444bc4f494e3c5d9a7040a3bcfcfc015
231,051
ipynb
Jupyter Notebook
Chapter_9_Build_Nutritionist.ipynb
cxbxmxcx/EatNoEat
74113647f9d36cd12b3bc141bab6978444cb0281
[ "Apache-2.0" ]
3
2020-05-24T08:58:52.000Z
2021-12-07T15:26:48.000Z
Chapter_9_Build_Nutritionist.ipynb
cxbxmxcx/EatNoEat
74113647f9d36cd12b3bc141bab6978444cb0281
[ "Apache-2.0" ]
null
null
null
Chapter_9_Build_Nutritionist.ipynb
cxbxmxcx/EatNoEat
74113647f9d36cd12b3bc141bab6978444cb0281
[ "Apache-2.0" ]
null
null
null
87.419977
3,917
0.599686
[ [ [ "<a href=\"https://colab.research.google.com/github/cxbxmxcx/EatNoEat/blob/master/Chapter_9_Build_Nutritionist.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "Imports\n", "_____no_output_____" ] ], [ [ "import tensorflow as tf\nimport matplotlib.pyplot as plt\n\nimport numpy as np\nimport os\nimport time\nfrom PIL import Image\nimport pickle", "_____no_output_____" ] ], [ [ "Download Recipe Data", "_____no_output_____" ] ], [ [ "data_folder = 'data'\nrecipes_zip = tf.keras.utils.get_file('recipes.zip', \n origin = 'https://www.dropbox.com/s/i1hvs96mnahozq0/Recipes5k.zip?dl=1',\n extract = True)\nprint(recipes_zip)\ndata_folder = os.path.dirname(recipes_zip)\nos.remove(recipes_zip)\nprint(data_folder)", "Downloading data from https://www.dropbox.com/s/i1hvs96mnahozq0/Recipes5k.zip?dl=1\n164929536/164925728 [==============================] - 13s 0us/step\n/root/.keras/datasets/recipes.zip\n/root/.keras/datasets\n" ] ], [ [ "Setup Folder Paths", "_____no_output_____" ] ], [ [ "!dir /root/.keras/datasets\ndata_folder = data_folder + '/Recipes5k/'\nannotations_folder = data_folder + 'annotations/'\nimages_folder = data_folder + 'images/'\nprint(annotations_folder)\nprint(images_folder)", "Recipes5k\n/root/.keras/datasets/Recipes5k/annotations/\n/root/.keras/datasets/Recipes5k/images/\n" ], [ "%ls /root/.keras/datasets/Recipes5k/images/", "\u001b[0m\u001b[01;34mapple_pie\u001b[0m/ \u001b[01;34mdumplings\u001b[0m/ \u001b[01;34momelette\u001b[0m/\n\u001b[01;34mbaby_back_ribs\u001b[0m/ \u001b[01;34medamame\u001b[0m/ \u001b[01;34monion_rings\u001b[0m/\n\u001b[01;34mbaklava\u001b[0m/ \u001b[01;34meggs_benedict\u001b[0m/ \u001b[01;34moysters\u001b[0m/\n\u001b[01;34mbeef_carpaccio\u001b[0m/ \u001b[01;34mescargots\u001b[0m/ \u001b[01;34mpad_thai\u001b[0m/\n\u001b[01;34mbeef_tacos\u001b[0m/ \u001b[01;34mfalafel\u001b[0m/ \u001b[01;34mpaella\u001b[0m/\n\u001b[01;34mbeef_tartare\u001b[0m/ \u001b[01;34mfilet_mignon\u001b[0m/ \u001b[01;34mpancakes\u001b[0m/\n\u001b[01;34mbeet_salad\u001b[0m/ \u001b[01;34mfish_and_chips\u001b[0m/ \u001b[01;34mpanna_cotta\u001b[0m/\n\u001b[01;34mbeignets\u001b[0m/ \u001b[01;34mfoie_gras\u001b[0m/ \u001b[01;34mpeking_duck\u001b[0m/\n\u001b[01;34mbibimbap\u001b[0m/ \u001b[01;34mfrench_fries\u001b[0m/ \u001b[01;34mpho\u001b[0m/\n\u001b[01;34mbread_pudding\u001b[0m/ \u001b[01;34mfrench_onion_soup\u001b[0m/ \u001b[01;34mpizza\u001b[0m/\n\u001b[01;34mbreakfast_burrito\u001b[0m/ \u001b[01;34mfrench_toast\u001b[0m/ \u001b[01;34mpork_chop\u001b[0m/\n\u001b[01;34mbruschetta\u001b[0m/ \u001b[01;34mfried_calamari\u001b[0m/ \u001b[01;34mpoutine\u001b[0m/\n\u001b[01;34mcaesar_salad\u001b[0m/ \u001b[01;34mfried_rice\u001b[0m/ \u001b[01;34mprime_rib\u001b[0m/\n\u001b[01;34mcannoli\u001b[0m/ \u001b[01;34mfrozen_yogurt\u001b[0m/ \u001b[01;34mpulled_pork_sandwich\u001b[0m/\n\u001b[01;34mcaprese_salad\u001b[0m/ \u001b[01;34mgarlic_bread\u001b[0m/ \u001b[01;34mramen\u001b[0m/\n\u001b[01;34mcarrot_cake\u001b[0m/ \u001b[01;34mgnocchi\u001b[0m/ \u001b[01;34mravioli\u001b[0m/\n\u001b[01;34mceviche\u001b[0m/ \u001b[01;34mgreek_salad\u001b[0m/ \u001b[01;34mred_velvet_cake\u001b[0m/\n\u001b[01;34mcheesecake\u001b[0m/ \u001b[01;34mgrilled_cheese_sandwich\u001b[0m/ \u001b[01;34mrisotto\u001b[0m/\n\u001b[01;34mcheese_plate\u001b[0m/ \u001b[01;34mgrilled_salmon\u001b[0m/ \u001b[01;34msamosa\u001b[0m/\n\u001b[01;34mchicken_curry\u001b[0m/ \u001b[01;34mguacamole\u001b[0m/ 
\u001b[01;34msashimi\u001b[0m/\n\u001b[01;34mchicken_quesadilla\u001b[0m/ \u001b[01;34mgyoza\u001b[0m/ \u001b[01;34mscallops\u001b[0m/\n\u001b[01;34mchicken_wings\u001b[0m/ \u001b[01;34mhamburger\u001b[0m/ \u001b[01;34mseaweed_salad\u001b[0m/\n\u001b[01;34mchocolate_cake\u001b[0m/ \u001b[01;34mhot_and_sour_soup\u001b[0m/ \u001b[01;34mshrimp_and_grits\u001b[0m/\n\u001b[01;34mchocolate_ice_cream\u001b[0m/ \u001b[01;34mhot_dog\u001b[0m/ \u001b[01;34mspaghetti_bolognese\u001b[0m/\n\u001b[01;34mchocolate_mousse\u001b[0m/ \u001b[01;34mhuevos_rancheros\u001b[0m/ \u001b[01;34mspaghetti_carbonara\u001b[0m/\n\u001b[01;34mchurros\u001b[0m/ \u001b[01;34mhummus\u001b[0m/ \u001b[01;34mspring_rolls\u001b[0m/\n\u001b[01;34mclam_chowder\u001b[0m/ \u001b[01;34mlasagna\u001b[0m/ \u001b[01;34msteak\u001b[0m/\n\u001b[01;34mclub_sandwich\u001b[0m/ \u001b[01;34mlobster_bisque\u001b[0m/ \u001b[01;34mstrawberry_shortcake\u001b[0m/\n\u001b[01;34mcrab_cakes\u001b[0m/ \u001b[01;34mlobster_roll_sandwich\u001b[0m/ \u001b[01;34msushi\u001b[0m/\n\u001b[01;34mcreme_brulee\u001b[0m/ \u001b[01;34mmacaroni_and_cheese\u001b[0m/ \u001b[01;34mtakoyaki\u001b[0m/\n\u001b[01;34mcroque_madame\u001b[0m/ \u001b[01;34mmacarons\u001b[0m/ \u001b[01;34mtiramisu\u001b[0m/\n\u001b[01;34mcupcakes\u001b[0m/ \u001b[01;34mmiso_soup\u001b[0m/ \u001b[01;34mtuna_tartare\u001b[0m/\n\u001b[01;34mdeviled_eggs\u001b[0m/ \u001b[01;34mmussels\u001b[0m/ \u001b[01;34mwaffles\u001b[0m/\n\u001b[01;34mdonuts\u001b[0m/ \u001b[01;34mnachos\u001b[0m/\n" ] ], [ [ "Extra Imports", "_____no_output_____" ] ], [ [ "from fastprogress.fastprogress import master_bar, progress_bar\nfrom IPython.display import Image\nfrom os import listdir\nfrom pickle import dump", "_____no_output_____" ] ], [ [ "Setup Convnet Application", "_____no_output_____" ] ], [ [ "use_NAS = False\nif use_NAS:\n IMG_SIZE = 224 # 299 for Inception, 224 for NASNet\n IMG_SHAPE = (IMG_SIZE, IMG_SIZE, 3)\nelse:\n IMG_SIZE = 299 # 299 for Inception, 224 for NASNet\n IMG_SHAPE = (IMG_SIZE, IMG_SIZE, 3)\n", "_____no_output_____" ], [ "def load_image(image_path):\n img = tf.io.read_file(image_path)\n img = tf.image.decode_jpeg(img, channels=3)\n img = tf.image.resize(img, (IMG_SIZE, IMG_SIZE))\n if use_NAS:\n img = tf.keras.applications.nasnet.preprocess_input(img)\n else:\n img = tf.keras.applications.inception_v3.preprocess_input(img)\n return img, image_path", "_____no_output_____" ], [ "foods_txt = tf.keras.utils.get_file('foods.txt',\n origin = 'https://www.dropbox.com/s/xyukyq62g98dx24/foods_cat.txt?dl=1')\n\nprint(foods_txt)", "Downloading data from https://www.dropbox.com/s/xyukyq62g98dx24/foods_cat.txt?dl=1\n8192/1968 [============================================================================================================================] - 0s 0us/step\n/root/.keras/datasets/foods.txt\n" ], [ "def get_nutrient_array(fat, protein, carbs):\n nutrients = np.array([float(fat)*4, float(protein)*4, float(carbs)*4]) \n nutrients /= np.linalg.norm(nutrients)\n return nutrients", "_____no_output_____" ], [ "def get_category_array(keto, carbs, health):\n return np.array([float(keto)-5, float(carbs)-5, float(health)-5])", "_____no_output_____" ], [ "import csv\n\ndef get_food_nutrients(nutrient_file):\n foods = {}\n with open(foods_txt) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n line_count = 0\n for row in csv_reader:\n if line_count == 0:\n print(f'Column names are {\", \".join(row)}')\n line_count += 1\n else: \n categories = get_category_array(row[1],row[2],row[3])\n 
foods[row[0]] = categories\n line_count += 1\n print(f'Processed {line_count} lines.')\n return foods", "_____no_output_____" ], [ "food_nutrients = get_food_nutrients(foods_txt)\nprint(food_nutrients)", "Column names are name, keto, carbs, health\nProcessed 102 lines.\n{'apple_pie': array([-4., 5., -4.]), 'baby_back_ribs': array([ 4., -2., 1.]), 'baklava': array([-4., 5., -1.]), 'beef_carpaccio': array([ 5., -4., 3.]), 'beef_tacos': array([-3., -1., 2.]), 'beef_tartare': array([ 5., -4., 3.]), 'beet_salad': array([-3., 3., 5.]), 'beignets': array([-4., 5., -1.]), 'bibimbap': array([-2., 3., 0.]), 'bread_pudding': array([-4., 3., 0.]), 'breakfast_burrito': array([-2., 2., 3.]), 'bruschetta': array([-1., 2., 4.]), 'caesar_salad': array([ 1., -2., 5.]), 'cannoli': array([-4., 5., -1.]), 'caprese_salad': array([0., 0., 5.]), 'carrot_cake': array([-4., 5., 1.]), 'ceviche': array([ 4., -4., 4.]), 'cheesecake': array([-4., 4., -2.]), 'cheese_plate': array([ 4., -4., 2.]), 'chicken_curry': array([ 4., -3., 4.]), 'chicken quesadilla': array([1., 0., 3.]), 'chicken_wings': array([ 4., -3., 3.]), 'chocolate_cake': array([-4., 5., -3.]), 'chocolate_ice_cream': array([-4., 5., 0.]), 'chocolate_mousse': array([0., 0., 0.]), 'churros': array([-3., 4., -1.]), 'clam_chowder': array([-1., 0., 3.]), 'club_sandwich': array([-2., 2., 3.]), 'crab_cakes': array([ 3., -2., 3.]), 'creme_brulee': array([-4., 2., 0.]), 'croque_madame': array([-2., 3., 2.]), 'cupcakes': array([-4., 5., -1.]), 'deviled_eggs': array([ 4., -2., 3.]), 'donuts': array([-4., 5., -3.]), 'dumplings': array([-1., 2., 3.]), 'edamame': array([ 3., -3., 5.]), 'eggs_benedict': array([ 0., -1., 2.]), 'escargots': array([ 3., -3., 3.]), 'falafel': array([-2., 2., 3.]), 'filet_mignon': array([ 5., -4., 4.]), 'fish_and_chips': array([ 2., -1., 1.]), 'foie_gras': array([ 3., -4., -1.]), 'french_fries': array([-4., 5., 0.]), 'french_onion_soup': array([ 2., -1., 3.]), 'french_toast': array([-4., 5., 0.]), 'fried_calamari': array([ 3., -3., 3.]), 'fried_rice': array([-4., 4., 3.]), 'frozen_yogurt': array([-2., 3., 2.]), 'garlic_bread': array([-4., 5., 0.]), 'gnocchi': array([-4., 5., 2.]), 'greek_salad': array([ 3., -1., 4.]), 'grilled_cheese_sandwich': array([-1., -1., 1.]), 'grilled_salmon': array([ 5., -4., 5.]), 'guacamole': array([ 5., -4., 4.]), 'gyoza': array([ 4., -4., 3.]), 'hamburger': array([ 2., -1., 1.]), 'hot_and_sour_soup': array([1., 1., 3.]), 'hot_dog': array([ 2., -1., -2.]), 'huevos_rancheros': array([ 4., -3., 3.]), 'hummus': array([ 2., -1., 3.]), 'lasagna': array([-2., 3., 3.]), 'lobster_bisque': array([0., 0., 0.]), 'lobster_roll_sandwich': array([-3., 2., 2.]), 'macaroni_and_cheese': array([-4., 3., 1.]), 'macarons': array([-4., 5., 0.]), 'miso_soup': array([ 2., -3., 4.]), 'mussels': array([ 3., -4., 4.]), 'nachos': array([-4., 3., -1.]), 'omelette': array([ 5., -4., 3.]), 'onion_rings': array([-4., 5., 1.]), 'oysters': array([ 4., -3., 3.]), 'pad_thai': array([-4., 2., 3.]), 'paella': array([-1., 3., 3.]), 'pancakes': array([-4., 5., 0.]), 'panna_cotta': array([ 0., 0., -2.]), 'peking_duck': array([ 4., -4., 3.]), 'pho': array([1., 0., 3.]), 'pizza': array([-4., 2., 2.]), 'pork_chop': array([ 5., -4., 4.]), 'poutine': array([-4., 5., 0.]), 'prime_rib': array([ 5., -4., 3.]), 'pulled_pork_sandwich': array([0., 0., 1.]), 'ramen': array([-2., 3., 4.]), 'ravioli': array([-4., 3., 2.]), 'red_velvet_cake': array([-4., 5., -2.]), 'risotto': array([-4., 5., 2.]), 'samosa': array([-4., 2., 2.]), 'sashimi': array([ 5., -4., 4.]), 
'scallops': array([ 5., -4., 4.]), 'seaweed_salad': array([5., 1., 5.]), 'shrimp_and_grits': array([-2., 2., 1.]), 'spaghetti_bolognese': array([-1., 2., 2.]), 'spaghetti_carbonara': array([-3., 2., 2.]), 'spring_rolls': array([-3., 1., 2.]), 'steak': array([ 5., -4., 4.]), 'strawberry_shortcake': array([-4., 5., 0.]), 'sushi': array([ 2., -2., 3.]), 'takoyaki': array([-2., 2., 2.]), 'tiramisu': array([-4., 14., 0.]), 'tuna_tartare': array([ 5., -4., 3.]), 'waffles': array([-4., 5., 0.])}\n" ], [ "def load_images(food_w_nutrients, directory):\n X = []\n Y = []\n i=0\n mb = master_bar(listdir(directory))\n for food_group in mb: \n try:\n for pic in progress_bar(listdir(directory + food_group),\n parent=mb, comment='food = ' + food_group):\n filename = directory + food_group + '/' + pic\n image, img_path = load_image(filename)\n if i < 5:\n print(img_path)\n i+=1\n Y.append(food_w_nutrients[food_group])\n X.append(image)\n except:\n continue\n return X,Y\n", "_____no_output_____" ], [ "X, Y = load_images(food_nutrients, images_folder)\nprint(len(X), len(Y))", "_____no_output_____" ], [ "tf.keras.backend.clear_session()\n\nif use_NAS:\n # Create the base model from the pre-trained model \n base_model = tf.keras.applications.NASNetMobile(input_shape=IMG_SHAPE,\n include_top=False,\n weights='imagenet')\nelse:\n # Create the base model from the pre-trained model \n base_model = tf.keras.applications.InceptionResNetV2(input_shape=IMG_SHAPE,\n include_top=False,\n weights='imagenet')", "Downloading data from https://storage.googleapis.com/tensorflow/keras-applications/inception_resnet_v2/inception_resnet_v2_weights_tf_dim_ordering_tf_kernels_notop.h5\n219062272/219055592 [==============================] - 1s 0us/step\n" ], [ "dataset = tf.data.Dataset.from_tensor_slices((X, Y))\ndataset\nbatches = dataset.batch(64)", "_____no_output_____" ], [ "for image_batch, label_batch in batches.take(1):\n pass\n\nimage_batch.shape\ntrain_size = int(len(X)*.8)\ntest_size = int(len(X)*.2)\n\nbatches = batches.shuffle(test_size)\ntrain_dataset = batches.take(train_size)\ntest_dataset = batches.skip(train_size)\ntest_dataset = test_dataset.take(test_size)", "_____no_output_____" ], [ "feature_batch = base_model(image_batch)\nprint(feature_batch.shape)", "(64, 8, 8, 1536)\n" ], [ "base_model.trainable = True", "_____no_output_____" ], [ "# Let's take a look to see how many layers are in the base model\nprint(\"Number of layers in the base model: \", len(base_model.layers))\n\n# Fine-tune from this layer onwards\nif use_NAS:\n fine_tune_at = 100\nelse:\n fine_tune_at = 550\n\n# Freeze all the layers before the `fine_tune_at` layer\nfor layer in base_model.layers[:fine_tune_at]:\n layer.trainable = False\n\nbase_model.summary()", "Number of layers in the base model: 780\nModel: \"inception_resnet_v2\"\n__________________________________________________________________________________________________\nLayer (type) Output Shape Param # Connected to \n==================================================================================================\ninput_1 (InputLayer) [(None, 299, 299, 3) 0 \n__________________________________________________________________________________________________\nconv2d (Conv2D) (None, 149, 149, 32) 864 input_1[0][0] \n__________________________________________________________________________________________________\nbatch_normalization (BatchNorma (None, 149, 149, 32) 96 conv2d[0][0] 
\n__________________________________________________________________________________________________\nactivation (Activation) (None, 149, 149, 32) 0 batch_normalization[0][0] \n__________________________________________________________________________________________________\nconv2d_1 (Conv2D) (None, 147, 147, 32) 9216 activation[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_1 (BatchNor (None, 147, 147, 32) 96 conv2d_1[0][0] \n__________________________________________________________________________________________________\nactivation_1 (Activation) (None, 147, 147, 32) 0 batch_normalization_1[0][0] \n__________________________________________________________________________________________________\nconv2d_2 (Conv2D) (None, 147, 147, 64) 18432 activation_1[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_2 (BatchNor (None, 147, 147, 64) 192 conv2d_2[0][0] \n__________________________________________________________________________________________________\nactivation_2 (Activation) (None, 147, 147, 64) 0 batch_normalization_2[0][0] \n__________________________________________________________________________________________________\nmax_pooling2d (MaxPooling2D) (None, 73, 73, 64) 0 activation_2[0][0] \n__________________________________________________________________________________________________\nconv2d_3 (Conv2D) (None, 73, 73, 80) 5120 max_pooling2d[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_3 (BatchNor (None, 73, 73, 80) 240 conv2d_3[0][0] \n__________________________________________________________________________________________________\nactivation_3 (Activation) (None, 73, 73, 80) 0 batch_normalization_3[0][0] \n__________________________________________________________________________________________________\nconv2d_4 (Conv2D) (None, 71, 71, 192) 138240 activation_3[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_4 (BatchNor (None, 71, 71, 192) 576 conv2d_4[0][0] \n__________________________________________________________________________________________________\nactivation_4 (Activation) (None, 71, 71, 192) 0 batch_normalization_4[0][0] \n__________________________________________________________________________________________________\nmax_pooling2d_1 (MaxPooling2D) (None, 35, 35, 192) 0 activation_4[0][0] \n__________________________________________________________________________________________________\nconv2d_8 (Conv2D) (None, 35, 35, 64) 12288 max_pooling2d_1[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_8 (BatchNor (None, 35, 35, 64) 192 conv2d_8[0][0] \n__________________________________________________________________________________________________\nactivation_8 (Activation) (None, 35, 35, 64) 0 batch_normalization_8[0][0] \n__________________________________________________________________________________________________\nconv2d_6 (Conv2D) (None, 35, 35, 48) 9216 max_pooling2d_1[0][0] \n__________________________________________________________________________________________________\nconv2d_9 (Conv2D) (None, 35, 35, 96) 55296 activation_8[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_6 
(BatchNor (None, 35, 35, 48) 144 conv2d_6[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_9 (BatchNor (None, 35, 35, 96) 288 conv2d_9[0][0] \n__________________________________________________________________________________________________\nactivation_6 (Activation) (None, 35, 35, 48) 0 batch_normalization_6[0][0] \n__________________________________________________________________________________________________\nactivation_9 (Activation) (None, 35, 35, 96) 0 batch_normalization_9[0][0] \n__________________________________________________________________________________________________\naverage_pooling2d (AveragePooli (None, 35, 35, 192) 0 max_pooling2d_1[0][0] \n__________________________________________________________________________________________________\nconv2d_5 (Conv2D) (None, 35, 35, 96) 18432 max_pooling2d_1[0][0] \n__________________________________________________________________________________________________\nconv2d_7 (Conv2D) (None, 35, 35, 64) 76800 activation_6[0][0] \n__________________________________________________________________________________________________\nconv2d_10 (Conv2D) (None, 35, 35, 96) 82944 activation_9[0][0] \n__________________________________________________________________________________________________\nconv2d_11 (Conv2D) (None, 35, 35, 64) 12288 average_pooling2d[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_5 (BatchNor (None, 35, 35, 96) 288 conv2d_5[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_7 (BatchNor (None, 35, 35, 64) 192 conv2d_7[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_10 (BatchNo (None, 35, 35, 96) 288 conv2d_10[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_11 (BatchNo (None, 35, 35, 64) 192 conv2d_11[0][0] \n__________________________________________________________________________________________________\nactivation_5 (Activation) (None, 35, 35, 96) 0 batch_normalization_5[0][0] \n__________________________________________________________________________________________________\nactivation_7 (Activation) (None, 35, 35, 64) 0 batch_normalization_7[0][0] \n__________________________________________________________________________________________________\nactivation_10 (Activation) (None, 35, 35, 96) 0 batch_normalization_10[0][0] \n__________________________________________________________________________________________________\nactivation_11 (Activation) (None, 35, 35, 64) 0 batch_normalization_11[0][0] \n__________________________________________________________________________________________________\nmixed_5b (Concatenate) (None, 35, 35, 320) 0 activation_5[0][0] \n activation_7[0][0] \n activation_10[0][0] \n activation_11[0][0] \n__________________________________________________________________________________________________\nconv2d_15 (Conv2D) (None, 35, 35, 32) 10240 mixed_5b[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_15 (BatchNo (None, 35, 35, 32) 96 conv2d_15[0][0] \n__________________________________________________________________________________________________\nactivation_15 (Activation) (None, 35, 35, 32) 0 
batch_normalization_15[0][0] \n__________________________________________________________________________________________________\nconv2d_13 (Conv2D) (None, 35, 35, 32) 10240 mixed_5b[0][0] \n__________________________________________________________________________________________________\nconv2d_16 (Conv2D) (None, 35, 35, 48) 13824 activation_15[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_13 (BatchNo (None, 35, 35, 32) 96 conv2d_13[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_16 (BatchNo (None, 35, 35, 48) 144 conv2d_16[0][0] \n__________________________________________________________________________________________________\nactivation_13 (Activation) (None, 35, 35, 32) 0 batch_normalization_13[0][0] \n__________________________________________________________________________________________________\nactivation_16 (Activation) (None, 35, 35, 48) 0 batch_normalization_16[0][0] \n__________________________________________________________________________________________________\nconv2d_12 (Conv2D) (None, 35, 35, 32) 10240 mixed_5b[0][0] \n__________________________________________________________________________________________________\nconv2d_14 (Conv2D) (None, 35, 35, 32) 9216 activation_13[0][0] \n__________________________________________________________________________________________________\nconv2d_17 (Conv2D) (None, 35, 35, 64) 27648 activation_16[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_12 (BatchNo (None, 35, 35, 32) 96 conv2d_12[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_14 (BatchNo (None, 35, 35, 32) 96 conv2d_14[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_17 (BatchNo (None, 35, 35, 64) 192 conv2d_17[0][0] \n__________________________________________________________________________________________________\nactivation_12 (Activation) (None, 35, 35, 32) 0 batch_normalization_12[0][0] \n__________________________________________________________________________________________________\nactivation_14 (Activation) (None, 35, 35, 32) 0 batch_normalization_14[0][0] \n__________________________________________________________________________________________________\nactivation_17 (Activation) (None, 35, 35, 64) 0 batch_normalization_17[0][0] \n__________________________________________________________________________________________________\nblock35_1_mixed (Concatenate) (None, 35, 35, 128) 0 activation_12[0][0] \n activation_14[0][0] \n activation_17[0][0] \n__________________________________________________________________________________________________\nblock35_1_conv (Conv2D) (None, 35, 35, 320) 41280 block35_1_mixed[0][0] \n__________________________________________________________________________________________________\nblock35_1 (Lambda) (None, 35, 35, 320) 0 mixed_5b[0][0] \n block35_1_conv[0][0] \n__________________________________________________________________________________________________\nblock35_1_ac (Activation) (None, 35, 35, 320) 0 block35_1[0][0] \n__________________________________________________________________________________________________\nconv2d_21 (Conv2D) (None, 35, 35, 32) 10240 block35_1_ac[0][0] 
\n__________________________________________________________________________________________________\nbatch_normalization_21 (BatchNo (None, 35, 35, 32) 96 conv2d_21[0][0] \n__________________________________________________________________________________________________\nactivation_21 (Activation) (None, 35, 35, 32) 0 batch_normalization_21[0][0] \n__________________________________________________________________________________________________\nconv2d_19 (Conv2D) (None, 35, 35, 32) 10240 block35_1_ac[0][0] \n__________________________________________________________________________________________________\nconv2d_22 (Conv2D) (None, 35, 35, 48) 13824 activation_21[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_19 (BatchNo (None, 35, 35, 32) 96 conv2d_19[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_22 (BatchNo (None, 35, 35, 48) 144 conv2d_22[0][0] \n__________________________________________________________________________________________________\nactivation_19 (Activation) (None, 35, 35, 32) 0 batch_normalization_19[0][0] \n__________________________________________________________________________________________________\nactivation_22 (Activation) (None, 35, 35, 48) 0 batch_normalization_22[0][0] \n__________________________________________________________________________________________________\nconv2d_18 (Conv2D) (None, 35, 35, 32) 10240 block35_1_ac[0][0] \n__________________________________________________________________________________________________\nconv2d_20 (Conv2D) (None, 35, 35, 32) 9216 activation_19[0][0] \n__________________________________________________________________________________________________\nconv2d_23 (Conv2D) (None, 35, 35, 64) 27648 activation_22[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_18 (BatchNo (None, 35, 35, 32) 96 conv2d_18[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_20 (BatchNo (None, 35, 35, 32) 96 conv2d_20[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_23 (BatchNo (None, 35, 35, 64) 192 conv2d_23[0][0] \n__________________________________________________________________________________________________\nactivation_18 (Activation) (None, 35, 35, 32) 0 batch_normalization_18[0][0] \n__________________________________________________________________________________________________\nactivation_20 (Activation) (None, 35, 35, 32) 0 batch_normalization_20[0][0] \n__________________________________________________________________________________________________\nactivation_23 (Activation) (None, 35, 35, 64) 0 batch_normalization_23[0][0] \n__________________________________________________________________________________________________\nblock35_2_mixed (Concatenate) (None, 35, 35, 128) 0 activation_18[0][0] \n activation_20[0][0] \n activation_23[0][0] \n__________________________________________________________________________________________________\nblock35_2_conv (Conv2D) (None, 35, 35, 320) 41280 block35_2_mixed[0][0] \n__________________________________________________________________________________________________\nblock35_2 (Lambda) (None, 35, 35, 320) 0 block35_1_ac[0][0] \n block35_2_conv[0][0] 
\n__________________________________________________________________________________________________\nblock35_2_ac (Activation) (None, 35, 35, 320) 0 block35_2[0][0] \n__________________________________________________________________________________________________\nconv2d_27 (Conv2D) (None, 35, 35, 32) 10240 block35_2_ac[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_27 (BatchNo (None, 35, 35, 32) 96 conv2d_27[0][0] \n__________________________________________________________________________________________________\nactivation_27 (Activation) (None, 35, 35, 32) 0 batch_normalization_27[0][0] \n__________________________________________________________________________________________________\nconv2d_25 (Conv2D) (None, 35, 35, 32) 10240 block35_2_ac[0][0] \n__________________________________________________________________________________________________\nconv2d_28 (Conv2D) (None, 35, 35, 48) 13824 activation_27[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_25 (BatchNo (None, 35, 35, 32) 96 conv2d_25[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_28 (BatchNo (None, 35, 35, 48) 144 conv2d_28[0][0] \n__________________________________________________________________________________________________\nactivation_25 (Activation) (None, 35, 35, 32) 0 batch_normalization_25[0][0] \n__________________________________________________________________________________________________\nactivation_28 (Activation) (None, 35, 35, 48) 0 batch_normalization_28[0][0] \n__________________________________________________________________________________________________\nconv2d_24 (Conv2D) (None, 35, 35, 32) 10240 block35_2_ac[0][0] \n__________________________________________________________________________________________________\nconv2d_26 (Conv2D) (None, 35, 35, 32) 9216 activation_25[0][0] \n__________________________________________________________________________________________________\nconv2d_29 (Conv2D) (None, 35, 35, 64) 27648 activation_28[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_24 (BatchNo (None, 35, 35, 32) 96 conv2d_24[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_26 (BatchNo (None, 35, 35, 32) 96 conv2d_26[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_29 (BatchNo (None, 35, 35, 64) 192 conv2d_29[0][0] \n__________________________________________________________________________________________________\nactivation_24 (Activation) (None, 35, 35, 32) 0 batch_normalization_24[0][0] \n__________________________________________________________________________________________________\nactivation_26 (Activation) (None, 35, 35, 32) 0 batch_normalization_26[0][0] \n__________________________________________________________________________________________________\nactivation_29 (Activation) (None, 35, 35, 64) 0 batch_normalization_29[0][0] \n__________________________________________________________________________________________________\nblock35_3_mixed (Concatenate) (None, 35, 35, 128) 0 activation_24[0][0] \n activation_26[0][0] \n activation_29[0][0] 
\n__________________________________________________________________________________________________\nblock35_3_conv (Conv2D) (None, 35, 35, 320) 41280 block35_3_mixed[0][0] \n__________________________________________________________________________________________________\nblock35_3 (Lambda) (None, 35, 35, 320) 0 block35_2_ac[0][0] \n block35_3_conv[0][0] \n__________________________________________________________________________________________________\nblock35_3_ac (Activation) (None, 35, 35, 320) 0 block35_3[0][0] \n__________________________________________________________________________________________________\nconv2d_33 (Conv2D) (None, 35, 35, 32) 10240 block35_3_ac[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_33 (BatchNo (None, 35, 35, 32) 96 conv2d_33[0][0] \n__________________________________________________________________________________________________\nactivation_33 (Activation) (None, 35, 35, 32) 0 batch_normalization_33[0][0] \n__________________________________________________________________________________________________\nconv2d_31 (Conv2D) (None, 35, 35, 32) 10240 block35_3_ac[0][0] \n__________________________________________________________________________________________________\nconv2d_34 (Conv2D) (None, 35, 35, 48) 13824 activation_33[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_31 (BatchNo (None, 35, 35, 32) 96 conv2d_31[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_34 (BatchNo (None, 35, 35, 48) 144 conv2d_34[0][0] \n__________________________________________________________________________________________________\nactivation_31 (Activation) (None, 35, 35, 32) 0 batch_normalization_31[0][0] \n__________________________________________________________________________________________________\nactivation_34 (Activation) (None, 35, 35, 48) 0 batch_normalization_34[0][0] \n__________________________________________________________________________________________________\nconv2d_30 (Conv2D) (None, 35, 35, 32) 10240 block35_3_ac[0][0] \n__________________________________________________________________________________________________\nconv2d_32 (Conv2D) (None, 35, 35, 32) 9216 activation_31[0][0] \n__________________________________________________________________________________________________\nconv2d_35 (Conv2D) (None, 35, 35, 64) 27648 activation_34[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_30 (BatchNo (None, 35, 35, 32) 96 conv2d_30[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_32 (BatchNo (None, 35, 35, 32) 96 conv2d_32[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_35 (BatchNo (None, 35, 35, 64) 192 conv2d_35[0][0] \n__________________________________________________________________________________________________\nactivation_30 (Activation) (None, 35, 35, 32) 0 batch_normalization_30[0][0] \n__________________________________________________________________________________________________\nactivation_32 (Activation) (None, 35, 35, 32) 0 batch_normalization_32[0][0] 
\n__________________________________________________________________________________________________\nactivation_35 (Activation) (None, 35, 35, 64) 0 batch_normalization_35[0][0] \n__________________________________________________________________________________________________\nblock35_4_mixed (Concatenate) (None, 35, 35, 128) 0 activation_30[0][0] \n activation_32[0][0] \n activation_35[0][0] \n__________________________________________________________________________________________________\nblock35_4_conv (Conv2D) (None, 35, 35, 320) 41280 block35_4_mixed[0][0] \n__________________________________________________________________________________________________\nblock35_4 (Lambda) (None, 35, 35, 320) 0 block35_3_ac[0][0] \n block35_4_conv[0][0] \n__________________________________________________________________________________________________\nblock35_4_ac (Activation) (None, 35, 35, 320) 0 block35_4[0][0] \n__________________________________________________________________________________________________\nconv2d_39 (Conv2D) (None, 35, 35, 32) 10240 block35_4_ac[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_39 (BatchNo (None, 35, 35, 32) 96 conv2d_39[0][0] \n__________________________________________________________________________________________________\nactivation_39 (Activation) (None, 35, 35, 32) 0 batch_normalization_39[0][0] \n__________________________________________________________________________________________________\nconv2d_37 (Conv2D) (None, 35, 35, 32) 10240 block35_4_ac[0][0] \n__________________________________________________________________________________________________\nconv2d_40 (Conv2D) (None, 35, 35, 48) 13824 activation_39[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_37 (BatchNo (None, 35, 35, 32) 96 conv2d_37[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_40 (BatchNo (None, 35, 35, 48) 144 conv2d_40[0][0] \n__________________________________________________________________________________________________\nactivation_37 (Activation) (None, 35, 35, 32) 0 batch_normalization_37[0][0] \n__________________________________________________________________________________________________\nactivation_40 (Activation) (None, 35, 35, 48) 0 batch_normalization_40[0][0] \n__________________________________________________________________________________________________\nconv2d_36 (Conv2D) (None, 35, 35, 32) 10240 block35_4_ac[0][0] \n__________________________________________________________________________________________________\nconv2d_38 (Conv2D) (None, 35, 35, 32) 9216 activation_37[0][0] \n__________________________________________________________________________________________________\nconv2d_41 (Conv2D) (None, 35, 35, 64) 27648 activation_40[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_36 (BatchNo (None, 35, 35, 32) 96 conv2d_36[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_38 (BatchNo (None, 35, 35, 32) 96 conv2d_38[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_41 (BatchNo (None, 35, 35, 64) 192 conv2d_41[0][0] 
\n__________________________________________________________________________________________________\nactivation_36 (Activation) (None, 35, 35, 32) 0 batch_normalization_36[0][0] \n__________________________________________________________________________________________________\nactivation_38 (Activation) (None, 35, 35, 32) 0 batch_normalization_38[0][0] \n__________________________________________________________________________________________________\nactivation_41 (Activation) (None, 35, 35, 64) 0 batch_normalization_41[0][0] \n__________________________________________________________________________________________________\nblock35_5_mixed (Concatenate) (None, 35, 35, 128) 0 activation_36[0][0] \n activation_38[0][0] \n activation_41[0][0] \n__________________________________________________________________________________________________\nblock35_5_conv (Conv2D) (None, 35, 35, 320) 41280 block35_5_mixed[0][0] \n__________________________________________________________________________________________________\nblock35_5 (Lambda) (None, 35, 35, 320) 0 block35_4_ac[0][0] \n block35_5_conv[0][0] \n__________________________________________________________________________________________________\nblock35_5_ac (Activation) (None, 35, 35, 320) 0 block35_5[0][0] \n__________________________________________________________________________________________________\nconv2d_45 (Conv2D) (None, 35, 35, 32) 10240 block35_5_ac[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_45 (BatchNo (None, 35, 35, 32) 96 conv2d_45[0][0] \n__________________________________________________________________________________________________\nactivation_45 (Activation) (None, 35, 35, 32) 0 batch_normalization_45[0][0] \n__________________________________________________________________________________________________\nconv2d_43 (Conv2D) (None, 35, 35, 32) 10240 block35_5_ac[0][0] \n__________________________________________________________________________________________________\nconv2d_46 (Conv2D) (None, 35, 35, 48) 13824 activation_45[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_43 (BatchNo (None, 35, 35, 32) 96 conv2d_43[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_46 (BatchNo (None, 35, 35, 48) 144 conv2d_46[0][0] \n__________________________________________________________________________________________________\nactivation_43 (Activation) (None, 35, 35, 32) 0 batch_normalization_43[0][0] \n__________________________________________________________________________________________________\nactivation_46 (Activation) (None, 35, 35, 48) 0 batch_normalization_46[0][0] \n__________________________________________________________________________________________________\nconv2d_42 (Conv2D) (None, 35, 35, 32) 10240 block35_5_ac[0][0] \n__________________________________________________________________________________________________\nconv2d_44 (Conv2D) (None, 35, 35, 32) 9216 activation_43[0][0] \n__________________________________________________________________________________________________\nconv2d_47 (Conv2D) (None, 35, 35, 64) 27648 activation_46[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_42 (BatchNo (None, 35, 35, 32) 96 conv2d_42[0][0] 
\n__________________________________________________________________________________________________\nbatch_normalization_44 (BatchNo (None, 35, 35, 32) 96 conv2d_44[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_47 (BatchNo (None, 35, 35, 64) 192 conv2d_47[0][0] \n__________________________________________________________________________________________________\nactivation_42 (Activation) (None, 35, 35, 32) 0 batch_normalization_42[0][0] \n__________________________________________________________________________________________________\nactivation_44 (Activation) (None, 35, 35, 32) 0 batch_normalization_44[0][0] \n__________________________________________________________________________________________________\nactivation_47 (Activation) (None, 35, 35, 64) 0 batch_normalization_47[0][0] \n__________________________________________________________________________________________________\nblock35_6_mixed (Concatenate) (None, 35, 35, 128) 0 activation_42[0][0] \n activation_44[0][0] \n activation_47[0][0] \n__________________________________________________________________________________________________\nblock35_6_conv (Conv2D) (None, 35, 35, 320) 41280 block35_6_mixed[0][0] \n__________________________________________________________________________________________________\nblock35_6 (Lambda) (None, 35, 35, 320) 0 block35_5_ac[0][0] \n block35_6_conv[0][0] \n__________________________________________________________________________________________________\nblock35_6_ac (Activation) (None, 35, 35, 320) 0 block35_6[0][0] \n__________________________________________________________________________________________________\nconv2d_51 (Conv2D) (None, 35, 35, 32) 10240 block35_6_ac[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_51 (BatchNo (None, 35, 35, 32) 96 conv2d_51[0][0] \n__________________________________________________________________________________________________\nactivation_51 (Activation) (None, 35, 35, 32) 0 batch_normalization_51[0][0] \n__________________________________________________________________________________________________\nconv2d_49 (Conv2D) (None, 35, 35, 32) 10240 block35_6_ac[0][0] \n__________________________________________________________________________________________________\nconv2d_52 (Conv2D) (None, 35, 35, 48) 13824 activation_51[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_49 (BatchNo (None, 35, 35, 32) 96 conv2d_49[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_52 (BatchNo (None, 35, 35, 48) 144 conv2d_52[0][0] \n__________________________________________________________________________________________________\nactivation_49 (Activation) (None, 35, 35, 32) 0 batch_normalization_49[0][0] \n__________________________________________________________________________________________________\nactivation_52 (Activation) (None, 35, 35, 48) 0 batch_normalization_52[0][0] \n__________________________________________________________________________________________________\nconv2d_48 (Conv2D) (None, 35, 35, 32) 10240 block35_6_ac[0][0] \n__________________________________________________________________________________________________\nconv2d_50 (Conv2D) (None, 35, 35, 32) 9216 activation_49[0][0] 
\n__________________________________________________________________________________________________\nconv2d_53 (Conv2D) (None, 35, 35, 64) 27648 activation_52[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_48 (BatchNo (None, 35, 35, 32) 96 conv2d_48[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_50 (BatchNo (None, 35, 35, 32) 96 conv2d_50[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_53 (BatchNo (None, 35, 35, 64) 192 conv2d_53[0][0] \n__________________________________________________________________________________________________\nactivation_48 (Activation) (None, 35, 35, 32) 0 batch_normalization_48[0][0] \n__________________________________________________________________________________________________\nactivation_50 (Activation) (None, 35, 35, 32) 0 batch_normalization_50[0][0] \n__________________________________________________________________________________________________\nactivation_53 (Activation) (None, 35, 35, 64) 0 batch_normalization_53[0][0] \n__________________________________________________________________________________________________\nblock35_7_mixed (Concatenate) (None, 35, 35, 128) 0 activation_48[0][0] \n activation_50[0][0] \n activation_53[0][0] \n__________________________________________________________________________________________________\nblock35_7_conv (Conv2D) (None, 35, 35, 320) 41280 block35_7_mixed[0][0] \n__________________________________________________________________________________________________\nblock35_7 (Lambda) (None, 35, 35, 320) 0 block35_6_ac[0][0] \n block35_7_conv[0][0] \n__________________________________________________________________________________________________\nblock35_7_ac (Activation) (None, 35, 35, 320) 0 block35_7[0][0] \n__________________________________________________________________________________________________\nconv2d_57 (Conv2D) (None, 35, 35, 32) 10240 block35_7_ac[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_57 (BatchNo (None, 35, 35, 32) 96 conv2d_57[0][0] \n__________________________________________________________________________________________________\nactivation_57 (Activation) (None, 35, 35, 32) 0 batch_normalization_57[0][0] \n__________________________________________________________________________________________________\nconv2d_55 (Conv2D) (None, 35, 35, 32) 10240 block35_7_ac[0][0] \n__________________________________________________________________________________________________\nconv2d_58 (Conv2D) (None, 35, 35, 48) 13824 activation_57[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_55 (BatchNo (None, 35, 35, 32) 96 conv2d_55[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_58 (BatchNo (None, 35, 35, 48) 144 conv2d_58[0][0] \n__________________________________________________________________________________________________\nactivation_55 (Activation) (None, 35, 35, 32) 0 batch_normalization_55[0][0] \n__________________________________________________________________________________________________\nactivation_58 (Activation) (None, 35, 35, 48) 0 batch_normalization_58[0][0] 
\n__________________________________________________________________________________________________\nconv2d_54 (Conv2D) (None, 35, 35, 32) 10240 block35_7_ac[0][0] \n__________________________________________________________________________________________________\nconv2d_56 (Conv2D) (None, 35, 35, 32) 9216 activation_55[0][0] \n__________________________________________________________________________________________________\nconv2d_59 (Conv2D) (None, 35, 35, 64) 27648 activation_58[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_54 (BatchNo (None, 35, 35, 32) 96 conv2d_54[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_56 (BatchNo (None, 35, 35, 32) 96 conv2d_56[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_59 (BatchNo (None, 35, 35, 64) 192 conv2d_59[0][0] \n__________________________________________________________________________________________________\nactivation_54 (Activation) (None, 35, 35, 32) 0 batch_normalization_54[0][0] \n__________________________________________________________________________________________________\nactivation_56 (Activation) (None, 35, 35, 32) 0 batch_normalization_56[0][0] \n__________________________________________________________________________________________________\nactivation_59 (Activation) (None, 35, 35, 64) 0 batch_normalization_59[0][0] \n__________________________________________________________________________________________________\nblock35_8_mixed (Concatenate) (None, 35, 35, 128) 0 activation_54[0][0] \n activation_56[0][0] \n activation_59[0][0] \n__________________________________________________________________________________________________\nblock35_8_conv (Conv2D) (None, 35, 35, 320) 41280 block35_8_mixed[0][0] \n__________________________________________________________________________________________________\nblock35_8 (Lambda) (None, 35, 35, 320) 0 block35_7_ac[0][0] \n block35_8_conv[0][0] \n__________________________________________________________________________________________________\nblock35_8_ac (Activation) (None, 35, 35, 320) 0 block35_8[0][0] \n__________________________________________________________________________________________________\nconv2d_63 (Conv2D) (None, 35, 35, 32) 10240 block35_8_ac[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_63 (BatchNo (None, 35, 35, 32) 96 conv2d_63[0][0] \n__________________________________________________________________________________________________\nactivation_63 (Activation) (None, 35, 35, 32) 0 batch_normalization_63[0][0] \n__________________________________________________________________________________________________\nconv2d_61 (Conv2D) (None, 35, 35, 32) 10240 block35_8_ac[0][0] \n__________________________________________________________________________________________________\nconv2d_64 (Conv2D) (None, 35, 35, 48) 13824 activation_63[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_61 (BatchNo (None, 35, 35, 32) 96 conv2d_61[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_64 (BatchNo (None, 35, 35, 48) 144 conv2d_64[0][0] 
\n__________________________________________________________________________________________________\nactivation_61 (Activation) (None, 35, 35, 32) 0 batch_normalization_61[0][0] \n__________________________________________________________________________________________________\nactivation_64 (Activation) (None, 35, 35, 48) 0 batch_normalization_64[0][0] \n__________________________________________________________________________________________________\nconv2d_60 (Conv2D) (None, 35, 35, 32) 10240 block35_8_ac[0][0] \n__________________________________________________________________________________________________\nconv2d_62 (Conv2D) (None, 35, 35, 32) 9216 activation_61[0][0] \n__________________________________________________________________________________________________\nconv2d_65 (Conv2D) (None, 35, 35, 64) 27648 activation_64[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_60 (BatchNo (None, 35, 35, 32) 96 conv2d_60[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_62 (BatchNo (None, 35, 35, 32) 96 conv2d_62[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_65 (BatchNo (None, 35, 35, 64) 192 conv2d_65[0][0] \n__________________________________________________________________________________________________\nactivation_60 (Activation) (None, 35, 35, 32) 0 batch_normalization_60[0][0] \n__________________________________________________________________________________________________\nactivation_62 (Activation) (None, 35, 35, 32) 0 batch_normalization_62[0][0] \n__________________________________________________________________________________________________\nactivation_65 (Activation) (None, 35, 35, 64) 0 batch_normalization_65[0][0] \n__________________________________________________________________________________________________\nblock35_9_mixed (Concatenate) (None, 35, 35, 128) 0 activation_60[0][0] \n activation_62[0][0] \n activation_65[0][0] \n__________________________________________________________________________________________________\nblock35_9_conv (Conv2D) (None, 35, 35, 320) 41280 block35_9_mixed[0][0] \n__________________________________________________________________________________________________\nblock35_9 (Lambda) (None, 35, 35, 320) 0 block35_8_ac[0][0] \n block35_9_conv[0][0] \n__________________________________________________________________________________________________\nblock35_9_ac (Activation) (None, 35, 35, 320) 0 block35_9[0][0] \n__________________________________________________________________________________________________\nconv2d_69 (Conv2D) (None, 35, 35, 32) 10240 block35_9_ac[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_69 (BatchNo (None, 35, 35, 32) 96 conv2d_69[0][0] \n__________________________________________________________________________________________________\nactivation_69 (Activation) (None, 35, 35, 32) 0 batch_normalization_69[0][0] \n__________________________________________________________________________________________________\nconv2d_67 (Conv2D) (None, 35, 35, 32) 10240 block35_9_ac[0][0] \n__________________________________________________________________________________________________\nconv2d_70 (Conv2D) (None, 35, 35, 48) 13824 activation_69[0][0] 
\n__________________________________________________________________________________________________\nbatch_normalization_67 (BatchNo (None, 35, 35, 32) 96 conv2d_67[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_70 (BatchNo (None, 35, 35, 48) 144 conv2d_70[0][0] \n__________________________________________________________________________________________________\nactivation_67 (Activation) (None, 35, 35, 32) 0 batch_normalization_67[0][0] \n__________________________________________________________________________________________________\nactivation_70 (Activation) (None, 35, 35, 48) 0 batch_normalization_70[0][0] \n__________________________________________________________________________________________________\nconv2d_66 (Conv2D) (None, 35, 35, 32) 10240 block35_9_ac[0][0] \n__________________________________________________________________________________________________\nconv2d_68 (Conv2D) (None, 35, 35, 32) 9216 activation_67[0][0] \n__________________________________________________________________________________________________\nconv2d_71 (Conv2D) (None, 35, 35, 64) 27648 activation_70[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_66 (BatchNo (None, 35, 35, 32) 96 conv2d_66[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_68 (BatchNo (None, 35, 35, 32) 96 conv2d_68[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_71 (BatchNo (None, 35, 35, 64) 192 conv2d_71[0][0] \n__________________________________________________________________________________________________\nactivation_66 (Activation) (None, 35, 35, 32) 0 batch_normalization_66[0][0] \n__________________________________________________________________________________________________\nactivation_68 (Activation) (None, 35, 35, 32) 0 batch_normalization_68[0][0] \n__________________________________________________________________________________________________\nactivation_71 (Activation) (None, 35, 35, 64) 0 batch_normalization_71[0][0] \n__________________________________________________________________________________________________\nblock35_10_mixed (Concatenate) (None, 35, 35, 128) 0 activation_66[0][0] \n activation_68[0][0] \n activation_71[0][0] \n__________________________________________________________________________________________________\nblock35_10_conv (Conv2D) (None, 35, 35, 320) 41280 block35_10_mixed[0][0] \n__________________________________________________________________________________________________\nblock35_10 (Lambda) (None, 35, 35, 320) 0 block35_9_ac[0][0] \n block35_10_conv[0][0] \n__________________________________________________________________________________________________\nblock35_10_ac (Activation) (None, 35, 35, 320) 0 block35_10[0][0] \n__________________________________________________________________________________________________\nconv2d_73 (Conv2D) (None, 35, 35, 256) 81920 block35_10_ac[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_73 (BatchNo (None, 35, 35, 256) 768 conv2d_73[0][0] \n__________________________________________________________________________________________________\nactivation_73 (Activation) (None, 35, 35, 256) 0 batch_normalization_73[0][0] 
\n__________________________________________________________________________________________________\nconv2d_74 (Conv2D) (None, 35, 35, 256) 589824 activation_73[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_74 (BatchNo (None, 35, 35, 256) 768 conv2d_74[0][0] \n__________________________________________________________________________________________________\nactivation_74 (Activation) (None, 35, 35, 256) 0 batch_normalization_74[0][0] \n__________________________________________________________________________________________________\nconv2d_72 (Conv2D) (None, 17, 17, 384) 1105920 block35_10_ac[0][0] \n__________________________________________________________________________________________________\nconv2d_75 (Conv2D) (None, 17, 17, 384) 884736 activation_74[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_72 (BatchNo (None, 17, 17, 384) 1152 conv2d_72[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_75 (BatchNo (None, 17, 17, 384) 1152 conv2d_75[0][0] \n__________________________________________________________________________________________________\nactivation_72 (Activation) (None, 17, 17, 384) 0 batch_normalization_72[0][0] \n__________________________________________________________________________________________________\nactivation_75 (Activation) (None, 17, 17, 384) 0 batch_normalization_75[0][0] \n__________________________________________________________________________________________________\nmax_pooling2d_2 (MaxPooling2D) (None, 17, 17, 320) 0 block35_10_ac[0][0] \n__________________________________________________________________________________________________\nmixed_6a (Concatenate) (None, 17, 17, 1088) 0 activation_72[0][0] \n activation_75[0][0] \n max_pooling2d_2[0][0] \n__________________________________________________________________________________________________\nconv2d_77 (Conv2D) (None, 17, 17, 128) 139264 mixed_6a[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_77 (BatchNo (None, 17, 17, 128) 384 conv2d_77[0][0] \n__________________________________________________________________________________________________\nactivation_77 (Activation) (None, 17, 17, 128) 0 batch_normalization_77[0][0] \n__________________________________________________________________________________________________\nconv2d_78 (Conv2D) (None, 17, 17, 160) 143360 activation_77[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_78 (BatchNo (None, 17, 17, 160) 480 conv2d_78[0][0] \n__________________________________________________________________________________________________\nactivation_78 (Activation) (None, 17, 17, 160) 0 batch_normalization_78[0][0] \n__________________________________________________________________________________________________\nconv2d_76 (Conv2D) (None, 17, 17, 192) 208896 mixed_6a[0][0] \n__________________________________________________________________________________________________\nconv2d_79 (Conv2D) (None, 17, 17, 192) 215040 activation_78[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_76 (BatchNo (None, 17, 17, 192) 576 conv2d_76[0][0] 
\n__________________________________________________________________________________________________\nbatch_normalization_79 (BatchNo (None, 17, 17, 192) 576 conv2d_79[0][0] \n__________________________________________________________________________________________________\nactivation_76 (Activation) (None, 17, 17, 192) 0 batch_normalization_76[0][0] \n__________________________________________________________________________________________________\nactivation_79 (Activation) (None, 17, 17, 192) 0 batch_normalization_79[0][0] \n__________________________________________________________________________________________________\nblock17_1_mixed (Concatenate) (None, 17, 17, 384) 0 activation_76[0][0] \n activation_79[0][0] \n__________________________________________________________________________________________________\nblock17_1_conv (Conv2D) (None, 17, 17, 1088) 418880 block17_1_mixed[0][0] \n__________________________________________________________________________________________________\nblock17_1 (Lambda) (None, 17, 17, 1088) 0 mixed_6a[0][0] \n block17_1_conv[0][0] \n__________________________________________________________________________________________________\nblock17_1_ac (Activation) (None, 17, 17, 1088) 0 block17_1[0][0] \n__________________________________________________________________________________________________\nconv2d_81 (Conv2D) (None, 17, 17, 128) 139264 block17_1_ac[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_81 (BatchNo (None, 17, 17, 128) 384 conv2d_81[0][0] \n__________________________________________________________________________________________________\nactivation_81 (Activation) (None, 17, 17, 128) 0 batch_normalization_81[0][0] \n__________________________________________________________________________________________________\nconv2d_82 (Conv2D) (None, 17, 17, 160) 143360 activation_81[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_82 (BatchNo (None, 17, 17, 160) 480 conv2d_82[0][0] \n__________________________________________________________________________________________________\nactivation_82 (Activation) (None, 17, 17, 160) 0 batch_normalization_82[0][0] \n__________________________________________________________________________________________________\nconv2d_80 (Conv2D) (None, 17, 17, 192) 208896 block17_1_ac[0][0] \n__________________________________________________________________________________________________\nconv2d_83 (Conv2D) (None, 17, 17, 192) 215040 activation_82[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_80 (BatchNo (None, 17, 17, 192) 576 conv2d_80[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_83 (BatchNo (None, 17, 17, 192) 576 conv2d_83[0][0] \n__________________________________________________________________________________________________\nactivation_80 (Activation) (None, 17, 17, 192) 0 batch_normalization_80[0][0] \n__________________________________________________________________________________________________\nactivation_83 (Activation) (None, 17, 17, 192) 0 batch_normalization_83[0][0] \n__________________________________________________________________________________________________\nblock17_2_mixed (Concatenate) (None, 17, 17, 384) 0 activation_80[0][0] \n activation_83[0][0] 
\n__________________________________________________________________________________________________\nblock17_2_conv (Conv2D) (None, 17, 17, 1088) 418880 block17_2_mixed[0][0] \n__________________________________________________________________________________________________\nblock17_2 (Lambda) (None, 17, 17, 1088) 0 block17_1_ac[0][0] \n block17_2_conv[0][0] \n__________________________________________________________________________________________________\nblock17_2_ac (Activation) (None, 17, 17, 1088) 0 block17_2[0][0] \n__________________________________________________________________________________________________\nconv2d_85 (Conv2D) (None, 17, 17, 128) 139264 block17_2_ac[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_85 (BatchNo (None, 17, 17, 128) 384 conv2d_85[0][0] \n__________________________________________________________________________________________________\nactivation_85 (Activation) (None, 17, 17, 128) 0 batch_normalization_85[0][0] \n__________________________________________________________________________________________________\nconv2d_86 (Conv2D) (None, 17, 17, 160) 143360 activation_85[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_86 (BatchNo (None, 17, 17, 160) 480 conv2d_86[0][0] \n__________________________________________________________________________________________________\nactivation_86 (Activation) (None, 17, 17, 160) 0 batch_normalization_86[0][0] \n__________________________________________________________________________________________________\nconv2d_84 (Conv2D) (None, 17, 17, 192) 208896 block17_2_ac[0][0] \n__________________________________________________________________________________________________\nconv2d_87 (Conv2D) (None, 17, 17, 192) 215040 activation_86[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_84 (BatchNo (None, 17, 17, 192) 576 conv2d_84[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_87 (BatchNo (None, 17, 17, 192) 576 conv2d_87[0][0] \n__________________________________________________________________________________________________\nactivation_84 (Activation) (None, 17, 17, 192) 0 batch_normalization_84[0][0] \n__________________________________________________________________________________________________\nactivation_87 (Activation) (None, 17, 17, 192) 0 batch_normalization_87[0][0] \n__________________________________________________________________________________________________\nblock17_3_mixed (Concatenate) (None, 17, 17, 384) 0 activation_84[0][0] \n activation_87[0][0] \n__________________________________________________________________________________________________\nblock17_3_conv (Conv2D) (None, 17, 17, 1088) 418880 block17_3_mixed[0][0] \n__________________________________________________________________________________________________\nblock17_3 (Lambda) (None, 17, 17, 1088) 0 block17_2_ac[0][0] \n block17_3_conv[0][0] \n__________________________________________________________________________________________________\nblock17_3_ac (Activation) (None, 17, 17, 1088) 0 block17_3[0][0] \n__________________________________________________________________________________________________\nconv2d_89 (Conv2D) (None, 17, 17, 128) 139264 block17_3_ac[0][0] 
\n__________________________________________________________________________________________________\nbatch_normalization_89 (BatchNo (None, 17, 17, 128) 384 conv2d_89[0][0] \n__________________________________________________________________________________________________\nactivation_89 (Activation) (None, 17, 17, 128) 0 batch_normalization_89[0][0] \n__________________________________________________________________________________________________\nconv2d_90 (Conv2D) (None, 17, 17, 160) 143360 activation_89[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_90 (BatchNo (None, 17, 17, 160) 480 conv2d_90[0][0] \n__________________________________________________________________________________________________\nactivation_90 (Activation) (None, 17, 17, 160) 0 batch_normalization_90[0][0] \n__________________________________________________________________________________________________\nconv2d_88 (Conv2D) (None, 17, 17, 192) 208896 block17_3_ac[0][0] \n__________________________________________________________________________________________________\nconv2d_91 (Conv2D) (None, 17, 17, 192) 215040 activation_90[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_88 (BatchNo (None, 17, 17, 192) 576 conv2d_88[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_91 (BatchNo (None, 17, 17, 192) 576 conv2d_91[0][0] \n__________________________________________________________________________________________________\nactivation_88 (Activation) (None, 17, 17, 192) 0 batch_normalization_88[0][0] \n__________________________________________________________________________________________________\nactivation_91 (Activation) (None, 17, 17, 192) 0 batch_normalization_91[0][0] \n__________________________________________________________________________________________________\nblock17_4_mixed (Concatenate) (None, 17, 17, 384) 0 activation_88[0][0] \n activation_91[0][0] \n__________________________________________________________________________________________________\nblock17_4_conv (Conv2D) (None, 17, 17, 1088) 418880 block17_4_mixed[0][0] \n__________________________________________________________________________________________________\nblock17_4 (Lambda) (None, 17, 17, 1088) 0 block17_3_ac[0][0] \n block17_4_conv[0][0] \n__________________________________________________________________________________________________\nblock17_4_ac (Activation) (None, 17, 17, 1088) 0 block17_4[0][0] \n__________________________________________________________________________________________________\nconv2d_93 (Conv2D) (None, 17, 17, 128) 139264 block17_4_ac[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_93 (BatchNo (None, 17, 17, 128) 384 conv2d_93[0][0] \n__________________________________________________________________________________________________\nactivation_93 (Activation) (None, 17, 17, 128) 0 batch_normalization_93[0][0] \n__________________________________________________________________________________________________\nconv2d_94 (Conv2D) (None, 17, 17, 160) 143360 activation_93[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_94 (BatchNo (None, 17, 17, 160) 480 conv2d_94[0][0] 
\n__________________________________________________________________________________________________\nactivation_94 (Activation) (None, 17, 17, 160) 0 batch_normalization_94[0][0] \n__________________________________________________________________________________________________\nconv2d_92 (Conv2D) (None, 17, 17, 192) 208896 block17_4_ac[0][0] \n__________________________________________________________________________________________________\nconv2d_95 (Conv2D) (None, 17, 17, 192) 215040 activation_94[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_92 (BatchNo (None, 17, 17, 192) 576 conv2d_92[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_95 (BatchNo (None, 17, 17, 192) 576 conv2d_95[0][0] \n__________________________________________________________________________________________________\nactivation_92 (Activation) (None, 17, 17, 192) 0 batch_normalization_92[0][0] \n__________________________________________________________________________________________________\nactivation_95 (Activation) (None, 17, 17, 192) 0 batch_normalization_95[0][0] \n__________________________________________________________________________________________________\nblock17_5_mixed (Concatenate) (None, 17, 17, 384) 0 activation_92[0][0] \n activation_95[0][0] \n__________________________________________________________________________________________________\nblock17_5_conv (Conv2D) (None, 17, 17, 1088) 418880 block17_5_mixed[0][0] \n__________________________________________________________________________________________________\nblock17_5 (Lambda) (None, 17, 17, 1088) 0 block17_4_ac[0][0] \n block17_5_conv[0][0] \n__________________________________________________________________________________________________\nblock17_5_ac (Activation) (None, 17, 17, 1088) 0 block17_5[0][0] \n__________________________________________________________________________________________________\nconv2d_97 (Conv2D) (None, 17, 17, 128) 139264 block17_5_ac[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_97 (BatchNo (None, 17, 17, 128) 384 conv2d_97[0][0] \n__________________________________________________________________________________________________\nactivation_97 (Activation) (None, 17, 17, 128) 0 batch_normalization_97[0][0] \n__________________________________________________________________________________________________\nconv2d_98 (Conv2D) (None, 17, 17, 160) 143360 activation_97[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_98 (BatchNo (None, 17, 17, 160) 480 conv2d_98[0][0] \n__________________________________________________________________________________________________\nactivation_98 (Activation) (None, 17, 17, 160) 0 batch_normalization_98[0][0] \n__________________________________________________________________________________________________\nconv2d_96 (Conv2D) (None, 17, 17, 192) 208896 block17_5_ac[0][0] \n__________________________________________________________________________________________________\nconv2d_99 (Conv2D) (None, 17, 17, 192) 215040 activation_98[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_96 (BatchNo (None, 17, 17, 192) 576 conv2d_96[0][0] 
[Model summary continued — condensed for readability: the remaining 17×17×1088 Inception-ResNet-B blocks (through block17_20) repeat the same pattern of 128/160/192-filter Conv2D + BatchNormalization + Activation branches feeding a block17_N_mixed concatenation, a 418,880-parameter block17_N_conv, a residual Lambda add, and an activation. The mixed_7a reduction block then downsamples to 8×8×2080, followed by the Inception-ResNet-C blocks (block8_1 through block8_10), each built from 192/224/256-filter branches, a block8_N_mixed concatenation, a 933,920-parameter block8_N_conv, and a residual add. The tail of block8_10, the final conv_7b / conv_7b_bn / conv_7b_ac layers (8×8×1536), and the parameter totals are shown below.]
\n__________________________________________________________________________________________________\nactivation_200 (Activation) (None, 8, 8, 192) 0 batch_normalization_200[0][0] \n__________________________________________________________________________________________________\nconv2d_201 (Conv2D) (None, 8, 8, 224) 129024 activation_200[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_201 (BatchN (None, 8, 8, 224) 672 conv2d_201[0][0] \n__________________________________________________________________________________________________\nactivation_201 (Activation) (None, 8, 8, 224) 0 batch_normalization_201[0][0] \n__________________________________________________________________________________________________\nconv2d_199 (Conv2D) (None, 8, 8, 192) 399360 block8_9_ac[0][0] \n__________________________________________________________________________________________________\nconv2d_202 (Conv2D) (None, 8, 8, 256) 172032 activation_201[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_199 (BatchN (None, 8, 8, 192) 576 conv2d_199[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_202 (BatchN (None, 8, 8, 256) 768 conv2d_202[0][0] \n__________________________________________________________________________________________________\nactivation_199 (Activation) (None, 8, 8, 192) 0 batch_normalization_199[0][0] \n__________________________________________________________________________________________________\nactivation_202 (Activation) (None, 8, 8, 256) 0 batch_normalization_202[0][0] \n__________________________________________________________________________________________________\nblock8_10_mixed (Concatenate) (None, 8, 8, 448) 0 activation_199[0][0] \n activation_202[0][0] \n__________________________________________________________________________________________________\nblock8_10_conv (Conv2D) (None, 8, 8, 2080) 933920 block8_10_mixed[0][0] \n__________________________________________________________________________________________________\nblock8_10 (Lambda) (None, 8, 8, 2080) 0 block8_9_ac[0][0] \n block8_10_conv[0][0] \n__________________________________________________________________________________________________\nconv_7b (Conv2D) (None, 8, 8, 1536) 3194880 block8_10[0][0] \n__________________________________________________________________________________________________\nconv_7b_bn (BatchNormalization) (None, 8, 8, 1536) 4608 conv_7b[0][0] \n__________________________________________________________________________________________________\nconv_7b_ac (Activation) (None, 8, 8, 1536) 0 conv_7b_bn[0][0] \n==================================================================================================\nTotal params: 54,336,736\nTrainable params: 30,659,872\nNon-trainable params: 23,676,864\n__________________________________________________________________________________________________\n" ] ], [ [ "Add Regression Head", "_____no_output_____" ] ], [ [ "global_average_layer = tf.keras.layers.GlobalAveragePooling2D()\nfeature_batch_average = global_average_layer(feature_batch)\nprint(feature_batch_average.shape)", "(64, 1536)\n" ], [ "prediction_layer = tf.keras.layers.Dense(3)\nprediction_batch = prediction_layer(feature_batch_average)\nprint(prediction_batch.shape)", "(64, 3)\n" ], [ "model = tf.keras.Sequential([\n base_model,\n 
global_average_layer,\n prediction_layer\n])", "_____no_output_____" ], [ "base_learning_rate = 0.0001\nmodel.compile(optimizer=tf.keras.optimizers.Nadam(lr=base_learning_rate),\n loss=tf.keras.losses.MeanAbsoluteError(),\n metrics=['mae', 'mse', 'accuracy'])", "_____no_output_____" ], [ "model.summary()", "Model: \"sequential\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ninception_resnet_v2 (Model) (None, 8, 8, 1536) 54336736 \n_________________________________________________________________\nglobal_average_pooling2d (Gl (None, 1536) 0 \n_________________________________________________________________\ndense (Dense) (None, 3) 4611 \n=================================================================\nTotal params: 54,341,347\nTrainable params: 30,664,483\nNon-trainable params: 23,676,864\n_________________________________________________________________\n" ], [ "from google.colab import drive\ndrive.mount('/content/gdrive')\n\nfolder = '/content/gdrive/My Drive/Models'\nif os.path.isdir(folder) == False:\n os.makedirs(folder)", "Go to this URL in a browser: https://accounts.google.com/o/oauth2/auth?client_id=947318989803-6bn6qk8qdgf4n4g3pfee6491hc0brc4i.apps.googleusercontent.com&redirect_uri=urn%3aietf%3awg%3aoauth%3a2.0%3aoob&response_type=code&scope=email%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdocs.test%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive.photos.readonly%20https%3a%2f%2fwww.googleapis.com%2fauth%2fpeopleapi.readonly\n\nEnter your authorization code:\n··········\nMounted at /content/gdrive\n" ], [ "# Include the epoch in the file name (uses `str.format`)\ncheckpoint_path = folder + \"/cp-{epoch:04d}.ckpt\"\ncheckpoint_dir = os.path.dirname(checkpoint_path)\n\n# Create a callback that saves the model's weights every 5 epochs\ncp_callback = tf.keras.callbacks.ModelCheckpoint(\n filepath=checkpoint_path, \n verbose=1, \n save_weights_only=True,\n period=5)", "WARNING:tensorflow:`period` argument is deprecated. 
Please use `save_freq` to specify the frequency in number of batches seen.\n" ], [ "history = model.fit(batches,epochs=25, callbacks=[cp_callback])", "Epoch 1/25\n75/75 [==============================] - 38s 500ms/step - loss: 2.6666 - mae: 2.6666 - mse: 10.1546 - accuracy: 0.3428\nEpoch 2/25\n75/75 [==============================] - 37s 487ms/step - loss: 2.3121 - mae: 2.3121 - mse: 8.4882 - accuracy: 0.4169\nEpoch 3/25\n75/75 [==============================] - 37s 487ms/step - loss: 2.0193 - mae: 2.0193 - mse: 7.0606 - accuracy: 0.4943\nEpoch 4/25\n75/75 [==============================] - 36s 486ms/step - loss: 1.8176 - mae: 1.8176 - mse: 6.0534 - accuracy: 0.5586\nEpoch 5/25\n75/75 [==============================] - ETA: 0s - loss: 1.6107 - mae: 1.6107 - mse: 4.9274 - accuracy: 0.6087\nEpoch 00005: saving model to /content/gdrive/My Drive/Models/cp-0005.ckpt\n75/75 [==============================] - 42s 556ms/step - loss: 1.6107 - mae: 1.6107 - mse: 4.9274 - accuracy: 0.6087\nEpoch 6/25\n75/75 [==============================] - 36s 486ms/step - loss: 1.3488 - mae: 1.3488 - mse: 3.5469 - accuracy: 0.6520\nEpoch 7/25\n75/75 [==============================] - 36s 486ms/step - loss: 1.1941 - mae: 1.1941 - mse: 2.8907 - accuracy: 0.6868\nEpoch 8/25\n75/75 [==============================] - 36s 486ms/step - loss: 1.0832 - mae: 1.0832 - mse: 2.3217 - accuracy: 0.7504\nEpoch 9/25\n75/75 [==============================] - 36s 486ms/step - loss: 0.9515 - mae: 0.9515 - mse: 1.8312 - accuracy: 0.7751\nEpoch 10/25\n75/75 [==============================] - ETA: 0s - loss: 0.8713 - mae: 0.8713 - mse: 1.5798 - accuracy: 0.7804\nEpoch 00010: saving model to /content/gdrive/My Drive/Models/cp-0010.ckpt\n75/75 [==============================] - 42s 556ms/step - loss: 0.8713 - mae: 0.8713 - mse: 1.5798 - accuracy: 0.7804\nEpoch 11/25\n75/75 [==============================] - 36s 486ms/step - loss: 0.8460 - mae: 0.8460 - mse: 1.5365 - accuracy: 0.7808\nEpoch 12/25\n75/75 [==============================] - 37s 487ms/step - loss: 0.7530 - mae: 0.7530 - mse: 1.2280 - accuracy: 0.8126\nEpoch 13/25\n75/75 [==============================] - 36s 486ms/step - loss: 0.6777 - mae: 0.6777 - mse: 1.0555 - accuracy: 0.8457\nEpoch 14/25\n42/75 [===============>..............] 
- ETA: 15s - loss: 0.6531 - mae: 0.6531 - mse: 1.1037 - accuracy: 0.8277" ], [ "acc = history.history['accuracy']\nloss = history.history['loss']\nmae = history.history['mae']\nmse = history.history['mse']\n\nplt.figure(figsize=(8, 8))\nplt.subplot(2, 1, 1)\nplt.plot(acc, label='Accuracy')\nplt.legend(loc='lower right')\nplt.ylabel('Accuracy')\nplt.ylim([min(plt.ylim()),1])\nplt.title('Training Accuracy')\n\nplt.subplot(2, 1, 2)\nplt.plot(loss, label='Loss')\nplt.legend(loc='upper right')\nplt.ylabel('MAE')\nplt.ylim([0,5.0])\nplt.title('Training Loss')\nplt.xlabel('epoch')\nplt.show()", "_____no_output_____" ], [ "def get_test_images():\n directory = '/content/'\n images = []\n for file in listdir(directory): \n if file.endswith(\".jpg\"):\n images.append(file)\n return images\n\nimages = get_test_images()\nprint(images)", "_____no_output_____" ] ], [ [ "", "_____no_output_____" ] ], [ [ "#@title Image Prediction { run: \"auto\", vertical-output: true, display-mode: \"form\" }\n\nimage_idx = 42 #@param {type:\"slider\", min:0, max:100, step:1}\ncnt = len(images)\nif cnt > 0:\n image_idx = image_idx if image_idx < cnt else cnt - 1\n image = images[image_idx]\n x, _ = load_image(image)\n\n img = x[np.newaxis, ...]\n predict = model.predict(img)\n print(predict+5)\n print(image_idx,image)\n plt.imshow(x)", "_____no_output_____" ] ] ]
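The checkpoint callback above writes `cp-{epoch:04d}.ckpt` weight files to the Drive folder every 5 epochs, but the notebook never shows them being read back. The following is a minimal, hypothetical sketch (not part of the original notebook) of how the most recent checkpoint could be restored, for example after a Colab session drops; it assumes `model` has been rebuilt with the same architecture and that `checkpoint_dir` still points at the folder used during training.

```python
# Minimal sketch (assumption): reload the newest weights written by cp_callback above.
# Assumes `model` has the same architecture and `checkpoint_dir` is the Drive folder
# defined in the checkpoint cell.
latest = tf.train.latest_checkpoint(checkpoint_dir)  # e.g. ".../cp-0010.ckpt"
if latest is not None:
    model.load_weights(latest)
    print("Restored weights from", latest)
else:
    print("No checkpoint found in", checkpoint_dir)
```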
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
cba2408151f69347239c3e05d4c8ce849068a92d
17,003
ipynb
Jupyter Notebook
2-Regression/1-Tools/solution/lesson_1-R.ipynb
jaypatel31/ML-For-Beginners
4566d412e23400d3569270243334fe87f54b48aa
[ "MIT" ]
5
2021-11-06T15:59:22.000Z
2022-03-10T16:18:11.000Z
2-Regression/1-Tools/solution/lesson_1-R.ipynb
jaypatel31/ML-For-Beginners
4566d412e23400d3569270243334fe87f54b48aa
[ "MIT" ]
3
2022-02-14T15:44:11.000Z
2022-02-27T18:33:19.000Z
2-Regression/1-Tools/solution/lesson_1-R.ipynb
jaypatel31/ML-For-Beginners
4566d412e23400d3569270243334fe87f54b48aa
[ "MIT" ]
3
2021-11-16T14:41:59.000Z
2022-03-30T04:50:26.000Z
38.908467
606
0.560431
[ [ [ "#Build a regression model: Get started with R and Tidymodels for regression models", "_____no_output_____" ], [ "## Introduction to Regression - Lesson 1\n\n#### Putting it into perspective\n\n✅ There are many types of regression methods, and which one you pick depends on the answer you're looking for. If you want to predict the probable height for a person of a given age, you'd use `linear regression`, as you're seeking a **numeric value**. If you're interested in discovering whether a type of cuisine should be considered vegan or not, you're looking for a **category assignment** so you would use `logistic regression`. You'll learn more about logistic regression later. Think a bit about some questions you can ask of data, and which of these methods would be more appropriate.\n\nIn this section, you will work with a [small dataset about diabetes](https://www4.stat.ncsu.edu/~boos/var.select/diabetes.html). Imagine that you wanted to test a treatment for diabetic patients. Machine Learning models might help you determine which patients would respond better to the treatment, based on combinations of variables. Even a very basic regression model, when visualized, might show information about variables that would help you organize your theoretical clinical trials.\n\nThat said, let's get started on this task!\n\n![Artwork by \\@allison_horst](../images/encouRage.jpg)<br>Artwork by @allison_horst", "_____no_output_____" ], [ "## 1. Loading up our tool set\n\nFor this task, we'll require the following packages:\n\n- `tidyverse`: The [tidyverse](https://www.tidyverse.org/) is a [collection of R packages](https://www.tidyverse.org/packages) designed to makes data science faster, easier and more fun!\n\n- `tidymodels`: The [tidymodels](https://www.tidymodels.org/) framework is a [collection of packages](https://www.tidymodels.org/packages/) for modeling and machine learning.\n\nYou can have them installed as:\n\n`install.packages(c(\"tidyverse\", \"tidymodels\"))`\n\nThe script below checks whether you have the packages required to complete this module and installs them for you in case some are missing.", "_____no_output_____" ] ], [ [ "if (!require(\"pacman\")) install.packages(\"pacman\")\npacman::p_load(tidyverse, tidymodels)", "Loading required package: pacman\n\n" ] ], [ [ "Now, let's load these awesome packages and make them available in our current R session.(This is for mere illustration, `pacman::p_load()` already did that for you)", "_____no_output_____" ] ], [ [ "# load the core Tidyverse packages\nlibrary(tidyverse)\n\n# load the core Tidymodels packages\nlibrary(tidymodels)\n", "_____no_output_____" ] ], [ [ "## 2. The diabetes dataset\n\nIn this exercise, we'll put our regression skills into display by making predictions on a diabetes dataset. 
The [diabetes dataset](https://www4.stat.ncsu.edu/~boos/var.select/diabetes.rwrite1.txt) includes `442 samples` of data around diabetes, with 10 predictor feature variables, `age`, `sex`, `body mass index`, `average blood pressure`, and `six blood serum measurements` as well as an outcome variable `y`: a quantitative measure of disease progression one year after baseline.\n\n|Number of observations|442|\n|----------------------|:---|\n|Number of predictors|First 10 columns are numeric predictive|\n|Outcome/Target|Column 11 is a quantitative measure of disease progression one year after baseline|\n|Predictor Information|- age in years\n||- sex\n||- bmi body mass index\n||- bp average blood pressure\n||- s1 tc, total serum cholesterol\n||- s2 ldl, low-density lipoproteins\n||- s3 hdl, high-density lipoproteins\n||- s4 tch, total cholesterol / HDL\n||- s5 ltg, possibly log of serum triglycerides level\n||- s6 glu, blood sugar level|\n\n\n\n\n> 🎓 Remember, this is supervised learning, and we need a named 'y' target.\n\nBefore you can manipulate data with R, you need to import the data into R's memory, or build a connection to the data that R can use to access the data remotely.\n\n> The [readr](https://readr.tidyverse.org/) package, which is part of the Tidyverse, provides a fast and friendly way to read rectangular data into R.\n\nNow, let's load the diabetes dataset provided in this source URL: <https://www4.stat.ncsu.edu/~boos/var.select/diabetes.html>\n\nAlso, we'll perform a sanity check on our data using `glimpse()` and display the first 5 rows using `slice()`.\n\nBefore going any further, let's also introduce something you will encounter often in R code 🥁🥁: the pipe operator `%>%`\n\nThe pipe operator (`%>%`) performs operations in logical sequence by passing an object forward into a function or call expression. You can think of the pipe operator as saying \"and then\" in your code.", "_____no_output_____" ] ], [ [ "# Import the data set\ndiabetes <- read_table2(file = \"https://www4.stat.ncsu.edu/~boos/var.select/diabetes.rwrite1.txt\")\n\n\n# Get a glimpse and dimensions of the data\nglimpse(diabetes)\n\n\n# Select the first 5 rows of the data\ndiabetes %>% \n slice(1:5)", "_____no_output_____" ] ], [ [ "`glimpse()` shows us that this data has 442 rows and 11 columns with all the columns being of data type `double` \n\n<br>\n\n\n\n> glimpse() and slice() are functions in [`dplyr`](https://dplyr.tidyverse.org/). Dplyr, part of the Tidyverse, is a grammar of data manipulation that provides a consistent set of verbs that help you solve the most common data manipulation challenges\n\n<br>\n\nNow that we have the data, let's narrow down to one feature (`bmi`) to target for this exercise. This will require us to select the desired columns. So, how do we do this?\n\n[`dplyr::select()`](https://dplyr.tidyverse.org/reference/select.html) allows us to *select* (and optionally rename) columns in a data frame.", "_____no_output_____" ] ], [ [ "# Select predictor feature `bmi` and outcome `y`\ndiabetes_select <- diabetes %>% \n select(c(bmi, y))\n\n# Print the first 10 rows\ndiabetes_select %>% \n slice(1:10)", "_____no_output_____" ] ], [ [ "## 3. 
Training and Testing data\n\nIt's common practice in supervised learning to *split* the data into two subsets; a (typically larger) set with which to train the model, and a smaller \"hold-back\" set with which to see how the model performed.\n\nNow that we have data ready, we can see if a machine can help determine a logical split between the numbers in this dataset. We can use the [rsample](https://tidymodels.github.io/rsample/) package, which is part of the Tidymodels framework, to create an object that contains the information on *how* to split the data, and then two more rsample functions to extract the created training and testing sets:\n", "_____no_output_____" ] ], [ [ "set.seed(2056)\n# Split 67% of the data for training and the rest for testing\ndiabetes_split <- diabetes_select %>% \n initial_split(prop = 0.67)\n\n# Extract the resulting train and test sets\ndiabetes_train <- training(diabetes_split)\ndiabetes_test <- testing(diabetes_split)\n\n# Print the first 10 rows of the training set\ndiabetes_train %>% \n slice(1:10)", "_____no_output_____" ] ], [ [ "## 4. Train a linear regression model with Tidymodels\n\nNow we are ready to train our model!\n\nIn Tidymodels, you specify models using `parsnip()` by specifying three concepts:\n\n- Model **type** differentiates models such as linear regression, logistic regression, decision tree models, and so forth.\n\n- Model **mode** includes common options like regression and classification; some model types support either of these while some only have one mode.\n\n- Model **engine** is the computational tool which will be used to fit the model. Often these are R packages, such as **`\"lm\"`** or **`\"ranger\"`**\n\nThis modeling information is captured in a model specification, so let's build one!", "_____no_output_____" ] ], [ [ "# Build a linear model specification\nlm_spec <- \n # Type\n linear_reg() %>% \n # Engine\n set_engine(\"lm\") %>% \n # Mode\n set_mode(\"regression\")\n\n\n# Print the model specification\nlm_spec", "_____no_output_____" ] ], [ [ "After a model has been *specified*, the model can be `estimated` or `trained` using the [`fit()`](https://parsnip.tidymodels.org/reference/fit.html) function, typically using a formula and some data.\n\n`y ~ .` means we'll fit `y` as the predicted quantity/target, explained by all the predictors/features, i.e., `.` (in this case, we only have one predictor: `bmi` )", "_____no_output_____" ] ], [ [ "# Build a linear model specification\nlm_spec <- linear_reg() %>% \n set_engine(\"lm\") %>%\n set_mode(\"regression\")\n\n\n# Train a linear regression model\nlm_mod <- lm_spec %>% \n fit(y ~ ., data = diabetes_train)\n\n# Print the model\nlm_mod", "_____no_output_____" ] ], [ [ "From the model output, we can see the coefficients learned during training. They represent the coefficients of the line of best fit that gives us the lowest overall error between the actual and predicted variable.\n<br>\n\n## 5. Make predictions on the test set\n\nNow that we've trained a model, we can use it to predict the disease progression y for the test dataset using [parsnip::predict()](https://parsnip.tidymodels.org/reference/predict.model_fit.html). This will be used to draw the line between data groups.", "_____no_output_____" ] ], [ [ "# Make predictions for the test set\npredictions <- lm_mod %>% \n predict(new_data = diabetes_test)\n\n# Print out some of the predictions\npredictions %>% \n slice(1:5)", "_____no_output_____" ] ], [ [ "Woohoo! 
💃🕺 We just trained a model and used it to make predictions!\n\nWhen making predictions, the tidymodels convention is to always produce a tibble/data frame of results with standardized column names. This makes it easy to combine the original data and the predictions in a usable format for subsequent operations such as plotting.\n\n`dplyr::bind_cols()` efficiently binds multiple data frames by column.", "_____no_output_____" ] ], [ [ "# Combine the predictions and the original test set\nresults <- diabetes_test %>% \n bind_cols(predictions)\n\n\nresults %>% \n slice(1:5)", "_____no_output_____" ] ], [ [ "## 6. Plot modelling results\n\nNow, it's time to see this visually 📈. We'll create a scatter plot of all the `y` and `bmi` values of the test set, then use the predictions to draw a line in the most appropriate place, between the model's data groupings.\n\nR has several systems for making graphs, but `ggplot2` is one of the most elegant and most versatile. This allows you to compose graphs by **combining independent components**.", "_____no_output_____" ] ], [ [ "# Set a theme for the plot\ntheme_set(theme_light())\n# Create a scatter plot\nresults %>% \n ggplot(aes(x = bmi)) +\n # Add a scatter plot\n geom_point(aes(y = y), size = 1.6) +\n # Add a line plot\n geom_line(aes(y = .pred), color = \"blue\", size = 1.5)", "_____no_output_____" ] ], [ [ "> ✅ Think a bit about what's going on here. A straight line is running through many small dots of data, but what is it doing exactly? Can you see how you should be able to use this line to predict where a new, unseen data point should fit in relation to the plot's y axis? Try to put into words the practical use of this model.\n\nCongratulations, you built your first linear regression model, created a prediction with it, and displayed it in a plot!\n", "_____no_output_____" ] ] ]
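The lesson notes that the fitted coefficients describe the line of best fit with the lowest overall error. As a small supplement (generic notation, not taken from the lesson), the `lm` engine used above chooses those coefficients by ordinary least squares:

```latex
% Generic notation: the single-predictor model fitted above and the
% least-squares criterion that lm minimises to pick the coefficients.
\[
\hat{y}_i = \hat{\beta}_0 + \hat{\beta}_1\,\mathrm{bmi}_i,
\qquad
(\hat{\beta}_0, \hat{\beta}_1) = \arg\min_{\beta_0,\beta_1} \sum_{i=1}^{n} \bigl(y_i - \beta_0 - \beta_1\,\mathrm{bmi}_i\bigr)^2
\]
```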
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
cba240cee7447ff49ad8cf466e5575a0a8271e95
13,838
ipynb
Jupyter Notebook
GAN_analysis_psnr_ssim.ipynb
TankyFranky/Snow_Removal_GAN_ELEC825_Final_Project
16f912d08f4a760743117d62fb459d2af9cff35d
[ "MIT" ]
1
2022-02-25T04:01:56.000Z
2022-02-25T04:01:56.000Z
GAN_analysis_psnr_ssim.ipynb
TankyFranky/Snow_Removal_GAN_ELEC825_Final_Project
16f912d08f4a760743117d62fb459d2af9cff35d
[ "MIT" ]
null
null
null
GAN_analysis_psnr_ssim.ipynb
TankyFranky/Snow_Removal_GAN_ELEC825_Final_Project
16f912d08f4a760743117d62fb459d2af9cff35d
[ "MIT" ]
null
null
null
33.105263
389
0.564749
[ [ [ "# Code based on souce from https://machinelearningmastery.com/how-to-develop-a-pix2pix-gan-for-image-to-image-translation/\n\n# Required imports for dataset import, preprocessing and compression\n\n\"\"\"\nGAN analysis file. Takes in trained .h5 files created while training the network.\nGenerates test files from testing synthetic input photos (files the GAN has never seen before).\nGenerates psnr and ssim ratings for each model/.h5 files and loads the results into excel files.\n\"\"\"\n\nfrom os import listdir\nimport numpy\nfrom numpy import asarray\nfrom numpy import vstack\nfrom numpy import savez_compressed\nfrom numpy import load\nfrom numpy import expand_dims\nfrom numpy.random import randint\nfrom keras.preprocessing.image import img_to_array\nfrom keras.preprocessing.image import load_img\nfrom keras.models import load_model\nimport matplotlib\nfrom matplotlib import pyplot\nimport glob\n", "_____no_output_____" ], [ "# Load images from a directory to memory\ndef load_images(path, size=(256,256)):\n pic_list = list()\n\n # enumerate filenames in directory, assume all are images\n\n for filename in listdir(path):\n\n # load and resize the image (the resizing is not being used in our implementation)\n pixels = load_img(path + filename, target_size=size)\n\n # convert to numpy array\n pic_list.append(img_to_array(pixels))\n\n return asarray(pic_list)\n\n# Load and prepare test or validation images from compressed image files to memory\n\ndef load_numpy_images(filename):\n\n # Load the compressed numpy array(s)\n data = load(filename)\n\n img_sets =[]\n\n for item in data:\n img_sets.append((data[item]- 127.5) / 127.5)\n\n return img_sets\n\n# Plot source, generated and target images all in one output\n\ndef plot_images(src_img, gen_img, tar_img):\n images = vstack((src_img, gen_img, tar_img))\n \n # scale from [-1,1] to [0,1]\n images = (images + 1) / 2.0\n\n titles = ['Source', 'Generated', 'Expected']\n # plot images row by row\n for i in range(len(images)):\n # define subplot\n pyplot.subplot(1, 3, 1 + i)\n # turn off axis\n pyplot.axis('off')\n # plot raw pixel data\n pyplot.imshow(images[i])\n # show title\n pyplot.title(titles[i])\n pyplot.show()\n\n# Load a single image\ndef load_image(filename, size=(256,256)):\n # load image with the preferred size\n pixels = load_img(filename, target_size=size)\n \n # convert to numpy array\n pixels = img_to_array(pixels)\n \n # scale from [0,255] to [-1,1]\n pixels = (pixels - 127.5) / 127.5\n \n # reshape to 1 sample\n pixels = expand_dims(pixels, 0)\n \n return pixels\n", "_____no_output_____" ], [ "####################\n# Convert the training dataset to a compressed numpy array (NOT USED FOR METRICS)\n####################\n\n# Source images path (synthetic images)\npath = 'data/training/synthetic/'\nsrc_images = load_images(path)\n\n# Ground truth images path\npath = 'data/training/gt/'\ntar_images = load_images(path)\n\n# Perform a quick check on shape and sizes\nprint('Loaded: ', src_images.shape, tar_images.shape)\n\n# Save as a compressed numpy array\nfilename = 'data/training/train_256.npz'\nsavez_compressed(filename, src_images, tar_images)\nprint('Saved dataset: ', filename)\n\n", "_____no_output_____" ], [ "###################\n# Convert the validation dataset to a compressed numpy array (.npz)\n###################\n\n# Source images path\npath = 'data/validation/synthetic/'\nsrc_images = load_images(path)\n\n# Ground truth images path\npath = 'data/validation/gt/'\ntar_images = load_images(path)\n\n# Perform a quick 
check on shape and sizes\nprint('Loaded: ', src_images.shape, tar_images.shape)\n\n# Save as a compressed numpy array\nfilename = 'data/validation/validation_256.npz'\nsavez_compressed(filename, src_images, tar_images)\nprint('Saved dataset: ', filename)\n", "_____no_output_____" ], [ "# Load the validation dataset from the compressed numpy array to memory\n\nimg_sets = load_numpy_images('data/validation/validation_256.npz')\nsrc_images = img_sets[0]\nprint('Loaded: ', src_images.shape)\n\n#tar_images = img_sets[1]\n#print('Loaded: ', tar_images.shape)\n\n# Gain some memory\ndel img_sets\n\n", "_____no_output_____" ], [ "# Get the list of gt image names so outputs can be named correctly\npath = 'data/validation/gt/'\nimg_list = os.listdir(path)\n\nexp_path = 'models/exp6/'\nmodel_list = os.listdir(exp_path)\n\n# loop through model/.h5 files \nfor model in model_list:\n    model_dir = 'outputs/'+model[:-3]\n    os.mkdir(model_dir)\n\n    # load model weights to be used in the generator\n    predictor = load_model(exp_path+model)\n    names = 0\n\n    for i in range(0, len(src_images),10 ):\n        # push image through generator\n        gen_images = predictor.predict(src_images[i:i+10])\n        \n        # name and export file\n        for img in range(len(gen_images)):\n            filename = model_dir+'/'+img_list[names]\n            names += 1\n            matplotlib.image.imsave(filename, (gen_images[img]+1)/2.0)\n\n\n", "_____no_output_____" ], [ "# Code to evaluate generated images from each model run for PSNR and SSIM\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport csv\nimport os\nimport re\nimport cv2\nimport pandas as pd\n\nfrom skimage import data, img_as_float\nfrom skimage.metrics import structural_similarity as ssim\nfrom skimage.metrics import peak_signal_noise_ratio as psnr\n\nexp_dir = 'outputs/' # results directory\ngt_dir = 'data/validation/gt/' # ground truth directory\nimg_list = os.listdir(gt_dir)\n\ncolumn_names =[]\nexp_list = [ f.name for f in os.scandir(exp_dir) if f.is_dir() ]\nfor exp in exp_list:\n\n    model_list = [ f.name for f in os.scandir('outputs/'+exp+'/') if f.is_dir() ]\n    \n    for model in model_list:\n        column_names.append(exp+'_'+model)\n# create data frames for excel output\npsnr_df = pd.DataFrame(columns = column_names)\nssim_df = pd.DataFrame(columns = column_names)\n\ni=0\npsnr_master=[]\nssim_master=[]\nfor img in img_list: # loop through every image created by the generator\n    i+=1\n    # load image and create a grayscale for ssim measurement\n    gt = cv2.imread(gt_dir+img)\n    gt_gray = cv2.cvtColor(gt, cv2.COLOR_BGR2GRAY)\n\n    psnr_list=[]\n    ssim_list =[]\n\n    exp_list = [f.name for f in os.scandir(exp_dir) if f.is_dir()]\n    # for each experiment\n    for exp in exp_list:\n        \n        model_list = [ f.name for f in os.scandir('outputs/'+exp+'/') if f.is_dir() ]\n        # for each generator weights/model (outputted h5 file from experiment)\n        for model in model_list:\n            pred = cv2.imread(exp_dir+exp+'/'+model+'/'+img)\n            pred_gray = cv2.cvtColor(pred, cv2.COLOR_BGR2GRAY)\n            \n            # calculate psnr and ssim\n            psnr_list.append(psnr(gt, pred, data_range=pred.max() - pred.min()))\n            ssim_list.append(ssim(gt_gray, pred_gray, data_range=pred.max() - pred.min()))\n    \n\n    psnr_master.append(psnr_list)\n    ssim_master.append(ssim_list)\n\n# export for excel use \npsnr_df = pd.DataFrame(psnr_master, columns = column_names)\npsnr_df.index = img_list\npsnr_df.to_csv(\"PSNR.csv\")\n\nssim_df = pd.DataFrame(ssim_master, columns = column_names)\nssim_df.index = img_list\nssim_df.to_csv(\"SSIM.csv\")", "_____no_output_____" ], [ "import PIL\n\n\npic_list = ['sidewalk winter -grayscale 
-gray_05189.jpg', 'sidewalk winter -grayscale -gray_07146.jpg', 'snow_animal_00447.jpg', 'snow_animal_03742.jpg', 'snow_intersection_00058.jpg', 'snow_nature_1_105698.jpg','snow_nature_1_108122.jpg','snow_nature_1_108523.jpg','snow_walk_00080.jpg','winter intersection -snow_00399.jpg','winter__street_03783.jpg','winter__street_05208.jpg']\npic_list_dims = [(426, 640), (538, 640), (640, 427), (432, 640), (480, 640), (640, 527), (480, 640), (427, 640), (640, 427), (502, 640), (269, 640), (427, 640)]\n\ni=0\n\n# load an image\ndef load_image(filename, size=(256,256)):\n\t# load image with the preferred size\n\tpixels = load_img(filename, target_size=size)\n\t# convert to numpy array\n\tpixels = img_to_array(pixels)\n\t# scale from [0,255] to [-1,1]\n\tpixels = (pixels - 127.5) / 127.5\n\t# reshape to 1 sample\n\tpixels = expand_dims(pixels, 0)\n\treturn pixels\n\nsrc_path = 'data/realistic_full/'\nsrc_filename = pic_list[i]\nsrc_image = load_image(src_path+src_filename)\n\nprint('Loaded', src_image.shape)\n\nmodel_path = 'models/Experiment4/'\nmodel_filename = 'model_125000.h5'\n\npredictor = load_model(model_path+model_filename)\ngen_img = predictor.predict(src_image)\n\n# scale from [-1,1] to [0,1]\ngen_img = (gen_img[0] + 1) / 2.0\n# plot the image\npyplot.imshow(gen_img)\npyplot.axis('off')\npyplot.show()\n\ngen_path = 'final/'\ngen_filename = src_filename\n\nmatplotlib.image.imsave(gen_path+gen_filename, gen_img)\ngen_img = load_img(gen_path+gen_filename, target_size=pic_list_dims[i])\npyplot.imshow(gen_img)\npyplot.axis('off')\npyplot.show()\nprint(gen_img)\ngen_img.save(gen_path+gen_filename)", "_____no_output_____" ], [ "pic_list = ['sidewalk winter -grayscale -gray_05189.jpg', 'sidewalk winter -grayscale -gray_07146.jpg', 'snow_animal_00447.jpg', 'snow_animal_03742.jpg', 'snow_intersection_00058.jpg', 'snow_nature_1_105698.jpg','snow_nature_1_108122.jpg','snow_nature_1_108523.jpg','snow_walk_00080.jpg','winter intersection -snow_00399.jpg','winter__street_03783.jpg','winter__street_05208.jpg']\nsrc_path = 'data/realistic_full/'\n\ndims=[]\nfor img in pic_list:\n pixels = load_img(src_path+img)\n dims.append(tuple(reversed(pixels.size)))\nprint(dims) ", "_____no_output_____" ] ] ]
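The evaluation cell above writes the per-image scores to `PSNR.csv` and `SSIM.csv` but does not aggregate them. The snippet below is a minimal, hypothetical follow-up (not in the original notebook) showing one way those CSVs could be reduced to a single mean PSNR and mean SSIM per model column; it assumes the two files produced above sit in the working directory.

```python
# Minimal sketch (assumption): summarise the per-image CSVs written above into one
# mean PSNR / mean SSIM value per "experiment_model" column.
import pandas as pd

psnr_scores = pd.read_csv("PSNR.csv", index_col=0)   # rows: image names, columns: experiment_model
ssim_scores = pd.read_csv("SSIM.csv", index_col=0)

summary = pd.DataFrame({
    "mean_psnr": psnr_scores.mean(axis=0),
    "mean_ssim": ssim_scores.mean(axis=0),
}).sort_values("mean_psnr", ascending=False)

print(summary)
```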
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]