hexsha
stringlengths
40
40
size
int64
6
14.9M
ext
stringclasses
1 value
lang
stringclasses
1 value
max_stars_repo_path
stringlengths
6
260
max_stars_repo_name
stringlengths
6
119
max_stars_repo_head_hexsha
stringlengths
40
41
max_stars_repo_licenses
list
max_stars_count
int64
1
191k
max_stars_repo_stars_event_min_datetime
stringlengths
24
24
max_stars_repo_stars_event_max_datetime
stringlengths
24
24
max_issues_repo_path
stringlengths
6
260
max_issues_repo_name
stringlengths
6
119
max_issues_repo_head_hexsha
stringlengths
40
41
max_issues_repo_licenses
list
max_issues_count
int64
1
67k
max_issues_repo_issues_event_min_datetime
stringlengths
24
24
max_issues_repo_issues_event_max_datetime
stringlengths
24
24
max_forks_repo_path
stringlengths
6
260
max_forks_repo_name
stringlengths
6
119
max_forks_repo_head_hexsha
stringlengths
40
41
max_forks_repo_licenses
list
max_forks_count
int64
1
105k
max_forks_repo_forks_event_min_datetime
stringlengths
24
24
max_forks_repo_forks_event_max_datetime
stringlengths
24
24
avg_line_length
float64
2
1.04M
max_line_length
int64
2
11.2M
alphanum_fraction
float64
0
1
cells
list
cell_types
list
cell_type_groups
list
4a2bc70d39f1644f740d69e29c900980e312bebc
4,891
ipynb
Jupyter Notebook
code/The_Duck_Problem.ipynb
fung18870/ModSimPy
ad7a586065a40b82e3e80405b64ceb4fe3b0d8ff
[ "MIT" ]
null
null
null
code/The_Duck_Problem.ipynb
fung18870/ModSimPy
ad7a586065a40b82e3e80405b64ceb4fe3b0d8ff
[ "MIT" ]
null
null
null
code/The_Duck_Problem.ipynb
fung18870/ModSimPy
ad7a586065a40b82e3e80405b64ceb4fe3b0d8ff
[ "MIT" ]
null
null
null
20.127572
78
0.431814
[ [ [ "# Configure Jupyter so figures appear in the notebook\n%matplotlib inline\n\n# Configure Jupyter to display the assigned value after an assignment\n%config InteractiveShell.ast_node_interactivity='last_expr_or_assign'\n\n# import functions from the modsim.py module\nfrom modsim import *", "_____no_output_____" ], [ "system = System(\n density_duck = 0.3,\n density_water = 1,\n r = 5)", "_____no_output_____" ], [ "pi", "_____no_output_____" ], [ "def error_func(d, system):\n unpack(system)\n \n volume_duck = 4/3*pi*r**3\n mass_duck = density_duck * volume_duck\n volume_water = pi/3 * (3*r*d**2-d**3)\n mass_water = density_water * volume_water\n \n print(d)\n \n return mass_duck - mass_water", "_____no_output_____" ], [ "error_func(3, system)", "3\n" ], [ "fsolve(error_func, 3, system)", "3\n[3]\n[3.]\n[3.]\n[3.00000004]\n[3.66666666]\n[3.63105175]\n[3.63257187]\n[3.63257491]\n[3.63257491]\n" ], [ "error_func(3.63257491, system)", "3.63257491\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code" ] ]
4a2bda1e2d5149c62f3d406a76ffd9645605b8e0
425,224
ipynb
Jupyter Notebook
src/analysis.ipynb
achalagarwal/gcn-lpa
76ba1341a342d53b4039354e2f1f9ba820e2b5a3
[ "MIT" ]
1
2020-08-04T10:36:25.000Z
2020-08-04T10:36:25.000Z
src/analysis.ipynb
achalagarwal/gcn-lpa
76ba1341a342d53b4039354e2f1f9ba820e2b5a3
[ "MIT" ]
null
null
null
src/analysis.ipynb
achalagarwal/gcn-lpa
76ba1341a342d53b4039354e2f1f9ba820e2b5a3
[ "MIT" ]
null
null
null
381.02509
300,015
0.647666
[ [ [ "import pickle", "_____no_output_____" ], [ "fo = open('./data_stored','rb')", "_____no_output_____" ], [ "lpa_labels,gcn_labels,lambdaz = pickle.load(fo)", "_____no_output_____" ], [ "print(lpa_labels[0])", "[[0. 0. 1. 0. 0. 0.]\n [0. 0. 0. 0. 0. 1.]\n [0. 0. 1. 0. 0. 0.]\n ...\n [0. 0. 1. 0. 0. 0.]\n [0. 0. 0. 0. 0. 1.]\n [0. 0. 1. 0. 0. 0.]]\n" ], [ "import numpy as np", "_____no_output_____" ], [ "np.histogram(lpa_labels[0])\n", "_____no_output_____" ], [ "import matplotlib.pyplot as plt", "_____no_output_____" ], [ "# lpa labels of the first vertex of the graph\nlabels = [np.argmax(lpa_labels[t],axis=-1) for t in range(len(lpa_labels)) ]", "_____no_output_____" ], [ "from data_loader import load_data", "_____no_output_____" ], [ "data_actual = load_data('citeseer')", "_____no_output_____" ], [ "features, labels, adj, train_mask, val_mask, test_mask = data_actual", "_____no_output_____" ], [ "labels", "_____no_output_____" ], [ "gcn_labels[0]", "_____no_output_____" ], [ "a = np.argmax(gcn_labels,-1)", "_____no_output_____" ], [ "a", "_____no_output_____" ], [ "b = np.zeros((a.size, a.max()+1))", "_____no_output_____" ], [ "b = np.reshape(b, (200, -1, 6))", "_____no_output_____" ], [ "a = np.reshape(a, (200*3327))", "_____no_output_____" ], [ "a.shape", "_____no_output_____" ], [ "b[a] = 1", "_____no_output_____" ], [ "np.arange(a[0].size)", "_____no_output_____" ], [ "a.shape", "_____no_output_____" ], [ "c = np.zeros((a.size, a.max()+1))", "_____no_output_____" ], [ "c = np.reshape(c, (200, -1, 6))", "_____no_output_____" ], [ "c[a] = 0", "_____no_output_____" ], [ "c", "_____no_output_____" ], [ "b[np.arange(a.size),a] = 1", "_____no_output_____" ], [ "b", "_____no_output_____" ], [ "b = np.reshape(b, (200, -1, 6))", "_____no_output_____" ], [ "b", "_____no_output_____" ], [ "gcn_actual_diff = np.sum(np.absolute(labels - b),-1)", "_____no_output_____" ], [ "np.sum(gcn_actual_diff,-1)", "_____no_output_____" ], [ 
"np.bincount(np.ndarray.astype(gcn_actual_diff[199], dtype=np.int64))", "_____no_output_____" ], [ "np.where(gcn_actual_diff[0] == 1)", "_____no_output_____" ], [ "np.where(gcn_actual_diff[1] == 1)", "_____no_output_____" ], [ "incorrect_gcn = np.where(gcn_actual_diff[199] == 2)", "_____no_output_____" ], [ "# do the same for lpa labels", "_____no_output_____" ], [ "a = np.argmax(lpa_labels,-1)", "_____no_output_____" ], [ "b = np.zeros((a.size, a.max()+1))", "_____no_output_____" ], [ "a = np.reshape(a, (200*3327))", "_____no_output_____" ], [ "b[np.arange(a.size),a] = 1", "_____no_output_____" ], [ "b = np.reshape(b, (200, -1 , 6))", "_____no_output_____" ], [ "lpa_actual_diff = np.sum(np.absolute(labels - b),-1)", "_____no_output_____" ], [ "incorrect_lpa = np.where(lpa_actual_diff[199] == 2)", "_____no_output_____" ], [ "incorrect_gcn[0]", "_____no_output_____" ], [ "lpa_actual_diff[199][incorrect_gcn[0]]", "_____no_output_____" ], [ "len(incorrect_lpa[0])", "_____no_output_____" ], [ "labels[2407]", "_____no_output_____" ], [ "for l in lambdaz:\n print(l[2553])", 
"[0.]\n[0.11226829]\n[0.16716568]\n[0.15366364]\n[0.11738824]\n[0.07378596]\n[0.03330286]\n[-0.00308921]\n[-0.02962606]\n[-0.04640391]\n[-0.05362681]\n[-0.05640174]\n[-0.05845379]\n[-0.06693517]\n[-0.07082189]\n[-0.0731137]\n[-0.08228547]\n[-0.09980046]\n[-0.12091502]\n[-0.13644993]\n[-0.13777246]\n[-0.12655534]\n[-0.11240127]\n[-0.09644253]\n[-0.08485929]\n[-0.07836289]\n[-0.07258212]\n[-0.06981914]\n[-0.07408412]\n[-0.08591678]\n[-0.10296239]\n[-0.12203161]\n[-0.140181]\n[-0.152887]\n[-0.15911837]\n[-0.1613431]\n[-0.16332664]\n[-0.16841015]\n[-0.17681407]\n[-0.18762795]\n[-0.19992194]\n[-0.21172482]\n[-0.2204087]\n[-0.22341416]\n[-0.2196591]\n[-0.2098141]\n[-0.19592415]\n[-0.18053001]\n[-0.16631356]\n[-0.15547133]\n[-0.14929198]\n[-0.1480621]\n[-0.15108379]\n[-0.15674105]\n[-0.16319043]\n[-0.1689679]\n[-0.17312184]\n[-0.17526243]\n[-0.17537351]\n[-0.17363164]\n[-0.17060167]\n[-0.16706505]\n[-0.16374092]\n[-0.16127182]\n[-0.16028477]\n[-0.16066361]\n[-0.16195832]\n[-0.16369505]\n[-0.16543367]\n[-0.1668157]\n[-0.16779861]\n[-0.16838524]\n[-0.168655]\n[-0.16862044]\n[-0.16836005]\n[-0.16800073]\n[-0.16766967]\n[-0.16765712]\n[-0.16783138]\n[-0.16816039]\n[-0.16858284]\n[-0.16900141]\n[-0.16934066]\n[-0.16961193]\n[-0.16989783]\n[-0.17016155]\n[-0.17036022]\n[-0.1704368]\n[-0.17043961]\n[-0.17043584]\n[-0.17049129]\n[-0.17052809]\n[-0.17048075]\n[-0.17030166]\n[-0.17007436]\n[-0.16987417]\n[-0.16981022]\n[-0.16983689]\n[-0.16984367]\n[-0.16976781]\n[-0.16968825]\n[-0.16964472]\n[-0.16959203]\n[-0.16948585]\n[-0.16931362]\n[-0.16906609]\n[-0.16892676]\n[-0.16888169]\n[-0.16893668]\n[-0.16899712]\n[-0.16904331]\n[-0.16908319]\n[-0.16910458]\n[-0.1691713]\n[-0.16924563]\n[-0.1692778]\n[-0.16925069]\n[-0.16926796]\n[-0.16929549]\n[-0.16933708]\n[-0.16934108]\n[-0.16929615]\n[-0.16931276]\n[-0.16922751]\n[-0.16914384]\n[-0.16907547]\n[-0.1691187]\n[-0.16919822]\n[-0.16927033]\n[-0.16940458]\n[-0.16942399]\n[-0.16952424]\n[-0.169439]\n[-0.16945483]\n[-0.1693255]\n[-0.169312
94]\n[-0.16916656]\n[-0.16918775]\n[-0.16907592]\n[-0.16917638]\n[-0.1690194]\n[-0.16922907]\n[-0.16874111]\n[-0.1687346]\n[-0.16813071]\n[-0.16825133]\n[-0.16809881]\n[-0.1684111]\n[-0.16901717]\n[-0.16919593]\n[-0.16949845]\n[-0.1690242]\n[-0.16888476]\n[-0.16840143]\n[-0.16834862]\n[-0.16889686]\n[-0.16915212]\n[-0.16957667]\n[-0.16929034]\n[-0.169226]\n[-0.168861]\n[-0.16860548]\n[-0.16882879]\n[-0.16879567]\n[-0.16896061]\n[-0.16867561]\n[-0.16876613]\n[-0.16899856]\n[-0.1691115]\n[-0.16932227]\n[-0.16919947]\n[-0.16918283]\n[-0.16878871]\n[-0.16882069]\n[-0.16876544]\n[-0.16885475]\n[-0.16900093]\n[-0.16901751]\n[-0.16925728]\n[-0.1691707]\n[-0.16913273]\n[-0.16878325]\n[-0.16882581]\n[-0.16879507]\n[-0.16890402]\n[-0.16903167]\n[-0.16917876]\n[-0.16922217]\n[-0.16913728]\n[-0.16923358]\n[-0.16911078]\n[-0.16909863]\n[-0.16881313]\n[-0.16889646]\n[-0.16867925]\n[-0.16889987]\n[-0.16867492]\n[-0.16887979]\n[-0.16872531]\n[-0.16892914]\n" ], [ "np.histogram(lambdaz[199], 4)", "_____no_output_____" ], [ "# find which lambda is what", "_____no_output_____" ], [ "gcn_argmax_labels = [np.argmax(gcn_labels[t],axis=-1) for t in range(len(gcn_labels)) ]", "_____no_output_____" ], [ "counts = [0,0,0,0,0,0]\nfor l in gcn_argmax_labels:\n for i in l:\n counts[i] +=1\n # print(l)", "_____no_output_____" ], [ "counts = [0,0,0,0,0,0]\nfor i in gcn_argmax_labels[1]:\n # for i in l:\n counts[i] +=1\n # print(l)", "_____no_output_____" ], [ "counts", "_____no_output_____" ], [ "for l in labels:\n for i in l:\n if i != 1 and i!=0:\n print(i)\n # print(l)", 
"3\n2\n3\n2\n3\n2\n3\n3\n2\n3\n3\n3\n2\n3\n3\n3\n3\n2\n3\n2\n2\n3\n2\n2\n3\n2\n3\n4\n3\n4\n2\n3\n2\n3\n2\n3\n2\n3\n3\n2\n2\n3\n3\n4\n2\n3\n4\n4\n2\n3\n3\n3\n2\n2\n3\n3\n4\n2\n3\n3\n4\n3\n2\n4\n3\n4\n2\n2\n4\n2\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n2\n2\n2\n2\n3\n2\n4\n2\n2\n3\n3\n3\n3\n2\n4\n3\n4\n2\n3\n2\n3\n3\n3\n3\n2\n3\n3\n2\n2\n2\n3\n3\n2\n2\n2\n3\n2\n4\n2\n3\n4\n3\n2\n2\n3\n3\n2\n4\n4\n2\n4\n4\n3\n2\n2\n3\n3\n2\n3\n3\n3\n2\n3\n2\n4\n2\n3\n2\n3\n3\n3\n3\n2\n3\n2\n3\n4\n2\n3\n4\n2\n2\n3\n4\n3\n2\n2\n2\n2\n3\n3\n3\n2\n3\n3\n3\n3\n3\n2\n3\n3\n2\n3\n3\n2\n3\n3\n3\n2\n2\n3\n3\n4\n2\n3\n3\n3\n3\n3\n2\n3\n4\n4\n3\n3\n3\n3\n2\n3\n3\n2\n2\n3\n3\n2\n3\n2\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n2\n2\n3\n3\n3\n4\n4\n3\n3\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n3\n3\n3\n3\n3\n3\n4\n4\n2\n2\n3\n3\n4\n4\n4\n4\n4\n2\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n2\n2\n2\n2\n3\n2\n3\n3\n3\n3\n2\n3\n2\n3\n2\n3\n4\n4\n4\n3\n3\n2\n3\n2\n3\n3\n3\n2\n2\n3\n3\n3\n2\n4\n4\n4\n2\n2\n2\n3\n2\n3\n2\n3\n3\n3\n4\n4\n3\n3\n3\n3\n3\n3\n3\n3\n2\n2\n2\n4\n3\n3\n2\n2\n3\n3\n2\n4\n2\n3\n4\n2\n4\n3\n2\n3\n2\n3\n3\n3\n3\n2\n2\n2\n2\n2\n3\n3\n3\n3\n2\n3\n3\n3\n2\n3\n2\n3\n3\n2\n3\n3\n4\n3\n3\n3\n4\n2\n4\n4\n3\n4\n3\n3\n3\n3\n3\n3\n2\n4\n3\n3\n2\n2\n2\n3\n3\n4\n2\n2\n2\n3\n3\n2\n2\n2\n3\n3\n3\n2\n3\n2\n2\n2\n3\n3\n4\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n4\n2\n2\n2\n3\n2\n3\n3\n2\n4\n3\n3\n3\n4\n4\n2\n4\n2\n2\n2\n3\n3\n2\n3\n3\n2\n3\n3\n3\n2\n2\n3\n2\n2\n2\n2\n4\n3\n3\n3\n3\n2\n3\n3\n2\n2\n2\n3\n2\n2\n3\n3\n3\n3\n3\n2\n2\n2\n3\n3\n4\n2\n3\n3\n3\n3\n2\n3\n2\n3\n2\n3\n3\n3\n2\n2\n2\n3\n2\n2\n3\n3\n3\n3\n2\n2\n3\n3\n3\n2\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n4\n4\n2\n3\n3\n4\n4\n3\n3\n2\n2\n2\n2\n2\n2\n2\n3\n4\n4\n3\n4\n2\n2\n4\n2\n2\n4\n2\n3\n3\n3\n3\n2\n2\n2\n3\n2\n3\n2\n2\n2\n3\n3\n2\n2\n2\n3\n3\n3\n3\n2\n3\n3\n3\n3\n2\n2\n3\n3\n3\n2\n3\n3\n3\n3\n3\n4\n4\n4\n3\n3\n3\n4\n3\n4\n3\n3\n3\n3\n3\n2\n4\n3\n3\n3\n2\n3\n2\n4\n2\n3\n3\n3\n3\n3\n2\n2\n2\n3\n2\n2\n3\n3\n2\n4\n4
\n4\n3\n3\n3\n3\n3\n2\n3\n4\n2\n3\n2\n2\n3\n2\n3\n3\n4\n3\n3\n3\n4\n3\n2\n3\n3\n3\n2\n3\n3\n3\n3\n2\n3\n4\n2\n3\n3\n3\n3\n2\n3\n3\n2\n2\n2\n2\n4\n2\n4\n2\n2\n3\n4\n4\n3\n2\n3\n3\n3\n3\n4\n2\n2\n4\n3\n3\n2\n2\n2\n3\n3\n4\n3\n3\n3\n4\n4\n3\n2\n2\n4\n3\n2\n3\n3\n4\n2\n3\n2\n3\n3\n3\n3\n3\n3\n2\n3\n2\n2\n3\n4\n3\n3\n3\n3\n3\n3\n3\n2\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n4\n3\n3\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n3\n3\n3\n3\n3\n4\n3\n3\n3\n2\n4\n4\n3\n3\n3\n2\n2\n3\n3\n4\n3\n2\n3\n3\n3\n4\n4\n2\n2\n3\n3\n3\n3\n2\n2\n3\n2\n3\n3\n2\n2\n4\n4\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n2\n2\n2\n2\n2\n3\n3\n3\n4\n3\n3\n2\n3\n4\n3\n3\n2\n3\n2\n3\n4\n4\n4\n2\n4\n2\n3\n2\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n4\n3\n4\n3\n3\n3\n2\n3\n2\n4\n4\n3\n2\n2\n3\n3\n3\n2\n3\n4\n2\n2\n3\n3\n3\n2\n2\n2\n3\n2\n2\n3\n4\n3\n2\n2\n4\n3\n3\n2\n3\n2\n4\n4\n3\n2\n4\n2\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n2\n3\n3\n4\n4\n3\n3\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n4\n2\n3\n2\n2\n2\n2\n2\n2\n4\n2\n4\n2\n2\n4\n2\n2\n2\n3\n2\n4\n2\n4\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n4\n2\n2\n2\n2\n2\n4\n4\n4\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n4\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n
4\n2\n2\n2\n2\n4\n2\n4\n4\n2\n2\n2\n2\n2\n4\n4\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n3\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n4\n2\n2\n2\n2\n4\n2\n2\n3\n4\n2\n3\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n3\n4\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n4\n2\n3\n4\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n4\n2\n4\n2\n4\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n4\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n4\n2\n2\n2\n4\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n4\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n3\n2\n2\n2\n2\n2\n3\n2\n4\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n3\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n4\n2\n4\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n3\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n4\n3\n3\n2\n2\n2\n2\n2\n3\n2\n2\n2\n3\n2\n2\n4\n2\n2\n2\n3\n2\n2\n4\n2\n3\n4\n2\n2\n4\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\
n2\n2\n3\n4\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n4\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n4\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n3\n2\n2\n2\n3\n4\n2\n2\n2\n4\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n3\n2\n3\n4\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n4\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n4\n2\n4\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n3\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n4\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n4\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n3\n2\n2\n2\n2\n4\n2\n2\n3\n2\n2\n4\n2\n2\n2\n2\n2\n3\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n3\n3\n2\n3\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n4\n2\n2\n4\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n4\n2\n3\n3\n2\n3\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2
\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n3\n2\n2\n3\n2\n2\n4\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n4\n2\n4\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n3\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n4\n2\n2\n2\n2\n2\n2\n4\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n4\n2\n2\n2\n4\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n3\n2\n3\n2\n2\n3\n2\n2\n2\n3\n2\n2\n2\n2\n2\n3\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n4\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n4\n4\n2\n2\n2\n3\n4\n4\n4\n4\n2\n2\n2\n3\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n3\n2\n2\n2\n2\n2\n3\n4\n4\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n3\n2\n4\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n4\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n3\n3\n2\n2\n4\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n3\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n3\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n
2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n4\n2\n4\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n4\n4\n4\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n3\n3\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n4\n4\n3\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n4\n2\n2\n2\n4\n4\n2\n2\n2\n3\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n3\n2\n2\n2\n2\n2\n2\n4\n4\n2\n2\n2\n4\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n4\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n4\n4\n4\n4\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n4\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n4\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n4\n4\n2\n2\n2\n2\n2\n2\n4\n2\n4\n2\n3\n4\n2\n3\n3\n2\n2\n2\n2\n2\n2\n2\n4\n2\n3\n2\n2\n2\n2\n2\n2\n4\n4\n2\n2\n4\n2\n2\n3\n2\n4\n2\n4\n2\n2\n2\n2\n3\n2\n2\n4\n2\n4\n3\n4\n4\n2\n3\n2\n2\n4\n2\n3\n2\n2\n4\n2\n4\n4\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n4\n3\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n3\n2\n2\n2\n2\n3\n4\n4\n3\n2\n4\n3\n2\n3\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n3\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n4\n4\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n3\n4\n2\n2\n4\n4\n2\n2\n2\n2\n3\n3\
n2\n4\n4\n2\n2\n2\n2\n2\n3\n4\n2\n2\n2\n2\n2\n4\n4\n4\n2\n2\n4\n2\n3\n2\n2\n3\n2\n4\n2\n4\n2\n2\n2\n2\n2\n3\n2\n2\n4\n4\n2\n2\n3\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n3\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n4\n2\n2\n4\n2\n2\n3\n4\n2\n2\n4\n4\n4\n2\n4\n2\n2\n4\n2\n4\n2\n4\n2\n2\n4\n4\n2\n2\n2\n4\n4\n4\n2\n2\n4\n2\n2\n4\n4\n3\n4\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n4\n2\n3\n3\n2\n2\n4\n3\n4\n2\n2\n2\n2\n4\n2\n4\n2\n2\n4\n2\n4\n2\n2\n2\n2\n4\n2\n3\n4\n3\n2\n2\n4\n2\n2\n3\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n4\n2\n4\n2\n4\n2\n2\n3\n2\n4\n2\n3\n4\n2\n2\n2\n2\n2\n4\n4\n2\n2\n3\n4\n2\n4\n2\n2\n4\n4\n2\n4\n4\n2\n2\n2\n3\n4\n4\n4\n3\n4\n4\n2\n2\n2\n2\n2\n3\n2\n2\n4\n4\n2\n4\n2\n2\n3\n2\n3\n2\n2\n3\n2\n2\n3\n2\n2\n2\n2\n4\n2\n2\n2\n4\n2\n2\n2\n2\n3\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n4\n2\n4\n4\n2\n2\n2\n4\n2\n2\n2\n4\n4\n2\n4\n4\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n4\n2\n2\n2\n2\n3\n2\n2\n2\n3\n2\n3\n4\n2\n2\n2\n2\n2\n3\n2\n4\n2\n2\n3\n2\n2\n4\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n4\n2\n4\n2\n2\n2\n4\n2\n2\n2\n4\n3\n2\n2\n3\n2\n2\n3\n4\n4\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n4\n2\n3\n4\n4\n2\n4\n4\n4\n5\n2\n2\n2\n2\n2\n4\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n4\n4\n2\n4\n2\n2\n2\n4\n4\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n4\n4\n4\n2\n2\n2\n4\n3\n2\n2\n3\n2\n2\n3\n2\n2\n2\n3\n2\n4\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n3\n2\n3\n4\n2\n3\n2\n2\n2\n2\n2\n3\n4\n2\n2\n4\n2\n2\n2\n2\n4\n4\n4\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n3\n2\n2\n2\n4\n2\n2\n3\n2\n2\n2\n4\n2\n3\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n4\n2\n4\n2\n2\n2\n2\n4\n3\n4\n3\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n4\n4\n3\n2\n2\n4\n2\n2\n2\n2\n4\n2\n2\n4\n2\n4\n4\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n3\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n4\n3\n3\n2\n2\n2\n4\n3\n2\n2\n2\n3\n2\n2\n4\n2\n2\n2\n3\n3\n2\n4\n3\n4\n2\n2\n4\n2\n2\n4\n2\n2\n4\n2\n2\n3
\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n3\n2\n4\n2\n4\n2\n4\n2\n2\n2\n4\n2\n2\n2\n4\n2\n2\n3\n2\n2\n2\n2\n3\n4\n2\n2\n4\n3\n2\n2\n3\n2\n2\n2\n3\n3\n3\n2\n2\n2\n2\n3\n2\n2\n2\n2\n4\n4\n2\n3\n4\n2\n2\n2\n4\n2\n2\n3\n2\n2\n2\n4\n2\n2\n2\n4\n2\n2\n2\n2\n4\n2\n2\n3\n2\n4\n2\n2\n2\n3\n2\n2\n2\n4\n2\n2\n2\n2\n4\n3\n4\n2\n2\n2\n2\n2\n3\n2\n4\n3\n3\n2\n4\n4\n4\n2\n2\n2\n2\n2\n2\n4\n2\n2\n3\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n4\n2\n2\n4\n2\n2\n4\n4\n2\n3\n4\n2\n4\n3\n3\n2\n2\n2\n4\n4\n4\n3\n2\n3\n2\n2\n2\n4\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n4\n2\n4\n3\n2\n2\n2\n3\n2\n2\n2\n2\n4\n3\n2\n2\n2\n2\n3\n4\n3\n2\n3\n3\n4\n2\n2\n4\n4\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n4\n3\n2\n3\n2\n2\n4\n2\n4\n2\n4\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n4\n4\n3\n2\n2\n2\n2\n3\n4\n4\n2\n2\n2\n4\n2\n2\n3\n2\n4\n2\n4\n4\n2\n2\n3\n4\n3\n4\n2\n4\n2\n2\n4\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n3\n4\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n4\n4\n2\n4\n3\n2\n2\n3\n2\n4\n4\n2\n3\n2\n2\n2\n3\n4\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n4\n4\n2\n2\n2\n3\n2\n4\n2\n2\n2\n2\n2\n2\n2\n3\n4\n2\n3\n2\n4\n2\n3\n2\n2\n2\n4\n4\n2\n3\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n3\n2\n2\n4\n2\n2\n2\n2\n4\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n4\n2\n2\n2\n4\n4\n4\n4\n2\n2\n2\n2\n4\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n3\n4\n2\n2\n4\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n4\n2\n2\n2\n4\n2\n2\n2\n2\n2\n3\n2\n2\n4\n2\n2\n4\n3\n2\n4\n2\n2\n4\n2\n3\n2\n4\n2\n2\n2\n4\n3\n2\n3\n4\n2\n2\n2\n2\n4\n2\n2\n2\n2\n3\n4\n2\n3\n4\n2\n4\n2\n4\n2\n2\n2\n3\n2\n2\n2\n3\n3\n3\n2\n2\n4\n2\n2\n2\n2\n2\n4\n3\n2\n2\n2\n4\n3\n2\n2\n4\n2\n2\n3\n4\n2\n4\n2\n4\n2\n2\n4\n2\n4\n2\n3\n3\n2\n2\n4\n2\n2\n2\n4\n4\n2\n2\n2\n4\n4\n3\n3\n4\n3\n4\n3\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n
2\n4\n3\n2\n2\n2\n3\n3\n2\n2\n2\n2\n4\n2\n4\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n3\n2\n4\n2\n2\n2\n2\n2\n2\n3\n2\n2\n3\n2\n2\n4\n2\n4\n2\n2\n2\n2\n2\n4\n4\n2\n3\n2\n2\n2\n2\n2\n4\n2\n4\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n4\n2\n4\n2\n2\n2\n2\n4\n2\n2\n4\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n3\n4\n2\n2\n3\n2\n2\n2\n2\n2\n4\n4\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n4\n4\n2\n2\n2\n4\n2\n2\n2\n4\n2\n2\n2\n4\n2\n2\n4\n3\n4\n2\n2\n4\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n4\n4\n2\n2\n4\n2\n4\n2\n4\n2\n2\n2\n3\n2\n2\n3\n3\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n4\n2\n2\n4\n2\n4\n2\n4\n2\n2\n4\n4\n2\n4\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n4\n3\n2\n2\n2\n2\n2\n2\n4\n2\n2\n4\n3\n2\n2\n2\n4\n3\n2\n2\n2\n2\n2\n3\n2\n2\n4\n2\n4\n4\n4\n4\n2\n3\n2\n2\n3\n3\n2\n2\n2\n2\n2\n2\n3\n3\n4\n2\n4\n2\n4\n2\n3\n2\n2\n4\n4\n3\n3\n3\n2\n2\n3\n3\n2\n2\n3\n3\n2\n2\n2\n2\n2\n3\n3\n2\n2\n2\n2\n2\n4\n2\n2\n3\n3\n2\n4\n3\n3\n2\n2\n2\n2\n4\n2\n2\n3\n2\n2\n3\n3\n4\n4\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n3\n2\n2\n3\n2\n4\n4\n2\n2\n2\n3\n4\n4\n4\n4\n4\n4\n2\n2\n3\n2\n2\n2\n3\n4\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n3\n2\n4\n2\n2\n2\n3\n4\n4\n4\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n3\n2\n4\n4\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n4\n2\n4\n2\n2\n3\n2\n2\n4\n2\n4\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n3\n4\n2\n2\n2\n2\n2\n3\n3\n3\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n4\n4\n2\n3\n3\n4\n4\n4\n4\n4\n4\n4\n2\n2\n2\n2\n4\n3\n3\n2\n2\n2\n3\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n4\n2\n4\n2\n2\n4\n3\n2\n2\n3\n4\n4\n2\n4\n2\n2\n2\n3\n3\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n4\n2\n2\n4\n4\n4\n2\n2\n4\n2\n2\n4\n4\n4\n2\n2\n2\n2\n2\n2\n3\n4\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n3\n2\n2\n2\n2\n3\n2\n2\n2\n3\n3\n2\n2\n2\n2\n3\n2\n2\n2\n4\n2\n2\n2\n3\n2\n2\n2\n2\n2\n4\n4\n2\n3\n4\n4\n4\n2\n2\n2\n2\
n2\n2\n2\n2\n2\n4\n4\n2\n4\n2\n2\n4\n2\n2\n4\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n3\n3\n2\n2\n3\n3\n2\n2\n4\n4\n4\n4\n2\n2\n4\n4\n2\n4\n2\n2\n2\n2\n4\n2\n4\n2\n2\n2\n2\n2\n4\n4\n2\n2\n2\n3\n3\n3\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n4\n4\n4\n3\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n4\n2\n2\n2\n2\n2\n4\n2\n2\n4\n4\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n4\n2\n4\n2\n2\n2\n4\n4\n2\n2\n2\n3\n3\n4\n2\n2\n4\n2\n2\n2\n2\n2\n2\n3\n3\n4\n2\n2\n3\n4\n4\n4\n4\n4\n4\n4\n2\n2\n2\n4\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n4\n4\n2\n2\n2\n2\n2\n2\n4\n3\n2\n3\n3\n2\n4\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n4\n2\n2\n2\n3\n4\n2\n2\n3\n2\n4\n4\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n3\n3\n4\n4\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n4\n3\n2\n3\n4\n4\n3\n3\n3\n3\n3\n3\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n3\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n3\n3\n2\n2\n4\n4\n2\n2\n4\n2\n4\n4\n4\n4\n2\n4\n4\n3\n2\n2\n2\n2\n4\n4\n2\n2\n2\n2\n2\n3\n2\n3\n4\n2\n4\n2\n2\n5\n2\n2\n4\n4\n2\n2\n2\n4\n3\n2\n2\n2\n2\n4\n2\n2\n2\n3\n3\n2\n2\n2\n2\n2\n2\n4\n4\n2\n2\n2\n4\n2\n4\n2\n2\n2\n4\n4\n3\n2\n4\n2\n3\n3\n2\n2\n2\n2\n2\n3\n2\n4\n2\n4\n4\n2\n2\n2\n2\n2\n2\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n2\n4\n4\n4\n4\n3\n2\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n2\n4\n4\n3\n2\n4\n4\n4\n4\n4\n2\n4\n4\n3\n4\n2\n4\n4\n4\n3\n2\n2\n2\n3\n4\n4\n4\n4\n3\n3\n4\n4\n4\n3\n4\n4\n3\n4\n4\n4\n2\n2\n4\n3\n3\n4\n4\n4\n3\n2\n4\n4\n2\n4\n4\n4\n3\n4\n4\n4\n4\n2\n4\n4\n3\n4\n4\n2\n3\n2\n2\n4\n3\n4\n4\n4\n4\n3\n4\n3\n4\n4\n4\n4\n3\n4\n4\n4\n4\n3\n3\n4\n4\n4\n4\n2\n4\n3\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n3\n3\n3\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n3\n3\n4\n4\n4\n3\n4\n4\n4\n4\n3\n4\n3\n4\n4\n4\n4\n3\n4\n3\n4\n2\n4\n4\n2\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n3\n4\n3\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n3\n4\n4\n3\n4\n4\n3\n4\n4\n2\n3\n4\n3\n4\n4\n3\n3\n2\n4\n3\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n3\n2\n2\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n3\n3
\n4\n2\n3\n4\n2\n4\n4\n3\n3\n4\n4\n4\n3\n4\n4\n4\n4\n2\n3\n4\n4\n4\n3\n4\n4\n4\n4\n3\n4\n4\n4\n4\n2\n4\n2\n3\n3\n4\n4\n4\n3\n3\n4\n3\n4\n4\n4\n4\n2\n4\n3\n4\n4\n2\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n2\n4\n4\n4\n4\n4\n4\n3\n4\n4\n3\n4\n3\n4\n4\n4\n4\n3\n4\n4\n4\n4\n3\n4\n4\n4\n3\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n3\n3\n3\n4\n4\n4\n4\n2\n2\n4\n2\n3\n4\n4\n4\n4\n4\n5\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n2\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n3\n3\n3\n4\n4\n3\n4\n4\n2\n3\n3\n4\n4\n3\n2\n4\n4\n4\n4\n2\n4\n3\n4\n3\n4\n4\n3\n4\n3\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n3\n2\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n3\n3\n4\n4\n4\n3\n3\n3\n3\n4\n4\n2\n4\n4\n4\n3\n4\n3\n4\n4\n4\n2\n2\n4\n4\n3\n2\n4\n4\n3\n4\n4\n4\n4\n4\n3\n4\n3\n4\n4\n4\n4\n2\n4\n4\n4\n4\n3\n3\n4\n3\n4\n4\n4\n2\n4\n4\n4\n4\n3\n3\n4\n4\n3\n3\n3\n4\n4\n4\n3\n3\n4\n4\n3\n4\n4\n4\n2\n4\n4\n2\n4\n3\n3\n2\n4\n2\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n2\n4\n3\n4\n4\n3\n4\n3\n4\n4\n3\n3\n3\n4\n4\n2\n3\n4\n3\n4\n4\n4\n4\n3\n4\n4\n3\n4\n2\n4\n4\n2\n4\n2\n4\n4\n4\n3\n4\n4\n4\n4\n3\n4\n4\n4\n3\n2\n4\n3\n4\n3\n3\n2\n3\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n3\n4\n4\n4\n3\n4\n2\n4\n4\n4\n4\n2\n2\n4\n4\n4\n4\n4\n2\n3\n4\n4\n3\n3\n4\n4\n4\n4\n3\n4\n3\n2\n4\n4\n4\n4\n3\n4\n2\n2\n4\n4\n4\n3\n3\n3\n4\n4\n4\n3\n4\n4\n4\n4\n3\n4\n3\n2\n3\n3\n4\n3\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n3\n4\n3\n4\n3\n4\n4\n4\n4\n2\n4\n4\n3\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n3\n4\n4\n4\n4\n3\n4\n4\n4\n4\n3\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n2\n2\n4\n4\n4\n4\n3\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n3\n3\n4\n4\n3\n3\n4\n2\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n2\n4\n4\n4\n4\n4\n4\n2\n3\n4\n4\n4\n4\n4\n3\n4\n3\n4\n3\n3\n4\n3\n4\n4\n4\n4\n3\n4\n3\n2\n4\n4\n3\n4\n4\n2\n2\n3\n4\n3\n2\n4\n2\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n2\n4\n4\n2\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n2\n4\n4\n4\n4\n3\n4\n2\n4\n3\n4\n3\n4\n4\n4\n4\n4\n
3\n4\n4\n4\n4\n4\n4\n4\n2\n3\n3\n4\n4\n3\n3\n4\n2\n2\n4\n4\n4\n4\n4\n2\n5\n3\n4\n4\n4\n4\n3\n2\n4\n2\n4\n4\n4\n3\n4\n4\n4\n3\n4\n3\n4\n3\n4\n4\n4\n4\n4\n3\n4\n3\n4\n4\n4\n4\n3\n4\n3\n3\n3\n4\n4\n4\n4\n2\n4\n3\n4\n4\n4\n4\n3\n2\n4\n4\n3\n4\n2\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n3\n3\n4\n3\n4\n3\n4\n3\n2\n4\n4\n4\n2\n4\n4\n4\n3\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n2\n2\n4\n3\n4\n4\n3\n4\n4\n4\n3\n4\n3\n2\n2\n4\n4\n4\n4\n4\n2\n2\n4\n4\n3\n4\n4\n4\n4\n2\n4\n4\n2\n3\n4\n4\n4\n2\n4\n4\n4\n3\n4\n4\n3\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n2\n3\n4\n2\n3\n4\n2\n4\n4\n4\n4\n4\n2\n3\n2\n4\n2\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n2\n4\n3\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n2\n2\n4\n4\n4\n4\n3\n4\n4\n4\n4\n3\n4\n3\n3\n2\n4\n3\n2\n2\n4\n4\n4\n4\n4\n2\n4\n4\n2\n4\n4\n2\n4\n4\n2\n3\n4\n3\n3\n4\n2\n4\n3\n4\n4\n4\n4\n3\n4\n3\n2\n2\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n3\n3\n3\n3\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n3\n3\n3\n3\n4\n3\n3\n4\n4\n4\n4\n3\n3\n4\n4\n4\n3\n3\n4\n3\n3\n4\n4\n2\n4\n4\n3\n4\n3\n2\n4\n3\n3\n4\n4\n3\n2\n3\n4\n4\n4\n2\n4\n3\n3\n4\n4\n4\n4\n2\n3\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n3\n3\n3\n4\n4\n3\n4\n4\n4\n4\n4\n4\n3\n3\n3\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n3\n4\n4\n4\n2\n4\n4\n4\n4\n4\n3\n3\n2\n4\n4\n4\n4\n2\n4\n4\n4\n3\n4\n4\n3\n3\n3\n4\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n3\n4\n4\n2\n4\n4\n2\n2\n2\n4\n4\n2\n2\n3\n3\n4\n2\n4\n4\n2\n4\n2\n4\n4\n4\n4\n4\n2\n4\n4\n2\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n2\n2\n4\n2\n2\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n3\n2\n2\n3\n3\n4\n4\n3\n4\n3\n3\n3\n3\n3\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n2\n2\n2\n2\n2\n2\n2\n4\n4\n4\n2\n2\n4\n4\n2\n4\n2\n3\n3\n2\n4\n2\n4\n4\n3\n3\n2\n2\n2\n4\n2\n4\n3\n4\n3\n3\n4\n3\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n3\n3\n4\n4\n3\n4\n4\n4\n3\n3\n3\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n2\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n2\n3\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n3\
n3\n4\n4\n4\n4\n4\n2\n2\n4\n3\n3\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n2\n2\n4\n4\n4\n4\n4\n4\n3\n3\n4\n3\n4\n4\n4\n4\n3\n4\n4\n4\n4\n3\n4\n3\n2\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n3\n4\n4\n3\n3\n3\n3\n4\n3\n3\n4\n3\n3\n4\n3\n3\n4\n4\n4\n4\n4\n2\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n3\n3\n4\n4\n4\n3\n5\n4\n4\n4\n4\n2\n4\n2\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n3\n4\n4\n3\n4\n4\n2\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n3\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n3\n4\n4\n4\n2\n4\n4\n4\n3\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n3\n2\n2\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n2\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n3\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4
\n4\n4\n4\n3\n4\n4\n4\n4\n3\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n3\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n3\n4\n4\n4\n4\n4\n5\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n3\n4\n4\n3\n4\n2\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n3\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n3\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n2\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n2\n4\n3\n4\n4\n4\n4\n3\n2\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n3\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n3\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n2\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n
4\n4\n4\n3\n4\n3\n2\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n3\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n3\n3\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n3\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n3\n4\n4\n3\n2\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n3\n4\n4\n2\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n3\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n3\n3\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n3\n4\n4\n3\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n3\n2\n2\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n3\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\
n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n3\n3\n2\n4\n4\n3\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n2\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n3\n4\n3\n4\n4\n3\n4\n4\n4\n3\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n3\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n3\n4\n4\n4\n4\n4\n2\n4\n3\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n4\n3\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n3\n3\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n2\n4\n3\n4\n4\n4\n4\n4\n4\n2\n4\n2\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n2\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n2\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n2\n2\n2\n2\n2\n2\n4\n4\n4\n4\n4\n2\n2\n4\n4\n4\n4\n2\n4\n3\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4
\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n2\n4\n3\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n3\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n3\n4\n4\n4\n4\n4\n3\n3\n3\n3\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n5\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n4\n4\n3\n4\n3\n3\n3\n3\n3\n4\n3\n4\n3\n3\n3\n4\n4\n3\n3\n3\n3\n3\n4\n4\n4\n4\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n4\n3\n3\n4\n4\n3\n3\n3\n3\n4\n3\n4\n4\n3\n4\n3\n3\n3\n3\n4\n3\n4\n4\n3\n4\n3\n4\n4\n4\n3\n3\n4\n4\n4\n3\n4\n4\n4\n3\n4\n4\n3\n3\n3\n3\n3\n4\n3\n3\n4\n4\n3\n3\n4\n3\n3\n4\n4\n4\n3\n4\n3\n3\n3\n3\n3\n4\n4\n4\n3\n4\n4\n3\n3\n3\n3\n4\n3\n3\n4\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n4\n3\n3\n3\n3\n3\n3\n4\n4\n3\n3\n4\n4\n3\n3\n4\n4\n3\n3\n2\n4\n3\n3\n3\n3\n3\n3\n3\n4\n4\n3\n3\n4\n3\n4\n3\n3\n4\n4\n3\n3\n3\n3\n3\n3\n4\n4\n3\n3\n3\n3\n4\n3\n3\n3\n4\n4\n2\n4\n4\n3\n4\n3\n4\n2\n3\n3\n3\n3\n3\n3\n4\n3\n3\n4\n4\n3\n4\n4\n4\n3\n3\n3\n4\n4\n3\n3\n4\n3\n3\n4\n3\n3\n3\n4\n4\n3\n3\n3\n3\n3\n3\n3\n3\n4\n4\n4\n3\n3\n2\n4\n3\n4\n3\n3\n4\n3\n4\n4\n4\n4\n3\n3\n4\n3\n3\n3\n4\n3\n3\n4\n3\n4\n4\n3\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n3\n3\n3\n3\n4\n4\n3\n3\n3\n4\n3\n3\n3\n3\n4\n3\n3\n3\n4\n3\n3\n4\n3\n3\n4\n3\n4\n3\n4\n4\n3\n4\n3\n4\n2\n4\n4\n4\n3\n4\n4\n4\n3\n3\n4\n3\n4\n4\n4\n4\n4\n3\n4\n4\n3\n3\n4\n3\n3\n4\n3\n4\n3\n4\n4\n4\n3\n3\n3\n4\n4\n4\n4\n3\n4\n3\n3\n4\n3\n4\n4\n4\n3\n3\n4\n4\n
3\n2\n3\n3\n4\n3\n3\n3\n4\n3\n4\n3\n3\n3\n3\n3\n4\n3\n4\n3\n3\n3\n3\n4\n3\n4\n4\n3\n4\n3\n4\n3\n3\n4\n4\n4\n3\n3\n3\n4\n3\n3\n3\n3\n4\n3\n4\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n4\n4\n4\n4\n3\n3\n4\n3\n3\n3\n3\n4\n2\n3\n4\n3\n3\n4\n3\n3\n4\n4\n3\n3\n3\n4\n3\n4\n3\n3\n4\n4\n4\n4\n3\n4\n3\n4\n3\n4\n4\n4\n3\n4\n4\n4\n3\n3\n3\n3\n3\n3\n4\n3\n4\n4\n3\n4\n2\n4\n4\n3\n3\n4\n3\n3\n4\n3\n4\n3\n3\n3\n4\n3\n3\n4\n3\n3\n4\n3\n4\n3\n4\n2\n3\n4\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n5\n4\n4\n4\n4\n4\n3\n4\n4\n4\n3\n3\n3\n4\n3\n4\n3\n3\n4\n4\n4\n4\n4\n3\n3\n3\n4\n3\n4\n3\n3\n3\n3\n4\n3\n4\n3\n4\n4\n3\n4\n3\n3\n3\n3\n3\n3\n3\n4\n3\n4\n4\n3\n3\n3\n3\n3\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n3\n3\n3\n3\n4\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n4\n3\n4\n3\n3\n3\n3\n4\n3\n4\n3\n3\n3\n3\n4\n4\n4\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n4\n4\n3\n3\n4\n3\n3\n3\n3\n4\n3\n3\n4\n2\n3\n4\n4\n4\n4\n4\n5\n3\n3\n3\n3\n3\n4\n3\n4\n3\n3\n4\n3\n4\n4\n4\n3\n4\n4\n3\n3\n4\n4\n4\n4\n3\n4\n3\n4\n3\n4\n4\n3\n4\n3\n4\n4\n3\n3\n4\n4\n3\n4\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n4\n3\n3\n3\n3\n3\n4\n4\n4\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n4\n3\n4\n4\n3\n3\n3\n4\n3\n3\n3\n3\n4\n3\n3\n4\n4\n3\n3\n3\n4\n3\n3\n3\n4\n4\n3\n3\n4\n3\n3\n3\n4\n3\n3\n3\n3\n3\n4\n4\n4\n4\n4\n3\n4\n4\n3\n4\n4\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n4\n3\n4\n4\n3\n3\n4\n3\n3\n3\n4\n4\n4\n3\n3\n3\n3\n3\n4\n4\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n4\n4\n4\n2\n4\n4\n3\n3\n4\n3\n4\n3\n4\n3\n3\n3\n3\n4\n3\n3\n2\n4\n3\n3\n4\n3\n3\n3\n3\n3\n3\n2\n4\n4\n3\n3\n3\n4\n4\n4\n3\n4\n3\n4\n3\n3\n4\n3\n4\n4\n3\n4\n4\n2\n4\n4\n4\n3\n4\n3\n3\n4\n3\n4\n3\n3\n3\n3\n4\n3\n3\n2\n3\n3\n4\n3\n4\n4\n3\n3\n4\n3\n4\n4\n4\n3\n3\n3\n3\n3\n3\n3\n4\n4\n3\n3\n3\n3\n3\n4\n3\n3\n4\n3\n3\n4\n3\n3\n4\n3\n3\n2\n4\n3\n3\n3\n4\n3\n3\n3\n4\n3\n4\n3\n3\n3\n3\n4\n4\n3\n4\n4\n3\n3\n4\n3\n4\n4\n3\n4\n3\n3\n4\n4\n4\n3\n3\n4\n3\n3\n3\n3\n4\n3\n4\n3\n3\n4\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n4\n3\n3\n4\n3\n4\n3\n4\n3\n4\n4\n3\n3\n4\n3\n4\n3\n3\n4\n3\n3\n3\n3\n3\n3\n4\n3\n4\n4\n4\n3\n4\n3\n4\n4\n4\n4\
n3\n3\n4\n4\n4\n3\n3\n4\n4\n3\n4\n3\n3\n3\n3\n4\n3\n4\n4\n3\n3\n3\n2\n3\n3\n3\n4\n3\n3\n3\n4\n4\n4\n3\n3\n4\n4\n3\n4\n4\n3\n4\n4\n4\n3\n3\n3\n4\n3\n3\n3\n3\n3\n4\n3\n2\n4\n3\n3\n3\n4\n3\n3\n4\n4\n3\n3\n3\n4\n3\n3\n3\n3\n4\n3\n4\n4\n4\n3\n4\n3\n3\n4\n4\n4\n4\n3\n3\n3\n3\n3\n3\n3\n4\n4\n3\n3\n3\n4\n4\n4\n3\n3\n3\n3\n3\n3\n3\n3\n4\n4\n3\n3\n4\n4\n3\n3\n4\n3\n2\n3\n4\n3\n4\n3\n4\n4\n4\n4\n3\n3\n4\n3\n3\n3\n3\n3\n3\n4\n4\n3\n3\n3\n3\n4\n3\n4\n3\n4\n4\n3\n4\n3\n4\n4\n4\n3\n3\n3\n3\n3\n4\n4\n4\n3\n4\n4\n4\n3\n4\n3\n4\n4\n3\n4\n3\n3\n4\n3\n3\n3\n3\n3\n4\n3\n3\n4\n4\n4\n3\n3\n4\n3\n4\n3\n4\n4\n3\n4\n3\n4\n3\n3\n3\n3\n4\n4\n3\n3\n3\n3\n3\n3\n4\n4\n3\n3\n3\n3\n3\n3\n4\n5\n3\n3\n4\n3\n3\n3\n3\n4\n3\n3\n3\n4\n4\n4\n4\n3\n4\n3\n3\n3\n3\n3\n4\n4\n3\n3\n3\n4\n3\n3\n4\n3\n4\n3\n3\n4\n3\n3\n3\n3\n4\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n4\n4\n4\n4\n3\n3\n4\n3\n3\n3\n3\n3\n4\n3\n4\n4\n3\n3\n3\n4\n4\n4\n3\n3\n3\n3\n4\n4\n3\n3\n3\n4\n3\n4\n3\n4\n3\n3\n3\n4\n3\n3\n3\n3\n3\n5\n4\n4\n4\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n4\n3\n4\n4\n3\n3\n3\n3\n4\n3\n3\n2\n3\n4\n3\n3\n2\n3\n3\n3\n4\n4\n4\n3\n4\n3\n4\n3\n4\n4\n4\n3\n4\n3\n4\n4\n4\n4\n4\n3\n3\n4\n3\n4\n4\n3\n3\n3\n3\n4\n4\n4\n3\n4\n3\n3\n4\n4\n4\n4\n3\n3\n3\n3\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n3\n4\n4\n3\n4\n4\n4\n3\n4\n3\n3\n3\n3\n3\n3\n3\n4\n3\n4\n3\n4\n3\n3\n3\n4\n3\n3\n3\n3\n3\n4\n3\n4\n3\n4\n4\n3\n4\n4\n4\n3\n3\n4\n4\n3\n4\n4\n3\n4\n4\n4\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n4\n3\n3\n4\n3\n3\n4\n3\n4\n3\n4\n4\n3\n4\n3\n5\n3\n3\n3\n4\n3\n3\n4\n3\n3\n4\n2\n4\n4\n3\n3\n3\n4\n3\n4\n3\n4\n3\n4\n3\n4\n3\n4\n4\n4\n4\n4\n4\n3\n3\n4\n3\n4\n3\n3\n4\n3\n3\n4\n3\n3\n3\n3\n4\n4\n3\n3\n3\n4\n3\n3\n4\n5\n4\n3\n4\n3\n3\n3\n3\n4\n3\n3\n4\n4\n4\n3\n3\n3\n4\n3\n4\n4\n3\n4\n2\n4\n3\n4\n3\n3\n4\n3\n4\n3\n3\n3\n4\n4\n4\n3\n4\n4\n3\n4\n3\n3\n3\n3\n4\n4\n3\n3\n4\n3\n3\n4\n3\n4\n3\n5\n3\n4\n3\n3\n3\n3\n3\n4\n4\n3\n4\n3\n4\n3\n3\n3\n3\n4\n3\n4\n3\n4\n3\n3\n4\n3\n3\n3\n3\n4\n4\n3\n2\n4\n3\n3\n3\n4\n3\n3\n3\n4\n3\n3\n4\n4\n3\n3\n4\n4\n3\n3\n3\n3\n4\n3\n3\n2\n4\n4\n4\n4\n4\n4\n3\n3
\n4\n4\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n4\n3\n4\n3\n3\n4\n4\n4\n3\n3\n3\n3\n4\n4\n3\n4\n3\n4\n4\n3\n4\n4\n3\n3\n4\n3\n3\n3\n3\n3\n4\n4\n3\n3\n3\n3\n4\n3\n3\n3\n4\n4\n3\n2\n4\n3\n4\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n4\n3\n3\n4\n3\n3\n4\n3\n3\n3\n3\n3\n3\n4\n2\n3\n3\n3\n4\n3\n4\n4\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n4\n3\n4\n4\n4\n4\n3\n4\n4\n4\n3\n4\n3\n4\n3\n4\n3\n5\n3\n4\n3\n3\n4\n3\n4\n3\n3\n4\n3\n4\n3\n3\n4\n4\n4\n3\n3\n3\n3\n3\n4\n3\n3\n3\n4\n4\n2\n3\n3\n4\n4\n3\n3\n4\n3\n4\n3\n3\n4\n4\n3\n3\n3\n4\n3\n2\n3\n4\n3\n3\n3\n3\n3\n4\n2\n4\n4\n3\n4\n3\n4\n3\n4\n3\n2\n4\n4\n4\n4\n4\n3\n3\n4\n3\n3\n4\n3\n3\n3\n3\n4\n3\n4\n3\n3\n3\n4\n3\n3\n3\n4\n4\n3\n3\n4\n3\n3\n4\n3\n3\n3\n4\n3\n3\n3\n4\n4\n3\n3\n4\n3\n4\n4\n4\n4\n4\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n4\n3\n3\n4\n4\n4\n3\n4\n3\n3\n4\n4\n4\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n4\n4\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n2\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n4\n4\n3\n4\n2\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n4\n3\n3\n3\n4\n4\n4\n3\n2\n3\n3\n4\n4\n4\n4\n4\n4\n4\n3\n3\n3\n3\n3\n3\n3\n4\n4\n3\n3\n4\n3\n4\n4\n3\n4\n3\n3\n3\n3\n4\n3\n4\n4\n3\n3\n3\n3\n4\n4\n4\n4\n4\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n4\n4\n4\n4\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n4\n3\n3\n3\n3\n4\n3\n3\n4\n4\n4\n3\n4\n4\n4\n4\n4\n3\n3\n2\n4\n4\n3\n4\n3\n4\n3\n3\n3\n4\n4\n3\n3\n3\n4\n4\n4\n4\n3\n4\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n4\n3\n3\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n3\n3\n3\n4\n4\n4\n3\n4\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n4\n4\n4\n3\n3\n3\n4\n3\n3\n3\n3\n2\n3\n3\n3\n3\n4\n3\n3\n3\n3\n2\n4\n3\n4\n4\n4\n4\n4\n4\n4\n2\n4\n3\n4\n3\n3\n4\n3\n3\n4\n4\n4\n3\n4\n4\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n4\n3\n4\n4\n4\n3\n4\n4\n4\n3\n3\n4\n3\n3\n4\n4\n4\n3\n3\n3\n4\n4\n3\n3\n3\n4\n4\n4\n4\n3\n3\n3\n3\n3\n3\n4\n3\n4\n3\n4\n3\n4\n3\n3\n3\n4\n4\n3\n4\n3\n3\n3\n3\n3\n3\n4\n4\n3\n3\n3\n3\n4\n3\n4\n4\n3\n3\n3\n3\n3\n3\n3\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n3\n2\n2\n2\n2\n3\n2\n3\n4\n4\n3\n4\n2\n3\n4\n4\n3\n4\n3\n3\n3\n3\n
3\n3\n3\n4\n3\n2\n4\n4\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n4\n3\n3\n3\n4\n3\n3\n3\n3\n4\n3\n3\n3\n3\n4\n4\n4\n4\n3\n3\n4\n4\n3\n4\n3\n3\n3\n3\n4\n3\n4\n4\n3\n3\n4\n3\n4\n4\n3\n3\n3\n3\n3\n3\n4\n4\n3\n3\n4\n4\n3\n4\n4\n4\n4\n4\n3\n3\n3\n3\n3\n3\n3\n4\n4\n3\n3\n3\n2\n2\n4\n4\n3\n3\n4\n4\n4\n3\n4\n4\n4\n4\n4\n3\n4\n3\n3\n3\n3\n2\n3\n4\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n4\n4\n4\n2\n3\n3\n4\n4\n4\n3\n3\n3\n4\n3\n4\n3\n3\n4\n3\n4\n3\n3\n4\n3\n3\n3\n4\n3\n3\n3\n4\n4\n4\n4\n4\n4\n3\n3\n3\n4\n4\n3\n3\n3\n4\n4\n3\n3\n4\n4\n3\n3\n3\n2\n4\n4\n4\n3\n3\n3\n3\n3\n3\n4\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n4\n4\n3\n3\n3\n3\n4\n3\n4\n4\n4\n4\n3\n4\n4\n4\n4\n3\n3\n3\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n3\n3\n4\n3\n4\n3\n4\n3\n3\n3\n4\n4\n3\n3\n3\n4\n4\n3\n4\n4\n4\n3\n4\n3\n3\n3\n4\n4\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n4\n3\n3\n3\n3\n4\n4\n4\n4\n3\n3\n4\n4\n4\n3\n3\n4\n4\n4\n4\n3\n4\n4\n4\n3\n4\n4\n4\n3\n4\n4\n3\n4\n3\n3\n3\n4\n4\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n4\n4\n3\n3\n5\n3\n3\n4\n4\n3\n3\n4\n4\n3\n4\n3\n3\n4\n4\n3\n3\n3\n3\n3\n4\n4\n4\n3\n3\n4\n4\n3\n4\n4\n4\n4\n4\n3\n4\n3\n3\n4\n4\n4\n3\n3\n4\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n4\n3\n4\n4\n3\n3\n4\n3\n3\n3\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n4\n3\n4\n4\n4\n4\n2\n4\n4\n3\n5\n3\n4\n4\n4\n2\n4\n3\n4\n4\n3\n3\n2\n3\n4\n4\n3\n4\n3\n4\n3\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n3\n4\n3\n4\n4\n4\n3\n3\n4\n4\n4\n3\n4\n4\n4\n3\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n3\n4\n4\n4\n3\n4\n3\n4\n4\n4\n3\n4\n4\n3\n3\n2\n3\n4\n3\n3\n4\n3\n4\n4\n3\n4\n4\n4\n3\n2\n3\n2\n3\n4\n3\n4\n3\n4\n4\n3\n3\n4\n4\n4\n4\n3\n4\n4\n3\n4\n4\n4\n4\n2\n2\n4\n3\n3\n4\n3\n3\n4\n3\n4\n5\n4\n3\n4\n3\n4\n3\n3\n4\n4\n4\n3\n4\n4\n4\n4\n3\n5\n4\n4\n3\n4\n4\n3\n4\n3\n4\n4\n2\n4\n4\n3\n4\n3\n4\n2\n3\n4\n3\n2\n2\n4\n3\n4\n4\n5\n3\n4\n4\n4\n4\n3\n3\n4\n3\n3\n4\n3\n4\n3\n3\n4\n4\n4\n3\n4\n4\n3\n3\n3\n4\n4\n4\n3\n2\n4\n3\n4\n4\n3\n4\n4\n4\n4\n4\n4\n3\n4\n4\n3\n3\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n3\n3\n4\
n4\n4\n4\n3\n4\n3\n4\n4\n4\n3\n4\n4\n4\n3\n4\n4\n4\n3\n4\n4\n4\n3\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n3\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n3\n4\n4\n4\n4\n3\n4\n4\n4\n2\n4\n3\n4\n4\n4\n3\n4\n3\n4\n4\n3\n3\n4\n4\n4\n3\n4\n3\n4\n3\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n4\n4\n3\n2\n2\n4\n3\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n4\n2\n3\n4\n4\n3\n4\n3\n4\n4\n3\n4\n3\n4\n3\n4\n4\n3\n4\n4\n4\n4\n4\n2\n4\n3\n4\n4\n4\n3\n4\n4\n4\n3\n4\n3\n3\n3\n3\n4\n4\n4\n2\n4\n4\n3\n3\n4\n3\n4\n3\n3\n4\n4\n3\n4\n4\n3\n4\n4\n4\n4\n4\n2\n3\n4\n3\n3\n4\n5\n4\n4\n4\n2\n3\n4\n4\n5\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n3\n4\n3\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n3\n3\n3\n4\n3\n4\n3\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n3\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n4\n4\n3\n4\n4\n4\n4\n3\n3\n4\n4\n3\n3\n3\n3\n4\n4\n3\n4\n4\n4\n4\n4\n4\n3\n4\n4\n2\n3\n4\n4\n4\n4\n4\n5\n4\n3\n4\n3\n4\n4\n3\n5\n3\n4\n4\n2\n4\n4\n4\n3\n4\n4\n3\n3\n4\n4\n5\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n5\n4\n4\n4\n4\n4\n4\n4\n3\n2\n3\n3\n4\n4\n3\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n3\n3\n4\n3\n4\n4\n4\n3\n4\n4\n3\n3\n3\n4\n3\n2\n2\n3\n4\n4\n4\n4\n4\n2\n4\n4\n3\n4\n3\n4\n4\n3\n4\n4\n4\n4\n3\n4\n3\n4\n3\n3\n4\n4\n4\n4\n4\n4\n3\n4\n4\n3\n4\n4\n3\n3\n3\n2\n4\n4\n4\n4\n3\n4\n4\n4\n4\n3\n4\n4\n3\n4\n4\n4\n4\n3\n3\n3\n4\n3\n4\n4\n4\n3\n3\n3\n4\n4\n3\n4\n3\n4\n3\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n3\n4\n3\n4\n4\n3\n3\n4\n4\n3\n2\n4\n4\n4\n4\n3\n4\n3\n2\n4\n4\n3\n4\n4\n4\n4\n3\n4\n4\n4\n4\n3\n4\n3\n4\n4\n3\n5\n4\n2\n4\n4\n3\n4\n3\n4\n4\n3\n3\n4\n4\n3\n3\n4\n4\n4\n4\n2\n4\n4\n5\n3\n4\n4\n3\n3\n4\n4\n4\n4\n4\n3\n4\n3\n3\n3\n3\n4\n4\n5\n4\n3\n3\n4\n4\n3\n4\n4\n3\n4\n4\n2\n4\n4\n4\n4\n2\n4\n3\n3\n4\n4\n4\n4\n3\n4\n4\n4\n4\n2\n4\n3\n4\n4\n3\n4\n4\n4\n3\n4\n4\n5\n4\n4\n4\n3\n4\n4\n3\n4\n3\n4\n4\n3\n4\n3\n4\n4\n4\n3\n4\n4\n3\n4\n4\n3\n3\n3\n3\n4\n3\n4\n4\n4\n3\n4\n3\n4\n4\n4\n4\n4\n3\n4\n3
\n4\n4\n4\n3\n3\n3\n4\n4\n4\n4\n2\n4\n4\n4\n3\n4\n2\n4\n4\n4\n4\n3\n4\n4\n4\n4\n3\n3\n4\n4\n3\n4\n3\n4\n4\n2\n4\n3\n4\n3\n3\n3\n2\n3\n3\n4\n4\n3\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n3\n4\n4\n4\n3\n4\n3\n4\n4\n3\n4\n4\n4\n2\n4\n4\n3\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n3\n3\n4\n4\n4\n4\n3\n4\n3\n3\n4\n4\n4\n4\n4\n3\n4\n3\n4\n4\n4\n2\n4\n4\n4\n3\n4\n4\n3\n3\n3\n4\n4\n4\n3\n3\n4\n4\n4\n3\n4\n3\n2\n3\n3\n4\n3\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n3\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n3\n4\n3\n4\n3\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n3\n4\n3\n4\n3\n3\n4\n4\n3\n3\n4\n4\n3\n3\n4\n5\n4\n4\n4\n4\n3\n3\n4\n4\n3\n3\n4\n4\n4\n4\n3\n3\n3\n3\n4\n4\n4\n4\n3\n4\n4\n4\n3\n4\n3\n4\n3\n4\n4\n4\n4\n4\n3\n4\n3\n4\n3\n4\n3\n4\n3\n4\n4\n4\n4\n4\n4\n4\n3\n4\n3\n4\n2\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n2\n3\n3\n4\n4\n3\n2\n4\n4\n3\n4\n3\n4\n3\n3\n3\n4\n4\n3\n4\n5\n4\n4\n4\n4\n3\n4\n3\n4\n4\n3\n3\n4\n3\n4\n4\n3\n3\n4\n4\n4\n4\n4\n2\n3\n4\n3\n3\n2\n3\n4\n2\n4\n4\n4\n4\n4\n3\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n3\n4\n4\n4\n3\n4\n4\n3\n4\n4\n4\n4\n3\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n3\n4\n5\n3\n4\n4\n3\n4\n3\n4\n4\n4\n2\n4\n3\n4\n4\n4\n4\n3\n4\n3\n4\n4\n3\n4\n4\n4\n3\n5\n4\n3\n4\n4\n4\n4\n4\n4\n3\n3\n4\n4\n3\n3\n3\n4\n3\n4\n4\n3\n3\n4\n3\n3\n4\n4\n3\n4\n4\n4\n4\n2\n5\n3\n3\n4\n4\n4\n4\n4\n3\n3\n4\n2\n4\n4\n4\n3\n4\n4\n4\n4\n3\n4\n3\n4\n3\n4\n3\n4\n4\n4\n4\n4\n4\n4\n3\n4\n3\n4\n3\n3\n4\n4\n3\n4\n4\n3\n4\n4\n3\n3\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n3\n4\n4\n4\n4\n3\n3\n4\n3\n5\n4\n3\n4\n2\n4\n4\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n3\n3\n4\n3\n4\n3\n4\n5\n2\n4\n4\n4\n4\n2\n4\n4\n3\n4\n3\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n3\n3\n4\n4\n4\n4\n3\n4\n4\n3\n4\n4\n4\n3\n4\n4\n4\n4\n3\n3\n4\n3\n2\n2\n4\n4\n4\n4\n4\n4\n2\n3\n4\n4\n4\n3\n4\n4\n3\n4\n3\n4\n2\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n3\n4\n4\n
4\n4\n3\n4\n4\n3\n3\n3\n4\n3\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n2\n4\n3\n4\n4\n3\n3\n4\n3\n3\n4\n4\n4\n4\n4\n3\n4\n3\n3\n3\n4\n3\n4\n4\n2\n3\n3\n4\n3\n4\n5\n3\n4\n4\n3\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n5\n4\n3\n4\n4\n5\n4\n5\n4\n4\n3\n3\n4\n3\n4\n2\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n3\n3\n2\n4\n4\n3\n3\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n2\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n4\n3\n3\n3\n4\n4\n4\n4\n4\n4\n2\n4\n3\n4\n4\n4\n4\n3\n3\n4\n4\n3\n4\n3\n2\n3\n4\n3\n4\n4\n4\n4\n3\n3\n4\n3\n4\n4\n4\n4\n4\n4\n3\n3\n4\n3\n3\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n3\n4\n3\n3\n4\n4\n4\n4\n3\n3\n3\n4\n4\n3\n3\n3\n4\n4\n4\n4\n4\n3\n3\n4\n3\n4\n4\n4\n3\n3\n3\n3\n3\n4\n3\n3\n4\n4\n2\n4\n4\n4\n4\n3\n4\n4\n4\n3\n3\n4\n4\n3\n2\n3\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n3\n4\n4\n4\n4\n4\n2\n4\n3\n4\n4\n4\n4\n4\n4\n4\n3\n4\n3\n4\n4\n3\n4\n4\n4\n4\n4\n4\n5\n4\n3\n3\n4\n3\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n3\n3\n4\n4\n4\n4\n3\n3\n3\n2\n3\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n2\n4\n4\n4\n4\n4\n3\n3\n2\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n3\n4\n4\n3\n3\n3\n3\n4\n4\n3\n3\n4\n3\n4\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n3\n3\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n3\n2\n2\n4\n3\n3\n4\n4\n4\n3\n3\n2\n4\n2\n4\n4\n4\n4\n4\n4\n4\n2\n4\n3\n4\n4\n2\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n3\n2\n3\n3\n3\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n3\n3\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n3\n4\n5\n3\n4\n3\n3\n2\n2\n3\n3\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n3\n3\n3\n3\n3\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n2\n2\n2\n2\n2\n2\n2\n4\n4\n4\n4\n2\n2\n4\n4\n3\n4\n2\n3\n3\n3\n4\n4\n4\n2\n4\n4\n3\n3\n4\n4\n4\n4\n3\n4\n3\n3\n4\n4\n3\n3\n3\n4\n4\n3\n3\n3\n3\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n5\n4\n4\n4\n4\n4\n3\n3\n3\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n3\n4\n4\n2\n4\n4\n2\n2\n4\n3\n4\
n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n3\n4\n4\n3\n3\n4\n3\n2\n4\n3\n4\n4\n4\n5\n4\n4\n4\n2\n4\n3\n4\n4\n4\n4\n4\n3\n4\n3\n4\n4\n4\n4\n4\n3\n3\n4\n3\n3\n3\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n3\n2\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n3\n2\n4\n4\n4\n3\n4\n4\n4\n4\n4\n3\n3\n3\n3\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n3\n5\n4\n4\n4\n3\n3\n3\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n3\n3\n4\n3\n4\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n3\n4\n4\n3\n4\n4\n3\n3\n3\n3\n3\n3\n5\n3\n4\n4\n4\n4\n3\n3\n3\n4\n4\n3\n3\n4\n3\n3\n3\n4\n4\n4\n4\n4\n3\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n3\n4\n3\n3\n4\n3\n4\n4\n4\n4\n3\n5\n4\n4\n4\n4\n3\n4\n4\n3\n4\n3\n3\n4\n4\n3\n4\n3\n3\n3\n4\n4\n4\n3\n4\n4\n4\n3\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n3\n3\n4\n3\n4\n3\n4\n4\n3\n4\n4\n3\n4\n4\n3\n3\n3\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n3\n3\n5\n2\n5\n5\n3\n5\n2\n5\n5\n5\n5\n3\n3\n5\n5\n5\n3\n5\n5\n5\n5\n5\n5\n5\n5\n3\n5\n5\n5\n5\n5\n5\n5\n3\n5\n5\n5\n3\n5\n2\n5\n5\n2\n2\n5\n5\n4\n5\n5\n5\n5\n5\n3\n5\n3\n5\n5\n2\n2\n5\n5\n5\n5\n3\n5\n5\n5\n5\n5\n5\n4\n2\n5\n5\n3\n5\n5\n2\n3\n2\n2\n5\n3\n5\n5\n5\n5\n5\n5\n3\n5\n5\n5\n5\n5\n5\n5\n3\n3\n2\n5\n3\n5\n5\n5\n5\n4\n5\n3\n3\n3\n5\n5\n5\n3\n5\n5\n5\n3\n3\n5\n5\n5\n5\n5\n5\n5\n3\n5\n3\n5\n5\n5\n5\n5\n3\n5\n2\n5\n2\n5\n5\n5\n5\n5\n5\n3\n5\n4\n5\n5\n5\n5\n5\n5\n5\n2\n3\n5\n5\n5\n3\n3\n5\n5\n5\n5\n5\n5\n4\n5\n5\n5\n4\n3\n3\n2\n2\n5\n5\n3\n5\n5\n5\n4\n5\n3\n2\n3\n5\n5\n5\n3\n5\n5\n5\n5\n5\n2\n5\n3\n3\n5\n3\n5\n5\n2\n5\n3\n3\n5\n5\n5\n3\n5\n3\n5\n5\n2\n5\n5\n5\n3\n5\n5\n2\n5\n4\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n3\n5\n5\n3\n5\n3\n5\n5\n5\n5\n5\n3\n5\n5\n3\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n3\n5\n3\n3\n5\n4\n5\n2\n2\n3\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n4\n5\n3\n5\n3\n5\n5\n5\n3\n5\n5\n3\n5\n5\n3\n2\n5\n5\n2\n5\n5\n3\n5\n3\n5\n3\n5\n5\n5\n5\n3\n2\n5\n5\n5\n5\n5\n5\n5\n3\n5\n3\n3\n5\n3\n3\n5\n3\n5\n2\n3\n5\n3\n5\n5\n5\n2\n5\n2\n3\n5\n5\n5\n3\n3\n5\n5\n2\n5\n5\n5\n5\n3\n5\n5\n3\n4\n5\n2\n5\n5
\n5\n5\n3\n3\n5\n5\n5\n3\n3\n3\n5\n5\n2\n3\n3\n5\n4\n3\n4\n5\n2\n5\n5\n2\n5\n3\n3\n5\n5\n5\n5\n2\n3\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n3\n5\n3\n5\n5\n5\n3\n3\n3\n5\n5\n5\n5\n5\n5\n3\n5\n4\n3\n5\n2\n5\n5\n5\n2\n5\n5\n5\n3\n5\n5\n5\n5\n4\n3\n4\n5\n3\n2\n5\n3\n5\n5\n4\n5\n5\n5\n5\n2\n5\n5\n3\n5\n5\n5\n5\n2\n2\n5\n5\n5\n2\n5\n5\n3\n3\n5\n3\n5\n3\n5\n5\n5\n5\n3\n5\n2\n2\n3\n5\n5\n5\n4\n3\n3\n3\n2\n5\n3\n3\n5\n5\n5\n5\n5\n3\n3\n5\n3\n5\n5\n3\n5\n5\n5\n5\n5\n5\n3\n5\n5\n3\n5\n3\n5\n5\n5\n5\n5\n3\n3\n4\n5\n3\n5\n5\n3\n3\n3\n5\n5\n3\n5\n5\n5\n3\n5\n5\n4\n5\n3\n2\n5\n5\n2\n3\n5\n5\n5\n5\n5\n5\n5\n3\n4\n5\n3\n3\n3\n5\n5\n5\n3\n5\n3\n5\n5\n5\n3\n5\n5\n2\n3\n5\n3\n2\n5\n5\n5\n5\n5\n3\n5\n5\n4\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n3\n5\n5\n5\n5\n5\n3\n5\n5\n5\n5\n4\n5\n5\n5\n3\n5\n5\n3\n5\n5\n5\n5\n5\n5\n2\n5\n3\n5\n5\n5\n3\n2\n5\n2\n5\n3\n3\n5\n5\n3\n5\n3\n5\n5\n5\n5\n3\n3\n3\n5\n5\n3\n3\n3\n5\n4\n5\n5\n5\n5\n5\n5\n5\n5\n3\n2\n5\n3\n5\n5\n5\n2\n5\n5\n3\n3\n3\n3\n5\n5\n2\n5\n5\n5\n5\n5\n3\n5\n5\n3\n3\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n3\n5\n3\n2\n2\n5\n5\n5\n5\n2\n3\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n3\n5\n3\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n3\n5\n3\n5\n5\n2\n5\n3\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n4\n3\n2\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n3\n5\n3\n3\n2\n5\n3\n5\n5\n2\n5\n5\n2\n2\n5\n5\n2\n5\n5\n5\n3\n5\n5\n2\n3\n5\n5\n5\n5\n2\n3\n3\n5\n5\n5\n5\n5\n5\n3\n3\n3\n3\n5\n5\n3\n5\n5\n3\n3\n3\n3\n3\n5\n5\n5\n5\n5\n3\n3\n5\n3\n3\n2\n5\n5\n2\n5\n5\n5\n3\n4\n5\n3\n2\n5\n5\n5\n5\n2\n3\n3\n5\n5\n2\n3\n5\n5\n3\n3\n5\n5\n5\n5\n5\n3\n3\n5\n3\n5\n5\n5\n5\n3\n4\n4\n5\n5\n5\n5\n3\n3\n5\n5\n2\n5\n5\n4\n5\n3\n5\n5\n5\n2\n5\n5\n5\n5\n3\n2\n5\n5\n5\n2\n5\n5\n3\n5\n3\n3\n3\n5\n4\n5\n3\n3\n5\n2\n3\n2\n5\n2\n2\n2\n5\n2\n2\n3\n3\n4\n5\n5\n2\n2\n5\n5\n5\n5\n2\n5\n2\n3\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n4\n5\n5\n5\n2\n5\n5\n5\n5\n5\n3\n5\n5\n3\n2\n2\n3\n3\n5\n5\n5\n5\n3\n5\n5\n3\n4\n4\n5\n2\n2\n2\n2\n2\n2\n2\n5\n5\n2\n2\n5\n5\n
2\n5\n3\n5\n5\n2\n3\n5\n5\n5\n2\n5\n3\n5\n5\n3\n3\n3\n5\n5\n5\n5\n5\n5\n5\n3\n3\n3\n5\n5\n5\n5\n5\n5\n5\n5\n3\n5\n5\n2\n2\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n2\n3\n5\n5\n5\n5\n5\n4\n2\n5\n5\n3\n3\n5\n5\n5\n5\n5\n5\n3\n3\n3\n5\n5\n2\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n3\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n3\n3\n5\n5\n5\n5\n5\n5\n5\n5\n5\n3\n5\n5\n5\n5\n5\n3\n5\n3\n5\n3\n3\n3\n3\n5\n3\n5\n5\n5\n5\n3\n3\n5\n3\n5\n5\n5\n5\n5\n5\n5\n5\n5\n3\n3\n5\n5\n5\n5\n5\n5\n3\n5\n5\n5\n5\n3\n5\n3\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n4\n5\n5\n4\n5\n5\n5\n5\n5\n5\n5\n5\n3\n5\n5\n2\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n3\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\
n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5
\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n3\n2\n2\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n
5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n2\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n2\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n2\n2\n2\n2\n2\n2\n5\n5\n5\n5\n5\n2\n2\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n3\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\
n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n3\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n4\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n3\n5\n5\n5\n5\n2\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n2\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n3\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5
\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n3\n5\n5\n5\n3\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n3\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n3\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n3\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n3\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n3\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n3\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n3\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n4\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n4\n5\n5\n5\n3\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n3\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n3\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n3\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n
5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n3\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n3\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n3\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n3\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n3\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n3\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n3\n5\n5\n5\n5\n5\n4\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n3\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n3\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n3\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n3\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n2\n5\n5\n5\n5\n5\n5\n5\n3\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n3\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n3\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n3\n5\n5\n3\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\
n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n3\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n3\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n3\n2\n2\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n4\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n3\n5\n5\n5\n2\n5\n2\n5\n5\n5\n5\n5\n5\n2\n5\n5\n2\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n3\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n3\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n3\n5\n3\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n3\n3\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n3\n5\n5\n5\n5\n5\n5\n5\n5\n4\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n3\n3\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n2\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n2\n5
\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n2\n2\n2\n2\n2\n2\n5\n5\n5\n5\n5\n5\n5\n2\n2\n5\n5\n2\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n3\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n3\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n4\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n3\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n4\n5\n5\n5\n5\n5\n5\n4\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n3\n5\n5\n5\n2\n5\n5\n5\n5\n5\n2\n2\n5\n5\n3\n5\n5\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n5\n2\n5\n2\n5\n3\n2\n3\n2\n3\n3\n5\n3\n3\n2\n3\n2\n2\n5\n3\n3\n4\n2\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n2\n3\n3\n5\n2\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n5\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n2\n3\n3\n3\n2\n3\n2\n3\n3\n3\n3\n3\n3\n2\n3\n2\n2\n2\n2\n3\n3\n
2\n3\n3\n3\n3\n2\n2\n5\n3\n3\n3\n2\n2\n3\n3\n2\n3\n2\n2\n2\n2\n3\n3\n5\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n5\n3\n3\n3\n2\n2\n3\n3\n3\n5\n3\n3\n5\n3\n5\n5\n3\n3\n3\n3\n2\n3\n2\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n2\n5\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n4\n2\n5\n3\n2\n3\n3\n3\n2\n3\n3\n3\n3\n2\n2\n5\n3\n3\n3\n5\n3\n3\n3\n5\n5\n3\n3\n2\n3\n3\n3\n3\n2\n3\n3\n3\n5\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n5\n3\n3\n2\n2\n3\n2\n2\n4\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n5\n2\n5\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n2\n3\n3\n3\n3\n5\n3\n3\n3\n5\n3\n3\n3\n3\n3\n5\n3\n2\n3\n3\n5\n3\n3\n3\n5\n2\n5\n3\n3\n2\n5\n3\n5\n3\n3\n3\n5\n3\n2\n3\n3\n5\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n2\n3\n3\n2\n2\n3\n3\n5\n3\n5\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n5\n3\n3\n3\n3\n2\n3\n2\n5\n3\n3\n3\n3\n3\n3\n3\n2\n2\n3\n3\n5\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n2\n3\n3\n2\n3\n5\n3\n3\n3\n5\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n2\n3\n2\n5\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n2\n3\n3\n3\n2\n3\n3\n3\n3\n3\n5\n3\n3\n3\n5\n3\n3\n3\n3\n2\n3\n5\n5\n3\n3\n2\n3\n3\n3\n3\n3\n5\n3\n3\n3\n2\n3\n2\n2\n5\n4\n3\n3\n3\n5\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n2\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n2\n3\n2\n3\n3\n3\n2\n5\n3\n5\n3\n3\n5\n3\n2\n3\n3\n3\n5\n3\n3\n3\n5\n3\n3\n2\n5\n3\n5\n3\n3\n5\n3\n5\n2\n3\n3\n3\n2\n3\n5\n3\n3\n2\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n2\n3\n5\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n2\n2\n3\n2\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n5\n3\n2\n3\n2\n5\n5\n5\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n2\n3\n2\n3\n3\n3\n4\n3\n3\n2\n5\n3\n2\n3\n5\n2\n5\n5\n3\n2\n3\n3\n3\n2\n3\n3\n3\n5\n3\n3\n5\n2\n3\n3\n3\n3\n5\n3\n3\n2\n2\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n5\n3\n3\n3\n2\n3\n3\n5\n5\n3\n2\n2\n3\n3\n3\n3\n3\n2\n2\n5\n3\n5\n3\n3\n3\n3\n3\n3\n3\n5\
n3\n2\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n5\n3\n2\n3\n2\n5\n3\n5\n3\n3\n3\n3\n3\n3\n5\n3\n3\n5\n5\n3\n3\n3\n5\n3\n3\n3\n2\n3\n2\n3\n5\n3\n3\n3\n3\n3\n3\n5\n3\n3\n2\n3\n3\n2\n3\n5\n3\n3\n3\n3\n2\n2\n3\n2\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n2\n2\n3\n2\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n5\n3\n2\n3\n3\n5\n3\n2\n3\n3\n3\n3\n3\n5\n2\n3\n3\n3\n4\n3\n2\n2\n3\n3\n5\n3\n5\n3\n3\n3\n3\n5\n5\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n2\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n5\n2\n3\n3\n3\n3\n2\n2\n3\n3\n3\n5\n3\n3\n2\n3\n2\n3\n2\n2\n3\n3\n2\n3\n2\n5\n5\n3\n3\n5\n3\n5\n3\n3\n3\n3\n2\n2\n5\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n2\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n5\n3\n2\n5\n3\n2\n3\n3\n3\n5\n3\n3\n3\n3\n2\n3\n2\n3\n4\n3\n2\n3\n5\n3\n3\n3\n2\n5\n3\n3\n3\n2\n5\n2\n2\n3\n3\n5\n3\n5\n3\n3\n5\n3\n3\n5\n5\n3\n3\n3\n3\n3\n2\n4\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n2\n3\n5\n5\n3\n3\n2\n3\n3\n5\n5\n2\n3\n3\n2\n3\n3\n3\n3\n3\n3\n2\n2\n2\n3\n3\n3\n3\n3\n5\n3\n3\n3\n2\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n2\n3\n3\n3\n3\n3\n5\n3\n3\n3\n2\n2\n2\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n2\n5\n2\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n5\n3\n3\n3\n2\n2\n3\n5\n3\n3\n2\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n2\n2\n5\n3\n5\n3\n3\n3\n5\n3\n2\n3\n3\n3\n3\n3\n3\n2\n2\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n2\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n2\n3\n2\n3\n2\n3\n5\n2\n3\n2\n3\n3\n3\n3\n3\n4\n5\n3\n3\n3\n2\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n5\n5\n3\n3\n2\n3\n3\n3\n3\n3\n5\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n4\n5\n3\n4\n3\n3\n3\n3\n2\n2\n3\n3\n3\n2\n5\n5\n2\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n2\n3\n2\n5\n5\n3\n5\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n5\n5\n3\n5\n3\n3\n3\n3\n3\n3\n2\n3\n2\n3\n3\n2\n2\n2\n3\n3\n5\n3\n3\n3\n2\n2\n3\n3\n3\n3\n2\n3\n2\n2\n5\n3\n3\n2\n3\n3\n5\n3\n5\n3\n2\n3\n2\n3\n3\n3\n3\n3\n5\n3\n3\n2\n3
\n3\n3\n5\n3\n2\n2\n2\n3\n3\n3\n2\n3\n3\n3\n5\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n5\n3\n3\n5\n3\n5\n3\n3\n2\n5\n5\n2\n3\n2\n5\n3\n5\n2\n5\n3\n2\n5\n5\n3\n3\n2\n3\n3\n3\n3\n3\n2\n3\n2\n2\n2\n5\n2\n3\n3\n3\n2\n5\n3\n3\n3\n3\n3\n3\n2\n3\n5\n3\n3\n5\n5\n3\n2\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n2\n3\n3\n3\n2\n2\n3\n3\n5\n2\n2\n3\n2\n5\n5\n3\n3\n2\n5\n2\n5\n3\n2\n3\n3\n3\n2\n3\n3\n2\n3\n2\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n5\n3\n5\n3\n3\n3\n3\n3\n3\n5\n5\n3\n5\n2\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n2\n3\n5\n3\n2\n3\n3\n3\n3\n2\n3\n3\n3\n5\n5\n3\n2\n3\n3\n2\n3\n3\n3\n3\n3\n5\n5\n5\n3\n2\n2\n3\n3\n5\n3\n3\n3\n2\n3\n3\n3\n2\n2\n3\n3\n3\n2\n3\n3\n5\n5\n3\n3\n3\n5\n3\n3\n3\n2\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n5\n3\n5\n2\n5\n3\n3\n3\n2\n3\n5\n3\n3\n3\n3\n2\n2\n3\n3\n5\n3\n3\n2\n3\n5\n3\n3\n2\n3\n3\n5\n5\n3\n2\n3\n2\n3\n3\n2\n2\n2\n3\n5\n5\n3\n5\n3\n3\n2\n5\n2\n3\n3\n3\n3\n3\n3\n2\n2\n3\n3\n5\n3\n5\n3\n5\n2\n2\n3\n3\n3\n3\n3\n3\n3\n5\n5\n2\n3\n3\n3\n3\n3\n3\n2\n3\n2\n5\n3\n2\n3\n3\n3\n5\n3\n5\n3\n2\n5\n5\n3\n3\n3\n3\n2\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n5\n2\n3\n2\n5\n5\n3\n2\n3\n3\n3\n3\n2\n3\n5\n3\n2\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n2\n2\n3\n5\n2\n3\n3\n3\n3\n3\n3\n5\n3\n5\n3\n5\n3\n2\n3\n3\n3\n2\n3\n3\n3\n2\n3\n3\n3\n5\n3\n3\n3\n5\n3\n3\n3\n3\n2\n5\n3\n5\n3\n3\n3\n2\n4\n3\n3\n2\n2\n3\n2\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n2\n3\n3\n2\n3\n5\n3\n2\n2\n3\n3\n3\n3\n2\n3\n3\n3\n2\n3\n2\n5\n3\n3\n3\n5\n3\n3\n2\n2\n2\n3\n3\n2\n3\n2\n3\n2\n3\n2\n3\n3\n3\n5\n2\n3\n3\n2\n3\n3\n2\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n2\n3\n2\n3\n2\n3\n3\n3\n3\n5\n5\n3\n3\n3\n3\n2\n3\n3\n2\n2\n3\n3\n3\n5\n3\n5\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n2\n3\n3\n2\n3\n3\n3\n2\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n2\n3\n3\n3\n5\n5\n5\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n2\n2\n3\n3\n3\n4\n3\n3\n3\n2\n3\n5\n2\n2\n3\n3\n3\n2\n2\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n
3\n3\n2\n5\n3\n5\n5\n2\n5\n3\n3\n3\n3\n5\n3\n5\n3\n2\n5\n5\n3\n3\n4\n4\n3\n5\n3\n3\n2\n3\n3\n3\n2\n2\n3\n3\n3\n5\n3\n3\n3\n3\n3\n2\n2\n2\n3\n2\n3\n2\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n5\n5\n5\n2\n3\n5\n5\n2\n5\n3\n3\n2\n3\n5\n3\n3\n5\n3\n3\n2\n3\n5\n5\n3\n3\n3\n3\n2\n2\n2\n2\n5\n3\n3\n3\n3\n3\n2\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n2\n3\n3\n3\n3\n3\n2\n2\n5\n3\n3\n2\n2\n2\n5\n3\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n3\n2\n2\n2\n3\n3\n2\n3\n2\n5\n5\n5\n5\n3\n3\n2\n2\n3\n2\n3\n2\n2\n3\n3\n5\n5\n3\n3\n3\n3\n5\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n2\n2\n3\n2\n3\n2\n3\n3\n3\n5\n3\n5\n2\n3\n3\n5\n5\n5\n3\n2\n2\n3\n3\n3\n3\n3\n3\n3\n5\n5\n5\n3\n3\n3\n3\n5\n3\n3\n3\n3\n2\n3\n5\n3\n5\n5\n3\n3\n5\n2\n2\n3\n5\n5\n3\n3\n3\n3\n2\n2\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n5\n3\n3\n3\n4\n3\n5\n3\n2\n2\n2\n2\n2\n2\n2\n3\n5\n5\n3\n3\n3\n3\n2\n2\n3\n5\n2\n3\n2\n3\n3\n3\n3\n2\n2\n2\n3\n5\n3\n2\n3\n3\n3\n3\n2\n2\n2\n3\n3\n3\n3\n2\n5\n3\n3\n3\n5\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n5\n5\n3\n3\n3\n3\n3\n3\n3\n3\n5\n2\n5\n3\n5\n5\n3\n3\n5\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n5\n3\n2\n3\n3\n2\n2\n3\n2\n3\n3\n3\n5\n5\n3\n3\n3\n5\n3\n3\n5\n5\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n5\n3\n5\n4\n2\n5\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n2\n3\n3\n5\n2\n2\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n5\n3\n3\n2\n3\n3\n3\n2\n3\n2\n5\n5\n3\n3\n3\n5\n2\n3\n2\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n5\n5\n2\n2\n5\n5\n5\n5\n3\n5\n2\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n4\n3\n5\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n2\n3\n5\n3\n3\n5\n5\n3\n2\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n5\n3\n3\n3\n3\n5\n5\n5\n5\n2\n3\n5\n5\n3\n3\n3\n5\n3\n3\n3\n3\n5\n3\n5\n3\n3\n3\n3\n3\n5\n3\n3\n3\n5\n3\n3\n3\n3\n5\n5\n2\n5\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n5\n3\n3\n3\n2\n2\
n3\n3\n3\n3\n2\n3\n3\n2\n5\n3\n3\n3\n5\n3\n5\n3\n5\n3\n2\n2\n3\n3\n3\n5\n5\n5\n4\n3\n3\n3\n3\n5\n3\n4\n3\n5\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n2\n3\n3\n2\n3\n3\n3\n3\n3\n2\n2\n3\n5\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n4\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n3\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n5\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n3\n5\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2
\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n4\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n5\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n3\n2\n2\n2\n2\n2\n3\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n3\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n4\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n3\n2\n2\n2\n3\n4\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n3\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n5\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n5\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n5\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n
3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n5\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n3\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n3\n2\n3\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n3\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n3\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n5\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n4\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n3\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n4\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n3\n2\n2\n2\n3\n4\n4\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n3\n2\n2\n2\n2\n2\n2\n2\n3\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\
n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n3\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n4\n2\n4\n4\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n3\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n5\n2\n2\n2\n2\n2\n4\n4\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n3\n5\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n5\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n3\n5\n2\n2\n4\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n4\n2\n2\n2\n4\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n5\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n3\n3\n2\n2\n2\n4\n2\n2\n2\n4\n4\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n3\n5\n2\n2\n2\n4\n4\n3\n2\n2\n2\n4\n2\n2\n4\n3\n2\n2
\n4\n2\n3\n2\n4\n4\n4\n3\n2\n2\n2\n4\n3\n2\n2\n2\n2\n2\n2\n4\n2\n3\n2\n2\n4\n2\n2\n4\n4\n2\n4\n2\n4\n2\n2\n2\n3\n2\n2\n3\n2\n3\n4\n2\n2\n4\n4\n2\n4\n4\n3\n4\n3\n3\n3\n2\n2\n4\n2\n4\n4\n4\n2\n4\n4\n4\n4\n2\n2\n2\n4\n2\n3\n4\n2\n4\n4\n4\n4\n3\n2\n2\n2\n4\n4\n3\n4\n2\n3\n2\n4\n3\n4\n4\n2\n3\n4\n3\n2\n2\n3\n3\n2\n4\n2\n3\n4\n2\n2\n2\n4\n4\n2\n2\n2\n2\n3\n2\n3\n3\n3\n2\n4\n4\n4\n4\n3\n3\n4\n4\n2\n2\n4\n2\n3\n4\n4\n2\n4\n4\n4\n2\n4\n2\n4\n2\n4\n2\n4\n3\n2\n3\n2\n2\n3\n2\n4\n2\n4\n4\n2\n2\n3\n2\n2\n3\n4\n2\n3\n2\n2\n4\n2\n3\n4\n4\n2\n2\n4\n2\n4\n3\n3\n2\n4\n2\n4\n4\n4\n2\n4\n2\n3\n2\n2\n4\n2\n3\n4\n2\n2\n3\n2\n2\n3\n4\n2\n2\n4\n2\n3\n2\n4\n2\n2\n4\n4\n2\n4\n3\n4\n2\n3\n4\n4\n3\n2\n2\n2\n2\n4\n3\n4\n3\n4\n3\n2\n2\n3\n2\n4\n3\n2\n3\n4\n4\n3\n4\n2\n4\n4\n3\n3\n4\n2\n4\n4\n2\n2\n2\n3\n4\n2\n2\n4\n4\n3\n3\n2\n3\n4\n2\n4\n2\n2\n4\n4\n2\n2\n4\n3\n4\n3\n3\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n4\n4\n4\n4\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n3\n4\n4\n4\n2\n2\n2\n2\n2\n4\n2\n3\n2\n3\n2\n4\n2\n4\n3\n4\n3\n3\n3\n4\n2\n3\n4\n2\n2\n2\n4\n4\n2\n4\n4\n4\n4\n2\n2\n3\n4\n4\n2\n2\n4\n3\n3\n2\n2\n4\n2\n3\n2\n2\n2\n2\n3\n2\n2\n2\n4\n2\n2\n2\n3\n2\n2\n2\n2\n2\n4\n4\n4\n4\n4\n2\n4\n2\n4\n2\n2\n4\n2\n2\n2\n4\n3\n2\n3\n4\n2\n2\n4\n2\n2\n4\n2\n4\n2\n4\n4\n2\n3\n4\n4\n2\n2\n4\n3\n2\n3\n3\n4\n3\n3\n3\n3\n3\n3\n2\n3\n2\n2\n4\n3\n2\n2\n2\n4\n4\n4\n4\n4\n3\n3\n3\n3\n2\n3\n4\n4\n4\n2\n3\n3\n4\n4\n2\n2\n2\n4\n3\n2\n2\n4\n2\n2\n4\n3\n4\n4\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n4\n2\n2\n4\n3\n4\n4\n4\n2\n2\n2\n2\n2\n2\n2\n4\n4\n4\n2\n2\n2\n3\n2\n2\n4\n4\n4\n2\n2\n2\n2\n2\n2\n2\n4\n4\n4\n2\n2\n2\n4\n2\n2\n2\n3\n2\n2\n2\n2\n2\n3\n4\n4\n4\n4\n4\n4\n2\n4\n3\n4\n2\n2\n2\n4\n2\n2\n4\n2\n4\n4\n3\n2\n2\n3\n4\n4\n2\n4\n2\n2\n4\n2\n3\n2\n4\n4\n4\n4\n2\n2\n3\n3\n3\n3\n3\n2\n2\n4\n4\n3\n2\n3\n4\n2\n4\n2\n4\n2\n4\n4\n4\n4\n4\n3\n2\n4\n4\n2\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n3\n3\n2\n4\n2\n2\n4\n4\n4\n4\n2\n3\n4\n2\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n
4\n4\n4\n4\n4\n4\n3\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n2\n4\n4\n3\n4\n2\n2\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n2\n2\n2\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n3\n4\n4\n4\n3\n4\n4\n3\n4\n4\n4\n4\n2\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n2\n4\n4\n4\n2\n4\n4\n4\n3\n2\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n2\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n3\n4\n4\n2\n4\n2\n2\n4\n4\n4\n4\n4\n4\n4\n3\n2\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n2\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n2\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n2\n4\n4\n3\n4\n3\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n3\n2\n2\n4\n4\n4\n3\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n2\n3\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n2\n4\n4\n3\n4\n4\n4\n4\n3\n4\n4\n3\n4\n4\n4\n2\n4\n4\n4\n4\n4\n2\n4\n4\n4\n3\n4\n4\n4\n4\n4\n2\n4\n2\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n2\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n3\n4\n2\n4\n3\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n3\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n2\n4\n4\n4\n4\n4\n3\n4\n4\n3\n3\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n2\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\
n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n3\n4\n2\n4\n3\n2\n2\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n2\n4\n3\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n3\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n2\n3\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n3\n4\n3\n4\n4\n4\n4\n4\n4\n2\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n2\n4\n4\n4\n4\n3\n4\n2\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n2\n4\n3\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n2\n4\n3\n3\n4\n4\n4\n4\n2\n4\n2\n4\n4\n2\n4\n3\n2\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n2\n4\n4\n4\n4\n3\n4\n2\n4\n4\n4\n2\n4\n4\n4\n4\n4\n3\n4\n2\n4\n2\n4\n4\n4\n2\n3\n4\n4\n4\n4\n2\n4\n4\n4\n2\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n2\n4\n3\n4\n4\n4\n2\n3\n2\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n3\n4\n2\n4\n4\n4\n4\n4\n4\n2\n2\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n2\n2\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n2\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n3\n4\n3\n4\n3\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n3\n4\n2\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n2\n4\n4\n2\n4\n4\n2\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n2\n4\n4\n4\n3\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n3\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n3\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4
\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n3\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n2\n2\n3\n4\n4\n3\n2\n4\n2\n2\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n2\n2\n4\n4\n4\n2\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n2\n4\n4\n4\n2\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n2\n4\n2\n2\n2\n4\n2\n4\n4\n4\n2\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n2\n4\n4\n3\n2\n4\n4\n4\n4\n4\n2\n2\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n3\n2\n4\n4\n4\n4\n4\n3\n2\n4\n2\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n3\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n2\n4\n3\n4\n3\n4\n4\n4\n4\n4\n4\n2\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n3\n4\n3\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n3\n4\n4\n4\n3\n3\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n2\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n3\n4\n4\n3\n2\n2\n4\n4\n4\n4\n4\n4\n2\n2\n4\n4\n4\n4\n4\n3\n4\n4\n2\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n2\n4\n4\n4\n4\n4\n2\n4\n3\n4\n4\n2\n3\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n2\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n2\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n2\n2\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n3\n3\n2\n2\n4\n3\n4\n4\n4\n4\n2\n4\n2\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n2\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n2\n4\n2\n4\n2\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n2\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n3\n3\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n3\n4\n3\n4\n4\n3\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n3\n3\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n
4\n4\n2\n4\n4\n4\n3\n4\n4\n3\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n3\n4\n4\n3\n4\n4\n4\n4\n4\n2\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n3\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n4\n3\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n2\n2\n4\n4\n2\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n2\n2\n4\n4\n4\n2\n2\n2\n4\n4\n4\n4\n4\n2\n2\n3\n4\n3\n4\n4\n4\n4\n2\n4\n4\n2\n4\n2\n4\n4\n4\n4\n4\n4\n4\n2\n4\n2\n4\n4\n2\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n2\n2\n4\n2\n2\n2\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n2\n2\n3\n4\n4\n4\n4\n4\n4\n2\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n2\n2\n2\n2\n2\n2\n4\n4\n4\n4\n4\n4\n2\n2\n4\n4\n2\n4\n2\n4\n4\n4\n4\n2\n4\n4\n4\n2\n4\n4\n4\n3\n2\n2\n2\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n2\n4\n3\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n2\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n2\n2\n4\n4\n4\n3\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n2\n4\n4\n4\n4\n4\n2\n2\n4\n3\n4\n4\n4\n4\n4\n3\n3\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n3\n3\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\
n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n2\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n2\n4\n4\n4\n4\n4\n2\n2\n4\n4\n3\n4\n4\n4\n2\n4\n4\n3\n3\n2\n2\n2\n4\n2\n3\n2\n2\n4\n3\n4\n4\n3\n4\n4\n3\n4\n4\n3\n4\n3\n4\n2\n3\n2\n4\n2\n4\n3\n2\n2\n2\n4\n3\n2\n2\n2\n2\n2\n4\n2\n3\n3\n3\n3\n4\n4\n3\n2\n2\n4\n3\n3\n3\n3\n4\n3\n2\n3\n2\n4\n4\n2\n4\n2\n4\n3\n4\n2\n2\n4\n2\n3\n2\n2\n3\n3\n4\n2\n4\n2\n3\n3\n4\n4\n2\n3\n4\n2\n2\n4\n4\n4\n4\n3\n3\n3\n4\n4\n2\n3\n4\n4\n3\n3\n3\n3\n3\n3\n3\n2\n4\n2\n4\n3\n4\n4\n2\n4\n3\n4\n3\n4\n4\n3\n4\n4\n2\n4\n4\n4\n4\n3\n4\n4\n3\n2\n4\n2\n2\n3\n2\n4\n3\n3\n2\n4\n3\n4\n3\n2\n4\n4\n4\n4\n4\n3\n3\n4\n3\n2\n2\n4\n3\n3\n2\n4\n4\n4\n3\n3\n4\n2\n3\n4\n2\n4\n3\n4\n4\n2\n4\n4\n4\n2\n3\n4\n4\n4\n3\n4\n3\n4\n4\n2\n4\n2\n3\n3\n3\n3\n3\n3\n2\n4\n4\n2\n3\n4\n2\n3\n2\n2\n4\n4\n4\n3\n4\n4\n4\n4\n2\n3\n3\n2\n4\n3\n4\n4\n2\n3\n2\n3\n3\n4\n2\n3\n4\n3\n4\n3\n2\n4\n3\n4\n4\n4\n2\n4\n3\n3\n3\n3\n4\n2\n4\n2\n2\n4\n2\n3\n4\n4\n4\n4\n4\n2\n3\n3\n4\n4\n4\n4\n2\n4\n2\n4\n4\n3\n2\n3\n3\n4\n4\n4\n2\n2\n4\n3\n3\n3\n3\n3\n3\n4\n2\n3\n2\n2\n3\n2\n2\n2\n3\n3\n3\n2\n3\n3\n4\n4\n2\n4\n4\n4\n4\n4\n2\n3\n2\n4\n3\n4\n2\n3\n4\n3\n3\n3\n3\n4\n4\n2\n2\n3\n3\n4\n2\n2\n4\n2\n4\n4\n3\n4\n4\n3\n3\n4\n4\n2\n3\n2\n3\n4\n2\n4\n3\n3\n4\n3\n3\n3\n4\n2\n3\n3\n4\n4\n3\n4\n4\n2\n4\n4\n2\n3\n3\n2\n2\n4\n2\n3\n4\n2\n4\n3\n4\n4\n3\n2\n3\n4\n4\n3\n3\n3\n3\n2\n3\n3\n2\n3\n2\n4\n3\n4\n2\n4\n3\n2\n3\n2\n4\n2\n2\n4\n3\n4\n3\n4\n3\n2\n4\n3\n4\n3\n3\n2\n3\n4\n3\n3\n4\n4\n4\n3\n3\n2\n2\n2\n2\n4\n4\n4\n3\n3\n4\n3\n3\n4\n4\n3\n3\n3\n4\n3\n2\n2\n4\n3\n3\n3\n2\n4\n3\n3\n4\n3\n2\n3\n3\n4\n3\n4\n4\n4\n3\n4\n3\n3\n4\n4\n4\n2\n4\n3\n2\n3\n4\n3\n3\n4\n4\n3\n3\n4\n3\n4\n3\n4\n2\n4\n2\n4\n4\n4\n2\n2\n3\n2\n2\n3\n3\n3\n4\n2\n3\n4\n3\n4\n4\n4\n3\n3\n3\n2\n4\n4\n3\n3\n4\n2\n3\n2\n4\n2\n2\n4\n4\n3\n2\n4\n4\n4\n2\n3\n3\n2\n3\n4\n3\n3\n4\n3
\n4\n4\n4\n3\n3\n3\n3\n3\n4\n2\n2\n3\n4\n3\n3\n2\n3\n2\n2\n2\n3\n4\n4\n4\n4\n4\n4\n4\n4\n2\n2\n2\n3\n3\n3\n2\n4\n2\n2\n2\n4\n3\n2\n2\n2\n2\n2\n3\n4\n3\n3\n4\n3\n4\n4\n4\n2\n4\n2\n3\n3\n2\n3\n3\n3\n2\n2\n4\n4\n4\n2\n3\n2\n4\n2\n3\n2\n4\n2\n4\n3\n4\n4\n3\n3\n4\n4\n3\n3\n4\n2\n3\n2\n3\n3\n3\n4\n2\n3\n3\n2\n4\n4\n3\n4\n2\n4\n4\n4\n4\n3\n3\n4\n4\n2\n4\n4\n3\n3\n4\n3\n4\n3\n3\n2\n2\n4\n4\n3\n2\n3\n3\n2\n2\n2\n3\n4\n2\n3\n3\n2\n2\n4\n4\n4\n2\n2\n4\n3\n2\n4\n2\n3\n4\n2\n4\n3\n2\n4\n2\n2\n4\n4\n2\n2\n3\n4\n2\n3\n2\n2\n3\n2\n2\n3\n4\n4\n3\n4\n4\n4\n4\n4\n3\n4\n2\n4\n3\n4\n2\n4\n2\n4\n3\n2\n4\n2\n2\n2\n4\n3\n4\n3\n3\n3\n2\n4\n3\n4\n2\n2\n4\n4\n4\n4\n2\n2\n4\n4\n4\n4\n4\n2\n3\n3\n3\n2\n4\n2\n4\n2\n2\n3\n4\n4\n2\n2\n3\n4\n3\n4\n4\n4\n4\n2\n3\n3\n3\n3\n4\n4\n3\n4\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n2\n3\n3\n4\n3\n3\n2\n3\n3\n2\n2\n3\n3\n4\n3\n2\n3\n2\n3\n3\n4\n4\n2\n3\n4\n4\n4\n4\n4\n4\n3\n3\n3\n3\n3\n3\n4\n4\n4\n2\n2\n3\n3\n3\n4\n4\n4\n2\n2\n2\n4\n4\n3\n4\n2\n4\n3\n3\n2\n4\n4\n2\n3\n3\n3\n3\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n2\n3\n3\n4\n2\n2\n2\n2\n2\n2\n2\n3\n3\n4\n2\n2\n2\n2\n2\n4\n2\n4\n2\n2\n4\n3\n4\n4\n4\n2\n2\n2\n2\n2\n2\n4\n4\n4\n4\n2\n3\n4\n4\n4\n3\n4\n2\n2\n2\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n3\n4\n4\n4\n2\n2\n2\n2\n2\n2\n2\n4\n4\n4\n4\n4\n2\n2\n2\n4\n2\n3\n3\n2\n2\n3\n3\n2\n2\n2\n2\n3\n3\n2\n3\n3\n3\n4\n4\n4\n4\n3\n4\n4\n3\n3\n4\n3\n4\n4\n4\n3\n3\n3\n2\n4\n3\n4\n4\n4\n4\n3\n3\n4\n2\n2\n2\n3\n4\n4\n2\n4\n2\n3\n4\n4\n2\n4\n4\n3\n3\n4\n2\n2\n3\n3\n4\n3\n4\n4\n4\n4\n4\n2\n4\n2\n2\n4\n4\n3\n3\n3\n4\n3\n3\n2\n2\n3\n4\n3\n2\n4\n4\n2\n2\n3\n4\n3\n3\n4\n4\n2\n4\n2\n3\n4\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n4\n4\n3\n3\n4\n4\n4\n4\n2\n4\n2\n4\n2\n3\n3\n4\n3\n4\n4\n4\n4\n4\n3\n4\n3\n3\n3\n2\n4\n4\n2\n2\n3\n3\n3\n3\n3\n2\n3\n3\n3\n2\n3\n3\n4\n4\n3\n3\n3\n3\n2\n3\n3\n3\n3\n2\n4\n2\n3\n4\n3\n3\n3\n2\n3\n2\n2\n3\n3\n3\n4\n2\n3\n3\n2\n3\n3\n3\n3\n4\n3\n4\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n2\n3\n3\n3\n2\n3\n3\n3\n4\n3\n3\n3\n2\n3\n
3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n2\n3\n2\n3\n3\n3\n3\n3\n3\n2\n3\n2\n2\n3\n3\n2\n3\n4\n2\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n2\n2\n2\n2\n3\n3\n3\n3\n3\n4\n3\n2\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n2\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n2\n2\n3\n2\n3\n4\n3\n4\n2\n3\n3\n3\n3\n3\n4\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n2\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n4\n3\n3\n3\n2\n2\n4\n4\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n4\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n2\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n2\n3\n4\n3\n3\n3\n3\n3\n3\n2\n3\n3\n4\n2\n4\n3\n3\n3\n4\n3\n3\n4\n3\n4\n3\n3\n4\n4\n3\n3\n3\n4\n4\n4\n3\n3\n3\n3\n3\n4\n4\n3\n3\n3\n3\n3\n3\n2\n4\n3\n3\n2\n2\n3\n3\n2\n3\n3\n3\n3\n3\n4\n3\n3\n3\n2\n3\n4\n3\n4\n3\n3\n2\n3\n3\n3\n3\n3\n3\n4\n4\n3\n4\n3\n3\n3\n3\n4\n3\n3\n3\n4\n3\n3\n2\n2\n4\n3\n3\n3\n2\n3\n3\n3\n4\n3\n3\n3\n2\n3\n3\n3\n3\n4\n3\n3\n4\n3\n3\n3\n3\n3\n3\n2\n3\n3\n2\n3\n3\n3\n3\n2\n4\n4\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n4\n3\n2\n3\n2\n3\n3\n4\n4\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n4\n2\n4\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n4\n3\n3\n2\n3\n2\n2\n3\n4\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n4\n3\n3\n3\n4\n4\n3\n2\n3\n3\n3\n3\n2\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n4\n2\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n2\n3\n4\n3\n3\n2\n3\n3\n2\n4\n2\n3\n3\n3\n3\n3\n2\n3\n3\n3\n4\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n4\n3\n3\n3\n2\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n2\n3\n3\n3\n3\n2\n4\n2\n2\n4\n2\n3\n3\n3\n3\n3\n4\n4\n5\n3\n3\n3\n3\n3\n4\n3\n2\n3\n2\n3\n2\n3\n3\n2\n3\n3\n3\n3\n3\n4\n4\n3\n4\n4\n2\n4\n2\n3\n2\n3\n4\n3\n3\n3\n3\n3\n3\n2\n3\n2\n2\n3\n2\n3\n2\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\
n2\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n4\n2\n3\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n4\n3\n3\n4\n3\n3\n3\n3\n3\n4\n3\n4\n4\n3\n3\n4\n3\n3\n2\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n2\n3\n3\n3\n3\n3\n4\n2\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n2\n3\n2\n3\n4\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n4\n2\n3\n3\n3\n3\n4\n3\n4\n3\n2\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n4\n3\n3\n2\n3\n3\n2\n3\n2\n4\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n4\n2\n2\n2\n3\n3\n3\n4\n4\n3\n3\n4\n3\n3\n4\n3\n2\n3\n3\n3\n4\n2\n2\n3\n3\n3\n3\n3\n3\n2\n3\n2\n4\n3\n2\n3\n3\n2\n4\n2\n4\n3\n3\n3\n3\n3\n4\n3\n3\n4\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n4\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n2\n3\n3\n2\n3\n3\n3\n3\n3\n2\n3\n4\n2\n3\n2\n3\n4\n3\n2\n3\n3\n3\n3\n3\n2\n3\n3\n3\n4\n3\n3\n2\n2\n4\n3\n2\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n4\n3\n3\n4\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n4\n3\n4\n3\n2\n3\n3\n3\n3\n4\n3\n3\n3\n3\n2\n3\n3\n2\n3\n3\n3\n3\n3\n3\n2\n2\n2\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n2\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n4\n3\n3\n3\n4\n4\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n2\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n2\n3\n3\n3\n4\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n4\n3\n2\n3\n3\n3\n4\n2\n4\n3\n2\n4\n3\n3\n3\n3\n3\n4\n3\n3\n2\n2\n3\n2\n2\n3\n3\n2\n3\n2\n3\n2\n3\n3\n3\n4\n3\n3\n3\n3\n2\n3\n3\n3\n4\n3\n3\n3\n3\n3\n4\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n2\n2\n4\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n4\n3\n3\n2\n2\n3\n3\n4\n3\n3\n3\n2\n3\n3\n3\n2\n3\n2\n3\n3\n3\n3\n3\n4\n3\n3\n3\n4\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3
\n3\n3\n3\n2\n3\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n2\n2\n3\n4\n3\n3\n2\n3\n2\n2\n2\n3\n3\n2\n3\n3\n3\n3\n2\n3\n4\n3\n2\n3\n4\n4\n3\n3\n3\n3\n3\n2\n3\n3\n4\n3\n3\n2\n2\n2\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n4\n3\n2\n4\n2\n2\n3\n3\n2\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n2\n2\n2\n2\n2\n3\n3\n3\n3\n3\n2\n2\n3\n3\n4\n3\n3\n3\n3\n3\n4\n3\n3\n3\n2\n3\n3\n4\n4\n3\n3\n3\n3\n3\n3\n2\n4\n3\n2\n3\n3\n3\n3\n2\n3\n3\n3\n3\n2\n2\n4\n3\n3\n3\n4\n3\n3\n3\n2\n3\n3\n2\n3\n4\n3\n2\n3\n3\n2\n3\n2\n3\n4\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n2\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n4\n2\n4\n3\n4\n2\n3\n4\n3\n4\n2\n3\n3\n3\n3\n4\n3\n3\n3\n4\n3\n4\n2\n3\n3\n3\n4\n4\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n2\n3\n3\n3\n3\n2\n2\n3\n3\n3\n4\n3\n3\n2\n2\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n2\n3\n3\n2\n2\n2\n3\n3\n3\n3\n3\n4\n3\n2\n3\n2\n3\n3\n3\n3\n3\n3\n2\n2\n4\n3\n3\n4\n3\n3\n3\n2\n2\n3\n4\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n4\n3\n4\n2\n3\n3\n3\n3\n2\n3\n3\n3\n3\n4\n3\n4\n2\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n2\n3\n3\n3\n3\n3\n2\n3\n3\n4\n3\n2\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n2\n2\n3\n3\n2\n3\n3\n3\n4\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n4\n2\n3\n4\n4\n3\n3\n3\n3\n2\n3\n3\n3\n4\n2\n3\n3\n3\n3\n4\n3\n3\n4\n3\n4\n2\n3\n4\n2\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n2\n3\n3\n2\n3\n3\n4\n2\n2\n3\n3\n3\n3\n2\n4\n3\n4\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n2\n3\n3\n3\n2\n4\n3\n2\n3\n2\n4\n2\n3\n4\n3\n4\n3\n2\n3\n3\n2\n4\n4\n4\n3\n4\n3\n2\n3\n3\n3\n3\n3\n3\n3\n2\n4\n3\n2\n4\n2\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n4\n2\n2\n3\n4\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n2\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n4\n2\n3\n3\n3\n3\n3\n3\n3\n4\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n
3\n2\n2\n3\n3\n3\n3\n3\n3\n2\n3\n3\n2\n2\n3\n3\n3\n3\n2\n3\n2\n3\n3\n3\n3\n3\n3\n3\n4\n4\n2\n2\n3\n3\n4\n4\n4\n4\n4\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n4\n4\n4\n3\n3\n3\n3\n2\n3\n3\n3\n2\n2\n3\n3\n3\n3\n4\n4\n4\n3\n3\n2\n2\n2\n3\n2\n3\n2\n3\n3\n3\n4\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n2\n3\n3\n3\n2\n3\n3\n3\n2\n4\n3\n3\n3\n3\n4\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n4\n3\n4\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n4\n2\n2\n3\n3\n3\n2\n2\n3\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n2\n3\n2\n3\n3\n3\n3\n3\n3\n3\n2\n3\n2\n3\n3\n2\n4\n3\n3\n3\n4\n4\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n2\n2\n3\n2\n3\n2\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n2\n4\n4\n4\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n4\n4\n4\n3\n3\n2\n2\n2\n2\n2\n2\n2\n3\n4\n4\n3\n4\n4\n4\n2\n2\n4\n3\n2\n4\n2\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n2\n2\n2\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n4\n4\n4\n4\n3\n3\n3\n4\n3\n4\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n4\n4\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n4\n3\n3\n3\n4\n4\n4\n3\n3\n3\n3\n3\n3\n3\n3\n4\n2\n3\n3\n2\n2\n3\n3\n3\n4\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n4\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n4\n3\n4\n2\n3\n3\n4\n4\n3\n3\n3\n3\n3\n3\n4\n3\n2\n3\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n4\n3\n4\n4\n4\n3\n4\n2\n3\n3\n4\n3\n3\n2\n3\n3\n3\n3\n3\n3\n2\n3\n2\n2\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n4\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n4\n4\n3\n3\n3\n2\n2\n3\n3\n4\n3\n3\n3\n3\n3\n3\n4\n4\n2\n3\n3\n3\n3\n3\n3\n2\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n4\n3\n3\n3\
n3\n4\n3\n3\n3\n3\n3\n3\n4\n4\n4\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n4\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n2\n4\n3\n3\n3\n2\n3\n4\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n2\n4\n3\n3\n3\n2\n2\n4\n3\n3\n3\n3\n3\n4\n4\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n2\n4\n4\n3\n3\n3\n2\n2\n3\n3\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n4\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n3\n4\n2\n3\n3\n2\n2\n2\n2\n2\n2\n2\n4\n2\n3\n3\n2\n2\n2\n2\n2\n4\n2\n4\n2\n2\n4\n2\n2\n2\n3\n2\n4\n2\n4\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n4\n3\n2\n2\n2\n3\n2\n2\n4\n2\n3\n2\n2\n4\n2\n3\n3\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n3\n2\n2\n2\n4\n2\n2\n4\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n3\n3\n2\n3\n2\n2\n2\n3\n2\n2\n3\n2\n2\n2\n3\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n4\n3\n4\n2\n3\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n3\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n3\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n3\n3\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n3\n4\n2\n2\n2\n2\n2\n4\n4\n4\n2\n2\n2\n2\n3\n2\n2\n3\n2\n4\n2\n2\n4\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n4\n2\n2\n2\n3\n3\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n3\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n3\n4\n2\n4\n2\n2\n2\n4\n2\n4\n2\n2\n3\n4\n2\n2\n4\n2\n4\n3\n2\n4\n2\n2\n2\n2\n4\n4\n4\n2\n2\n2\n2\n2\n4\n4\n3\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n3\n3\n2\n2\n2\n3\n4\n3\n2\n2\n2\n2\n3\n2\n2\n2\n4\n4\n2\n4\n2\n2\n2\n2\n4\n2\n2\n3\n4\n2\n3\n2\n2\n4\n3\n2\n3\n2\n2\n2\n2\n4\n2\n2\n2\n5\n2\n2\n2\n4\n2\n4\n2\n2\n4\n2\n3\n3\n2\n2\n4\n2\n3\n4\n2\n2\n2\n3\n2\n2\n2\n4\n4\n2\n2\n3\n4\n2\n2\n2\n2\n2\n4\n2\n4\n3\n2\n2\n2\n2\n3\n2\n4\n4\n3\n4\n2\n4\n2\n2\n2\n2\n3\n2\n2\n2\n3\n4\n2\n4\n2\n4\n2\n2\n2\n2\n3\n3\n2\n3\n2\n3\n3\n2\n2\n3\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n4\n2\n2\n2\n3\n2\n2\n2\n4\n2\n4\n2\n2\n2\n2\n2\n2\n2\n3
\n2\n4\n3\n4\n2\n3\n2\n4\n4\n2\n2\n3\n3\n2\n2\n2\n4\n2\n3\n2\n3\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n3\n2\n2\n2\n3\n2\n3\n2\n2\n2\n2\n2\n2\n3\n2\n2\n4\n2\n2\n2\n3\n2\n2\n4\n2\n3\n3\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n4\n2\n2\n2\n2\n3\n2\n2\n2\n2\n4\n3\n2\n2\n3\n2\n2\n3\n3\n2\n4\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n4\n2\n3\n4\n4\n2\n2\n4\n4\n5\n2\n2\n3\n2\n2\n4\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n4\n4\n3\n4\n4\n2\n4\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n3\n2\n2\n2\n4\n3\n3\n3\n3\n2\n3\n2\n3\n2\n2\n2\n3\n2\n4\n2\n3\n2\n2\n2\n3\n2\n2\n3\n2\n2\n2\n2\n2\n2\n3\n2\n2\n3\n2\n3\n4\n2\n3\n2\n2\n2\n2\n3\n3\n4\n2\n2\n4\n2\n2\n2\n2\n2\n4\n2\n4\n4\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n4\n2\n2\n2\n3\n2\n2\n2\n4\n2\n2\n3\n2\n3\n2\n2\n2\n2\n3\n2\n2\n3\n2\n2\n4\n2\n4\n2\n2\n2\n2\n2\n2\n3\n2\n3\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n3\n4\n3\n2\n2\n4\n4\n2\n2\n2\n2\n4\n2\n3\n2\n3\n2\n4\n2\n2\n2\n2\n2\n2\n2\n3\n2\n3\n2\n2\n2\n3\n2\n2\n2\n3\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n4\n3\n3\n2\n3\n2\n2\n2\n3\n2\n2\n3\n3\n2\n2\n4\n2\n2\n2\n3\n3\n2\n4\n4\n2\n3\n4\n2\n2\n4\n2\n2\n2\n2\n2\n4\n2\n2\n3\n3\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n3\n2\n4\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n4\n3\n2\n2\n4\n2\n2\n2\n2\n3\n3\n2\n2\n2\n2\n3\n4\n2\n3\n4\n3\n2\n2\n3\n2\n2\n2\n3\n3\n2\n3\n2\n3\n2\n3\n2\n2\n2\n3\n2\n2\n2\n2\n2\n4\n2\n3\n4\n2\n2\n2\n3\n4\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n4\n4\n2\n3\n2\n4\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n4\n3\n2\n4\n2\n3\n2\n2\n3\n3\n2\n2\n3\n3\n2\n4\n2\n4\n2\n2\n3\n2\n2\n2\n4\n2\n3\n2\n2\n2\n3\n3\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n4\n2\n2\n2\n4\n2\n2\n2\n2\n3\n2\n3\n2\n2\n4\n3\n3\n2\n2\n2\n4\n3\n2\n2\n3\n2\n3\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n3\n2\n2\n2\n2\n4\n3\n2\n2\n2\n2\n3\n2\n3\n2\n3\n2\n3\n4\n2\n2\n2\n4\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n
3\n2\n3\n2\n3\n3\n2\n4\n2\n4\n4\n2\n3\n2\n4\n3\n2\n2\n2\n2\n2\n3\n2\n3\n4\n3\n2\n2\n2\n2\n2\n3\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n3\n2\n2\n2\n4\n4\n2\n2\n3\n2\n3\n4\n2\n4\n2\n2\n4\n2\n2\n2\n2\n2\n4\n2\n4\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n3\n2\n2\n2\n3\n4\n2\n2\n2\n2\n2\n2\n3\n2\n4\n2\n2\n3\n2\n2\n4\n2\n4\n3\n2\n3\n2\n3\n3\n2\n2\n4\n4\n2\n3\n2\n2\n2\n3\n4\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n3\n3\n2\n2\n3\n2\n2\n2\n2\n2\n3\n4\n2\n3\n3\n4\n2\n2\n3\n2\n2\n3\n2\n4\n3\n2\n3\n2\n2\n2\n2\n2\n3\n2\n2\n2\n3\n2\n2\n3\n2\n2\n2\n4\n2\n2\n2\n3\n4\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n4\n2\n2\n2\n4\n4\n2\n4\n2\n2\n2\n2\n4\n2\n4\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n3\n3\n2\n2\n2\n2\n2\n2\n5\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n4\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n3\n4\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n3\n2\n2\n3\n3\n2\n3\n2\n2\n4\n2\n2\n2\n4\n2\n2\n2\n2\n2\n3\n2\n2\n4\n2\n2\n2\n3\n2\n4\n2\n2\n4\n2\n2\n3\n2\n2\n4\n2\n2\n2\n2\n4\n3\n2\n3\n2\n2\n2\n2\n2\n4\n2\n2\n2\n3\n2\n3\n4\n2\n3\n2\n2\n4\n2\n2\n2\n2\n2\n3\n2\n2\n2\n3\n3\n2\n3\n2\n2\n2\n4\n2\n2\n2\n2\n2\n3\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n3\n4\n2\n4\n2\n4\n2\n2\n4\n2\n4\n2\n3\n3\n2\n2\n4\n2\n2\n2\n2\n2\n4\n2\n3\n2\n2\n4\n4\n3\n3\n4\n3\n2\n4\n3\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n4\n3\n2\n2\n2\n3\n3\n2\n2\n2\n2\n2\n2\n4\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n3\n2\n4\n2\n2\n2\n2\n2\n2\n3\n2\n2\n3\n2\n2\n4\n2\n2\n4\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n4\n3\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n3\n4\n2\n2\n3\n2\n3\n2\n2\n2\n2\n4\n4\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n4\n4\n2\n2\n2\n3\n2\n2\n2\n2\n4\n2\n2\n2\
n3\n2\n4\n2\n2\n4\n3\n4\n2\n2\n4\n2\n2\n2\n2\n4\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n4\n2\n4\n2\n3\n2\n2\n2\n3\n2\n2\n3\n3\n2\n2\n2\n3\n2\n2\n4\n2\n2\n2\n2\n4\n2\n2\n4\n2\n4\n2\n2\n2\n3\n2\n4\n4\n4\n4\n4\n3\n2\n2\n2\n3\n2\n3\n2\n2\n2\n4\n2\n2\n4\n2\n2\n2\n4\n3\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n4\n2\n2\n3\n4\n3\n2\n2\n2\n2\n2\n3\n2\n2\n4\n2\n3\n2\n2\n2\n2\n2\n3\n3\n2\n3\n3\n2\n2\n2\n2\n2\n2\n3\n3\n4\n2\n2\n2\n4\n2\n3\n2\n2\n4\n4\n2\n2\n3\n3\n3\n2\n2\n3\n3\n2\n2\n3\n3\n2\n2\n2\n2\n2\n3\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n3\n2\n2\n3\n3\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n3\n3\n3\n2\n3\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n3\n3\n2\n4\n4\n2\n2\n2\n3\n4\n4\n4\n4\n4\n2\n2\n2\n3\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n3\n2\n2\n2\n2\n2\n3\n4\n4\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n3\n2\n4\n4\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n3\n2\n4\n4\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n4\n2\n2\n2\n2\n4\n3\n2\n2\n2\n2\n2\n2\n3\n4\n2\n2\n2\n2\n2\n3\n3\n3\n3\n2\n2\n3\n2\n2\n2\n2\n3\n2\n2\n2\n2\n4\n2\n3\n3\n4\n2\n4\n4\n4\n4\n4\n2\n2\n2\n2\n2\n2\n2\n2\n3\n3\n2\n2\n2\n3\n3\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n3\n2\n3\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n4\n3\n2\n2\n4\n4\n2\n4\n2\n2\n2\n3\n3\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n4\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n3\n4\n4\n4\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n4\n2\n3\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n3\n2\n2\n2\n3\n3\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n3\n3\n3\n2\n2\n2\n4\n4\n2\n2\n3\n4\n4\n4\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n4\n4\n2\n4\n4\n4\n2\n2\n4\n2\n2\n4\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n3\n3\n2\n2\n3\n3\n3\n3\n2\n4\n4\n4\n4\n4\n2\n2\n3\n2\n4\n2\n4\n2\n2\n3\n3\n2\n2\n4\n2\n2\n2\n2\n2\n2\n4\n4\n2\n2\n2\n3\n3\n3\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n4\n4\n4\n3\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n4\n2\n2
\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n3\n2\n2\n2\n3\n2\n2\n2\n2\n4\n2\n4\n2\n2\n2\n4\n4\n2\n2\n2\n3\n2\n3\n4\n2\n2\n4\n2\n2\n2\n2\n2\n2\n3\n3\n2\n2\n2\n2\n2\n3\n4\n4\n4\n4\n4\n4\n4\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n3\n2\n3\n3\n2\n2\n2\n4\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n3\n2\n4\n4\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n3\n3\n3\n4\n4\n2\n2\n2\n2\n4\n2\n2\n2\n3\n2\n2\n2\n3\n3\n2\n4\n4\n3\n3\n3\n3\n3\n3\n2\n3\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n3\n2\n3\n3\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n3\n3\n2\n2\n4\n4\n2\n2\n2\n2\n2\n4\n4\n4\n4\n2\n2\n4\n3\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n3\n4\n2\n2\n2\n2\n2\n5\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n4\n2\n2\n2\n3\n3\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n4\n4\n4\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n3\n2\n4\n4\n2\n2\n3\n2\n2\n2\n2\n2\n4\n2\n3\n3\n2\n4\n3\n4\n2\n2\n2\n4\n4\n2\n4\n2\n2\n2\n2\n2\n4\n4\n2\n4\n2\n3\n4\n2\n3\n3\n2\n2\n2\n2\n2\n2\n3\n4\n2\n3\n3\n2\n2\n3\n2\n3\n4\n3\n4\n2\n2\n4\n2\n2\n4\n3\n2\n4\n4\n4\n4\n3\n4\n3\n2\n2\n3\n4\n2\n4\n4\n2\n4\n3\n4\n4\n2\n3\n2\n4\n4\n4\n3\n4\n2\n4\n2\n4\n4\n2\n3\n2\n3\n3\n4\n2\n2\n4\n2\n2\n4\n2\n2\n4\n2\n2\n3\n4\n2\n2\n2\n2\n4\n2\n2\n2\n4\n2\n2\n4\n3\n2\n2\n2\n2\n3\n3\n4\n2\n2\n4\n2\n2\n3\n2\n2\n2\n2\n2\n3\n2\n2\n2\n4\n3\n2\n2\n3\n3\n2\n3\n4\n2\n2\n3\n4\n4\n3\n2\n4\n4\n3\n2\n2\n4\n3\n3\n2\n3\n3\n3\n3\n3\n4\n2\n3\n4\n3\n2\n2\n3\n2\n4\n4\n4\n4\n2\n3\n3\n4\n2\n3\n4\n2\n4\n4\n3\n3\n2\n3\n4\n3\n2\n3\n4\n4\n2\n4\n2\n2\n4\n2\n4\n2\n3\n3\n2\n3\n2\n2\n2\n3\n3\n4\n3\n3\n4\n2\n4\n3\n3\n2\n2\n4\n3\n3\n4\n2\n3\n4\n2\n2\n3\n4\n4\n2\n2\n2\n4\n3\n3\n2\n2\n2\n4\n4\n2\n2\n3\n2\n3\n3\n4\n2\n2\n4\n2\n2\n4\n4\n4\n4\n3\n4\n4\n3\n2\n2\n3\n3\n4\n4\n4\n4\n2\n2\n4\n2\n3\n3\n3\n4\n4\n3\n2\n4\n4\n3\n3\n3\n3\n2\n2\n2\n3\n2\n2\n2\n3\n2\n3\n2\n3\n2\n4\n4\n3\n2\n4\n2\n3\n2\n3\n2\n3\n4\n2\n3\n4\n3\n2\n2\n2\n2\n4\n2\n2\n4\n3\n2\n3\n4\n2\n4\n2\n4\n4\n4\n2\n4\n4\n3\n3\n4\n3\n3\n4\n2\n4\n
3\n2\n4\n4\n2\n3\n2\n4\n4\n4\n4\n2\n4\n3\n3\n4\n4\n3\n4\n2\n2\n3\n3\n2\n4\n4\n4\n2\n2\n2\n3\n2\n3\n3\n3\n4\n3\n4\n3\n3\n3\n2\n2\n4\n3\n4\n3\n3\n2\n3\n4\n3\n2\n2\n2\n4\n4\n2\n4\n2\n3\n4\n2\n4\n2\n3\n3\n4\n3\n3\n2\n2\n4\n3\n4\n3\n2\n2\n3\n2\n4\n2\n4\n4\n5\n2\n2\n4\n4\n4\n4\n3\n3\n4\n3\n3\n3\n2\n2\n4\n2\n3\n4\n2\n3\n2\n3\n3\n2\n2\n4\n4\n3\n2\n3\n4\n3\n4\n2\n2\n4\n4\n4\n4\n4\n2\n4\n2\n4\n3\n4\n4\n4\n3\n4\n4\n4\n2\n3\n3\n3\n3\n3\n2\n3\n4\n4\n4\n4\n2\n4\n2\n3\n2\n4\n3\n3\n4\n3\n2\n3\n3\n2\n2\n3\n3\n4\n2\n2\n4\n2\n2\n3\n4\n2\n2\n4\n2\n3\n2\n2\n4\n2\n2\n2\n3\n2\n2\n2\n4\n3\n4\n4\n4\n2\n4\n4\n4\n4\n3\n2\n4\n3\n4\n3\n3\n4\n4\n4\n4\n2\n3\n3\n4\n4\n2\n4\n2\n4\n2\n3\n2\n4\n2\n2\n2\n4\n4\n2\n4\n2\n2\n3\n2\n2\n4\n3\n4\n3\n4\n4\n3\n2\n4\n2\n3\n2\n4\n4\n4\n2\n4\n3\n2\n2\n4\n2\n3\n3\n2\n3\n2\n2\n2\n3\n3\n2\n4\n2\n3\n2\n2\n2\n3\n2\n4\n2\n3\n2\n3\n3\n3\n4\n3\n4\n4\n3\n2\n2\n4\n2\n4\n2\n3\n4\n3\n4\n3\n3\n3\n3\n3\n3\n4\n4\n2\n2\n4\n3\n2\n2\n2\n4\n2\n2\n4\n2\n3\n4\n4\n4\n2\n4\n4\n5\n3\n3\n3\n3\n4\n4\n3\n2\n2\n2\n4\n2\n2\n4\n2\n3\n4\n4\n2\n3\n4\n4\n4\n4\n4\n2\n4\n2\n4\n2\n4\n4\n3\n4\n2\n5\n4\n3\n2\n2\n2\n2\n4\n2\n3\n2\n3\n3\n3\n2\n3\n3\n3\n2\n3\n3\n2\n2\n3\n3\n2\n3\n4\n4\n4\n2\n2\n4\n4\n3\n3\n3\n3\n2\n3\n2\n3\n2\n2\n3\n3\n4\n4\n2\n3\n3\n2\n4\n3\n2\n2\n3\n2\n3\n2\n4\n2\n2\n4\n2\n2\n3\n3\n3\n4\n4\n3\n2\n2\n2\n2\n3\n3\n4\n2\n2\n4\n2\n2\n3\n4\n4\n4\n3\n4\n4\n2\n3\n4\n4\n4\n2\n4\n4\n3\n2\n3\n2\n2\n2\n2\n2\n2\n3\n2\n4\n2\n4\n4\n3\n3\n2\n3\n3\n3\n2\n4\n4\n3\n2\n3\n2\n3\n4\n4\n3\n3\n3\n2\n3\n2\n2\n2\n2\n3\n2\n2\n3\n2\n4\n4\n4\n2\n2\n2\n2\n3\n4\n3\n4\n3\n4\n2\n2\n3\n3\n4\n2\n2\n2\n4\n2\n2\n4\n2\n3\n3\n3\n3\n2\n4\n4\n3\n2\n2\n4\n4\n2\n3\n4\n2\n4\n2\n3\n4\n3\n4\n4\n3\n4\n4\n2\n2\n2\n4\n3\n2\n3\n2\n2\n3\n3\n2\n2\n3\n3\n2\n4\n3\n2\n2\n3\n2\n2\n3\n2\n4\n3\n3\n4\n4\n2\n2\n4\n3\n3\n3\n3\n3\n3\n3\n4\n2\n2\n2\n3\n3\n3\n4\n4\n4\n3\n4\n3\n3\n4\n2\n2\n4\n2\n2\n4\n2\n2\n3\n3\n4\n2\n3\n2\n2\n2\n2\n4\n2\n2\n2\n3\n2\n4\n2\n4\n2\n4\n3\n4\n2\n4\n4\n3\n4\n3\n2\n2\n4\n2\n3\n3\n2\n3\n3\n2\n2\n2\n4\n3\n4\n2\n3\n4\n3\n4\
n3\n3\n2\n3\n4\n3\n3\n2\n3\n3\n3\n2\n3\n3\n2\n2\n3\n2\n2\n4\n3\n2\n4\n2\n3\n4\n2\n2\n2\n3\n4\n3\n2\n3\n4\n2\n2\n4\n2\n2\n4\n2\n4\n2\n2\n2\n2\n4\n4\n2\n3\n2\n4\n2\n4\n2\n3\n2\n2\n3\n4\n2\n2\n2\n2\n4\n3\n2\n4\n3\n3\n3\n2\n3\n3\n3\n4\n3\n3\n3\n4\n4\n4\n2\n2\n3\n2\n2\n2\n4\n4\n3\n2\n2\n2\n3\n3\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n4\n4\n3\n2\n3\n4\n2\n3\n4\n4\n3\n2\n3\n4\n2\n4\n3\n3\n3\n2\n3\n4\n4\n4\n4\n3\n2\n3\n2\n4\n2\n4\n2\n2\n2\n2\n2\n3\n2\n3\n4\n4\n2\n2\n2\n4\n4\n4\n3\n2\n2\n2\n3\n3\n3\n2\n4\n4\n3\n3\n4\n4\n3\n3\n4\n3\n2\n3\n3\n4\n3\n4\n4\n4\n4\n3\n3\n4\n3\n3\n3\n2\n2\n4\n2\n4\n2\n2\n3\n3\n4\n3\n2\n3\n3\n4\n4\n2\n4\n4\n2\n4\n4\n4\n3\n2\n2\n2\n2\n4\n4\n4\n3\n4\n3\n2\n4\n2\n3\n4\n3\n4\n4\n2\n2\n2\n2\n4\n3\n2\n3\n3\n3\n4\n2\n3\n4\n4\n2\n2\n3\n4\n3\n4\n2\n4\n2\n2\n4\n2\n4\n3\n3\n2\n4\n2\n4\n2\n2\n2\n2\n2\n3\n2\n2\n4\n2\n3\n2\n3\n3\n3\n4\n2\n3\n2\n4\n2\n3\n3\n2\n4\n4\n2\n3\n3\n3\n4\n4\n4\n4\n3\n4\n3\n2\n3\n3\n3\n3\n4\n4\n3\n3\n3\n2\n3\n3\n4\n2\n4\n2\n2\n2\n3\n2\n2\n3\n2\n3\n3\n3\n4\n2\n2\n2\n2\n2\n2\n4\n4\n4\n4\n2\n2\n4\n3\n2\n2\n4\n2\n4\n2\n4\n2\n2\n3\n3\n4\n4\n4\n2\n2\n3\n3\n4\n2\n3\n2\n2\n4\n2\n4\n3\n4\n3\n3\n3\n4\n2\n3\n3\n3\n3\n3\n2\n4\n4\n3\n3\n2\n4\n3\n2\n2\n3\n2\n2\n2\n3\n2\n2\n3\n3\n3\n2\n4\n2\n2\n2\n3\n4\n3\n3\n2\n3\n2\n2\n2\n4\n4\n2\n4\n3\n2\n3\n2\n4\n4\n4\n2\n2\n4\n4\n4\n4\n4\n4\n3\n2\n4\n3\n4\n4\n3\n2\n2\n2\n4\n4\n4\n2\n2\n3\n3\n3\n2\n4\n4\n4\n3\n2\n5\n4\n3\n3\n3\n4\n2\n4\n2\n4\n2\n2\n2\n2\n4\n2\n2\n4\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n4\n3\n4\n3\n4\n2\n2\n2\n2\n2\n2\n3\n3\n3\n3\n2\n2\n2\n3\n4\n3\n4\n4\n2\n2\n4\n4\n3\n4\n2\n2\n2\n4\n4\n2\n4\n4\n2\n2\n3\n2\n4\n3\n2\n3\n2\n3\n2\n2\n3\n3\n2\n3\n2\n2\n4\n2\n2\n2\n4\n4\n2\n2\n2\n5\n3\n2\n2\n4\n3\n2\n4\n3\n2\n4\n2\n2\n4\n2\n3\n3\n2\n3\n4\n3\n2\n2\n4\n3\n4\n3\n4\n3\n4\n2\n4\n4\n2\n2\n2\n3\n2\n3\n4\n3\n3\n4\n2\n4\n3\n4\n2\n2\n2\n3\n2\n2\n2\n3\n3\n4\n3\n2\n4\n2\n4\n2\n4\n2\n3\n3\n3\n4\n3\n3\n4\n4\n2\n4\n3\n2\n2\n4\n4\n4\n4\n3\n4\n2\n4\n2\n4\n2\n2\n4\n2\n4\n2\n3\n3\n2\n2\n4\n4\n2\n4\n4\n3\n4\n2\n3\n2\n4\n4\n4\n3\n3\n4\n3
\n2\n4\n3\n2\n3\n3\n2\n2\n2\n2\n3\n2\n2\n4\n4\n3\n4\n3\n4\n2\n2\n3\n3\n4\n2\n4\n2\n4\n2\n4\n4\n4\n2\n3\n3\n4\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n3\n4\n2\n3\n2\n4\n2\n2\n2\n2\n2\n2\n3\n3\n2\n3\n2\n2\n4\n2\n4\n2\n4\n2\n2\n2\n4\n4\n3\n2\n2\n3\n3\n4\n2\n2\n3\n4\n2\n4\n2\n3\n2\n2\n4\n2\n3\n2\n2\n2\n4\n3\n4\n2\n4\n2\n2\n4\n4\n3\n2\n4\n2\n3\n3\n2\n3\n4\n4\n3\n3\n3\n2\n4\n2\n3\n2\n4\n2\n2\n2\n2\n3\n4\n2\n2\n3\n2\n3\n2\n3\n2\n2\n4\n4\n2\n3\n4\n3\n3\n4\n2\n2\n3\n2\n2\n2\n2\n2\n3\n2\n3\n4\n3\n4\n4\n2\n2\n2\n2\n2\n2\n3\n4\n2\n2\n2\n2\n4\n2\n4\n4\n4\n3\n2\n4\n4\n2\n3\n4\n2\n4\n2\n4\n3\n3\n3\n4\n3\n2\n4\n3\n4\n2\n2\n4\n2\n3\n3\n3\n4\n4\n4\n3\n4\n4\n3\n2\n4\n2\n2\n3\n2\n4\n2\n2\n4\n4\n4\n3\n2\n4\n3\n4\n2\n4\n2\n2\n2\n3\n2\n2\n3\n3\n2\n2\n4\n3\n2\n2\n4\n3\n2\n4\n2\n4\n2\n2\n4\n3\n4\n4\n2\n4\n4\n2\n4\n4\n4\n4\n4\n4\n2\n4\n3\n3\n2\n3\n2\n3\n2\n4\n4\n2\n4\n2\n2\n2\n4\n3\n2\n2\n2\n2\n2\n3\n4\n2\n3\n4\n2\n2\n3\n4\n3\n3\n3\n2\n2\n3\n3\n3\n2\n4\n4\n4\n4\n4\n2\n4\n2\n3\n3\n2\n3\n3\n2\n2\n2\n2\n2\n2\n3\n3\n4\n2\n4\n3\n4\n3\n3\n2\n4\n4\n4\n2\n2\n3\n3\n3\n2\n2\n3\n3\n2\n2\n3\n3\n3\n2\n2\n2\n4\n3\n3\n2\n2\n2\n2\n3\n4\n4\n2\n2\n4\n3\n3\n2\n4\n3\n3\n3\n3\n2\n3\n4\n3\n3\n3\n2\n2\n4\n3\n3\n4\n3\n4\n2\n3\n2\n2\n2\n3\n3\n3\n2\n2\n2\n2\n4\n3\n4\n3\n3\n3\n4\n4\n4\n2\n2\n2\n3\n4\n4\n4\n4\n4\n4\n2\n2\n3\n2\n2\n3\n3\n4\n4\n2\n2\n4\n3\n2\n2\n2\n2\n3\n2\n3\n3\n4\n3\n2\n4\n2\n3\n2\n3\n4\n4\n4\n4\n4\n2\n4\n2\n3\n3\n3\n2\n2\n3\n3\n3\n4\n4\n4\n4\n4\n4\n2\n2\n2\n2\n2\n3\n2\n3\n3\n2\n4\n4\n3\n4\n4\n2\n2\n2\n3\n2\n2\n4\n2\n4\n2\n2\n2\n3\n3\n3\n2\n4\n2\n3\n4\n2\n4\n3\n2\n3\n2\n2\n2\n2\n3\n4\n2\n2\n2\n2\n4\n3\n3\n3\n3\n2\n4\n3\n3\n2\n3\n2\n3\n4\n2\n2\n4\n4\n2\n3\n3\n4\n4\n4\n4\n4\n4\n4\n3\n2\n3\n4\n4\n4\n3\n4\n3\n3\n2\n2\n2\n3\n3\n4\n2\n2\n4\n4\n4\n2\n2\n2\n3\n2\n2\n3\n2\n2\n3\n3\n3\n3\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n4\n2\n4\n2\n2\n4\n3\n2\n2\n4\n4\n2\n4\n3\n2\n2\n3\n3\n2\n3\n3\n3\n4\n3\n3\n3\n2\n3\n4\n2\n2\n3\n2\n2\n2\n2\n4\n3\n2\n4\n2\n2\n4\n4\n4\n3\n3\n4\n2\n2\n3\n4\n4\n4\n4\n2\n2\n4\n2\n2\n2\n3\n
4\n4\n4\n4\n3\n3\n3\n2\n2\n2\n2\n3\n2\n2\n4\n2\n2\n2\n2\n3\n2\n4\n3\n4\n3\n3\n2\n2\n3\n3\n2\n2\n2\n2\n3\n2\n2\n3\n4\n4\n3\n3\n3\n3\n3\n3\n3\n2\n4\n4\n2\n4\n3\n4\n4\n4\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n4\n4\n4\n4\n4\n4\n2\n2\n4\n2\n2\n4\n2\n3\n3\n3\n3\n2\n2\n2\n3\n4\n3\n2\n4\n4\n3\n3\n2\n2\n2\n4\n4\n4\n4\n2\n4\n3\n3\n2\n2\n2\n3\n3\n2\n4\n3\n3\n3\n3\n4\n4\n4\n4\n4\n4\n2\n3\n4\n4\n2\n4\n3\n3\n3\n3\n4\n2\n4\n3\n4\n3\n4\n4\n4\n4\n4\n2\n2\n2\n3\n3\n3\n4\n2\n2\n2\n2\n2\n4\n3\n2\n4\n4\n4\n4\n3\n2\n2\n2\n2\n3\n4\n2\n4\n2\n2\n3\n2\n2\n4\n2\n2\n3\n4\n4\n4\n3\n4\n4\n4\n2\n2\n3\n4\n2\n2\n2\n2\n2\n2\n4\n4\n3\n3\n3\n3\n2\n3\n3\n4\n4\n4\n5\n4\n2\n4\n2\n2\n3\n4\n4\n4\n2\n2\n3\n4\n3\n4\n2\n2\n4\n2\n4\n2\n2\n2\n3\n3\n3\n4\n2\n2\n3\n2\n3\n4\n4\n4\n4\n4\n4\n3\n4\n2\n2\n4\n4\n2\n2\n2\n4\n4\n3\n3\n4\n2\n2\n3\n2\n2\n4\n4\n4\n3\n3\n4\n4\n3\n2\n2\n3\n4\n3\n4\n3\n3\n3\n2\n3\n3\n3\n3\n2\n4\n3\n3\n2\n2\n2\n2\n2\n2\n2\n2\n3\n4\n2\n2\n4\n4\n3\n3\n3\n4\n4\n4\n3\n2\n4\n4\n2\n2\n2\n2\n2\n4\n3\n4\n2\n2\n3\n3\n3\n3\n4\n4\n4\n2\n2\n2\n2\n4\n3\n2\n2\n4\n2\n4\n2\n4\n3\n2\n4\n4\n3\n3\n3\n3\n3\n3\n4\n3\n4\n4\n2\n2\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n2\n2\n2\n2\n2\n2\n2\n4\n4\n4\n3\n3\n2\n3\n4\n4\n2\n2\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n3\n3\n3\n2\n3\n3\n4\n4\n2\n2\n2\n2\n3\n3\n2\n3\n4\n3\n4\n4\n2\n2\n5\n2\n2\n4\n4\n3\n2\n2\n4\n3\n2\n3\n2\n2\n4\n2\n2\n2\n3\n3\n2\n4\n2\n2\n2\n2\n2\n4\n3\n4\n4\n2\n2\n4\n2\n4\n3\n2\n2\n4\n4\n4\n3\n3\n4\n2\n3\n3\n2\n3\n2\n3\n2\n2\n3\n3\n2\n4\n3\n2\n4\n4\n2\n2\n4\n2\n2\n2\n5\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n2\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n2\n4\n4\n3\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n4\n3\n4\n4\n4\n3\n4\n4\n3\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n3\n4\
n3\n4\n4\n4\n3\n4\n3\n2\n4\n3\n2\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n2\n4\n4\n3\n4\n4\n4\n4\n2\n4\n4\n3\n4\n4\n4\n2\n4\n4\n4\n3\n2\n4\n4\n3\n4\n4\n4\n3\n4\n4\n4\n4\n3\n4\n4\n4\n3\n4\n4\n4\n4\n4\n3\n4\n3\n4\n4\n4\n4\n4\n4\n3\n3\n3\n4\n4\n4\n4\n4\n4\n4\n2\n4\n3\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n3\n3\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n3\n4\n4\n4\n4\n4\n3\n4\n3\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n3\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n3\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n2\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n3\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n4\n4\n3\n2\n2\n4\n3\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n5\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n2\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n3\n4\n3\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n2\n4\n3\n4\n4\n4\n3\n4\n4\n4\n4\n4\n3\n3\n3\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n3\n3\n4\n4\n3\n4\n4\n4\n3\n4\n4\n4\n4\n4\n2\n4\n4\n4\n3\n4\n4\n4\n4\n4\n2\n3\n4\n4\n5\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n3\n4\n3\n4\n3\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n3\n4\n4\n3\n4\n4\n3\n3\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n3\n4\n4\n4\n4\n4\n4\n5\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n2\n4\n3\n3\n4\n4\n3\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n4\n3\n4\n4\n4\n3\n4\n4\n4\n3\n4\n4\n4\n3\n4\n3\n4\n3\n4\n2\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n3\n4\n3\n4\n4\n3\n4\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4
\n4\n4\n4\n4\n4\n3\n3\n4\n3\n4\n4\n4\n4\n3\n4\n3\n4\n4\n4\n4\n3\n4\n4\n3\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n3\n4\n3\n4\n4\n4\n4\n4\n4\n3\n4\n2\n4\n4\n4\n4\n4\n3\n3\n4\n4\n3\n2\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n3\n4\n4\n4\n4\n4\n2\n4\n4\n4\n3\n4\n3\n4\n4\n4\n3\n4\n4\n4\n3\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n3\n4\n3\n4\n3\n3\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n4\n3\n4\n4\n3\n4\n4\n2\n4\n4\n4\n4\n2\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n3\n4\n3\n4\n4\n3\n4\n4\n3\n4\n4\n4\n4\n3\n4\n4\n3\n4\n4\n4\n3\n3\n3\n3\n4\n4\n4\n3\n3\n4\n4\n3\n4\n3\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n3\n4\n3\n4\n4\n4\n4\n2\n4\n4\n4\n4\n3\n4\n2\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n3\n4\n4\n2\n4\n3\n4\n4\n4\n3\n3\n2\n3\n3\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n2\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n3\n4\n3\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n3\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n2\n4\n4\n4\n3\n4\n4\n4\n3\n3\n4\n4\n4\n4\n3\n3\n4\n4\n4\n3\n4\n3\n2\n3\n3\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n3\n4\n3\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n3\n4\n3\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n3\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n3\n4\n4\n3\n3\n4\n5\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n3\n4\n4\n4\n3\n3\n4\n3\n4\n4\n4\n3\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n3\n3\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n3\n4\n4\n4\n3\n4\n4\n3\n3\n4\n4\n4\n4\n4\n2\n3\n4\n3\n3\n2\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n3\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n5\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n
4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n3\n4\n3\n4\n4\n3\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n3\n4\n4\n3\n3\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n5\n3\n4\n4\n4\n4\n4\n4\n3\n4\n4\n2\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n3\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n3\n3\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n3\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n3\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n4\n3\n4\n4\n3\n4\n4\n3\n2\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n3\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n3\n4\n4\n4\n4\n3\n4\n4\n3\n2\n2\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n3\n4\n4\n4\n2\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n3\n4\n3\n4\n3\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n2\n4\n3\n4\n4\n3\n3\n4\n3\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n2\n4\n4\n2\n3\n4\n4\n4\n4\n4\n5\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n3\n4\n3\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n3\n3\n2\n4\n4\n3\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n4\n3\n4\n3\n4\n4\n4\n4\n4\n4\n4\n2\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n2\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n3\n3\n3\n4\n4\n3\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n4\n4\n3\n3\n4\n4\n2\n4\n4\n4\n4\n3\n4\n4\n4\n3\n3\n4\n3\n4\n2\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n3\n4\n4\n4\n4\n4\n2\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n4\n3\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n3\n4\n4\n4\n4\n4\
n4\n4\n4\n4\n4\n2\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n2\n4\n4\n4\n4\n4\n3\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n3\n3\n4\n3\n4\n4\n4\n3\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n3\n3\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n2\n3\n4\n3\n4\n4\n4\n4\n4\n4\n4\n2\n4\n2\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n2\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n2\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n3\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n3\n4\n2\n2\n3\n3\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n3\n3\n3\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n2\n2\n2\n2\n2\n2\n2\n4\n4\n4\n4\n4\n4\n4\n2\n2\n4\n4\n2\n4\n2\n3\n3\n4\n4\n3\n4\n4\n4\n2\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n3\n3\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n3\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n2\n2\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n3\n3\n4\n4\n2\n4\n3\n4\n4\n4\n5\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n3\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n3\n3\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n3\n5\n3\n4\n4\n4\n4\n4\n3\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n3\n4\n4\n4\n4\n4\n3\n3\n3\n3\n4\n3\n4\n4\n4\n4\n3\n3\n3\n4\n4\n3\n3\n4\n3\n3\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n3\n4\n4\n4\n4\n4\n4\n5\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3
\n4\n4\n4\n4\n4\n3\n4\n4\n3\n3\n4\n4\n4\n4\n4\n2\n2\n4\n4\n5\n5\n3\n3\n3\n3\n3\n3\n4\n4\n3\n5\n5\n5\n3\n4\n3\n4\n5\n3\n3\n5\n3\n5\n3\n4\n3\n3\n3\n5\n3\n4\n3\n4\n3\n3\n4\n5\n3\n3\n4\n3\n4\n5\n3\n2\n3\n5\n4\n3\n4\n3\n3\n3\n5\n3\n3\n4\n5\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n4\n3\n4\n3\n3\n5\n3\n4\n3\n3\n3\n3\n5\n3\n4\n3\n3\n3\n3\n3\n5\n3\n3\n4\n3\n5\n3\n3\n4\n3\n3\n5\n3\n3\n5\n3\n5\n5\n3\n3\n4\n3\n3\n3\n3\n4\n3\n4\n3\n3\n3\n4\n5\n4\n3\n3\n5\n3\n3\n3\n4\n5\n3\n4\n3\n3\n3\n3\n3\n5\n3\n3\n3\n5\n3\n5\n5\n3\n3\n3\n3\n3\n3\n3\n4\n3\n5\n3\n4\n3\n3\n3\n3\n3\n3\n3\n5\n3\n4\n3\n3\n3\n4\n4\n4\n5\n3\n3\n3\n3\n3\n4\n4\n5\n3\n5\n3\n3\n3\n5\n4\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n5\n3\n5\n3\n3\n4\n3\n3\n5\n3\n5\n3\n5\n5\n3\n5\n4\n5\n3\n3\n5\n3\n4\n3\n4\n3\n4\n3\n4\n3\n4\n3\n5\n4\n4\n3\n3\n4\n4\n4\n5\n4\n3\n3\n4\n4\n3\n5\n3\n3\n4\n4\n3\n5\n3\n5\n3\n3\n4\n3\n3\n5\n4\n3\n4\n3\n3\n3\n3\n3\n5\n4\n4\n3\n4\n3\n3\n3\n5\n4\n3\n3\n4\n3\n3\n4\n3\n5\n3\n3\n3\n4\n5\n5\n3\n3\n3\n4\n3\n4\n3\n3\n3\n3\n4\n3\n4\n3\n3\n5\n5\n4\n4\n3\n5\n3\n4\n3\n4\n3\n3\n4\n4\n4\n5\n5\n3\n4\n4\n4\n3\n4\n4\n3\n3\n3\n3\n3\n5\n4\n4\n4\n3\n3\n3\n3\n5\n3\n3\n3\n5\n3\n3\n3\n4\n5\n5\n3\n4\n3\n3\n3\n3\n5\n5\n4\n3\n3\n3\n5\n4\n3\n4\n4\n5\n5\n3\n3\n4\n3\n4\n3\n4\n4\n5\n3\n3\n3\n4\n3\n3\n5\n3\n3\n3\n4\n4\n3\n3\n3\n3\n3\n3\n3\n5\n3\n5\n3\n3\n3\n3\n5\n4\n5\n3\n3\n3\n5\n4\n5\n3\n3\n5\n3\n5\n5\n3\n4\n5\n3\n5\n3\n5\n3\n3\n3\n4\n3\n4\n3\n5\n3\n5\n3\n3\n4\n3\n5\n3\n3\n3\n3\n3\n4\n3\n5\n3\n4\n3\n4\n3\n4\n4\n3\n3\n4\n4\n5\n3\n3\n3\n3\n4\n3\n5\n3\n3\n5\n5\n3\n3\n3\n4\n4\n3\n4\n4\n4\n3\n4\n4\n3\n5\n3\n3\n5\n5\n5\n3\n3\n3\n3\n3\n3\n5\n3\n3\n5\n3\n3\n3\n4\n4\n3\n5\n3\n4\n3\n3\n3\n3\n5\n3\n3\n5\n5\n3\n3\n4\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n4\n3\n4\n3\n4\n4\n3\n4\n4\n3\n4\n3\n3\n3\n5\n3\n3\n5\n5\n3\n5\n3\n3\n5\n5\n3\n3\n5\n4\n3\n3\n3\n3\n5\n4\n3\n3\n3\n3\n5\n3\n3\n3\n5\n3\n4\n3\n3\n3\n4\n5\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n5\n4\n3\n3\n4\n3\n5\n3\n5\n3\n3\n3\n5\n3\n3\n5\n3\n3\n
3\n4\n3\n3\n3\n5\n5\n3\n5\n4\n3\n3\n5\n5\n4\n3\n3\n3\n3\n3\n3\n3\n4\n5\n3\n3\n3\n4\n4\n3\n4\n3\n3\n4\n5\n4\n4\n3\n3\n5\n5\n3\n4\n3\n4\n3\n4\n5\n3\n3\n5\n3\n5\n4\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n4\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n5\n3\n3\n5\n4\n3\n4\n3\n3\n4\n3\n3\n3\n5\n5\n3\n5\n5\n4\n3\n5\n4\n3\n5\n3\n5\n4\n5\n3\n3\n5\n5\n3\n4\n3\n4\n3\n4\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n4\n3\n4\n5\n3\n5\n5\n3\n4\n3\n5\n5\n3\n3\n3\n3\n3\n3\n2\n3\n4\n3\n5\n3\n4\n3\n3\n5\n4\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n5\n4\n3\n3\n5\n3\n3\n3\n3\n5\n3\n3\n3\n4\n3\n3\n3\n3\n4\n3\n3\n3\n4\n3\n3\n4\n4\n3\n3\n5\n3\n3\n5\n3\n3\n3\n3\n3\n5\n3\n3\n3\n4\n3\n3\n3\n3\n3\n5\n5\n3\n3\n4\n3\n5\n5\n3\n4\n3\n3\n4\n3\n3\n3\n3\n4\n4\n3\n4\n3\n4\n4\n3\n4\n3\n3\n3\n4\n5\n4\n5\n3\n5\n3\n3\n3\n3\n3\n3\n4\n5\n5\n3\n3\n3\n4\n3\n3\n3\n4\n4\n5\n4\n3\n3\n3\n3\n3\n4\n4\n3\n3\n3\n5\n5\n3\n4\n3\n3\n3\n3\n5\n3\n3\n3\n4\n3\n3\n5\n4\n5\n4\n3\n3\n3\n4\n5\n5\n4\n3\n3\n3\n3\n5\n3\n5\n5\n3\n5\n3\n4\n3\n3\n3\n4\n3\n3\n5\n5\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n4\n3\n4\n3\n3\n3\n5\n3\n3\n5\n3\n5\n3\n5\n4\n4\n3\n3\n4\n4\n3\n4\n5\n3\n4\n3\n4\n5\n3\n2\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n5\n3\n3\n5\n5\n5\n5\n5\n4\n5\n3\n5\n5\n5\n5\n3\n3\n4\n3\n3\n5\n3\n3\n3\n3\n5\n3\n4\n3\n3\n3\n4\n5\n3\n5\n5\n4\n4\n5\n3\n3\n3\n4\n3\n3\n3\n5\n3\n5\n3\n3\n5\n3\n3\n3\n4\n5\n5\n4\n3\n3\n5\n5\n3\n3\n4\n3\n3\n3\n4\n3\n4\n5\n3\n3\n4\n3\n5\n3\n4\n3\n3\n3\n5\n5\n4\n5\n3\n5\n3\n3\n3\n5\n3\n4\n3\n3\n3\n3\n5\n5\n3\n3\n3\n3\n3\n3\n4\n5\n5\n3\n3\n3\n3\n5\n5\n3\n3\n3\n3\n3\n4\n4\n3\n4\n4\n4\n3\n3\n5\n5\n4\n5\n4\n3\n3\n4\n4\n3\n3\n4\n3\n4\n3\n5\n3\n5\n3\n3\n3\n4\n5\n3\n4\n3\n3\n3\n3\n5\n3\n5\n3\n3\n5\n3\n5\n3\n5\n4\n3\n5\n3\n3\n3\n3\n5\n3\n2\n4\n5\n4\n5\n3\n5\n4\n3\n5\n5\n3\n3\n3\n5\n3\n3\n5\n3\n5\n4\n5\n5\n5\n4\n3\n4\n3\n3\n3\n5\n3\n4\n3\n3\n3\n3\n3\n3\n3\n5\n5\n3\n4\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n5\n3\n3\n4\n3\n4\n5\n5\n5\n3\n3\n3\n3\n4\n3\n4\n4\n5\n3\n5\n3\n3\n4\n5\n3\n4\n3\n3\n4\n3\n4\n4\n3\n4\n3\n5\n3\n3\n3\n3\n5\n4\n3\n3\n4\n3\n4\n3\n5\n3\n5\n3\
n3\n3\n3\n3\n4\n3\n3\n4\n3\n4\n3\n4\n4\n3\n4\n4\n4\n4\n4\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n4\n3\n3\n3\n3\n5\n3\n3\n4\n3\n4\n3\n4\n3\n5\n3\n5\n5\n3\n3\n3\n3\n4\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n5\n3\n4\n4\n3\n3\n3\n3\n3\n3\n3\n3\n5\n5\n5\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n5\n3\n3\n3\n3\n3\n3\n4\n4\n3\n4\n4\n4\n4\n4\n4\n5\n3\n3\n3\n3\n3\n3\n5\n3\n5\n5\n5\n3\n3\n3\n5\n3\n5\n5\n5\n3\n4\n4\n4\n5\n3\n3\n3\n3\n3\n3\n3\n4\n4\n3\n3\n3\n3\n3\n4\n4\n3\n4\n3\n3\n5\n5\n4\n4\n5\n5\n5\n3\n3\n4\n5\n3\n5\n5\n4\n3\n5\n5\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n5\n3\n3\n4\n4\n4\n4\n4\n4\n4\n3\n3\n3\n5\n4\n3\n3\n3\n3\n3\n3\n4\n5\n5\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n5\n5\n5\n5\n4\n4\n3\n5\n4\n3\n5\n5\n4\n4\n4\n5\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n5\n4\n5\n4\n5\n5\n4\n3\n4\n4\n4\n3\n5\n5\n5\n3\n4\n5\n3\n3\n3\n3\n3\n2\n3\n5\n3\n5\n5\n5\n5\n3\n3\n3\n5\n5\n3\n3\n3\n3\n3\n4\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n4\n4\n4\n5\n4\n4\n4\n4\n4\n4\n4\n5\n3\n4\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n4\n4\n4\n4\n3\n4\n3\n4\n3\n3\n3\n3\n3\n4\n3\n5\n3\n5\n5\n4\n4\n3\n3\n3\n5\n3\n5\n3\n5\n5\n4\n3\n5\n3\n4\n4\n4\n3\n3\n3\n4\n3\n3\n3\n3\n4\n5\n5\n4\n5\n3\n3\n5\n5\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n4\n5\n4\n5\n3\n4\n4\n5\n3\n3\n4\n5\n4\n5\n5\n3\n3\n3\n4\n3\n3\n4\n4\n4\n4\n4\n4\n3\n5\n4\n3\n3\n5\n5\n3\n3\n5\n5\n4\n4\n3\n5\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n5\n3\n5\n5\n5\n5\n3\n5\n3\n3\n3\n3\n3\n3\n3\n4\n4\n5\n3\n3\n3\n3\n3\n4\n5\n3\n3\n3\n3\n4\n4\n4\n5\n4\n3\n5\n5\n3\n3\n5\n3\n3\n5\n4\n4\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n5\n3\n3\n5\n3\n3\n3\n3\n5\n5\n5\n5\n3\n5\n5\n4\n3\n3\n5\n3\n4\n4\n5\n4\n5\n4\n4\n4\n4\n5\n4\n3\n5\n3\n4\n5\n5\n5\n3\n3\n5\n3\n4\n3\n4\n3\n5\n3\n5\n4\n3\n3\n4\n3\n3\n3\n3\n3\n5\n3\n5\n3\n5\n4\n3\n4\n5\n5\n5\n4\n3\n3\n3\n5\n4\n4\n4\n3\n5\n4\n3\n3\n5\n3\n5\n3\n5\n3\n3\n3\n4\n4\n3\n3\n3\n5\n5\n5\n3\n3\n3\n3\n5\n5\n3\n5\n3\n3\n3\n5\n4\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3
\n3\n5\n3\n5\n3\n5\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n5\n5\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n4\n5\n3\n3\n3\n3\n3\n3\n5\n3\n5\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n4\n3\n4\n3\n5\n3\n3\n3\n3\n4\n4\n3\n5\n3\n3\n3\n5\n3\n3\n5\n3\n3\n3\n5\n3\n5\n3\n3\n3\n5\n3\n5\n3\n5\n3\n5\n3\n5\n3\n5\n3\n3\n3\n4\n3\n5\n4\n3\n4\n5\n3\n3\n3\n3\n3\n3\n5\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n4\n4\n3\n3\n3\n5\n4\n3\n3\n3\n3\n3\n3\n3\n3\n5\n4\n3\n3\n3\n3\n3\n3\n3\n5\n5\n3\n5\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n5\n3\n3\n5\n3\n3\n5\n4\n3\n3\n5\n4\n3\n5\n5\n3\n4\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n5\n3\n3\n5\n5\n3\n5\n3\n5\n3\n3\n5\n3\n5\n3\n5\n5\n3\n3\n3\n3\n3\n5\n3\n5\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n5\n3\n3\n3\n3\n3\n5\n5\n5\n3\n3\n3\n3\n3\n4\n5\n3\n5\n5\n3\n3\n3\n3\n3\n5\n3\n3\n5\n3\n5\n4\n3\n3\n3\n3\n4\n3\n5\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n5\n3\n3\n3\n4\n3\n4\n3\n4\n5\n3\n3\n3\n3\n5\n5\n3\n5\n3\n5\n5\n3\n5\n3\n3\n3\n3\n5\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n5\n3\n3\n3\n3\n3\n3\n4\n3\n3\n2\n3\n5\n3\n5\n3\n3\n5\n5\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n4\n4\n3\n4\n3\n3\n5\n4\n3\n3\n3\n5\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n5\n3\n3\n5\n3\n3\n4\n3\n3\n3\n5\n3\n5\n4\n5\n3\n5\n4\n3\n5\n3\n3\n4\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n4\n5\n3\n5\n5\n3\n5\n5\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n5\n4\n3\n5\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n5\n3\n3\n3\n3\n5\n3\n3\n3\n3\n5\n5\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n5\n3\n3\n3\n3\n4\n5\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n5\n5\n3\n3\n5\n3\n3\n3\n3\n4\n4\n3\n3\n5\n3\n3\n3\n5\n5\n3\n5\n3\n4\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n5\n4\n3\n3\n5\n3\n3\n5\n3\n3\n3\n5\n5\n3\n5\n5\n5\n5\n3\n5\n5\n5\n5\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n5\n5\n3\n3\n3\n3\n3\n
3\n5\n3\n3\n5\n3\n3\n3\n5\n5\n3\n3\n5\n3\n3\n3\n3\n3\n3\n4\n5\n3\n3\n4\n5\n3\n3\n3\n3\n5\n5\n5\n3\n5\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n4\n5\n5\n3\n3\n5\n5\n3\n3\n3\n3\n3\n3\n3\n3\n5\n5\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n5\n3\n5\n3\n3\n5\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n5\n3\n5\n5\n3\n5\n5\n3\n3\n3\n5\n3\n3\n5\n5\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n5\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n5\n2\n3\n4\n3\n5\n5\n3\n3\n3\n3\n5\n5\n3\n5\n3\n4\n3\n3\n4\n3\n3\n5\n3\n5\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n4\n3\n3\n5\n3\n3\n3\n4\n3\n5\n3\n5\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n5\n5\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n5\n3\n3\n5\n3\n5\n5\n3\n4\n4\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n5\n5\n5\n5\n5\n3\n3\n3\n5\n5\n3\n5\n5\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n4\n5\n3\n3\n3\n3\n4\n5\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n5\n3\n3\n3\n4\n5\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n5\n5\n5\n5\n3\n3\n3\n3\n5\n5\n5\n3\n4\n5\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n5\n5\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n4\n4\n3\n3\n2\n3\n3\n3\n3\n3\n5\n3\n4\n3\n3\n3\n3\n3\n5\n3\n3\n5\n3\n3\n3\n3\n3\n3\n4\n4\n4\n4\n3\n3\n3\n3\n3\n3\n5\n5\n5\n3\n4\n3\n3\n3\n5\n3\n3\n5\n5\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n5\n3\n5\n5\n3\n3\n3\n3\n3\n3\n3\n3\n5\n5\n4\n3\n3\n5\n3\n3\n3\n5\n5\n5\n3\n3\n3\n3\n4\n3\n5\n3\n5\n5\n5\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n5\n5\n5\n3\n5\n3\n3\n3\n3\n3\n3\n3\n4\n5\n3\n3\n3\n3\n3\n3\n5\n3\n5\n5\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n5\n3\n3\n5\n3\n3\n3\n3\n5\n5\n5\n5\n3\n5\n5\n3\n3\n5\n3\n5\n4\n5\n3\n5\n3\n3\n3\n3\n3\n3\n5\n3\n5\n3\n3\n3\n3\n3\n5\n5\n3\n5\n5\n5\n4\n3\n3\n5\n4\n4\n3\n5\n3\n3\n3\n5\n5\n3\n3\n3\n3\n3\n3\n5\n3\n5\n5\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n5\n3\n5\n3\n5\n3\n5\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n2\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\
n3\n3\n5\n3\n5\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n5\n3\n5\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n5\n5\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n5\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n5\n3\n3\n3\n5\n2\n3\n3\n3\n5\n3\n3\n5\n3\n5\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n4\n2\n5\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n5\n3\n3\n3\n5\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n5\n5\n3\n3\n3\n3\n3\n3\n3\n5\n5\n3\n3\n3\n3\n2\n5\n3\n4\n3\n3\n3\n3\n5\n4\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n5\n3\n5\n3\n5\n3\n3\n5\n3\n3\n5\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n5\n3\n3\n3\n3\n3\n5\n3\n5\n3\n5\n5\n3\n5\n3\n5\n2\n5\n3\n3\n5\n5\n3\n5\n3\n3\n3\n5\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n4\n3\n3\n3\n3\n5\n4\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n5\n5\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n5\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n4\n3\n3\n4\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n2\n2\n3\n3\n5\n3\n3\n3\n3\n3\n3\n5\n3\n3\n5\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n5\n3\n3\n2\n3\n3\n3\n3\n5\n3\n3\n3\n5\n5\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n5\n3\n3\n3\n5\n3\n3\n3\n5\n3\n5\n3\n5\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n2\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n5\n5\n3\n3\n2\n3\n3\n3\n3\n5\n5\n4\n3\n3\n2\n3\n3\n3\n5\n4\n3\n3\n3\n5\n3\n3\n5\n5\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n5\n3\n3\n3\n3\n5\n3\n3\n3\n3\n5\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n5\n3\n3\n3\n3\n3\n5\n3\n3\n3\n5\n3\n3\n3\n5\n3\n5\n3\n3\n5\n3\n5\n3\n5\n3\n3\n3\n3\n5\n3\n5\n3\n5\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n5\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n5\n3\n3\n4\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n5\n5\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3
\n5\n3\n3\n3\n3\n5\n5\n5\n5\n3\n2\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n4\n3\n3\n3\n3\n5\n3\n5\n3\n5\n3\n5\n3\n3\n3\n3\n3\n3\n5\n5\n3\n3\n2\n3\n3\n3\n3\n3\n3\n2\n3\n5\n5\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n4\n5\n3\n4\n3\n3\n3\n3\n5\n3\n3\n3\n2\n5\n3\n5\n3\n3\n3\n3\n3\n3\n5\n3\n3\n5\n3\n5\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n5\n3\n5\n3\n3\n3\n3\n3\n5\n3\n5\n3\n3\n3\n3\n2\n3\n5\n3\n3\n3\n3\n5\n3\n3\n5\n5\n3\n3\n3\n3\n3\n2\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n5\n3\n3\n3\n3\n3\n5\n3\n3\n5\n3\n2\n5\n3\n5\n3\n5\n3\n3\n5\n3\n3\n5\n3\n3\n3\n3\n4\n3\n3\n2\n3\n5\n5\n3\n5\n3\n3\n3\n3\n3\n5\n5\n5\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n4\n3\n3\n3\n5\n5\n5\n3\n3\n4\n2\n3\n3\n3\n3\n5\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n5\n5\n3\n3\n5\n3\n5\n3\n3\n3\n3\n3\n5\n5\n5\n3\n3\n5\n3\n3\n5\n3\n5\n3\n3\n3\n3\n5\n3\n3\n3\n5\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n5\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n5\n3\n3\n3\n4\n3\n3\n3\n5\n5\n3\n3\n2\n5\n3\n5\n3\n3\n5\n3\n5\n3\n3\n5\n3\n5\n3\n3\n5\n3\n3\n5\n5\n3\n3\n3\n3\n3\n3\n4\n3\n3\n4\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n5\n5\n3\n5\n5\n3\n3\n3\n3\n5\n5\n5\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n5\n2\n3\n3\n3\n3\n3\n3\n5\n3\n5\n3\n3\n3\n3\n3\n5\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n4\n3\n3\n5\n3\n3\n3\n5\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n5\n3\n5\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n5\n5\n3\n3\n3\n3\n3\n3\n5\n5\n5\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n5\n3\n5\n3\n5\n3\n3\n3\n3\n5\n5\n3\n3\n3\n5\n3\n3\n3\n4\n5\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n5\n3\n3\n3\n5\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n4\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n
5\n5\n3\n3\n3\n3\n5\n3\n3\n2\n3\n3\n3\n5\n3\n3\n5\n5\n3\n5\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n5\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n5\n3\n3\n5\n3\n3\n3\n5\n2\n3\n3\n3\n3\n2\n3\n3\n3\n5\n3\n3\n3\n5\n3\n5\n3\n5\n3\n4\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n5\n3\n3\n3\n5\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n5\n3\n3\n5\n3\n5\n5\n3\n2\n5\n5\n5\n3\n3\n5\n5\n5\n3\n5\n3\n3\n5\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n5\n5\n5\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n5\n5\n3\n3\n5\n3\n3\n3\n3\n5\n5\n5\n3\n3\n3\n5\n3\n5\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n2\n3\n4\n5\n3\n3\n5\n3\n4\n3\n3\n5\n3\n5\n3\n3\n3\n5\n3\n3\n5\n5\n3\n5\n5\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n5\n3\n3\n3\n5\n3\n5\n3\n3\n4\n5\n5\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n5\n5\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n5\n5\n3\n5\n3\n5\n5\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n5\n5\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n5\n5\n3\n5\n3\n3\n3\n5\n5\n3\n3\n3\n3\n5\n3\n2\n2\n3\n3\n5\n3\n5\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n5\n5\n3\n3\n3\n3\n3\n3\n2\n3\n3\n5\n3\n3\n3\n5\n3\n5\n3\n5\n5\n3\n5\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n5\n5\n5\n5\n2\n3\n3\n3\n5\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n5\n2\n3\n5\n3\n4\n3\n3\n5\n3\n5\n5\n5\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n5\n3\n5\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n4\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n5\n3\n5\n3\n5\n3\n2\n5\n2\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n5\n3\n3\n5\n5\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n4\n3\n3\n2\n3\n3\n3\n3\n5\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n5\n3\n5\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\
n5\n3\n3\n3\n5\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n5\n5\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n2\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n5\n3\n3\n5\n2\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n5\n3\n5\n5\n3\n5\n3\n5\n3\n3\n5\n3\n5\n5\n3\n5\n5\n3\n4\n4\n4\n3\n5\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n5\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n5\n5\n5\n3\n3\n5\n5\n5\n5\n3\n3\n2\n3\n5\n3\n3\n5\n3\n3\n3\n3\n5\n5\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n5\n5\n3\n3\n3\n5\n3\n3\n5\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n2\n5\n5\n5\n5\n3\n3\n3\n2\n3\n3\n3\n5\n2\n3\n3\n5\n5\n3\n3\n3\n4\n5\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n5\n3\n5\n3\n3\n3\n5\n5\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n5\n5\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n5\n3\n5\n5\n5\n5\n5\n3\n3\n3\n5\n5\n3\n3\n3\n3\n2\n2\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n4\n4\n3\n5\n3\n2\n2\n2\n2\n2\n2\n2\n3\n5\n5\n3\n3\n3\n3\n2\n2\n3\n5\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n2\n3\n3\n3\n3\n5\n5\n5\n3\n3\n3\n3\n3\n5\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n4\n4\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n5\n5\n5\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n5\n3\n5\n5\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n5\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n3\n5\n5\n3\n3\n3\n5\n3\n3\n5\n5\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n5\n3\n5\n4\n2\n5\n3\n3\n3\n3\n3\n5\n3\n5\n3\n3\n3\n3\n3\n3\n5\n3\n3\n5\n3\n3\n3\n3\n5\n5\n3\n3\n3\n4\n3\n5\n5\n5\n3\n3\n5\n5\n3\n3\n3\n3\n3\n3\n5\n5\n5\n3\n3\n3\n5\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n5\n5\n5\n3\n5\n5\n5\n5\n3\n5\n3\n3\n3\n5\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3
\n5\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n5\n3\n5\n5\n3\n3\n3\n5\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n5\n3\n3\n3\n3\n5\n5\n5\n5\n3\n3\n5\n5\n3\n3\n3\n5\n3\n3\n3\n3\n5\n5\n5\n3\n3\n3\n3\n3\n5\n5\n3\n3\n5\n3\n3\n3\n3\n3\n5\n3\n5\n5\n3\n3\n3\n5\n3\n3\n3\n5\n3\n3\n3\n5\n3\n5\n5\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n5\n3\n5\n3\n5\n3\n3\n3\n3\n3\n3\n5\n5\n5\n4\n3\n3\n3\n3\n5\n3\n4\n4\n3\n5\n3\n3\n3\n3\n5\n3\n5\n3\n5\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n5\n5\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n5\n5\n5\n5\n3\n5\n3\n5\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n5\n2\n5\n3\n3\n4\n3\n3\n3\n2\n3\n3\n5\n3\n3\n3\n4\n3\n3\n3\n3\n5\n5\n3\n3\n3\n3\n3\n5\n3\n5\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n5\n3\n5\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n5\n3\n3\n3\n3\n5\n5\n3\n3\n3\n5\n3\n5\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n5\n3\n3\n3\n2\n2\n3\n3\n3\n5\n3\n3\n5\n3\n5\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n5\n3\n3\n3\n5\n4\n2\n5\n5\n3\n3\n3\n3\n2\n3\n3\n3\n3\n2\n3\n5\n3\n3\n3\n5\n3\n5\n5\n5\n5\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n5\n5\n3\n3\n3\n3\n3\n3\n5\n5\n5\n5\n5\n3\n3\n2\n5\n3\n4\n3\n3\n3\n3\n5\n4\n3\n3\n5\n3\n5\n3\n3\n3\n3\n3\n3\n4\n3\n5\n4\n5\n3\n5\n3\n5\n3\n3\n5\n3\n3\n5\n4\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n5\n3\n3\n3\n3\n5\n3\n3\n3\n5\n3\n3\n3\n3\n3\n5\n3\n5\n3\n5\n5\n3\n5\n3\n5\n2\n5\n3\n3\n3\n5\n3\n5\n3\n3\n3\n5\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n4\n3\n3\n3\n5\n4\n3\n3\n5\n4\n3\n3\n3\n3\n5\n4\n5\n3\n3\n3\n5\n3\n3\n3\n3\n4\n3\n5\n5\n5\n3\n3\n3\n3\n3\n3\n5\n2\n3\n3\n5\n3\n5\n3\n3\n3\n3\n5\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n4\n3\n3\n4\n3\n3\n3\n5\n4\n3\n3\n3\n3\n3\n3\n2\n2\n3\n3\n5\n3\n3\n3\n3\n5\n4\n5\n5\n5\n5\n3\n5\n3\n3\n5\n4\n3\n3\n3\n3\n3\n3\n5\n3\n3\n2\n3\n3\n2\n3\n5\n3\n3\n5\n5\n5\n3\n3\n3\n5\n3\n4\n3\n3\n3\n3\n3\n3\n5\n3\n5\n3\n3\n2\n5\n
3\n3\n3\n5\n3\n5\n5\n5\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n5\n3\n2\n3\n5\n3\n5\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n5\n3\n3\n3\n3\n3\n4\n5\n5\n3\n3\n2\n3\n3\n3\n3\n5\n5\n4\n5\n3\n2\n3\n3\n3\n5\n4\n3\n3\n5\n5\n5\n3\n5\n5\n3\n3\n5\n3\n3\n4\n5\n3\n3\n3\n3\n5\n3\n3\n3\n5\n5\n3\n3\n3\n3\n5\n3\n3\n5\n3\n5\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n5\n3\n5\n3\n3\n3\n5\n3\n3\n3\n5\n3\n3\n3\n5\n3\n5\n3\n3\n5\n3\n5\n3\n5\n3\n3\n3\n3\n5\n3\n5\n3\n5\n3\n5\n5\n3\n3\n5\n3\n3\n3\n4\n3\n3\n5\n3\n5\n3\n3\n5\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n5\n3\n3\n4\n3\n2\n3\n2\n3\n3\n5\n3\n3\n4\n3\n5\n3\n3\n3\n3\n3\n3\n3\n5\n3\n5\n3\n3\n5\n5\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n5\n4\n3\n3\n5\n3\n4\n3\n3\n3\n5\n3\n3\n3\n5\n5\n5\n5\n5\n3\n2\n5\n3\n3\n3\n3\n3\n3\n5\n3\n3\n5\n3\n3\n3\n3\n3\n5\n3\n3\n5\n5\n3\n4\n3\n3\n3\n3\n4\n5\n3\n5\n3\n5\n3\n5\n3\n3\n3\n3\n3\n3\n5\n5\n3\n3\n2\n3\n5\n3\n3\n3\n3\n2\n5\n5\n5\n3\n3\n3\n3\n5\n3\n3\n5\n3\n5\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n4\n5\n3\n4\n3\n3\n3\n3\n5\n3\n3\n3\n2\n5\n3\n5\n3\n3\n3\n3\n3\n5\n5\n3\n3\n5\n3\n5\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n5\n3\n5\n5\n3\n3\n3\n5\n5\n3\n5\n3\n3\n5\n3\n2\n3\n5\n3\n3\n3\n3\n5\n3\n3\n5\n5\n3\n3\n3\n3\n3\n2\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n5\n3\n5\n3\n3\n3\n5\n4\n3\n5\n3\n2\n5\n3\n5\n3\n5\n3\n3\n5\n3\n3\n5\n3\n3\n3\n3\n4\n3\n3\n2\n3\n5\n5\n3\n5\n4\n3\n3\n3\n5\n5\n5\n5\n3\n3\n3\n3\n3\n3\n3\n3\n5\n2\n3\n3\n3\n3\n4\n4\n3\n3\n3\n4\n3\n3\n4\n5\n2\n5\n5\n3\n4\n2\n3\n3\n3\n3\n5\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n5\n5\n3\n3\n5\n3\n5\n3\n3\n3\n3\n3\n5\n5\n5\n3\n3\n5\n3\n3\n5\n3\n5\n3\n3\n3\n3\n5\n3\n3\n3\n5\n3\n3\n5\n3\n3\n3\n3\n3\n3\n5\n3\n5\n3\n3\n3\n5\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n5\n5\n3\n3\n4\n3\n3\n3\n5\n5\n3\n3\n2\n5\n5\n5\n4\n3\n5\n2\n5\n4\n3\n5\n3\n5\n3\n3\n5\n3\n3\n5\n5\n3\n3\n3\n3\n5\n3\n4\n3\n5\n4\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n4\n3\n4\n5\n5\n3\n5\n5\n3\n3\n5\n3\n5\n5\n5\n3\n3\n3\n5\n3\n5\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n5\n3\n5\n5\n2\n3\n3\n3\n5\
n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n5\n4\n5\n3\n5\n3\n3\n3\n5\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n5\n3\n5\n3\n3\n3\n3\n5\n4\n3\n3\n3\n5\n3\n3\n3\n3\n3\n2\n3\n3\n3\n4\n3\n5\n3\n3\n4\n3\n3\n5\n5\n3\n3\n3\n3\n5\n5\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n4\n5\n5\n3\n3\n3\n3\n3\n3\n3\n3\n5\n5\n5\n3\n3\n4\n3\n3\n5\n5\n5\n3\n3\n3\n3\n3\n5\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n5\n3\n3\n3\n3\n4\n3\n3\n3\n5\n3\n3\n5\n3\n3\n3\n3\n5\n3\n5\n3\n5\n3\n3\n3\n3\n5\n5\n5\n3\n3\n5\n3\n3\n3\n4\n5\n3\n5\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n5\n3\n3\n3\n5\n3\n3\n3\n5\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n5\n3\n4\n5\n3\n3\n5\n3\n3\n3\n3\n2\n3\n3\n3\n3\n5\n5\n3\n5\n3\n3\n5\n3\n3\n2\n3\n3\n3\n5\n3\n3\n5\n5\n3\n5\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n5\n3\n5\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n5\n3\n5\n5\n3\n3\n5\n3\n3\n3\n5\n2\n3\n3\n3\n3\n2\n3\n3\n3\n5\n3\n3\n3\n5\n3\n5\n3\n5\n3\n4\n3\n5\n3\n3\n3\n3\n3\n5\n3\n3\n5\n3\n3\n3\n5\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n5\n3\n3\n5\n3\n5\n5\n3\n2\n5\n5\n5\n3\n3\n5\n5\n5\n2\n5\n3\n3\n5\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n5\n3\n3\n3\n5\n3\n5\n5\n5\n3\n4\n4\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n5\n5\n3\n3\n5\n3\n3\n3\n3\n5\n5\n5\n3\n3\n3\n5\n2\n5\n3\n3\n5\n3\n3\n5\n3\n3\n3\n3\n2\n3\n4\n5\n3\n3\n5\n3\n4\n3\n3\n5\n3\n5\n3\n5\n3\n5\n3\n3\n5\n5\n3\n5\n5\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n5\n5\n3\n3\n3\n5\n3\n5\n3\n5\n4\n5\n5\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n5\n5\n5\n3\n3\n3\n5\n3\n3\n3\n3\n4\n2\n3\n3\n3\n3\n3\n4\n5\n3\n5\n3\n3\n5\n5\n4\n5\n5\n5\n5\n3\n3\n3\n3\n3\n5\n3\n5\n3\n3\n3\n3\n3\n4\n3\n5\n3\n3\n2\n5\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n5\n3\n3\n5\n3\n3\n5\n3\n5\n3\n3\n3\n3\n3\n5\n5\n3\n3\n5\n3\n3\n5\n3\n5\n3\n3\n5\n5\n3\n5\n4\n3\n3\n5\n5\n3\n3\n3\n3\n5\n3\n2\n2\n3\n3\n5\n3\n5\n3\n5\n2\n3\n3\n3\n3\n3\n3\n3\n3\n5\n5\n5\n5\n3\n3\n3\n3\n3\n2\n3\n3\n5\n3\n3\n3\n5\n3\n5\n3\n5\n5\n3\n5\n5\n3\n3\n3\n3\n5\n3\n3\n3
\n3\n5\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n5\n5\n5\n5\n2\n5\n3\n3\n5\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n5\n2\n3\n5\n3\n4\n3\n3\n5\n3\n5\n5\n5\n3\n3\n5\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n5\n3\n5\n3\n3\n3\n3\n3\n5\n3\n3\n3\n4\n3\n3\n4\n3\n3\n2\n3\n3\n3\n5\n3\n3\n3\n3\n5\n3\n3\n3\n5\n3\n5\n3\n5\n3\n5\n3\n2\n5\n2\n5\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n5\n3\n3\n5\n5\n3\n3\n2\n3\n3\n3\n3\n3\n4\n3\n3\n3\n2\n3\n3\n3\n3\n3\n4\n5\n2\n5\n3\n2\n3\n4\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n4\n3\n3\n2\n3\n3\n3\n3\n5\n5\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n4\n3\n5\n3\n5\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n5\n3\n3\n3\n5\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n5\n5\n3\n3\n3\n5\n3\n5\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n5\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n2\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n5\n3\n3\n5\n2\n3\n3\n3\n3\n3\n4\n3\n3\n5\n5\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n5\n3\n5\n5\n3\n5\n3\n5\n3\n3\n5\n3\n5\n5\n3\n5\n5\n3\n4\n4\n4\n3\n5\n3\n3\n3\n3\n3\n5\n5\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n5\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n4\n3\n3\n5\n3\n5\n3\n3\n5\n5\n5\n3\n3\n5\n5\n5\n5\n3\n3\n2\n3\n5\n3\n3\n5\n3\n3\n3\n5\n5\n5\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n5\n3\n3\n3\n5\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n5\n5\n3\n3\n2\n2\n5\n3\n3\n5\n3\n3\n2\n2\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n2\n3\n2\n5\n5\n5\n5\n3\n5\n3\n2\n3\n3\n3\n5\n2\n3\n3\n5\n5\n3\n3\n3\n4\n5\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n5\n3\n2\n3\n3\n3\n3\n3\n3\n3\n5\n3\n5\n3\n3\n3\n5\n5\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n5\n5\n3\n3\n3\n4\n5\n3\n3\n3\n3\n3\n3\n3\n5\n3\n5\n5\n5\n5\n5\n3\n3\n3\n5\n5\n3\n5\n3\n3\n2\n2\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n4\n4\n3\n5\n3\n2\n2\n2\n2\n2\n2\n2\n3\n5\n5\n5\n3\n3\n3\n2\n2\n3\n5\n3\n3\n4\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n2\n3\n3\n3\n3\n5\n5\n5\n
3\n3\n3\n3\n3\n5\n3\n3\n5\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n4\n4\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n5\n5\n5\n3\n4\n3\n5\n5\n3\n3\n3\n5\n3\n5\n3\n5\n5\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n5\n4\n3\n3\n3\n2\n2\n5\n3\n3\n3\n3\n5\n5\n3\n3\n5\n5\n3\n3\n5\n5\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n5\n5\n5\n5\n3\n5\n4\n2\n5\n3\n3\n4\n3\n3\n5\n3\n5\n3\n3\n3\n3\n3\n3\n5\n3\n3\n5\n3\n3\n3\n4\n5\n5\n3\n3\n3\n4\n3\n5\n5\n5\n3\n3\n5\n2\n3\n5\n3\n3\n3\n3\n5\n5\n5\n3\n3\n3\n5\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n5\n5\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n5\n5\n5\n3\n5\n5\n5\n5\n3\n5\n3\n3\n3\n5\n3\n3\n3\n3\n5\n5\n5\n3\n3\n4\n3\n5\n3\n3\n3\n3\n5\n3\n5\n5\n3\n3\n3\n3\n3\n4\n4\n4\n3\n3\n3\n5\n5\n3\n5\n5\n3\n3\n3\n5\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n5\n5\n3\n3\n3\n3\n5\n3\n3\n5\n3\n3\n3\n3\n5\n5\n5\n5\n3\n5\n5\n5\n3\n3\n3\n5\n3\n3\n3\n3\n5\n5\n5\n5\n3\n3\n3\n3\n5\n5\n5\n3\n5\n3\n3\n3\n3\n5\n5\n3\n5\n5\n5\n3\n3\n5\n3\n3\n3\n5\n3\n3\n3\n5\n3\n5\n5\n5\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n5\n3\n3\n3\n5\n3\n5\n3\n5\n3\n3\n3\n4\n3\n3\n5\n5\n5\n4\n3\n3\n3\n3\n5\n3\n4\n4\n3\n5\n4\n3\n3\n3\n5\n3\n5\n3\n5\n5\n3\n3\n5\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n4\n4\n3\n3\n3\n3\n3\n3\n4\n4\n3\n3\n4\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n2\n3\n3\n3\n3\n4\n3\n4\n3\n3\n4\n3\n3\n3\n3\n3\n4\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n4\n3\n4\n4\n3\n3\n3\n4\n4\n3\n3\n3\n3\n4\n3\n4\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n2\n3\n4\n3\n3\n2\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n4\n3\n3\n4\n3\n3\n2\n2\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n2\n3\n3\n2\n3\n4\n3\n4\n3\n3\n3\n3\n3\n3\n4\n3\n4\n4\n3\n3\n3\n3\n4\n3\n3\n3\n4\n4\n2\n4\n3\n3\n4\n3\n3\n2\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n4\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n4\n3\n3\n3\n2\n3\n3\n4\n3\n3\
n3\n3\n3\n4\n4\n4\n3\n3\n4\n3\n3\n3\n3\n3\n3\n4\n4\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n4\n3\n3\n3\n3\n4\n3\n3\n4\n3\n3\n3\n3\n2\n3\n3\n4\n3\n3\n4\n3\n3\n3\n4\n3\n4\n3\n3\n4\n4\n3\n4\n3\n3\n3\n4\n3\n3\n4\n3\n4\n3\n3\n3\n4\n4\n3\n2\n3\n3\n4\n4\n4\n4\n4\n3\n4\n3\n3\n4\n4\n3\n4\n3\n3\n3\n3\n3\n4\n3\n4\n3\n2\n3\n3\n3\n3\n3\n3\n3\n2\n3\n4\n3\n3\n3\n2\n3\n4\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n4\n3\n4\n3\n3\n3\n3\n4\n3\n3\n3\n4\n3\n3\n2\n2\n4\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n5\n3\n3\n3\n4\n3\n4\n3\n3\n4\n3\n3\n3\n3\n3\n4\n2\n3\n4\n3\n3\n4\n3\n3\n3\n3\n5\n4\n4\n3\n3\n3\n4\n3\n4\n3\n3\n4\n4\n4\n4\n3\n3\n3\n2\n3\n3\n4\n4\n4\n3\n4\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n4\n2\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n4\n2\n3\n3\n3\n3\n3\n3\n4\n3\n3\n2\n3\n3\n3\n5\n4\n3\n4\n4\n3\n3\n3\n4\n4\n3\n3\n3\n4\n3\n4\n3\n3\n3\n4\n4\n4\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n4\n3\n3\n3\n3\n3\n3\n5\n3\n4\n4\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n4\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n4\n4\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n4\n2\n3\n4\n4\n3\n3\n4\n4\n5\n3\n3\n3\n3\n3\n4\n3\n5\n3\n3\n3\n3\n3\n4\n3\n3\n4\n3\n3\n3\n4\n4\n3\n4\n4\n3\n4\n3\n4\n3\n4\n4\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n4\n4\n3\n3\n3\n4\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n4\n3\n3\n2\n3\n3\n3\n3\n4\n3\n2\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n4\n3\n3\n3\n3\n4\n4\n3\n4\n4\n3\n3\n4\n4\n3\n3\n3\n4\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n4\n2\n3\n3\n3\n3\n4\n3\n4\n3\n4\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n2\n3\n4\n3\n3\n3\n4\n4\n3\n3\n3\n3\n4\n3\n3\n3\n3\n4\n4\n3\n4\n3\n2\n3\n3\n3\n3\n3\n3
\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n2\n3\n3\n5\n3\n3\n4\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n4\n5\n3\n3\n3\n3\n3\n4\n4\n3\n3\n3\n4\n3\n3\n4\n3\n2\n4\n3\n3\n4\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n4\n3\n3\n3\n3\n3\n4\n3\n4\n3\n3\n3\n4\n3\n3\n3\n3\n4\n3\n3\n4\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n4\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n4\n3\n4\n4\n3\n3\n4\n3\n3\n4\n3\n3\n4\n3\n3\n3\n3\n3\n3\n4\n2\n4\n3\n3\n4\n3\n3\n2\n3\n4\n4\n3\n3\n3\n4\n3\n4\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n4\n3\n3\n4\n3\n3\n2\n3\n3\n3\n4\n3\n3\n3\n4\n4\n4\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n2\n4\n4\n3\n3\n3\n4\n3\n3\n4\n4\n2\n3\n3\n4\n3\n4\n3\n3\n3\n3\n3\n4\n4\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n4\n4\n3\n3\n3\n3\n3\n3\n3\n3\n4\n4\n3\n4\n3\n3\n3\n3\n3\n4\n3\n2\n3\n3\n3\n4\n3\n3\n4\n4\n4\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n4\n3\n4\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n4\n3\n3\n4\n4\n3\n3\n3\n4\n3\n4\n3\n4\n3\n3\n4\n3\n3\n3\n3\n3\n4\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n5\n3\n3\n3\n3\n3\n3\n3\n4\n4\n3\n3\n3\n3\n4\n4\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n4\n4\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n4\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n4\n3\n4\n4\n3\n3\n3\n3\n3\n2\n3\n3\n4\n3\n4\n3\n3\n3\n3\n4\n4\n3\n3\n2\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n4\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n4\n3\n3\n2\n3\n4\n3\n3\n2\n3\n3\n3\n3\n3\n3\n2\n5\n3\n3\n3\n3\n4\n4\n3\n3\n3\n4\n4\n4\n4\n3\n4\n3\n3\n4\n3\n4\n4\n3\n3\n2\n3\n4\n3\n4\n3\n3\n3\n3\n3\n3\n4\n4\n4\n3\n3\n5\n4\n3\n3\n3\n3\n4\n3\n3\n3\n4\n3\n3\n3\n3\n4\n3\n4\n4\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n4\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n4\n4\n3\n3\n4\n3\n3\n3\n5\n3\n3\n4\n4\n3\n4\n4\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n4\n4\n3\n3\n2\n5\n3\n
3\n3\n4\n3\n3\n3\n3\n3\n4\n2\n3\n4\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n4\n3\n3\n3\n4\n3\n4\n3\n4\n4\n3\n4\n3\n3\n3\n3\n4\n3\n3\n4\n3\n4\n3\n3\n4\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n4\n3\n4\n3\n3\n3\n3\n3\n3\n4\n3\n4\n2\n4\n3\n4\n3\n3\n4\n3\n4\n3\n3\n3\n3\n3\n4\n3\n3\n3\n4\n3\n4\n3\n3\n3\n3\n4\n4\n3\n3\n4\n3\n3\n4\n3\n3\n3\n3\n2\n4\n3\n2\n3\n3\n3\n3\n4\n4\n3\n4\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n4\n4\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n4\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n2\n2\n4\n3\n3\n4\n3\n4\n3\n2\n3\n4\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n4\n2\n3\n4\n3\n3\n3\n3\n3\n3\n3\n4\n3\n4\n3\n3\n3\n3\n4\n4\n3\n3\n4\n3\n3\n3\n3\n3\n4\n4\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n2\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n2\n3\n3\n2\n3\n3\n3\n4\n3\n4\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n4\n4\n3\n3\n3\n4\n5\n3\n3\n3\n4\n3\n3\n3\n3\n3\n4\n3\n3\n4\n3\n4\n2\n3\n4\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n4\n2\n3\n2\n3\n3\n4\n3\n3\n4\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n4\n3\n2\n3\n2\n4\n3\n3\n4\n3\n4\n4\n2\n4\n3\n2\n4\n4\n2\n4\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n4\n3\n3\n2\n4\n3\n3\n3\n4\n3\n3\n3\n3\n4\n3\n3\n4\n3\n2\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n4\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n4\n3\n3\n3\n3\n3\n3\n4\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n2\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n4\n3\n3\n3\n3\n3\n3\n4\n4\n3\n2\n3\n3\n4\n4\n4\n4\n4\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n4\n3\n4\n4\n3\n5\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n4\n4\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n4\n4\n4\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n4\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n2\n4\n3\n3\n3\n3\n3\n3\n2\n4\n3\n3\n3\n3\n4\n3\n2\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n4\
n3\n3\n3\n4\n4\n4\n4\n4\n4\n4\n3\n3\n3\n4\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n4\n2\n3\n4\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n2\n3\n2\n3\n4\n4\n4\n4\n3\n3\n2\n4\n3\n4\n3\n2\n4\n3\n3\n3\n4\n4\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n2\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n4\n4\n4\n3\n3\n4\n3\n3\n3\n4\n4\n4\n3\n3\n3\n4\n3\n3\n3\n3\n4\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n4\n3\n3\n3\n4\n4\n4\n3\n4\n2\n2\n2\n2\n2\n2\n2\n3\n4\n4\n3\n4\n4\n4\n2\n2\n4\n3\n2\n2\n4\n2\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n4\n4\n4\n4\n4\n3\n3\n3\n4\n4\n3\n4\n3\n3\n3\n3\n4\n3\n4\n3\n3\n3\n4\n3\n4\n4\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n4\n4\n4\n4\n3\n3\n3\n3\n3\n3\n4\n3\n3\n4\n3\n3\n3\n2\n2\n3\n3\n3\n3\n4\n3\n3\n3\n4\n3\n4\n3\n3\n3\n4\n3\n3\n3\n3\n2\n3\n4\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n5\n4\n3\n4\n2\n3\n3\n4\n4\n3\n3\n3\n3\n3\n3\n4\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n4\n4\n4\n4\n4\n4\n3\n3\n2\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n2\n4\n4\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n5\n5\n5\n3\n5\n3\n3\n3\n4\n3\n3\n3\n3\n4\n3\n3\n3\n3\n4\n4\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n4\n4\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n4\n4\n3\n3\n3\n3\n4\n4\n3\n3\n4\n4\n3\n4\n4\n4\n4\n3\n4\n4\n3\n3\n3\n3\n3\n3\n4\n4\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n4\n3\n3\n3\n5\n3\n3\n4\n4\n3\n3\n4\n4\n3\n3\n3\n2\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n4\n3\n4\n3\n3\n3\n4\n3\n3\n3\n3\n3\n4\n4\n4\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n4\n4\n3\n3\n3\n2\n2\n3\n5\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n2\n4\n4
\n4\n2\n4\n4\n4\n2\n2\n2\n4\n4\n4\n4\n4\n4\n4\n4\n2\n2\n4\n4\n4\n2\n2\n4\n4\n4\n4\n2\n4\n4\n4\n4\n2\n4\n2\n4\n4\n2\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n3\n2\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n2\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n2\n4\n4\n4\n4\n4\n4\n4\n2\n3\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n2\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n2\n2\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n2\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n2\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n2\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n2\n2\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n2\n4\n2\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n2\n4\n2\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n2\n2\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n2\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n2\n4\n2\n4\n4\n4\n4\n2\n2\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n2\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n2\n2\n4\n4\n4\n4\n2\n2\n2\n4\n2\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n2\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n2\n2\n2\n4\n4\n3\n4\n4\n4\n4\n4\n2\n2\n4\n4\n4\n3\n2\n2\n4\n4\n4\n4\n2\n2\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n2\n4\n2\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n2\n2\n4\n4\n4\n4\n4\n4\n2\n4\n4\n
4\n2\n2\n4\n4\n4\n4\n4\n2\n4\n2\n4\n4\n2\n4\n4\n2\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n2\n2\n4\n4\n4\n2\n4\n4\n4\n2\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n2\n2\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n2\n4\n2\n2\n2\n4\n2\n2\n4\n4\n4\n4\n2\n2\n4\n2\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n2\n4\n3\n4\n2\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n2\n2\n2\n2\n2\n2\n4\n4\n4\n4\n4\n4\n2\n2\n4\n2\n2\n4\n2\n4\n2\n4\n2\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n2\n4\n4\n4\n2\n4\n2\n4\n4\n2\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n2\n2\n4\n4\n4\n4\n4\n2\n2\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n2\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n2\n4\n2\n2\n4\n4\n4\n4\n2\n4\n4\n2\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n2\n4\n4\n4\n4\n2\n4\n4\n4\n4\n2\n2\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n2\n2\n2\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n2\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n2\n4\n4\n4\n2\n4\n4\n4\n4\n2\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n2\n4\n2\n2\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\
n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n2\n4\n2\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n2\n4\n4\n4\n2\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n2\n2\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n2\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n2\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n2\n4\n4\n2\n2\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4
\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n2\n4\n2\n4\n4\n2\n4\n2\n2\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n2\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n2\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n2\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n2\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n2\n2\n4\n4\n4\n2\n2\n4\n2\n2\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n2\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n2\n4\n4\n4\n2\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n2\n2\n2\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n2\n4\n4\n4\n2\n4\n4\n4\n4\n4\n2\n2\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n2\n4\n4\n4\n4\n4\n4\n2\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n
4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n2\n4\n4\n2\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n2\n2\n4\n4\n4\n4\n4\n4\n4\n2\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n2\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n2\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n2\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n2\n2\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n2\n4\n4\n4\n4\n4\n2\n4\n2\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n2\n4\n4\n2\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n2\n4\n2\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n2\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n2\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n2\n2\n2\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\
n4\n4\n4\n4\n2\n2\n4\n4\n4\n2\n2\n2\n4\n4\n4\n4\n4\n4\n2\n2\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n2\n2\n4\n2\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n2\n2\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n2\n2\n4\n4\n4\n4\n4\n4\n4\n4\n2\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n2\n2\n2\n2\n2\n2\n4\n4\n4\n4\n4\n4\n4\n2\n2\n4\n4\n2\n2\n4\n2\n4\n4\n4\n4\n2\n2\n2\n4\n4\n4\n2\n4\n4\n4\n4\n2\n2\n2\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n2\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n2\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n2\n2\n4\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2
\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n4\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n4\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n4\n2\n4\n2\n4\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n
2\n2\n2\n2\n2\n4\n4\n2\n4\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n4\n2\n2\n2\n4\n2\n2\n4\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n4\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\
n2\n2\n2\n2\n4\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n4\n2\n4\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n4\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2
\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n4\n4\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n4\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n4\n4\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n4\n4\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n4\n4\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n4\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n
2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n4\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\
n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n4\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n4\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2
\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n
2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n4\n4\n4\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n4\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\
n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n4\n2\n2\n3\n3\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n3\n2\n3\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n3\n2\n3\n3\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n3\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n3\n2\n2\n2\n2\n2\n3\n3\n3\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n4\n2\n2\n3\n3\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n3\n4\n4\n4\n4\n2\n2\n2\n2\n2\n2\n2\n4\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n4\n3\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n3\n3\n2\n2\n2\n4\n2\n3\n2\n2\n4\n2\n2\n3\n4\n4\n3\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2
\n2\n2\n2\n3\n3\n3\n3\n2\n2\n2\n3\n2\n2\n2\n3\n4\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n3\n2\n2\n3\n2\n2\n2\n2\n2\n3\n2\n2\n3\n3\n4\n2\n2\n2\n2\n4\n3\n2\n3\n4\n2\n3\n3\n2\n2\n3\n3\n2\n2\n3\n3\n2\n2\n2\n3\n2\n3\n4\n3\n3\n4\n4\n2\n2\n4\n3\n4\n2\n2\n2\n3\n2\n2\n2\n3\n3\n2\n3\n2\n4\n3\n2\n2\n4\n2\n4\n2\n2\n2\n4\n3\n3\n2\n2\n3\n2\n4\n5\n2\n3\n3\n4\n3\n3\n2\n3\n2\n2\n3\n2\n2\n2\n2\n2\n2\n3\n2\n3\n2\n2\n3\n3\n3\n3\n2\n3\n2\n3\n4\n2\n2\n2\n4\n4\n2\n2\n2\n3\n2\n3\n2\n2\n3\n2\n2\n3\n2\n2\n2\n3\n2\n2\n3\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n3\n2\n3\n3\n3\n2\n2\n4\n2\n4\n2\n3\n4\n5\n2\n2\n2\n2\n2\n3\n2\n2\n4\n2\n4\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n4\n3\n3\n3\n2\n2\n2\n3\n2\n3\n2\n2\n3\n2\n2\n2\n3\n2\n3\n3\n2\n4\n2\n4\n2\n2\n2\n3\n2\n2\n2\n3\n2\n3\n2\n3\n3\n2\n2\n3\n2\n4\n2\n2\n2\n3\n3\n2\n2\n2\n2\n2\n3\n2\n3\n2\n3\n3\n4\n2\n2\n2\n3\n2\n3\n2\n4\n2\n2\n2\n2\n3\n3\n4\n3\n3\n3\n2\n2\n3\n4\n4\n3\n4\n4\n2\n4\n2\n2\n3\n3\n2\n2\n2\n2\n3\n2\n2\n4\n2\n2\n3\n4\n2\n3\n2\n3\n2\n2\n2\n2\n2\n3\n2\n2\n4\n2\n3\n2\n2\n2\n2\n2\n4\n2\n2\n3\n4\n2\n2\n4\n3\n4\n2\n3\n3\n2\n3\n4\n4\n2\n2\n3\n2\n3\n2\n2\n2\n2\n2\n2\n4\n3\n3\n4\n2\n3\n3\n2\n4\n2\n3\n2\n2\n2\n3\n2\n3\n3\n2\n4\n3\n3\n3\n2\n3\n4\n2\n3\n4\n2\n3\n3\n3\n3\n4\n4\n3\n2\n2\n2\n2\n2\n2\n2\n3\n4\n2\n2\n2\n3\n3\n4\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n3\n3\n4\n3\n2\n2\n4\n3\n3\n3\n3\n2\n3\n3\n3\n2\n3\n3\n4\n2\n2\n2\n2\n4\n4\n3\n2\n2\n2\n2\n3\n4\n2\n2\n2\n2\n3\n4\n2\n3\n3\n2\n3\n2\n3\n2\n2\n3\n3\n2\n2\n3\n2\n2\n3\n3\n2\n2\n2\n2\n2\n3\n2\n2\n2\n4\n2\n2\n2\n2\n2\n3\n3\n5\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n3\n2\n2\n3\n2\n2\n2\n2\n3\n2\n2\n2\n2\n3\n2\n2\n3\n2\n2\n4\n2\n2\n2\n2\n3\n2\n2\n3\n2\n2\n4\n3\n4\n2\n3\n3\n2\n3\n3\n3\n4\n2\n2\n3\n2\n2\n3\n3\n2\n4\n2\n3\n2\n2\n3\n3\n4\n2\n2\n2\n4\n4\n2\n3\n2\n2\n3\n3\n3\n4\n3\n3\n2\n2\n2\n2\n2\n3\n2\n3\n3\n2\n2\n2\n3\n2\n2\n4\n2\n2\n3\n3\n2\n2\n2\n2\n3\n3\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n3\n3\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n4\n2\n2\n3\n4\n2\n4\n2\n2\n4\n2\n2\n2\n4\n2\n2\n3\n2\n2\n2\n3\n3\n3\n
2\n2\n2\n3\n2\n4\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n3\n3\n2\n4\n2\n4\n2\n2\n2\n3\n3\n3\n2\n2\n3\n2\n2\n3\n2\n2\n3\n3\n3\n3\n2\n4\n2\n3\n3\n4\n3\n3\n3\n2\n3\n3\n3\n3\n2\n3\n3\n2\n2\n2\n3\n3\n2\n2\n2\n3\n3\n3\n2\n2\n3\n2\n2\n3\n3\n4\n2\n2\n2\n2\n2\n3\n3\n3\n2\n3\n4\n4\n4\n2\n2\n2\n2\n3\n3\n2\n2\n2\n2\n4\n3\n3\n4\n3\n2\n2\n2\n3\n2\n4\n2\n3\n2\n2\n2\n3\n3\n3\n2\n2\n2\n4\n3\n3\n4\n4\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n3\n2\n3\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n3\n4\n2\n2\n3\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n3\n4\n4\n2\n2\n4\n2\n2\n2\n3\n2\n2\n3\n2\n2\n3\n3\n2\n2\n2\n2\n2\n3\n3\n2\n4\n4\n4\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n3\n2\n3\n2\n3\n3\n3\n4\n4\n4\n4\n3\n3\n2\n3\n3\n3\n2\n4\n2\n3\n2\n2\n2\n2\n2\n2\n4\n2\n3\n2\n3\n4\n4\n2\n2\n3\n2\n2\n3\n3\n4\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n3\n3\n2\n4\n2\n2\n3\n4\n2\n3\n3\n4\n4\n4\n2\n2\n2\n2\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n2\n4\n3\n3\n2\n3\n2\n3\n4\n2\n5\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n4\n2\n2\n4\n2\n4\n2\n4\n3\n2\n2\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n4\n4\n3\n3\n3\n3\n3\n3\n3\n4\n3\n2\n4\n3\n3\n3\n3\n3\n3\n3\n2\n3\n2\n2\n3\n3\n3\n4\n2\n3\n3\n2\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n4\n3\n3\n3\n2\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n2\n3\n3\n3\n4\n2\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n2\n2\n2\n2\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n2\n2\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n2\n3\n3\n4\n2\n4\n3\n3\n3\n3\n3\n3\n4\n2\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n4\n2\n3\n2\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n4\n3\n3\n3\n2\n3\n3\n3\n4\n3\n3\n3\n2\n2\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\
n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n4\n4\n2\n3\n3\n3\n3\n4\n3\n3\n3\n4\n3\n4\n3\n3\n3\n4\n3\n3\n2\n3\n3\n4\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n4\n3\n4\n3\n2\n3\n3\n2\n3\n3\n3\n2\n3\n2\n3\n4\n3\n3\n3\n3\n2\n3\n3\n3\n2\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n4\n3\n3\n4\n3\n3\n3\n2\n3\n4\n3\n3\n3\n4\n3\n3\n2\n2\n3\n3\n3\n3\n2\n3\n3\n3\n4\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n4\n3\n3\n3\n3\n4\n3\n4\n3\n3\n3\n3\n3\n4\n3\n3\n3\n2\n3\n3\n3\n4\n4\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n4\n2\n4\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n4\n3\n3\n2\n3\n3\n3\n2\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n4\n3\n3\n3\n4\n4\n3\n2\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n4\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n2\n3\n4\n3\n3\n3\n3\n3\n2\n3\n2\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n2\n3\n4\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n2\n4\n2\n3\n3\n4\n3\n3\n3\n4\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n2\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n4\n2\n4\n3\n3\n3\n4\n4\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n2\n3\n2\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n4\n3\n3\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n4\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n4\n4\n3\n3\n4\n3\n3\n2\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n2\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n2\n2\n3\n3\n3\n4\n4\n3\n3\n3\n2\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n4\n3\n3\n2\n3\n3\n2\n3\n2\n4\n3
\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n4\n4\n3\n3\n3\n4\n3\n3\n4\n3\n2\n4\n3\n3\n4\n2\n3\n3\n3\n3\n3\n3\n3\n2\n3\n2\n3\n3\n2\n3\n3\n2\n4\n2\n4\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n2\n3\n4\n3\n3\n3\n3\n3\n3\n3\n2\n2\n3\n3\n3\n4\n3\n3\n2\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n4\n3\n3\n4\n2\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n4\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n2\n2\n3\n4\n3\n3\n3\n3\n3\n3\n4\n4\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n2\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n4\n3\n3\n3\n3\n4\n4\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n4\n3\n3\n2\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n2\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n4\n3\n2\n4\n3\n3\n3\n3\n3\n4\n3\n3\n3\n2\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n2\n3\n3\n3\n4\n4\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n2\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n4\n3\n3\n2\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n2\n2\n3\n4\n3\n3\n2\n3\n2\n2\n2\n3\n3\n2\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n2\n3\n3\n4\n3\n3\n2\n2\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n2\n3\n3\n2\n3\n3\n3\n3\n3\n2\n4\n3\n2\n3\n2\n3\n3\n3\n2\n3\n3\n2\n3\n3\n3\n3\n3\n3\n2\n3\n4\n3\n3\n2\n3\n2\n2\n2\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n2\n4\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n3\n3\n2\n5\n3\n3\n3\n3\n
3\n3\n3\n3\n2\n4\n3\n2\n3\n4\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n4\n2\n4\n3\n4\n2\n3\n4\n3\n3\n2\n3\n3\n3\n3\n4\n3\n3\n3\n3\n4\n3\n3\n2\n3\n3\n3\n4\n4\n3\n3\n4\n3\n3\n4\n3\n3\n3\n3\n2\n2\n3\n2\n3\n3\n2\n2\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n3\n2\n2\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n4\n3\n3\n2\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n4\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n2\n3\n3\n4\n3\n2\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n2\n3\n3\n3\n4\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n4\n3\n4\n5\n2\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n4\n3\n2\n3\n3\n3\n2\n2\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n4\n3\n2\n3\n2\n3\n3\n3\n4\n3\n4\n4\n2\n3\n3\n2\n4\n4\n2\n3\n4\n3\n2\n3\n3\n3\n3\n3\n3\n3\n2\n4\n3\n2\n4\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n4\n3\n2\n2\n3\n4\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n4\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n4\n4\n4\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n2\n3\n3\n3\n2\n3\n2\n3\n3\n3\n4\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n2\n4\n3\n3\n2\n3\n3\n3\n3\n2\n4\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n2\n2\n2\n3\n3\n3\n3\
n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n4\n4\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n4\n2\n2\n4\n3\n3\n2\n2\n2\n3\n3\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n2\n2\n3\n2\n3\n2\n2\n2\n4\n3\n3\n2\n3\n3\n3\n3\n2\n4\n3\n3\n3\n3\n4\n4\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n4\n4\n4\n3\n3\n2\n2\n2\n2\n2\n3\n2\n2\n3\n3\n3\n3\n4\n4\n4\n2\n2\n3\n3\n2\n2\n4\n2\n3\n3\n3\n3\n2\n2\n3\n3\n4\n3\n2\n3\n3\n3\n3\n2\n2\n2\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n4\n4\n4\n3\n3\n3\n3\n3\n3\n4\n3\n4\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n4\n3\n4\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n4\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n2\n2\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n5\n3\n4\n3\n4\n2\n3\n3\n4\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n4\n4\n3\n3\n3\n3\n3\n2\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n2\n4\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n2\n4\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n4\n4\n4\n2\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n4\n4\n3\n3\n4\n4\n4\n3\n5\n3\n4\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n5\n3\n3\n4\n4\n3\n3\n2\n3\n3\n3\n3\n2\n3\n4\n2\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n2\n4\n3\n4\n3\n2\n2\n4\n3\n3\n3\n3\n3\n3\n4\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n4\n3\n3\n3\n2\n2\n3\n3
\n3\n4\n4\n4\n4\n3\n2\n3\n4\n3\n4\n2\n4\n3\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n2\n4\n5\n4\n3\n4\n2\n3\n3\n2\n4\n2\n2\n4\n4\n3\n4\n2\n3\n3\n2\n2\n4\n4\n3\n4\n3\n4\n4\n4\n4\n2\n2\n4\n3\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n3\n4\n2\n4\n4\n4\n4\n3\n4\n4\n2\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n2\n3\n4\n4\n3\n2\n4\n4\n2\n2\n4\n2\n4\n4\n4\n4\n3\n4\n2\n4\n2\n4\n2\n4\n4\n4\n4\n2\n4\n4\n3\n3\n2\n2\n2\n4\n3\n4\n4\n4\n4\n4\n3\n2\n3\n4\n2\n2\n2\n2\n2\n4\n4\n3\n4\n4\n3\n4\n2\n3\n3\n4\n3\n4\n4\n4\n3\n4\n4\n3\n3\n4\n3\n4\n3\n2\n2\n4\n3\n4\n4\n3\n3\n4\n3\n4\n4\n4\n3\n4\n3\n3\n2\n4\n3\n2\n4\n3\n4\n2\n4\n3\n3\n4\n4\n4\n4\n4\n2\n4\n4\n3\n3\n4\n3\n4\n4\n3\n4\n3\n4\n4\n2\n4\n2\n4\n3\n4\n3\n4\n2\n3\n3\n4\n3\n2\n2\n4\n3\n4\n4\n5\n3\n4\n4\n4\n4\n3\n3\n2\n2\n4\n3\n3\n4\n2\n4\n4\n3\n4\n3\n4\n4\n4\n2\n3\n3\n3\n3\n3\n4\n4\n4\n4\n4\n4\n3\n2\n3\n3\n3\n4\n4\n2\n4\n2\n2\n4\n4\n4\n4\n3\n4\n4\n3\n3\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n3\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n3\n3\n4\n4\n4\n4\n3\n4\n3\n4\n3\n4\n3\n4\n4\n4\n4\n2\n4\n4\n3\n4\n4\n4\n4\n3\n4\n2\n4\n4\n4\n2\n4\n4\n3\n4\n3\n4\n3\n4\n2\n3\n3\n4\n4\n4\n2\n4\n4\n4\n3\n4\n3\n3\n4\n4\n4\n4\n3\n4\n4\n4\n4\n2\n2\n4\n4\n4\n4\n4\n3\n4\n3\n4\n4\n4\n3\n3\n4\n4\n3\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n2\n4\n3\n2\n4\n4\n3\n2\n4\n2\n3\n4\n4\n3\n3\n3\n2\n4\n4\n3\n2\n4\n3\n4\n2\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n2\n4\n3\n2\n3\n4\n4\n3\n3\n4\n4\n3\n2\n2\n4\n3\n4\n3\n2\n4\n4\n4\n4\n4\n4\n4\n5\n4\n3\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n4\n3\n3\n4\n2\n3\n4\n2\n4\n4\n4\n3\n4\n4\n2\n4\n4\n3\n4\n3\n4\n3\n4\n4\n3\n4\n4\n4\n4\n4\n2\n4\n2\n4\n3\n4\n4\n4\n3\n4\n4\n4\n4\n4\n3\n3\n3\n3\n4\n4\n3\n4\n4\n4\n2\n4\n4\n4\n2\n4\n3\n3\n3\n4\n3\n4\n3\n3\n4\n4\n3\n4\n4\n4\n3\n4\n3\n4\n4\n4\n4\n2\n4\n4\n4\n3\n4\n4\n4\n4\n4\n2\n3\n3\n4\n2\n5\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n3\n4\n4\n3\n4\n4\n4\n4\n2\n3\n3\n4\n4\n2\n2\n4\n4\n4\n3\n4\n4\n4\n4\n4\n2\n3\n4\n2\n4\n2\n3\n3\n4\n3\n3\n3\n4\n3\n4\n4\n4\n2\n4\n4\n3\n2\n4\n4\n4\n4\n4\n3\n3\n2\n4\n2\n3\n3\n4\n4\n3\n4\n2\n4\n4\n3\n2\n2\n4\n4\n
4\n4\n2\n4\n4\n4\n4\n4\n3\n4\n4\n3\n3\n4\n3\n4\n4\n4\n4\n2\n4\n4\n4\n3\n3\n4\n3\n4\n2\n3\n3\n4\n4\n3\n3\n4\n4\n3\n4\n4\n3\n4\n4\n4\n4\n3\n2\n4\n2\n3\n4\n4\n4\n4\n4\n4\n5\n4\n3\n3\n3\n4\n4\n4\n4\n4\n2\n4\n2\n4\n4\n2\n3\n4\n4\n3\n3\n3\n4\n4\n3\n4\n4\n3\n2\n4\n2\n4\n4\n4\n4\n3\n4\n4\n5\n4\n4\n2\n4\n2\n4\n4\n2\n3\n2\n4\n3\n3\n2\n4\n3\n3\n4\n3\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n3\n3\n3\n3\n4\n4\n4\n3\n4\n3\n2\n3\n2\n3\n4\n4\n2\n3\n3\n3\n4\n3\n2\n2\n3\n3\n4\n3\n4\n4\n4\n2\n4\n4\n4\n3\n4\n3\n4\n4\n3\n4\n4\n3\n2\n3\n3\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n2\n4\n4\n3\n4\n3\n2\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n3\n4\n2\n4\n3\n4\n4\n4\n4\n4\n4\n2\n3\n2\n3\n4\n4\n3\n4\n3\n4\n3\n4\n4\n4\n4\n3\n4\n4\n3\n4\n4\n4\n4\n4\n2\n2\n4\n4\n2\n4\n4\n3\n4\n3\n4\n4\n4\n4\n3\n3\n2\n4\n2\n4\n4\n4\n4\n4\n3\n3\n4\n4\n3\n2\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n2\n3\n4\n3\n4\n4\n3\n4\n4\n2\n4\n2\n4\n3\n2\n3\n4\n4\n3\n3\n4\n2\n3\n3\n4\n4\n3\n2\n2\n4\n4\n4\n3\n2\n4\n3\n3\n4\n4\n4\n4\n4\n3\n4\n3\n3\n3\n3\n4\n4\n2\n4\n3\n3\n4\n4\n4\n4\n4\n3\n4\n3\n3\n4\n4\n2\n4\n4\n4\n4\n2\n3\n3\n3\n3\n4\n4\n4\n2\n2\n2\n4\n4\n2\n4\n3\n2\n4\n2\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n3\n3\n4\n3\n3\n4\n2\n4\n4\n3\n4\n4\n4\n3\n4\n3\n4\n4\n3\n4\n4\n3\n3\n3\n2\n3\n4\n3\n4\n3\n3\n2\n2\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n3\n4\n2\n4\n4\n2\n3\n4\n3\n2\n3\n4\n4\n4\n4\n2\n4\n3\n4\n4\n4\n2\n4\n2\n4\n4\n4\n4\n3\n4\n4\n4\n4\n3\n3\n4\n4\n4\n4\n3\n2\n4\n2\n4\n3\n4\n4\n2\n4\n3\n3\n2\n3\n3\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n3\n4\n4\n2\n3\n3\n2\n4\n4\n4\n3\n3\n3\n2\n2\n2\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n3\n4\n3\n4\n4\n4\n3\n3\n3\n4\n4\n4\n4\n4\n4\n3\n4\n3\n3\n4\n4\n4\n4\n4\n4\n3\n4\n3\n4\n3\n4\n4\n3\n2\n2\n4\n4\n4\n3\n4\n2\n4\n3\n3\n3\n2\n4\n4\n3\n3\n4\n3\n4\n4\n4\n3\n4\n3\n2\n3\n4\n3\n4\n3\n3\n4\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n3\n4\n3\n4\n3\n3\n3\n4\n4\n4\n4\n4\n3\n2\n4\n4\n4\n2\n3\n4\n2\n4\n4\n4\n4\n4\n3\n4\n3\n2\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n3\n3\n4\n4\n4\n4\
n4\n4\n4\n3\n4\n3\n4\n2\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n2\n4\n2\n3\n3\n4\n4\n4\n3\n2\n4\n3\n3\n4\n5\n4\n4\n4\n3\n2\n4\n3\n4\n4\n4\n4\n3\n3\n3\n4\n4\n3\n4\n4\n3\n4\n3\n4\n3\n3\n4\n3\n4\n3\n4\n2\n3\n4\n4\n4\n3\n4\n3\n4\n3\n4\n3\n4\n4\n4\n4\n3\n4\n3\n3\n3\n4\n3\n4\n3\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n2\n4\n4\n4\n4\n4\n2\n4\n4\n3\n3\n4\n4\n4\n4\n2\n3\n3\n4\n3\n4\n3\n2\n4\n4\n3\n4\n3\n4\n3\n3\n3\n4\n2\n4\n3\n4\n4\n3\n4\n4\n4\n4\n3\n2\n4\n3\n4\n4\n3\n4\n4\n4\n3\n2\n4\n3\n3\n4\n4\n4\n4\n2\n2\n3\n4\n3\n3\n2\n3\n2\n2\n2\n3\n4\n2\n4\n3\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n2\n4\n4\n4\n4\n2\n4\n3\n3\n3\n4\n4\n4\n4\n3\n4\n5\n4\n3\n4\n4\n4\n4\n3\n4\n2\n4\n4\n4\n4\n4\n2\n4\n4\n2\n4\n2\n4\n4\n4\n2\n4\n4\n2\n4\n2\n4\n4\n3\n3\n2\n3\n4\n3\n4\n2\n4\n2\n2\n2\n4\n3\n4\n4\n4\n2\n2\n4\n3\n4\n4\n3\n4\n4\n3\n4\n4\n4\n3\n4\n2\n4\n4\n3\n2\n4\n4\n4\n4\n4\n4\n4\n2\n4\n3\n2\n3\n4\n3\n2\n4\n3\n3\n4\n3\n2\n2\n4\n4\n4\n4\n4\n4\n2\n4\n2\n5\n3\n2\n4\n4\n3\n4\n4\n3\n2\n4\n4\n2\n4\n4\n4\n4\n3\n4\n3\n4\n4\n4\n4\n3\n4\n3\n4\n3\n4\n3\n4\n4\n4\n4\n4\n2\n4\n3\n4\n3\n4\n3\n3\n4\n4\n4\n4\n3\n4\n4\n2\n2\n3\n4\n4\n2\n3\n3\n4\n3\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n3\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n3\n3\n4\n3\n3\n4\n2\n4\n4\n4\n2\n4\n4\n4\n4\n2\n3\n3\n3\n4\n4\n3\n4\n4\n4\n4\n4\n4\n2\n3\n3\n4\n4\n4\n3\n3\n4\n3\n4\n4\n3\n4\n4\n3\n2\n4\n4\n2\n4\n3\n2\n2\n4\n4\n3\n4\n3\n4\n2\n4\n3\n3\n4\n4\n4\n2\n4\n4\n3\n4\n3\n4\n4\n4\n4\n4\n4\n2\n4\n2\n3\n4\n4\n2\n4\n4\n4\n4\n3\n4\n4\n4\n2\n4\n2\n4\n4\n3\n3\n4\n3\n2\n2\n4\n4\n4\n4\n4\n4\n4\n2\n2\n4\n4\n3\n4\n4\n4\n4\n3\n4\n4\n2\n4\n4\n3\n4\n3\n4\n2\n4\n4\n4\n4\n2\n3\n4\n4\n4\n4\n4\n3\n4\n2\n4\n4\n4\n4\n4\n3\n4\n4\n3\n3\n3\n4\n3\n4\n4\n3\n3\n4\n2\n4\n2\n3\n4\n4\n4\n4\n2\n4\n3\n4\n4\n2\n3\n4\n3\n3\n2\n3\n4\n4\n3\n4\n4\n3\n4\n3\n3\n4\n4\n2\n3\n4\n2\n4\n4\n2\n3\n4\n3\n4\n3\n4\n5\n4\n4\n4\n4\n3\n3\n4\n3\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n3\n4\n4\n3\n2\n4\n3\n4\n5\n2\n4\n4\n2\n4\n4\n4\n4\n4\n4\n3\n4\n3\n4\n2
\n4\n4\n2\n4\n4\n4\n2\n2\n3\n2\n4\n4\n4\n3\n2\n4\n4\n4\n4\n4\n3\n4\n4\n4\n3\n4\n4\n3\n3\n2\n2\n3\n3\n3\n2\n4\n3\n2\n4\n2\n4\n3\n4\n4\n3\n4\n4\n2\n4\n4\n2\n4\n4\n2\n4\n4\n3\n2\n4\n3\n3\n4\n3\n3\n3\n2\n4\n4\n2\n4\n2\n2\n2\n4\n3\n4\n4\n4\n4\n4\n4\n3\n4\n4\n3\n3\n4\n3\n2\n2\n3\n4\n3\n4\n3\n4\n2\n3\n3\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n2\n3\n3\n3\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n3\n3\n3\n4\n4\n3\n3\n4\n4\n3\n3\n4\n4\n4\n4\n3\n3\n3\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n4\n4\n3\n3\n4\n4\n2\n3\n4\n3\n4\n3\n2\n4\n3\n3\n3\n4\n3\n4\n2\n3\n4\n2\n4\n4\n4\n4\n4\n4\n4\n2\n4\n3\n4\n4\n3\n3\n4\n3\n4\n4\n4\n2\n4\n3\n4\n4\n4\n4\n4\n4\n2\n4\n3\n4\n4\n4\n4\n4\n3\n4\n4\n3\n2\n4\n3\n4\n4\n2\n4\n4\n4\n3\n3\n4\n3\n4\n4\n2\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n2\n3\n3\n4\n4\n2\n3\n3\n3\n4\n4\n4\n4\n4\n4\n2\n2\n2\n4\n2\n3\n2\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n3\n2\n4\n4\n4\n2\n4\n3\n3\n3\n2\n4\n4\n4\n4\n4\n4\n3\n2\n4\n4\n4\n4\n4\n3\n4\n2\n2\n2\n3\n3\n4\n3\n3\n3\n3\n4\n2\n4\n3\n3\n3\n3\n4\n3\n4\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n3\n3\n2\n4\n4\n3\n3\n4\n2\n2\n4\n4\n4\n2\n2\n2\n4\n4\n4\n4\n3\n4\n2\n2\n3\n2\n3\n3\n4\n2\n2\n2\n3\n3\n2\n2\n4\n2\n4\n4\n4\n4\n4\n4\n4\n2\n4\n2\n4\n4\n2\n4\n3\n4\n4\n3\n4\n4\n4\n4\n3\n4\n4\n3\n3\n3\n4\n3\n3\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n2\n2\n3\n2\n2\n2\n2\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n3\n3\n3\n3\n3\n2\n4\n4\n4\n4\n4\n4\n4\n4\n2\n2\n3\n4\n4\n4\n4\n3\n3\n2\n2\n3\n3\n4\n4\n3\n3\n4\n4\n4\n4\n2\n4\n3\n4\n3\n3\n3\n3\n3\n4\n4\n2\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n2\n2\n2\n2\n2\n3\n2\n2\n4\n4\n4\n4\n4\n4\n4\n2\n2\n4\n4\n2\n2\n4\n2\n3\n3\n3\n3\n2\n2\n2\n3\n4\n4\n4\n2\n4\n4\n3\n3\n3\n2\n2\n2\n4\n4\n4\n3\n3\n4\n2\n4\n3\n3\n4\n4\n2\n3\n3\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n4\n4\n3\n4\n4\n4\n4\n3\n3\n3\n3\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n3\n4\n2\n3\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n3\n4\n3\n4\n4\n2\n4\n4\n2\n
2\n4\n2\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n2\n4\n4\n3\n4\n3\n3\n3\n4\n2\n4\n3\n4\n4\n4\n5\n3\n4\n4\n4\n2\n4\n3\n4\n4\n4\n4\n4\n3\n4\n3\n4\n4\n4\n4\n4\n4\n2\n2\n4\n4\n3\n3\n4\n4\n4\n3\n4\n3\n4\n4\n4\n4\n4\n4\n3\n4\n2\n4\n4\n3\n4\n4\n3\n4\n4\n4\n3\n3\n4\n4\n4\n4\n2\n2\n4\n4\n4\n3\n3\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n3\n3\n4\n4\n4\n4\n4\n3\n3\n3\n4\n2\n4\n4\n4\n4\n4\n3\n5\n2\n2\n4\n4\n4\n4\n4\n3\n4\n4\n4\n3\n2\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n2\n4\n4\n3\n3\n4\n4\n4\n2\n4\n4\n4\n4\n3\n4\n2\n4\n2\n4\n4\n4\n3\n4\n3\n4\n4\n4\n4\n3\n3\n3\n3\n3\n4\n3\n4\n4\n4\n4\n3\n3\n3\n4\n4\n3\n2\n3\n3\n3\n3\n3\n4\n4\n5\n4\n4\n4\n2\n4\n4\n4\n4\n3\n3\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n5\n4\n4\n3\n3\n3\n4\n3\n3\n4\n4\n3\n4\n4\n4\n3\n3\n4\n3\n4\n4\n4\n4\n4\n3\n5\n4\n4\n4\n4\n3\n2\n4\n4\n4\n4\n3\n2\n4\n4\n2\n3\n3\n3\n3\n5\n4\n4\n4\n3\n4\n4\n3\n4\n4\n3\n4\n4\n2\n2\n4\n3\n4\n2\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n3\n4\n3\n4\n4\n4\n3\n4\n4\n3\n2\n4\n4\n3\n3\n3\n2\n2\n2\n4\n5\n3\n5\n5\n5\n5\n5\n2\n3\n5\n5\n5\n2\n5\n3\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n2\n5\n5\n5\n5\n3\n5\n3\n3\n2\n5\n2\n2\n5\n5\n5\n5\n2\n3\n5\n2\n5\n5\n5\n5\n5\n3\n4\n5\n5\n5\n2\n5\n5\n3\n5\n3\n5\n5\n5\n5\n5\n5\n5\n5\n3\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n3\n5\n5\n3\n2\n5\n5\n5\n2\n5\n2\n5\n5\n5\n5\n3\n5\n2\n5\n2\n5\n5\n5\n2\n5\n5\n2\n5\n5\n3\n2\n2\n2\n2\n5\n5\n5\n5\n5\n5\n5\n3\n2\n3\n5\n2\n2\n2\n2\n5\n5\n5\n5\n5\n2\n5\n5\n2\n5\n3\n5\n3\n5\n5\n5\n3\n5\n3\n5\n3\n5\n5\n3\n5\n3\n2\n2\n5\n5\n5\n5\n3\n5\n5\n3\n5\n5\n5\n5\n5\n3\n5\n2\n5\n5\n2\n5\n3\n5\n5\n5\n5\n3\n5\n5\n5\n5\n5\n2\n5\n5\n2\n3\n5\n5\n5\n5\n3\n5\n5\n5\n4\n2\n5\n2\n5\n2\n5\n2\n5\n2\n5\n5\n5\n3\n2\n2\n5\n3\n5\n5\n5\n5\n5\n5\n5\n5\n3\n2\n2\n2\n5\n5\n5\n5\n5\n5\n5\n2\n5\n3\n5\n5\n5\n5\n5\n5\n3\n3\n5\n5\n5\n5\n5\n5\n5\n5\n2\n3\n5\n3\n5\n5\n2\n5\n2\n2\n4\n5\n5\n5\n5\n5\n5\n3\n3\n5\n3\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n3\n5\n5\n5\n5\n5\n5\n5\n5\n5\n3\n5\n5\n5\n3\n5\n5\n5\n5\n5\n5\n5\n3\n5\n5\n5\n3\n5\
n5\n5\n5\n5\n5\n5\n3\n5\n5\n5\n5\n3\n5\n2\n5\n5\n5\n2\n5\n5\n5\n5\n3\n5\n5\n5\n2\n5\n3\n5\n5\n5\n2\n5\n5\n5\n3\n4\n3\n3\n5\n5\n5\n4\n5\n5\n5\n5\n5\n2\n2\n5\n5\n5\n5\n5\n3\n5\n3\n5\n5\n5\n5\n5\n5\n5\n3\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n2\n5\n3\n5\n5\n5\n5\n2\n5\n2\n5\n5\n5\n5\n3\n3\n2\n5\n5\n3\n2\n5\n3\n5\n2\n5\n5\n5\n5\n5\n5\n2\n5\n5\n4\n2\n5\n3\n5\n3\n4\n5\n3\n3\n5\n5\n3\n2\n2\n5\n5\n5\n3\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n3\n5\n3\n5\n5\n5\n4\n5\n5\n5\n5\n5\n3\n5\n3\n5\n5\n2\n3\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n3\n5\n2\n5\n5\n2\n5\n5\n5\n5\n5\n2\n5\n2\n5\n5\n2\n5\n5\n3\n5\n5\n5\n5\n5\n5\n3\n3\n3\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n2\n5\n3\n3\n5\n5\n3\n5\n3\n3\n5\n5\n3\n5\n5\n5\n2\n5\n3\n5\n5\n5\n5\n2\n5\n5\n5\n3\n5\n5\n5\n5\n5\n2\n3\n3\n2\n2\n5\n4\n5\n5\n5\n5\n5\n5\n5\n5\n5\n3\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n2\n2\n5\n5\n5\n3\n5\n5\n5\n5\n5\n2\n3\n5\n2\n5\n2\n5\n3\n5\n2\n5\n3\n5\n3\n5\n5\n5\n2\n5\n2\n3\n5\n5\n5\n5\n5\n5\n3\n5\n5\n5\n5\n3\n5\n5\n5\n3\n5\n2\n5\n5\n5\n2\n2\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n3\n5\n4\n5\n5\n5\n5\n5\n2\n5\n5\n5\n2\n5\n5\n3\n5\n2\n5\n3\n5\n5\n3\n3\n5\n3\n2\n5\n5\n5\n5\n5\n5\n4\n5\n2\n5\n2\n3\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n2\n5\n5\n5\n5\n5\n5\n2\n3\n3\n5\n5\n5\n5\n5\n3\n2\n4\n2\n5\n5\n5\n4\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n2\n5\n3\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n4\n5\n2\n5\n4\n5\n3\n3\n3\n5\n5\n5\n3\n5\n3\n2\n5\n2\n5\n5\n5\n2\n3\n5\n5\n5\n3\n2\n2\n5\n3\n5\n5\n5\n5\n2\n2\n5\n5\n5\n3\n5\n3\n5\n5\n3\n5\n5\n5\n2\n3\n3\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n4\n5\n5\n2\n5\n5\n5\n5\n3\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n3\n5\n5\n5\n5\n5\n5\n2\n3\n2\n5\n5\n5\n5\n5\n3\n5\n3\n5\n5\n2\n5\n3\n5\n2\n3\n5\n5\n5\n5\n5\n2\n2\n5\n5\n2\n5\n5\n3\n5\n3\n5\n5\n5\n5\n5\n2\n2\n5\n2\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n3\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n2\n3\n5\n3\n5\n5\n5\n5\n4\n2\n5\n2\n5\n3\n2\n5\n5\n5\n5\n3\n5\n2\n5\n3\n5\n4\n5\n2\n2\n5\n5\n5\n5\n5\n5\n3\n3\n5\n5\n5\n5\n5\n3\n5\n5
\n5\n3\n3\n5\n5\n2\n5\n3\n3\n5\n5\n4\n5\n5\n3\n4\n5\n3\n5\n5\n2\n5\n5\n5\n5\n2\n5\n3\n3\n5\n5\n5\n5\n2\n2\n2\n5\n5\n2\n5\n3\n2\n5\n2\n5\n5\n5\n3\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n3\n5\n5\n2\n5\n5\n3\n5\n5\n5\n3\n5\n5\n5\n5\n3\n5\n5\n5\n3\n5\n2\n5\n5\n3\n5\n3\n5\n2\n2\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n3\n5\n2\n5\n5\n2\n3\n4\n3\n2\n3\n5\n5\n5\n5\n2\n5\n3\n5\n5\n5\n2\n5\n2\n5\n4\n5\n5\n3\n5\n5\n5\n5\n2\n3\n5\n5\n5\n5\n3\n5\n5\n2\n4\n3\n5\n4\n2\n5\n3\n3\n2\n5\n3\n5\n5\n5\n3\n3\n4\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n3\n3\n5\n5\n2\n3\n5\n2\n5\n5\n5\n5\n5\n3\n2\n2\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n3\n5\n3\n5\n5\n5\n3\n3\n5\n5\n5\n5\n5\n5\n5\n3\n5\n3\n3\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n3\n5\n5\n5\n2\n2\n5\n5\n5\n3\n5\n2\n5\n3\n3\n5\n2\n5\n4\n3\n5\n5\n5\n5\n5\n5\n3\n5\n3\n2\n5\n5\n3\n4\n3\n3\n5\n5\n5\n4\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n3\n5\n3\n5\n3\n5\n3\n5\n4\n5\n5\n5\n3\n2\n3\n5\n5\n2\n3\n5\n2\n5\n5\n5\n5\n5\n3\n5\n5\n2\n5\n5\n5\n5\n5\n3\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n3\n5\n3\n5\n2\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n2\n5\n2\n3\n5\n5\n5\n2\n3\n2\n5\n3\n3\n4\n5\n5\n5\n5\n3\n2\n5\n3\n5\n5\n5\n5\n3\n3\n5\n5\n5\n3\n5\n5\n3\n5\n5\n5\n3\n3\n5\n3\n5\n3\n5\n5\n3\n5\n5\n5\n3\n5\n5\n5\n5\n5\n3\n5\n5\n5\n5\n3\n5\n5\n5\n5\n3\n5\n5\n5\n2\n5\n2\n5\n4\n5\n5\n5\n5\n5\n5\n3\n5\n2\n5\n5\n3\n5\n5\n5\n5\n5\n5\n5\n3\n5\n5\n5\n5\n2\n5\n5\n5\n3\n5\n5\n2\n5\n5\n3\n5\n3\n4\n5\n3\n3\n5\n2\n5\n3\n5\n5\n5\n5\n5\n5\n5\n3\n2\n5\n5\n5\n5\n3\n5\n5\n5\n3\n2\n5\n3\n3\n5\n5\n5\n5\n2\n2\n3\n5\n5\n3\n2\n5\n2\n2\n5\n5\n5\n2\n5\n3\n5\n5\n5\n5\n2\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n2\n2\n5\n5\n5\n5\n2\n5\n5\n3\n3\n5\n5\n5\n5\n5\n5\n5\n5\n3\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n2\n5\n5\n2\n2\n2\n5\n5\n5\n2\n5\n5\n2\n5\n5\n5\n5\n5\n5\n2\n5\n5\n3\n5\n2\n5\n2\n2\n2\n5\n3\n5\n5\n5\n5\n2\n5\n3\n5\n5\n3\n5\n5\n2\n5\n2\n5\n5\n5\n5\n5\n5\n3\n2\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n2\n3\n5\n3\n2\n5\n5\n3\n5\n3\n2\n2\n5\n5\n5\n5\n5\n5\n2\n5\n2\n5\n3\n2\n5\n5\n5\n5\n5\n3\n
2\n5\n5\n2\n5\n5\n5\n5\n3\n5\n5\n5\n5\n5\n5\n5\n5\n3\n5\n3\n5\n5\n5\n5\n5\n5\n5\n2\n5\n3\n5\n3\n5\n5\n3\n5\n5\n5\n5\n5\n5\n3\n5\n2\n2\n3\n5\n5\n2\n3\n3\n5\n3\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n3\n5\n3\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n3\n3\n4\n2\n5\n5\n5\n2\n5\n4\n5\n5\n2\n3\n3\n5\n5\n5\n3\n5\n5\n5\n5\n5\n5\n2\n3\n2\n5\n5\n5\n3\n3\n5\n3\n5\n5\n3\n5\n5\n3\n2\n5\n5\n2\n5\n5\n2\n2\n5\n5\n5\n5\n3\n5\n2\n5\n3\n3\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n2\n5\n5\n5\n2\n5\n5\n5\n5\n3\n5\n4\n5\n2\n5\n2\n5\n5\n3\n5\n5\n3\n3\n2\n2\n5\n5\n5\n5\n5\n5\n5\n2\n2\n5\n5\n3\n5\n5\n5\n5\n3\n5\n5\n2\n5\n5\n3\n5\n3\n5\n2\n5\n5\n5\n5\n2\n3\n5\n5\n5\n5\n5\n3\n5\n2\n5\n5\n5\n5\n5\n3\n5\n5\n2\n3\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n2\n3\n5\n5\n5\n5\n2\n5\n3\n5\n5\n2\n3\n5\n3\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n3\n5\n2\n5\n5\n2\n5\n5\n5\n4\n5\n5\n5\n5\n5\n5\n5\n5\n3\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n3\n5\n4\n3\n2\n4\n3\n5\n5\n2\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n2\n5\n5\n5\n2\n2\n3\n2\n5\n5\n5\n5\n2\n5\n5\n5\n2\n5\n5\n5\n5\n5\n3\n5\n5\n3\n3\n2\n2\n3\n3\n5\n2\n4\n5\n2\n5\n2\n5\n5\n5\n5\n5\n2\n5\n2\n5\n5\n2\n5\n5\n2\n5\n5\n5\n2\n5\n3\n3\n5\n3\n5\n5\n2\n4\n5\n2\n5\n2\n5\n2\n5\n3\n5\n5\n5\n5\n5\n5\n5\n5\n5\n3\n5\n5\n3\n2\n2\n3\n5\n3\n5\n5\n5\n5\n3\n3\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n2\n3\n3\n3\n5\n3\n3\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n3\n3\n5\n5\n5\n5\n5\n5\n3\n3\n3\n5\n5\n3\n3\n5\n5\n3\n3\n5\n5\n5\n5\n5\n3\n3\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n3\n3\n5\n5\n3\n3\n5\n5\n2\n5\n5\n5\n5\n3\n2\n5\n5\n3\n3\n5\n3\n5\n2\n3\n5\n2\n5\n5\n5\n5\n5\n5\n5\n2\n5\n3\n5\n5\n3\n3\n5\n3\n5\n5\n2\n2\n5\n3\n5\n5\n5\n5\n5\n5\n5\n5\n3\n5\n5\n5\n5\n5\n3\n4\n5\n3\n2\n5\n5\n5\n5\n2\n5\n5\n5\n3\n3\n5\n3\n5\n5\n2\n5\n5\n3\n5\n4\n4\n5\n5\n5\n5\n2\n5\n5\n5\n5\n2\n5\n3\n3\n5\n5\n5\n5\n5\n5\n2\n2\n2\n5\n2\n5\n2\n5\n5\n5\n5\n4\n5\n5\n5\n5\n5\n5\n3\n3\n5\n5\n5\n3\n2\n5\n5\n5\n2\n5\n3\n3\n3\n2\n5\n5\n5\n5\n5\n5\n5\n2\n2\n5\n5\n5\n5\n5\n3\n5\n2\n2\n2\n3\n5\n5\
n5\n5\n3\n3\n5\n2\n5\n5\n5\n3\n5\n5\n3\n5\n5\n5\n5\n5\n5\n3\n3\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n3\n3\n2\n5\n5\n3\n3\n5\n2\n2\n5\n5\n5\n2\n2\n2\n5\n5\n5\n5\n5\n5\n2\n2\n3\n5\n3\n5\n4\n2\n2\n2\n2\n2\n2\n2\n5\n2\n5\n5\n5\n5\n5\n5\n5\n2\n5\n2\n5\n5\n2\n5\n3\n5\n5\n3\n5\n5\n5\n5\n5\n5\n5\n3\n3\n3\n5\n5\n3\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n2\n2\n5\n2\n2\n2\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n3\n3\n4\n5\n4\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n4\n5\n5\n5\n5\n3\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n2\n2\n3\n5\n5\n5\n5\n3\n3\n5\n2\n2\n3\n3\n5\n5\n2\n2\n5\n5\n5\n5\n2\n5\n3\n5\n3\n3\n3\n5\n3\n5\n5\n2\n5\n5\n5\n5\n3\n4\n4\n5\n5\n5\n2\n2\n2\n2\n2\n3\n2\n2\n5\n5\n5\n5\n5\n5\n5\n2\n2\n5\n5\n2\n2\n5\n2\n3\n5\n5\n5\n2\n2\n2\n5\n5\n5\n5\n2\n5\n5\n5\n3\n3\n2\n2\n2\n5\n5\n5\n3\n3\n5\n2\n5\n3\n3\n5\n5\n2\n3\n3\n5\n5\n5\n3\n5\n5\n5\n4\n5\n4\n4\n4\n3\n3\n5\n5\n3\n5\n5\n5\n5\n5\n5\n3\n3\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n3\n3\n3\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n3\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n2\n2\n5\n2\n3\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n3\n5\n5\n5\n3\n5\n2\n5\n5\n5\n5\n5\n5\n3\n5\n5\n4\n2\n5\n5\n5\n5\n5\n5\n5\n3\n5\n5\n5\n5\n5\n5\n5\n5\n2\n2\n5\n5\n3\n3\n5\n5\n5\n5\n5\n3\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n2\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n3\n3\n5\n5\n5\n5\n5\n3\n5\n5\n5\n2\n5\n5\n5\n5\n5\n3\n5\n2\n2\n2\n5\n5\n5\n5\n3\n5\n5\n5\n3\n2\n4\n5\n5\n5\n5\n2\n2\n5\n3\n5\n5\n2\n5\n5\n3\n3\n4\n4\n4\n2\n5\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n3\n5\n3\n5\n5\n5\n5\n3\n3\n3\n3\n3\n5\n3\n5\n5\n5\n5\n5\n5\n5\n5\n5\n3\n2\n3\n5\n3\n5\n3\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n3\n3\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n3\n5\n3\n3\n5\n5\n5\n5\n5\n3\n5\n5\n5\n5\n3\n5\n3\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n2\n2\n2\n5\n5\n5\n2\n5\n5\n2\n5\n5\n3\n3\n5\n5\n5\n5\n5\n5\n5\n5\n2\n4\n3\n5\n5\n5\n5\n4\n5\n5\n5\n5\n5\n5\n2\n4\n5\n5\n5\n5\n5\n5\n5\n3\n5\n5\n5\n5\n5\n3\n5\n5\n3\n2\n5\n5\n5
\n5\n5\n2\n2\n2\n5\n5\n3\n2\n5\n2\n2\n3\n2\n2\n2\n3\n2\n2\n5\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n5\n2\n3\n3\n2\n3\n3\n2\n2\n2\n2\n2\n2\n3\n4\n2\n3\n2\n2\n2\n2\n2\n2\n2\n3\n3\n2\n2\n2\n2\n2\n2\n3\n2\n3\n2\n2\n2\n5\n5\n2\n2\n2\n3\n5\n2\n2\n2\n2\n2\n3\n2\n2\n2\n3\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n3\n5\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n3\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n3\n3\n2\n3\n2\n2\n2\n3\n2\n3\n2\n3\n2\n2\n3\n2\n3\n2\n2\n2\n2\n2\n2\n2\n3\n2\n3\n2\n2\n2\n2\n2\n3\n2\n2\n2\n3\n2\n2\n3\n2\n2\n2\n2\n3\n5\n2\n2\n5\n2\n2\n2\n2\n2\n3\n5\n2\n2\n2\n2\n2\n3\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n3\n2\n2\n2\n3\n2\n2\n5\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n3\n2\n3\n2\n2\n2\n3\n2\n2\n2\n2\n2\n3\n3\n3\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n3\n2\n3\n2\n2\n2\n2\n2\n2\n4\n3\n2\n5\n2\n2\n2\n3\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n5\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n3\n3\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n3\n2\n2\n2\n3\n2\n2\n2\n3\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n3\n5\n2\n2\n3\n3\n2\n2\n5\n2\n2\n2\n2\n3\n2\n3\n3\n5\n2\n2\n2\n3\n5\n2\n4\n2\n2\n2\n3\n2\n2\n2\n2\n3\n2\n3\n2\n2\n2\n2\n2\n2\n3\n3\n5\n2\n2\n2\n5\n2\n3\n2\n2\n2\n2\n2\n3\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n3\n3\n3\n2\n2\n2\n3\n2\n2\n3\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n3\n3\n2\n3\n3\n2\n2\n3\n2\n2\n2\n3\n5\n3\n2\n2\n2\n2\n2\n2\n2\n2\n5\n2\n3\n2\n3\n2\n5\n2\n4\n2\n2\n2\n2\n2\n3\n2\n3\n2\n2\n2\n3\n2\n2\n2\n2\n5\n3\n2\n2\n2\n2\n2\n3\n2\n3\n2\n2\n2\n2\n2\n2\n3\n2\n2\n5\n2\n2\n2\n2\n3\n2\n2\n5\n3\n5\n2\n2\n2\n2\n2\n3\n3\n3\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n3\n3\n2\n3\n2\n3\n3\n2\n2\n3\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n3\n3\n2\n2\n5\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n3\n2\n2\n3\n2\n2\n2\n5\n2\n3\n3\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n3\n2\n2\n2\n3\n5\n3\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n3\n3\n2\n2\n3\n
2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n3\n2\n3\n3\n3\n2\n2\n3\n2\n2\n2\n3\n3\n3\n5\n2\n2\n3\n2\n2\n3\n3\n2\n3\n2\n2\n2\n3\n2\n2\n2\n4\n2\n2\n2\n2\n3\n2\n2\n2\n2\n3\n2\n5\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n3\n2\n2\n3\n2\n2\n3\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n5\n2\n2\n2\n2\n2\n2\n5\n2\n3\n2\n2\n3\n3\n2\n2\n3\n2\n2\n3\n2\n2\n2\n3\n2\n2\n3\n2\n2\n3\n2\n2\n2\n4\n3\n3\n3\n3\n2\n3\n2\n3\n2\n3\n2\n2\n2\n3\n2\n2\n2\n3\n2\n2\n2\n3\n2\n2\n3\n3\n2\n2\n2\n2\n2\n2\n5\n2\n2\n3\n2\n2\n2\n2\n3\n2\n2\n2\n2\n3\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n5\n2\n2\n3\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n5\n3\n2\n2\n2\n2\n2\n2\n2\n3\n2\n3\n5\n2\n3\n2\n3\n2\n3\n2\n2\n2\n2\n3\n2\n2\n3\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n5\n2\n3\n2\n3\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n3\n2\n2\n3\n5\n2\n2\n2\n2\n5\n3\n2\n3\n2\n2\n3\n3\n2\n2\n3\n3\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n3\n2\n5\n2\n2\n2\n3\n2\n2\n3\n3\n3\n2\n2\n2\n2\n3\n3\n2\n3\n4\n2\n5\n3\n4\n2\n3\n2\n5\n2\n2\n2\n2\n2\n2\n2\n3\n3\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n3\n2\n2\n2\n2\n2\n3\n3\n2\n2\n5\n2\n2\n2\n3\n2\n2\n2\n2\n2\n3\n2\n2\n3\n2\n2\n2\n2\n3\n2\n5\n2\n3\n2\n3\n2\n2\n3\n2\n2\n2\n3\n3\n2\n3\n2\n3\n2\n3\n2\n2\n2\n5\n3\n2\n2\n2\n5\n2\n2\n2\n2\n3\n3\n2\n2\n2\n2\n3\n4\n3\n2\n3\n5\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n3\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n3\n2\n2\n2\n4\n3\n2\n4\n2\n2\n2\n3\n2\n2\n3\n2\n2\n3\n3\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n3\n3\n2\n2\n2\n3\n3\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n3\n3\n2\n3\n2\n2\n2\n2\n2\n3\n2\n3\n2\n2\n2\n3\n3\n3\n2\n2\n2\n2\n2\n2\n3\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n3\n3\n2\n2\n2\n2\n3\n3\n2\n2\n2\n2\n2\n3\n2\n3\n2\n3\n2\n3\n3\n2\n3\n2\n2\n2\n4\n2\n2\n2\n5\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n3\n2\n3\n3\n3\n3\n3\n2\n2\n2\n3\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n5\n3\n2\n3\n2\n5\n2\
n2\n2\n2\n2\n2\n2\n2\n3\n3\n2\n2\n2\n2\n5\n2\n2\n3\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n5\n2\n2\n2\n2\n2\n2\n2\n2\n2\n5\n2\n3\n2\n2\n2\n2\n3\n2\n2\n3\n3\n4\n5\n2\n2\n2\n3\n2\n2\n3\n2\n2\n2\n2\n3\n3\n2\n2\n2\n3\n2\n2\n3\n2\n3\n2\n3\n3\n5\n3\n2\n3\n3\n2\n3\n2\n2\n2\n3\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n3\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n4\n5\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n3\n3\n2\n2\n2\n2\n2\n2\n3\n3\n2\n3\n2\n2\n3\n3\n2\n3\n2\n3\n2\n5\n2\n2\n2\n3\n4\n2\n3\n3\n2\n2\n2\n2\n2\n2\n3\n2\n3\n5\n2\n3\n2\n2\n2\n2\n2\n3\n2\n2\n2\n3\n2\n2\n3\n3\n2\n2\n3\n2\n2\n2\n3\n2\n3\n3\n2\n3\n2\n2\n2\n3\n2\n2\n2\n3\n2\n3\n2\n2\n2\n2\n2\n2\n2\n3\n2\n3\n5\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n3\n2\n2\n2\n2\n2\n2\n5\n2\n3\n5\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n5\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n5\n2\n2\n2\n2\n3\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n3\n2\n3\n2\n2\n2\n2\n3\n2\n2\n3\n2\n2\n2\n2\n2\n5\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n5\n3\n2\n2\n2\n3\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n3\n2\n3\n2\n2\n3\n2\n2\n3\n5\n2\n2\n3\n2\n2\n2\n3\n2\n2\n2\n2\n3\n5\n3\n2\n2\n2\n2\n2\n2\n5\n2\n5\n2\n3\n2\n3\n5\n2\n2\n2\n5\n2\n2\n2\n2\n2\n5\n3\n3\n2\n3\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n3\n3\n5\n2\n2\n3\n2\n2\n2\n2\n2\n3\n2\n3\n2\n2\n2\n2\n3\n3\n2\n3\n2\n2\n3\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n3\n3\n5\n2\n2\n2\n2\n5\n3\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n3\n2\n2\n2\n2\n2\n3\n2\n3\n2\n2\n3\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n3\n5\n5\n2\n2\n5\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n3\n2\n3\n2\n2\n2\n3\n2\n2\n3\n2\n2\n3\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n3\n2\n3\n3\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n5\n2\n2\n2\n2\n3\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n3\n2\n2\n5\n3\n2\n3\n2
\n2\n4\n3\n2\n5\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n3\n2\n2\n3\n3\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n3\n3\n2\n3\n2\n3\n2\n4\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n3\n2\n3\n2\n2\n3\n2\n3\n5\n2\n2\n2\n2\n3\n2\n2\n2\n3\n2\n3\n2\n2\n2\n2\n2\n3\n3\n3\n3\n2\n3\n3\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n3\n3\n3\n2\n2\n2\n2\n2\n2\n3\n3\n3\n2\n2\n3\n3\n2\n2\n3\n3\n2\n2\n2\n2\n2\n3\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n3\n2\n2\n3\n3\n2\n2\n2\n2\n2\n3\n2\n3\n2\n2\n3\n3\n3\n2\n3\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n5\n3\n3\n2\n3\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n3\n3\n2\n2\n2\n2\n2\n2\n2\n3\n3\n2\n3\n2\n2\n2\n5\n2\n3\n2\n4\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n3\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n3\n2\n2\n2\n2\n2\n3\n3\n2\n2\n2\n3\n2\n2\n5\n5\n2\n2\n2\n3\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n3\n2\n2\n3\n3\n3\n3\n5\n2\n2\n3\n2\n2\n2\n2\n3\n2\n2\n2\n2\n3\n2\n3\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n3\n2\n2\n2\n3\n3\n2\n2\n2\n2\n5\n2\n2\n2\n2\n2\n2\n2\n2\n3\n5\n2\n2\n3\n2\n3\n2\n3\n2\n2\n2\n3\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n3\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n3\n3\n3\n2\n3\n3\n5\n5\n2\n5\n5\n5\n2\n2\n5\n5\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n3\n2\n3\n3\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n3\n3\n2\n2\n2\n3\n3\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n3\n2\n3\n3\n3\n2\n3\n2\n2\n2\n5\n5\n2\n2\n2\n4\n4\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n5\n5\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n3\n3\n2\n2\n2\n2\n2\n2\n3\n3\n2\n2\n5\n3\n2\n2\n2\n2\n3\n3\n2\n2\n3\n3\n3\n2\n2\n2\n4\n2\n4\n4\n4\n3\n3\n2\n2\n3\n2\n2\n2\n2\n2\n2\n3\n3\n2\n2\n2\n2\n5\n3\n2\n5\n2\n2\n2\n2\n2\n2\n3\n3\n3\n2\n
2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n5\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n5\n2\n2\n2\n2\n3\n3\n2\n2\n2\n3\n2\n2\n2\n3\n2\n2\n2\n3\n2\n2\n2\n5\n3\n2\n2\n4\n2\n2\n3\n2\n2\n2\n2\n5\n3\n5\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n3\n2\n2\n2\n2\n2\n3\n2\n3\n5\n2\n2\n2\n2\n5\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n3\n3\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n5\n5\n5\n3\n5\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n3\n2\n4\n3\n2\n2\n2\n2\n2\n5\n3\n2\n2\n2\n2\n2\n3\n3\n2\n2\n2\n2\n2\n2\n5\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n3\n2\n2\n2\n2\n3\n3\n3\n3\n3\n5\n3\n2\n2\n2\n2\n3\n3\n3\n2\n2\n3\n2\n3\n2\n3\n3\n3\n2\n2\n5\n2\n2\n2\n2\n2\n5\n5\n2\n3\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n5\n2\n3\n2\n3\n2\n3\n3\n2\n3\n3\n2\n2\n2\n2\n2\n2\n3\n3\n2\n3\n3\n2\n2\n2\n2\n2\n5\n2\n2\n2\n2\n3\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n3\n3\n5\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n2\n5\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n4\n3\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n3\n2\n2\n3\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n5\n3\n3\n5\n3\n3\n3\n2\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n5\n3\n3\n3\n3\n3\n3\n2\n3\n2\n2\n3\n3\n3\n4\n2\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n2\n3\n3\n5\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n2\n3\n3\n3\n2\n3\n2\n3\n3\n3\n3\n2\n2\n2\n5\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n2\n2\n2\n2\n3\n3\n5\n2\n3\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n2\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n2\n2\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n2\n2\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\
n3\n3\n3\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n2\n3\n3\n5\n3\n3\n3\n2\n5\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n2\n2\n3\n3\n2\n3\n3\n3\n2\n3\n2\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n2\n3\n3\n3\n2\n3\n3\n3\n3\n5\n3\n2\n3\n3\n4\n2\n3\n3\n2\n3\n3\n2\n3\n3\n3\n3\n2\n2\n3\n3\n5\n3\n2\n3\n3\n3\n3\n3\n3\n3\n5\n2\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n2\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n2\n2\n5\n4\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n2\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n5\n3\n3\n3\n3\n2\n2\n3\n2\n3\n3\n3\n3\n3\n2\n3\n3\n3\n2\n3\n3\n5\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n4\n3\n2\n3\n2\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n5\n3\n2\n3\n2\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n2\n3\n4\n3\n3\n3\n5\n3\n3\n2\n3\n2\n3\n5\n2\n3\n2\n3\n3\n3\n2\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n2\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n5\n3\n3\n3\n2\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n2\n2\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n2\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n5\n3\n2\n2\n3\n3\n2\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n2\n3\n3\n5\n3\n2\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n5\n2\n2\n3\n3\n3\n3
\n4\n3\n3\n3\n3\n3\n3\n3\n5\n2\n3\n3\n3\n3\n2\n3\n3\n3\n3\n5\n3\n3\n2\n3\n2\n3\n3\n2\n3\n3\n2\n3\n2\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n2\n5\n3\n3\n3\n3\n3\n5\n3\n3\n2\n3\n3\n3\n3\n2\n3\n4\n3\n3\n3\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n4\n2\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n5\n2\n2\n3\n3\n3\n3\n3\n2\n3\n3\n3\n2\n2\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n2\n2\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n2\n3\n5\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n2\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n2\n3\n3\n3\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n2\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n2\n3\n2\n3\n2\n3\n3\n2\n3\n3\n3\n2\n3\n3\n3\n4\n5\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n2\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n2\n2\n3\n3\n3\n2\n3\n2\n2\n2\n3\n3\n2\n5\n3\n3\n3\n2\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n2\n3\n3\n2\n5\n3\n3\n3\n2\n3\n2\n3\n2\n3\n3\n2\n3\n2\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n2\n2\n2\n2\n2\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n5\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n2\n3\n3\n3\n2\n2\n3\n3\n5\n3\n2\n2\n3\n2\n3\n3\n3\n3\n3\n2\n5\n3\n3\n2\n3\n3\n2\n3\n3\n2\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n2\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n
3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n2\n2\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n2\n3\n2\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n2\n3\n5\n3\n3\n3\n2\n3\n3\n3\n5\n3\n2\n3\n2\n3\n3\n2\n2\n2\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n2\n3\n3\n3\n3\n2\n3\n3\n3\n3\n2\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n2\n3\n3\n2\n3\n2\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n2\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n4\n3\n5\n2\n2\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n2\n3\n5\n3\n2\n2\n3\n2\n3\n3\n3\n3\n2\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n2\n3\n3\n2\n3\n2\n3\n3\n3\n3\n3\n2\n3\n2\n3\n3\n2\n3\n3\n2\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n2\n3\n2\n3\n2\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n2\n3\n3\n3\n3\n5\n5\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n2\n3\n2\n2\n3\n3\n3\n3\n2\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n2\n2\n5\n3\n2\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n4\n3\n4\n3\n3\n3\n3\n2\n3\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n2\n3\n2\n3\n2\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n2\n3\n5\n5\n2\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n3\n2\n2\n2\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n2\n2\n3\n3\n2\n2\n2\n3\n3\n3\n3\n3\n3\n2\n2\n3\n2\n3\
n3\n3\n3\n3\n3\n3\n3\n2\n2\n3\n2\n2\n2\n2\n3\n3\n3\n2\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n2\n2\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n2\n2\n3\n5\n5\n3\n3\n3\n3\n3\n2\n2\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n2\n2\n2\n2\n2\n3\n2\n2\n3\n3\n3\n3\n3\n3\n3\n2\n2\n3\n3\n2\n2\n3\n2\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n2\n2\n2\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n5\n3\n3\n5\n4\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n2\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n2\n2\n5\n5\n5\n5\n5\n3\n5\n2\n2\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n5\n3\n5\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n5\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n2\n2\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n2\n3\n3\n2\n3\n3\n3\n3\n5\n2\n3\n3\n3\n3\n3\n2\n3\n3\n3\n5\n2\n2\n4\n3\n3\n2\n3\n3\n3\n2\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n2\n2\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3
\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n2\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n2\n3\n2\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n
3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\
n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n2\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n5\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n2\n3\n2\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n2\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3
\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n2\n3\n2\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n2\n2\n2\n2\n3\n2\n2\n3\n3\n3\n3\n3\n3\n3\n2\n2\n3\n3\n2\n2\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n
3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n5\n5\n5\n3\n3\n5\n3\n3\n3\n5\n5\n5\n3\n3\n3\n4\n5\n5\n5\n5\n3\n5\n5\n5\n3\n2\n4\n5\n5\n3\n3\n5\n3\n3\n2\n5\n5\n2\n5\n5\n3\n4\n2\n3\n3\n2\n3\n3\n5\n3\n3\n3\n4\n5\n3\n3\n5\n5\n5\n3\n5\n3\n3\n4\n5\n5\n5\n5\n5\n5\n3\n5\n2\n5\n5\n3\n5\n3\n5\n5\n5\n3\n3\n5\n5\n5\n3\n5\n5\n5\n5\n4\n5\n3\n5\n3\n3\n5\n3\n3\n5\n5\n3\n5\n3\n3\n5\n5\n3\n5\n3\n5\n5\n5\n2\n5\n5\n3\n5\n5\n5\n2\n5\n4\n3\n3\n2\n2\n2\n5\n3\n3\n4\n3\n5\n5\n3\n5\n3\n5\n5\n2\n2\n2\n3\n3\n5\n3\n5\n3\n3\n5\n2\n3\n3\n5\n3\n3\n5\n5\n3\n5\n3\n4\n3\n5\n5\n3\n3\n3\n2\n2\n4\n3\n3\n5\n3\n3\n5\n3\n5\n5\n5\n3\n3\n3\n3\n2\n5\n3\n2\n5\n3\n4\n5\n4\n3\n3\n5\n5\n5\n5\n4\n2\n5\n5\n3\n3\n5\n3\n5\n5\n3\n3\n3\n5\n4\n2\n5\n2\n5\n3\n5\n3\n5\n2\n3\n3\n5\n3\n2\n2\n5\n3\n5\n5\n5\n3\n5\n5\n5\n5\n3\n3\n3\n2\n5\n3\n3\n5\n3\n3\n5\n3\n5\n3\n5\n5\n5\n3\n3\n3\n3\n3\n3\n5\n5\n5\n4\n4\n5\n5\n3\n2\n3\n5\n3\n4\n3\n3\n5\n2\n2\n4\n3\n3\n5\n3\n5\n3\n3\n3\n5\n3\n5\n4\n5\n5\n4\n5\n5\n5\n3\n5\n5\n3\n5\n5\n3\n5\n4\n5\n5\n5\n3\n3\n5\n5\n5\n3\n5\n5\n5\n5\n5\n3\n5\n3\n5\n3\n5\n3\n3\n5\n5\n3\n5\n5\n5\n3\n5\n5\n5\n5\n3\n5\n2\n5\n5\n3\n5\n5\n5\n5\n5\n3\n4\n5\n3\n2\n3\n3\n5\n5\n5\n2\n4\n5\n5\n3\n4\n3\n3\n5\n5\n5\n4\n3\n5\n5\n4\n5\n3\n2\n3\n5\n4\n5\n4\n3\n4\n3\n5\n5\n5\n3\n3\n3\
n4\n3\n5\n5\n5\n5\n5\n3\n4\n5\n5\n5\n2\n5\n3\n5\n5\n5\n3\n2\n4\n2\n3\n4\n5\n3\n3\n3\n2\n5\n5\n3\n2\n4\n3\n3\n2\n5\n5\n3\n5\n5\n3\n4\n4\n5\n4\n3\n3\n3\n5\n3\n4\n5\n3\n3\n5\n3\n3\n2\n2\n3\n3\n5\n3\n3\n5\n3\n5\n4\n5\n5\n5\n5\n5\n3\n5\n3\n5\n5\n5\n4\n5\n5\n4\n3\n3\n3\n5\n3\n3\n4\n2\n3\n4\n2\n3\n5\n5\n3\n5\n5\n5\n4\n5\n3\n5\n3\n4\n3\n4\n3\n3\n3\n3\n5\n4\n5\n5\n5\n2\n5\n3\n4\n5\n5\n3\n5\n5\n4\n5\n3\n3\n3\n3\n3\n5\n5\n3\n5\n5\n5\n2\n5\n5\n5\n5\n5\n3\n3\n4\n5\n3\n5\n3\n3\n5\n5\n3\n3\n5\n5\n3\n4\n3\n5\n5\n3\n4\n2\n5\n5\n5\n3\n5\n5\n4\n5\n5\n2\n3\n3\n5\n2\n5\n4\n5\n3\n5\n5\n5\n5\n5\n5\n3\n3\n5\n5\n3\n4\n5\n3\n4\n5\n4\n5\n2\n3\n3\n5\n5\n5\n2\n3\n3\n5\n3\n5\n5\n5\n5\n5\n2\n3\n5\n5\n5\n2\n3\n3\n3\n3\n5\n3\n5\n3\n5\n5\n3\n5\n3\n5\n3\n5\n5\n4\n5\n5\n5\n3\n3\n5\n3\n5\n3\n3\n5\n3\n3\n5\n5\n5\n5\n3\n2\n3\n4\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n3\n3\n3\n4\n3\n4\n5\n4\n3\n4\n5\n3\n3\n5\n5\n3\n3\n3\n3\n5\n2\n3\n3\n5\n3\n3\n3\n5\n3\n3\n5\n5\n3\n5\n3\n5\n4\n3\n2\n4\n2\n3\n4\n4\n5\n5\n3\n4\n5\n5\n3\n3\n3\n5\n3\n3\n5\n5\n5\n3\n2\n5\n5\n5\n3\n3\n5\n3\n3\n3\n3\n5\n3\n4\n4\n3\n2\n4\n3\n3\n5\n4\n4\n3\n5\n3\n5\n5\n5\n5\n5\n5\n5\n5\n3\n2\n5\n3\n3\n5\n3\n3\n3\n5\n3\n5\n5\n5\n3\n3\n3\n3\n5\n5\n3\n5\n5\n5\n4\n3\n3\n3\n3\n4\n5\n3\n5\n3\n5\n3\n5\n5\n2\n3\n5\n5\n3\n3\n3\n3\n5\n3\n3\n2\n3\n3\n5\n3\n5\n3\n3\n2\n5\n5\n5\n3\n3\n3\n5\n5\n3\n3\n5\n3\n2\n3\n3\n4\n3\n5\n4\n5\n3\n5\n5\n4\n3\n3\n5\n4\n5\n3\n4\n5\n3\n2\n5\n5\n3\n3\n3\n2\n5\n5\n5\n3\n5\n3\n5\n5\n5\n5\n3\n3\n5\n2\n5\n3\n5\n5\n5\n4\n5\n3\n3\n3\n5\n3\n5\n4\n3\n5\n3\n4\n5\n3\n5\n3\n5\n5\n5\n3\n5\n5\n3\n5\n5\n4\n5\n4\n2\n2\n5\n5\n3\n5\n3\n3\n5\n3\n5\n5\n5\n3\n3\n3\n2\n5\n2\n5\n5\n5\n4\n5\n3\n3\n5\n5\n3\n2\n4\n3\n3\n5\n5\n4\n4\n5\n3\n5\n5\n5\n2\n5\n3\n5\n3\n5\n4\n3\n5\n3\n2\n5\n5\n5\n3\n2\n3\n5\n5\n3\n3\n5\n5\n3\n3\n5\n4\n3\n3\n2\n3\n5\n5\n3\n5\n4\n3\n3\n5\n5\n5\n5\n4\n3\n5\n3\n3\n3\n3\n3\n5\n5\n5\n3\n3\n5\n4\n4\n5\n5\n3\n4\n3\n3\n4\n5\n2\n4\n5\n5\n4\n2\n5\n3\n3\n3\n5\n5\n5\n2\n3\n2\n4\n5\n2\n5\n3\n2\n4\n2\n4\n5\n3\n3\n5\n5\n5\n5\n5\n4\n3\n3\n5\n5\n5\n3\n3
\n5\n3\n3\n5\n2\n5\n5\n3\n4\n5\n5\n3\n5\n3\n5\n5\n3\n5\n3\n3\n3\n3\n3\n3\n5\n3\n5\n3\n3\n3\n2\n5\n3\n5\n3\n5\n5\n5\n5\n5\n5\n3\n3\n3\n5\n5\n3\n3\n4\n3\n3\n3\n5\n5\n5\n5\n2\n5\n3\n5\n5\n4\n3\n5\n2\n5\n4\n4\n5\n3\n5\n4\n5\n5\n3\n3\n5\n5\n3\n4\n3\n3\n5\n2\n4\n3\n5\n4\n2\n3\n3\n3\n2\n3\n3\n5\n5\n3\n3\n3\n4\n5\n4\n5\n5\n3\n5\n5\n5\n3\n5\n3\n3\n5\n5\n2\n3\n3\n3\n5\n5\n5\n3\n3\n3\n2\n2\n2\n5\n4\n3\n5\n3\n4\n5\n3\n5\n4\n3\n5\n3\n3\n5\n4\n3\n3\n3\n5\n5\n4\n5\n3\n3\n5\n3\n3\n5\n5\n4\n5\n5\n5\n3\n5\n3\n5\n3\n5\n5\n3\n5\n2\n3\n5\n4\n3\n5\n5\n5\n3\n3\n3\n2\n5\n4\n3\n3\n4\n3\n5\n5\n5\n3\n3\n3\n2\n3\n5\n3\n4\n3\n3\n5\n3\n5\n5\n4\n3\n3\n5\n5\n3\n5\n5\n5\n5\n5\n5\n5\n3\n3\n3\n5\n3\n5\n3\n3\n3\n3\n4\n5\n4\n3\n2\n3\n5\n4\n2\n3\n5\n3\n5\n5\n5\n5\n5\n3\n4\n3\n2\n5\n5\n5\n5\n3\n5\n3\n4\n5\n5\n5\n5\n3\n4\n5\n5\n3\n3\n3\n5\n5\n5\n5\n5\n5\n5\n3\n4\n3\n4\n3\n4\n5\n5\n5\n5\n5\n5\n3\n3\n4\n5\n4\n5\n2\n5\n2\n5\n5\n3\n3\n5\n5\n3\n3\n5\n5\n3\n3\n4\n5\n5\n5\n5\n3\n2\n3\n3\n3\n3\n5\n5\n3\n3\n3\n5\n4\n3\n5\n5\n3\n5\n3\n5\n3\n3\n5\n3\n4\n3\n3\n5\n3\n3\n5\n5\n3\n3\n3\n5\n3\n5\n3\n5\n5\n5\n3\n3\n3\n5\n3\n3\n3\n4\n3\n5\n4\n3\n5\n2\n5\n4\n5\n3\n4\n5\n3\n3\n3\n5\n2\n5\n5\n3\n4\n5\n5\n5\n5\n5\n3\n3\n3\n5\n5\n5\n2\n3\n3\n5\n3\n5\n3\n2\n5\n5\n3\n5\n3\n3\n3\n3\n3\n5\n3\n3\n3\n3\n5\n5\n4\n5\n5\n3\n3\n5\n3\n5\n5\n3\n5\n3\n5\n3\n2\n5\n3\n3\n5\n5\n3\n5\n5\n2\n3\n5\n3\n3\n2\n3\n2\n2\n5\n3\n5\n2\n5\n3\n3\n5\n3\n5\n5\n4\n3\n5\n5\n4\n4\n3\n3\n5\n5\n3\n5\n4\n5\n5\n5\n3\n2\n2\n3\n3\n5\n3\n2\n5\n3\n3\n3\n5\n5\n3\n5\n3\n5\n5\n5\n3\n5\n5\n5\n5\n3\n5\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n4\n2\n5\n5\n5\n5\n5\n3\n2\n5\n5\n5\n5\n3\n3\n2\n3\n5\n3\n5\n2\n5\n2\n2\n2\n5\n3\n3\n3\n5\n5\n5\n5\n3\n3\n5\n3\n3\n5\n3\n5\n4\n5\n3\n5\n5\n5\n5\n3\n2\n4\n5\n5\n3\n5\n5\n3\n3\n3\n3\n3\n3\n5\n3\n5\n3\n5\n3\n3\n5\n3\n2\n3\n4\n5\n5\n5\n3\n4\n3\n5\n2\n5\n3\n3\n5\n3\n3\n5\n5\n3\n2\n4\n5\n2\n3\n4\n5\n5\n3\n5\n3\n4\n5\n5\n5\n3\n5\n3\n5\n3\n5\n3\n3\n5\n5\n4\n5\n5\n3\n5\n3\n5\n3\n4\n3\n3\n5\n5\n3\n5\n3\n5\n3\n5\n3\n3\n3\n5\n5\n3\n3\n3\n5\n3\n5\n5\n5\n4\n5\n
5\n5\n2\n5\n3\n3\n4\n3\n5\n5\n5\n3\n5\n5\n5\n3\n3\n2\n5\n3\n3\n5\n3\n3\n4\n2\n4\n3\n5\n5\n5\n4\n5\n3\n5\n3\n3\n5\n5\n4\n3\n5\n5\n5\n5\n3\n4\n5\n3\n3\n5\n4\n5\n3\n3\n4\n3\n5\n4\n3\n5\n5\n3\n2\n5\n5\n2\n5\n3\n5\n2\n4\n5\n3\n4\n3\n5\n3\n5\n3\n3\n5\n5\n5\n5\n5\n5\n3\n3\n3\n5\n3\n5\n5\n5\n5\n2\n5\n3\n3\n5\n5\n2\n5\n3\n5\n5\n3\n5\n4\n3\n3\n5\n5\n5\n3\n3\n3\n3\n5\n3\n3\n2\n2\n3\n3\n5\n3\n5\n4\n5\n2\n2\n5\n4\n3\n3\n5\n5\n5\n3\n5\n5\n5\n5\n5\n3\n3\n3\n5\n2\n3\n5\n5\n3\n3\n3\n5\n3\n5\n3\n5\n3\n5\n2\n5\n5\n5\n3\n3\n3\n5\n3\n3\n3\n3\n5\n3\n5\n5\n3\n3\n5\n2\n3\n2\n3\n5\n5\n5\n5\n2\n5\n3\n5\n5\n2\n3\n5\n3\n3\n3\n3\n5\n5\n3\n4\n3\n3\n3\n3\n3\n5\n5\n3\n3\n5\n2\n5\n5\n2\n3\n5\n3\n4\n3\n5\n5\n5\n5\n5\n5\n4\n3\n5\n3\n5\n3\n5\n5\n5\n3\n3\n5\n4\n3\n5\n5\n5\n4\n3\n5\n3\n3\n3\n4\n5\n5\n5\n3\n5\n4\n3\n3\n4\n3\n5\n5\n2\n5\n4\n5\n5\n5\n5\n3\n5\n5\n3\n5\n3\n5\n2\n5\n5\n5\n5\n5\n4\n2\n5\n3\n2\n5\n4\n5\n3\n2\n5\n3\n4\n5\n5\n3\n5\n5\n5\n3\n5\n5\n3\n3\n2\n3\n3\n3\n5\n5\n4\n3\n2\n5\n2\n4\n5\n5\n5\n3\n4\n5\n2\n5\n3\n2\n5\n4\n2\n4\n4\n3\n5\n5\n3\n3\n5\n3\n3\n3\n3\n4\n5\n3\n4\n3\n3\n2\n4\n3\n5\n5\n5\n5\n5\n5\n3\n4\n5\n3\n3\n4\n3\n2\n2\n3\n4\n3\n5\n3\n5\n5\n3\n3\n3\n5\n5\n3\n5\n3\n5\n5\n3\n2\n3\n3\n3\n3\n5\n3\n3\n5\n3\n5\n5\n5\n5\n3\n3\n4\n5\n5\n3\n3\n3\n3\n5\n5\n4\n4\n5\n5\n3\n3\n3\n5\n5\n3\n3\n5\n5\n3\n3\n5\n5\n5\n5\n3\n3\n3\n5\n3\n5\n5\n3\n2\n5\n3\n3\n5\n3\n3\n5\n5\n3\n3\n3\n3\n2\n3\n5\n3\n3\n3\n5\n5\n3\n3\n3\n3\n3\n5\n2\n3\n5\n5\n5\n3\n3\n3\n5\n5\n5\n2\n5\n3\n5\n5\n3\n3\n5\n3\n3\n3\n5\n2\n5\n3\n4\n4\n4\n4\n4\n5\n5\n5\n3\n5\n5\n3\n3\n3\n3\n3\n3\n5\n3\n3\n5\n3\n3\n3\n5\n5\n5\n5\n3\n5\n3\n3\n5\n3\n5\n5\n5\n5\n5\n3\n4\n4\n4\n5\n5\n3\n5\n2\n3\n3\n5\n5\n2\n3\n3\n3\n5\n4\n4\n4\n5\n5\n5\n3\n2\n3\n2\n3\n5\n3\n3\n5\n5\n4\n3\n4\n5\n5\n5\n3\n3\n3\n5\n5\n4\n3\n2\n4\n5\n5\n5\n5\n3\n3\n3\n2\n4\n5\n3\n5\n5\n3\n3\n2\n2\n5\n5\n5\n3\n5\n3\n5\n5\n5\n5\n3\n3\n4\n5\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n5\n3\n5\n3\n5\n5\n4\n5\n3\n3\n4\n4\n3\n3\n4\n4\n4\n5\n5\n3\n3\n5\n5\n5\n5\n3\n3\n2\n5\n5\n3\n3\n4\n2\n2\n5\n5\n5\n5\n5\n5\
n5\n3\n5\n5\n3\n5\n2\n2\n3\n5\n3\n3\n4\n3\n3\n3\n3\n3\n2\n2\n3\n2\n5\n5\n5\n5\n4\n5\n5\n2\n3\n3\n5\n5\n2\n4\n3\n5\n5\n3\n4\n4\n3\n4\n5\n3\n3\n3\n3\n3\n5\n3\n3\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n5\n5\n2\n2\n3\n3\n3\n3\n3\n3\n3\n5\n5\n5\n5\n5\n5\n5\n5\n5\n4\n5\n2\n3\n3\n3\n3\n3\n5\n5\n5\n5\n5\n5\n5\n3\n5\n5\n4\n5\n3\n3\n3\n3\n3\n2\n5\n5\n3\n5\n5\n5\n5\n5\n3\n3\n3\n4\n5\n5\n3\n5\n3\n3\n3\n2\n2\n3\n3\n5\n5\n3\n3\n3\n5\n5\n5\n2\n4\n3\n5\n3\n3\n3\n3\n3\n3\n3\n2\n5\n5\n5\n4\n3\n4\n4\n4\n5\n5\n2\n2\n2\n2\n2\n3\n2\n2\n5\n5\n5\n5\n4\n4\n4\n2\n2\n5\n5\n2\n2\n4\n2\n3\n3\n3\n3\n2\n2\n3\n4\n3\n5\n3\n2\n3\n3\n3\n3\n3\n5\n5\n5\n5\n5\n5\n3\n3\n3\n2\n5\n3\n3\n5\n5\n2\n3\n3\n5\n3\n3\n3\n3\n5\n5\n5\n4\n5\n4\n4\n4\n3\n3\n5\n5\n3\n3\n4\n3\n4\n3\n3\n3\n3\n5\n5\n5\n5\n5\n3\n5\n5\n5\n4\n4\n5\n5\n5\n3\n3\n3\n5\n2\n5\n5\n5\n5\n3\n3\n5\n3\n5\n5\n4\n3\n5\n5\n5\n5\n3\n4\n5\n5\n4\n3\n5\n5\n2\n2\n5\n3\n3\n3\n4\n5\n5\n5\n3\n5\n5\n5\n5\n5\n5\n5\n5\n5\n5\n3\n3\n2\n5\n3\n3\n5\n3\n3\n3\n3\n2\n5\n3\n5\n5\n5\n5\n3\n4\n5\n4\n2\n5\n3\n4\n4\n5\n5\n5\n3\n5\n3\n3\n5\n5\n5\n5\n5\n2\n2\n5\n3\n3\n3\n4\n5\n5\n3\n3\n3\n4\n4\n5\n5\n5\n5\n3\n5\n2\n3\n5\n3\n3\n4\n3\n3\n5\n5\n5\n3\n3\n5\n5\n5\n5\n2\n2\n4\n4\n3\n3\n3\n5\n5\n3\n5\n5\n5\n5\n5\n5\n3\n3\n3\n3\n3\n3\n5\n5\n5\n5\n3\n3\n5\n5\n5\n3\n5\n5\n5\n5\n5\n3\n3\n5\n2\n2\n3\n5\n5\n3\n3\n3\n5\n5\n5\n3\n2\n4\n4\n5\n3\n3\n3\n3\n5\n3\n4\n5\n3\n3\n3\n3\n3\n4\n4\n4\n5\n5\n5\n5\n4\n3\n5\n5\n4\n3\n5\n5\n3\n5\n3\n4\n4\n3\n3\n3\n3\n3\n3\n3\n5\n3\n5\n5\n5\n5\n3\n3\n3\n5\n5\n3\n2\n3\n5\n3\n3\n3\n5\n5\n5\n5\n5\n5\n2\n5\n5\n5\n4\n3\n3\n5\n3\n5\n4\n5\n5\n4\n5\n5\n3\n5\n5\n5\n5\n5\n5\n3\n4\n3\n5\n3\n3\n5\n3\n3\n5\n5\n3\n5\n5\n5\n3\n3\n5\n3\n4\n3\n4\n5\n5\n3\n5\n3\n5\n5\n5\n3\n3\n5\n4\n3\n5\n3\n2\n5\n4\n2\n5\n3\n3\n3\n5\n5\n5\n5\n3\n5\n3\n5\n5\n3\n4\n5\n5\n5\n4\n5\n4\n5\n5\n5\n4\n4\n4\n3\n5\n5\n3\n3\n3\n5\n3\n5\n3\n5\n5\n5\n3\n5\n3\n3\n3\n4\n4\n3\n3\n3\n2\n2\n2\n3\n5\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n3\n3\n2\n4\n2\n2\n4\n4\n4
\n4\n2\n3\n4\n2\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n2\n4\n4\n3\n4\n2\n2\n2\n4\n4\n4\n4\n4\n4\n4\n4\n2\n3\n4\n4\n2\n2\n2\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n3\n4\n4\n4\n3\n4\n3\n4\n3\n4\n4\n3\n4\n4\n2\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n2\n4\n4\n2\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n2\n4\n3\n4\n4\n4\n2\n4\n4\n4\n3\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n3\n4\n3\n4\n4\n2\n4\n2\n2\n4\n4\n4\n4\n4\n4\n4\n3\n3\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n3\n4\n2\n4\n4\n4\n2\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n3\n4\n4\n4\n2\n4\n4\n4\n3\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n2\n4\n4\n4\n4\n4\n3\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n3\n4\n4\n4\n4\n2\n4\n2\n4\n4\n4\n4\n3\n3\n2\n4\n4\n3\n2\n4\n3\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n3\n4\n4\n3\n3\n4\n4\n3\n2\n2\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n5\n4\n3\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n3\n4\n4\n2\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n4\n3\n3\n4\n4\n3\n4\n4\n4\n2\n4\n3\n4\n4\n4\n4\n2\n4\n4\n4\n3\n4\n4\n4\n4\n4\n2\n3\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n2\n2\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n2\n4\n3\n4\n2\n4\n3\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n2\n4\n4\n3\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n2\n4\n3\n4\n4\n3\n3\n4\n4\n
4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n2\n3\n4\n4\n4\n4\n4\n4\n5\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n3\n4\n3\n4\n4\n3\n4\n4\n3\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n3\n4\n3\n4\n4\n4\n4\n4\n4\n3\n4\n4\n2\n4\n4\n4\n2\n3\n4\n4\n4\n3\n4\n2\n4\n3\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n2\n4\n3\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n3\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n3\n4\n4\n4\n4\n4\n4\n2\n3\n4\n4\n4\n4\n4\n4\n3\n4\n4\n3\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n2\n2\n4\n4\n4\n4\n4\n3\n4\n3\n4\n4\n4\n4\n4\n4\n2\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n3\n4\n3\n4\n4\n4\n4\n4\n2\n4\n4\n4\n3\n2\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n3\n4\n4\n4\n3\n3\n4\n4\n2\n4\n3\n3\n4\n4\n4\n4\n4\n3\n4\n4\n3\n4\n4\n2\n4\n4\n4\n4\n2\n4\n3\n3\n4\n4\n4\n4\n4\n4\n2\n4\n4\n2\n4\n3\n2\n4\n2\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n2\n4\n4\n3\n4\n3\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n2\n4\n3\n4\n4\n4\n2\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n2\n4\n3\n4\n4\n2\n4\n4\n3\n2\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n3\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n3\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n2\n2\n4\n4\n4\n3\n4\n4\n4\n3\n4\n4\n2\n4\n4\n3\n4\n4\n4\n4\n4\n4\n3\n4\n3\n2\n4\n4\n3\n4\n3\n3\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n3\n4\n3\n4\n3\n4\n3\n4\n4\n4\n4\n3\n2\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n3\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n3\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n2\n4\n4\n4\n4\n4\n4\n4\n3\
n4\n4\n3\n3\n4\n4\n4\n4\n4\n3\n2\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n3\n4\n4\n3\n4\n4\n4\n3\n4\n4\n3\n4\n3\n4\n4\n3\n4\n4\n4\n3\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n3\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n3\n2\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n2\n4\n4\n4\n3\n4\n4\n2\n4\n4\n4\n4\n3\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n3\n4\n4\n4\n3\n4\n4\n4\n3\n4\n4\n4\n4\n4\n2\n3\n4\n4\n3\n2\n4\n2\n2\n4\n4\n4\n2\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n2\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n5\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n2\n4\n4\n4\n2\n4\n4\n2\n4\n4\n4\n4\n4\n4\n2\n4\n4\n3\n4\n2\n4\n2\n2\n2\n4\n3\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n3\n2\n4\n4\n4\n3\n4\n4\n2\n2\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n3\n4\n4\n4\n4\n4\n4\n3\n2\n4\n4\n2\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n3\n4\n4\n3\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n3\n4\n4\n4\n4\n3\n4\n3\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n3\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n3\n4\n3\n3\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n3\n3\n4\n3\n4\n4\n3\n4\n4\n3\n2\n4\n4\n2\n4\n4\n4\n2\n4\n4\n4\n4\n3\n4\n4\n4\n3\n3\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n3\n4\n4\n3\n3\n2\n2\n4\n4\n4\n4\n4\n4\n4\n2\n2\n4\n4\n3\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n3\n4\n4\n4\n2\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n3\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n2\n4\n3\n4\n4\n2\n3\n4\n3\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n2\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n2\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n2\n4\n3\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4
\n4\n4\n4\n3\n4\n4\n3\n3\n2\n4\n3\n3\n4\n4\n4\n3\n2\n4\n2\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n2\n4\n4\n2\n4\n4\n4\n2\n4\n3\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n2\n4\n2\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n3\n2\n2\n4\n4\n3\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n3\n3\n3\n3\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n3\n4\n3\n4\n4\n3\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n2\n4\n4\n4\n3\n4\n3\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n3\n4\n4\n3\n4\n4\n3\n4\n4\n4\n2\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n3\n3\n4\n3\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n3\n2\n4\n4\n4\n2\n4\n3\n4\n3\n2\n4\n4\n4\n4\n4\n4\n4\n2\n2\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n3\n4\n4\n3\n4\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n3\n4\n4\n2\n4\n4\n4\n4\n2\n2\n2\n4\n4\n4\n4\n4\n4\n2\n2\n3\n4\n3\n4\n4\n4\n4\n2\n4\n4\n2\n2\n4\n2\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n2\n4\n3\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n3\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n2\n2\n4\n4\n2\n2\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n3\n3\n4\n2\n2\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n3\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n2\n2\n2\n2\n3\n2\n2\n4\n4\n4\n4\n4\n4\n4\n2\n2\n4\n4\n2\n2\n4\n4\n2\n4\n4\n4\n4\n2\n2\n2\n4\n4\n4\n4\n2\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n2\n4\n3\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n3\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n2\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n
4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n3\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n3\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n3\n4\n2\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n2\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n3\n4\n3\n4\n4\n4\n4\n3\n3\n3\n3\n3\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n2\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n3\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n3\n4\n4\n4\n3\n4\n4\n4\n4\n4\n5\n4\n4\n4\n4\n4\n2\n4\n4\n4\n4\n4\n2\n4\n4\n2\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n4\n3\n4\n4\n4\n4\n4\n3\n4\n4\n3\n2\n4\n4\n4\n4\n4\n2\n2\n2\n4\n4\n3\n2\n4\n2\n2\n3\n2\n2\n2\n4\n4\n2\n2\n3\n2\n4\n4\n2\n2\n2\n2\n2\n2\n4\n2\n4\n2\n4\n2\n2\n3\n4\n2\n3\n3\n2\n2\n2\n2\n2\n2\n3\n4\n2\n3\n2\n2\n2\n4\n2\n2\n4\n3\n4\n2\n2\n4\n2\n2\n4\n3\n2\n4\n4\n4\n2\n4\n4\n4\n2\n2\n3\n4\n2\n4\n2\n2\n4\n3\n4\n4\n2\n3\n2\n4\n4\n4\n3\n2\n2\n4\n2\n4\n2\n2\n2\n3\n2\n4\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n3\n4\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n4\n3\n2\n2\n2\n2\n2\n4\n2\n4\n2\n2\n4\n3\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n3\n3\n2\n3\n2\n2\n2\n3\n4\n3\n4\n3\n2\n2\n3\n2\n3\n2\n2\n4\n2\n4\n2\n2\n3\n2\n3\n2\n2\n2\n2\n4\n3\n2\n2\n2\n3\n2\n2\n3\n4\n2\n4\n4\n3\n4\n2\n2\n4\n4\n2\n2\n2\n2\n3\n4\n2\n2\n2\n2\n2\n4\n4\n4\n2\n4\n2\n2\n2\n4\n2\n4\n2\n3\n3\n2\n3\n2\n2\n3\n2\n2\n2\n3\n2\n2\n4\n2\n3\n2\n2\n2\n2\n3\n2\n4\n2\n4\n4\n2\n2\n2\n3\n4\n4\n2\n2\n2\n3\n3\n3\n2\n2\n2\n4\n4\n4\n2\n2\n2\n2\n3\n2\n3\n4\n2\n2\n2\n2\n2\n4\n4\n4\n2\n2\n4\n2\n3\n2\n2\n3\n2\n4\n4\n2\n4\n2\n2\n4\n2\n2\n2\n3\n2\n2\n2\n2\n4\n4\n2\n4\n3\n3\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n3\n2\n4\n4\n3\n2\n4\n2\n4\n2\n2\n2\n2\
n2\n2\n4\n2\n3\n2\n2\n2\n2\n4\n2\n2\n4\n2\n2\n3\n4\n4\n4\n2\n3\n3\n4\n4\n4\n2\n4\n4\n2\n3\n4\n3\n3\n4\n4\n2\n4\n3\n2\n2\n4\n4\n2\n2\n4\n2\n4\n4\n4\n3\n4\n3\n4\n2\n4\n2\n2\n4\n4\n3\n4\n2\n2\n2\n4\n2\n4\n4\n4\n2\n2\n2\n3\n2\n2\n2\n3\n2\n4\n2\n2\n4\n2\n3\n3\n3\n2\n2\n2\n3\n2\n4\n3\n2\n2\n2\n4\n4\n2\n2\n2\n4\n4\n2\n2\n2\n2\n2\n3\n4\n2\n3\n3\n4\n2\n3\n2\n2\n4\n3\n4\n3\n2\n2\n2\n2\n4\n2\n4\n2\n5\n2\n3\n2\n3\n2\n4\n4\n2\n4\n4\n4\n2\n3\n2\n3\n2\n4\n2\n3\n4\n2\n4\n2\n2\n3\n2\n2\n2\n4\n4\n3\n2\n3\n4\n2\n4\n2\n2\n4\n4\n2\n4\n4\n2\n2\n2\n2\n3\n2\n4\n4\n3\n4\n4\n4\n2\n2\n2\n4\n3\n3\n3\n2\n4\n3\n4\n2\n4\n2\n4\n2\n2\n2\n2\n3\n3\n4\n3\n2\n3\n3\n2\n2\n3\n4\n2\n2\n2\n4\n3\n2\n2\n2\n4\n2\n2\n4\n2\n3\n2\n2\n4\n2\n2\n2\n3\n3\n2\n2\n2\n4\n2\n4\n4\n2\n2\n2\n4\n4\n2\n3\n2\n4\n3\n2\n3\n4\n4\n4\n4\n2\n3\n3\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n4\n2\n4\n2\n2\n3\n4\n2\n4\n2\n2\n3\n2\n2\n2\n3\n4\n3\n4\n4\n4\n2\n4\n2\n3\n2\n4\n4\n4\n2\n2\n3\n2\n2\n4\n2\n3\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n4\n2\n4\n4\n2\n2\n4\n2\n4\n2\n2\n2\n2\n3\n3\n4\n3\n4\n2\n4\n4\n4\n2\n2\n4\n2\n2\n2\n3\n4\n3\n4\n2\n2\n3\n2\n2\n3\n3\n4\n4\n2\n2\n4\n3\n2\n2\n2\n4\n2\n2\n4\n2\n3\n4\n4\n2\n2\n4\n4\n5\n2\n2\n4\n2\n4\n4\n4\n2\n2\n2\n2\n2\n2\n4\n2\n3\n4\n2\n2\n3\n3\n4\n4\n3\n4\n4\n3\n2\n4\n2\n4\n2\n4\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n3\n2\n2\n3\n2\n2\n2\n4\n2\n2\n3\n2\n2\n2\n3\n2\n2\n4\n4\n4\n4\n2\n2\n2\n4\n3\n3\n3\n3\n4\n2\n4\n2\n3\n2\n3\n2\n2\n2\n3\n4\n4\n2\n3\n3\n2\n4\n3\n2\n2\n2\n3\n2\n2\n2\n4\n2\n2\n4\n2\n2\n3\n4\n2\n4\n2\n3\n2\n2\n2\n2\n3\n3\n4\n2\n2\n4\n2\n2\n2\n2\n4\n4\n2\n4\n4\n2\n2\n4\n4\n2\n2\n2\n4\n2\n2\n3\n2\n2\n2\n2\n2\n2\n3\n2\n4\n2\n4\n4\n2\n2\n2\n2\n3\n2\n2\n2\n4\n2\n4\n2\n3\n2\n3\n4\n4\n4\n4\n3\n4\n2\n3\n2\n2\n2\n2\n3\n2\n2\n3\n2\n2\n4\n2\n4\n2\n2\n2\n2\n2\n4\n4\n3\n4\n3\n4\n2\n2\n2\n4\n2\n2\n2\n2\n4\n2\n2\n4\n2\n3\n2\n2\n2\n2\n2\n4\n4\n3\n2\n2\n4\n4\n2\n2\n4\n2\n4\n2\n2\n3\n2\n3\n4\n4\n3\n4\n4\n2\n2\n2\n4\n3\n2\n2\n2\n2\n3\n3\n2\n2\n3\n3\n2\n4\n2\n2\n2\n4\n2\n2\n2\n2\n4\n3\n3\n2\n4\n2\n2\n4\n3\n4\n2\n2\n3\n3\n2\n2\n2
\n2\n3\n3\n2\n4\n4\n4\n2\n3\n4\n2\n3\n4\n4\n2\n4\n2\n2\n4\n2\n2\n3\n3\n4\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n3\n2\n4\n2\n4\n2\n4\n3\n4\n2\n4\n2\n2\n4\n3\n2\n2\n4\n2\n2\n3\n4\n2\n2\n2\n2\n2\n4\n3\n4\n2\n2\n4\n4\n3\n4\n2\n3\n2\n2\n4\n3\n3\n2\n3\n2\n3\n2\n3\n2\n2\n2\n2\n3\n2\n2\n4\n4\n2\n4\n4\n2\n3\n4\n2\n2\n4\n2\n4\n4\n3\n2\n3\n4\n2\n2\n4\n2\n2\n3\n2\n2\n4\n2\n2\n2\n2\n4\n4\n2\n3\n2\n4\n2\n4\n2\n3\n2\n2\n2\n4\n3\n2\n2\n2\n4\n3\n3\n2\n4\n2\n2\n3\n3\n2\n2\n3\n4\n4\n4\n3\n2\n4\n4\n4\n2\n2\n4\n2\n2\n2\n4\n2\n3\n3\n2\n2\n2\n3\n4\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n4\n4\n4\n2\n3\n4\n2\n4\n4\n4\n3\n2\n3\n4\n2\n4\n3\n3\n3\n2\n2\n4\n4\n4\n3\n2\n3\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n3\n4\n2\n2\n2\n2\n2\n4\n4\n3\n2\n2\n2\n3\n4\n2\n2\n4\n4\n3\n3\n2\n2\n4\n2\n3\n4\n3\n2\n3\n2\n3\n4\n3\n3\n4\n3\n4\n4\n4\n3\n3\n2\n4\n2\n2\n2\n2\n4\n2\n2\n2\n2\n4\n3\n4\n2\n2\n3\n4\n3\n2\n4\n2\n4\n3\n2\n4\n2\n4\n2\n2\n2\n2\n2\n2\n4\n2\n4\n2\n4\n3\n2\n2\n2\n2\n4\n3\n2\n3\n4\n4\n2\n2\n2\n2\n4\n2\n2\n2\n4\n3\n4\n2\n2\n4\n4\n2\n2\n3\n4\n3\n4\n2\n4\n2\n2\n4\n2\n2\n4\n2\n2\n4\n2\n4\n2\n2\n2\n2\n2\n2\n4\n2\n2\n4\n2\n3\n2\n2\n3\n3\n4\n2\n2\n2\n4\n3\n2\n2\n4\n2\n4\n4\n2\n3\n3\n2\n4\n4\n3\n4\n4\n3\n2\n4\n2\n3\n3\n4\n3\n4\n3\n4\n2\n3\n2\n2\n2\n3\n4\n2\n2\n2\n2\n3\n2\n2\n2\n2\n3\n4\n2\n2\n3\n4\n2\n2\n4\n2\n2\n2\n2\n4\n4\n4\n4\n2\n2\n4\n3\n2\n3\n2\n2\n2\n3\n4\n2\n4\n2\n2\n2\n2\n3\n4\n4\n4\n2\n2\n3\n4\n2\n3\n2\n4\n2\n2\n4\n2\n4\n3\n2\n3\n3\n4\n2\n2\n2\n4\n2\n4\n2\n4\n4\n2\n3\n2\n2\n2\n2\n2\n3\n2\n2\n2\n3\n2\n2\n3\n3\n2\n2\n4\n2\n2\n2\n3\n4\n3\n3\n2\n3\n2\n2\n2\n2\n2\n2\n4\n3\n3\n2\n4\n2\n4\n4\n2\n2\n2\n4\n4\n4\n4\n4\n4\n2\n2\n4\n2\n4\n4\n2\n2\n2\n2\n4\n2\n4\n2\n2\n2\n3\n3\n2\n4\n4\n4\n4\n2\n5\n4\n3\n4\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n4\n2\n2\n4\n4\n2\n2\n2\n2\n2\n2\n3\n2\n2\n4\n3\n4\n2\n2\n2\n2\n2\n2\n3\n4\n4\n2\n2\n2\n2\n3\n4\n2\n3\n4\n4\n2\n2\n4\n4\n3\n2\n4\n2\n2\n2\n3\n2\n4\n2\n4\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n3\n2\n2\n2\n3\n2\n2\n2\n2\n4\n2\n2\n2\n4\n4\n2\n2\n2\n2\n3\n2\n2\n4\n3\n2\n2\n3\n2\n4\n2\n2\n
2\n4\n2\n4\n3\n2\n3\n4\n4\n2\n2\n2\n4\n3\n4\n3\n4\n2\n4\n2\n4\n4\n2\n2\n3\n2\n3\n2\n3\n4\n2\n3\n4\n2\n4\n2\n2\n4\n3\n2\n2\n2\n3\n2\n2\n2\n2\n3\n4\n3\n2\n2\n2\n4\n2\n2\n2\n2\n4\n4\n3\n4\n3\n2\n2\n2\n2\n4\n2\n4\n2\n2\n2\n4\n4\n3\n4\n3\n3\n4\n2\n4\n2\n4\n2\n2\n4\n2\n4\n2\n3\n4\n4\n2\n4\n3\n4\n2\n2\n4\n2\n4\n2\n2\n2\n4\n4\n4\n3\n3\n4\n3\n2\n4\n3\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n4\n4\n2\n4\n3\n4\n2\n2\n3\n3\n4\n2\n4\n2\n4\n4\n3\n4\n4\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n4\n2\n3\n2\n4\n2\n2\n2\n2\n2\n3\n2\n3\n2\n2\n3\n3\n2\n2\n4\n2\n2\n4\n2\n4\n2\n2\n2\n4\n4\n3\n2\n2\n2\n2\n3\n4\n4\n2\n2\n2\n3\n4\n2\n4\n2\n2\n4\n4\n4\n2\n2\n2\n2\n2\n4\n2\n3\n4\n2\n4\n2\n2\n4\n4\n3\n2\n4\n2\n3\n2\n2\n3\n2\n4\n2\n2\n2\n2\n4\n2\n3\n2\n2\n2\n2\n2\n2\n3\n4\n2\n2\n3\n2\n3\n3\n2\n2\n2\n2\n3\n4\n2\n2\n4\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n3\n4\n2\n4\n4\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n4\n2\n2\n4\n4\n2\n2\n2\n4\n3\n2\n2\n4\n2\n4\n2\n2\n4\n3\n2\n4\n2\n2\n4\n3\n4\n2\n2\n2\n4\n2\n2\n4\n2\n4\n4\n2\n4\n2\n4\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n3\n2\n2\n4\n4\n2\n2\n4\n4\n4\n2\n2\n4\n2\n2\n2\n3\n2\n2\n3\n3\n2\n2\n2\n3\n2\n2\n4\n2\n2\n2\n2\n4\n2\n2\n4\n4\n4\n4\n2\n4\n2\n2\n4\n4\n2\n4\n4\n4\n2\n4\n3\n3\n2\n3\n2\n3\n2\n2\n2\n2\n2\n2\n4\n3\n2\n2\n2\n2\n2\n2\n2\n4\n2\n3\n3\n4\n3\n2\n2\n4\n4\n3\n4\n3\n2\n2\n2\n3\n4\n2\n2\n4\n4\n4\n4\n2\n4\n2\n3\n3\n3\n3\n2\n3\n3\n2\n2\n2\n2\n2\n2\n3\n4\n4\n2\n4\n4\n4\n3\n3\n2\n4\n4\n4\n2\n2\n3\n3\n3\n4\n2\n3\n3\n2\n2\n3\n3\n4\n2\n2\n2\n4\n3\n3\n2\n2\n2\n2\n4\n4\n2\n2\n2\n2\n3\n3\n2\n4\n3\n3\n2\n2\n2\n2\n4\n3\n4\n3\n2\n2\n4\n3\n3\n4\n3\n2\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n3\n2\n4\n3\n3\n2\n3\n4\n4\n2\n2\n2\n3\n4\n4\n4\n4\n4\n4\n2\n2\n3\n2\n2\n4\n4\n2\n4\n3\n4\n4\n2\n2\n4\n3\n3\n2\n2\n2\n2\n2\n2\n2\n3\n3\n2\n3\n2\n4\n2\n4\n2\n3\n4\n4\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n3\n4\n4\n4\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n4\n4\n4\n4\n2\n2\n2\n3\n3\n2\n2\n4\n3\n2\n4\n4\n4\n2\n2\n3\n3\n3\n2\n4\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n3\n4\n2\n2\n2\n3\n2\n4\n4\
n3\n3\n3\n3\n2\n2\n4\n4\n2\n2\n2\n2\n3\n2\n2\n2\n4\n4\n2\n3\n3\n4\n4\n4\n4\n4\n4\n4\n2\n2\n2\n4\n4\n4\n2\n4\n3\n3\n2\n2\n2\n3\n3\n4\n2\n2\n2\n4\n4\n2\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n3\n2\n3\n2\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n4\n2\n2\n2\n2\n4\n3\n2\n2\n3\n4\n4\n2\n4\n2\n2\n2\n3\n3\n3\n2\n4\n3\n4\n4\n2\n4\n4\n4\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n2\n4\n3\n3\n2\n2\n4\n4\n2\n4\n4\n4\n2\n2\n4\n2\n2\n3\n2\n4\n4\n4\n2\n2\n2\n4\n2\n2\n2\n3\n2\n2\n4\n2\n2\n2\n3\n3\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n3\n4\n2\n2\n2\n2\n3\n3\n2\n2\n2\n3\n3\n2\n2\n2\n2\n3\n2\n2\n2\n2\n4\n3\n4\n3\n3\n3\n2\n3\n2\n2\n2\n4\n4\n2\n4\n2\n4\n4\n4\n2\n4\n2\n2\n2\n2\n2\n3\n2\n2\n2\n4\n4\n2\n4\n4\n4\n2\n2\n4\n2\n2\n2\n4\n4\n2\n2\n3\n2\n2\n2\n2\n2\n4\n2\n2\n2\n2\n2\n2\n3\n3\n2\n2\n2\n4\n2\n2\n3\n3\n2\n2\n4\n3\n2\n2\n2\n2\n3\n3\n2\n4\n3\n3\n3\n4\n4\n4\n4\n4\n4\n3\n3\n2\n2\n3\n4\n4\n2\n4\n2\n2\n3\n3\n4\n2\n4\n2\n4\n3\n2\n2\n4\n4\n4\n2\n2\n2\n3\n3\n3\n4\n2\n2\n2\n2\n2\n4\n2\n2\n4\n4\n4\n4\n3\n2\n2\n2\n2\n3\n2\n2\n4\n2\n2\n4\n2\n2\n2\n2\n2\n2\n4\n2\n2\n2\n4\n2\n4\n2\n2\n2\n4\n2\n2\n2\n2\n3\n3\n2\n2\n4\n3\n4\n2\n2\n2\n2\n2\n2\n3\n2\n2\n2\n2\n3\n4\n2\n4\n2\n2\n3\n4\n4\n2\n2\n2\n3\n4\n3\n4\n2\n2\n4\n2\n4\n2\n2\n2\n4\n2\n3\n4\n2\n2\n3\n2\n3\n4\n4\n4\n4\n4\n4\n4\n4\n2\n2\n4\n4\n2\n2\n2\n4\n4\n2\n2\n4\n2\n2\n2\n2\n2\n4\n4\n4\n2\n2\n2\n2\n2\n2\n2\n2\n2\n2\n4\n2\n2\n3\n2\n3\n3\n2\n2\n2\n4\n2\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n3\n4\n2\n2\n2\n4\n4\n2\n2\n3\n4\n2\n2\n3\n2\n4\n4\n2\n2\n2\n2\n2\n4\n3\n4\n2\n2\n2\n4\n3\n3\n4\n4\n4\n2\n2\n2\n4\n4\n2\n2\n2\n4\n2\n2\n2\n3\n2\n3\n4\n4\n4\n4\n3\n3\n3\n3\n3\n4\n3\n4\n4\n2\n2\n3\n3\n3\n2\n2\n3\n2\n3\n2\n3\n3\n3\n2\n2\n2\n2\n2\n2\n2\n2\n4\n4\n4\n3\n3\n2\n2\n4\n4\n2\n2\n4\n2\n4\n4\n4\n4\n4\n2\n4\n4\n3\n4\n3\n2\n3\n3\n2\n3\n3\n4\n4\n2\n2\n2\n2\n3\n3\n2\n3\n4\n3\n2\n4\n4\n2\n2\n5\n2\n2\n4\n4\n3\n2\n2\n4\n4\n2\n2\n2\n2\n4\n2\n2\n2\n3\n3\n2\n2\n2\n2\n2\n2\n2\n2\n2\n3\n4\n4\n2\n2\n4\n2\n4\n2\n2\n2\n4\n4\n4\n3\n4\n4\n2\n2\n2\n2\n3\n2\n2\n2\n2\n2\n3\n2\n4\n3\n2\n4\n4
\n2\n2\n3\n2\n2\n2\n2\n2\n3\n3\n4\n3\n3\n2\n3\n3\n3\n4\n2\n3\n3\n3\n4\n4\n4\n3\n3\n3\n2\n3\n4\n4\n4\n2\n4\n5\n3\n3\n4\n3\n3\n3\n2\n3\n2\n2\n3\n3\n3\n4\n2\n3\n3\n2\n3\n3\n3\n3\n4\n3\n4\n3\n3\n4\n2\n2\n3\n3\n3\n4\n3\n4\n4\n3\n4\n3\n3\n3\n3\n4\n2\n4\n3\n3\n4\n3\n4\n4\n2\n3\n3\n4\n4\n4\n3\n4\n3\n4\n2\n4\n4\n3\n3\n2\n3\n3\n4\n3\n2\n4\n3\n3\n2\n4\n3\n3\n4\n3\n3\n3\n4\n2\n3\n2\n4\n3\n3\n2\n4\n4\n2\n3\n4\n3\n3\n2\n2\n2\n4\n3\n3\n4\n3\n2\n4\n3\n2\n3\n3\n2\n2\n2\n2\n2\n3\n2\n3\n4\n3\n4\n2\n3\n3\n3\n3\n3\n3\n3\n4\n3\n4\n3\n3\n4\n3\n3\n3\n2\n2\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n2\n2\n3\n2\n3\n3\n4\n3\n4\n3\n3\n3\n3\n3\n3\n4\n2\n4\n4\n3\n3\n3\n3\n3\n4\n3\n3\n3\n4\n4\n2\n4\n2\n3\n3\n4\n3\n4\n2\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n2\n2\n3\n3\n3\n3\n2\n3\n4\n3\n3\n3\n3\n4\n4\n3\n3\n3\n3\n3\n3\n3\n4\n3\n4\n4\n4\n4\n3\n3\n2\n3\n3\n3\n4\n3\n3\n3\n2\n2\n4\n4\n4\n3\n3\n4\n3\n3\n3\n3\n3\n3\n4\n4\n3\n4\n3\n3\n4\n3\n3\n3\n3\n4\n3\n3\n4\n4\n4\n3\n3\n3\n3\n3\n4\n3\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n4\n3\n3\n4\n2\n3\n2\n3\n4\n3\n4\n4\n3\n4\n3\n2\n3\n4\n2\n3\n4\n3\n3\n3\n4\n3\n4\n2\n3\n3\n4\n4\n4\n2\n4\n4\n3\n3\n4\n3\n3\n3\n4\n3\n4\n3\n3\n3\n4\n4\n3\n2\n3\n3\n4\n4\n4\n3\n4\n3\n4\n3\n4\n3\n3\n4\n4\n3\n4\n4\n2\n3\n3\n4\n3\n4\n2\n2\n3\n3\n2\n3\n3\n3\n2\n4\n2\n3\n4\n3\n3\n3\n3\n2\n4\n4\n3\n2\n4\n3\n3\n2\n3\n4\n3\n4\n3\n3\n4\n4\n2\n2\n3\n3\n2\n3\n4\n3\n3\n3\n4\n3\n3\n2\n2\n4\n3\n4\n3\n2\n3\n3\n2\n4\n4\n3\n4\n5\n3\n3\n3\n3\n4\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n4\n2\n3\n4\n3\n3\n4\n3\n3\n3\n3\n2\n4\n4\n3\n3\n4\n3\n4\n3\n3\n4\n4\n4\n4\n4\n2\n3\n2\n4\n3\n4\n4\n4\n3\n4\n4\n4\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n4\n3\n4\n2\n4\n3\n3\n2\n3\n3\n3\n4\n3\n3\n3\n2\n4\n3\n3\n4\n3\n3\n4\n3\n3\n3\n4\n2\n3\n4\n3\n3\n4\n3\n4\n3\n2\n3\n3\n2\n2\n4\n4\n3\n4\n4\n4\n3\n4\n4\n3\n3\n4\n4\n3\n3\n3\n4\n4\n4\n4\n2\n3\n3\n3\n4\n2\n2\n3\n3\n3\n3\n3\n4\n4\n3\n3\n3\n2\n3\n4\n2\n3\n2\n3\n3\n3\n3\n3\n3\n4\n3\n4\n4\n3\n2\n3\n3\n3\n2\n4\n4\n4\n4\n3\n3\n2\n4\n2\n3\n3\n3\n3\n4\n2\n3\n3\n3\n2\n2\n4\n3\n3\n4\n2\n3\n3\n3\n4\n3\n
3\n3\n3\n3\n3\n4\n3\n4\n3\n4\n3\n4\n3\n2\n3\n3\n3\n3\n4\n3\n4\n2\n3\n3\n3\n3\n3\n3\n4\n4\n3\n2\n4\n3\n3\n3\n3\n4\n3\n2\n4\n2\n3\n4\n4\n3\n4\n4\n4\n5\n4\n3\n3\n3\n4\n4\n3\n3\n2\n3\n2\n4\n2\n3\n4\n4\n3\n3\n3\n4\n4\n3\n4\n4\n3\n2\n4\n3\n4\n4\n4\n4\n4\n3\n5\n3\n3\n2\n3\n2\n4\n4\n2\n3\n2\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n4\n4\n3\n3\n2\n2\n3\n4\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n2\n3\n2\n3\n4\n4\n3\n3\n3\n3\n4\n3\n2\n2\n3\n3\n2\n3\n3\n4\n2\n2\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n2\n3\n3\n4\n3\n3\n4\n3\n3\n3\n3\n4\n4\n3\n4\n4\n3\n3\n4\n4\n3\n2\n3\n4\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n4\n3\n4\n4\n3\n3\n2\n4\n3\n3\n3\n4\n4\n3\n3\n3\n3\n2\n3\n4\n4\n3\n3\n3\n4\n2\n3\n4\n3\n2\n3\n3\n2\n3\n3\n4\n4\n4\n2\n2\n4\n3\n2\n3\n4\n3\n4\n3\n4\n4\n3\n3\n3\n3\n2\n3\n2\n4\n2\n3\n4\n2\n3\n3\n3\n3\n3\n2\n4\n4\n3\n3\n3\n4\n4\n4\n3\n4\n3\n4\n2\n2\n3\n4\n3\n4\n4\n3\n4\n4\n2\n4\n2\n3\n3\n2\n3\n3\n3\n3\n2\n3\n3\n3\n4\n3\n2\n2\n3\n3\n2\n3\n2\n4\n3\n3\n3\n3\n4\n4\n3\n3\n3\n3\n3\n3\n3\n2\n2\n2\n3\n3\n3\n4\n4\n4\n3\n3\n4\n3\n3\n4\n2\n4\n3\n3\n2\n2\n3\n3\n3\n3\n3\n2\n2\n2\n4\n2\n2\n3\n3\n2\n4\n2\n4\n4\n3\n3\n4\n3\n4\n3\n3\n4\n3\n3\n4\n4\n4\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n4\n3\n2\n3\n4\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n2\n3\n2\n3\n4\n3\n4\n4\n4\n3\n3\n4\n2\n4\n4\n2\n3\n4\n3\n2\n3\n4\n4\n3\n4\n2\n2\n3\n4\n3\n4\n3\n3\n2\n2\n4\n4\n3\n4\n4\n4\n3\n3\n4\n3\n4\n3\n3\n3\n2\n4\n3\n3\n3\n4\n2\n3\n3\n3\n2\n3\n3\n3\n4\n3\n3\n3\n4\n4\n4\n3\n2\n3\n4\n3\n4\n4\n3\n3\n2\n2\n3\n3\n2\n3\n3\n2\n3\n3\n3\n2\n2\n2\n4\n4\n3\n3\n3\n4\n3\n3\n4\n4\n3\n3\n3\n4\n3\n4\n3\n3\n3\n3\n3\n4\n4\n4\n3\n3\n3\n3\n4\n4\n2\n3\n4\n3\n3\n3\n3\n4\n3\n3\n2\n2\n3\n4\n4\n3\n2\n2\n3\n3\n3\n2\n4\n4\n3\n3\n3\n4\n4\n3\n3\n4\n3\n2\n3\n3\n3\n4\n3\n3\n3\n3\n4\n4\n4\n3\n3\n4\n3\n3\n4\n2\n2\n3\n3\n4\n3\n3\n3\n4\n3\n3\n3\n3\n4\n4\n3\n4\n3\n2\n3\n3\n4\n2\n3\n2\n3\n2\n3\n4\n4\n3\n4\n3\n2\n3\n2\n3\n3\n3\n3\n4\n4\n4\n2\n2\n2\n4\n3\n3\n3\n3\n3\n4\n3\n3\n4\n4\n4\n2\n3\n4\n3\n4\n2\n4\n3\n2\n4\n3\n4\n3\n3\n3\n4\n4\n4\n2\n2\n4\n2\n3\n2\n3\n3\n2\n3\n3\
n3\n2\n3\n3\n3\n4\n4\n3\n4\n4\n3\n2\n3\n3\n3\n4\n4\n2\n3\n3\n3\n4\n4\n3\n4\n4\n3\n3\n3\n2\n3\n3\n3\n3\n4\n3\n4\n3\n3\n3\n3\n3\n4\n3\n4\n3\n3\n3\n4\n3\n2\n3\n3\n4\n3\n3\n3\n4\n3\n3\n4\n3\n3\n2\n3\n4\n4\n4\n3\n3\n3\n3\n3\n2\n3\n2\n3\n3\n3\n4\n2\n4\n2\n2\n3\n3\n3\n4\n4\n3\n3\n2\n3\n3\n4\n3\n2\n3\n2\n3\n4\n3\n4\n3\n3\n3\n3\n4\n2\n3\n3\n3\n3\n3\n2\n4\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n4\n3\n2\n2\n3\n3\n3\n3\n4\n3\n2\n2\n3\n4\n3\n3\n2\n3\n2\n2\n2\n3\n3\n2\n3\n3\n3\n4\n4\n3\n2\n3\n4\n4\n4\n4\n4\n4\n3\n2\n4\n3\n4\n4\n3\n2\n2\n2\n4\n3\n4\n2\n4\n3\n3\n3\n4\n4\n4\n4\n3\n3\n5\n4\n3\n3\n3\n3\n2\n3\n3\n2\n4\n4\n3\n2\n4\n2\n4\n2\n3\n3\n2\n4\n3\n2\n3\n3\n3\n3\n3\n3\n2\n3\n4\n3\n4\n2\n2\n2\n2\n2\n3\n3\n3\n3\n2\n2\n3\n3\n4\n3\n3\n4\n4\n3\n4\n4\n4\n3\n3\n3\n2\n4\n3\n3\n2\n4\n4\n4\n3\n3\n3\n3\n2\n4\n3\n2\n3\n4\n3\n2\n2\n3\n3\n3\n3\n2\n2\n4\n2\n3\n4\n4\n2\n3\n2\n5\n3\n3\n2\n4\n3\n2\n3\n3\n2\n4\n3\n2\n4\n4\n3\n3\n3\n3\n3\n4\n4\n3\n3\n3\n4\n3\n3\n3\n4\n3\n4\n4\n4\n2\n3\n3\n3\n3\n4\n3\n3\n4\n3\n4\n3\n3\n4\n3\n2\n3\n3\n3\n4\n4\n3\n3\n3\n4\n3\n2\n3\n3\n4\n3\n4\n3\n2\n3\n3\n3\n4\n3\n3\n3\n3\n3\n4\n3\n4\n3\n3\n2\n4\n3\n3\n4\n3\n3\n4\n2\n4\n3\n4\n2\n2\n4\n2\n4\n2\n3\n3\n3\n4\n3\n4\n4\n4\n3\n4\n2\n3\n3\n3\n4\n4\n3\n3\n4\n3\n3\n4\n3\n3\n3\n3\n2\n2\n3\n2\n4\n3\n2\n2\n4\n4\n3\n4\n3\n4\n3\n4\n3\n3\n4\n4\n4\n2\n4\n3\n4\n3\n2\n3\n3\n4\n4\n3\n2\n4\n2\n3\n3\n2\n2\n4\n4\n4\n3\n3\n4\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n2\n2\n4\n3\n4\n4\n4\n2\n2\n4\n4\n3\n3\n3\n3\n3\n3\n3\n4\n2\n3\n3\n3\n4\n3\n4\n2\n3\n4\n4\n3\n2\n3\n3\n3\n3\n4\n3\n3\n4\n2\n4\n2\n4\n4\n3\n2\n4\n3\n3\n3\n3\n3\n4\n4\n3\n3\n3\n2\n4\n2\n3\n2\n4\n3\n3\n2\n4\n3\n4\n4\n2\n3\n3\n3\n3\n2\n3\n3\n2\n3\n4\n3\n3\n4\n3\n3\n4\n3\n3\n3\n2\n2\n3\n3\n2\n3\n2\n3\n4\n3\n4\n4\n3\n3\n4\n3\n3\n2\n3\n3\n3\n2\n3\n3\n4\n2\n3\n4\n4\n3\n3\n4\n4\n3\n3\n4\n3\n4\n2\n3\n3\n3\n3\n4\n3\n3\n4\n3\n4\n5\n2\n2\n4\n2\n3\n3\n3\n4\n4\n4\n3\n3\n3\n3\n2\n4\n3\n2\n3\n3\n4\n2\n2\n3\n2\n3\n4\n4\n3\n2\n4\n3\n4\n2\n3\n3\n4\n3\n3\n3\n4\n4\n3\n3\n2\n2\n3\n3\n3\n2\n4\n3\n2\n3\n2\n4\n2\n3\n4
\n3\n4\n4\n2\n4\n3\n2\n4\n4\n2\n4\n4\n3\n2\n4\n3\n3\n4\n3\n3\n3\n2\n3\n2\n2\n3\n2\n4\n3\n3\n3\n4\n4\n3\n3\n3\n4\n4\n3\n3\n4\n3\n2\n2\n3\n4\n3\n3\n3\n2\n3\n3\n3\n3\n3\n4\n4\n3\n4\n2\n4\n2\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n2\n4\n3\n3\n4\n2\n4\n3\n3\n3\n3\n3\n4\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n2\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n2\n3\n4\n3\n3\n3\n2\n2\n3\n3\n3\n4\n3\n3\n2\n3\n2\n2\n3\n3\n3\n3\n2\n3\n2\n4\n3\n4\n3\n3\n3\n3\n3\n4\n4\n2\n2\n3\n3\n4\n4\n4\n4\n4\n4\n2\n2\n3\n3\n3\n3\n3\n3\n3\n3\n4\n4\n3\n2\n4\n3\n3\n3\n2\n2\n2\n3\n2\n3\n3\n4\n3\n4\n4\n2\n3\n3\n3\n4\n4\n4\n4\n4\n3\n3\n2\n3\n3\n3\n2\n2\n3\n3\n3\n4\n4\n4\n4\n3\n3\n2\n2\n2\n3\n2\n3\n2\n3\n3\n3\n4\n4\n3\n3\n3\n3\n3\n3\n3\n3\n4\n4\n3\n2\n4\n3\n2\n3\n3\n3\n3\n2\n4\n3\n3\n3\n4\n3\n2\n2\n3\n3\n3\n3\n3\n3\n4\n3\n3\n4\n4\n3\n3\n3\n3\n3\n2\n4\n3\n3\n3\n2\n3\n3\n3\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n3\n3\n3\n4\n3\n3\n3\n4\n3\n3\n2\n3\n3\n3\n3\n4\n2\n2\n4\n3\n4\n2\n2\n2\n3\n3\n3\n3\n3\n3\n2\n2\n3\n2\n3\n3\n4\n2\n2\n3\n3\n3\n2\n2\n3\n2\n2\n2\n2\n4\n4\n4\n2\n4\n3\n4\n3\n2\n4\n3\n3\n3\n3\n4\n4\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n4\n2\n2\n3\n2\n3\n2\n3\n4\n3\n3\n3\n4\n2\n4\n4\n4\n3\n3\n4\n4\n2\n3\n3\n4\n4\n4\n3\n2\n2\n4\n3\n3\n3\n3\n4\n3\n4\n4\n3\n3\n3\n3\n2\n3\n4\n3\n4\n3\n4\n3\n3\n2\n2\n3\n4\n4\n4\n3\n4\n3\n3\n3\n2\n2\n3\n3\n4\n3\n3\n3\n3\n3\n3\n2\n4\n3\n4\n3\n3\n3\n3\n3\n3\n3\n2\n4\n4\n3\n3\n3\n4\n4\n4\n3\n4\n2\n2\n2\n2\n2\n3\n2\n2\n3\n4\n4\n4\n4\n4\n4\n2\n2\n4\n3\n2\n2\n4\n4\n2\n3\n3\n3\n3\n2\n2\n3\n3\n4\n3\n2\n3\n3\n3\n3\n3\n2\n2\n2\n4\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n2\n3\n3\n3\n4\n3\n3\n3\n4\n4\n4\n4\n4\n4\n3\n3\n3\n3\n3\n4\n4\n3\n4\n3\n3\n3\n3\n4\n2\n4\n4\n3\n4\n3\n4\n4\n4\n3\n4\n4\n3\n3\n3\n4\n2\n3\n3\n4\n3\n4\n4\n4\n4\n4\n3\n3\n3\n3\n3\n3\n3\n3\n4\n2\n3\n3\n2\n2\n3\n2\n3\n3\n4\n4\n4\n3\n4\n3\n4\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n2\n3\n4\n3\n4\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n4\n3\n4\n2\n3\n3\n4\n4\n4\n3\n3\n3\n3\n3\n4\n3\n2\n4\n4\n4\n2\n2\n3\n3\n3\n
4\n3\n3\n3\n3\n3\n4\n4\n4\n4\n4\n4\n3\n4\n2\n3\n4\n4\n3\n3\n4\n4\n4\n3\n3\n4\n4\n4\n3\n2\n2\n4\n4\n4\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n2\n4\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n4\n2\n2\n3\n4\n4\n3\n3\n3\n4\n4\n4\n3\n2\n4\n4\n3\n3\n3\n3\n4\n3\n4\n3\n2\n3\n4\n3\n3\n4\n4\n4\n2\n4\n3\n4\n3\n3\n2\n4\n3\n4\n3\n3\n3\n3\n4\n4\n3\n3\n3\n3\n3\n3\n3\n4\n3\n4\n4\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n2\n5\n2\n2\n4\n4\n4\n3\n3\n4\n3\n4\n4\n3\n4\n4\n3\n4\n4\n4\n4\n2\n5\n4\n3\n4\n3\n3\n3\n3\n3\n3\n3\n4\n4\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n4\n4\n3\n3\n5\n3\n3\n4\n4\n3\n3\n2\n4\n3\n3\n3\n3\n2\n3\n4\n2\n3\n3\n3\n3\n5\n2\n4\n3\n3\n3\n3\n2\n2\n3\n4\n4\n2\n2\n4\n3\n4\n2\n3\n4\n4\n4\n3\n3\n4\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n4\n4\n3\n3\n4\n4\n3\n3\n3\n2\n2\n2\n3\n5\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n4\n4\n3\n3\n3\n3\n4\n4\n4\n2\n4\n3\n3\n4\n3\n3\n3\n2\n3\n3\n2\n3\n3\n3\n4\n2\n3\n3\n2\n3\n3\n3\n4\n3\n4\n3\n4\n3\n3\n3\n3\n4\n3\n4\n3\n3\n3\n3\n2\n4\n3\n4\n3\n4\n4\n3\n3\n3\n4\n4\n3\n3\n3\n4\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n4\n3\n3\n4\n3\n3\n3\n2\n2\n3\n4\n3\n4\n4\n3\n2\n3\n3\n3\n2\n2\n2\n2\n3\n3\n4\n3\n2\n3\n3\n3\n3\n3\n3\n3\n4\n3\n4\n3\n3\n4\n3\n3\n3\n2\n2\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n2\n4\n3\n2\n3\n3\n4\n3\n4\n4\n3\n3\n4\n2\n4\n4\n3\n3\n3\n3\n4\n3\n3\n3\n4\n4\n2\n2\n3\n3\n4\n3\n4\n2\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n2\n3\n3\n3\n3\n4\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n4\n4\n4\n3\n3\n2\n3\n3\n3\n4\n3\n3\n3\n2\n2\n4\n4\n4\n3\n3\n4\n3\n3\n3\n3\n3\n4\n4\n3\n4\n4\n3\n3\n3\n3\n3\n3\n4\n4\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n4\n3\n3\n3\n4\n3\n3\n4\n3\n2\n4\n2\n4\n3\n3\n4\n3\n4\n3\n3\n4\n4\n2\n4\n3\n3\n4\n3\n3\n4\n3\n4\n3\n3\n4\n4\n3\n2\n3\n4\n4\n3\n4\n3\n4\n3\n4\n3\n3\n4\n4\n3\n3\n4\n4\n2\n3\n3\n3\n3\n2\n4\n2\n3\n4\n3\n3\n3\n3\n2\n4\n3\n3\n4\n3\n3\n2\n3\n3\n3\n3\n3\n3\n4\n4\n4\n3\n3\n3\n3\n3\n4\n3\n3\n4\n3\n3\n2\n2\n4\n3\n3\n3\n3\n3\n4\n5\n3\n3\n3\n4\n3\n4\n3\n3\n3\n3\n3\n4\n2\n3\n4\n2\n3\n3\n4\n4\n4\n3\n3\n4\
n3\n4\n3\n3\n4\n4\n4\n4\n3\n2\n3\n4\n4\n4\n3\n4\n4\n4\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n4\n3\n4\n2\n4\n3\n4\n3\n3\n4\n3\n3\n3\n3\n4\n3\n3\n4\n3\n3\n4\n2\n3\n4\n3\n3\n3\n4\n3\n2\n3\n3\n2\n2\n4\n3\n4\n4\n3\n4\n3\n3\n4\n3\n3\n4\n4\n4\n4\n3\n3\n3\n2\n2\n3\n3\n3\n3\n4\n3\n5\n4\n3\n4\n3\n3\n2\n3\n3\n3\n3\n3\n3\n4\n3\n4\n3\n3\n3\n3\n4\n4\n4\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n4\n2\n3\n3\n2\n4\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n4\n3\n4\n4\n3\n4\n3\n3\n3\n3\n4\n3\n2\n3\n3\n3\n3\n3\n4\n4\n3\n3\n4\n3\n3\n4\n3\n2\n4\n2\n3\n4\n4\n3\n3\n4\n4\n5\n3\n3\n3\n4\n3\n4\n3\n3\n2\n4\n3\n3\n4\n3\n3\n3\n4\n4\n3\n4\n4\n3\n3\n4\n3\n4\n4\n4\n3\n3\n3\n3\n3\n4\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n4\n3\n3\n3\n3\n4\n3\n3\n3\n3\n4\n3\n3\n3\n3\n2\n3\n4\n4\n3\n3\n3\n3\n4\n3\n3\n2\n3\n3\n3\n3\n4\n3\n2\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n4\n3\n3\n4\n4\n3\n4\n4\n3\n3\n4\n4\n3\n2\n3\n4\n3\n3\n3\n2\n3\n3\n3\n3\n3\n4\n3\n4\n3\n3\n2\n3\n3\n3\n4\n3\n3\n3\n3\n3\n4\n3\n3\n4\n3\n2\n3\n3\n4\n4\n2\n2\n4\n3\n3\n4\n3\n4\n3\n4\n3\n3\n3\n3\n2\n3\n2\n4\n3\n4\n3\n3\n3\n3\n2\n4\n4\n3\n3\n3\n4\n4\n3\n4\n3\n4\n2\n3\n3\n3\n3\n4\n4\n3\n4\n4\n2\n3\n3\n3\n2\n3\n3\n3\n3\n4\n3\n3\n3\n3\n4\n3\n3\n2\n3\n3\n3\n3\n4\n3\n3\n4\n4\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n4\n4\n3\n4\n3\n3\n4\n2\n4\n3\n3\n2\n3\n3\n3\n3\n3\n3\n2\n4\n4\n2\n3\n3\n2\n4\n2\n4\n3\n3\n4\n3\n4\n4\n3\n3\n4\n4\n3\n3\n5\n3\n3\n2\n3\n3\n4\n4\n3\n4\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n4\n4\n4\n3\n3\n4\n3\n3\n3\n4\n3\n3\n3\n3\n4\n2\n4\n3\n4\n3\n3\n2\n3\n4\n4\n3\n3\n3\n4\n4\n3\n3\n3\n4\n3\n3\n2\n4\n3\n3\n3\n4\n2\n3\n3\n3\n2\n3\n3\n3\n4\n3\n3\n4\n4\n4\n3\n3\n4\n3\n4\n4\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n2\n2\n4\n4\n3\n3\n3\n4\n3\n3\n4\n4\n3\n3\n4\n4\n3\n3\n3\n3\n4\n4\n3\n3\n3\n3\n4\n4\n3\n3\n3\n3\n3\n4\n2\n3\n4\n4\n3\n3\n3\n3\n3\n3\n2\n4\n4\n3\n3\n3\n4\n4\n3\n3\n4\n3\n2\n3\n3\n4\n3\n3\n3\n3\n4\n4\n4\n3\n3\n4\n3\n3\n4\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n4\n4\n3\n2\n3\n4\n2\n3\n2\n3\n3\n4\n4\n3\n4\n3\n2\n3\n4\n3\n4\n3\n4\n4\n3\n2\n3\n4\n3\n3\n3
\n4\n3\n4\n4\n3\n4\n3\n4\n3\n4\n3\n3\n4\n3\n3\n3\n4\n4\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n4\n4\n3\n3\n3\n4\n4\n3\n4\n4\n3\n3\n3\n3\n3\n4\n3\n4\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n4\n3\n3\n4\n3\n3\n3\n4\n4\n4\n3\n3\n3\n3\n2\n3\n4\n4\n3\n2\n3\n3\n4\n4\n3\n2\n3\n3\n3\n3\n4\n3\n2\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n4\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n4\n4\n2\n3\n4\n3\n3\n2\n3\n2\n2\n3\n3\n3\n2\n4\n3\n3\n4\n3\n3\n4\n4\n3\n4\n3\n4\n4\n4\n4\n3\n4\n4\n3\n4\n4\n3\n3\n2\n3\n4\n2\n3\n3\n3\n3\n4\n4\n4\n4\n3\n5\n4\n3\n3\n4\n2\n4\n3\n2\n4\n4\n4\n2\n3\n3\n2\n3\n3\n3\n3\n3\n3\n3\n2\n3\n4\n3\n4\n2\n3\n2\n3\n2\n3\n3\n3\n3\n3\n3\n4\n5\n3\n4\n4\n3\n4\n4\n3\n3\n4\n3\n3\n4\n4\n4\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n4\n3\n3\n4\n4\n3\n3\n2\n5\n3\n3\n4\n3\n3\n3\n3\n4\n2\n3\n4\n3\n3\n3\n4\n3\n3\n4\n3\n3\n3\n4\n3\n4\n4\n4\n3\n3\n3\n3\n4\n3\n3\n4\n3\n4\n3\n4\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n2\n3\n4\n3\n3\n4\n3\n4\n3\n2\n3\n3\n3\n3\n4\n2\n4\n3\n4\n3\n3\n4\n4\n2\n3\n3\n3\n4\n3\n4\n4\n3\n4\n2\n3\n3\n4\n4\n3\n3\n4\n3\n4\n3\n3\n3\n2\n4\n3\n4\n3\n3\n2\n4\n3\n4\n3\n4\n3\n3\n3\n4\n3\n3\n3\n3\n4\n3\n3\n3\n4\n3\n2\n2\n3\n4\n2\n4\n3\n3\n4\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n2\n2\n4\n3\n4\n4\n2\n3\n4\n4\n3\n3\n3\n3\n3\n3\n4\n3\n4\n2\n3\n4\n3\n3\n3\n3\n4\n3\n4\n2\n3\n4\n4\n3\n4\n3\n3\n3\n3\n3\n4\n4\n3\n3\n3\n2\n4\n3\n3\n3\n4\n3\n3\n2\n3\n4\n3\n3\n3\n3\n3\n2\n3\n4\n3\n4\n3\n3\n3\n3\n4\n3\n3\n3\n3\n2\n3\n3\n2\n3\n3\n4\n3\n4\n3\n3\n3\n4\n3\n3\n3\n3\n3\n4\n3\n4\n4\n3\n4\n4\n3\n4\n3\n3\n4\n3\n3\n3\n3\n4\n3\n3\n4\n3\n4\n5\n2\n4\n2\n3\n4\n4\n3\n3\n3\n2\n4\n3\n3\n3\n3\n4\n2\n4\n3\n2\n4\n4\n3\n2\n4\n3\n4\n4\n5\n3\n4\n3\n3\n3\n3\n2\n3\n3\n3\n3\n4\n3\n2\n3\n2\n4\n3\n3\n4\n3\n4\n4\n2\n4\n3\n2\n4\n4\n2\n4\n4\n3\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n4\n3\n3\n3\n3\n3\n4\n3\n3\n4\n3\n2\n2\n3\n4\n3\n3\n3\n3\n3\n3\n3\n3\n4\n4\n3\n4\n4\n2\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n4\n3\n3\n3\n4\n4\n3\n3\n3\n3\n3\n4\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n
4\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n2\n3\n4\n3\n3\n3\n2\n3\n3\n3\n4\n3\n2\n3\n3\n3\n3\n3\n3\n2\n4\n3\n3\n3\n3\n3\n4\n4\n2\n3\n4\n4\n4\n4\n4\n4\n3\n3\n3\n3\n4\n4\n3\n4\n3\n3\n3\n4\n4\n3\n4\n3\n3\n3\n3\n4\n3\n3\n3\n4\n4\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n4\n4\n4\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n4\n4\n3\n4\n3\n3\n3\n3\n3\n4\n3\n2\n4\n2\n3\n3\n3\n2\n4\n3\n4\n3\n4\n3\n2\n2\n3\n3\n3\n3\n3\n3\n4\n3\n3\n4\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n4\n4\n4\n3\n3\n4\n4\n4\n4\n4\n4\n4\n3\n3\n3\n4\n4\n3\n3\n2\n3\n3\n3\n3\n4\n2\n2\n4\n4\n2\n2\n2\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n2\n2\n3\n2\n4\n4\n4\n4\n4\n2\n4\n3\n4\n2\n2\n4\n3\n3\n4\n4\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n3\n4\n2\n2\n3\n3\n3\n3\n3\n4\n3\n3\n3\n4\n4\n4\n4\n4\n4\n2\n3\n3\n4\n4\n4\n3\n3\n4\n3\n3\n3\n3\n4\n3\n4\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n4\n4\n4\n3\n3\n3\n3\n2\n2\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n4\n3\n3\n3\n3\n3\n3\n3\n3\n4\n4\n4\n3\n4\n3\n4\n4\n4\n3\n4\n2\n2\n2\n2\n2\n3\n2\n2\n3\n4\n4\n4\n4\n4\n2\n2\n4\n3\n2\n2\n4\n4\n2\n3\n3\n3\n4\n3\n2\n3\n3\n3\n3\n3\n4\n4\n4\n3\n3\n3\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n4\n4\n4\n4\n3\n3\n3\n4\n4\n3\n4\n3\n3\n3\n3\n4\n4\n3\n4\n3\n4\n4\n4\n3\n3\n3\n3\n3\n3\n4\n3\n4\n4\n4\n4\n4\n3\n3\n3\n3\n4\n3\n2\n2\n3\n3\n3\n4\n4\n4\n4\n3\n3\n3\n3\n3\n3\n2\n4\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n3\n4\n3\n4\n2\n3\n4\n4\n4\n3\n3\n3\n3\n4\n4\n4\n3\n2\n3\n3\n3\n4\n3\n3\n3\n4\n4\n4\n4\n4\n4\n3\n4\n2\n3\n4\n3\n3\n4\n4\n4\n3\n3\n4\n4\n3\n2\n2\n4\n4\n4\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n2\n3\n4\n4\n3\n3\n3\n4\n4\n4\n3\n3\n4\n4\n3\n3\n3\n3\n3\n4\n3\n3\n3\n4\n4\n4\n2\n4\n4\n3\n3\n3\n4\n3\n4\n3\n3\n3\n4\n4\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n3\n3\n3\n3\n2\n3\n3\n3\n3\n3\n5\n3\n4\n4\n4\n3\n3\n3\n4\n4\n4\n4\n4\n4\n4\n4\n3\n5\n4\n3\n4\n3\n3\n3\n3\n3\n4\n4\n3\n3\n3\n3\n3\n4\n3\n3\n4\n4\n3\n5\n3\n3\n4\n4\n3\n4\n4\n3\n3\n3\n3\n2\n3\n4\n2\n3\n3\n3\n3\n5\n3\n3\n3\n3\n3\n3\n4\n3\n4\n3\n3\n4\n3\n4\n3\n3\n4\n4\n4\n3\n5\n4\n3\n3\n3\n3\n3\n3\n3\n4\n3\n3\n4\n4\n3\n2\n2\n3\n3\
n" ], [ "plt.plot(labels_0)", "_____no_output_____" ], [ "isinstance(gcn_labels, list)", "_____no_output_____" ], [ "import pandas", "_____no_output_____" ], [ "df = pandas.DataFrame(gcn_labels[1])", "_____no_output_____" ], [ "df.hist()", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "df2 = pandas.DataFrame(lambdaz[90])", "_____no_output_____" ], [ "df2", "_____no_output_____" ], [ "df2.hist()", "_____no_output_____" ], [ "import scipy.sparse as sp", "_____no_output_____" ], [ "sp.csr_matrix()", "_____no_output_____" ], [ "adj = data_actual[2]", "_____no_output_____" ], [ "adj[0].T[0]", "_____no_output_____" ], [ "sp.csr_matrix((adj[1], (adj[0].T[0],adj[0].T[1])))", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a2be93af77e94d3ff1b43db040fe6c4d71147a2
29,039
ipynb
Jupyter Notebook
site/en/tutorials/distribute/custom_training.ipynb
PhilipMay/docs
4d291590e6352f7fda6175e4f663cceb287589d5
[ "Apache-2.0" ]
3
2020-01-28T11:36:06.000Z
2020-01-28T12:15:04.000Z
site/en/tutorials/distribute/custom_training.ipynb
PhilipMay/docs
4d291590e6352f7fda6175e4f663cceb287589d5
[ "Apache-2.0" ]
1
2020-02-20T14:49:33.000Z
2020-02-20T14:49:33.000Z
site/en/tutorials/distribute/custom_training.ipynb
PhilipMay/docs
4d291590e6352f7fda6175e4f663cceb287589d5
[ "Apache-2.0" ]
1
2020-03-04T00:12:25.000Z
2020-03-04T00:12:25.000Z
37.811198
646
0.553945
[ [ [ "##### Copyright 2019 The TensorFlow Authors.", "_____no_output_____" ] ], [ [ "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "_____no_output_____" ] ], [ [ "# Custom training with tf.distribute.Strategy", "_____no_output_____" ], [ "<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://www.tensorflow.org/tutorials/distribute/custom_training\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />View on TensorFlow.org</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/distribute/custom_training.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/docs/blob/master/site/en/tutorials/distribute/custom_training.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\n </td>\n <td>\n <a href=\"https://storage.googleapis.com/tensorflow_docs/docs/site/en/tutorials/distribute/custom_training.ipynb\"><img src=\"https://www.tensorflow.org/images/download_logo_32px.png\" />Download notebook</a>\n </td>\n</table>", "_____no_output_____" ], [ "This tutorial demonstrates how to use [`tf.distribute.Strategy`](https://www.tensorflow.org/guide/distributed_training) with custom training loops. We will train a simple CNN model on the fashion MNIST dataset. 
The fashion MNIST dataset contains 60000 train images of size 28 x 28 and 10000 test images of size 28 x 28.\n\nWe are using custom training loops to train our model because they give us flexibility and a greater control on training. Moreover, it is easier to debug the model and the training loop.", "_____no_output_____" ] ], [ [ "from __future__ import absolute_import, division, print_function, unicode_literals\n\n# Import TensorFlow\ntry:\n # %tensorflow_version only exists in Colab.\n %tensorflow_version 2.x\nexcept Exception:\n pass\nimport tensorflow as tf\n\n# Helper libraries\nimport numpy as np\nimport os\n\nprint(tf.__version__)", "_____no_output_____" ] ], [ [ "## Download the fashion MNIST dataset", "_____no_output_____" ] ], [ [ "fashion_mnist = tf.keras.datasets.fashion_mnist\n\n(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()\n\n# Adding a dimension to the array -> new shape == (28, 28, 1)\n# We are doing this because the first layer in our model is a convolutional\n# layer and it requires a 4D input (batch_size, height, width, channels).\n# batch_size dimension will be added later on.\ntrain_images = train_images[..., None]\ntest_images = test_images[..., None]\n\n# Getting the images in [0, 1] range.\ntrain_images = train_images / np.float32(255)\ntest_images = test_images / np.float32(255)", "_____no_output_____" ] ], [ [ "## Create a strategy to distribute the variables and the graph", "_____no_output_____" ], [ "How does `tf.distribute.MirroredStrategy` strategy work?\n\n* All the variables and the model graph is replicated on the replicas.\n* Input is evenly distributed across the replicas.\n* Each replica calculates the loss and gradients for the input it received.\n* The gradients are synced across all the replicas by summing them.\n* After the sync, the same update is made to the copies of the variables on each replica.\n\nNote: You can put all the code below inside a single scope. 
We are dividing it into several code cells for illustration purposes.\n", "_____no_output_____" ] ], [ [ "# If the list of devices is not specified in the\n# `tf.distribute.MirroredStrategy` constructor, it will be auto-detected.\nstrategy = tf.distribute.MirroredStrategy()", "_____no_output_____" ], [ "print ('Number of devices: {}'.format(strategy.num_replicas_in_sync))", "_____no_output_____" ] ], [ [ "## Setup input pipeline", "_____no_output_____" ], [ "Export the graph and the variables to the platform-agnostic SavedModel format. After your model is saved, you can load it with or without the scope.", "_____no_output_____" ] ], [ [ "BUFFER_SIZE = len(train_images)\n\nBATCH_SIZE_PER_REPLICA = 64\nGLOBAL_BATCH_SIZE = BATCH_SIZE_PER_REPLICA * strategy.num_replicas_in_sync\n\nEPOCHS = 10", "_____no_output_____" ] ], [ [ "Create the datasets and distribute them:", "_____no_output_____" ] ], [ [ "train_dataset = tf.data.Dataset.from_tensor_slices((train_images, train_labels)).shuffle(BUFFER_SIZE).batch(GLOBAL_BATCH_SIZE) \ntest_dataset = tf.data.Dataset.from_tensor_slices((test_images, test_labels)).batch(GLOBAL_BATCH_SIZE) \n\ntrain_dist_dataset = strategy.experimental_distribute_dataset(train_dataset)\ntest_dist_dataset = strategy.experimental_distribute_dataset(test_dataset)", "_____no_output_____" ] ], [ [ "## Create the model\n\nCreate a model using `tf.keras.Sequential`. 
You can also use the Model Subclassing API to do this.", "_____no_output_____" ] ], [ [ "def create_model():\n model = tf.keras.Sequential([\n tf.keras.layers.Conv2D(32, 3, activation='relu'),\n tf.keras.layers.MaxPooling2D(),\n tf.keras.layers.Conv2D(64, 3, activation='relu'),\n tf.keras.layers.MaxPooling2D(),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(64, activation='relu'),\n tf.keras.layers.Dense(10)\n ])\n\n return model", "_____no_output_____" ], [ "# Create a checkpoint directory to store the checkpoints.\ncheckpoint_dir = './training_checkpoints'\ncheckpoint_prefix = os.path.join(checkpoint_dir, \"ckpt\")", "_____no_output_____" ] ], [ [ "## Define the loss function\n\nNormally, on a single machine with 1 GPU/CPU, loss is divided by the number of examples in the batch of input.\n\n*So, how should the loss be calculated when using a `tf.distribute.Strategy`?*\n\n* For an example, let's say you have 4 GPU's and a batch size of 64. One batch of input is distributed\nacross the replicas (4 GPUs), each replica getting an input of size 16.\n\n* The model on each replica does a forward pass with its respective input and calculates the loss. Now, instead of dividing the loss by the number of examples in its respective input (BATCH_SIZE_PER_REPLICA = 16), the loss should be divided by the GLOBAL_BATCH_SIZE (64).\n\n*Why do this?*\n\n* This needs to be done because after the gradients are calculated on each replica, they are synced across the replicas by **summing** them.\n\n*How to do this in TensorFlow?*\n* If you're writing a custom training loop, as in this tutorial, you should sum the per example losses and divide the sum by the GLOBAL_BATCH_SIZE: \n`scale_loss = tf.reduce_sum(loss) * (1. 
/ GLOBAL_BATCH_SIZE)`\nor you can use `tf.nn.compute_average_loss` which takes the per example loss,\noptional sample weights, and GLOBAL_BATCH_SIZE as arguments and returns the scaled loss.\n\n* If you are using regularization losses in your model then you need to scale\nthe loss value by number of replicas. You can do this by using the `tf.nn.scale_regularization_loss` function.\n\n* Using `tf.reduce_mean` is not recommended. Doing so divides the loss by actual per replica batch size which may vary step to step.\n\n* This reduction and scaling is done automatically in keras `model.compile` and `model.fit`\n\n* If using `tf.keras.losses` classes (as in the example below), the loss reduction needs to be explicitly specified to be one of `NONE` or `SUM`. `AUTO` and `SUM_OVER_BATCH_SIZE` are disallowed when used with `tf.distribute.Strategy`. `AUTO` is disallowed because the user should explicitly think about what reduction they want to make sure it is correct in the distributed case. `SUM_OVER_BATCH_SIZE` is disallowed because currently it would only divide by per replica batch size, and leave the dividing by number of replicas to the user, which might be easy to miss. So instead we ask the user do the reduction themselves explicitly.", "_____no_output_____" ] ], [ [ "with strategy.scope():\n # Set reduction to `none` so we can do the reduction afterwards and divide by\n # global batch size.\n loss_object = tf.keras.losses.SparseCategoricalCrossentropy(\n from_logits=True,\n reduction=tf.keras.losses.Reduction.NONE)\n def compute_loss(labels, predictions):\n per_example_loss = loss_object(labels, predictions)\n return tf.nn.compute_average_loss(per_example_loss, global_batch_size=GLOBAL_BATCH_SIZE)", "_____no_output_____" ] ], [ [ "## Define the metrics to track loss and accuracy\n\nThese metrics track the test loss and training and test accuracy. 
You can use `.result()` to get the accumulated statistics at any time.", "_____no_output_____" ] ], [ [ "with strategy.scope():\n test_loss = tf.keras.metrics.Mean(name='test_loss')\n\n train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(\n name='train_accuracy')\n test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(\n name='test_accuracy')", "_____no_output_____" ] ], [ [ "## Training loop", "_____no_output_____" ] ], [ [ "# model and optimizer must be created under `strategy.scope`.\nwith strategy.scope():\n model = create_model()\n\n optimizer = tf.keras.optimizers.Adam()\n\n checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model)", "_____no_output_____" ], [ "with strategy.scope():\n def train_step(inputs):\n images, labels = inputs\n\n with tf.GradientTape() as tape:\n predictions = model(images, training=True)\n loss = compute_loss(labels, predictions)\n\n gradients = tape.gradient(loss, model.trainable_variables)\n optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n\n train_accuracy.update_state(labels, predictions)\n return loss \n\n def test_step(inputs):\n images, labels = inputs\n\n predictions = model(images, training=False)\n t_loss = loss_object(labels, predictions)\n\n test_loss.update_state(t_loss)\n test_accuracy.update_state(labels, predictions)", "_____no_output_____" ], [ "with strategy.scope():\n # `experimental_run_v2` replicates the provided computation and runs it\n # with the distributed input.\n @tf.function\n def distributed_train_step(dataset_inputs):\n per_replica_losses = strategy.experimental_run_v2(train_step,\n args=(dataset_inputs,))\n return strategy.reduce(tf.distribute.ReduceOp.SUM, per_replica_losses,\n axis=None)\n \n @tf.function\n def distributed_test_step(dataset_inputs):\n return strategy.experimental_run_v2(test_step, args=(dataset_inputs,))\n\n for epoch in range(EPOCHS):\n # TRAIN LOOP\n total_loss = 0.0\n num_batches = 0\n for x in train_dist_dataset:\n total_loss += 
distributed_train_step(x)\n num_batches += 1\n train_loss = total_loss / num_batches\n\n # TEST LOOP\n for x in test_dist_dataset:\n distributed_test_step(x)\n\n if epoch % 2 == 0:\n checkpoint.save(checkpoint_prefix)\n\n template = (\"Epoch {}, Loss: {}, Accuracy: {}, Test Loss: {}, \"\n \"Test Accuracy: {}\")\n print (template.format(epoch+1, train_loss,\n train_accuracy.result()*100, test_loss.result(),\n test_accuracy.result()*100))\n\n test_loss.reset_states()\n train_accuracy.reset_states()\n test_accuracy.reset_states()", "_____no_output_____" ] ], [ [ "Things to note in the example above:\n\n* We are iterating over the `train_dist_dataset` and `test_dist_dataset` using a `for x in ...` construct.\n* The scaled loss is the return value of the `distributed_train_step`. This value is aggregated across replicas using the `tf.distribute.Strategy.reduce` call and then across batches by summing the return value of the `tf.distribute.Strategy.reduce` calls.\n* `tf.keras.Metrics` should be updated inside `train_step` and `test_step` that gets executed by `tf.distribute.Strategy.experimental_run_v2`.\n*`tf.distribute.Strategy.experimental_run_v2` returns results from each local replica in the strategy, and there are multiple ways to consume this result. You can do `tf.distribute.Strategy.reduce` to get an aggregated value. 
You can also do `tf.distribute.Strategy.experimental_local_results` to get the list of values contained in the result, one per local replica.\n", "_____no_output_____" ], [ "## Restore the latest checkpoint and test", "_____no_output_____" ], [ "A model checkpointed with a `tf.distribute.Strategy` can be restored with or without a strategy.", "_____no_output_____" ] ], [ [ "eval_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(\n name='eval_accuracy')\n\nnew_model = create_model()\nnew_optimizer = tf.keras.optimizers.Adam()\n\ntest_dataset = tf.data.Dataset.from_tensor_slices((test_images, test_labels)).batch(GLOBAL_BATCH_SIZE)", "_____no_output_____" ], [ "@tf.function\ndef eval_step(images, labels):\n predictions = new_model(images, training=False)\n eval_accuracy(labels, predictions)", "_____no_output_____" ], [ "checkpoint = tf.train.Checkpoint(optimizer=new_optimizer, model=new_model)\ncheckpoint.restore(tf.train.latest_checkpoint(checkpoint_dir))\n\nfor images, labels in test_dataset:\n eval_step(images, labels)\n\nprint ('Accuracy after restoring the saved model without strategy: {}'.format(\n eval_accuracy.result()*100))", "_____no_output_____" ] ], [ [ "## Alternate ways of iterating over a dataset\n\n### Using iterators\n\nIf you want to iterate over a given number of steps and not through the entire dataset you can create an iterator using the `iter` call and explicity call `next` on the iterator. You can choose to iterate over the dataset both inside and outside the tf.function. 
Here is a small snippet demonstrating iteration of the dataset outside the tf.function using an iterator.\n", "_____no_output_____" ] ], [ [ "with strategy.scope():\n for _ in range(EPOCHS):\n total_loss = 0.0\n num_batches = 0\n train_iter = iter(train_dist_dataset)\n\n for _ in range(10):\n total_loss += distributed_train_step(next(train_iter))\n num_batches += 1\n average_train_loss = total_loss / num_batches\n\n template = (\"Epoch {}, Loss: {}, Accuracy: {}\")\n print (template.format(epoch+1, average_train_loss, train_accuracy.result()*100))\n train_accuracy.reset_states()", "_____no_output_____" ] ], [ [ "### Iterating inside a tf.function\nYou can also iterate over the entire input `train_dist_dataset` inside a tf.function using the `for x in ...` construct or by creating iterators like we did above. The example below demonstrates wrapping one epoch of training in a tf.function and iterating over `train_dist_dataset` inside the function.", "_____no_output_____" ] ], [ [ "with strategy.scope():\n @tf.function\n def distributed_train_epoch(dataset):\n total_loss = 0.0\n num_batches = 0\n for x in dataset:\n per_replica_losses = strategy.experimental_run_v2(train_step,\n args=(x,))\n total_loss += strategy.reduce(\n tf.distribute.ReduceOp.SUM, per_replica_losses, axis=None)\n num_batches += 1\n return total_loss / tf.cast(num_batches, dtype=tf.float32)\n\n for epoch in range(EPOCHS):\n train_loss = distributed_train_epoch(train_dist_dataset)\n\n template = (\"Epoch {}, Loss: {}, Accuracy: {}\")\n print (template.format(epoch+1, train_loss, train_accuracy.result()*100))\n\n train_accuracy.reset_states()", "_____no_output_____" ] ], [ [ "### Tracking training loss across replicas\n\nNote: As a general rule, you should use `tf.keras.Metrics` to track per-sample values and avoid values that have been aggregated within a replica.\n\nWe do *not* recommend using `tf.metrics.Mean` to track the training loss across different replicas, because of the loss scaling 
computation that is carried out.\n\nFor example, if you run a training job with the following characteristics:\n* Two replicas\n* Two samples are processed on each replica\n* Resulting loss values: [2, 3] and [4, 5] on each replica\n* Global batch size = 4\n\nWith loss scaling, you calculate the per-sample value of loss on each replica by adding the loss values, and then dividing by the global batch size. In this case: `(2 + 3) / 4 = 1.25` and `(4 + 5) / 4 = 2.25`. \n\nIf you use `tf.metrics.Mean` to track loss across the two replicas, the result is different. In this example, you end up with a `total` of 3.50 and `count` of 2, which results in `total`/`count` = 1.75 when `result()` is called on the metric. Loss calculated with `tf.keras.Metrics` is scaled by an additional factor that is equal to the number of replicas in sync.", "_____no_output_____" ], [ "### Guide and examples\nHere are some examples for using distribution strategy with custom training loops:\n\n1. [Distributed training guide](../../guide/distributed_training)\n2. [DenseNet](https://github.com/tensorflow/examples/blob/master/tensorflow_examples/models/densenet/distributed_train.py) example using `MirroredStrategy`.\n1. [BERT](https://github.com/tensorflow/models/blob/master/official/nlp/bert/run_classifier.py) example trained using `MirroredStrategy` and `TPUStrategy`.\nThis example is particularly helpful for understanding how to load from a checkpoint and generate periodic checkpoints during distributed training etc.\n2. [NCF](https://github.com/tensorflow/models/blob/master/official/recommendation/ncf_keras_main.py) example trained using `MirroredStrategy` that can be enabled using the `keras_use_ctl` flag.\n3. 
[NMT](https://github.com/tensorflow/examples/blob/master/tensorflow_examples/models/nmt_with_attention/distributed_train.py) example trained using `MirroredStrategy`.\n\nMore examples listed in the [Distribution strategy guide](../../guide/distributed_training.ipynb#examples_and_tutorials)", "_____no_output_____" ], [ "## Next steps\n\nTry out the new `tf.distribute.Strategy` API on your models.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ] ]
4a2bea427bacd8b35a77db31ea3b44b2c9481996
3,319
ipynb
Jupyter Notebook
07-Extra-Content/Big-Data-Google-Colab/day-2/Activities/05-Stu_Pyspark_NLP_Stopwords/Unsolved/nlp_stopwords.ipynb
anirudhmungre/sneaky-lessons
8e48015c50865059db96f8cd369bcc15365d66c7
[ "ADSL" ]
null
null
null
07-Extra-Content/Big-Data-Google-Colab/day-2/Activities/05-Stu_Pyspark_NLP_Stopwords/Unsolved/nlp_stopwords.ipynb
anirudhmungre/sneaky-lessons
8e48015c50865059db96f8cd369bcc15365d66c7
[ "ADSL" ]
null
null
null
07-Extra-Content/Big-Data-Google-Colab/day-2/Activities/05-Stu_Pyspark_NLP_Stopwords/Unsolved/nlp_stopwords.ipynb
anirudhmungre/sneaky-lessons
8e48015c50865059db96f8cd369bcc15365d66c7
[ "ADSL" ]
null
null
null
19.755952
95
0.532389
[ [ [ "\n ## Install Java, Spark, and Findspark\n\n", "_____no_output_____" ] ], [ [ "!apt-get install openjdk-8-jdk-headless -qq > /dev/null\n!wget -q http://www-us.apache.org/dist/spark/spark-2.3.2/spark-2.3.2-bin-hadoop2.7.tgz\n!tar xf spark-2.3.2-bin-hadoop2.7.tgz\n!pip install -q findspark", "_____no_output_____" ] ], [ [ "## Set Environmental Variables", "_____no_output_____" ] ], [ [ "import os\nos.environ[\"JAVA_HOME\"] = \"/usr/lib/jvm/java-8-openjdk-amd64\"\nos.environ[\"SPARK_HOME\"] = \"/content/spark-2.3.2-bin-hadoop2.7\"", "_____no_output_____" ] ], [ [ "## Find Spark and start session", "_____no_output_____" ] ], [ [ "import findspark\nfindspark.init()\n\nfrom pyspark.sql import SparkSession\nspark = SparkSession.builder.appName(\"stu_stop\").getOrCreate()", "_____no_output_____" ], [ "from pyspark.sql import SparkSession\nfrom pyspark.ml.feature import Tokenizer, StopWordsRemover", "_____no_output_____" ], [ "# Read in data from S3 Buckets\nfrom pyspark import SparkFiles\nurl =\"https://s3.amazonaws.com/dataviz-curriculum/day_2/food_reviews.csv\"\nspark.sparkContext.addFile(url)\ndf = spark.read.csv(SparkFiles.get(\"food_reviews.csv\"), sep=\",\", header=True)\ndf.show()", "_____no_output_____" ], [ "# Tokenize dataframe", "_____no_output_____" ], [ "# Transform dataframe", "_____no_output_____" ], [ "# Remove stop words", "_____no_output_____" ], [ "# Transform new dataframe", "_____no_output_____" ], [ "# Show simplified review", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a2bebf0afe61ee4febaef23c37e55d78d9341aa
104,445
ipynb
Jupyter Notebook
module2-intermediate-linear-algebra/Kim_Lowry_Intermediate_Linear_Algebra_Assignment.ipynb
hBar2013/DS-Unit-1-Sprint-4-Statistical-Tests-and-Experiments
21e773e2e657fca9f3d8509ae4caaa170d536406
[ "MIT" ]
null
null
null
module2-intermediate-linear-algebra/Kim_Lowry_Intermediate_Linear_Algebra_Assignment.ipynb
hBar2013/DS-Unit-1-Sprint-4-Statistical-Tests-and-Experiments
21e773e2e657fca9f3d8509ae4caaa170d536406
[ "MIT" ]
null
null
null
module2-intermediate-linear-algebra/Kim_Lowry_Intermediate_Linear_Algebra_Assignment.ipynb
hBar2013/DS-Unit-1-Sprint-4-Statistical-Tests-and-Experiments
21e773e2e657fca9f3d8509ae4caaa170d536406
[ "MIT" ]
null
null
null
55.320445
9,914
0.633922
[ [ [ "<a href=\"https://colab.research.google.com/github/hBar2013/DS-Unit-1-Sprint-4-Statistical-Tests-and-Experiments/blob/master/module2-intermediate-linear-algebra/Kim_Lowry_Intermediate_Linear_Algebra_Assignment.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "# Statistics", "_____no_output_____" ] ], [ [ "import numpy as np", "_____no_output_____" ] ], [ [ "## 1.1 Sales for the past week was the following amounts: [3505, 2400, 3027, 2798, 3700, 3250, 2689]. Without using library functions, what is the mean, variance, and standard deviation of of sales from last week? (for extra bonus points, write your own function that can calculate these two values for any sized list)", "_____no_output_____" ] ], [ [ "sales = np.array([3505, 2400, 3027, 2798, 3700, 3250, 2689])\nlength = len(sales)\n", "_____no_output_____" ], [ "def mean_var_stdev(data):\n sales_mean = sum(data)/length\n for num in data:\n vnom = sum((data - sales_mean)**2)\n sales_var = vnom / length\n sales_stdev = sales_var ** 0.5\n return sales_mean, sales_var, sales_stdev\n \n \n ", "_____no_output_____" ], [ "mean_var_stdev(sales)", "_____no_output_____" ] ], [ [ "## 1.2 Find the covariance between last week's sales numbers and the number of customers that entered the store last week: [127, 80, 105, 92, 120, 115, 93] (you may use librray functions for calculating the covariance since we didn't specifically talk about its formula)", "_____no_output_____" ] ], [ [ "customers = np.array([127, 80, 105, 92, 120, 115, 93])", "_____no_output_____" ], [ "cov_sc = np.cov(sales, customers)\ncov_sc", "_____no_output_____" ], [ "", "_____no_output_____" ], [ "", "_____no_output_____" ] ], [ [ "## 1.3 Find the standard deviation of customers who entered the store last week. 
Then, use the standard deviations of both sales and customers to standardize the covariance to find the correlation coefficient that summarizes the relationship between sales and customers. (You may use library functions to check your work.)", "_____no_output_____" ] ], [ [ "length = len(customers)", "_____no_output_____" ], [ "mean_var_stdev(customers)", "_____no_output_____" ], [ "corr_sc = np.corrcoef(sales, customers)\ncorr_sc", "_____no_output_____" ] ], [ [ "## 1.4 Use pandas to import a cleaned version of the titanic dataset from the following link: [Titanic Dataset](https://raw.githubusercontent.com/Geoyi/Cleaning-Titanic-Data/master/titanic_clean.csv)\n\n## Calculate the variance-covariance matrix and correlation matrix for the titanic dataset's numeric columns. (you can encode some of the categorical variables and include them as a stretch goal if you finish early)", "_____no_output_____" ] ], [ [ "import pandas as pd", "_____no_output_____" ], [ "file_url = 'https://raw.githubusercontent.com/Geoyi/Cleaning-Titanic-Data/master/titanic_clean.csv'\ntitanic = pd.read_csv(file_url)", "_____no_output_____" ], [ "titanic.head()", "_____no_output_____" ], [ "titanic.cov()", "_____no_output_____" ], [ "titanic.corr()", "_____no_output_____" ] ], [ [ "# Orthogonality", "_____no_output_____" ], [ "## 2.1 Plot two vectors that are orthogonal to each other. What is a synonym for orthogonal?", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt", "_____no_output_____" ], [ "dp = np.dot(vector_1, vector_2)\ndp", "_____no_output_____" ], [ "vector_1 = [2, 4]\nvector_2 = [-2, 1]\n\n# Plot the Scaled Vectors\nplt.arrow(0,0, vector_1[0], vector_1[1],head_width=.05, head_length=0.05, color ='red')\nplt.arrow(0,0, vector_2[0], vector_2[1],head_width=.05, head_length=0.05, color ='green')\nplt.xlim(-4,5) \nplt.ylim(-4,5)\nplt.title(\"Orthogonal Vectors\")\nplt.show()", "_____no_output_____" ] ], [ [ "## 2.2 Are the following vectors orthogonal? 
Why or why not?\n\n\\begin{align}\na = \\begin{bmatrix} -5 \\\\ 3 \\\\ 7 \\end{bmatrix}\n\\qquad\nb = \\begin{bmatrix} 6 \\\\ -8 \\\\ 2 \\end{bmatrix}\n\\end{align}", "_____no_output_____" ] ], [ [ "vector_a = [-5,3,7]\nvector_b = [6,-8,2]\nab_dp = np.dot(vector_a, vector_b)\nab_dp", "_____no_output_____" ] ], [ [ "Not orthagonal as the dot product of the vectors does not == zero", "_____no_output_____" ], [ "## 2.3 Compute the following values: What do these quantities have in common?\n\n## What is $||c||^2$? \n\n## What is $c \\cdot c$? \n\n## What is $c^{T}c$?\n\n\\begin{align}\nc = \\begin{bmatrix} 2 & -15 & 6 & 20 \\end{bmatrix}\n\\end{align}", "_____no_output_____" ] ], [ [ "from numpy import linalg as LA", "_____no_output_____" ], [ "𝑐 = np.array([2,-15, 6, 20])", "_____no_output_____" ], [ "dp_c = np.dot(c,c)\ndp_c", "_____no_output_____" ], [ "norm_c = LA.norm(c)\nnorm_c", "_____no_output_____" ], [ "norm_c_sq = norm_c**2\nnorm_c_sq", "_____no_output_____" ], [ "cTxC = np.matmul(c.T,c)\ncTxC", "_____no_output_____" ] ], [ [ "# Unit Vectors", "_____no_output_____" ], [ "## 3.1 Using Latex, write the following vectors as a linear combination of scalars and unit vectors:\n\n\\begin{align}\nd = \\begin{bmatrix} 7 \\\\ 12 \\end{bmatrix}\n\\qquad\ne = \\begin{bmatrix} 2 \\\\ 11 \\\\ -8 \\end{bmatrix}\n\\end{align}", "_____no_output_____" ], [ "||d|| = 13.89\n||e|| = 13.74\n\n\\begin{align}\nd-hat = \\begin{bmatrix} 0.49\\\\ 0.84 \\end{bmatrix}\n\\end{align}\n\n\\begin{align}\nd-hat = 0.49\\begin{bmatrix} 1\\\\ 0 \\end{bmatrix}, 0.84\\begin{bmatrix} 0\\\\ 1 \\end{bmatrix}\n\\end{align}", "_____no_output_____" ], [ "\\begin{align}e -hat = \\begin{bmatrix} 0.14 \\\\ 0.79 \\\\ -0.58 \\end{bmatrix}\n\\end{align}\n\n\\begin{align}\ne-hat = 0.14\\begin{bmatrix} 1\\\\ 0\\\\0 \\end{bmatrix}, 0.79\\begin{bmatrix} 0\\\\ 1\\\\0 \\end{bmatrix}, -0.58\\begin{bmatrix} 0\\\\ 0\\\\1 \\end{bmatrix}\n\\end{align}", "_____no_output_____" ], [ "## 3.2 Turn vector $f$ into a unit 
vector:\n\n\\begin{align}\nf = \\begin{bmatrix} 4 & 12 & 11 & 9 & 2 \\end{bmatrix}\n\\end{align}", "_____no_output_____" ] ], [ [ "f = np.array([4, 12, 11, 9, 2])\nnorm_f = LA.norm(f)\ninv_norm_f = 1/norm_f\nunit_f = np.multiply(inv_norm_f,f)\nunit_f", "_____no_output_____" ] ], [ [ "# Linear Independence / Dependence ", "_____no_output_____" ], [ "## 4.1 Plot two vectors that are linearly dependent and two vectors that are linearly independent (bonus points if done in $\\mathbb{R}^3$).", "_____no_output_____" ] ], [ [ "vector_1 = [2, 4]\nvector_2 = [-2, 1]\n\nplt.arrow(0,0, vector_1[0], vector_1[1],head_width=.05, head_length=0.05, color ='red')\nplt.arrow(0,0, vector_2[0], vector_2[1],head_width=.05, head_length=0.05, color ='green')\nplt.xlim(-4,5) \nplt.ylim(-4,5)\nplt.title(\"Linearly Independent\")\nplt.show()", "_____no_output_____" ], [ "vector_g = [1, 2]\nvector_h = [4, 8]\n\nplt.arrow(0,0, vector_g[0], vector_1[1],head_width=.05, head_length=0.05, color ='blue')\nplt.arrow(0,0, vector_h[0], vector_2[1],head_width=.05, head_length=0.05, color ='orange')\nplt.xlim(-1,10) \nplt.ylim(-1,10)\nplt.title(\"Linearly Dependent\")\nplt.show()", "_____no_output_____" ] ], [ [ "I have no idea what's going on with my colors. 
Happened yesterday also", "_____no_output_____" ], [ "# Span", "_____no_output_____" ], [ "## 5.1 What is the span of the following vectors?\n\n\\begin{align}\ng = \\begin{bmatrix} 1 & 2 \\end{bmatrix}\n\\qquad\nh = \\begin{bmatrix} 4 & 8 \\end{bmatrix}\n\\end{align}", "_____no_output_____" ], [ "you can see that the span is 1, because h is just g scaled by 4, also see above for the plot", "_____no_output_____" ], [ "## 5.2 What is the span of $\\{l, m, n\\}$?\n\n\\begin{align}\nl = \\begin{bmatrix} 1 & 2 & 3 \\end{bmatrix}\n\\qquad\nm = \\begin{bmatrix} -1 & 0 & 7 \\end{bmatrix}\n\\qquad\nn = \\begin{bmatrix} 4 & 8 & 2\\end{bmatrix}\n\\end{align}", "_____no_output_____" ], [ "The rank is 3 so therefore the span is also 3 and so all 3 equations are required to describe the solution space. (ie there are no linearly dependent rows)", "_____no_output_____" ] ], [ [ "M = np.array([[1,-1,4],\n [2,0,8],\n [3,7,2]])", "_____no_output_____" ], [ "M_rank = LA.matrix_rank(M)\nM_rank", "_____no_output_____" ] ], [ [ "# Basis", "_____no_output_____" ], [ "## 6.1 Graph two vectors that form a basis for $\\mathbb{R}^2$\n\n", "_____no_output_____" ] ], [ [ "vector_1 = [2, 4]\nvector_2 = [-2, 1]\n\nplt.arrow(0,0, vector_1[0], vector_1[1],head_width=.05, head_length=0.05, color ='red')\nplt.arrow(0,0, vector_2[0], vector_2[1],head_width=.05, head_length=0.05, color ='green')\nplt.xlim(-4,5) \nplt.ylim(-4,5)\nplt.title(\"Linearly Independent\")\nplt.show()", "_____no_output_____" ] ], [ [ "Two vectors form a basis for 2D when they are linearly independent. They can be scaled and used as a basis set of vectors to represent the entire plane they lie in. 
In the case above these vectors form an orthagonal basis.", "_____no_output_____" ], [ "## 6.2 What does it mean to form a basis?", "_____no_output_____" ], [ "^^^^^ See above", "_____no_output_____" ], [ "# Rank", "_____no_output_____" ], [ "## 7.1 What is the Rank of P?\n\n\\begin{align}\nP = \\begin{bmatrix} \n1 & 2 & 3 \\\\\n -1 & 0 & 7 \\\\\n4 & 8 & 2\n\\end{bmatrix}\n\\end{align}", "_____no_output_____" ] ], [ [ "P = np.array([[1,2,3],\n [-1,0,7],\n [4,8,9]])", "_____no_output_____" ], [ "P_rank = LA.matrix_rank(P)\nP_rank", "_____no_output_____" ] ], [ [ "## 7.2 What does the rank of a matrix tell us?", "_____no_output_____" ], [ "The matrix cannot be reduced as all rows are linearly independent. All 3 are required to describe the solution space", "_____no_output_____" ], [ "# Linear Projections\n\n## 8.1 Line $L$ is formed by all of the vectors that can be created by scaling vector $v$ \n\\begin{align}\nv = \\begin{bmatrix} 1 & 3 \\end{bmatrix}\n\\end{align}\n\n\\begin{align}\nw = \\begin{bmatrix} -1 & 2 \\end{bmatrix}\n\\end{align}\n\n## find $proj_{L}(w)$\n\n## graph your projected vector to check your work (make sure your axis are square/even)", "_____no_output_____" ] ], [ [ "", "_____no_output_____" ] ], [ [ "# Stretch Goal\n\n## For vectors that begin at the origin, the coordinates of where the vector ends can be interpreted as regular data points. (See 3Blue1Brown videos about Spans, Basis, etc.)\n\n## Write a function that can calculate the linear projection of each point (x,y) (vector) onto the line y=x. run the function and plot the original points in blue and the new projected points on the line y=x in red. 
\n\n## For extra points plot the orthogonal vectors as a dashed line from the original blue points to the projected red points.", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport matplotlib.pyplot as plt\n\n# Creating a dataframe for you to work with -Feel free to not use the dataframe if you don't want to.\nx_values = [1, 4, 7, 3, 9, 4, 5 ]\ny_values = [4, 2, 5, 0, 8, 2, 8]\n\ndata = {\"x\": x_values, \"y\": y_values}\n\ndf = pd.DataFrame(data)\n\ndf.head()\n\nplt.scatter(df.x, df.y)\nplt.show()", "_____no_output_____" ], [ "", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
4a2befc2038f4073607afe3683323e5f5b3db577
1,025
ipynb
Jupyter Notebook
i18n/locales/ja/ch-upcoming/0.ipynb
duartefrazao/qiskit-textbook
bce4bde0823e1f0c5a96776179c029f09e47b353
[ "Apache-2.0" ]
526
2020-06-21T16:38:52.000Z
2022-03-30T00:42:43.000Z
i18n/locales/ja/ch-upcoming/0.ipynb
duartefrazao/qiskit-textbook
bce4bde0823e1f0c5a96776179c029f09e47b353
[ "Apache-2.0" ]
602
2020-06-19T17:09:23.000Z
2022-03-31T08:54:55.000Z
i18n/locales/ja/ch-upcoming/0.ipynb
duartefrazao/qiskit-textbook
bce4bde0823e1f0c5a96776179c029f09e47b353
[ "Apache-2.0" ]
512
2020-06-19T20:29:13.000Z
2022-03-31T11:49:39.000Z
18.636364
41
0.559024
[ [ [ "# 今後更新されるトピック", "_____no_output_____" ], [ "以下のトピックがテキストブックに追加される予定で現在作業中です。", "_____no_output_____" ], [ "1. ショアのアルゴリズム\n2. NISQハードウェア向けの量子アルゴリズムの全体像\n3. 超伝導量子コンピューターへのイジングモデルのマッピング\n4. QAOAを使った組み合わせ最適化問題の解法\n5. HHLを使った線形方程式の解法\n6. BB84を使った通信のセキュリティ\n7. デコヒーレンスとエネルギー緩和:T2とT1の測定\n8. 高いフィデリティーの量子ビット操作のためのマイクロ波最適化\n9. 状態とプロセスのトモグラフィー", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown", "markdown", "markdown" ] ]
4a2c0e20ce9ce2c473c5a99f75cc5d736d932141
4,890
ipynb
Jupyter Notebook
examples/Adaptive sampling example using a mock device object.ipynb
ZW7436/PycQED_py3
dcc19dbaedd226112a2f98a7985dcf2bab2c9734
[ "MIT" ]
1
2019-07-05T13:41:51.000Z
2019-07-05T13:41:51.000Z
examples/Adaptive sampling example using a mock device object.ipynb
ball199578/PycQED_py3
dcc19dbaedd226112a2f98a7985dcf2bab2c9734
[ "MIT" ]
null
null
null
examples/Adaptive sampling example using a mock device object.ipynb
ball199578/PycQED_py3
dcc19dbaedd226112a2f98a7985dcf2bab2c9734
[ "MIT" ]
null
null
null
29.457831
143
0.578732
[ [ [ "%matplotlib inline\nimport adaptive\nimport matplotlib.pyplot as plt\nimport pycqed as pq\nimport numpy as np\nfrom pycqed.measurement import measurement_control\nimport pycqed.measurement.detector_functions as det\nfrom qcodes import station\nstation = station.Station()\n", "_____no_output_____" ] ], [ [ "## Setting up the mock device\n\nMeasurements are controlled through the `MeasurementControl` usually instantiated as `MC`", "_____no_output_____" ] ], [ [ "from pycqed.instrument_drivers.virtual_instruments.mock_device import Mock_Device\nMC = measurement_control.MeasurementControl('MC',live_plot_enabled=True, verbose=True)\nMC.station = station\nstation.add_component(MC)\n\nmock_device = Mock_Device('mock_device')\nmock_device.mw_pow(-20)\nmock_device.res_freq(7.400023457e9)\nmock_device.cw_noise_level(.0005)\nmock_device.acq_delay(.05)\n", "_____no_output_____" ] ], [ [ "## Measuring a resonator using the conventional method\nPoints are chosen on a linspace of 100 points. This is enough to identify the location of the resonator. ", "_____no_output_____" ] ], [ [ "freqs = np.linspace(7.39e9, 7.41e9, 100)\n\nd = det.Function_Detector(mock_device.S21,value_names=['Magn', 'Phase'], \n value_units=['V', 'deg'])\nMC.set_sweep_function(mock_device.mw_freq)\nMC.set_sweep_points(freqs)\nMC.set_detector_function(d)\ndat=MC.run('test')", "_____no_output_____" ] ], [ [ "## Using 1D adaptive sampler from the MC \n\nThis can also be done using an adaptive `Leaner1D` object, chosing 100 points optimally in the interval. 
", "_____no_output_____" ] ], [ [ "mock_device.acq_delay(.05)", "_____no_output_____" ], [ "\nd = det.Function_Detector(mock_device.S21, value_names=['Magn', 'Phase'], value_units=['V', 'deg'])\n\nMC.set_sweep_function(mock_device.mw_freq)\nMC.set_detector_function(d)\nMC.set_adaptive_function_parameters({'adaptive_function': adaptive.Learner1D, \n 'goal':lambda l: l.npoints>100, \n 'bounds':(7.39e9, 7.41e9)})\ndat = MC.run(mode='adaptive')\nfrom pycqed.analysis import measurement_analysis as ma\n# ma.Homodyne_Analysis(close_fig=False, label='M')", "_____no_output_____" ] ], [ [ "## Two D learner\n\nThe learner can also be used to adaptively sample a 2D /heatmap type experiment. \nHowever, currently we do not have easy plotting function for that and we still need to rely on the adaptive Learner plotting methods. \n\nIt would be great to have this working with a realtime pyqtgraph based plotting window so that we can use this without the notebooks. ", "_____no_output_____" ] ], [ [ "d = det.Function_Detector(mock_device.S21, value_names=['Magn', 'Phase'], value_units=['V', 'deg'])\nMC.set_sweep_function(mock_device.mw_freq)\nMC.set_sweep_function_2D(mock_device.mw_pow)\nMC.set_detector_function(d)\nMC.set_adaptive_function_parameters({'adaptive_function': adaptive.Learner2D, \n 'goal':lambda l: l.npoints>20*20, \n 'bounds':((7.398e9, 7.402e9), \n (-20, -10))})\ndat = MC.run(mode='adaptive')\n\n", "_____no_output_____" ], [ "# Required to be able to use the fancy interpolating plot \nadaptive.notebook_extension()\nMC.learner.plot(tri_alpha=.1)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
4a2c0f73456a546f65014ff794fae9270a3555d6
82,343
ipynb
Jupyter Notebook
Code-Example-015.2--Gradient-Descents-Vectorized.ipynb
waltsco059/Elements-of-Data-Analytics
13c27760474cf51a0aa5318d5b8518a03cddab99
[ "MIT" ]
8
2021-08-30T21:01:20.000Z
2022-03-29T20:32:34.000Z
Code-Example-015.2--Gradient-Descents-Vectorized.ipynb
waltsco059/Elements-of-Data-Analytics
13c27760474cf51a0aa5318d5b8518a03cddab99
[ "MIT" ]
null
null
null
Code-Example-015.2--Gradient-Descents-Vectorized.ipynb
waltsco059/Elements-of-Data-Analytics
13c27760474cf51a0aa5318d5b8518a03cddab99
[ "MIT" ]
9
2021-08-30T06:07:37.000Z
2021-10-12T18:13:26.000Z
125.331811
45,732
0.80512
[ [ [ "import numpy as np\nimport pandas as pd\nimport time\nimport psutil\nimport matplotlib.pyplot as plt \nimport numpy as np\n\n# We create a very simple data set with 5 data items in it. \nsize= 5\n\n# mu, sigma = 100, 5000 # mean and standard deviation\n# error=np.random.normal(mu, sigma, size)", "_____no_output_____" ], [ "x1 = np.arange(0, size)\n# x2 = np.arange(1, size)\nx2 = np.arange(5, 5+size)\n# y = 2.5*x1 + error\ny1=2.5 * x1\ny2 =-1 *x2\n\n# y = 2*x1 + 10* x2\nx = []\nfor i in range(size):\n x.append(np.array([x1[i],x2[i]]))\n\ny = y1 + y2\n\n\nprint(x)\nprint(y)", "[array([0, 5]), array([1, 6]), array([2, 7]), array([3, 8]), array([4, 9])]\n[-5. -3.5 -2. -0.5 1. ]\n" ], [ "import matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\n\nimport matplotlib as mpl\nfrom mpl_toolkits.mplot3d import Axes3D\nimport numpy as np\nimport matplotlib.pyplot as plt\n \nmpl.rcParams['legend.fontsize'] = 10\n \nfig = plt.figure()\nax = fig.gca(projection='3d')\n\nx1 = np.arange(0, size)\n# x2 = np.arange(1, size)\nx2 = np.arange(5, 5+size)\nax.scatter3D(x1, x2, y, label='parametric curve')\nax.legend()\n \nplt.show()", "_____no_output_____" ], [ "learningRate = 0.01\nnum_iteration = 300 \n\n# This is our regression coefficients. \nbeta=np.zeros(2)\n\nn = float(size)\n# print(\"Sample size\", n)\n\n# Let's start with main iterative part of gradient descent algorithm \nfor i in range(num_iteration):\n \n # Calculate the prediction with current regression coefficients. \n cost = 0\n m_gradient = 0\n \n for j in range(size):\n \n y_prediction = np.dot(beta , x[j])\n \n # We compute costs just for monitoring \n cost += ( y[j] - y_prediction)**2\n\n # calculate gradients. sum the gradients for all rows\n m_gradient += x[j] * (y[j] - y_prediction)\n \n m_gradient = (-1.0/n)* m_gradient\n \n print(i , \"beta = \", beta, \" Cost=\", cost)\n \n # update the weights - Regression Coefficients \n beta = beta - learningRate * m_gradient", "0 beta = [0. 0.] 
Cost= 42.5\n1 beta = [-0.01 -0.11] Cost= 33.5645\n2 beta = [-0.0018 -0.1623] Cost= 31.392868550000003\n3 beta = [ 0.014276 -0.189239] Cost= 30.532238746894997\n4 beta = [ 0.03369768 -0.20501127] Cost= 29.932011752353187\n5 beta = [ 0.05447762 -0.21584715] Cost= 29.38965773095855\n6 beta = [ 0.07574451 -0.22448152] Cost= 28.86602394426417\n7 beta = [ 0.09711688 -0.23211507] Cost= 28.353433547975342\n8 beta = [ 0.11842828 -0.23927508] Cost= 27.850275694662844\n9 beta = [ 0.1396066 -0.24619332] Cost= 27.35611045054374\n10 beta = [ 0.16062113 -0.25297178] Cost= 26.87072575002126\n11 beta = [ 0.18145935 -0.25965555] Cost= 26.39395568369584\n12 beta = [ 0.20211668 -0.26626472] Cost= 25.925645452726474\n13 beta = [ 0.22259203 -0.27280838] Cost= 25.465644578160983\n14 beta = [ 0.24288585 -0.27929083] Cost= 25.013805554089178\n15 beta = [ 0.26299923 -0.28571424] Cost= 24.56998355019686\n16 beta = [ 0.28293356 -0.29207986] Cost= 24.134036317153765\n17 beta = [ 0.30269032 -0.2983885 ] Cost= 23.70582413173796\n18 beta = [ 0.32227106 -0.30464082] Cost= 23.28520975026551\n19 beta = [ 0.34167733 -0.31083737] Cost= 22.872058364258823\n20 beta = [ 0.36091067 -0.31697868] Cost= 22.466237557173706\n21 beta = [ 0.37997262 -0.32306526] Cost= 22.06761726194651\n22 beta = [ 0.3988647 -0.3290976] Cost= 21.67606971930476\n23 beta = [ 0.41758843 -0.33507617] Cost= 21.291469436819334\n24 beta = [ 0.43614532 -0.34100148] Cost= 20.913693148683592\n25 beta = [ 0.45453683 -0.34687397] Cost= 20.542619776206212\n26 beta = [ 0.47276446 -0.35269414] Cost= 20.178130389005027\n27 beta = [ 0.49082965 -0.35846244] Cost= 19.820108166889383\n28 beta = [ 0.50873387 -0.36417934] Cost= 19.468438362418862\n29 beta = [ 0.52647853 -0.3698453 ] Cost= 19.12300826412628\n30 beta = [ 0.54406506 -0.37546076] Cost= 18.783707160393252\n31 beta = [ 0.56149488 -0.38102618] Cost= 18.45042630396674\n32 beta = [ 0.57876938 -0.38654201] Cost= 18.12305887710511\n33 beta = [ 0.59588994 -0.39200869] Cost= 17.80149995734269\n34 
beta = [ 0.61285793 -0.39742665] Cost= 17.485646483861707\n35 beta = [ 0.62967472 -0.40279633] Cost= 17.17539722446095\n36 beta = [ 0.64634165 -0.40811815] Cost= 16.870652743110437\n37 beta = [ 0.66286005 -0.41339256] Cost= 16.57131536808183\n38 beta = [ 0.67923126 -0.41861996] Cost= 16.277289160644273\n39 beta = [ 0.69545658 -0.42380078] Cost= 15.98847988431567\n40 beta = [ 0.71153731 -0.42893544] Cost= 15.704794974659567\n41 beta = [ 0.72747474 -0.43402433] Cost= 15.426143509617896\n42 beta = [ 0.74327015 -0.43906788] Cost= 15.152436180370108\n43 beta = [ 0.7589248 -0.44406649] Cost= 14.883585262709392\n44 beta = [ 0.77443995 -0.44902055] Cost= 14.619504588926734\n45 beta = [ 0.78981684 -0.45393046] Cost= 14.360109520193832\n46 beta = [ 0.8050567 -0.45879662] Cost= 14.105316919436074\n47 beta = [ 0.82016076 -0.46361942] Cost= 13.855045124686765\n48 beta = [ 0.83513022 -0.46839924] Cost= 13.609213922914188\n49 beta = [ 0.84996629 -0.47313646] Cost= 13.367744524313029\n50 beta = [ 0.86467014 -0.47783147] Cost= 13.130559537051939\n51 beta = [ 0.87924297 -0.48248464] Cost= 12.897582942469187\n52 beta = [ 0.89368593 -0.48709635] Cost= 12.668740070708397\n53 beta = [ 0.90800019 -0.49166696] Cost= 12.443957576786563\n54 beta = [ 0.9221869 -0.49619684] Cost= 12.223163417086738\n55 beta = [ 0.93624718 -0.50068636] Cost= 12.006286826267772\n56 beta = [ 0.95018216 -0.50513586] Cost= 11.793258294583762\n57 beta = [ 0.96399297 -0.50954572] Cost= 11.584009545605948\n58 beta = [ 0.97768071 -0.51391628] Cost= 11.378473514339813\n59 beta = [ 0.99124647 -0.51824789] Cost= 11.17658432573056\n60 beta = [ 1.00469134 -0.5225409 ] Cost= 10.978277273549885\n61 beta = [ 1.01801641 -0.52679566] Cost= 10.783488799657393\n62 beta = [ 1.03122273 -0.5310125 ] Cost= 10.592156473629993\n63 beta = [ 1.04431136 -0.53519176] Cost= 10.404218972752712\n64 beta = [ 1.05728336 -0.53933378] Cost= 10.219616062364526\n65 beta = [ 1.07013977 -0.54343889] Cost= 10.03828857655295\n66 beta = [ 1.0828816 
-0.54750742] Cost= 9.860178399191135\n67 beta = [ 1.09550989 -0.55153969] Cost= 9.68522844531143\n68 beta = [ 1.10802565 -0.55553603] Cost= 9.513382642809459\n69 beta = [ 1.12042988 -0.55949676] Cost= 9.344585914472777\n70 beta = [ 1.13272357 -0.56342219] Cost= 9.178784160328448\n71 beta = [ 1.1449077 -0.56731265] Cost= 9.015924240303782\n72 beta = [ 1.15698326 -0.57116843] Cost= 8.855953957194762\n73 beta = [ 1.16895122 -0.57498985] Cost= 8.698822039936644\n74 beta = [ 1.18081252 -0.57877722] Cost= 8.544478127171383\n75 beta = [ 1.19256813 -0.58253084] Cost= 8.392872751106648\n76 beta = [ 1.20421897 -0.58625101] Cost= 8.243957321661192\n77 beta = [ 1.215766 -0.58993803] Cost= 8.097684110891583\n78 beta = [ 1.22721012 -0.59359219] Cost= 7.954006237695181\n79 beta = [ 1.23855227 -0.59721379] Cost= 7.812877652784607\n80 beta = [ 1.24979334 -0.60080312] Cost= 7.674253123928768\n81 beta = [ 1.26093424 -0.60436046] Cost= 7.538088221455737\n82 beta = [ 1.27197586 -0.6078861 ] Cost= 7.404339304012927\n83 beta = [ 1.28291908 -0.61138033] Cost= 7.272963504579826\n84 beta = [ 1.29376479 -0.61484341] Cost= 7.143918716728992\n85 beta = [ 1.30451385 -0.61827564] Cost= 7.017163581130775\n86 beta = [ 1.31516712 -0.62167728] Cost= 6.892657472297501\n87 beta = [ 1.32572546 -0.62504861] Cost= 6.770360485562864\n88 beta = [ 1.33618971 -0.62838989] Cost= 6.650233424292315\n89 beta = [ 1.34656071 -0.6317014 ] Cost= 6.532237787320409\n90 beta = [ 1.35683929 -0.6349834 ] Cost= 6.4163357566110335\n91 beta = [ 1.36702627 -0.63823615] Cost= 6.302490185136601\n92 beta = [ 1.37712248 -0.64145992] Cost= 6.190664584972269\n93 beta = [ 1.38712872 -0.64465496] Cost= 6.0808231156014445\n94 beta = [ 1.39704579 -0.64782152] Cost= 5.97293057242876\n95 beta = [ 1.40687449 -0.65095987] Cost= 5.866952375496865\n96 beta = [ 1.4166156 -0.65407026] Cost= 5.762854558403437\n97 beta = [ 1.4262699 -0.65715292] Cost= 5.660603757414804\n98 beta = [ 1.43583818 -0.66020812] Cost= 5.560167200772762\n99 beta = [ 
1.44532118 -0.66323608] Cost= 5.461512698191104\n100 beta = [ 1.45471969 -0.66623707] Cost= 5.364608630538501\n101 beta = [ 1.46403444 -0.66921131] Cost= 5.269423939704475\n102 beta = [ 1.47326618 -0.67215905] Cost= 5.175928118645138\n103 beta = [ 1.48241566 -0.67508053] Cost= 5.084091201605595\n104 beta = [ 1.4914836 -0.67797596] Cost= 4.993883754515789\n105 beta = [ 1.50047074 -0.6808456 ] Cost= 4.905276865556782\n106 beta = [ 1.50937779 -0.68368966] Cost= 4.818242135894412\n107 beta = [ 1.51820547 -0.68650838] Cost= 4.732751670577363\n108 beta = [ 1.52695448 -0.68930198] Cost= 4.648778069596722\n109 beta = [ 1.53562553 -0.69207069] Cost= 4.566294419104188\n110 beta = [ 1.54421931 -0.69481472] Cost= 4.485274282786073\n111 beta = [ 1.55273651 -0.6975343 ] Cost= 4.4056916933903665\n112 beta = [ 1.5611778 -0.70022965] Cost= 4.327521144404146\n113 beta = [ 1.56954388 -0.70290098] Cost= 4.250737581878639\n114 beta = [ 1.5778354 -0.7055485] Cost= 4.175316396399339\n115 beta = [ 1.58605304 -0.70817243] Cost= 4.101233415198597\n116 beta = [ 1.59419745 -0.71077298] Cost= 4.028464894408167\n117 beta = [ 1.60226927 -0.71335035] Cost= 3.9569875114492\n118 beta = [ 1.61026917 -0.71590476] Cost= 3.8867783575572807\n119 beta = [ 1.61819778 -0.7184364 ] Cost= 3.817814930440073\n120 beta = [ 1.62605574 -0.72094548] Cost= 3.750075127065266\n121 beta = [ 1.63384367 -0.7234322 ] Cost= 3.683537236576457\n122 beta = [ 1.64156221 -0.72589677] Cost= 3.6181799333347553\n123 beta = [ 1.64921196 -0.72833937] Cost= 3.5539822700838233\n124 beta = [ 1.65679354 -0.7307602 ] Cost= 3.4909236712362146\n125 beta = [ 1.66430756 -0.73315947] Cost= 3.4289839262788173\n126 beta = [ 1.67175462 -0.73553735] Cost= 3.3681431832953095\n127 beta = [ 1.67913532 -0.73789404] Cost= 3.30838194260355\n128 beta = [ 1.68645025 -0.74022973] Cost= 3.249681050505857\n129 beta = [ 1.69369999 -0.74254461] Cost= 3.1920216931501724\n130 beta = [ 1.70088513 -0.74483886] Cost= 3.1353853905001667\n131 beta = [ 1.70800623 
-0.74711266] Cost= 3.0797539904123057\n132 beta = [ 1.71506389 -0.7493662 ] Cost= 3.025109662818023\n133 beta = [ 1.72205864 -0.75159966] Cost= 2.9714348940091257\n134 beta = [ 1.72899107 -0.75381322] Cost= 2.918712481024584\n135 beta = [ 1.73586172 -0.75600705] Cost= 2.8669255261369058\n136 beta = [ 1.74267115 -0.75818133] Cost= 2.8160574314363735\n137 beta = [ 1.74941989 -0.76033623] Cost= 2.7660918935113408\n138 beta = [ 1.75610849 -0.76247194] Cost= 2.717012898222926\n139 beta = [ 1.76273749 -0.76458861] Cost= 2.668804715572433\n140 beta = [ 1.76930742 -0.76668642] Cost= 2.6214518946598173\n141 beta = [ 1.7758188 -0.76876553] Cost= 2.574939258731625\n142 beta = [ 1.78227216 -0.77082612] Cost= 2.529251900316782\n143 beta = [ 1.78866801 -0.77286834] Cost= 2.484375176448696\n144 beta = [ 1.79500686 -0.77489237] Cost= 2.440294703972129\n145 beta = [ 1.80128923 -0.77689836] Cost= 2.3969963549333504\n146 beta = [ 1.80751562 -0.77888647] Cost= 2.3544662520520676\n147 beta = [ 1.81368651 -0.78085687] Cost= 2.3126907642737122\n148 beta = [ 1.81980242 -0.78280971] Cost= 2.2716565024006323\n149 beta = [ 1.82586383 -0.78474515] Cost= 2.231350314800808\n150 beta = [ 1.83187122 -0.78666333] Cost= 2.1917592831927113\n151 beta = [ 1.83782508 -0.78856443] Cost= 2.1528707185049334\n152 beta = [ 1.84372589 -0.79044858] Cost= 2.1146721568093043\n153 beta = [ 1.84957411 -0.79231595] Cost= 2.077151355326156\n154 beta = [ 1.85537021 -0.79416667] Cost= 2.0402962885004614\n155 beta = [ 1.86111467 -0.7960009 ] Cost= 2.0040951441475996\n156 beta = [ 1.86680793 -0.79781879] Cost= 1.9685363196675154\n157 beta = [ 1.87245046 -0.79962048] Cost= 1.9336084183260347\n158 beta = [ 1.87804271 -0.80140611] Cost= 1.899300245602172\n159 beta = [ 1.88358513 -0.80317583] Cost= 1.8656008056002487\n160 beta = [ 1.88907815 -0.80492978] Cost= 1.8324992975256613\n161 beta = [ 1.89452223 -0.80666809] Cost= 1.7999851122231922\n162 beta = [ 1.89991779 -0.80839092] Cost= 1.7680478287767347\n163 beta = [ 
1.90526527 -0.8100984 ] Cost= 1.7366772111693527\n164 beta = [ 1.91056509 -0.81179066] Cost= 1.7058632050025946\n165 beta = [ 1.91581769 -0.81346784] Cost= 1.6755959342740279\n166 beta = [ 1.92102349 -0.81513007] Cost= 1.6458656982119384\n167 beta = [ 1.92618289 -0.81677749] Cost= 1.616662968166204\n168 beta = [ 1.93129631 -0.81841023] Cost= 1.587978384554318\n169 beta = [ 1.93636417 -0.82002842] Cost= 1.55980275386162\n170 beta = [ 1.94138687 -0.8216322 ] Cost= 1.5321270456947265\n171 beta = [ 1.94636481 -0.82322168] Cost= 1.504942389887268\n172 beta = [ 1.95129839 -0.82479699] Cost= 1.478240073656968\n173 beta = [ 1.956188 -0.82635827] Cost= 1.4520115388131516\n174 beta = [ 1.96103405 -0.82790563] Cost= 1.426248379013833\n175 beta = [ 1.96583691 -0.82943921] Cost= 1.4009423370714345\n176 beta = [ 1.97059696 -0.83095912] Cost= 1.3760853023063362\n177 beta = [ 1.9753146 -0.83246548] Cost= 1.35166930794737\n178 beta = [ 1.97999021 -0.83395842] Cost= 1.3276865285784503\n179 beta = [ 1.98462414 -0.83543806] Cost= 1.304129277630478\n180 beta = [ 1.98921678 -0.83690451] Cost= 1.2809900049177925\n181 beta = [ 1.9937685 -0.8383579] Cost= 1.2582612942182874\n182 beta = [ 1.99827965 -0.83979833] Cost= 1.2359358608964965\n183 beta = [ 2.0027506 -0.84122593] Cost= 1.2140065495688377\n184 beta = [ 2.00718172 -0.8426408 ] Cost= 1.1924663318102886\n185 beta = [ 2.01157334 -0.84404307] Cost= 1.171308303901744\n186 beta = [ 2.01592583 -0.84543284] Cost= 1.1505256846173566\n187 beta = [ 2.02023953 -0.84681022] Cost= 1.1301118130511267\n188 beta = [ 2.0245148 -0.84817533] Cost= 1.1100601464820474\n189 beta = [ 2.02875196 -0.84952828] Cost= 1.0903642582771584\n190 beta = [ 2.03295137 -0.85086917] Cost= 1.0710178358317672\n191 beta = [ 2.03711336 -0.85219811] Cost= 1.0520146785462487\n192 beta = [ 2.04123825 -0.85351521] Cost= 1.0333486958387215\n193 beta = [ 2.04532639 -0.85482057] Cost= 1.0150139051929994\n194 beta = [ 2.0493781 -0.8561143] Cost= 0.9970044302411729\n195 beta = [ 
2.0533937 -0.85739651] Cost= 0.9793144988802087\n196 beta = [ 2.05737352 -0.85866728] Cost= 0.9619384414219714\n197 beta = [ 2.06131788 -0.85992673] Cost= 0.9448706887760667\n198 beta = [ 2.06522708 -0.86117496] Cost= 0.9281057706649283\n199 beta = [ 2.06910145 -0.86241206] Cost= 0.9116383138705729\n200 beta = [ 2.07294129 -0.86363814] Cost= 0.8954630405124657\n201 beta = [ 2.07674692 -0.8648533 ] Cost= 0.8795747663559357\n202 beta = [ 2.08051863 -0.86605762] Cost= 0.8639683991506172\n203 beta = [ 2.08425673 -0.86725122] Cost= 0.8486389369983587\n204 beta = [ 2.08796152 -0.86843417] Cost= 0.8335814667500958\n205 beta = [ 2.0916333 -0.86960659] Cost= 0.8187911624311728\n206 beta = [ 2.09527235 -0.87076856] Cost= 0.8042632836945989\n207 beta = [ 2.09887898 -0.87192017] Cost= 0.7899931743017461\n208 beta = [ 2.10245347 -0.87306152] Cost= 0.7759762606300115\n209 beta = [ 2.1059961 -0.8741927] Cost= 0.7622080502069521\n210 beta = [ 2.10950717 -0.8753138 ] Cost= 0.7486841302704352\n211 beta = [ 2.11298695 -0.87642491] Cost= 0.7354001663543238\n212 beta = [ 2.11643572 -0.87752612] Cost= 0.7223519008992727\n213 beta = [ 2.11985375 -0.87861751] Cost= 0.7095351518881574\n214 beta = [ 2.12324133 -0.87969918] Cost= 0.6969458115057321\n215 beta = [ 2.12659872 -0.88077121] Cost= 0.684579844822049\n216 beta = [ 2.12992619 -0.88183369] Cost= 0.6724332884992525\n217 beta = [ 2.13322401 -0.8828867 ] Cost= 0.6605022495213204\n218 beta = [ 2.13649244 -0.88393032] Cost= 0.6487829039463275\n219 beta = [ 2.13973174 -0.88496465] Cost= 0.6372714956808678\n220 beta = [ 2.14294218 -0.88598976] Cost= 0.6259643352762077\n221 beta = [ 2.14612401 -0.88700573] Cost= 0.6148577987458042\n222 beta = [ 2.14927749 -0.88801265] Cost= 0.603948326403805\n223 beta = [ 2.15240286 -0.8890106 ] Cost= 0.5932324217241561\n224 beta = [ 2.15550039 -0.88999965] Cost= 0.5827066502199505\n225 beta = [ 2.15857031 -0.89097989] Cost= 0.5723676383426669\n226 beta = [ 2.16161287 -0.8919514 ] Cost= 
0.5622120724009299\n227 beta = [ 2.16462832 -0.89291424] Cost= 0.5522366974984604\n228 beta = [ 2.1676169 -0.89386851] Cost= 0.5424383164908746\n229 beta = [ 2.17057885 -0.89481428] Cost= 0.5328137889609833\n230 beta = [ 2.1735144 -0.89575161] Cost= 0.5233600302122736\n231 beta = [ 2.1764238 -0.89668059] Cost= 0.514074010280259\n232 beta = [ 2.17930726 -0.8976013 ] Cost= 0.5049527529613582\n233 beta = [ 2.18216504 -0.8985138 ] Cost= 0.4959933348590186\n234 beta = [ 2.18499734 -0.89941817] Cost= 0.4871928844467466\n235 beta = [ 2.18780441 -0.90031448] Cost= 0.4785485811477848\n236 beta = [ 2.19058646 -0.9012028 ] Cost= 0.47005765443109576\n237 beta = [ 2.19334372 -0.90208321] Cost= 0.4617173829234049\n238 beta = [ 2.19607641 -0.90295577] Cost= 0.4535250935369845\n239 beta = [ 2.19878475 -0.90382055] Cost= 0.44547816061292206\n240 beta = [ 2.20146895 -0.90467763] Cost= 0.4375740050795852\n241 beta = [ 2.20412924 -0.90552707] Cost= 0.4298100936260252\n242 beta = [ 2.20676581 -0.90636894] Cost= 0.42218393789003184\n243 beta = [ 2.20937889 -0.90720331] Cost= 0.4146930936606137\n244 beta = [ 2.21196869 -0.90803025] Cost= 0.4073351600946139\n245 beta = [ 2.21453541 -0.90884981] Cost= 0.4001077789472326\n246 beta = [ 2.21707925 -0.90966207] Cost= 0.3930086338162002\n247 beta = [ 2.21960043 -0.9104671 ] Cost= 0.386035449399363\n248 beta = [ 2.22209914 -0.91126495] Cost= 0.37918599076544157\n249 beta = [ 2.22457558 -0.91205569] Cost= 0.3724580626377243\n250 beta = [ 2.22702996 -0.91283938] Cost= 0.3658495086904709\n251 beta = [ 2.22946246 -0.91361609] Cost= 0.35935821085781167\n252 beta = [ 2.23187329 -0.91438588] Cost= 0.3529820886548876\n253 beta = [ 2.23426263 -0.91514881] Cost= 0.3467190985110592\n254 beta = [ 2.23663068 -0.91590494] Cost= 0.34056723311492215\n255 beta = [ 2.23897763 -0.91665433] Cost= 0.3345245207709666\n256 beta = [ 2.24130366 -0.91739704] Cost= 0.3285890247676372\n257 beta = [ 2.24360897 -0.91813314] Cost= 0.3227588427566106\n258 beta = [ 2.24589374 
-0.91886267] Cost= 0.3170321061430849\n259 beta = [ 2.24815814 -0.91958571] Cost= 0.3114069794868918\n260 beta = [ 2.25040236 -0.9203023 ] Cost= 0.30588165991422345\n261 beta = [ 2.25262659 -0.9210125 ] Cost= 0.3004543765398131\n262 beta = [ 2.25483099 -0.92171638] Cost= 0.29512338989935766\n263 beta = [ 2.25701576 -0.92241399] Cost= 0.28988699139200913\n264 beta = [ 2.25918105 -0.92310537] Cost= 0.2847435027327655\n265 beta = [ 2.26132705 -0.9237906 ] Cost= 0.27969127541457317\n266 beta = [ 2.26345392 -0.92446972] Cost= 0.2747286901799751\n267 beta = [ 2.26556184 -0.92514279] Cost= 0.26985415650212957\n268 beta = [ 2.26765098 -0.92580986] Cost= 0.26506611207504654\n" ], [ "x1 = np.arange(0, size)\n# x2 = np.arange(1, size)\nx2 = np.arange(5, 5+size)\nx3 = np.arange(2, 2+size)\n\n# y = 2.5*x1 + error\ny1=2.5 * x1\ny2 =-1 *x2\ny3 = 1*x3\n# y = 2*x1 + 10* x2\nx = []\nfor i in range(size):\n x.append(np.array([x1[i],x2[i],x3[i]]))\n\ny = y1+y2+y3\n# plt.plot(x1, y, 'o', markersize=2)\n# plt.show()\n\nprint(x)\n# print(x2)\n# print(error)\nprint(y)\n\n\nlearningRate = 0.01\nnum_iteration = 100 \n\n\n# Now we have 3 variables. \nbeta = np.zeros(3)\n\nn = float(size)\n# print(\"Sample size\", n)\n\n# Let's start with main iterative part of gradient descent algorithm \nfor i in range(num_iteration):\n \n # Calculate the prediction with current regression coefficients. \n cost = 0\n m_gradient = 0\n \n for j in range(size):\n \n y_prediction = np.dot(beta, x[j])\n \n \n # We compute costs just for monitoring \n cost += ( y[j] - y_prediction)**2\n\n # calculate gradients.\n m_gradient += x[j] * (y[j] - y_prediction)\n \n m_gradient = (-1.0/n)* m_gradient\n \n print(i , \"beta=\", beta, \" Cost=\", cost)\n \n # update the weights - Regression Coefficients \n beta = beta - learningRate * m_gradient", "[array([0, 5, 2]), array([1, 6, 3]), array([2, 7, 4]), array([3, 8, 5]), array([4, 9, 6])]\n[-3. -0.5 2. 4.5 7. ]\n0 beta= [0. 0. 0.] 
Cost= 82.5\n1 beta= [0.09 0.19 0.13] Cost= 43.685500000000005\n2 beta= [0.1312 0.2297 0.1706] Cost= 40.27730895\n3 beta= [0.159516 0.230381 0.187862] Cost= 39.283553738855\n4 beta= [0.18429788 0.22100553 0.19898094] Cost= 38.46895382995289\n5 beta= [0.20798103 0.20911077 0.20843292] Cost= 37.68176671515962\n6 beta= [0.23120115 0.19665743 0.21738366] Cost= 36.91140067508757\n7 beta= [0.25412553 0.18415486 0.22613726] Cost= 36.15683229495866\n8 beta= [0.27679949 0.17173462 0.23477354] Cost= 35.41769259524891\n9 beta= [0.29923663 0.15942998 0.24331397] Cost= 34.69366305521507\n10 beta= [0.32144224 0.14724864 0.2517648 ] Cost= 33.98443457273744\n11 beta= [0.34341944 0.13519164 0.26012832] Cost= 33.28970456148909\n12 beta= [0.36517078 0.1232583 0.26840579] Cost= 32.60917663413411\n13 beta= [0.38669863 0.1114475 0.27659818] Cost= 31.942560463166455\n14 beta= [0.40800529 0.09975804 0.28470639] Cost= 31.28957165618875\n15 beta= [0.42909305 0.08818868 0.2927313 ] Cost= 30.649931634526183\n16 beta= [0.44996415 0.07673817 0.30067376] Cost= 30.023367514375096\n17 beta= [0.47062081 0.06540531 0.30853461] Cost= 29.409611990384747\n18 beta= [0.49106525 0.05418889 0.31631471] Cost= 28.80840322161925\n19 beta= [0.51129965 0.0430877 0.32401487] Cost= 28.21948471985076\n20 beta= [0.53132615 0.03210057 0.33163592] Cost= 27.64260524013627\n21 beta= [0.5511469 0.02122632 0.33917867] Cost= 27.07751867363119\n22 beta= [0.570764 0.01046379 0.34664392] Cost= 26.523983942594235\n23 beta= [ 5.90179565e-01 -1.88157527e-04 3.54032476e-01] Cost= 25.98176489753861\n24 beta= [ 0.60939565 -0.01073067 0.36134512] Cost= 25.450630216485656\n25 beta= [ 0.6284143 -0.02116487 0.36858264] Cost= 24.930353306278143\n26 beta= [ 0.64723756 -0.03149186 0.37574579] Cost= 24.420712205910796\n27 beta= [ 0.66586743 -0.04171276 0.38283535] Cost= 23.921489491837164\n28 beta= [ 0.68430589 -0.05182865 0.38985207] Cost= 23.43247218521216\n29 beta= [ 0.70255491 -0.0618406 0.39679671] Cost= 22.95345166103085\n30 beta= [ 
0.72061644 -0.07174969 0.40366999] Cost= 22.484223559124622\n31 beta= [ 0.73849241 -0.08155698 0.41047265] Cost= 22.02458769697693\n32 beta= [ 0.75618471 -0.0912635 0.41720543] Cost= 21.5743479843212\n33 beta= [ 0.77369525 -0.1008703 0.42386903] Cost= 21.133312339484647\n34 beta= [ 0.79102588 -0.11037839 0.43046417] Cost= 20.701292607442202\n35 beta= [ 0.80817845 -0.1197888 0.43699155] Cost= 20.278104479545657\n36 beta= [ 0.8251548 -0.12910253 0.44345187] Cost= 19.863567414893744\n37 beta= [ 0.84195673 -0.13832057 0.44984581] Cost= 19.4575045633096\n38 beta= [ 0.85858603 -0.1474439 0.45617406] Cost= 19.0597426898928\n39 beta= [ 0.87504449 -0.15647349 0.4624373 ] Cost= 18.67011210111373\n40 beta= [ 0.89133385 -0.16541032 0.46863618] Cost= 18.288446572418753\n41 beta= [ 0.90745585 -0.17425533 0.47477138] Cost= 17.91458327731536\n42 beta= [ 0.92341222 -0.18300946 0.48084354] Cost= 17.54836271790699\n43 beta= [ 0.93920464 -0.19167365 0.48685332] Cost= 17.189628656847884\n44 beta= [ 0.95483482 -0.20024883 0.49280136] Cost= 16.83822805068897\n45 beta= [ 0.9703044 -0.2087359 0.49868828] Cost= 16.49401098458632\n46 beta= [ 0.98561506 -0.21713578 0.50451472] Cost= 16.156830608344368\n47 beta= [ 1.00076841 -0.22544936 0.5102813 ] Cost= 15.826543073766514\n48 beta= [ 1.01576607 -0.23367752 0.51598863] Cost= 15.503007473286502\n49 beta= [ 1.03060964 -0.24182115 0.52163733] Cost= 15.186085779854288\n50 beta= [ 1.04530072 -0.2498811 0.52722799] Cost= 14.875642788050842\n51 beta= [ 1.05984085 -0.25785825 0.53276121] Cost= 14.571546056406664\n52 beta= [ 1.0742316 -0.26575344 0.53823758] Cost= 14.273665850899492\n53 beta= [ 1.0884745 -0.27356752 0.54365769] Cost= 13.981875089607055\n54 beta= [ 1.10257106 -0.28130131 0.54902211] Cost= 13.696049288491285\n55 beta= [ 1.11652279 -0.28895565 0.55433142] Cost= 13.4160665082908\n56 beta= [ 1.13033119 -0.29653134 0.55958618] Cost= 13.141807302499078\n57 beta= [ 1.14399771 -0.3040292 0.56478695] Cost= 12.873154666406087\n58 beta= [ 
1.15752383 -0.31145003 0.56993429] Cost= 12.609993987181614\n59 beta= [ 1.17091097 -0.31879461 0.57502874] Cost= 12.352212994979048\n60 beta= [ 1.18416058 -0.32606374 0.58007085] Cost= 12.099701715038718\n61 beta= [ 1.19727406 -0.33325818 0.58506116] Cost= 11.852352420770346\n62 beta= [ 1.21025281 -0.34037871 0.5900002 ] Cost= 11.610059587794655\n63 beta= [ 1.22309821 -0.34742608 0.5948885 ] Cost= 11.372719848924449\n64 beta= [ 1.23581164 -0.35440104 0.59972657] Cost= 11.140231950065994\n65 beta= [ 1.24839445 -0.36130434 0.60451493] Cost= 10.912496707021955\n66 beta= [ 1.26084799 -0.36813672 0.6092541 ] Cost= 10.689416963177276\n67 beta= [ 1.27317357 -0.3748989 0.61394458] Cost= 10.470897548050216\n68 beta= [ 1.28537252 -0.38159161 0.61858687] Cost= 10.256845236690541\n69 beta= [ 1.29744614 -0.38821555 0.62318146] Cost= 10.047168709907897\n70 beta= [ 1.30939572 -0.39477144 0.62772885] Cost= 9.841778515313088\n71 beta= [ 1.32122252 -0.40125998 0.63222952] Cost= 9.64058702915582\n72 beta= [ 1.33292781 -0.40768185 0.63668395] Cost= 9.443508418942587\n73 beta= [ 1.34451284 -0.41403774 0.64109261] Cost= 9.25045860681874\n74 beta= [ 1.35597885 -0.42032833 0.64545598] Cost= 9.06135523369909\n75 beta= [ 1.36732705 -0.42655429 0.64977452] Cost= 8.876117624131846\n76 beta= [ 1.37855867 -0.43271629 0.65404869] Cost= 8.694666751880733\n77 beta= [ 1.38967488 -0.43881497 0.65827894] Cost= 8.516925206210782\n78 beta= [ 1.40067689 -0.444851 0.66246574] Cost= 8.342817158863273\n79 beta= [ 1.41156586 -0.45082501 0.66660951] Cost= 8.172268331705817\n80 beta= [ 1.42234296 -0.45673765 0.67071072] Cost= 8.005205965043773\n81 beta= [ 1.43300934 -0.46258954 0.67476979] Cost= 7.841558786579408\n82 beta= [ 1.44356612 -0.4683813 0.67878715] Cost= 7.681256981005671\n83 beta= [ 1.45401445 -0.47411356 0.68276324] Cost= 7.524232160221513\n84 beta= [ 1.46435543 -0.47978693 0.68669848] Cost= 7.370417334156093\n85 beta= [ 1.47459016 -0.48540201 0.69059329] Cost= 7.219746882189414\n86 beta= [ 
1.48471975 -0.4909594 0.69444809] Cost= 7.072156525157201\n87 beta= [ 1.49474526 -0.49645969 0.69826328] Cost= 6.927583297928065\n88 beta= [ 1.50466776 -0.50190347 0.70203927] Cost= 6.7859655225412485\n89 beta= [ 1.51448833 -0.50729133 0.70577647] Cost= 6.647242781893533\n90 beta= [ 1.52420799 -0.51262382 0.70947527] Cost= 6.511355893964031\n91 beta= [ 1.5338278 -0.51790153 0.71313607] Cost= 6.378246886565903\n92 beta= [ 1.54334877 -0.52312502 0.71675925] Cost= 6.247858972614219\n93 beta= [ 1.55277192 -0.52829484 0.72034522] Cost= 6.120136525899386\n94 beta= [ 1.56209826 -0.53341154 0.72389434] Cost= 5.995025057355844\n95 beta= [ 1.57132878 -0.53847568 0.72740699] Cost= 5.872471191815913\n96 beta= [ 1.58046446 -0.54348778 0.73088356] Cost= 5.7524226452387985\n97 beta= [ 1.58950628 -0.5484484 0.73432441] Cost= 5.63482820240516\n98 beta= [ 1.59845521 -0.55335804 0.73772991] Cost= 5.519637695067601\n99 beta= [ 1.60731219 -0.55821725 0.74110042] Cost= 5.406801980547865\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code" ] ]
4a2c0ff9a5d383ae839c4cbf879884133cb5283b
12,065
ipynb
Jupyter Notebook
notebooks_for_data/scrape_for_pitching_data.ipynb
knishina/World_Series_Prediction_Revisited
1bcddc3ac63f2404c48b9200adb9bab9e986fbc6
[ "MIT" ]
1
2019-08-13T22:34:15.000Z
2019-08-13T22:34:15.000Z
notebooks_for_data/scrape_for_pitching_data.ipynb
knishina/World_Series_Prediction_Revisited
1bcddc3ac63f2404c48b9200adb9bab9e986fbc6
[ "MIT" ]
null
null
null
notebooks_for_data/scrape_for_pitching_data.ipynb
knishina/World_Series_Prediction_Revisited
1bcddc3ac63f2404c48b9200adb9bab9e986fbc6
[ "MIT" ]
null
null
null
33.985915
447
0.395027
[ [ [ "## Purpose: Get the stats for pitching per year (1876-2019).", "_____no_output_____" ] ], [ [ "# import dependencies.\nimport time\nimport pandas as pd\nfrom splinter import Browser\nfrom bs4 import BeautifulSoup as bs", "_____no_output_____" ], [ "!which chromedriver", "/usr/local/bin/chromedriver\r\n" ], [ "# set up driver.\nexecutable_path = {\"executable_path\": \"/usr/local/bin/chromedriver\"}\nbrowser = Browser(\"chrome\", **executable_path, headless=False)", "_____no_output_____" ], [ "# Grab the data into lists.\npitching_data = []\n\nfor year in range(2019, 1875, -1):\n year = str(year)\n url = \"http://mlb.mlb.com/stats/sortable.jsp#elem=%5Bobject+Object%5D&tab_level=child&click_text=Sortable+Team+pitching&game_type='R'&season=\"+year+\"&season_type=ANY&league_code='MLB'&sectionType=st&statType=pitching&page=1&ts=1564260727128&playerType=QUALIFIER&sportCode='mlb'&split=&team_id=&active_sw=&position='1'&page_type=SortablePlayer&sortOrder='desc'&sortColumn=avg&results=&perPage=50&timeframe=&last_x_days=&extended=0\"\n \n browser.visit(url)\n html = browser.html\n soup = bs(html, \"html.parser\")\n a = soup.find(\"tbody\")\n time.sleep(20)\n for tr in a:\n team_data = {}\n team_data[\"year\"] = year\n team_data[\"team\"] = tr.find(\"td\", class_=\"dg-team_full\").text\n team_data[\"W\"] = tr.find(\"td\", class_=\"dg-w\").text\n team_data[\"L\"] = tr.find(\"td\", class_=\"dg-l\").text\n team_data[\"ERA\"] = tr.find(\"td\", class_=\"dg-era\").text\n team_data[\"G1\"] = tr.find(\"td\", class_=\"dg-g\").text\n team_data[\"GS\"] = tr.find(\"td\", class_=\"dg-gs\").text\n team_data[\"SV\"] = tr.find(\"td\", class_=\"dg-sv\").text\n team_data[\"SVO\"] = tr.find(\"td\", class_=\"dg-svo\").text\n team_data[\"IP\"] = tr.find(\"td\", class_=\"dg-ip\").text\n team_data[\"H1\"] = tr.find(\"td\", class_=\"dg-h\").text\n team_data[\"R1\"] = tr.find(\"td\", class_=\"dg-r\").text\n team_data[\"ER\"] = tr.find(\"td\", class_=\"dg-er\").text\n team_data[\"HR1\"] = 
tr.find(\"td\", class_=\"dg-hr\").text\n team_data[\"BB1\"] = tr.find(\"td\", class_=\"dg-bb\").text\n team_data[\"SO1\"] = tr.find(\"td\", class_=\"dg-so\").text\n team_data[\"WHIP\"] = tr.find(\"td\", class_=\"dg-whip\").text\n team_data[\"CG\"] = tr.find(\"td\", class_=\"dg-cg\").text\n team_data[\"SHO\"] = tr.find(\"td\", class_=\"dg-sho\").text\n team_data[\"HB\"] = tr.find(\"td\", class_=\"dg-hb\").text\n team_data[\"IBB1\"] = tr.find(\"td\", class_=\"dg-ibb\").text\n team_data[\"GF\"] = tr.find(\"td\", class_=\"dg-gf\").text\n team_data[\"HLD\"] = tr.find(\"td\", class_=\"dg-hld\").text\n team_data[\"GIDP\"] = tr.find(\"td\", class_=\"dg-gidp\").text\n team_data[\"GO1\"] = tr.find(\"td\", class_=\"dg-go\").text\n team_data[\"AO1\"] = tr.find(\"td\", class_=\"dg-ao\").text\n team_data[\"WP\"] = tr.find(\"td\", class_=\"dg-wp\").text\n team_data[\"BK\"] = tr.find(\"td\", class_=\"dg-bk\").text\n team_data[\"SB1\"] = tr.find(\"td\", class_=\"dg-sb\").text\n team_data[\"CS1\"] = tr.find(\"td\", class_=\"dg-cs\").text\n team_data[\"PK\"] = tr.find(\"td\", class_=\"dg-pk\").text\n team_data[\"TBF\"] = tr.find(\"td\", class_=\"dg-tbf\").text\n team_data[\"NP\"] = tr.find(\"td\", class_=\"dg-np\").text\n team_data[\"WPCT\"] = tr.find(\"td\", class_=\"dg-wpct\").text\n team_data[\"GO_AO1\"] = tr.find(\"td\", class_=\"dg-go_ao\").text\n team_data[\"OBP1\"] = tr.find(\"td\", class_=\"dg-obp\").text\n team_data[\"SLG1\"] = tr.find(\"td\", class_=\"dg-slg\").text\n team_data[\"OPS\"] = tr.find(\"td\", class_=\"dg-ops\").text\n pitching_data.append(team_data)\n team_data = {}", "_____no_output_____" ], [ "pitching_data = pd.DataFrame(pitching_data)\npitching_data.head()", "_____no_output_____" ], [ "pitching_data.to_csv(\"../Resources/pitching_data.csv\")", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
4a2c1206a06bbb89824d8732acb723a81180c7b8
2,958
ipynb
Jupyter Notebook
Corrected_conversion_pdf_text.ipynb
shresht77/autograder-for-concept-maps
7ce42605bd392cc7f7777bc6ebd87d222cc6583a
[ "MIT" ]
null
null
null
Corrected_conversion_pdf_text.ipynb
shresht77/autograder-for-concept-maps
7ce42605bd392cc7f7777bc6ebd87d222cc6583a
[ "MIT" ]
null
null
null
Corrected_conversion_pdf_text.ipynb
shresht77/autograder-for-concept-maps
7ce42605bd392cc7f7777bc6ebd87d222cc6583a
[ "MIT" ]
null
null
null
25.282051
263
0.403989
[ [ [ "<a href=\"https://colab.research.google.com/github/shresht77/autograder-for-concept-maps/blob/main/Corrected_conversion_pdf_text.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ] ], [ [ "from google.colab import drive\ndrive.mount('/content/drive')", "Mounted at /content/drive\n" ], [ "%ls\n", "\u001b[0m\u001b[01;34msample_data\u001b[0m/ Security.txt\n" ], [ "prev = \"\"\na = \"Attacks_mod.txt\"\nb = \"out_\" + a \nwith open(a) as f:\n for line in f: \n for ch in line:\n if (ch==\"\\n\"):\n if (prev == \"\\n\"):\n ch = \"\"\n\n if (ch == \"-\"):\n prev = \"-\"\n else:\n with open(b, \"a\") as f1:\n if (prev == \"-\"):\n if (ch == \" \"):\n ch = \"\"\n else:\n f1.write(\"-\") \n f1.write(ch.lower())\n prev = ch", "_____no_output_____" ], [ "", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code" ] ]
4a2c17e9f83b4834b423973f7126b196cbe476d7
344,950
ipynb
Jupyter Notebook
Notebook/Section 2 - Feedforward Neural Network_v2c.ipynb
PacktPublishing/Practical-Deep-Learning-with-PyTorch
5507decaf76a73b3e2465f0fb0818d0b9c8dd83b
[ "MIT" ]
17
2019-04-11T14:22:57.000Z
2021-07-24T13:32:00.000Z
Notebook/Section 2 - Feedforward Neural Network_v2c.ipynb
PacktPublishing/Practical-Deep-Learning-with-PyTorch
5507decaf76a73b3e2465f0fb0818d0b9c8dd83b
[ "MIT" ]
null
null
null
Notebook/Section 2 - Feedforward Neural Network_v2c.ipynb
PacktPublishing/Practical-Deep-Learning-with-PyTorch
5507decaf76a73b3e2465f0fb0818d0b9c8dd83b
[ "MIT" ]
14
2019-04-11T15:31:29.000Z
2021-04-18T21:19:50.000Z
350.203046
21,716
0.925711
[ [ [ "<div class=\"alert alert-block alert-info\">\n<font size=\"6\"><b><center> Section 2</font></center>\n<br>\n<font size=\"6\"><b><center> Fully-Connected, Feed-Forward Neural Network Examples </font></center>\n</div>", "_____no_output_____" ], [ "# Example 1: A feedforward network with one hidden layer using torch.nn and simulated data", "_____no_output_____" ], [ "In developing (and training) a feedforward neural network, the developer needs to make many decisions, many of which are required when developing more complicated neural networks, such as CNN and RNN:\n\n - the depth of the network (i.e. number of layer) \n - the width of the network (i.e. number of hidden units per layer)\n - the type of nonlinear activation function applied in each hidden layer\n - the type of activation function applied in the output layer\n - the loss function\n - the optimization algorithms\n - the regularization technique (*which we will consider in Section 3*)\n - number of epoch and batch size\n\nOur first example uses simulated data, which has the advantage that we define our own data generating mechanism and can observe how a neural network can approximate the mechanism.", "_____no_output_____" ], [ "----", "_____no_output_____" ], [ "## Simulate and Visualize Data", "_____no_output_____" ], [ "Let's first consider an example with one explanatory variable.\n<br><br>", "_____no_output_____" ], [ "The output is related to the input using the following function\n\n$$y_i = 3x_{i,1} + x^2 exp(x_{i,1}) + \\epsilon_i$$\n\nwhere $\\epsilon$ is an independently and identically distributed (i.i.d.) 
random variable and $i = 1,2,\\dots,n$ is an index of examples (or observations)", "_____no_output_____" ] ], [ [ "# In the following example, n=100\n\nimport numpy as np\nfrom scipy import stats\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n%matplotlib inline\n\nn = 100 # number of examples (or observations)\n\n# Generate a set of n random numbers from a standard normal distribution\nepsilon = np.random.randn(n)\n\n# Generate a set of n random numbers from a uniform[0,1] distribution\nx1 = np.random.uniform(0,1,n)\n\n# Create the data generating mechanism\ny = 3*x1 + np.power(x1,2)*np.exp(x1) + epsilon\n\nstats.describe(y)\nstats.describe(x1)\n\nfig = plt.figure(figsize=(12,8))\nplt.subplot(2, 2, 1)\nsns.set()\n#ax = sns.distplot(x1)\nplt.hist(x1)\n\nplt.subplot(2, 2, 2)\nplt.scatter(x1, y)", "_____no_output_____" ] ], [ [ "**Note: Before training, `numpy array` needs to be converted to `PyTorch's tensors`**", "_____no_output_____" ] ], [ [ "type(x1)", "_____no_output_____" ], [ "print(x1.shape)\nprint(y.shape)", "(100,)\n(100,)\n" ], [ "# convert numpy array to tensor in shape of input size\nimport torch \n\nx1 = torch.from_numpy(x1.reshape(-1,1)).float()\ny = torch.from_numpy(y.reshape(-1,1)).float()", "_____no_output_____" ], [ "print(x1.shape)\nprint(y.shape)", "torch.Size([100, 1])\ntorch.Size([100, 1])\n" ] ], [ [ "## Create a network: First Attempt", "_____no_output_____" ], [ "* Specify a network\n* Define a loss function and choose an optimization algorithm\n* Train the network", "_____no_output_____" ], [ "Our first network is a linear regression model", "_____no_output_____" ], [ "### Create a linear regression model", "_____no_output_____" ] ], [ [ "from __future__ import print_function\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nclass LinearNet(nn.Module):\n def __init__(self):\n super(LinearNet, self).__init__()\n self.linearlayer1 = torch.nn.Linear(1, 1)\n \n def 
forward(self, x):\n y_pred = self.linearlayer1(x) \n return y_pred\n\nlinearNet = LinearNet()\nprint(linearNet)", "LinearNet(\n (linearlayer1): Linear(in_features=1, out_features=1, bias=True)\n)\n" ] ], [ [ "### Define Loss Function and Optimization Algorithm", "_____no_output_____" ] ], [ [ "# Define Optimizer and Loss Function\noptimizer = torch.optim.SGD(linearNet.parameters(), lr=0.01)\nloss_func = torch.nn.MSELoss()", "_____no_output_____" ] ], [ [ "### Model training and print losses", "_____no_output_____" ] ], [ [ "X = Variable(x1)\ny_data = Variable(y)\n\nfor epoch in range(500):\n y_pred = linearNet(X)\n loss = torch.sqrt(loss_func(y_pred, y_data))\n optimizer.zero_grad()\n loss.backward() \n optimizer.step() \n \n # Plot the prediction and print out the loss\n if epoch in [0,99,299,399,499]:\n print(epoch)\n plt.cla()\n plt.scatter(x1.data.numpy(), y.data.numpy())\n #plt.plot(x.data.numpy(), y_pred.data.numpy(), 'r-', lw=2)\n plt.scatter(x1.data.numpy(), y_pred.data.numpy())\n plt.text(0.7, -1, 'Loss=%.4f' % loss.data.numpy(), fontdict={'size': 14, 'color': 'red'})\n plt.pause(0.1)\n\nplt.show()", "0\n" ] ], [ [ "## Create a Network: 2nd Attempt", "_____no_output_____" ], [ "### Define a Feed-forward network with 1 hidden layer", "_____no_output_____" ], [ "**Let's insert a computational graph here**", "_____no_output_____" ] ], [ [ "from __future__ import print_function\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nclass ffNet(nn.Module):\n def __init__(self):\n super(ffNet, self).__init__()\n self.linearCombo1 = torch.nn.Linear(1, 4) # z1 = W1*x1 + b1\n self.linearCombo2 = torch.nn.Linear(4, 1) # z2 = W2*h1 + b2\n\n self.relu = torch.nn.ReLU()\n \n \n def forward(self, x):\n h1 = self.relu(self.linearCombo1(x)) # the ReLU (non-linear activation function) is applied to the linear combination of the weights and input (x1)\n y_pred = self.linearCombo2(h1) \n return y_pred\n\nffnet = 
ffNet()\nprint(ffnet)", "ffNet(\n (linearCombo1): Linear(in_features=1, out_features=4, bias=True)\n (linearCombo2): Linear(in_features=4, out_features=1, bias=True)\n (relu): ReLU()\n)\n" ] ], [ [ "### Define loss function and optimization algorithm", "_____no_output_____" ] ], [ [ "# Define Optimizer and Loss Function\noptimizer = torch.optim.SGD(ffnet.parameters(), lr=0.01)\nloss_func = torch.nn.MSELoss()", "_____no_output_____" ] ], [ [ "### Model Training", "_____no_output_____" ] ], [ [ "X = Variable(x1)\ny_data = Variable(y)\n\nfor epoch in range(500):\n y_pred = ffnet(X)\n loss = loss_func(y_pred, y_data) \n optimizer.zero_grad()\n loss.backward() \n optimizer.step() \n \n if epoch in [0,99,299,399,499]:\n print(epoch)\n plt.cla()\n plt.scatter(x1.data.numpy(), y.data.numpy())\n plt.scatter(x1.data.numpy(), y_pred.data.numpy())\n #plt.plot(x.data.numpy(), y_pred.data.numpy(), 'r-', lw=2)\n plt.text(0.5, 0, 'Loss=%.4f' % loss.data.numpy(), fontdict={'size': 10, 'color': 'red'})\n plt.pause(0.1)\n\nplt.show()", "0\n" ] ], [ [ "## Create a Network: 3rd Attempt", "_____no_output_____" ], [ "### Define a Feed-forward network with 2 hidden layers", "_____no_output_____" ] ], [ [ "from __future__ import print_function\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nclass ffNet(nn.Module):\n def __init__(self):\n super(ffNet, self).__init__()\n self.linearlayer1 = torch.nn.Linear(1, 8)\n self.linearlayer2 = torch.nn.Linear(8, 4)\n self.linearlayer3 = torch.nn.Linear(4, 1)\n\n self.relu = torch.nn.ReLU()\n\n def forward(self, x):\n out1 = self.relu(self.linearlayer1(x))\n out2 = self.relu(self.linearlayer2(out1))\n y_pred = self.linearlayer3(out2) \n return y_pred\n\nffnet2 = ffNet()\nprint(ffnet2)", "ffNet(\n (linearlayer1): Linear(in_features=1, out_features=8, bias=True)\n (linearlayer2): Linear(in_features=8, out_features=4, bias=True)\n (linearlayer3): Linear(in_features=4, out_features=1, 
bias=True)\n (relu): ReLU()\n)\n" ] ], [ [ "### Define loss function and optimization algorithm", "_____no_output_____" ] ], [ [ "# Define Optimizer and Loss Function\noptimizer = torch.optim.SGD(ffnet2.parameters(), lr=0.01)\nloss_func = torch.nn.MSELoss()", "_____no_output_____" ] ], [ [ "### Model Training", "_____no_output_____" ] ], [ [ "X = Variable(x1)\ny_data = Variable(y)\n\nfor epoch in range(500):\n y_pred = ffnet2(X)\n loss = loss_func(y_pred, y_data) \n optimizer.zero_grad()\n loss.backward() \n optimizer.step() \n \n if epoch in [0,99,299,399,499,999]:\n print(epoch)\n plt.cla()\n plt.scatter(x1.data.numpy(), y.data.numpy())\n #plt.plot(x.data.numpy(), y_pred.data.numpy(), 'r', lw=1)\n plt.scatter(x1.data.numpy(), y_pred.data.numpy())\n plt.text(0.5, 0, 'Loss=%.4f' % loss.data.numpy(), fontdict={'size': 10, 'color': 'red'})\n plt.pause(0.1)\n\nplt.show()", "0\n" ] ], [ [ "# Lab 2", "_____no_output_____" ], [ "**Review modeling attempt 1 - 3 and design a network to improve the existing results.**", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
4a2c255fd427dbe0d9cbf95ff32c3dffd312189f
371,860
ipynb
Jupyter Notebook
ke-ci1-misc/4.2.2. MNIST CNN-telescopeuser-digit-10-GPU - Solution.ipynb
telescopeuser/iss-misc
cd09f30ee597cc93d833207daad824f658b07293
[ "MIT" ]
null
null
null
ke-ci1-misc/4.2.2. MNIST CNN-telescopeuser-digit-10-GPU - Solution.ipynb
telescopeuser/iss-misc
cd09f30ee597cc93d833207daad824f658b07293
[ "MIT" ]
null
null
null
ke-ci1-misc/4.2.2. MNIST CNN-telescopeuser-digit-10-GPU - Solution.ipynb
telescopeuser/iss-misc
cd09f30ee597cc93d833207daad824f658b07293
[ "MIT" ]
null
null
null
165.786893
19,712
0.862844
[ [ [ "# Convolution Nets for MNIST\n\n### TelescopeUser: 10-class classification problem", "_____no_output_____" ], [ "<img src=\"imgs/mnist_plot.png\"\n style=\"float: left; margin-right: 1px;\" width=\"500\" height=\"400\" />", "_____no_output_____" ], [ "Deep Learning models can take quite a bit of time to run, particularly if GPU isn't used. \n\nIn the interest of time, you could sample a subset of observations (e.g. $1000$) that are a particular number of your choice (e.g. $6$) and $1000$ observations that aren't that particular number (i.e. $\\neq 6$). \n\nWe will build a model using that and see how it performs on the test dataset", "_____no_output_____" ] ], [ [ "#Import the required libraries\nimport numpy as np\nnp.random.seed(1338)\n\nfrom keras.datasets import mnist", "Using TensorFlow backend.\n" ], [ "from keras.models import Sequential\nfrom keras.layers.core import Dense, Dropout, Activation, Flatten", "_____no_output_____" ], [ "from keras.layers.convolutional import Conv2D\nfrom keras.layers.pooling import MaxPooling2D", "_____no_output_____" ], [ "from keras.utils import np_utils\nfrom keras.optimizers import SGD", "_____no_output_____" ] ], [ [ "## Loading Data", "_____no_output_____" ] ], [ [ "# Load the training and testing data\n(X_train, Y_train), (X_test, Y_test) = mnist.load_data()\n# Display purpose:\nX_train_orig = X_train\nX_test_orig = X_test", "_____no_output_____" ] ], [ [ "## Data Preparation", "_____no_output_____" ], [ "### Very Important: \nWhen dealing with images & convolutions, it is paramount to handle `image_data_format` properly", "_____no_output_____" ] ], [ [ "from keras import backend as K", "_____no_output_____" ], [ "img_rows, img_cols = 28, 28\n\nif K.image_data_format() == 'channels_first':\n shape_ord = (1, img_rows, img_cols)\nelse: # channel_last\n shape_ord = (img_rows, img_cols, 1)", "_____no_output_____" ] ], [ [ "#### Preprocess and Normalise Data", "_____no_output_____" ] ], [ [ "X_train = 
X_train.reshape((X_train.shape[0],) + shape_ord)\nX_test = X_test.reshape((X_test.shape[0],) + shape_ord)\n\nX_train = X_train.astype('float32')\nX_test = X_test.astype('float32')\n\nX_train /= 255\nX_test /= 255", "_____no_output_____" ] ], [ [ "### Let's look at some images", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\n%matplotlib inline", "_____no_output_____" ], [ "print(Y_train[0:10])\n\nslice = 10\nplt.figure(figsize=(16,8))\nfor i in range(slice):\n plt.subplot(1, slice, i+1)\n plt.imshow(X_train_orig[i], interpolation='nearest')\n plt.axis('off')", "[5 0 4 1 9 2 1 3 1 4]\n" ], [ "print(Y_test[0:10])\n\nslice = 10\nplt.figure(figsize=(16,8))\nfor i in range(slice):\n plt.subplot(1, slice, i+1)\n plt.imshow(X_test_orig[i], interpolation='nearest')\n plt.axis('off')", "[7 2 1 0 4 1 4 9 5 9]\n" ] ], [ [ "### One-hot Encoding for label digits 0 ~ 9", "_____no_output_____" ] ], [ [ "print(X_train.shape, Y_train.shape, X_test.shape, Y_test.shape)", "(60000, 28, 28, 1) (60000,) (10000, 28, 28, 1) (10000,)\n" ], [ "# Converting the classes to its binary categorical form\nnb_classes = 10\nY_train = np_utils.to_categorical(Y_train, nb_classes)\nY_test = np_utils.to_categorical(Y_test, nb_classes)", "_____no_output_____" ], [ "print(X_train.shape, Y_train.shape, X_test.shape, Y_test.shape)", "(60000, 28, 28, 1) (60000, 10) (10000, 28, 28, 1) (10000, 10)\n" ], [ "Y_train[0:10]", "_____no_output_____" ], [ "Y_test[0:10]", "_____no_output_____" ] ], [ [ "# A simple CNN", "_____no_output_____" ] ], [ [ "# -- Initializing the values for the convolution neural network\n\n# nb_epoch = 2 # kept very low! Please increase if you have GPU instead of CPU\nnb_epoch = 20 # kept very low! 
Please increase if you have GPU instead of CPU\n\nbatch_size = 64\n# number of convolutional filters to use\nnb_filters = 32\n# size of pooling area for max pooling\nnb_pool = 2\n# convolution kernel size\nnb_conv = 3\n\n# Vanilla SGD\nsgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)", "_____no_output_____" ] ], [ [ "#### Step 1: Model Definition", "_____no_output_____" ] ], [ [ "model = Sequential()\n\nmodel.add(Conv2D(nb_filters, (nb_conv, nb_conv), padding='valid', \n input_shape=shape_ord)) # note: the very first layer **must** always specify the input_shape\nmodel.add(Activation('relu'))\n\nmodel.add(Flatten())\nmodel.add(Dense(nb_classes))\nmodel.add(Activation('softmax'))\nmodel.summary()", "_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nconv2d_1 (Conv2D) (None, 26, 26, 32) 320 \n_________________________________________________________________\nactivation_1 (Activation) (None, 26, 26, 32) 0 \n_________________________________________________________________\nflatten_1 (Flatten) (None, 21632) 0 \n_________________________________________________________________\ndense_1 (Dense) (None, 10) 216330 \n_________________________________________________________________\nactivation_2 (Activation) (None, 10) 0 \n=================================================================\nTotal params: 216,650\nTrainable params: 216,650\nNon-trainable params: 0\n_________________________________________________________________\n" ] ], [ [ "#### Step 2: Compile", "_____no_output_____" ] ], [ [ "model.compile(loss='categorical_crossentropy',\n optimizer=sgd,\n metrics=['accuracy'])", "_____no_output_____" ] ], [ [ "#### Step 3: Fit", "_____no_output_____" ] ], [ [ "hist = model.fit(X_train, Y_train, batch_size=batch_size, \n epochs=nb_epoch, verbose=1, \n validation_data=(X_test, Y_test))", "Train on 60000 samples, validate on 10000 samples\nEpoch 
1/20\n60000/60000 [==============================] - 22s - loss: 0.1662 - acc: 0.9507 - val_loss: 0.0783 - val_acc: 0.9752\nEpoch 2/20\n60000/60000 [==============================] - 20s - loss: 0.0677 - acc: 0.9797 - val_loss: 0.0816 - val_acc: 0.9755\nEpoch 3/20\n60000/60000 [==============================] - 20s - loss: 0.0482 - acc: 0.9849 - val_loss: 0.0662 - val_acc: 0.9786\nEpoch 4/20\n60000/60000 [==============================] - 20s - loss: 0.0357 - acc: 0.9887 - val_loss: 0.0756 - val_acc: 0.9786\nEpoch 5/20\n60000/60000 [==============================] - 20s - loss: 0.0251 - acc: 0.9919 - val_loss: 0.0811 - val_acc: 0.9765\nEpoch 6/20\n60000/60000 [==============================] - 20s - loss: 0.0164 - acc: 0.9949 - val_loss: 0.0701 - val_acc: 0.9815\nEpoch 7/20\n60000/60000 [==============================] - 20s - loss: 0.0133 - acc: 0.9956 - val_loss: 0.0838 - val_acc: 0.9793\nEpoch 8/20\n60000/60000 [==============================] - 20s - loss: 0.0090 - acc: 0.9972 - val_loss: 0.0772 - val_acc: 0.9804\nEpoch 9/20\n60000/60000 [==============================] - 20s - loss: 0.0063 - acc: 0.9981 - val_loss: 0.0766 - val_acc: 0.9817\nEpoch 10/20\n60000/60000 [==============================] - 20s - loss: 0.0036 - acc: 0.9992 - val_loss: 0.0835 - val_acc: 0.9828\nEpoch 11/20\n60000/60000 [==============================] - 20s - loss: 0.0025 - acc: 0.9995 - val_loss: 0.0869 - val_acc: 0.9807\nEpoch 12/20\n60000/60000 [==============================] - 20s - loss: 0.0012 - acc: 0.9999 - val_loss: 0.0875 - val_acc: 0.9831\nEpoch 13/20\n60000/60000 [==============================] - 20s - loss: 6.8169e-04 - acc: 1.0000 - val_loss: 0.0884 - val_acc: 0.9827\nEpoch 14/20\n60000/60000 [==============================] - 20s - loss: 5.3203e-04 - acc: 1.0000 - val_loss: 0.0892 - val_acc: 0.9833\nEpoch 15/20\n60000/60000 [==============================] - 20s - loss: 4.8383e-04 - acc: 1.0000 - val_loss: 0.0895 - val_acc: 0.9832\nEpoch 16/20\n60000/60000 
[==============================] - 20s - loss: 4.5790e-04 - acc: 1.0000 - val_loss: 0.0909 - val_acc: 0.9833\nEpoch 17/20\n60000/60000 [==============================] - 20s - loss: 4.3741e-04 - acc: 1.0000 - val_loss: 0.0907 - val_acc: 0.9836\nEpoch 18/20\n60000/60000 [==============================] - 20s - loss: 4.2373e-04 - acc: 1.0000 - val_loss: 0.0915 - val_acc: 0.9835\nEpoch 19/20\n60000/60000 [==============================] - 20s - loss: 4.1196e-04 - acc: 1.0000 - val_loss: 0.0923 - val_acc: 0.9833\nEpoch 20/20\n60000/60000 [==============================] - 20s - loss: 4.0083e-04 - acc: 1.0000 - val_loss: 0.0929 - val_acc: 0.9835\n" ], [ "# import matplotlib.pyplot as plt\n# %matplotlib inline\n\nplt.figure()\nplt.xlabel('Epochs')\nplt.ylabel('Loss')\nplt.plot(hist.history['loss'])\nplt.plot(hist.history['val_loss'])\nplt.legend(['Training', 'Validation'])\n\nplt.figure()\nplt.xlabel('Epochs')\nplt.ylabel('Accuracy')\nplt.plot(hist.history['acc'])\nplt.plot(hist.history['val_acc'])\nplt.legend(['Training', 'Validation'], loc='lower right')", "_____no_output_____" ] ], [ [ "### Step 4: Evaluate", "_____no_output_____" ] ], [ [ "print('Available Metrics in Model: {}'.format(model.metrics_names))", "Available Metrics in Model: ['loss', 'acc']\n" ], [ "# Evaluating the model on the test data \nloss, accuracy = model.evaluate(X_test, Y_test, verbose=0)\nprint('Test Loss:', loss)\nprint('Test Accuracy:', accuracy)", "Test Loss: 0.0929218709099\nTest Accuracy: 0.9835\n" ] ], [ [ "### Let's plot our model Predictions!", "_____no_output_____" ] ], [ [ "slice = 20\npredicted = model.predict(X_test[:slice]).argmax(-1)\n\nplt.figure(figsize=(16,8))\nfor i in range(slice):\n plt.subplot(1, slice, i+1)\n plt.imshow(X_test_orig[i], interpolation='nearest')\n plt.text(0, 0, predicted[i], color='black', \n bbox=dict(facecolor='white', alpha=1))\n plt.axis('off')", "_____no_output_____" ] ], [ [ "# Adding more Dense Layers", "_____no_output_____" ] ], [ [ "model = 
Sequential()\nmodel.add(Conv2D(nb_filters, (nb_conv, nb_conv),\n padding='valid', input_shape=shape_ord))\nmodel.add(Activation('relu'))\n\nmodel.add(Flatten())\nmodel.add(Dense(128))\nmodel.add(Activation('relu'))\n\nmodel.add(Dense(nb_classes))\nmodel.add(Activation('softmax'))\nmodel.summary()", "_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nconv2d_2 (Conv2D) (None, 26, 26, 32) 320 \n_________________________________________________________________\nactivation_3 (Activation) (None, 26, 26, 32) 0 \n_________________________________________________________________\nflatten_2 (Flatten) (None, 21632) 0 \n_________________________________________________________________\ndense_2 (Dense) (None, 128) 2769024 \n_________________________________________________________________\nactivation_4 (Activation) (None, 128) 0 \n_________________________________________________________________\ndense_3 (Dense) (None, 10) 1290 \n_________________________________________________________________\nactivation_5 (Activation) (None, 10) 0 \n=================================================================\nTotal params: 2,770,634\nTrainable params: 2,770,634\nNon-trainable params: 0\n_________________________________________________________________\n" ], [ "model.compile(loss='categorical_crossentropy',\n optimizer='sgd',\n metrics=['accuracy'])\n\nhist = model.fit(X_train, Y_train, batch_size=batch_size, \n epochs=nb_epoch,verbose=1,\n validation_data=(X_test, Y_test))", "Train on 60000 samples, validate on 10000 samples\nEpoch 1/20\n60000/60000 [==============================] - 22s - loss: 0.4825 - acc: 0.8743 - val_loss: 0.2789 - val_acc: 0.9173\nEpoch 2/20\n60000/60000 [==============================] - 22s - loss: 0.2376 - acc: 0.9315 - val_loss: 0.2125 - val_acc: 0.9387\nEpoch 3/20\n60000/60000 [==============================] - 22s - loss: 0.1855 - acc: 
0.9467 - val_loss: 0.1669 - val_acc: 0.9510\nEpoch 4/20\n60000/60000 [==============================] - 22s - loss: 0.1528 - acc: 0.9554 - val_loss: 0.1325 - val_acc: 0.9611\nEpoch 5/20\n60000/60000 [==============================] - 22s - loss: 0.1301 - acc: 0.9624 - val_loss: 0.1321 - val_acc: 0.9590\nEpoch 6/20\n60000/60000 [==============================] - 22s - loss: 0.1137 - acc: 0.9671 - val_loss: 0.1093 - val_acc: 0.9680\nEpoch 7/20\n60000/60000 [==============================] - 22s - loss: 0.1012 - acc: 0.9708 - val_loss: 0.0986 - val_acc: 0.9698\nEpoch 8/20\n60000/60000 [==============================] - 22s - loss: 0.0911 - acc: 0.9732 - val_loss: 0.0929 - val_acc: 0.9724\nEpoch 9/20\n60000/60000 [==============================] - 22s - loss: 0.0824 - acc: 0.9762 - val_loss: 0.0859 - val_acc: 0.9750\nEpoch 10/20\n60000/60000 [==============================] - 22s - loss: 0.0758 - acc: 0.9769 - val_loss: 0.0795 - val_acc: 0.9764\nEpoch 11/20\n60000/60000 [==============================] - 22s - loss: 0.0696 - acc: 0.9800 - val_loss: 0.0773 - val_acc: 0.9766\nEpoch 12/20\n60000/60000 [==============================] - 22s - loss: 0.0655 - acc: 0.9806 - val_loss: 0.0799 - val_acc: 0.9746\nEpoch 13/20\n60000/60000 [==============================] - 22s - loss: 0.0603 - acc: 0.9822 - val_loss: 0.0816 - val_acc: 0.9761\nEpoch 14/20\n60000/60000 [==============================] - 22s - loss: 0.0561 - acc: 0.9831 - val_loss: 0.0748 - val_acc: 0.9762\nEpoch 15/20\n60000/60000 [==============================] - 22s - loss: 0.0540 - acc: 0.9835 - val_loss: 0.0751 - val_acc: 0.9771\nEpoch 16/20\n60000/60000 [==============================] - 22s - loss: 0.0499 - acc: 0.9851 - val_loss: 0.0723 - val_acc: 0.9785\nEpoch 17/20\n60000/60000 [==============================] - 22s - loss: 0.0473 - acc: 0.9861 - val_loss: 0.0717 - val_acc: 0.9795\nEpoch 18/20\n60000/60000 [==============================] - 22s - loss: 0.0451 - acc: 0.9867 - val_loss: 0.0721 - val_acc: 
0.9789\nEpoch 19/20\n60000/60000 [==============================] - 22s - loss: 0.0429 - acc: 0.9873 - val_loss: 0.0822 - val_acc: 0.9750\nEpoch 20/20\n60000/60000 [==============================] - 22s - loss: 0.0405 - acc: 0.9879 - val_loss: 0.0659 - val_acc: 0.9797\n" ], [ "# import matplotlib.pyplot as plt\n# %matplotlib inline\n\nplt.figure()\nplt.xlabel('Epochs')\nplt.ylabel('Loss')\nplt.plot(hist.history['loss'])\nplt.plot(hist.history['val_loss'])\nplt.legend(['Training', 'Validation'])\n\nplt.figure()\nplt.xlabel('Epochs')\nplt.ylabel('Accuracy')\nplt.plot(hist.history['acc'])\nplt.plot(hist.history['val_acc'])\nplt.legend(['Training', 'Validation'], loc='lower right')", "_____no_output_____" ], [ "#Evaluating the model on the test data \nscore, accuracy = model.evaluate(X_test, Y_test, verbose=0)\nprint('Test score:', score)\nprint('Test accuracy:', accuracy)", "Test score: 0.0659397623455\nTest accuracy: 0.9797\n" ], [ "slice = 20\npredicted = model.predict(X_test[:slice]).argmax(-1)\n\nplt.figure(figsize=(16,8))\nfor i in range(slice):\n plt.subplot(1, slice, i+1)\n plt.imshow(X_test_orig[i], interpolation='nearest')\n plt.text(0, 0, predicted[i], color='black', \n bbox=dict(facecolor='white', alpha=1))\n plt.axis('off')", "_____no_output_____" ] ], [ [ "# Adding Dropout", "_____no_output_____" ] ], [ [ "model = Sequential()\n\nmodel.add(Conv2D(nb_filters, (nb_conv, nb_conv),\n padding='valid',\n input_shape=shape_ord))\nmodel.add(Activation('relu'))\n\nmodel.add(Flatten())\nmodel.add(Dense(128))\nmodel.add(Activation('relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(nb_classes))\nmodel.add(Activation('softmax'))\nmodel.summary()", "_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nconv2d_3 (Conv2D) (None, 26, 26, 32) 320 \n_________________________________________________________________\nactivation_6 (Activation) (None, 26, 26, 32) 
0 \n_________________________________________________________________\nflatten_3 (Flatten) (None, 21632) 0 \n_________________________________________________________________\ndense_4 (Dense) (None, 128) 2769024 \n_________________________________________________________________\nactivation_7 (Activation) (None, 128) 0 \n_________________________________________________________________\ndropout_1 (Dropout) (None, 128) 0 \n_________________________________________________________________\ndense_5 (Dense) (None, 10) 1290 \n_________________________________________________________________\nactivation_8 (Activation) (None, 10) 0 \n=================================================================\nTotal params: 2,770,634\nTrainable params: 2,770,634\nNon-trainable params: 0\n_________________________________________________________________\n" ], [ "model.compile(loss='categorical_crossentropy',\n optimizer='sgd',\n metrics=['accuracy'])\n\nhist = model.fit(X_train, Y_train, batch_size=batch_size, \n epochs=nb_epoch,verbose=1,\n validation_data=(X_test, Y_test))", "Train on 60000 samples, validate on 10000 samples\nEpoch 1/20\n60000/60000 [==============================] - 23s - loss: 0.6322 - acc: 0.8171 - val_loss: 0.2592 - val_acc: 0.9258\nEpoch 2/20\n60000/60000 [==============================] - 23s - loss: 0.3303 - acc: 0.9036 - val_loss: 0.1945 - val_acc: 0.9429\nEpoch 3/20\n60000/60000 [==============================] - 23s - loss: 0.2710 - acc: 0.9214 - val_loss: 0.1619 - val_acc: 0.9532\nEpoch 4/20\n60000/60000 [==============================] - 23s - loss: 0.2409 - acc: 0.9310 - val_loss: 0.1425 - val_acc: 0.9578\nEpoch 5/20\n60000/60000 [==============================] - 23s - loss: 0.2231 - acc: 0.9350 - val_loss: 0.1306 - val_acc: 0.9617\nEpoch 6/20\n60000/60000 [==============================] - 23s - loss: 0.2050 - acc: 0.9399 - val_loss: 0.1211 - val_acc: 0.9644\nEpoch 7/20\n60000/60000 [==============================] - 23s - loss: 0.1983 - acc: 0.9416 
- val_loss: 0.1139 - val_acc: 0.9669\nEpoch 8/20\n60000/60000 [==============================] - 23s - loss: 0.1886 - acc: 0.9445 - val_loss: 0.1087 - val_acc: 0.9685\nEpoch 9/20\n60000/60000 [==============================] - 23s - loss: 0.1781 - acc: 0.9482 - val_loss: 0.1033 - val_acc: 0.9695\nEpoch 10/20\n60000/60000 [==============================] - 23s - loss: 0.1729 - acc: 0.9495 - val_loss: 0.1009 - val_acc: 0.9711\nEpoch 11/20\n60000/60000 [==============================] - 23s - loss: 0.1681 - acc: 0.9505 - val_loss: 0.0975 - val_acc: 0.9712\nEpoch 12/20\n60000/60000 [==============================] - 23s - loss: 0.1637 - acc: 0.9511 - val_loss: 0.0971 - val_acc: 0.9699\nEpoch 13/20\n60000/60000 [==============================] - 23s - loss: 0.1577 - acc: 0.9525 - val_loss: 0.0914 - val_acc: 0.9727\nEpoch 14/20\n60000/60000 [==============================] - 23s - loss: 0.1547 - acc: 0.9539 - val_loss: 0.0891 - val_acc: 0.9739\nEpoch 15/20\n60000/60000 [==============================] - 23s - loss: 0.1466 - acc: 0.9559 - val_loss: 0.0900 - val_acc: 0.9723\nEpoch 16/20\n60000/60000 [==============================] - 23s - loss: 0.1438 - acc: 0.9581 - val_loss: 0.0869 - val_acc: 0.9745\nEpoch 17/20\n60000/60000 [==============================] - 23s - loss: 0.1422 - acc: 0.9582 - val_loss: 0.0875 - val_acc: 0.9735\nEpoch 18/20\n60000/60000 [==============================] - 23s - loss: 0.1396 - acc: 0.9572 - val_loss: 0.0811 - val_acc: 0.9759\nEpoch 19/20\n60000/60000 [==============================] - 23s - loss: 0.1393 - acc: 0.9584 - val_loss: 0.0802 - val_acc: 0.9768\nEpoch 20/20\n60000/60000 [==============================] - 23s - loss: 0.1372 - acc: 0.9588 - val_loss: 0.0793 - val_acc: 0.9764\n" ], [ "# import matplotlib.pyplot as plt\n# %matplotlib inline\n\nplt.figure()\nplt.xlabel('Epochs')\nplt.ylabel('Loss')\nplt.plot(hist.history['loss'])\nplt.plot(hist.history['val_loss'])\nplt.legend(['Training', 
'Validation'])\n\nplt.figure()\nplt.xlabel('Epochs')\nplt.ylabel('Accuracy')\nplt.plot(hist.history['acc'])\nplt.plot(hist.history['val_acc'])\nplt.legend(['Training', 'Validation'], loc='lower right')", "_____no_output_____" ], [ "#Evaluating the model on the test data \nscore, accuracy = model.evaluate(X_test, Y_test, verbose=0)\nprint('Test score:', score)\nprint('Test accuracy:', accuracy)", "Test score: 0.0792893245731\nTest accuracy: 0.9764\n" ], [ "slice = 20\npredicted = model.predict(X_test[:slice]).argmax(-1)\n\nplt.figure(figsize=(16,8))\nfor i in range(slice):\n plt.subplot(1, slice, i+1)\n plt.imshow(X_test_orig[i], interpolation='nearest')\n plt.text(0, 0, predicted[i], color='black', \n bbox=dict(facecolor='white', alpha=1))\n plt.axis('off')", "_____no_output_____" ] ], [ [ "# Adding more Convolution Layers", "_____no_output_____" ] ], [ [ "model = Sequential()\nmodel.add(Conv2D(nb_filters, (nb_conv, nb_conv),\n padding='valid', input_shape=shape_ord))\nmodel.add(Activation('relu'))\nmodel.add(Conv2D(nb_filters, (nb_conv, nb_conv)))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))\nmodel.add(Dropout(0.25))\n \nmodel.add(Flatten())\nmodel.add(Dense(128))\nmodel.add(Activation('relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(nb_classes))\nmodel.add(Activation('softmax'))\nmodel.summary()", "_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nconv2d_4 (Conv2D) (None, 26, 26, 32) 320 \n_________________________________________________________________\nactivation_9 (Activation) (None, 26, 26, 32) 0 \n_________________________________________________________________\nconv2d_5 (Conv2D) (None, 24, 24, 32) 9248 \n_________________________________________________________________\nactivation_10 (Activation) (None, 24, 24, 32) 0 
\n_________________________________________________________________\nmax_pooling2d_1 (MaxPooling2 (None, 12, 12, 32) 0 \n_________________________________________________________________\ndropout_2 (Dropout) (None, 12, 12, 32) 0 \n_________________________________________________________________\nflatten_4 (Flatten) (None, 4608) 0 \n_________________________________________________________________\ndense_6 (Dense) (None, 128) 589952 \n_________________________________________________________________\nactivation_11 (Activation) (None, 128) 0 \n_________________________________________________________________\ndropout_3 (Dropout) (None, 128) 0 \n_________________________________________________________________\ndense_7 (Dense) (None, 10) 1290 \n_________________________________________________________________\nactivation_12 (Activation) (None, 10) 0 \n=================================================================\nTotal params: 600,810\nTrainable params: 600,810\nNon-trainable params: 0\n_________________________________________________________________\n" ], [ "model.compile(loss='categorical_crossentropy',\n optimizer='sgd',\n metrics=['accuracy'])\n\nhist = model.fit(X_train, Y_train, batch_size=batch_size, \n epochs=nb_epoch,verbose=1,\n validation_data=(X_test, Y_test))", "Train on 60000 samples, validate on 10000 samples\nEpoch 1/20\n60000/60000 [==============================] - 29s - loss: 0.9232 - acc: 0.7016 - val_loss: 0.2835 - val_acc: 0.9184\nEpoch 2/20\n60000/60000 [==============================] - 29s - loss: 0.3928 - acc: 0.8798 - val_loss: 0.1982 - val_acc: 0.9427\nEpoch 3/20\n60000/60000 [==============================] - 29s - loss: 0.3140 - acc: 0.9049 - val_loss: 0.1621 - val_acc: 0.9528\nEpoch 4/20\n60000/60000 [==============================] - 29s - loss: 0.2622 - acc: 0.9213 - val_loss: 0.1308 - val_acc: 0.9604\nEpoch 5/20\n60000/60000 [==============================] - 29s - loss: 0.2216 - acc: 0.9326 - val_loss: 0.1105 - val_acc: 
0.9665\nEpoch 6/20\n60000/60000 [==============================] - 29s - loss: 0.1954 - acc: 0.9415 - val_loss: 0.0959 - val_acc: 0.9706\nEpoch 7/20\n60000/60000 [==============================] - 29s - loss: 0.1741 - acc: 0.9473 - val_loss: 0.0849 - val_acc: 0.9741\nEpoch 8/20\n60000/60000 [==============================] - 30s - loss: 0.1591 - acc: 0.9528 - val_loss: 0.0762 - val_acc: 0.9754\nEpoch 9/20\n60000/60000 [==============================] - 29s - loss: 0.1444 - acc: 0.9557 - val_loss: 0.0716 - val_acc: 0.9778\nEpoch 10/20\n60000/60000 [==============================] - 29s - loss: 0.1370 - acc: 0.9584 - val_loss: 0.0652 - val_acc: 0.9784\nEpoch 11/20\n60000/60000 [==============================] - 29s - loss: 0.1275 - acc: 0.9615 - val_loss: 0.0627 - val_acc: 0.9798\nEpoch 12/20\n60000/60000 [==============================] - 29s - loss: 0.1218 - acc: 0.9625 - val_loss: 0.0578 - val_acc: 0.9824\nEpoch 13/20\n60000/60000 [==============================] - 29s - loss: 0.1134 - acc: 0.9666 - val_loss: 0.0556 - val_acc: 0.9817\nEpoch 14/20\n60000/60000 [==============================] - 29s - loss: 0.1075 - acc: 0.9677 - val_loss: 0.0540 - val_acc: 0.9824\nEpoch 15/20\n60000/60000 [==============================] - 29s - loss: 0.1014 - acc: 0.9692 - val_loss: 0.0517 - val_acc: 0.9834\nEpoch 16/20\n60000/60000 [==============================] - 30s - loss: 0.0984 - acc: 0.9700 - val_loss: 0.0501 - val_acc: 0.9840\nEpoch 17/20\n60000/60000 [==============================] - 29s - loss: 0.0986 - acc: 0.9695 - val_loss: 0.0476 - val_acc: 0.9854\nEpoch 18/20\n60000/60000 [==============================] - 29s - loss: 0.0922 - acc: 0.9719 - val_loss: 0.0464 - val_acc: 0.9852\nEpoch 19/20\n60000/60000 [==============================] - 29s - loss: 0.0892 - acc: 0.9734 - val_loss: 0.0451 - val_acc: 0.9855\nEpoch 20/20\n60000/60000 [==============================] - 29s - loss: 0.0844 - acc: 0.9748 - val_loss: 0.0457 - val_acc: 0.9855\n" ], [ "# import 
matplotlib.pyplot as plt\n# %matplotlib inline\n\nplt.figure()\nplt.xlabel('Epochs')\nplt.ylabel('Loss')\nplt.plot(hist.history['loss'])\nplt.plot(hist.history['val_loss'])\nplt.legend(['Training', 'Validation'])\n\nplt.figure()\nplt.xlabel('Epochs')\nplt.ylabel('Accuracy')\nplt.plot(hist.history['acc'])\nplt.plot(hist.history['val_acc'])\nplt.legend(['Training', 'Validation'], loc='lower right')", "_____no_output_____" ], [ "#Evaluating the model on the test data \nscore, accuracy = model.evaluate(X_test, Y_test, verbose=0)\nprint('Test score:', score)\nprint('Test accuracy:', accuracy)", "Test score: 0.0457361380112\nTest accuracy: 0.9855\n" ], [ "slice = 20\npredicted = model.predict(X_test[:slice]).argmax(-1)\n\nplt.figure(figsize=(16,8))\nfor i in range(slice):\n plt.subplot(1, slice, i+1)\n plt.imshow(X_test_orig[i], interpolation='nearest')\n plt.text(0, 0, predicted[i], color='black', \n bbox=dict(facecolor='white', alpha=1))\n plt.axis('off')", "_____no_output_____" ] ], [ [ "# Exercise\n\nThe above code has been written as a function. \n\nChange some of the **hyperparameters** and see what happens. 
", "_____no_output_____" ] ], [ [ "nb_epoch = 100", "_____no_output_____" ], [ "# Function for constructing the convolution neural network\n# Feel free to add parameters, if you want\n\ndef build_model():\n \"\"\"\"\"\"\n model = Sequential()\n model.add(Conv2D(nb_filters, (nb_conv, nb_conv), \n padding='valid',\n input_shape=shape_ord))\n model.add(Activation('relu'))\n model.add(Conv2D(nb_filters, (nb_conv, nb_conv)))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))\n model.add(Dropout(0.25))\n \n model.add(Flatten())\n model.add(Dense(128))\n model.add(Activation('relu'))\n model.add(Dropout(0.5))\n model.add(Dense(nb_classes))\n model.add(Activation('softmax'))\n model.summary()\n \n model.compile(loss='categorical_crossentropy',\n optimizer='sgd',\n metrics=['accuracy'])\n hist = model.fit(X_train, Y_train, batch_size=batch_size, \n epochs=nb_epoch,verbose=1,\n validation_data=(X_test, Y_test))\n \n\n #Evaluating the model on the test data \n score, accuracy = model.evaluate(X_test, Y_test, verbose=0)\n print('Test score:', score)\n print('Test accuracy:', accuracy)\n return hist, model", "_____no_output_____" ], [ "# Train and test model in one shot\nhist, model = build_model()", "_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nconv2d_6 (Conv2D) (None, 26, 26, 32) 320 \n_________________________________________________________________\nactivation_13 (Activation) (None, 26, 26, 32) 0 \n_________________________________________________________________\nconv2d_7 (Conv2D) (None, 24, 24, 32) 9248 \n_________________________________________________________________\nactivation_14 (Activation) (None, 24, 24, 32) 0 \n_________________________________________________________________\nmax_pooling2d_2 (MaxPooling2 (None, 12, 12, 32) 0 \n_________________________________________________________________\ndropout_4 
(Dropout) (None, 12, 12, 32) 0 \n_________________________________________________________________\nflatten_5 (Flatten) (None, 4608) 0 \n_________________________________________________________________\ndense_8 (Dense) (None, 128) 589952 \n_________________________________________________________________\nactivation_15 (Activation) (None, 128) 0 \n_________________________________________________________________\ndropout_5 (Dropout) (None, 128) 0 \n_________________________________________________________________\ndense_9 (Dense) (None, 10) 1290 \n_________________________________________________________________\nactivation_16 (Activation) (None, 10) 0 \n=================================================================\nTotal params: 600,810\nTrainable params: 600,810\nNon-trainable params: 0\n_________________________________________________________________\nTrain on 60000 samples, validate on 10000 samples\nEpoch 1/100\n60000/60000 [==============================] - 29s - loss: 0.9038 - acc: 0.7055 - val_loss: 0.2763 - val_acc: 0.9209\nEpoch 2/100\n60000/60000 [==============================] - 29s - loss: 0.4007 - acc: 0.8782 - val_loss: 0.2037 - val_acc: 0.9412\nEpoch 3/100\n60000/60000 [==============================] - 29s - loss: 0.3280 - acc: 0.9003 - val_loss: 0.1665 - val_acc: 0.9502\nEpoch 4/100\n60000/60000 [==============================] - 30s - loss: 0.2779 - acc: 0.9163 - val_loss: 0.1403 - val_acc: 0.9590\nEpoch 5/100\n60000/60000 [==============================] - 29s - loss: 0.2434 - acc: 0.9273 - val_loss: 0.1213 - val_acc: 0.9662\nEpoch 6/100\n60000/60000 [==============================] - 29s - loss: 0.2118 - acc: 0.9377 - val_loss: 0.1083 - val_acc: 0.9673\nEpoch 7/100\n60000/60000 [==============================] - 29s - loss: 0.1921 - acc: 0.9433 - val_loss: 0.0918 - val_acc: 0.9716\nEpoch 8/100\n60000/60000 [==============================] - 29s - loss: 0.1716 - acc: 0.9486 - val_loss: 0.0854 - val_acc: 0.9735\nEpoch 9/100\n60000/60000 
[==============================] - 29s - loss: 0.1554 - acc: 0.9532 - val_loss: 0.0745 - val_acc: 0.9770\nEpoch 10/100\n60000/60000 [==============================] - 29s - loss: 0.1453 - acc: 0.9560 - val_loss: 0.0688 - val_acc: 0.9780\nEpoch 11/100\n60000/60000 [==============================] - 29s - loss: 0.1331 - acc: 0.9599 - val_loss: 0.0639 - val_acc: 0.9795\nEpoch 12/100\n60000/60000 [==============================] - 29s - loss: 0.1277 - acc: 0.9614 - val_loss: 0.0596 - val_acc: 0.9806\nEpoch 13/100\n60000/60000 [==============================] - 30s - loss: 0.1170 - acc: 0.9641 - val_loss: 0.0577 - val_acc: 0.9813\nEpoch 14/100\n60000/60000 [==============================] - 29s - loss: 0.1130 - acc: 0.9665 - val_loss: 0.0536 - val_acc: 0.9824\nEpoch 15/100\n60000/60000 [==============================] - 29s - loss: 0.1062 - acc: 0.9679 - val_loss: 0.0520 - val_acc: 0.9827\nEpoch 16/100\n60000/60000 [==============================] - 29s - loss: 0.1005 - acc: 0.9697 - val_loss: 0.0491 - val_acc: 0.9838\nEpoch 17/100\n60000/60000 [==============================] - 29s - loss: 0.0951 - acc: 0.9705 - val_loss: 0.0479 - val_acc: 0.9842\nEpoch 18/100\n60000/60000 [==============================] - 29s - loss: 0.0906 - acc: 0.9726 - val_loss: 0.0457 - val_acc: 0.9844\nEpoch 19/100\n60000/60000 [==============================] - 29s - loss: 0.0884 - acc: 0.9733 - val_loss: 0.0444 - val_acc: 0.9848\nEpoch 20/100\n60000/60000 [==============================] - 29s - loss: 0.0853 - acc: 0.9746 - val_loss: 0.0435 - val_acc: 0.9855\nEpoch 21/100\n60000/60000 [==============================] - 30s - loss: 0.0843 - acc: 0.9751 - val_loss: 0.0408 - val_acc: 0.9852\nEpoch 22/100\n60000/60000 [==============================] - 29s - loss: 0.0799 - acc: 0.9762 - val_loss: 0.0399 - val_acc: 0.9859\nEpoch 23/100\n60000/60000 [==============================] - 29s - loss: 0.0767 - acc: 0.9769 - val_loss: 0.0395 - val_acc: 0.9870\nEpoch 24/100\n60000/60000 
[==============================] - 29s - loss: 0.0729 - acc: 0.9779 - val_loss: 0.0382 - val_acc: 0.9866\nEpoch 25/100\n60000/60000 [==============================] - 29s - loss: 0.0715 - acc: 0.9785 - val_loss: 0.0378 - val_acc: 0.9866\nEpoch 26/100\n60000/60000 [==============================] - 29s - loss: 0.0701 - acc: 0.9786 - val_loss: 0.0369 - val_acc: 0.9873\nEpoch 27/100\n60000/60000 [==============================] - 29s - loss: 0.0679 - acc: 0.9796 - val_loss: 0.0365 - val_acc: 0.9867\nEpoch 28/100\n60000/60000 [==============================] - 29s - loss: 0.0664 - acc: 0.9802 - val_loss: 0.0370 - val_acc: 0.9868\nEpoch 29/100\n60000/60000 [==============================] - 29s - loss: 0.0643 - acc: 0.9808 - val_loss: 0.0369 - val_acc: 0.9875\nEpoch 30/100\n60000/60000 [==============================] - 30s - loss: 0.0629 - acc: 0.9804 - val_loss: 0.0348 - val_acc: 0.9878\nEpoch 31/100\n60000/60000 [==============================] - 29s - loss: 0.0630 - acc: 0.9809 - val_loss: 0.0355 - val_acc: 0.9868\nEpoch 32/100\n60000/60000 [==============================] - 29s - loss: 0.0601 - acc: 0.9810 - val_loss: 0.0340 - val_acc: 0.9881\nEpoch 33/100\n60000/60000 [==============================] - 29s - loss: 0.0585 - acc: 0.9822 - val_loss: 0.0340 - val_acc: 0.9879\nEpoch 34/100\n60000/60000 [==============================] - 30s - loss: 0.0588 - acc: 0.9821 - val_loss: 0.0341 - val_acc: 0.9884\nEpoch 35/100\n60000/60000 [==============================] - 29s - loss: 0.0561 - acc: 0.9820 - val_loss: 0.0331 - val_acc: 0.9882\nEpoch 36/100\n60000/60000 [==============================] - 29s - loss: 0.0544 - acc: 0.9826 - val_loss: 0.0324 - val_acc: 0.9883\nEpoch 37/100\n60000/60000 [==============================] - 29s - loss: 0.0537 - acc: 0.9829 - val_loss: 0.0333 - val_acc: 0.9885\nEpoch 38/100\n60000/60000 [==============================] - 30s - loss: 0.0541 - acc: 0.9829 - val_loss: 0.0319 - val_acc: 0.9888\nEpoch 39/100\n60000/60000 
[==============================] - 29s - loss: 0.0518 - acc: 0.9839 - val_loss: 0.0313 - val_acc: 0.9886\nEpoch 40/100\n60000/60000 [==============================] - 29s - loss: 0.0495 - acc: 0.9844 - val_loss: 0.0313 - val_acc: 0.9893\nEpoch 41/100\n60000/60000 [==============================] - 29s - loss: 0.0495 - acc: 0.9837 - val_loss: 0.0322 - val_acc: 0.9885\nEpoch 42/100\n60000/60000 [==============================] - 29s - loss: 0.0496 - acc: 0.9848 - val_loss: 0.0315 - val_acc: 0.9898\nEpoch 43/100\n60000/60000 [==============================] - 29s - loss: 0.0497 - acc: 0.9849 - val_loss: 0.0305 - val_acc: 0.9897\nEpoch 44/100\n60000/60000 [==============================] - 29s - loss: 0.0488 - acc: 0.9846 - val_loss: 0.0308 - val_acc: 0.9903\nEpoch 45/100\n60000/60000 [==============================] - 29s - loss: 0.0476 - acc: 0.9851 - val_loss: 0.0306 - val_acc: 0.9890\nEpoch 46/100\n60000/60000 [==============================] - 29s - loss: 0.0454 - acc: 0.9854 - val_loss: 0.0305 - val_acc: 0.9892\nEpoch 47/100\n60000/60000 [==============================] - 29s - loss: 0.0451 - acc: 0.9857 - val_loss: 0.0307 - val_acc: 0.9894\nEpoch 48/100\n" ], [ "# import matplotlib.pyplot as plt\n# %matplotlib inline\n\nplt.figure()\nplt.xlabel('Epochs')\nplt.ylabel('Loss')\nplt.plot(hist.history['loss'])\nplt.plot(hist.history['val_loss'])\nplt.legend(['Training', 'Validation'])\n\nplt.figure()\nplt.xlabel('Epochs')\nplt.ylabel('Accuracy')\nplt.plot(hist.history['acc'])\nplt.plot(hist.history['val_acc'])\nplt.legend(['Training', 'Validation'], loc='lower right')", "_____no_output_____" ], [ "#Evaluating the model on the test data \nscore, accuracy = model.evaluate(X_test, Y_test, verbose=0)\nprint('Test score:', score)\nprint('Test accuracy:', accuracy)", "Test score: 0.0288531594841\nTest accuracy: 0.9914\n" ], [ "slice = 20\npredicted = model.predict(X_test[:slice]).argmax(-1)\n\nplt.figure(figsize=(16,8))\nfor i in range(slice):\n plt.subplot(1, slice, 
i+1)\n plt.imshow(X_test_orig[i], interpolation='nearest')\n plt.text(0, 0, predicted[i], color='black', \n bbox=dict(facecolor='white', alpha=1))\n plt.axis('off')", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "## Understanding Convolutional Layers Structure\n\nIn this exercise we want to build a (_quite shallow_) network which contains two \n[Convolution, Convolution, MaxPooling] stages, and two Dense layers.\n\nTo test a different optimizer, we will use [AdaDelta](http://keras.io/optimizers/), which is a bit more complex than the simple Vanilla SGD with momentum.", "_____no_output_____" ] ], [ [ "from keras.optimizers import Adadelta", "_____no_output_____" ], [ "input_shape = shape_ord\nnb_classes = 10", "_____no_output_____" ] ], [ [ "### Understanding layer shapes\n\nAn important feature of Keras layers is that each of them has an `input_shape` attribute, which you can use to visualize the shape of the input tensor, and an `output_shape` attribute, for inspecting the shape of the output tensor.\n\nAs we can see, the input shape of the first convolutional layer corresponds to the `input_shape` attribute (which must be specified by the user). \n\nIn this case, it is a `28x28` image with three color channels. \n\nSince this convolutional layer has the `padding` set to `same`, its output width and height will remain the same, and the number of output channel will be equal to the number of filters learned by the layer, 16. \n\nThe following convolutional layer, instead, have the default `padding`, and therefore reduce width and height by $(k-1)$, where $k$ is the size of the kernel. \n\n`MaxPooling` layers, instead, reduce width and height of the input tensor, but keep the same number of channels. 
\n\n`Activation` layers, of course, don't change the shape.", "_____no_output_____" ] ], [ [ "model.summary()", "_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nconv2d_6 (Conv2D) (None, 26, 26, 32) 320 \n_________________________________________________________________\nactivation_13 (Activation) (None, 26, 26, 32) 0 \n_________________________________________________________________\nconv2d_7 (Conv2D) (None, 24, 24, 32) 9248 \n_________________________________________________________________\nactivation_14 (Activation) (None, 24, 24, 32) 0 \n_________________________________________________________________\nmax_pooling2d_2 (MaxPooling2 (None, 12, 12, 32) 0 \n_________________________________________________________________\ndropout_4 (Dropout) (None, 12, 12, 32) 0 \n_________________________________________________________________\nflatten_5 (Flatten) (None, 4608) 0 \n_________________________________________________________________\ndense_8 (Dense) (None, 128) 589952 \n_________________________________________________________________\nactivation_15 (Activation) (None, 128) 0 \n_________________________________________________________________\ndropout_5 (Dropout) (None, 128) 0 \n_________________________________________________________________\ndense_9 (Dense) (None, 10) 1290 \n_________________________________________________________________\nactivation_16 (Activation) (None, 10) 0 \n=================================================================\nTotal params: 600,810\nTrainable params: 600,810\nNon-trainable params: 0\n_________________________________________________________________\n" ], [ "for i, layer in enumerate(model.layers):\n print (\"Layer\", i, \"\\t\", layer.name, \"\\t\\t\", layer.input_shape, \"\\t\", layer.output_shape)", "Layer 0 \t conv2d_6 \t\t (None, 28, 28, 1) \t (None, 26, 26, 32)\nLayer 1 \t activation_13 \t\t (None, 
26, 26, 32) \t (None, 26, 26, 32)\nLayer 2 \t conv2d_7 \t\t (None, 26, 26, 32) \t (None, 24, 24, 32)\nLayer 3 \t activation_14 \t\t (None, 24, 24, 32) \t (None, 24, 24, 32)\nLayer 4 \t max_pooling2d_2 \t\t (None, 24, 24, 32) \t (None, 12, 12, 32)\nLayer 5 \t dropout_4 \t\t (None, 12, 12, 32) \t (None, 12, 12, 32)\nLayer 6 \t flatten_5 \t\t (None, 12, 12, 32) \t (None, 4608)\nLayer 7 \t dense_8 \t\t (None, 4608) \t (None, 128)\nLayer 8 \t activation_15 \t\t (None, 128) \t (None, 128)\nLayer 9 \t dropout_5 \t\t (None, 128) \t (None, 128)\nLayer 10 \t dense_9 \t\t (None, 128) \t (None, 10)\nLayer 11 \t activation_16 \t\t (None, 10) \t (None, 10)\n" ] ], [ [ "### Understanding weights shape\n\nIn the same way, we can visualize the shape of the weights learned by each layer. \n\nIn particular, Keras lets you inspect weights by using the `get_weights` method of a layer object. \n\nThis will return a list with two elements, the first one being the **weight tensor** and the second one being the **bias vector**.\n\nIn particular:\n\n- **MaxPooling layer** don't have any weight tensor, since they don't have learnable parameters. \n\n\n- **Convolutional layers**, instead, learn a $(n_o, n_i, k, k)$ weight tensor, where $k$ is the size of the kernel, $n_i$ is the number of channels of the input tensor, and $n_o$ is the number of filters to be learned. \n\nFor each of the $n_o$ filters, a bias is also learned. \n\n\n- **Dense layers** learn a $(n_i, n_o)$ weight tensor, where $n_o$ is the output size and $n_i$ is the input size of the layer. 
Each of the $n_o$ neurons also has a bias.", "_____no_output_____" ] ], [ [ "for i, layer in enumerate(model.layers):\n if len(layer.get_weights()) > 0:\n W, b = layer.get_weights()\n print(\"Layer\", i, \"\\t\", layer.name, \"\\t\\t\", W.shape, \"\\t\", b.shape)", "Layer 0 \t conv2d_6 \t\t (3, 3, 1, 32) \t (32,)\nLayer 2 \t conv2d_7 \t\t (3, 3, 32, 32) \t (32,)\nLayer 7 \t dense_8 \t\t (4608, 128) \t (128,)\nLayer 10 \t dense_9 \t\t (128, 10) \t (10,)\n" ] ], [ [ "# Batch Normalisation", "_____no_output_____" ], [ "Normalize the activations of the previous layer at each batch, i.e. applies a transformation that maintains the mean activation close to 0 and the activation standard deviation close to 1.", "_____no_output_____" ], [ "## How to BatchNorm in Keras", "_____no_output_____" ], [ "```python\nfrom keras.layers.normalization import BatchNormalization\n\nBatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, \n beta_initializer='zeros', gamma_initializer='ones', moving_mean_initializer='zeros',\n moving_variance_initializer='ones', beta_regularizer=None, gamma_regularizer=None,\n beta_constraint=None, gamma_constraint=None)\n```\n\n#### Arguments\n\n<ul>\n<li><strong>axis</strong>: Integer, the axis that should be normalized\n (typically the features axis).\n For instance, after a <code>Conv2D</code> layer with\n <code>data_format=\"channels_first\"</code>,\n set <code>axis=1</code> in <code>BatchNormalization</code>.</li>\n<li><strong>momentum</strong>: Momentum for the moving average.</li>\n<li><strong>epsilon</strong>: Small float added to variance to avoid dividing by zero.</li>\n<li><strong>center</strong>: If True, add offset of <code>beta</code> to normalized tensor.\n If False, <code>beta</code> is ignored.</li>\n<li><strong>scale</strong>: If True, multiply by <code>gamma</code>.\n If False, <code>gamma</code> is not used.\n When the next layer is linear (also e.g. 
<code>nn.relu</code>),\n this can be disabled since the scaling\n will be done by the next layer.</li>\n<li><strong>beta_initializer</strong>: Initializer for the beta weight.</li>\n<li><strong>gamma_initializer</strong>: Initializer for the gamma weight.</li>\n<li><strong>moving_mean_initializer</strong>: Initializer for the moving mean.</li>\n<li><strong>moving_variance_initializer</strong>: Initializer for the moving variance.</li>\n<li><strong>beta_regularizer</strong>: Optional regularizer for the beta weight.</li>\n<li><strong>gamma_regularizer</strong>: Optional regularizer for the gamma weight.</li>\n<li><strong>beta_constraint</strong>: Optional constraint for the beta weight.</li>\n<li><strong>gamma_constraint</strong>: Optional constraint for the gamma weight.</li>\n</ul>", "_____no_output_____" ], [ "### Excercise", "_____no_output_____" ] ], [ [ "# Try to add a new BatchNormalization layer to the Model \n# (after the Dropout layer) - before or after the ReLU Activation", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ] ]
4a2c25f0fb9681238bace91b0d1fbb15e33b94d2
3,330
ipynb
Jupyter Notebook
_ipynb/.ipynb_checkpoints/2021-01-12-code-snippet-checkpoint.ipynb
ralasun/ralasun.github.io
049b2641ec9e4dd9cdeb94212b6f0fa8d9cb5c18
[ "MIT" ]
1
2022-01-13T07:36:07.000Z
2022-01-13T07:36:07.000Z
_ipynb/.ipynb_checkpoints/2021-01-12-code-snippet-checkpoint.ipynb
ralasun/ralasun.github.io
049b2641ec9e4dd9cdeb94212b6f0fa8d9cb5c18
[ "MIT" ]
2
2020-07-11T03:12:21.000Z
2021-06-30T08:47:16.000Z
_ipynb/.ipynb_checkpoints/2021-01-12-code-snippet-checkpoint.ipynb
ralasun/ralasun.github.io
049b2641ec9e4dd9cdeb94212b6f0fa8d9cb5c18
[ "MIT" ]
null
null
null
22.348993
99
0.449249
[ [ [ "---\nlayout : jupyter\ntitle : 코드 블럭 모음\ncategory : Code Snippet\ntags : python\n---", "_____no_output_____" ] ], [ [ "***\n개인적으로 기억해두면 좋을 코드 블럭 모음입니다.\n***", "_____no_output_____" ], [ "## 모델안에서 tensor flat하는 방법", "_____no_output_____" ] ], [ [ "import torch\nfrom torch import nn\nimport numpy as np", "_____no_output_____" ], [ "##Pytorch예제\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n \n #1 input image channel, 6 output channels, 3x3 square convolution\n # kernel\n self.conv1 = nn.Conv2d(1, 6, 3)\n self.conv2 = nn.Conv2d(6, 16, 3)\n #an affine operation : y = Wx + b\n self.fc1 = nn.Linear(16*6*6, 120)\n self.fc2 = nn.Linear(120, 84)\n self.fc3 = nn.Linear(84, 10)\n \n def forward(self, x):\n x = F.max_pool2d(F.relu(self.conv1(x)), (2,2))\n x = F.max_pool2d(F.relu(self.conv2(x)), 2) #pooling window가 정사각형이면 싱글 숫자만 넣어도 상관없음\n x = x.view(-1, self.num_flat_features(x))\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n return x\n \n def num_flat_features(self, x):\n \"\"\"\n 이 부분 기억하기!! 자주 사용하는 기법 중 하나!\n \"\"\"\n size = x.size()[1:]\n num_features = 1\n for s in size:\n num_features *= s\n return num_features", "_____no_output_____" ] ], [ [ "위의 코드블럭은 텐서플로우에서도 활용도가 있을 것 같음", "_____no_output_____" ], [ "## accuracy 함수", "_____no_output_____" ] ], [ [ "def accuracy(out, yb):\n preds = torch.argmax(out, dim=1)\n return (preds == yb).float().mean() ", "_____no_output_____" ] ], [ [ "# 1 epoch내에서 모든 데이터를 훑기 위한 방법", "_____no_output_____" ] ], [ [ "# n = 데이터 전체 갯수 , bs = 배치사이즈\nfor epoch in range(epochs):\n for i in range((n-1)//bs+1):\n ...", "_____no_output_____" ] ] ]
[ "raw", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "raw" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
4a2c2e5aae4b68dd38e56c54f7ad98112b8d8c9d
22,651
ipynb
Jupyter Notebook
research/dataset_structure.ipynb
samuelsinayoko/kaggle-housing-prices
be54566d9988b1520abda78b75e447143ea77fbe
[ "MIT" ]
1
2016-12-13T20:12:56.000Z
2016-12-13T20:12:56.000Z
research/dataset_structure.ipynb
samuelsinayoko/kaggle-housing-prices
be54566d9988b1520abda78b75e447143ea77fbe
[ "MIT" ]
1
2016-10-14T17:50:22.000Z
2016-10-14T17:50:22.000Z
research/dataset_structure.ipynb
samuelsinayoko/kaggle-housing-prices
be54566d9988b1520abda78b75e447143ea77fbe
[ "MIT" ]
3
2016-10-10T15:56:24.000Z
2020-05-31T23:42:18.000Z
20.818934
335
0.535694
[ [ [ "# Exploring datastructures for dataset\nA Pandas exploration. Find the best datastructure to explore and transform the dataset (both training and test dataframes). Use case:\n- find all numerical features (filtering)\n- transform all numerical features (e.g. take square)\n- replace NaN values for a numerical feature\n- plot distribution for a column in the training dataset\n", "_____no_output_____" ] ], [ [ "import sys\nimport os\n\nimport pandas as pd\nimport seaborn as sns", "_____no_output_____" ], [ "sys.path.insert(1, os.path.join(sys.path[0], '..')) # add parent directory to path\nimport samlib", "_____no_output_____" ] ], [ [ "## Using `samlib.DataSet`\nOriginal approach used in `data_exploration_numerical_features`\n- class that contains 3 dataframes attributes (train, test, df, where df is the full dataframe\n- whenever df is updated, the train and test frames are updated\nThis allows to work with the training dataset, and to update/transform the full dataset if necessary, so that the transformation is also applied to the test dataframe that will be needed for the final prediction.", "_____no_output_____" ] ], [ [ "raw_train = pd.read_csv('../data/train_prepared_light.csv')\nraw_test = pd.read_csv('../data/test_prepared_light.csv')\nds = samlib.DataSet(raw_train, raw_test)", "_____no_output_____" ], [ "is_num = ds.dtypes != object\ndfnum = ds.df.loc[:, is_num]\ndfnum.head()", "_____no_output_____" ], [ "ds.apply(lambda df: df.loc[:, is_num]**2, inplace=True)\nds.df.head()", "_____no_output_____" ], [ "ds.df.MasVnrArea.isnull().sum()", "_____no_output_____" ], [ "ds.df.loc[ds.df.MasVnrArea.isnull(), 'MasVnrArea'] = 0", "_____no_output_____" ], [ "ds.df.MasVnrArea.isnull().sum()", "_____no_output_____" ], [ "sns.distplot(ds.train.GrLivArea)", "_____no_output_____" ] ], [ [ "Works but not so great because requires a new dependency (samlib) and a different way of working compared to Pandas. 
Need to learn the behaviour of the DataSet class, and remember to use the `apply` method otherwise the `train` and `test` sets are *not* going to be kept in sync (for example when assigning to a slice of `ds.df`)", "_____no_output_____" ], [ "## Using an extra categorical `dataset` column\n", "_____no_output_____" ] ], [ [ "traindf = raw_train.copy()\ntestdf = raw_test.copy()\ntraindf['dataset'] = 'train'\ntestdf['dataset'] = 'test'\ndf = pd.concat([traindf, testdf])", "_____no_output_____" ] ], [ [ "Then we can filter using the value of the `dataset` column", "_____no_output_____" ] ], [ [ "train = df['dataset'] == 'train'\ntest = ~train", "_____no_output_____" ], [ "df[train].head()", "_____no_output_____" ], [ "df[test].head()", "_____no_output_____" ], [ "is_num = df.dtypes != object\ndfnum = df.loc[:, is_num]\ndfnum.head()", "_____no_output_____" ], [ "df.loc[:, is_num] = dfnum **2\ndf.head()", "_____no_output_____" ], [ "df.MasVnrArea.isnull().sum()", "_____no_output_____" ], [ "df.loc[df.MasVnrArea.isnull(), 'MasVnrArea'] = 0", "_____no_output_____" ], [ "df.MasVnrArea.isnull().sum()", "_____no_output_____" ], [ "sns.distplot(df.loc[train, 'GrLivArea'])", "_____no_output_____" ] ], [ [ "Works quite well but takes a bit of work to setup and requires to keep two boolean series (`train` and `test`) to filter the dataset whenever needed. An improvement over `samlib.DataSet` though. ", "_____no_output_____" ], [ "## Using Panel object", "_____no_output_____" ] ], [ [ "panel = pd.Panel({'train':raw_train.copy(), 'test': raw_test.copy()})", "_____no_output_____" ], [ "panel.train.head()", "_____no_output_____" ], [ "panel.test.head()", "_____no_output_____" ] ], [ [ "The above is very nice, but unfortunately a panel isn't a dataframe so we can't really get a view of the full data. 
Also we seem to have lost all the data types:", "_____no_output_____" ] ], [ [ "is_num = panel.train.dtypes != object\nany(is_num)", "_____no_output_____" ] ], [ [ "So we must keep the raw data if we want to filter the numerical columns :-(", "_____no_output_____" ] ], [ [ "is_num = raw_train.dtypes != object\nnumpanel = panel.loc[:, :, is_num]", "_____no_output_____" ], [ "numpanel", "_____no_output_____" ], [ "numpanel.train.head()", "_____no_output_____" ] ], [ [ "Finally this raises an error!", "_____no_output_____" ] ], [ [ "try:\n panel.loc[:, :, is_num] = panel.loc[:, :, is_num]**2\nexcept NotImplementedError as err:\n print('raises NotImplementedError: ', err)", "_____no_output_____" ] ], [ [ "Looked promising initially but not really workable as we can't assign an indexer with a Panel yet. We really need a dataframe object.", "_____no_output_____" ], [ "## Using multi-index on rows", "_____no_output_____" ] ], [ [ "traindf = raw_train.copy()\ntestdf = raw_test.copy()\ndf = pd.concat([traindf, testdf], keys=('train', 'test'))\ndf.head()", "_____no_output_____" ], [ "df.tail()", "_____no_output_____" ] ], [ [ "The test and train datasets can be accessed by filtering the index. Nice but not quite as compact as `df[train]`, though we don't need the extra `train` (and `test`) masks.", "_____no_output_____" ] ], [ [ "df.loc['train'].head()", "_____no_output_____" ], [ "is_num = df.dtypes != object\ndfnum = df.loc[:, is_num]\ndfnum.head()", "_____no_output_____" ], [ "df.loc[:, is_num] = dfnum **2\ndf.head()", "_____no_output_____" ], [ "df.MasVnrArea.isnull().sum()", "_____no_output_____" ], [ "df.loc[df.MasVnrArea.isnull(), 'MasVnrArea'] = 0", "_____no_output_____" ], [ "df.MasVnrArea.isnull().sum()", "_____no_output_____" ], [ "sns.distplot(df.GrLivArea.train)", "_____no_output_____" ] ], [ [ "Another way of doing it", "_____no_output_____" ] ], [ [ "sns.distplot(df.loc['train', 'GrLivArea'])", "_____no_output_____" ] ], [ [ "Works very well. 
", "_____no_output_____" ], [ "## Using multi-index on columns (swapped levels)\nSwap the levels to fix the issue with filtering on features in the column multi-index case.", "_____no_output_____" ] ], [ [ "traindf = raw_train.copy()\ntestdf = raw_test.copy()\ndf = pd.concat([traindf, testdf], axis=1, keys=('train','test')).swaplevel(axis=1)\ndf.sort_index(axis=1, inplace=True) # needed otherwise we get in trouble for slicing", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ], [ "df.tail()", "_____no_output_____" ] ], [ [ "The test and train datasets can be accessed by filtering the index. Nice but not quite as compact as `df[train]`, though we don't need the extra `train` (and `test`) masks.", "_____no_output_____" ] ], [ [ "df.xs('train', level=1, axis=1).head() # or use IndexSlice", "_____no_output_____" ] ], [ [ "We must also deal with the extra index level when filtering, but it's not too bad.", "_____no_output_____" ] ], [ [ "is_num = df.dtypes != object\ndfnum = df.loc[:, is_num]\ndfnum.head()", "_____no_output_____" ], [ "df.loc[:, is_num] = dfnum **2\ndf.head()", "_____no_output_____" ] ], [ [ "Getting nulls and setting nulls (without fillna) is a little tricky. Boolean indexing is (by definition) meant to work over rows, not rows *and columns. We can use boolean arrays with DataFrame.mask though. But this is definitely something to keep in mind when using multi indexing over columns.", "_____no_output_____" ] ], [ [ "df.MasVnrArea = df.MasVnrArea.mask(df.MasVnrArea.isnull(), 0)", "_____no_output_____" ], [ "df.MasVnrArea.tail()\n", "_____no_output_____" ] ], [ [ "Visualizing the training dataset is pretty easy. 
", "_____no_output_____" ] ], [ [ "sns.distplot(df.GrLivArea.train)", "_____no_output_____" ] ], [ [ "## Using multi-index on columns\nMakes it easier to filter on dataset (train or test) and has the advantage of being a dataframe.", "_____no_output_____" ] ], [ [ "traindf = raw_train.copy()\ntestdf = raw_test.copy()\ndf = pd.concat([traindf, testdf], axis=1, keys=('train','test'))\ndf.head()", "_____no_output_____" ], [ "df.tail()", "_____no_output_____" ] ], [ [ "The test and train datasets can be accessed by filtering the index. Nice but not quite as compact as `df[train]`, though we don't need the extra `train` (and `test`) masks.", "_____no_output_____" ] ], [ [ "df.train.head()", "_____no_output_____" ] ], [ [ "We must also deal with the extra index level when filtering, but it's not too bad.", "_____no_output_____" ] ], [ [ "is_num = df.dtypes != object\ndfnum = df.loc[:, is_num]\ndfnum.head()", "_____no_output_____" ], [ "df.loc[:, is_num] = dfnum **2\ndf.head()", "_____no_output_____" ] ], [ [ "Definitely harder to slice accross columns. It's possible (unlike with panels), but hard (requires pd.IndexSlice).", "_____no_output_____" ] ], [ [ "df.loc[:, pd.IndexSlice[:, 'MasVnrArea']].isnull().sum()", "_____no_output_____" ] ], [ [ "You can also use a cross section to get the data more easily, but you can't use this for sssignments", "_____no_output_____" ] ], [ [ "df.xs('MasVnrArea', axis=1, level=1).head()", "_____no_output_____" ], [ "df.loc[:, pd.IndexSlice[:, 'MasVnrArea']] = 0", "_____no_output_____" ], [ "df.loc[:, pd.IndexSlice[:, 'MasVnrArea']].isnull().sum()", "_____no_output_____" ] ], [ [ "Visualizing the training dataset is pretty easy. 
", "_____no_output_____" ] ], [ [ "sns.distplot(df.train.GrLivArea)", "_____no_output_____" ] ], [ [ "## Using dataset type as label", "_____no_output_____" ], [ "** Method 1: add columns then use set_index **", "_____no_output_____" ] ], [ [ "traindf = raw_train.copy()\ntestdf = raw_test.copy()\ntraindf['Dataset'] = 'train'\ntestdf['Dataset'] = 'test'\ndf = pd.concat([traindf, testdf])\ndf.set_index('Dataset').head()", "_____no_output_____" ] ], [ [ "** Method 2: use concat and droplevel **", "_____no_output_____" ] ], [ [ "traindf = raw_train.copy()\ntestdf = raw_test.copy()\ndf = pd.concat([traindf, testdf], keys=('train', 'test'))\ndf.index = df.index.droplevel(1)\ndf.head()\n", "_____no_output_____" ], [ "df.tail()", "_____no_output_____" ] ], [ [ "The test and train datasets can be accessed by using `loc` .", "_____no_output_____" ] ], [ [ "df.loc['train'].head()", "_____no_output_____" ] ], [ [ "Filtering columns is very easy", "_____no_output_____" ] ], [ [ "is_num = df.dtypes != object\ndfnum = df.loc[:, is_num]\ndfnum.head()", "_____no_output_____" ], [ "df.loc[:, is_num] = dfnum **2\ndf.head()", "_____no_output_____" ], [ "df.MasVnrArea.isnull().sum()", "_____no_output_____" ], [ "df.loc[df.MasVnrArea.isnull(), 'MasVnrArea'] = 0", "_____no_output_____" ], [ "df.MasVnrArea.isnull().sum()", "_____no_output_____" ], [ "sns.distplot(df.GrLivArea.train)", "_____no_output_____" ] ], [ [ "Another way of doing it", "_____no_output_____" ] ], [ [ "sns.distplot(df.loc['train', 'GrLivArea'])", "_____no_output_____" ] ], [ [ "## Discussion\n### Samlib\n- Pros: does most of what we need pretty easily \n- Cons: third party dependency, hackish, introduces new structure with weird behaviour (assining to a slice doesn't update training and test datasets) \n- Score: 2/5\n### Extra categorical `dataset` column\n- Pros: works very well and syntax is compact\n- Cons: a bit long to setup, requires to maintain mask variables `test` and `train` alongside the data.\n- Score; 
4/5\n### Panel\ndoesn't work\n### Multi-index on rows\n- Pros: excellent, easy to filter on colums and on dataset\n- Cons: none\n- Score: 5/5\n### Multi-index on columns\n- Pros: easy to filter on train/test sets\n- Cons: hard to transform features for both datasets + would be weird if train and test sets have widely different numbers of indices\n- Score: 1/5\n### Dataset label\n- Pros: index is not a multi index\n- Cons: a bit hard to setup and index looks a bit weird as all samples have the same index\n- Score: 4/5", "_____no_output_____" ], [ "## Conclusion\nUse `pd.concat([traindf, testdf], keys=['train', 'test'])` to merge the datasets into one dataframe while making it easy to visualize/process features on only the training dataset.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
4a2c2f918f2f8b9dc40fbec1e24d240b8e7431d2
2,450
ipynb
Jupyter Notebook
notebooks/learning/animation.ipynb
maruel/deepdream
39e1883d7ee0c4393ba0b35bd7cc9ac2066e4e72
[ "Apache-2.0" ]
1
2022-02-01T21:27:52.000Z
2022-02-01T21:27:52.000Z
notebooks/learning/animation.ipynb
maruel/deepdream
39e1883d7ee0c4393ba0b35bd7cc9ac2066e4e72
[ "Apache-2.0" ]
null
null
null
notebooks/learning/animation.ipynb
maruel/deepdream
39e1883d7ee0c4393ba0b35bd7cc9ac2066e4e72
[ "Apache-2.0" ]
null
null
null
24.019608
84
0.524898
[ [ [ "# Enable interactive plot\n# %config InlineBackend.figure_formats = ['retina'] #, 'svg']\n%matplotlib widget\n# %matplotlib inline\n\nimport matplotlib.pyplot as plt\nfrom matplotlib import animation\nimport numpy as np", "_____no_output_____" ], [ "TWOPI = 2*np.pi\n\nfig, ax = plt.subplots()\nt = np.arange(0.0, TWOPI, 0.001)\ns = np.sin(t)\nl = plt.plot(t, s)\nax = plt.axis([0,TWOPI, -1, 1])\nredDot, = plt.plot([0], [np.sin(0)], 'ro')\n\ndef animate(i):\n redDot.set_data(i, np.sin(i))\n return redDot,\n\nanim = animation.FuncAnimation(\n fig, animate, frames=np.arange(0.0, TWOPI, 0.1),\n interval=10, blit=True, repeat=True)\nplt.show()", "_____no_output_____" ], [ "TWOPI = 2*np.pi\n\nfig, ax = plt.subplots()\nline, = ax.plot([])\nax.set_xlim(0, TWOPI)\nax.set_ylim(-1.1, 1.1)\nnbFrames = 100\n\ndef animate(frame_num):\n x = np.linspace(0, TWOPI, num=nbFrames)\n y = np.sin(x + TWOPI * frame_num/nbFrames)\n line.set_data((x, y))\n return line\n\nanim = animation.FuncAnimation(fig, animate, frames=nbFrames, interval=100)\nplt.show()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code" ] ]
4a2c3f60754992775e2e6024831b91c1082c4bd4
107,648
ipynb
Jupyter Notebook
tutorials/01_NeMo_Models.ipynb
antrikshmisri/NeMo
17fc2541172dd3d781bfa0b5a1fa41576d4bfe21
[ "Apache-2.0" ]
null
null
null
tutorials/01_NeMo_Models.ipynb
antrikshmisri/NeMo
17fc2541172dd3d781bfa0b5a1fa41576d4bfe21
[ "Apache-2.0" ]
null
null
null
tutorials/01_NeMo_Models.ipynb
antrikshmisri/NeMo
17fc2541172dd3d781bfa0b5a1fa41576d4bfe21
[ "Apache-2.0" ]
null
null
null
38.092003
482
0.533795
[ [ [ "\"\"\"\nYou can run either this notebook locally (if you have all the dependencies and a GPU) or on Google Colab.\n\nInstructions for setting up Colab are as follows:\n1. Open a new Python 3 notebook.\n2. Import this notebook from GitHub (File -> Upload Notebook -> \"GITHUB\" tab -> copy/paste GitHub URL)\n3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select \"GPU\" for hardware accelerator)\n4. Run this cell to set up dependencies.\n\"\"\"\n# If you're using Google Colab and not running locally, run this cell.\n\n## Install dependencies\n!pip install wget\n!apt-get install sox libsndfile1 ffmpeg\n!pip install unidecode\n\n# ## Install NeMo\nBRANCH = 'v1.0.0b2'\n!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]\n\n## Install TorchAudio\n!pip install torchaudio>=0.6.0 -f https://download.pytorch.org/whl/torch_stable.html\n\n## Grab the config we'll use in this example\n!mkdir configs", "_____no_output_____" ] ], [ [ "# minGPT License\n\n*This notebook port's the [minGPT codebase](https://github.com/karpathy/minGPT) into equivalent NeMo code. 
The license for minGPT has therefore been attached here.*\n\n```\nThe MIT License (MIT) Copyright (c) 2020 Andrej Karpathy\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n```", "_____no_output_____" ], [ "# torch-rnn License\n*This notebook utilizes the `tiny-shakespeare` dataset from the [torch-rnn](https://github.com/jcjohnson/torch-rnn) codebase. 
The license for torch-rnn has therefore been attached here.*\n\n```\nThe MIT License (MIT)\n\nCopyright (c) 2016 Justin Johnson\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n```\n", "_____no_output_____" ], [ "-------\n\n***Note: This notebook will intentionally introduce some errors to show the power of Neural Types or model development concepts, inside the cells marked with `[ERROR CELL]`. The explanation of and resolution of such errors can be found in the subsequent cells.***\n\n-----", "_____no_output_____" ], [ "# The NeMo Model\n\nNeMo comes with many state of the art pre-trained Conversational AI models for users to quickly be able to start training and fine-tuning on their own datasets. \n\nIn the previous [NeMo Primer](https://colab.research.google.com/github/NVIDIA/NeMo/blob/main/tutorials/00_NeMo_Primer.ipynb) notebook, we learned how to download pretrained checkpoints with NeMo and we also discussed the fundamental concepts of the NeMo Model. 
The previous tutorial showed us how to use, modify, save, and restore NeMo Models.\n\nIn this tutorial we will learn how to develop a non-trivial NeMo model from scratch. This helps us to understand the underlying components and how they interact with the overall PyTorch ecosystem.\n", "_____no_output_____" ], [ "-------\nAt the heart of NeMo lies the concept of the \"Model\". For NeMo developers, a \"Model\" is the neural network(s) as well as all the infrastructure supporting those network(s), wrapped into a singular, cohesive unit. As such, most NeMo models are constructed to contain the following out of the box (note: some NeMo models support additional functionality specific to the domain/use case!) - \n\n - Neural Network architecture - all of the modules that are required for the model.\n\n - Dataset + Data Loaders - all of the components that prepare the data for consumption during training or evaluation.\n\n - Preprocessing + Postprocessing - any of the components that process the datasets so the modules can easily consume them.\n\n - Optimizer + Schedulers - basic defaults that work out of the box and allow further experimentation with ease.\n\n - Any other supporting infrastructure - tokenizers, language model configuration, data augmentation, etc.", "_____no_output_____" ], [ "# Constructing a NeMo Model\n\nNeMo \"Models\" are comprised of a few key components, so let's tackle them one by one. We will attempt to go in the order that's stated above.\n\nTo make this slightly challenging, let's port a model from the NLP domain this time. Transformers are all the rage, with BERT and his friends from Sesame Street forming the core infrastructure for many NLP tasks. \n\nAn excellent (yet simple) implementation of one such model - GPT - can be found in the `minGPT` repository - https://github.com/karpathy/minGPT. 
While the script is short, it explains and succinctly explores all of the core components we expect in a NeMo model, so it's a prime candidate for NeMo! Sidenote: NeMo supports GPT in its NLP collection, and as such, this notebook aims to be an in-depth development walkthrough for such models.\n\nIn the following notebook, we will attempt to port minGPT to NeMo, and along the way, discuss some core concepts of NeMo itself.", "_____no_output_____" ], [ "# Constructing the Neural Network Architecture\n\nFirst, on the list - the neural network that forms the backbone of the NeMo Model.\n\nSo how do we create such a model? Using PyTorch! As you'll see below, NeMo components are compatible with all of PyTorch, so you can augment your workflow without ever losing the flexibility of PyTorch itself!\n\nLet's start with a couple of imports - ", "_____no_output_____" ] ], [ [ "import torch\nimport nemo\nfrom nemo.core import NeuralModule\nfrom nemo.core import typecheck", "_____no_output_____" ] ], [ [ "## Neural Module\nWait, what's `NeuralModule`? Where is the wonderful `torch.nn.Module`? \n\n`NeuralModule` is a subclass of `torch.nn.Module`, and it brings with it a few additional functionalities.\n\nIn addition to being a `torch.nn.Module`, thereby being entirely compatible with the PyTorch ecosystem, it has the following capabilities - \n\n1) `Typing` - It adds support for `Neural Type Checking` to the model. `Typing` is optional but quite useful, as we will discuss below!\n\n2) `Serialization` - Remember the `OmegaConf` config dict and YAML config files? Well, all `NeuralModules` inherently supports serialization/deserialization from such config dictionaries!\n\n3) `FileIO` - This is another entirely optional file serialization system. Does your `NeuralModule` require some way to preserve data that can't be saved into a PyTorch checkpoint? Write your serialization and deserialization logic in two handy methods! 
**Note**: When you create the final NeMo Model, this will be implemented for you! Automatic serialization and deserialization support of NeMo models!\n", "_____no_output_____" ] ], [ [ "class MyEmptyModule(NeuralModule):\n\n def forward(self):\n print(\"Neural Module ~ hello world!\")", "_____no_output_____" ], [ "x = MyEmptyModule()\nx()", "_____no_output_____" ] ], [ [ "## Neural Types\n\nNeural Types? You might be wondering what that term refers to.\n\nAlmost all NeMo components inherit the class `Typing`. `Typing` is a simple class that adds two properties to the class that inherits it - `input_types` and `output_types`. A NeuralType, by its shortest definition, is simply a semantic tensor. It contains information regarding the semantic shape the tensor should hold, as well as the semantic information of what that tensor represents. That's it.\n\nSo what semantic information does such a typed tensor contain? Let's take an example below.\n\n\n", "_____no_output_____" ], [ "------\nAcross the Deep Learning domain, we often encounter cases where tensor shapes may match, but the semantics don't match at all. For example take a look at the following rank 3 tensors - ", "_____no_output_____" ] ], [ [ "# Case 1:\nembedding = torch.nn.Embedding(num_embeddings=10, embedding_dim=30)\nx = torch.randint(high=10, size=(1, 5))\nprint(\"x :\", x)\nprint(\"embedding(x) :\", embedding(x).shape)", "_____no_output_____" ], [ "# Case 2\nlstm = torch.nn.LSTM(1, 30, batch_first=True)\nx = torch.randn(1, 5, 1)\nprint(\"x :\", x)\nprint(\"lstm(x) :\", lstm(x)[0].shape) # Let's take all timestep outputs of the LSTM", "_____no_output_____" ] ], [ [ "-------\nAs you can see, the output of Case 1 is an embedding of shape [1, 5, 30], and the output of Case 2 is an LSTM output (state `h` over all time steps), also of the same shape [1, 5, 30].\n\nDo they have the same shape? **Yes**. <br>If we do a Case 1 .shape == Case 2 .shape, will we get True as an output? **Yes**. 
<br>\nDo they represent the same concept? **No**. <br>\n\n\nThe ability to recognize that the two tensors do not represent the same semantic information is precisely why we utilize Neural Types. It contains the information of both the shape and the semantic concept of what that tensor represents. If we performed a neural type check between the two outputs of those tensors, it would raise an error saying semantically they were different things (more technically, it would say that they are `INCOMPATIBLE` with each other)!\n", "_____no_output_____" ], [ "--------\n\nYou may have read of concepts such as [Named Tensors](https://pytorch.org/docs/stable/named_tensor.html). While conceptually similar, Neural Types attached by NeMo are not as tightly bound to the PyTorch ecosystem - practically any object of a class can be attached with a neural type!\n", "_____no_output_____" ], [ "## Neural Types - Usage\n\nNeural Types sound interesting, so how do we go about adding them? Let's take a few cases below. \n\nNeural Types are one of the core foundations of NeMo - you will find them in a vast majority of Neural Modules, and every NeMo Model will have its Neural Types defined. 
While they are entirely optional and unintrusive, NeMo takes great care to support it so that there is no semantic incompatibility between components being used by users.", "_____no_output_____" ], [ "Let's start with a basic example of a type checked module.", "_____no_output_____" ] ], [ [ "from nemo.core.neural_types import NeuralType\nfrom nemo.core.neural_types import *", "_____no_output_____" ], [ "class EmbeddingModule(NeuralModule):\n def __init__(self):\n super().__init__()\n self.embedding = torch.nn.Embedding(num_embeddings=10, embedding_dim=30)\n\n @typecheck()\n def forward(self, x):\n return self.embedding(x)\n\n @property\n def input_types(self):\n return {\n 'x': NeuralType(axes=('B', 'T'), elements_type=Index())\n }\n\n @property\n def output_types(self):\n return {\n 'y': NeuralType(axes=('B', 'T', 'C'), elements_type=EmbeddedTextType())\n }", "_____no_output_____" ] ], [ [ "To show the benefit of Neural Types, we are going to replicate the above cases inside NeuralModules.\n\nLet's discuss how we added type checking support to the above class.\n\n1) `forward` has a decorator `@typecheck()` on it.\n\n2) `input_types` and `output_types` properties are defined.\n\nThat's it!", "_____no_output_____" ], [ "-------\n\nLet's expand on each of the above steps.\n\n- `@typecheck()` is a simple decorator that takes any class that inherits `Typing` (NeuralModule does this for us) and adds the two default properties of `input_types` and `output_types`, which by default returns None.\n\nThe `@typecheck()` decorator's explicit use ensures that, by default, neural type checking is **disabled**. NeMo does not wish to intrude on the development process of models. So users can \"opt-in\" to type checking by overriding the two properties. Therefore, the decorator ensures that users are not burdened with type checking before they wish to have it.\n\nSo what is `@typecheck()`? 
Simply put, you can wrap **any** function of a class that inherits `Typing` with this decorator, and it will look up the definition of the types of that class and enforce them. Typically, `torch.nn.Module` subclasses only implement `forward()` so it is most common to wrap that method, but `@typecheck()` is a very flexible decorator. Inside NeMo, we will show some advanced use cases (which are quite crucial to particular domains such as TTS).", "_____no_output_____" ], [ "------\n\nAs we see above, `@typecheck()` enforces the types. How then, do we provide this type of information to NeMo? \n\nBy overriding `input_types` and `output_types` properties of the class, we can return a dictionary mapping a string name to a `NeuralType`.\n\nIn the above case, we define a `NeuralType` as two components - \n\n- `axes`: This is the semantic information of the carried by the axes themselves. The most common axes information is from single character notation.\n\n> `B` = Batch <br>\n> `C` / `D` - Channel / Dimension (treated the same) <br>\n> `T` - Time <br>\n> `H` / `W` - Height / Width <br>\n\n- `elements_type`: This is the semantic information of \"what the tensor represents\". 
All such types are derived from the basic `ElementType`, and merely subclassing `ElementType` allows us to build a hierarchy of custom semantic types that can be used by NeMo!\n\nHere, we declare that the input is an element_type of `Index` (index of the character in the vocabulary) and that the output is an element_type of `EmbeddedTextType` (the text embedding)", "_____no_output_____" ] ], [ [ "embedding_module = EmbeddingModule()", "_____no_output_____" ] ], [ [ "Now let's construct the equivalent of the Case 2 above, but as a `NeuralModule`.", "_____no_output_____" ] ], [ [ "class LSTMModule(NeuralModule):\n def __init__(self):\n super().__init__()\n self.lstm = torch.nn.LSTM(1, 30, batch_first=True)\n\n @typecheck()\n def forward(self, x):\n return self.lstm(x)\n\n @property\n def input_types(self):\n return {\n 'x': NeuralType(axes=('B', 'T', 'C'), elements_type=SpectrogramType())\n }\n\n @property\n def output_types(self):\n return {\n 'y': NeuralType(axes=('B', 'T', 'C'), elements_type=EncodedRepresentation())\n }", "_____no_output_____" ] ], [ [ "------\nHere, we define the LSTM module from the Case 2 above.\n\nWe changed the input to be a rank three tensor, now representing a \"SpectrogramType\". We intentionally keep it generic - it can be a `MelSpectrogramType` or a `MFCCSpectrogramType` as it's input!\n\nThe output of an LSTM is now an `EncodedRepresentation`. Practically, this can be the output of a CNN layer, a Transformer block, or in this case, an LSTM layer. We can, of course, specialize by subclassing EncodedRepresentation and then using that!", "_____no_output_____" ] ], [ [ "lstm_module = LSTMModule()", "_____no_output_____" ] ], [ [ "------\nNow for the test !", "_____no_output_____" ] ], [ [ "# Case 1 [ERROR CELL]\nx1 = torch.randint(high=10, size=(1, 5))\nprint(\"x :\", x1)\nprint(\"embedding(x) :\", embedding_module(x1).shape)", "_____no_output_____" ] ], [ [ "-----\nYou might be wondering why we get a `TypeError` right off the bat. 
This `TypeError` is raised by design.\n\nPositional arguments can cause significant issues during model development, mostly when the model/module design is not finalized. To reduce the potential for mistakes caused by wrong positional arguments and enforce the name of arguments provided to the function, `Typing` requires you to **call all of your type-checked functions by kwargs only**.", "_____no_output_____" ] ], [ [ "# Case 1\nprint(\"x :\", x1)\nprint(\"embedding(x) :\", embedding_module(x=x1).shape)", "_____no_output_____" ] ], [ [ "Now let's try the same for the `LSTMModule` in Case 2", "_____no_output_____" ] ], [ [ "# Case 2 [ERROR CELL]\nx2 = torch.randn(1, 5, 1)\nprint(\"x :\", x2)\nprint(\"lstm(x) :\", lstm_module(x=x2)[0].shape) # Let's take all timestep outputs of the LSTM", "_____no_output_____" ] ], [ [ "-----\nNow we get a type error stating that the number of output arguments provided does not match what is expected.\n\nWhat exactly is going on here? Well, inside our `LSTMModule` class, we declare the output types to be a single NeuralType - an `EncodedRepresentation` of shape [B, T, C].\n\nBut the output of an LSTM layer is a tuple of two state values - the hidden state `h` and the cell state `c`!\n\nSo the neural type system raises an error saying that the number of output arguments does not match what is expected.\n\nLet's fix the above.", "_____no_output_____" ] ], [ [ "class CorrectLSTMModule(LSTMModule): # Let's inherit the wrong class to make it easy to override\n @property\n def output_types(self):\n return {\n 'h': NeuralType(axes=('B', 'T', 'C'), elements_type=EncodedRepresentation()),\n 'c': NeuralType(axes=('B', 'T', 'C'), elements_type=EncodedRepresentation()),\n }", "_____no_output_____" ], [ "lstm_module = CorrectLSTMModule()", "_____no_output_____" ], [ "# Case 2\nx2 = torch.randn(1, 5, 1)\nprint(\"x :\", x2)\nprint(\"lstm(x) :\", lstm_module(x=x2)[0].shape) # Let's take all timestep outputs of the LSTM `h` gate", 
"_____no_output_____" ] ], [ [ "------\nGreat! So now, the type checking system is happy.\n\nIf you looked closely, the outputs were ordinary Torch Tensors (this is good news; we don't want to be incompatible with torch Tensors after all!). So, where exactly is the type of information stored?\n\nWhen the `output_types` is overridden, and valid torch tensors are returned as a result, these tensors are attached with the attribute `neural_type`. Let's inspect this -", "_____no_output_____" ] ], [ [ "emb_out = embedding_module(x=x1)\nlstm_out = lstm_module(x=x2)[0]\n\nassert hasattr(emb_out, 'neural_type')\nassert hasattr(lstm_out, 'neural_type')", "_____no_output_____" ], [ "print(\"Embedding tensor :\", emb_out.neural_type)\nprint(\"LSTM tensor :\", lstm_out.neural_type)", "_____no_output_____" ] ], [ [ "-------\nSo we see that these tensors now have this attribute called `neural_type` and are the same shape.\n\nThis exercise's entire goal was to assert that the two outputs are semantically **not** the same object, even if they are the same shape. \n\nLet's test this!", "_____no_output_____" ] ], [ [ "emb_out.neural_type.compare(lstm_out.neural_type)", "_____no_output_____" ], [ "emb_out.neural_type == lstm_out.neural_type", "_____no_output_____" ] ], [ [ "## Neural Types - Limitations\n\nYou might have noticed one interesting fact - our inputs were just `torch.Tensor` to both typed function calls, and they had no `neural_type` assigned to them.\n\nSo why did the type check system not raise any error? \n\nThis is to maintain compatibility - type checking is meant to work on a chain of function calls - and each of these functions should themselves be wrapped with the `@typecheck()` decorator. This is also done because we don't want to overtax the forward call with dozens of checks, and therefore we only type modules that perform some higher-order logical computation. 
\n\n------\n\nAs an example, it is mostly unnecessary (but still possible) to type the input and output of every residual block of a ResNet model. However, it is practically important to type the encoder (no matter how many layers is inside it) and the decoder (the classification head) separately so that when one does fine-tuning, there is no semantic mismatch of the tensors input to the encoder and bound to the decoder.", "_____no_output_____" ], [ "-------\nFor this case, since it would be impractical to extend a class to attach a type to the input tensor, we can take a shortcut and directly attach the neural type to the input!", "_____no_output_____" ] ], [ [ "embedding_module = EmbeddingModule()\nx1 = torch.randint(high=10, size=(1, 5))\n\n# Attach correct neural type\nx1.neural_type = NeuralType(('B', 'T'), Index())\n\nprint(\"embedding(x) :\", embedding_module(x=x1).shape)", "_____no_output_____" ], [ "# Attach wrong neural type [ERROR CELL]\nx1.neural_type = NeuralType(('B', 'T'), LabelsType())\n\nprint(\"embedding(x) :\", embedding_module(x=x1).shape)", "_____no_output_____" ] ], [ [ "## Let's create the minGPT components\n\nNow that we have a somewhat firm grasp of neural type checking, let's begin porting the minGPT example code. Once again, most of the code will be a direct port from the [minGPT repository](https://github.com/karpathy/minGPT).\n\nHere, you will notice one thing. By just changing class imports, one `@typecheck()` on forward, and adding `input_types` and `output_types` (which are also entirely optional!), we are almost entirely done with the PyTorch Lightning port!", "_____no_output_____" ] ], [ [ "import math\nfrom typing import List, Set, Dict, Tuple, Optional\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn import functional as F", "_____no_output_____" ] ], [ [ "## Creating Element Types\n\nTill now, we have used the Neural Types provided by the NeMo core. 
But we need not be restricted to the pre-defined element types !\n\nUsers have total flexibility in defining any hierarchy of element types as they please!", "_____no_output_____" ] ], [ [ "class AttentionType(EncodedRepresentation):\n \"\"\"Basic Attention Element Type\"\"\"\n\nclass SelfAttentionType(AttentionType):\n \"\"\"Self Attention Element Type\"\"\"\n\nclass CausalSelfAttentionType(SelfAttentionType):\n \"\"\"Causal Self Attention Element Type\"\"\"", "_____no_output_____" ] ], [ [ "## Creating the modules\n\nNeural Modules are generally top-level modules but can be used at any level of the module hierarchy.\n\nFor demonstration, we will treat an encoder comprising a block of Causal Self Attention modules as a typed Neural Module. Of course, we can also treat each Causal Self Attention layer itself as a neural module if we require it, but top-level modules are generally preferred.", "_____no_output_____" ] ], [ [ "class CausalSelfAttention(nn.Module):\n \"\"\"\n A vanilla multi-head masked self-attention layer with a projection at the end.\n It is possible to use torch.nn.MultiheadAttention here but I am including an\n explicit implementation here to show that there is nothing too scary here.\n \"\"\"\n\n def __init__(self, n_embd, block_size, n_head, attn_pdrop, resid_pdrop):\n super().__init__()\n assert n_embd % n_head == 0\n self.n_head = n_head\n # key, query, value projections for all heads\n self.key = nn.Linear(n_embd, n_embd)\n self.query = nn.Linear(n_embd, n_embd)\n self.value = nn.Linear(n_embd, n_embd)\n # regularization\n self.attn_drop = nn.Dropout(attn_pdrop)\n self.resid_drop = nn.Dropout(resid_pdrop)\n # output projection\n self.proj = nn.Linear(n_embd, n_embd)\n # causal mask to ensure that attention is only applied to the left in the input sequence\n self.register_buffer(\"mask\", torch.tril(torch.ones(block_size, block_size))\n .view(1, 1, block_size, block_size))\n def forward(self, x, layer_past=None):\n B, T, C = x.size()\n\n # 
calculate query, key, values for all heads in batch and move head forward to be the batch dim\n k = self.key(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)\n q = self.query(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)\n v = self.value(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)\n\n # causal self-attention; Self-attend: (B, nh, T, hs) x (B, nh, hs, T) -> (B, nh, T, T)\n att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))\n att = att.masked_fill(self.mask[:,:,:T,:T] == 0, float('-inf'))\n att = F.softmax(att, dim=-1)\n att = self.attn_drop(att)\n y = att @ v # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs)\n y = y.transpose(1, 2).contiguous().view(B, T, C) # re-assemble all head outputs side by side\n\n # output projection\n y = self.resid_drop(self.proj(y))\n return y\n \n\nclass Block(nn.Module):\n \"\"\" an unassuming Transformer block \"\"\"\n\n def __init__(self, n_embd, block_size, n_head, attn_pdrop, resid_pdrop):\n super().__init__()\n self.ln1 = nn.LayerNorm(n_embd)\n self.ln2 = nn.LayerNorm(n_embd)\n self.attn = CausalSelfAttention(n_embd, block_size, n_head, attn_pdrop, resid_pdrop)\n self.mlp = nn.Sequential(\n nn.Linear(n_embd, 4 * n_embd),\n nn.GELU(),\n nn.Linear(4 * n_embd, n_embd),\n nn.Dropout(resid_pdrop),\n )\n\n def forward(self, x):\n x = x + self.attn(self.ln1(x))\n x = x + self.mlp(self.ln2(x))\n return x", "_____no_output_____" ] ], [ [ "## Building the NeMo Model\n\nSince a NeMo Model is comprised of various parts, we are going to iterate on the model step by step inside this notebook. 
As such, we will have multiple intermediate NeMo \"Models\", which will be partial implementations, and they will inherit each other iteratively.\n\nIn a complete implementation of a NeMo Model (as found in the NeMo collections), all of these components will generally be found in a single class.\n\nLet's start by inheriting `ModelPT` - the core class of a PyTorch NeMo Model, which inherits the PyTorch Lightning Module.", "_____no_output_____" ], [ "-------\n**Remember**:\n\n - The NeMo equivalent of `torch.nn.Module` is the `NeuralModule.\n - The NeMo equivalent of the `LightningModule` is `ModelPT`.\n", "_____no_output_____" ] ], [ [ "import pytorch_lightning as ptl\nfrom nemo.core import ModelPT\nfrom omegaconf import OmegaConf", "_____no_output_____" ] ], [ [ "------\nNext, let's construct the bare minimum implementation of the NeMo Model - just the constructor, the initializer of weights, and the forward method.\n\nInitially, we will follow the steps followed by the minGPT implementation, and progressively refactor for NeMo ", "_____no_output_____" ] ], [ [ "class PTLGPT(ptl.LightningModule):\n def __init__(self,\n # model definition args\n vocab_size: int, # size of the vocabulary (number of possible tokens)\n block_size: int, # length of the model's context window in time\n n_layer: int, # depth of the model; number of Transformer blocks in sequence\n n_embd: int, # the \"width\" of the model, number of channels in each Transformer\n n_head: int, # number of heads in each multi-head attention inside each Transformer block\n # model optimization args\n learning_rate: float = 3e-4, # the base learning rate of the model\n weight_decay: float = 0.1, # amount of regularizing L2 weight decay on MatMul ops\n betas: Tuple[float, float] = (0.9, 0.95), # momentum terms (betas) for the Adam optimizer\n embd_pdrop: float = 0.1, # \\in [0,1]: amount of dropout on input embeddings\n resid_pdrop: float = 0.1, # \\in [0,1]: amount of dropout in each residual connection\n 
attn_pdrop: float = 0.1, # \\in [0,1]: amount of dropout on the attention matrix\n ):\n super().__init__()\n\n # save these for optimizer init later\n self.learning_rate = learning_rate\n self.weight_decay = weight_decay\n self.betas = betas\n\n # input embedding stem: drop(content + position)\n self.tok_emb = nn.Embedding(vocab_size, n_embd)\n self.pos_emb = nn.Parameter(torch.zeros(1, block_size, n_embd))\n self.drop = nn.Dropout(embd_pdrop)\n # deep transformer: just a sequence of transformer blocks\n self.blocks = nn.Sequential(*[Block(n_embd, block_size, n_head, attn_pdrop, resid_pdrop) for _ in range(n_layer)])\n # decoder: at the end one more layernorm and decode the answers\n self.ln_f = nn.LayerNorm(n_embd)\n self.head = nn.Linear(n_embd, vocab_size, bias=False) # no need for extra bias due to one in ln_f\n\n self.block_size = block_size\n self.apply(self._init_weights)\n\n print(\"number of parameters: %e\" % sum(p.numel() for p in self.parameters()))\n\n def forward(self, idx):\n b, t = idx.size()\n assert t <= self.block_size, \"Cannot forward, model block size is exhausted.\"\n\n # forward the GPT model\n token_embeddings = self.tok_emb(idx) # each index maps to a (learnable) vector\n position_embeddings = self.pos_emb[:, :t, :] # each position maps to a (learnable) vector\n x = self.drop(token_embeddings + position_embeddings)\n x = self.blocks(x)\n x = self.ln_f(x)\n logits = self.head(x)\n\n return logits\n\n def get_block_size(self):\n return self.block_size\n\n def _init_weights(self, module):\n \"\"\"\n Vanilla model initialization:\n - all MatMul weights \\in N(0, 0.02) and biases to zero\n - all LayerNorm post-normalization scaling set to identity, so weight=1, bias=0\n \"\"\"\n if isinstance(module, (nn.Linear, nn.Embedding)):\n module.weight.data.normal_(mean=0.0, std=0.02)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n 
module.weight.data.fill_(1.0)", "_____no_output_____" ] ], [ [ "------\nLet's create a PyTorch Lightning Model above, just to make sure it works !", "_____no_output_____" ] ], [ [ "m = PTLGPT(vocab_size=100, block_size=32, n_layer=1, n_embd=32, n_head=4)", "_____no_output_____" ] ], [ [ "------\nNow, let's convert the above easily into a NeMo Model.\n\nA NeMo Model constructor generally accepts only two things - \n\n1) `cfg`: An OmegaConf DictConfig object that defines precisely the components required by the model to define its neural network architecture, data loader setup, optimizer setup, and any additional components needed for the model itself.\n\n2) `trainer`: An optional Trainer from PyTorch Lightning if the NeMo model will be used for training. It can be set after construction (if required) using the `set_trainer` method. For this notebook, we will not be constructing the config for the Trainer object.", "_____no_output_____" ], [ "## Refactoring Neural Modules\n\nAs we discussed above, Neural Modules are generally higher-level components of the Model and can potentially be replaced by equivalent Neural Modules.\n\nAs we see above, the embedding modules, deep transformer network, and final decoder layer have all been combined inside the PyTorch Lightning implementation constructor.\n\n------\n\nHowever, the decoder could have been an RNN instead of a simple Linear layer, or it could have been a 1D-CNN instead.\n\nLikewise, the deep encoder could potentially have a different implementation of Self Attention modules.\n\nThese changes cannot be easily implemented any more inside the above implementation. 
However, if we refactor these components into their respective NeuralModules, then we can easily replace them with equivalent modules we construct in the future!", "_____no_output_____" ], [ "### Refactoring the Embedding module\n\nLet's first refactor out the embedding module from the above implementation", "_____no_output_____" ] ], [ [ "class GPTEmbedding(NeuralModule):\n def __init__(self, vocab_size: int, n_embd: int, block_size: int, embd_pdrop: float = 0.0):\n super().__init__()\n\n # input embedding stem: drop(content + position)\n self.tok_emb = nn.Embedding(vocab_size, n_embd)\n self.pos_emb = nn.Parameter(torch.zeros(1, block_size, n_embd))\n self.drop = nn.Dropout(embd_pdrop)\n\n @typecheck()\n def forward(self, idx):\n b, t = idx.size()\n \n # forward the GPT model\n token_embeddings = self.tok_emb(idx) # each index maps to a (learnable) vector\n position_embeddings = self.pos_emb[:, :t, :] # each position maps to a (learnable) vector\n x = self.drop(token_embeddings + position_embeddings)\n return x\n\n @property\n def input_types(self):\n return {\n 'idx': NeuralType(('B', 'T'), Index())\n }\n\n @property\n def output_types(self):\n return {\n 'embeddings': NeuralType(('B', 'T', 'C'), EmbeddedTextType())\n }", "_____no_output_____" ] ], [ [ "### Refactoring the Encoder\n\nNext, let's refactor the Encoder - the multi layer Transformer Encoder", "_____no_output_____" ] ], [ [ "class GPTTransformerEncoder(NeuralModule):\n def __init__(self, n_embd: int, block_size: int, n_head: int, n_layer: int, attn_pdrop: float = 0.0, resid_pdrop: float = 0.0):\n super().__init__()\n\n self.blocks = nn.Sequential(*[Block(n_embd, block_size, n_head, attn_pdrop, resid_pdrop) \n for _ in range(n_layer)])\n \n @typecheck()\n def forward(self, embed):\n return self.blocks(embed)\n\n @property\n def input_types(self):\n return {\n 'embed': NeuralType(('B', 'T', 'C'), EmbeddedTextType())\n }\n\n @property\n def output_types(self):\n return {\n 'encoding': NeuralType(('B', 
'T', 'C'), CausalSelfAttentionType())\n }", "_____no_output_____" ] ], [ [ "### Refactoring the Decoder\n\nFinally, let's refactor the Decoder - the small one-layer feed-forward network to decode the answer.\n\n-------\n\nNote an interesting detail - The `input_types` of the Decoder accepts the generic `EncoderRepresentation()`, where as the `neural_type` of the `GPTTransformerEncoder` has the `output_type` of `CausalSelfAttentionType`.\n\nThis is semantically *not* a mismatch! As you can see above in the inheritance chart, we declare `EncodedRepresentation` -> `AttentionType` -> `SelfAttentionType` -> `CausalSelfAttentionType`. \n\nSuch an inheritance hierarchy for the `element_type` allows future encoders (which also have a neural output type of at least `EncodedRepresentation`) to be swapped in place of the current GPT Causal Self Attention Encoder while keeping the rest of the NeMo model working just fine!", "_____no_output_____" ] ], [ [ "class GPTDecoder(NeuralModule):\n def __init__(self, n_embd: int, vocab_size: int):\n super().__init__()\n self.ln_f = nn.LayerNorm(n_embd)\n self.head = nn.Linear(n_embd, vocab_size, bias=False) # no need for extra bias due to one in ln_f\n\n @typecheck()\n def forward(self, encoding):\n x = self.ln_f(encoding)\n logits = self.head(x)\n return logits\n\n @property\n def input_types(self):\n return {\n 'encoding': NeuralType(('B', 'T', 'C'), EncodedRepresentation())\n }\n \n @property\n def output_types(self):\n return {\n 'logits': NeuralType(('B', 'T', 'C'), LogitsType())\n }\n", "_____no_output_____" ] ], [ [ "### Refactoring the NeMo GPT Model\n\nNow that we have 3 NeuralModules for the embedding, the encoder, and the decoder, let's refactor the NeMo model to take advantage of this refactor!\n\nThis time, we inherit from `ModelPT` instead of the general `LightningModule`.", "_____no_output_____" ] ], [ [ "class AbstractNeMoGPT(ModelPT):\n def __init__(self, cfg: OmegaConf, trainer: ptl.Trainer = None):\n 
super().__init__(cfg=cfg, trainer=trainer)\n\n # input embedding stem: drop(content + position)\n self.embedding = self.from_config_dict(self.cfg.embedding)\n # deep transformer: just a sequence of transformer blocks\n self.encoder = self.from_config_dict(self.cfg.encoder)\n # decoder: at the end one more layernorm and decode the answers\n self.decoder = self.from_config_dict(self.cfg.decoder)\n\n self.block_size = self.cfg.embedding.block_size\n self.apply(self._init_weights)\n\n print(\"number of parameters: %e\" % self.num_weights)\n\n @typecheck()\n def forward(self, idx):\n b, t = idx.size()\n assert t <= self.block_size, \"Cannot forward, model block size is exhausted.\"\n\n # forward the GPT model\n # Remember: Only kwargs are allowed !\n e = self.embedding(idx=idx)\n x = self.encoder(embed=e)\n logits = self.decoder(encoding=x)\n\n return logits\n\n def get_block_size(self):\n return self.block_size\n\n def _init_weights(self, module):\n \"\"\"\n Vanilla model initialization:\n - all MatMul weights \\in N(0, 0.02) and biases to zero\n - all LayerNorm post-normalization scaling set to identity, so weight=1, bias=0\n \"\"\"\n if isinstance(module, (nn.Linear, nn.Embedding)):\n module.weight.data.normal_(mean=0.0, std=0.02)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n\n @property\n def input_types(self):\n return {\n 'idx': NeuralType(('B', 'T'), Index())\n }\n\n @property\n def output_types(self):\n return {\n 'logits': NeuralType(('B', 'T', 'C'), LogitsType())\n }", "_____no_output_____" ] ], [ [ "## Creating a config for a Model\n\nAt first glance, not much changed compared to the PyTorch Lightning implementation above. 
Other than the constructor, which now accepts a config, nothing changed at all!\n\nNeMo operates on the concept of a NeMo Model being accompanied by a corresponding config dict (instantiated as an OmegaConf object). This enables us to prototype the model by utilizing Hydra rapidly. This includes various other benefits - such as hyperparameter optimization and serialization/deserialization of NeMo models.\n\nLet's look at how actually to construct such config objects!", "_____no_output_____" ] ], [ [ "# model definition args (required)\n# ================================\n# vocab_size: int # size of the vocabulary (number of possible tokens)\n# block_size: int # length of the model's context window in time\n# n_layer: int # depth of the model; number of Transformer blocks in sequence\n# n_embd: int # the \"width\" of the model, number of channels in each Transformer\n# n_head: int # number of heads in each multi-head attention inside each Transformer block \n\n# model definition args (optional)\n# ================================\n# embd_pdrop: float = 0.1, # \\in [0,1]: amount of dropout on input embeddings\n# resid_pdrop: float = 0.1, # \\in [0,1]: amount of dropout in each residual connection\n# attn_pdrop: float = 0.1, # \\in [0,1]: amount of dropout on the attention matrix", "_____no_output_____" ] ], [ [ "------\nAs we look at the required parameters above, we need a way to tell OmegaConf that these values are currently not set, but the user should set them before we use them.\n\nOmegaConf supports such behavior using the `MISSING` value. 
A similar effect can be achieved in YAML configs by using `???` as a placeholder.", "_____no_output_____" ] ], [ [ "from omegaconf import MISSING", "_____no_output_____" ], [ "# Let's create a utility for building the class path\ndef get_class_path(cls):\n return f'{cls.__module__}.{cls.__name__}'", "_____no_output_____" ] ], [ [ "### Structure of a Model config\n\nLet's first create a config for the common components of the model level config -", "_____no_output_____" ] ], [ [ "common_config = OmegaConf.create({\n 'vocab_size': MISSING,\n 'block_size': MISSING,\n 'n_layer': MISSING,\n 'n_embd': MISSING,\n 'n_head': MISSING,\n})", "_____no_output_____" ] ], [ [ "-----\nThe model config right now is still being built - it needs to contain a lot more details!\n\nA complete Model Config should have the sub-configs of all of its top-level modules as well. This means the configs of the `embedding`, `encoder`, and the `decoder`.\n", "_____no_output_____" ], [ "### Structure of sub-module config\n\nFor top-level models, we generally don't change the actual module very often, and instead, primarily change the hyperparameters of that model.\n\nSo we will make use of `Hydra`'s Class instantiation method - which can easily be accessed via the class method `ModelPT.from_config_dict()`.\n\nLet's take a few examples below -", "_____no_output_____" ] ], [ [ "embedding_config = OmegaConf.create({\n '_target_': get_class_path(GPTEmbedding),\n 'vocab_size': '${model.vocab_size}',\n 'n_embd': '${model.n_embd}',\n 'block_size': '${model.block_size}',\n 'embd_pdrop': 0.1\n})\n\nencoder_config = OmegaConf.create({\n '_target_': get_class_path(GPTTransformerEncoder),\n 'n_embd': '${model.n_embd}',\n 'block_size': '${model.block_size}',\n 'n_head': '${model.n_head}',\n 'n_layer': '${model.n_layer}',\n 'attn_pdrop': 0.1,\n 'resid_pdrop': 0.1\n})\n\ndecoder_config = OmegaConf.create({\n '_target_': get_class_path(GPTDecoder),\n # n_embd: int, vocab_size: int\n 'n_embd': '${model.n_embd}',\n 
'vocab_size': '${model.vocab_size}'\n})", "_____no_output_____" ] ], [ [ "##### What is `_target_`?\n--------\n\nIn the above config, we see a `_target_` in the config. `_target_` is usually a full classpath to the actual class in the python package/user local directory. It is required for Hydra to locate and instantiate the model from its path correctly.\n\nSo why do we want to set a classpath?\n\nIn general, when developing models, we don't often change the encoder or the decoder, but we do change the hyperparameters of the encoder and decoder.\n\nThis notation helps us keep the Model level declaration of the forward step neat and precise. It also logically helps us demark which parts of the model can be easily replaced - in the future, we can easily replace the encoder with some other type of self-attention block or the decoder with an RNN or 1D-CNN neural module (as long as they have the same Neural Type definition as the current blocks).\n", "_____no_output_____" ], [ "##### What is the `${}` syntax?\n-------\n\nOmegaConf, and by extension, Hydra, supports Variable Interpolation. As you can see in the `__init__` of embedding, encoder, and decoder neural modules, they often share many parameters between each other.\n\nIt would become tedious and error-prone to set each of these constructors' values separately in each of the embedding, encoder, and decoder configs.\n\nSo instead, we define standard keys inside of the `model` level config and then interpolate these values inside of the respective configs!", "_____no_output_____" ], [ "### Attaching the model and module-level configs\n\nSo now, we have a Model level and per-module level configs for the core components. 
Sub-module configs generally fall under the \"model\" namespace, but you have the flexibility to define the structure as you require.\n\nLet's attach them!\n", "_____no_output_____" ] ], [ [ "model_config = OmegaConf.create({\n 'model': common_config\n})\n\n# Then let's attach the sub-module configs\nmodel_config.model.embedding = embedding_config\nmodel_config.model.encoder = encoder_config\nmodel_config.model.decoder = decoder_config", "_____no_output_____" ] ], [ [ "-----\nLet's print this config!", "_____no_output_____" ] ], [ [ "print(OmegaConf.to_yaml(model_config))", "_____no_output_____" ] ], [ [ "-----\nWait, why did OmegaConf not fill in the value of the variable interpolation for the configs yet?\n\nThis is because OmegaConf takes a deferred approach to variable interpolation. To force it ahead of time, we can use the following snippet - ", "_____no_output_____" ] ], [ [ "temp_config = OmegaConf.create(OmegaConf.to_container(model_config, resolve=True))\nprint(OmegaConf.to_yaml(temp_config))", "_____no_output_____" ] ], [ [ "-----\nNow that we have a config, let's try to create an object of the NeMo Model !", "_____no_output_____" ] ], [ [ "import copy", "_____no_output_____" ], [ "# Let's work on a copy of the model config and update it before we send it into the Model.\ncfg = copy.deepcopy(model_config)", "_____no_output_____" ], [ "# Let's set the values of the config (for some plausible small model)\ncfg.model.vocab_size = 100\ncfg.model.block_size = 128\ncfg.model.n_layer = 1\ncfg.model.n_embd = 32\ncfg.model.n_head = 4", "_____no_output_____" ], [ "print(OmegaConf.to_yaml(cfg))", "_____no_output_____" ], [ "# Try to create a model with this config [ERROR CELL]\nm = AbstractNeMoGPT(cfg.model)", "_____no_output_____" ] ], [ [ "-----\n\nYou will note that we added the `Abstract` tag for a reason to this NeMo Model and that when we try to instantiate it - it raises an error that we need to implement specific methods.\n\n1) `setup_training_data` & 
`setup_validation_data` - All NeMo models should implement two data loaders - the training data loader and the validation data loader. Optionally, they can go one step further and also implement the `setup_test_data` method to add support for evaluating the Model on its own.\n\nWhy do we enforce this? NeMo Models are meant to be a unified, cohesive object containing the details about the neural network underlying that Model and the data loaders to train, validate, and optionally test those models.\n\nIn doing so, once the Model is created/deserialized, it would take just a few more steps to train the Model from scratch / fine-tune/evaluate the Model on any data that the user provides, as long as this user-provided dataset is in a format supported by the Dataset / DataLoader that is used by this Model!\n\n2) `list_available_models` - This is a utility method to provide a list of pre-trained NeMo models to the user from the cloud.\n\nTypically, NeMo models can be easily packaged into a tar file (which we call a .nemo file in the earlier primer notebook). These tar files contain the model config + the pre-trained checkpoint weights of the Model, and can easily be downloaded from some cloud service. 
\n\nFor this notebook, we will not be implementing this method.\n\n--------\nFinally, let's create a concrete implementation of the above NeMo Model!", "_____no_output_____" ] ], [ [ "from nemo.core.classes.common import PretrainedModelInfo", "_____no_output_____" ], [ "class BasicNeMoGPT(AbstractNeMoGPT):\n\n @classmethod\n def list_available_models(cls) -> PretrainedModelInfo:\n return None\n\n def setup_training_data(self, train_data_config: OmegaConf):\n self._train_dl = None\n \n def setup_validation_data(self, val_data_config: OmegaConf):\n self._validation_dl = None\n \n def setup_test_data(self, test_data_config: OmegaConf):\n self._test_dl = None", "_____no_output_____" ] ], [ [ "------\nNow let's try to create an object of the `BasicNeMoGPT` model", "_____no_output_____" ] ], [ [ "m = BasicNeMoGPT(cfg.model)", "_____no_output_____" ] ], [ [ "## Setting up train-val-test steps\n\nThe above `BasicNeMoGPT` Model is a basic PyTorch Lightning Module, with some added functionality - \n\n1) Neural Type checks support - as defined in the Model as well as the internal modules.\n\n2) Save and restore of the Model (in the trivial case) to a tarfile.\n\nBut as the Model is right now, it crucially does not support PyTorch Lightning's `Trainer`. 
As such, while this Model can be called manually, it cannot be easily trained or evaluated by using the PyTorch Lightning framework.\n\n------\n\nLet's begin adding support for this then -", "_____no_output_____" ] ], [ [ "class BasicNeMoGPTWithSteps(BasicNeMoGPT):\n\n def step_(self, split, batch, batch_idx=None):\n idx, targets = batch\n logits = self(idx=idx)\n loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1))\n key = 'loss' if split == 'train' else f\"{split}_loss\"\n return {key: loss}\n\n def training_step(self, *args, **kwargs):\n return self.step_('train', *args, **kwargs)\n\n def validation_step(self, *args, **kwargs):\n return self.step_('val', *args, **kwargs)\n\n def test_step(self, *args, **kwargs):\n return self.step_('test', *args, **kwargs)\n \n # This is useful for multiple validation data loader setup\n def multi_validation_epoch_end(self, outputs, dataloader_idx: int = 0):\n val_loss_mean = torch.stack([x['val_loss'] for x in outputs]).mean()\n return {'val_loss': val_loss_mean}\n\n # This is useful for multiple test data loader setup\n def multi_test_epoch_end(self, outputs, dataloader_idx: int = 0):\n test_loss_mean = torch.stack([x['test_loss'] for x in outputs]).mean()\n return {'test_loss': test_loss_mean}", "_____no_output_____" ], [ "m = BasicNeMoGPTWithSteps(cfg=cfg.model)", "_____no_output_____" ] ], [ [ "### Setup for Multi Validation and Multi Test data loaders\n\nAs discussed in the NeMo Primer, NeMo has in-built support for multiple data loaders for validation and test steps. Therefore, as an example of how easy it is to add such support, we include the `multi_validation_epoch_end` and `multi_test_epoch_end` overrides.\n\nIt is also practically essential to collate results from more than one distributed GPUs, and then aggregate results properly at the end of the epoch. NeMo strictly enforces the correct collation of results, even if you will work on only one device! 
Future-proofing is baked into the model design for this case!\n\nTherefore NeMo provides the above two generic methods to support aggregation and simultaneously support multiple datasets!\n\n**Please note, you can prepend your already existing `validation_epoch_end` and `test_epoch_end` implementations with the `multi_` in the name, and that alone is sufficient to enable multi-dataset and multi-GPU support!**\n\n------\n**Note: To disable multi-dataset support, simply override `validation_epoch_end` and `test_epoch_end` instead of `multi_validation_epoch_end` and `multi_test_epoch_end`!**", "_____no_output_____" ], [ "## Setting up the optimizer / scheduler\n\nWe are relatively close to reaching feature parity with the MinGPT Model! But we are missing a crucial piece - the optimizer.\n\nAll NeMo Models come with a default implementation of `setup_optimization()`, which will parse the provided model config to obtain the `optim` and `sched` sub-configs, and automatically configure the optimizer and scheduler.\n\nIf training GPT was as simple as plugging in an Adam optimizer over all the parameters with a cosine weight decay schedule, we could do that from the config alone.\n\n-------\n\nBut GPT is not such a trivial model - more specifically, it requires weight decay to be applied to the weight matrices but not to the biases, the embedding matrix, or the LayerNorm layers.\n\nWe can drop the support that NeMo provides for such special cases and instead utilize the PyTorch Lightning method `configure_optimizers` to perform the same task.\n\n-------\n\nNote: for NeMo Models, the `configure_optimizers` is implemented as a trivial call to `setup_optimization()` followed by returning the generated optimizer and scheduler! 
So we can override the `configure_optimizer` method and manage the optimizer creation manually!\n\nNeMo's goal is to provide usable defaults for the general case and simply back off to either PyTorch Lightning or PyTorch nn.Module itself in cases which the additional flexibility becomes necessary!", "_____no_output_____" ] ], [ [ "class BasicNeMoGPTWithOptim(BasicNeMoGPTWithSteps):\n\n def configure_optimizers(self):\n \"\"\"\n This long function is unfortunately doing something very simple and is being very defensive:\n We are separating out all parameters of the model into two buckets: those that will experience\n weight decay for regularization and those that won't (biases, and layernorm/embedding weights).\n We are then returning the PyTorch optimizer object.\n \"\"\"\n\n # separate out all parameters to those that will and won't experience weight decay\n decay = set()\n no_decay = set()\n whitelist_weight_modules = (torch.nn.Linear, )\n blacklist_weight_modules = (torch.nn.LayerNorm, torch.nn.Embedding)\n for mn, m in self.named_modules():\n for pn, p in m.named_parameters():\n fpn = '%s.%s' % (mn, pn) if mn else pn # full param name\n\n if pn.endswith('bias'):\n # all biases will not be decayed\n no_decay.add(fpn)\n elif pn.endswith('weight') and isinstance(m, whitelist_weight_modules):\n # weights of whitelist modules will be weight decayed\n decay.add(fpn)\n elif pn.endswith('weight') and isinstance(m, blacklist_weight_modules):\n # weights of blacklist modules will NOT be weight decayed\n no_decay.add(fpn)\n\n # special case the position embedding parameter in the root GPT module as not decayed\n no_decay.add('embedding.pos_emb')\n\n # validate that we considered every parameter\n param_dict = {pn: p for pn, p in self.named_parameters()}\n inter_params = decay & no_decay\n union_params = decay | no_decay\n assert len(inter_params) == 0, \"parameters %s made it into both decay/no_decay sets!\" % (str(inter_params), )\n assert len(param_dict.keys() - 
union_params) == 0, \"parameters %s were not separated into either decay/no_decay set!\" \\\n % (str(param_dict.keys() - union_params), )\n\n # create the pytorch optimizer object\n optim_groups = [\n {\"params\": [param_dict[pn] for pn in sorted(list(decay))], \"weight_decay\": self.cfg.optim.weight_decay},\n {\"params\": [param_dict[pn] for pn in sorted(list(no_decay))], \"weight_decay\": 0.0},\n ]\n optimizer = torch.optim.AdamW(optim_groups, lr=self.cfg.optim.lr, betas=self.cfg.optim.betas)\n return optimizer\n", "_____no_output_____" ], [ "m = BasicNeMoGPTWithOptim(cfg=cfg.model)", "_____no_output_____" ] ], [ [ "-----\nNow let's setup the config for the optimizer !", "_____no_output_____" ] ], [ [ "OmegaConf.set_struct(cfg.model, False)\n\noptim_config = OmegaConf.create({\n 'lr': 3e-4,\n 'weight_decay': 0.1,\n 'betas': [0.9, 0.95]\n})\n\ncfg.model.optim = optim_config\n\nOmegaConf.set_struct(cfg.model, True)", "_____no_output_____" ] ], [ [ "## Setting up the dataset / data loaders\n\nSo we were able almost entirely to replicate the MinGPT implementation. \n\nRemember, NeMo models should contain all of the logic to load the Dataset and DataLoader for at least the train and validation step.\n\nWe temporarily provided empty implementations to get around it till now, but let's fill that in now!\n\n-------\n\n**Note for datasets**: Below, we will show an example using a very small dataset called `tiny_shakespeare`, found at the original [char-rnn repository](https://github.com/karpathy/char-rnn), but practically you could use any text corpus. The one suggested in minGPT is available at http://mattmahoney.net/dc/textdata.html", "_____no_output_____" ], [ "### Creating the Dataset\n\nNeMo has Neural Type checking support, even for Datasets! 
It's just a minor change of the import in most cases and one difference in how we handle `collate_fn`.\n\nWe could paste the dataset info from minGPT, and you'd only need to make 2 changes!\n\n-----\nIn this example, we will be writing a thin subclass over the datasets provided by `nlp` from HuggingFace!", "_____no_output_____" ] ], [ [ "from nemo.core import Dataset\nfrom torch.utils import data\nfrom torch.utils.data.dataloader import DataLoader", "_____no_output_____" ], [ "class TinyShakespeareDataset(Dataset):\n\n def __init__(self, data_path, block_size, crop=None, override_vocab=None):\n\n # load the data and crop it appropriately\n with open(data_path, 'r') as f:\n if crop is None:\n data = f.read()\n else:\n f.seek(crop[0])\n data = f.read(crop[1])\n\n # build a vocabulary from data or inherit it\n vocab = sorted(list(set(data))) if override_vocab is None else override_vocab\n\n # Add UNK\n special_tokens = ['<PAD>', '<UNK>'] # We use just <UNK> and <PAD> in the call, but can add others.\n if not override_vocab:\n vocab = [*special_tokens, *vocab] # Update train vocab with special tokens\n\n data_size, vocab_size = len(data), len(vocab)\n print('data of crop %s has %d characters, vocab of size %d.' 
% (str(crop), data_size, vocab_size))\n print('Num samples in dataset : %d' % (data_size // block_size))\n\n self.stoi = { ch:i for i,ch in enumerate(vocab) }\n self.itos = { i:ch for i,ch in enumerate(vocab) }\n self.block_size = block_size\n self.vocab_size = vocab_size\n self.data = data\n self.vocab = vocab\n self.special_tokens = special_tokens\n\n def __len__(self):\n return len(self.data) // self.block_size\n\n def __getitem__(self, idx):\n # attempt to fetch a chunk of (block_size + 1) items, but (block_size) will work too\n chunk = self.data[idx*self.block_size : min(len(self.data), (idx+1)*self.block_size + 1)]\n # map the string into a sequence of integers\n ixes = [self.stoi[s] if s in self.stoi else self.stoi['<UNK>'] for s in chunk ]\n # if stars align (last idx and len(self.data) % self.block_size == 0), pad with <PAD>\n if len(ixes) < self.block_size + 1:\n assert len(ixes) == self.block_size # i believe this is the only way this could happen, make sure\n ixes.append(self.stoi['<PAD>'])\n dix = torch.tensor(ixes, dtype=torch.long)\n return dix[:-1], dix[1:]\n\n @property\n def output_types(self):\n return {\n 'input': NeuralType(('B', 'T'), Index()),\n 'target': NeuralType(('B', 'T'), LabelsType())\n }", "_____no_output_____" ] ], [ [ "------\nWe didn't have to change anything until here. How then is type-checking done? \n\nNeMo does type-checking inside of the collate function implementation itself! 
In this case, it is not necessary to override the `collate_fn` inside the Dataset, but if we did need to override it, **NeMo requires that the private method `_collate_fn` be overridden instead**.\n\nWe can then use data loaders with minor modifications!\n\n**Also, there is no need to implement the `input_types` for Dataset, as they are the ones generating the input for the model!**", "_____no_output_____" ], [ "-----\nLet's prepare the dataset that we are going to use - Tiny Shakespeare from the following codebase [char-rnn](https://github.com/karpathy/char-rnn).", "_____no_output_____" ] ], [ [ "import os", "_____no_output_____" ], [ "if not os.path.exists('tiny-shakespeare.txt'):\n !wget https://raw.githubusercontent.com/jcjohnson/torch-rnn/master/data/tiny-shakespeare.txt", "_____no_output_____" ], [ "!head -n 5 tiny-shakespeare.txt", "_____no_output_____" ], [ "train_dataset = TinyShakespeareDataset('tiny-shakespeare.txt', cfg.model.block_size, crop=(0, int(1e6)))\nval_dataset = TinyShakespeareDataset('tiny-shakespeare.txt', cfg.model.block_size, crop=(int(1e6), int(50e3)), override_vocab=train_dataset.vocab)\ntest_dataset = TinyShakespeareDataset('tiny-shakespeare.txt', cfg.model.block_size, crop=(int(1.05e6), int(100e3)), override_vocab=train_dataset.vocab)", "_____no_output_____" ] ], [ [ "### Setting up dataset/data loader support in the Model\n\nSo we now know our data loader works. Let's integrate it as part of the Model itself!\n\nTo do this, we use the three special attributes of the NeMo Model - `self._train_dl`, `self._validation_dl` and `self._test_dl`. Once you construct your DataLoader, place your data loader to these three variables. \n\nFor multi-data loader support, the same applies! 
NeMo will automatically handle the management of multiple data loaders for you!", "_____no_output_____" ] ], [ [ "class NeMoGPT(BasicNeMoGPTWithOptim):\n\n def _setup_data_loader(self, cfg):\n if self.vocab is None:\n override_vocab = None\n else:\n override_vocab = self.vocab\n\n dataset = TinyShakespeareDataset(\n data_path=cfg.data_path,\n block_size=cfg.block_size,\n crop=tuple(cfg.crop) if 'crop' in cfg else None,\n override_vocab=override_vocab\n )\n\n if self.vocab is None:\n self.vocab = dataset.vocab\n\n return DataLoader(\n dataset=dataset,\n batch_size=cfg.batch_size,\n shuffle=cfg.shuffle,\n collate_fn=dataset.collate_fn, # <-- this is necessary for type checking\n pin_memory=cfg.pin_memory if 'pin_memory' in cfg else False,\n num_workers=cfg.num_workers if 'num_workers' in cfg else 0\n )\n \n def setup_training_data(self, train_data_config: OmegaConf):\n self.vocab = None\n self._train_dl = self._setup_data_loader(train_data_config)\n \n def setup_validation_data(self, val_data_config: OmegaConf):\n self._validation_dl = self._setup_data_loader(val_data_config)\n \n def setup_test_data(self, test_data_config: OmegaConf):\n self._test_dl = self._setup_data_loader(test_data_config)\n", "_____no_output_____" ] ], [ [ "### Creating the dataset / dataloader config\n\nThe final step to setup this model is to add the `train_ds`, `validation_ds` and `test_ds` configs inside the model config!", "_____no_output_____" ] ], [ [ "OmegaConf.set_struct(cfg.model, False)\n\n# Set the data path and update vocabular size\ncfg.model.data_path = 'tiny-shakespeare.txt'\ncfg.model.vocab_size = train_dataset.vocab_size\n\nOmegaConf.set_struct(cfg.model, True)", "_____no_output_____" ], [ "train_ds = OmegaConf.create({\n 'data_path': '${model.data_path}',\n 'block_size': '${model.block_size}',\n 'crop': [0, int(1e6)],\n 'batch_size': 64,\n 'shuffle': True,\n})\n\nvalidation_ds = OmegaConf.create({\n 'data_path': '${model.data_path}',\n 'block_size': '${model.block_size}',\n 
'crop': [int(1e6), int(50e3)],\n 'batch_size': 4,\n 'shuffle': False,\n})\n\ntest_ds = OmegaConf.create({\n 'data_path': '${model.data_path}',\n 'block_size': '${model.block_size}',\n 'crop': [int(1.05e6), int(100e3)],\n 'batch_size': 4,\n 'shuffle': False,\n})", "_____no_output_____" ], [ "# Attach to the model config\nOmegaConf.set_struct(cfg.model, False)\n\ncfg.model.train_ds = train_ds\ncfg.model.validation_ds = validation_ds\ncfg.model.test_ds = test_ds\n\nOmegaConf.set_struct(cfg.model, True)", "_____no_output_____" ], [ "# Let's see the config now !\nprint(OmegaConf.to_yaml(cfg))", "_____no_output_____" ], [ "# Let's try creating a model now !\nmodel = NeMoGPT(cfg=cfg.model)", "_____no_output_____" ] ], [ [ "-----\nAll the data loaders load properly ! Yay!", "_____no_output_____" ], [ "# Evaluate the model - end to end!\n\nNow that the data loaders have been set up, all that's left is to train and test the model! We have most of the components required by this model - the train, val and test data loaders, the optimizer, and the type-checked forward step to perform the train-validation-test steps! 
\n\nBut training a GPT model from scratch is not the goal of this primer, so instead, let's do a sanity check by merely testing the model for a few steps using random initial weights.\n\nThe above will ensure that - \n\n1) Our data loaders work as intended\n\n2) The type checking system assures us that our Neural Modules are performing their forward step correctly.\n\n3) The loss is calculated, and therefore the model runs end to end, ultimately supporting PyTorch Lightning.", "_____no_output_____" ] ], [ [ "if torch.cuda.is_available():\n cuda = 1\nelse:\n cuda = 0\n\ntrainer = ptl.Trainer(gpus=cuda, test_percent_check=1.0)", "_____no_output_____" ], [ "trainer.test(model)", "_____no_output_____" ] ], [ [ "# Saving and restoring models\n\nNeMo internally keeps track of the model configuration, as well as the model checkpoints and parameters.\n\nAs long as your NeMo follows the above general guidelines, you can call the `save_to` and `restore_from` methods to save and restore your models!", "_____no_output_____" ] ], [ [ "model.save_to('gpt_model.nemo')", "_____no_output_____" ], [ "!ls -d -- *.nemo", "_____no_output_____" ], [ "temp_model = NeMoGPT.restore_from('gpt_model.nemo')", "_____no_output_____" ], [ "# [ERROR CELL]\ntemp_model.setup_test_data(temp_model.cfg.test_ds)", "_____no_output_____" ] ], [ [ "-----\n\nHmm, it seems it wasn't so easy in this case. Non-trivial models have non-trivial issues!\n\nRemember, our NeMoGPT model sets its self.vocab inside the `setup_train_data` step. But that depends on the vocabulary generated by the train set... which is **not** restored during model restoration (unless you call `setup_train_data` explicitly!).\n\nWe can quickly resolve this issue by constructing an external data file to enable save and restore support, and NeMo supports that too! 
We will use the `register_artifact` API in NeMo to support external files being attached to the .nemo checkpoint.", "_____no_output_____" ] ], [ [ "class NeMoGPTv2(NeMoGPT):\n \n def setup_training_data(self, train_data_config: OmegaConf):\n self.vocab = None\n self._train_dl = self._setup_data_loader(train_data_config)\n\n # Save the vocab into a text file for now\n with open('vocab.txt', 'w') as f:\n for token in self.vocab:\n f.write(f\"{token}<SEP>\")\n \n # This is going to register the file into .nemo!\n # When you later use .save_to(), it will copy this file into the tar file.\n self.register_artifact(None, 'vocab.txt')\n \n def setup_validation_data(self, val_data_config: OmegaConf):\n # This is going to try to find the same file, and if it fails, \n # it will use the copy in .nemo\n vocab_file = self.register_artifact(None, 'vocab.txt')\n \n with open(vocab_file, 'r') as f:\n vocab = []\n vocab = f.read().split('<SEP>')[:-1] # the -1 here is for the dangling <SEP> token in the file\n self.vocab = vocab\n\n self._validation_dl = self._setup_data_loader(val_data_config)\n \n def setup_test_data(self, test_data_config: OmegaConf):\n # This is going to try to find the same file, and if it fails, \n # it will use the copy in .nemo\n vocab_file = self.register_artifact(None, 'vocab.txt')\n\n with open(vocab_file, 'r') as f:\n vocab = []\n vocab = f.read().split('<SEP>')[:-1] # the -1 here is for the dangling <SEP> token in the file\n self.vocab = vocab\n\n self._test_dl = self._setup_data_loader(test_data_config)\n", "_____no_output_____" ], [ "# Let's try creating a model now !\nmodel = NeMoGPTv2(cfg=cfg.model)", "_____no_output_____" ], [ "# Now let's try to save and restore !\nmodel.save_to('gpt_model.nemo')", "_____no_output_____" ], [ "temp_model = NeMoGPTv2.restore_from('gpt_model.nemo')", "_____no_output_____" ], [ "temp_model.setup_multiple_test_data(temp_model.cfg.test_ds)", "_____no_output_____" ], [ "if torch.cuda.is_available():\n cuda = 1\nelse:\n 
cuda = 0\n\ntrainer = ptl.Trainer(gpus=cuda, test_percent_check=1.0)", "_____no_output_____" ], [ "trainer.test(model)", "_____no_output_____" ] ], [ [ "------\nThere we go ! Now our model's can be serialized and de-serialized without any issue, even with an external vocab file !", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ] ]
4a2c427de27b936f2a50fbb12e6b4d018f7463a1
37,096
ipynb
Jupyter Notebook
0828_intro/01_intro_to_python_and_jupyter_done.ipynb
ingolia/mcb200-2020
f1ba107efee987b8bb4e65fc6511a911261162f9
[ "MIT" ]
null
null
null
0828_intro/01_intro_to_python_and_jupyter_done.ipynb
ingolia/mcb200-2020
f1ba107efee987b8bb4e65fc6511a911261162f9
[ "MIT" ]
null
null
null
0828_intro/01_intro_to_python_and_jupyter_done.ipynb
ingolia/mcb200-2020
f1ba107efee987b8bb4e65fc6511a911261162f9
[ "MIT" ]
null
null
null
23.069652
451
0.5303
[ [ [ "**Jupyter** allows you to write and run Python code through an interactive web browser interface.\n\nEach Jupyter **notebook** is a series of **cells** that can have Python code or text.\n\nThe cell below contains Python code to carry out some simple arithmatic. You can run the code by selecting the cell and holding _shift_ while hitting _enter_. When you do this, the result of the arithmatic is displayed.\n\nHere is the Python code for some simple arithmatic\n```\n3 * 4 * 5\n```\n\nTry running the code in the cell below:", "_____no_output_____" ] ], [ [ "3 * 4 * 6", "_____no_output_____" ] ], [ [ "Now, go back and change one of the numbers. Re-run the cell by hitting _shift_+_enter_.", "_____no_output_____" ], [ "_Exercise._ Compute the sum of the first five positive numbers, 1 through 5 inclusive.", "_____no_output_____" ] ], [ [ "1+2+ 3 + 4 +5", "_____no_output_____" ] ], [ [ "This is some random text", "_____no_output_____" ], [ "### Variables\n\nPython can do much more than arithmatic. 
We can create and use named **variables**.\n\nTo start, we'll create a variable named `x` and give it the value 7.", "_____no_output_____" ] ], [ [ "x = 7", "_____no_output_____" ] ], [ [ "There's no output when we do this, but Jupyter will keep track of this variable and we can use it later.\n\nBelow, we'll calculate the square of `x`", "_____no_output_____" ] ], [ [ "x**2", "_____no_output_____" ] ], [ [ "We can give the variable `x` a new value, which will replace the old one.\n\nGoing forward, any time we use `x`, it will have this new value.\n\nBelow, we'll give `x` the value 17 instead of 7", "_____no_output_____" ] ], [ [ "x = 17", "_____no_output_____" ] ], [ [ "Now, we can use this new value for `x` to compute the value of `x + 1`.", "_____no_output_____" ] ], [ [ "x + 1", "_____no_output_____" ] ], [ [ "We can even use the current value of `x` to compute a new value for `x`.\n\nHere, we'll set x to twice its current value and print this new value", "_____no_output_____" ] ], [ [ "x = 2 * x\nx", "_____no_output_____" ] ], [ [ "We can have many named variables at once and use them in complicated ways", "_____no_output_____" ] ], [ [ "y = 5\nz = 3\n(y + z)/(y - z)", "_____no_output_____" ] ], [ [ "If we try to use a variable that we haven't given a value, Python will report an error to us.\n\nTry to use the value of `w`, which we haven't set yet.", "_____no_output_____" ] ], [ [ "w", "_____no_output_____" ] ], [ [ "The value for a variable is calculated at the time it is assigned. 
If we use the variable `a` to compute the value that we give to variable `b`, and then we later change the value of `b`, this doesn't affect `a`.\n\nIn the example below, we'll set `a` to 5, use it to compute a value for `b`, and then change `a` to 7.", "_____no_output_____" ] ], [ [ "a = 5\nb = 2 * a\na = 7", "_____no_output_____" ] ], [ [ "Check the value of `b`, which was computed when `a` was set to 5.", "_____no_output_____" ] ], [ [ "b", "_____no_output_____" ] ], [ [ "Now, check the value of `a`, which was updated to 7 after it was used to compute the value for `b`.", "_____no_output_____" ] ], [ [ "a", "_____no_output_____" ] ], [ [ "In the examples above, we used one-letter variable names similar to the ones we use in mathematics.\n\nIt's often better to use longer and more descriptive names for variables. Clearer variable names will make it easier for others to understand your Python when reading it -- and for you to understand it yourself when you come back to it weeks or months later. \n\nFor instance, here is Python code to define two variables representing the molecular masses of methionine and its oxidized derivative, methionine sulfoxide\n\n```\nmethionine_mass = 131.0405\nmeth_sulfox_mass = 147.0354\n```\n\nPaste this into the cells below and use these variables to calculate the change in molecular mass that occurs when methionine is oxidized one step.", "_____no_output_____" ] ], [ [ "methionine_mass = 131.0405\nmeth_sulfox_mass = 147.0354\nmethSulfoxMass = 147.0354\n\nmeth_sulfox_mass - methionine_mass", "_____no_output_____" ] ], [ [ "Of course, sometimes we want to work with very large or very small numbers. Python can both produce and understand scientific notation.\n\nPython defaults to scientific notation for very small numbers. 
For example, try printing the value of\n\n```\n1 / (1000 * 1000)\n```", "_____no_output_____" ] ], [ [ "1 / (1000 * 1000)", "_____no_output_____" ] ], [ [ "And, we can use scientific notation to write even ordinary numbers. For instance, to write 4,300, we can convert to scientific notation of 4.3 &times; 10&sup3;, which in Python is\n\n```\n4.3e3\n```\n\nUse this way of writing the number in the cell below.", "_____no_output_____" ] ], [ [ "4.3e3", "_____no_output_____" ] ], [ [ "_Exercise._ \n\nA standard plasmid miniprep could produce 10 micrograms of plasmid DNA. The cell below has a variable representing the yield of DNA from the miniprep, in grams.\n\nA typical plasmid containing a GFP reporter construct might be about 5 kilobase pairs long, and a single base pair has a molecular mass of 650 grams / mole. Add variables with descriptive names to store\n- the size of the plasmid\n- the molecular mass of one base pair", "_____no_output_____" ] ], [ [ "# mass in grams\nplasmid_mass_yield = 10e-6\n\n# size in base pairs\nplasmid_size = 5000\n\n# mol mass in (grams / mole)\nbase_pair_molmass = 650", "_____no_output_____" ] ], [ [ "_(continued)_ Use the variables you defined above to compute the value for a new variable with a descriptive name for \n- the molecular mass of the whole plasmid\n\nand then print the result of this computation", "_____no_output_____" ] ], [ [ "# mol mass in (grams / mole)\nplasmid_molmass = plasmid_size * base_pair_molmass\n'%e' % plasmid_molmass", "_____no_output_____" ] ], [ [ "_(continued)_ Now compute the yield of DNA from the miniprep, in moles\n", "_____no_output_____" ] ], [ [ "# mole = grams / (grams / mole)\nplasmid_mole_yield = plasmid_mass_yield / plasmid_molmass\nplasmid_mole_yield", "_____no_output_____" ] ], [ [ "_(continued)_ Avogadro's constant is the number of molecules per mole. 
Use this to compute the number of copies of plasmid DNA in the miniprep.\n\n```\navogadro = 6.02e23\n```", "_____no_output_____" ] ], [ [ "avogadro = 6.02e23\nplasmid_mole_yield\n'%e' % (plasmid_mole_yield * avogadro)", "_____no_output_____" ] ], [ [ "_(continued)_ The miniprep produces 30 µl of DNA. Define a variable for this volume and use it to compute the concentration of DNA in the plasmid miniprep.", "_____no_output_____" ] ], [ [ "# volume in liters\nvolume = 30e-6\n# moles / liters = molar\nplasmid_mole_yield / volume", "_____no_output_____" ] ], [ [ "### Data types\n\nPython keeps track of different **data types** for each variable and each output it computes. We've already seen two different data types, in fact -- one for integers (whole numbers) and one for numbers with a fractional part.\n\nWe can use `type()` to ask Python, what is the type of this value?\n\nHere is an example, asking Python the type of the number `6`. The result of `int` is short for **int**eger.", "_____no_output_____" ] ], [ [ "type(6)", "_____no_output_____" ] ], [ [ "Below we ask Python the type of the number 2.5. The result of `float` is short for **float**ing-point number, which is a slightly confusing reference to the decimal point in a number with a fractional part.", "_____no_output_____" ] ], [ [ "type(2.5)", "_____no_output_____" ] ], [ [ "All of the values that Python computes have a data type.\n\nBelow, we ask Python the type of the value computed when we do multiplication `2*3`", "_____no_output_____" ] ], [ [ "type(2*3)", "_____no_output_____" ] ], [ [ "When we multiply two integers together, the result is also an integer.\n\nDivision can create non-integers from integers, however. 
Even though `5` and `2` are integers, `5/2` is not an integer.", "_____no_output_____" ] ], [ [ "type(5/2)", "_____no_output_____" ] ], [ [ "Because division can create non-integers, the output of division is _always_ a `float`, even when the result happens to be a whole number and the fractional part is 0.", "_____no_output_____" ] ], [ [ "type(6/2)\n6/2", "_____no_output_____" ] ], [ [ "In fact, we can write a whole number as a `float` by adding the decimal point and zero, like `6.0`", "_____no_output_____" ] ], [ [ "6.0", "_____no_output_____" ] ], [ [ "Because Python keeps track of data types, the integer `6` and the number `6.0` are not exactly the same.", "_____no_output_____" ], [ "### Strings\n\nPython can also keep track of text in variables. We'll often use text to store DNA or protein sequences using one-letter codes. The type of this text data is `str`, because the text is a **str**ing of characters.\n\nTo write a text string in Python, enclose it in single quotes. Using quotes allows Python to distinguish a text string from the name of a variable: `'x'` is a one-letter text string, and `x` refers to a variable with a one-letter name.\n\nHere we look at the type of the string `'MCB200'`", "_____no_output_____" ] ], [ [ "type('MCB200')", "_____no_output_____" ] ], [ [ "We can join two strings together using `+`. Joining strings like this is called **concatenation**.", "_____no_output_____" ] ], [ [ "'MCB' + '200'", "_____no_output_____" ] ], [ [ "Notice that the string `'200'` is different from the number `200`. 
The string is a sequence of three characters that happen to be digits, and adding two strings that happen to be numbers will not perform arithmatic.\n\nHere we add the string `'200'` to the string `'100'`", "_____no_output_____" ] ], [ [ "'200' + '100'", "_____no_output_____" ] ], [ [ "What happens when we try to add a string with an integer?", "_____no_output_____" ] ], [ [ "'200' + 100", "_____no_output_____" ] ], [ [ "The string `'200'` and the integer `100` are incompatible types, and when we try to add them, it produces a \"type error\". But, what if we have an integer and we want to turn it into a string?\n\nWe can use `str(...)` to convert a number into a string, like this:", "_____no_output_____" ] ], [ [ "'MCB' + str(100+100)", "_____no_output_____" ] ], [ [ "_Exercise._ Define variables containing your first and last names. Use these variables to compute a string representing your full name. \n\n(_Hint_ You might need to add in some additional characters as well as the two name variables)", "_____no_output_____" ] ], [ [ "first_name = 'Nick'\nlast_name = 'Ingolia'\nfirst_name + ' ' + last_name", "_____no_output_____" ] ], [ [ "### Functions", "_____no_output_____" ] ], [ [ "`str()` is one example of a **function** in Python. We actually saw another example as well, `type()`.\n\nBoth of these functions take an **argument** as input and **return** a value computed using the argument. We say that we **call** a function when we run it.\n\nThe absolute value function `abs()` is also available in Python. Below we will call `abs()` on the value `-100` and see the return value.", "_____no_output_____" ] ], [ [ "abs(-100)", "_____no_output_____" ] ], [ [ "We can carry out further computations using the value returned by a function.\n\nBelow we double the result of taking the absolute value of -100. 
In mathematical terms, we're calculating _2 * |-100|_", "_____no_output_____" ] ], [ [ "2 * abs(-100)", "_____no_output_____" ] ], [ [ "We can also use complicated expressions as the argument to a function.\n\nAs shown below, we can compute the integer `200` using `abs()`, convert it into a string, and combine it with `'MCB'`", "_____no_output_____" ] ], [ [ "'MCB' + str(2 * abs(-100))", "_____no_output_____" ] ], [ [ "Some functions take more than one argument. For instance, `max()` finds the maximum value among all of its arguments.\n\nHere we find the largest number among `3`, `5`, and `4`:", "_____no_output_____" ] ], [ [ "max(3, 5, 4)", "_____no_output_____" ] ], [ [ "Python has a small collection of built-in functions, like `str()` and `abs()`, that are always available.\n\nAnother built-in function, `len()`, takes a string as an argument and returns the length of the string -- i.e., the nubmer of characters in the string.", "_____no_output_____" ] ], [ [ "len('MCB200')", "_____no_output_____" ] ], [ [ "The built-in function `print()` is useful for displaying the results of a computation in the middle of a cell. There were several places above where we split a single calculation across multiple cells in order to see an intermediate value. Other times, we assigned a value to a variable and then immediately used the variable just to see the result. 
In each of those cases, we could instead use `print()` to display the result and keep going.\n\nBelow, we use print to display some intermediate values when multiplying together two integers and then dividing them again, which produces a floating-point number.", "_____no_output_____" ] ], [ [ "x = 2\nprint(x)\nx = x * 2\nprint(x)\nx = x / 2\nprint(x)", "2\n4\n2.0\n" ] ], [ [ "The `print()` function has a special behavior with strings -- it doesn't display the quote marks and instead prints just the contents of the string.\n\nBelow we show the constrast between using `print()` on a string and displaying the string as the result of a computation.", "_____no_output_____" ] ], [ [ "x = 'MCB200'\nprint(x)\nx", "MCB200\n" ] ], [ [ "### Modules\n\nIn addition to these built-in functions, Python **modules** provide many other functions. For instance, many mathematical functions can be found in the `math` module. To use these functions, we must first **import** the `math` module. Once we do that, we can use mathematical functions such as `math.sqrt()`, which computes the square root of its argument.\n\nWe do this with\n\n```\nimport math\nmath.sqrt(49)\n```", "_____no_output_____" ] ], [ [ "import math\nmath.sqrt(49)", "_____no_output_____" ] ], [ [ "The `math` module also provides mathematical constants like π, named `math.pi`", "_____no_output_____" ] ], [ [ "math.pi", "_____no_output_____" ] ], [ [ "_Exercise._ The built-in `int()` function converts other data types to integers. Use `int()` to convert π to an integer and see the result.", "_____no_output_____" ] ], [ [ "int(math.pi)", "_____no_output_____" ] ], [ [ "_(continued)_ `int()` can also be used to convert a string to an integer. 
Try out this use of `int()` on a string that represents an integer.", "_____no_output_____" ] ], [ [ "2 * int('1342')", "_____no_output_____" ] ], [ [ "_(continued)_ Now, test the use of `int()` on a string that does _not_ represent an integer, something with letters or other non-digits in it.", "_____no_output_____" ] ], [ [ "int('13A2')", "_____no_output_____" ] ], [ [ "_(continued)_ Use `int()` to convert _e_ (the natural logarithm base, Euler's number), given by `math.e`, to an integer.", "_____no_output_____" ] ], [ [ "int(math.e)", "_____no_output_____" ] ], [ [ "_(continued)_ Is this different from the result you would expect? Python has a built-in function called `round()` that is specialized for converting numbers to integers. Use `round()` on _e_ instead.", "_____no_output_____" ] ], [ [ "round(math.e)", "_____no_output_____" ] ], [ [ "### Methods\n\nA **method** is a special kind of Python function that is \"attached\" to a Python type. For example, the `str` data type has many methods to carry out operations that make sense for a string.\n\nFor example, the `upper()` method returns a version of a string with all the letters converted to upper-case. \n\nBelow we demonstrate the use of `upper()` on the string `'mcb200'`. Keep in mind that `upper()` doesn't take any arguments. We still need parentheses to indicate to Python that we're calling a function, and we can just use empty parentheses.", "_____no_output_____" ] ], [ [ "'mcb200'.upper()", "_____no_output_____" ] ], [ [ "For comparison, we can see what happens when we leave off the parentheses:", "_____no_output_____" ] ], [ [ "'mcb200'.upper", "_____no_output_____" ] ], [ [ "The string method `replace()` creates a new string where a specified sub-string is replaced with something else. 
This method has two arguments: the substring to be changed, and the replacement.\n\nBelow, we show how `.replace()` can be used to change every occurrence of \"ight\" to \"ite\" in a short sentence, \"turn right at the light tonight\".", "_____no_output_____" ] ], [ [ "'turn right at the light tonight'.replace('ight', 'ite')", "_____no_output_____" ] ], [ [ "The string methods like `upper()` and `replace()` don't change the string itself, but instead make a new string based on the old one. We can see this by storing a string in a variable, using `upper()`, and then checking the original value of the variable.", "_____no_output_____" ] ], [ [ "original = 'mcb200'\nnew = original.upper()\nprint(original)\nprint(new)", "mcb200\nMCB200\n" ] ], [ [ "In fact, we can never change the contents of an existing string; in Python, strings are **immutable**. We can assign a new string to an existing variable and replace the existing string, just as we could assign a new number to an existing variable to replace its current value.\n\nLater on, we'll see other data types that are **mutable**.", "_____no_output_____" ], [ "_(Exercise)_ Here is a short DNA sequence written in lower-case letters. \n\n```\ndna = 'atggctacacat'\n```\n\nUse the string methods we just learned to generate a corresponding RNA sequence using upper-case letters. Recall that RNA sequences have uracil in place of thymine.", "_____no_output_____" ] ], [ [ "dna = 'atggctacacat'\ndna.replace('t','u').upper()", "_____no_output_____" ] ], [ [ "_(continued)_ Switch the order of the two string methods to reach the same result. What else needs to change?", "_____no_output_____" ] ], [ [ "dna.upper().replace('T','U')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "raw", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "raw" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
4a2c5a3334960d6363be0748b4e952b72df5a92e
82,688
ipynb
Jupyter Notebook
all_repository/nb_ex10_3_deep-qlearn-mountaincar.ipynb
jskDr/keraspp_2021
dc46ebb4f4dea48612135136c9837da7c246534a
[ "MIT" ]
4
2021-09-21T15:35:04.000Z
2021-12-14T12:14:44.000Z
all_repository/nb_ex10_3_deep-qlearn-mountaincar.ipynb
jskDr/keraspp_2021
dc46ebb4f4dea48612135136c9837da7c246534a
[ "MIT" ]
null
null
null
all_repository/nb_ex10_3_deep-qlearn-mountaincar.ipynb
jskDr/keraspp_2021
dc46ebb4f4dea48612135136c9837da7c246534a
[ "MIT" ]
null
null
null
72.090671
23,420
0.72337
[ [ [ "## 10.4 딥러닝 기반 Q-Learning을 이용하는 강화학습", "_____no_output_____" ], [ "- 관련 패키지 불러오기 ", "_____no_output_____" ] ], [ [ "# 기본 패키지\nimport numpy as np\nimport random\nfrom collections import deque\nimport matplotlib.pyplot as plt ", "_____no_output_____" ], [ "# 강화학습 환경 패키지\nimport gym", "_____no_output_____" ], [ "# 인공지능 패키지: 텐서플로, 케라스 \n# 호환성을 위해 텐스플로에 포함된 케라스를 불러옴 \nimport tensorflow as tf # v2.4.1 at 7/25/2021\nfrom tensorflow import keras # v2.4.0 at 7/25/2021\nfrom tensorflow.keras import Model, Input\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.optimizers import Adam", "_____no_output_____" ] ], [ [ "- Q 함수를 위한 뉴럴넷 구성하기", "_____no_output_____" ] ], [ [ "def create_q_model(num_states, num_actions):\n inputs = Input(shape=(num_states,))\n layer = Dense(32, activation=\"relu\")(inputs)\n layer = Dense(16, activation=\"relu\")(layer)\n action = Dense(num_actions, activation=\"linear\")(layer)\n return Model(inputs=inputs, outputs=action)", "_____no_output_____" ], [ "model = create_q_model(4,2)\nmodel.summary()", "Model: \"model_8\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ninput_9 (InputLayer) [(None, 4)] 0 \n_________________________________________________________________\ndense_24 (Dense) (None, 32) 160 \n_________________________________________________________________\ndense_25 (Dense) (None, 16) 528 \n_________________________________________________________________\ndense_26 (Dense) (None, 2) 34 \n=================================================================\nTotal params: 722\nTrainable params: 722\nNon-trainable params: 0\n_________________________________________________________________\n" ] ], [ [ "- Q함수 뉴럴넷의 학습에 필요한 코드 작성", "_____no_output_____" ] ], [ [ "def get_env_model(id='MountainCar-v0'):\n env = gym.make(id)\n num_states = env.observation_space.shape[0]\n num_actions = env.action_space.n\n 
model = create_q_model(num_states, num_actions)\n return env, model", "_____no_output_____" ], [ "def train(model, env):\n state_size = env.observation_space.shape[0]\n action_size = env.action_space.n \n states = np.zeros((10,state_size), dtype=np.float32)\n with tf.GradientTape() as tape:\n predicts = model(states)", "_____no_output_____" ], [ "env, model = get_env_model()\ntrain(model, env)\nprint('Simple processing used in training is completed!')", "Simple processing used in training is completed!\n" ], [ "env_cartpole = gym.make('CartPole-v1')\nprint('CartPole-v1: ', env_cartpole.observation_space.shape, env_cartpole.action_space.n)\nenv_mountaincar = gym.make('MountainCar-v0')\nprint('MountainCar-v0: ', env_mountaincar.observation_space.shape, env_mountaincar.action_space.n)", "CartPole-v1: (4,) 2\nMountainCar-v0: (2,) 3\n" ], [ "class World_00:\n def __init__(self):\n self.get_env_model()\n\n def get_env_model(self):\n self.env = gym.make('MountainCar-v0')\n self.num_states = env.observation_space.shape[0]\n self.num_actions = env.action_space.n\n self.model = create_q_model(self.num_states, self.num_actions)\n # print(self.model.summary())\n\n def train(self): \n states = np.zeros((10,self.num_states), dtype=np.float32)\n with tf.GradientTape() as tape:\n predicts = self.model(states)\n\nnew_world = World_00()\nnew_world.train()\nprint('Simple processing used in training is completed!')", "Simple processing used in training is completed!\n" ], [ "def env_test_model_memory(memory, env, model, n_episodes=1000, \n flag_render=False):\n for e in range(n_episodes):\n done = False\n score = 0\n s = env.reset()\n while not done:\n s_array = np.array(s).reshape((1,-1))\n Qsa = model.predict(s_array)[0]\n a = np.argmax(Qsa)\n next_s, r, done, _ = env.step(a)\n if flag_render:\n env.render()\n score += r\n memory.append([s,a,r,next_s,done])\n print(f'Episode: {e:5d} --> Score: {score:3.1f}')\n print('Notice that the max score is set to 500.0 in CartPole-v1')", 
"_____no_output_____" ], [ "def list_rotate(l):\n return list(zip(*l))", "_____no_output_____" ], [ "class World_01(World_00):\n def __init__(self):\n World_00.__init__(self)\n self.memory = deque(maxlen=2000)\n self.N_batch = 64\n self.t_model = create_q_model(self.num_states, self.num_actions)\n self.discount_factor = 0.99\n self.learning_rate = 0.001\n self.optimizer = Adam(lr=self.learning_rate)\n\n def trial(self, flag_render=False):\n env_test_model_memory(self.memory, self.env,\n self.model, n_episodes=10, flag_render=flag_render)\n print(len(self.memory))\n\n def train_memory(self):\n if len(self.memory) >= self.N_batch:\n memory_batch = random.sample(self.memory, self.N_batch)\n s_l,a_l,r_l,next_s_l,done_l = [np.array(x) for x in list_rotate(memory_batch)]\n model_w = self.model.trainable_variables\n with tf.GradientTape() as tape:\n Qsa_pred_l = self.model(s_l.astype(np.float32))\n a_l_onehot = tf.one_hot(a_l, self.num_actions)\n Qs_a_pred_l = tf.reduce_sum(a_l_onehot * Qsa_pred_l, \n axis=1) \n\n Qsa_tpred_l = self.t_model(next_s_l.astype(np.float32)) \n Qsa_tpred_l = tf.stop_gradient(Qsa_tpred_l)\n\n max_Q_next_s_a_l = np.amax(Qsa_tpred_l, axis=-1)\n Qs_a_l = r_l + (1 - done_l) * self.discount_factor * max_Q_next_s_a_l\n loss = tf.reduce_mean(tf.square(Qs_a_l - Qs_a_pred_l))\n grads = tape.gradient(loss, model_w)\n self.optimizer.apply_gradients(zip(grads, model_w))", "_____no_output_____" ], [ "new_world = World_01()\nnew_world.trial()\nnew_world.train_memory()\nnew_world.env.close()\nprint('Completed!')", "Episode: 0 --> Score: -200.0\nEpisode: 1 --> Score: -200.0\nEpisode: 2 --> Score: -200.0\nEpisode: 3 --> Score: -200.0\nEpisode: 4 --> Score: -200.0\nEpisode: 5 --> Score: -200.0\nEpisode: 6 --> Score: -200.0\nEpisode: 7 --> Score: -200.0\nEpisode: 8 --> Score: -200.0\nEpisode: 9 --> Score: -200.0\nNotice that the max score is set to 500.0 in CartPole-v1\n2000\nCompleted!\n" ], [ "class World_02(World_01):\n def __init__(self):\n 
World_01.__init__(self)\n self.epsilon = 0.2\n \n def update_t_model(self):\n self.t_model.set_weights(self.model.get_weights())\n\n def best_action(self, s):\n if random.random() <= self.epsilon:\n return random.randrange(self.num_actions)\n else:\n s_array = np.array(s).reshape((1,-1))\n Qsa = self.model.predict(s_array)[0]\n return np.argmax(Qsa)\n\n def trials(self, n_episodes=100, flag_render=False):\n memory = self.memory\n env = self.env\n model = self.model\n score_l = []\n for e in range(n_episodes):\n done = False\n score = 0\n s = env.reset()\n while not done: \n a = self.best_action(s)\n next_s, r, done, _ = env.step(a)\n if flag_render:\n env.render()\n score += r\n memory.append([s,a,r,next_s,done])\n # self.train_memory() \n s = next_s\n self.train_memory() \n self.update_t_model()\n print(f'Episode: {e:5d} --> Score: {score:3.1f}') \n score_l.append(score) \n return score_l", "_____no_output_____" ], [ "new_world = World_02()\nscore_l = new_world.trials(n_episodes=50)\nnew_world.env.close()\nnp.save('score_l.npy', score_l)", "Episode: 0 --> Score: 12.0\nEpisode: 1 --> Score: 12.0\nEpisode: 2 --> Score: 13.0\nEpisode: 3 --> Score: 9.0\nEpisode: 4 --> Score: 24.0\nEpisode: 5 --> Score: 10.0\nEpisode: 6 --> Score: 12.0\nEpisode: 7 --> Score: 9.0\nEpisode: 8 --> Score: 11.0\nEpisode: 9 --> Score: 8.0\nEpisode: 10 --> Score: 8.0\nEpisode: 11 --> Score: 8.0\nEpisode: 12 --> Score: 12.0\nEpisode: 13 --> Score: 11.0\nEpisode: 14 --> Score: 11.0\nEpisode: 15 --> Score: 10.0\nEpisode: 16 --> Score: 12.0\nEpisode: 17 --> Score: 8.0\nEpisode: 18 --> Score: 15.0\nEpisode: 19 --> Score: 10.0\nEpisode: 20 --> Score: 9.0\nEpisode: 21 --> Score: 14.0\nEpisode: 22 --> Score: 14.0\nEpisode: 23 --> Score: 14.0\nEpisode: 24 --> Score: 9.0\nEpisode: 25 --> Score: 14.0\nEpisode: 26 --> Score: 11.0\nEpisode: 27 --> Score: 12.0\nEpisode: 28 --> Score: 16.0\nEpisode: 29 --> Score: 27.0\nEpisode: 30 --> Score: 13.0\nEpisode: 31 --> Score: 12.0\nEpisode: 32 --> Score: 
10.0\nEpisode: 33 --> Score: 22.0\nEpisode: 34 --> Score: 15.0\nEpisode: 35 --> Score: 63.0\nEpisode: 36 --> Score: 10.0\nEpisode: 37 --> Score: 64.0\nEpisode: 38 --> Score: 25.0\nEpisode: 39 --> Score: 11.0\nEpisode: 40 --> Score: 57.0\nEpisode: 41 --> Score: 59.0\nEpisode: 42 --> Score: 49.0\nEpisode: 43 --> Score: 51.0\nEpisode: 44 --> Score: 97.0\nEpisode: 45 --> Score: 37.0\nEpisode: 46 --> Score: 77.0\nEpisode: 47 --> Score: 34.0\nEpisode: 48 --> Score: 60.0\nEpisode: 49 --> Score: 51.0\n" ] ], [ [ "---\n### 전체코드 (분할 버전)", "_____no_output_____" ] ], [ [ "l = [[1,2],[3,4],[5,6]]\nlist(zip(*l))", "_____no_output_____" ], [ "# 기본 패키지\nimport numpy as np\nimport random\nfrom collections import deque\nimport matplotlib.pyplot as plt \n\n# 강화학습 환경 패키지\nimport gym\n\n# 인공지능 패키지: 텐서플로, 케라스 \n# 호환성을 위해 텐스플로에 포함된 케라스를 불러옴 \nimport tensorflow as tf\nfrom tensorflow import keras \nfrom tensorflow.keras import Model, Input\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.optimizers import Adam", "_____no_output_____" ], [ "def create_q_model(num_states, num_actions):\n inputs = Input(shape=(num_states,))\n layer = Dense(32, activation=\"relu\")(inputs)\n layer = Dense(16, activation=\"relu\")(layer)\n action = Dense(num_actions, activation=\"linear\")(layer)\n return Model(inputs=inputs, outputs=action)\n\ndef list_rotate(l):\n return list(zip(*l))", "_____no_output_____" ], [ "class WorldFull():\n def __init__(self):\n self.get_env_model() #? 
\n \n self.memory = deque(maxlen=2000)\n self.N_batch = 64\n self.t_model = create_q_model(self.num_states, self.num_actions)\n self.discount_factor = 0.99\n self.learning_rate = 0.001\n self.optimizer = Adam(lr=self.learning_rate)\n \n self.epsilon = 0.2\n \n def get_env_model(self):\n self.env = gym.make('CartPole-v1')\n self.num_states = self.env.observation_space.shape[0]\n self.num_actions = self.env.action_space.n\n self.model = create_q_model(self.num_states, self.num_actions)\n \n def update_t_model(self):\n self.t_model.set_weights(self.model.get_weights())\n\n def best_action(self, s):\n if random.random() <= self.epsilon:\n return random.randrange(self.num_actions)\n else:\n s_array = np.array(s).reshape((1,-1))\n Qsa = self.model.predict(s_array)[0]\n return np.argmax(Qsa)\n\n def train_memory(self):\n if len(self.memory) >= self.N_batch:\n memory_batch = random.sample(self.memory, self.N_batch)\n s_l,a_l,r_l,next_s_l,done_l = [np.array(x) for x in list_rotate(memory_batch)]\n model_w = self.model.trainable_variables\n with tf.GradientTape() as tape:\n Qsa_pred_l = self.model(s_l.astype(np.float32))\n a_l_onehot = tf.one_hot(a_l, self.num_actions)\n Qs_a_pred_l = tf.reduce_sum(a_l_onehot * Qsa_pred_l, \n axis=1) \n\n Qsa_tpred_l = self.t_model(next_s_l.astype(np.float32)) \n Qsa_tpred_l = tf.stop_gradient(Qsa_tpred_l)\n\n max_Q_next_s_a_l = np.amax(Qsa_tpred_l, axis=-1)\n Qs_a_l = r_l + (1 - done_l) * self.discount_factor * max_Q_next_s_a_l\n loss = tf.reduce_mean(tf.square(Qs_a_l - Qs_a_pred_l))\n grads = tape.gradient(loss, model_w)\n self.optimizer.apply_gradients(zip(grads, model_w)) \n \n def trials(self, n_episodes=100, flag_render=False):\n memory = self.memory\n env = self.env\n model = self.model\n score_l = []\n for e in range(n_episodes):\n done = False\n score = 0\n s = env.reset()\n while not done: \n a = self.best_action(s)\n next_s, r, done, _ = env.step(a)\n if flag_render:\n env.render()\n score += r\n 
memory.append([s,a,r,next_s,done])\n # self.train_memory() \n s = next_s\n self.train_memory() \n self.update_t_model()\n print(f'Episode: {e:5d} --> Score: {score:3.1f}') \n score_l.append(score) \n return score_l", "_____no_output_____" ], [ "new_world = WorldFull()\nscore_l = new_world.trials(n_episodes=100)\nnew_world.env.close()\nnp.save('score_l.npy', score_l)\nprint('Job completed!')", "_____no_output_____" ], [ "plt.plot(score_l)\nplt.title(\"Deep Q-Learning for Cartpole\")\nplt.xlabel(\"Episode\")\nplt.ylabel(\"Score\")", "Episode: 0 --> Score: 11.0\nEpisode: 1 --> Score: 10.0\nEpisode: 2 --> Score: 8.0\nEpisode: 3 --> Score: 8.0\nEpisode: 4 --> Score: 10.0\nEpisode: 5 --> Score: 17.0\nEpisode: 6 --> Score: 10.0\nEpisode: 7 --> Score: 9.0\nEpisode: 8 --> Score: 10.0\nEpisode: 9 --> Score: 13.0\nEpisode: 10 --> Score: 10.0\nEpisode: 11 --> Score: 10.0\nEpisode: 12 --> Score: 10.0\nEpisode: 13 --> Score: 10.0\nEpisode: 14 --> Score: 8.0\nEpisode: 15 --> Score: 12.0\nEpisode: 16 --> Score: 9.0\nEpisode: 17 --> Score: 10.0\nEpisode: 18 --> Score: 9.0\nEpisode: 19 --> Score: 8.0\nEpisode: 20 --> Score: 10.0\nEpisode: 21 --> Score: 15.0\nEpisode: 22 --> Score: 8.0\nEpisode: 23 --> Score: 8.0\nEpisode: 24 --> Score: 10.0\nEpisode: 25 --> Score: 15.0\nEpisode: 26 --> Score: 9.0\nEpisode: 27 --> Score: 9.0\nEpisode: 28 --> Score: 10.0\nEpisode: 29 --> Score: 9.0\nEpisode: 30 --> Score: 11.0\nEpisode: 31 --> Score: 8.0\nEpisode: 32 --> Score: 12.0\nEpisode: 33 --> Score: 15.0\nEpisode: 34 --> Score: 11.0\nEpisode: 35 --> Score: 11.0\nEpisode: 36 --> Score: 11.0\nEpisode: 37 --> Score: 11.0\nEpisode: 38 --> Score: 14.0\nEpisode: 39 --> Score: 49.0\nEpisode: 40 --> Score: 15.0\nEpisode: 41 --> Score: 11.0\nEpisode: 42 --> Score: 11.0\nEpisode: 43 --> Score: 16.0\nEpisode: 44 --> Score: 11.0\nEpisode: 45 --> Score: 11.0\nEpisode: 46 --> Score: 10.0\nEpisode: 47 --> Score: 75.0\nEpisode: 48 --> Score: 10.0\nEpisode: 49 --> Score: 11.0\nEpisode: 50 --> Score: 
15.0\nEpisode: 51 --> Score: 12.0\nEpisode: 52 --> Score: 12.0\nEpisode: 53 --> Score: 9.0\nEpisode: 54 --> Score: 9.0\nEpisode: 55 --> Score: 10.0\nEpisode: 56 --> Score: 12.0\nEpisode: 57 --> Score: 72.0\nEpisode: 58 --> Score: 141.0\nEpisode: 59 --> Score: 52.0\nEpisode: 60 --> Score: 46.0\nEpisode: 61 --> Score: 86.0\nEpisode: 62 --> Score: 93.0\nEpisode: 63 --> Score: 64.0\nEpisode: 64 --> Score: 72.0\nEpisode: 65 --> Score: 83.0\nEpisode: 66 --> Score: 205.0\nEpisode: 67 --> Score: 109.0\nEpisode: 68 --> Score: 221.0\nEpisode: 69 --> Score: 174.0\nEpisode: 70 --> Score: 149.0\nEpisode: 71 --> Score: 221.0\nEpisode: 72 --> Score: 213.0\nEpisode: 73 --> Score: 365.0\nEpisode: 74 --> Score: 149.0\nEpisode: 75 --> Score: 132.0\nEpisode: 76 --> Score: 239.0\nEpisode: 77 --> Score: 388.0\nEpisode: 78 --> Score: 177.0\nEpisode: 79 --> Score: 197.0\nEpisode: 80 --> Score: 216.0\nEpisode: 81 --> Score: 190.0\nEpisode: 82 --> Score: 242.0\nEpisode: 83 --> Score: 239.0\nEpisode: 84 --> Score: 221.0\nEpisode: 85 --> Score: 231.0\nEpisode: 86 --> Score: 146.0\nEpisode: 87 --> Score: 154.0\nEpisode: 88 --> Score: 276.0\nEpisode: 89 --> Score: 152.0\nEpisode: 90 --> Score: 131.0\nEpisode: 91 --> Score: 146.0\nEpisode: 92 --> Score: 152.0\nEpisode: 93 --> Score: 189.0\nEpisode: 94 --> Score: 189.0\nEpisode: 95 --> Score: 163.0\nEpisode: 96 --> Score: 170.0\nEpisode: 97 --> Score: 158.0\nEpisode: 98 --> Score: 166.0\nEpisode: 99 --> Score: 180.0\nJob completed!\n" ], [ "plt.plot(score_l)\nplt.title(\"Deep Q-Learning for Cartpole\")\nplt.xlabel(\"Episode\")\nplt.ylabel(\"Score\")", "_____no_output_____" ] ], [ [ "---\n### 전체코드", "_____no_output_____" ] ], [ [ "\"\"\"\nENV: MoutainCar\n- 2nd hidden layer: 16 --> 32\n\"\"\"\n\n# 기본 패키지\nimport numpy as np\nimport random\nfrom collections import deque\nimport matplotlib.pyplot as plt \n\n# 강화학습 환경 패키지\nimport gym\n\n# 인공지능 패키지: 텐서플로, 케라스 \n# 호환성을 위해 텐스플로에 포함된 케라스를 불러옴 \nimport tensorflow as tf\nfrom tensorflow import keras \nfrom 
tensorflow.keras import Model, Input\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.optimizers import Adam\n\ndef create_q_model(num_states, num_actions):\n inputs = Input(shape=(num_states,))\n layer = Dense(32, activation=\"relu\")(inputs)\n layer = Dense(32, activation=\"relu\")(layer)\n action = Dense(num_actions, activation=\"linear\")(layer)\n return Model(inputs=inputs, outputs=action)\n\ndef list_rotate(l):\n return list(zip(*l))\n\nclass WorldFull():\n def __init__(self):\n self.get_env_model() #? \n \n self.memory = deque(maxlen=2000)\n self.N_batch = 64\n self.t_model = create_q_model(self.num_states, self.num_actions)\n self.discount_factor = 0.99\n self.learning_rate = 0.001\n self.optimizer = Adam(lr=self.learning_rate)\n \n self.epsilon = 0.05\n \n def get_env_model(self):\n self.env = gym.make('MountainCar-v0')\n self.num_states = self.env.observation_space.shape[0]\n self.num_actions = self.env.action_space.n\n self.model = create_q_model(self.num_states, self.num_actions)\n \n def update_t_model(self):\n self.t_model.set_weights(self.model.get_weights())\n\n def best_action(self, s):\n if random.random() <= self.epsilon:\n return random.randrange(self.num_actions)\n else:\n s_array = np.array(s).reshape((1,-1))\n Qsa = self.model.predict(s_array)[0]\n return np.argmax(Qsa)\n\n def train_memory(self):\n if len(self.memory) >= self.N_batch:\n memory_batch = random.sample(self.memory, self.N_batch)\n s_l,a_l,r_l,next_s_l,done_l = [np.array(x) for x in list_rotate(memory_batch)]\n model_w = self.model.trainable_variables\n with tf.GradientTape() as tape:\n Qsa_pred_l = self.model(s_l.astype(np.float32))\n a_l_onehot = tf.one_hot(a_l, self.num_actions)\n Qs_a_pred_l = tf.reduce_sum(a_l_onehot * Qsa_pred_l, \n axis=1) \n\n Qsa_tpred_l = self.t_model(next_s_l.astype(np.float32)) \n Qsa_tpred_l = tf.stop_gradient(Qsa_tpred_l)\n\n max_Q_next_s_a_l = np.amax(Qsa_tpred_l, axis=-1)\n Qs_a_l = r_l + (1 - done_l) * self.discount_factor * 
max_Q_next_s_a_l\n loss = tf.reduce_mean(tf.square(Qs_a_l - Qs_a_pred_l))\n grads = tape.gradient(loss, model_w)\n self.optimizer.apply_gradients(zip(grads, model_w)) \n \n def trials(self, n_episodes=100, flag_render=False):\n memory = self.memory\n env = self.env\n model = self.model\n score_l = []\n for e in range(n_episodes):\n done = False\n score = 0\n s = env.reset()\n while not done: \n a = self.best_action(s)\n next_s, r, done, _ = env.step(a)\n if flag_render:\n env.render()\n score += r\n memory.append([s,a,r,next_s,done])\n # self.train_memory() \n s = next_s\n self.train_memory() \n self.update_t_model()\n print(f'Episode: {e:5d} --> Score: {score:3.1f}') \n score_l.append(score) \n return score_l\n\nnew_world = WorldFull()\nscore_l = new_world.trials(n_episodes=100)\nnew_world.env.close()\nnp.save('score_l.npy', score_l)\nprint('Job completed!')\n\nplt.plot(score_l)\nplt.title(\"Deep Q-Learning for Cartpole\")\nplt.xlabel(\"Episode\")\nplt.ylabel(\"Score\")", "2021-10-17 08:37:31.238797: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcuda.so.1\n2021-10-17 08:37:31.465831: E tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:967] could not open file to read NUMA node: /sys/bus/pci/devices/0000:01:00.0/numa_node\nYour kernel may have been built without NUMA support.\n2021-10-17 08:37:31.466002: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1561] Found device 0 with properties: \npciBusID: 0000:01:00.0 name: NVIDIA GeForce GTX 1050 computeCapability: 6.1\ncoreClock: 1.455GHz coreCount: 5 deviceMemorySize: 2.00GiB deviceMemoryBandwidth: 104.43GiB/s\n2021-10-17 08:37:31.468567: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcudart.so.10.1\n2021-10-17 08:37:31.510279: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcublas.so.10\n2021-10-17 08:37:31.533913: I 
tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcufft.so.10\n2021-10-17 08:37:31.540206: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcurand.so.10\n2021-10-17 08:37:31.584587: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcusolver.so.10\n2021-10-17 08:37:31.590358: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcusparse.so.10\n2021-10-17 08:37:31.670595: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcudnn.so.7\n2021-10-17 08:37:31.671524: E tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:967] could not open file to read NUMA node: /sys/bus/pci/devices/0000:01:00.0/numa_node\nYour kernel may have been built without NUMA support.\n2021-10-17 08:37:31.672347: E tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:967] could not open file to read NUMA node: /sys/bus/pci/devices/0000:01:00.0/numa_node\nYour kernel may have been built without NUMA support.\n2021-10-17 08:37:31.672545: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1703] Adding visible gpu devices: 0\n2021-10-17 08:37:31.673634: I tensorflow/core/platform/cpu_feature_guard.cc:143] Your CPU supports instructions that this TensorFlow binary was not compiled to use: SSE4.1 SSE4.2 AVX AVX2 FMA\n2021-10-17 08:37:31.690870: I tensorflow/core/platform/profile_utils/cpu_utils.cc:102] CPU Frequency: 3599995000 Hz\n2021-10-17 08:37:31.692202: I tensorflow/compiler/xla/service/service.cc:168] XLA service 0x560e01582960 initialized for platform Host (this does not guarantee that XLA will be used). 
Devices:\n2021-10-17 08:37:31.692218: I tensorflow/compiler/xla/service/service.cc:176] StreamExecutor device (0): Host, Default Version\n2021-10-17 08:37:31.694440: E tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:967] could not open file to read NUMA node: /sys/bus/pci/devices/0000:01:00.0/numa_node\nYour kernel may have been built without NUMA support.\n2021-10-17 08:37:31.694771: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1561] Found device 0 with properties: \npciBusID: 0000:01:00.0 name: NVIDIA GeForce GTX 1050 computeCapability: 6.1\ncoreClock: 1.455GHz coreCount: 5 deviceMemorySize: 2.00GiB deviceMemoryBandwidth: 104.43GiB/s\n2021-10-17 08:37:31.694817: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcudart.so.10.1\n2021-10-17 08:37:31.694842: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcublas.so.10\n2021-10-17 08:37:31.694853: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcufft.so.10\n2021-10-17 08:37:31.694863: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcurand.so.10\n2021-10-17 08:37:31.694874: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcusolver.so.10\n2021-10-17 08:37:31.694884: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcusparse.so.10\n2021-10-17 08:37:31.694896: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcudnn.so.7\n2021-10-17 08:37:31.695503: E tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:967] could not open file to read NUMA node: /sys/bus/pci/devices/0000:01:00.0/numa_node\nYour kernel may have been built without NUMA support.\n2021-10-17 08:37:31.696304: E tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:967] could not 
open file to read NUMA node: /sys/bus/pci/devices/0000:01:00.0/numa_node\nYour kernel may have been built without NUMA support.\n2021-10-17 08:37:31.696492: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1703] Adding visible gpu devices: 0\n2021-10-17 08:37:31.697201: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcudart.so.10.1\n2021-10-17 08:37:31.963178: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1102] Device interconnect StreamExecutor with strength 1 edge matrix:\n2021-10-17 08:37:31.963213: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1108] 0 \n2021-10-17 08:37:31.963222: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1121] 0: N \n2021-10-17 08:37:31.964366: E tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:967] could not open file to read NUMA node: /sys/bus/pci/devices/0000:01:00.0/numa_node\nYour kernel may have been built without NUMA support.\n2021-10-17 08:37:31.964521: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1330] Could not identify NUMA node of platform GPU id 0, defaulting to 0. 
Your kernel may not have been built with NUMA support.\n2021-10-17 08:37:31.965097: E tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:967] could not open file to read NUMA node: /sys/bus/pci/devices/0000:01:00.0/numa_node\nYour kernel may have been built without NUMA support.\n2021-10-17 08:37:31.965751: E tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:967] could not open file to read NUMA node: /sys/bus/pci/devices/0000:01:00.0/numa_node\nYour kernel may have been built without NUMA support.\n2021-10-17 08:37:31.965946: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1247] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:0 with 1421 MB memory) -> physical GPU (device: 0, name: NVIDIA GeForce GTX 1050, pci bus id: 0000:01:00.0, compute capability: 6.1)\n2021-10-17 08:37:31.970105: I tensorflow/compiler/xla/service/service.cc:168] XLA service 0x560e0508b710 initialized for platform CUDA (this does not guarantee that XLA will be used). Devices:\n2021-10-17 08:37:31.970124: I tensorflow/compiler/xla/service/service.cc:176] StreamExecutor device (0): NVIDIA GeForce GTX 1050, Compute Capability 6.1\n2021-10-17 08:37:34.166995: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcublas.so.10\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
4a2c86f68a3abead3f7a0d52529d748c1985c4fc
5,624
ipynb
Jupyter Notebook
colabs/smartsheet_report_to_bigquery.ipynb
Gregorfran/starthinker
4c9031f3001d380dbfc213a83b11ec61dfcffe47
[ "Apache-2.0" ]
null
null
null
colabs/smartsheet_report_to_bigquery.ipynb
Gregorfran/starthinker
4c9031f3001d380dbfc213a83b11ec61dfcffe47
[ "Apache-2.0" ]
6
2021-03-19T12:00:18.000Z
2022-02-10T09:43:42.000Z
colabs/smartsheet_report_to_bigquery.ipynb
Gregorfran/starthinker-gregor
4c9031f3001d380dbfc213a83b11ec61dfcffe47
[ "Apache-2.0" ]
null
null
null
34.931677
230
0.524004
[ [ [ "#1. Install Dependencies\nFirst install the libraries needed to execute recipes, this only needs to be done once, then click play.\n", "_____no_output_____" ] ], [ [ "!pip install git+https://github.com/google/starthinker\n", "_____no_output_____" ] ], [ [ "#2. Get Cloud Project ID\nTo run this recipe [requires a Google Cloud Project](https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md), this only needs to be done once, then click play.\n", "_____no_output_____" ] ], [ [ "CLOUD_PROJECT = 'PASTE PROJECT ID HERE'\n\nprint(\"Cloud Project Set To: %s\" % CLOUD_PROJECT)\n", "_____no_output_____" ] ], [ [ "#3. Get Client Credentials\nTo read and write to various endpoints requires [downloading client credentials](https://github.com/google/starthinker/blob/master/tutorials/cloud_client_installed.md), this only needs to be done once, then click play.\n", "_____no_output_____" ] ], [ [ "CLIENT_CREDENTIALS = 'PASTE CREDENTIALS HERE'\n\nprint(\"Client Credentials Set To: %s\" % CLIENT_CREDENTIALS)\n", "_____no_output_____" ] ], [ [ "#4. Enter SmartSheet Report To BigQuery Parameters\nMove report data into a BigQuery table.\n 1. Specify <a href='https://smartsheet-platform.github.io/api-docs/' target='_blank'>SmartSheet Report</a> token.\n 1. Locate the ID of a report by viewing its properties.\n 1. Provide a BigQuery dataset ( must exist ) and table to write the data into.\n 1. 
StarThinker will automatically map the correct schema.\nModify the values below for your use case, can be done multiple times, then click play.\n", "_____no_output_____" ] ], [ [ "FIELDS = {\n 'auth_read': 'user', # Credentials used for reading data.\n 'auth_write': 'service', # Credentials used for writing data.\n 'token': '', # Retrieve from SmartSheet account settings.\n 'report': '', # Retrieve from report properties.\n 'dataset': '', # Existing BigQuery dataset.\n 'table': '', # Table to create from this report.\n 'schema': '', # Schema provided in JSON list format or leave empty to auto detect.\n}\n\nprint(\"Parameters Set To: %s\" % FIELDS)\n", "_____no_output_____" ] ], [ [ "#5. Execute SmartSheet Report To BigQuery\nThis does NOT need to be modified unles you are changing the recipe, click play.\n", "_____no_output_____" ] ], [ [ "from starthinker.util.project import project\nfrom starthinker.script.parse import json_set_fields, json_expand_includes\n\nUSER_CREDENTIALS = '/content/user.json'\n\nTASKS = [\n {\n 'smartsheet': {\n 'auth': 'user',\n 'token': {'field': {'name': 'token','kind': 'string','order': 2,'default': '','description': 'Retrieve from SmartSheet account settings.'}},\n 'report': {'field': {'name': 'report','kind': 'string','order': 3,'description': 'Retrieve from report properties.'}},\n 'out': {\n 'bigquery': {\n 'auth': 'user',\n 'dataset': {'field': {'name': 'dataset','kind': 'string','order': 4,'default': '','description': 'Existing BigQuery dataset.'}},\n 'table': {'field': {'name': 'table','kind': 'string','order': 5,'default': '','description': 'Table to create from this report.'}},\n 'schema': {'field': {'name': 'schema','kind': 'json','order': 6,'description': 'Schema provided in JSON list format or leave empty to auto detect.'}}\n }\n }\n }\n }\n]\n\njson_set_fields(TASKS, FIELDS)\njson_expand_includes(TASKS)\n\nproject.initialize(_recipe={ 'tasks':TASKS }, _project=CLOUD_PROJECT, _user=USER_CREDENTIALS, 
_client=CLIENT_CREDENTIALS, _verbose=True)\nproject.execute()\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
4a2ca973977d5eab067183d1488fb875df58fa67
20,835
ipynb
Jupyter Notebook
VocScan.ipynb
MansiAyer/Voisem6
64e058931f1d2558da4a088709f1e5e0fdea5ca5
[ "MIT" ]
null
null
null
VocScan.ipynb
MansiAyer/Voisem6
64e058931f1d2558da4a088709f1e5e0fdea5ca5
[ "MIT" ]
1
2021-11-12T23:19:11.000Z
2021-11-12T23:19:11.000Z
.ipynb_checkpoints/VocScan-checkpoint.ipynb
MansiAyer/Voisem6
64e058931f1d2558da4a088709f1e5e0fdea5ca5
[ "MIT" ]
1
2021-05-11T20:10:00.000Z
2021-05-11T20:10:00.000Z
169.390244
1,942
0.707415
[ [ [ "import numpy as np\nimport tensorflow as tf\nfrom tensorflow import keras\nmodel = keras.models.load_model('mymodel')\nprint('completed step?')", "completed step?\n" ], [ "import librosa\nimport pandas as pd\nimport IPython.display as ipd\n\ndef findEmotion(fileName):\n \n emotionMap = {1:'neutral', 2:'calm', 3:'happy', 4:'sad', 5:'angry', 6:'fear', 7:'disgust', 8:'surprise'}\n finalModel = model\n def featureExtraction(path):\n X, sample_rate = librosa.load(path, res_type='kaiser_fast',duration=3,sr=44100,offset=0.5)\n mel = np.mean(librosa.feature.melspectrogram(X, sr=sample_rate).T,axis=0)\n return mel\n mel = featureExtraction(fileName)\n return model.predict(mel)", "_____no_output_____" ], [ "fileName = \"Train/audio/MaleNeutral.wav\"\n\nipd.Audio(fileName)\n\noutput = findEmotion(fileName)\n\nprint(\"The Predicted Output is : \", output)", "_____no_output_____" ], [ "df_dc = pd.get_dummies(df, columns=['ColumnToDummyCode'])\ndata_url = 'https://github.com/openmundi/world.csv/blob/master/countries(249)_num3.csv'\ndf = pd.read_csv(data_url, index_col=0)\n\ndf.head()\n\n#use top-5 categorical accuracy as a reference hallow layer and BiLSTM layer\n#Deep CNN has the best performance in terms of classification accuracy, find LWLRAP score", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code" ] ]
4a2cb3f79392bec1e67aceacd5c4298671a6623f
842,268
ipynb
Jupyter Notebook
Orig_TestEpochsNN.ipynb
kunovg/EcoBici
f0f22ccff46320e5f9ef238424dc83ca9ddbf50d
[ "MIT" ]
null
null
null
Orig_TestEpochsNN.ipynb
kunovg/EcoBici
f0f22ccff46320e5f9ef238424dc83ca9ddbf50d
[ "MIT" ]
null
null
null
Orig_TestEpochsNN.ipynb
kunovg/EcoBici
f0f22ccff46320e5f9ef238424dc83ca9ddbf50d
[ "MIT" ]
null
null
null
975.976825
137,176
0.935416
[ [ [ "import time\nimport pandas as pd\nimport numpy as np\n\nfrom keras.layers.core import Dense, Activation, Dropout\nfrom keras.layers.recurrent import LSTM\nfrom keras.models import Sequential\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.utils import shuffle\n\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.metrics import mean_absolute_error\nepochs = [30, 50, 100, 200]", "Using TensorFlow backend.\n" ], [ "epoch = 200\n# Load data\ntrain = pd.read_csv('trips_train_3.csv', header=None)\ntest = pd.read_csv('trips_test_2.csv', header=None )\nscaler = MinMaxScaler(feature_range=(-1, 1))\nwindow_size = 78 # 78 steps in one day\n# normalize features\nscaled = scaler.fit_transform(train.values)\ntrain = pd.DataFrame(scaled)\n\nseries_s = train.copy()\nfor i in range(window_size):\n train = pd.concat([train, series_s.shift(-(i+1))], axis=1)\n\ntrain.dropna(axis=0, inplace=True)\n# Hacer lo mismo para los datos de prueba\ntest = test.iloc[:24624, :] # The rest are all 0s\nscaled = scaler.fit_transform(test.values)\ntest = pd.DataFrame(scaled)\n\nseries_s = test.copy()\nfor i in range(window_size):\n test = pd.concat([test, series_s.shift(-(i+1))], axis = 1)\n\ntest.dropna(axis=0, inplace=True)\ntrain = shuffle(train)\ntrain_X = train.iloc[:,:-1]\ntrain_y = train.iloc[:,-1]\ntest_X = test.iloc[:,:-1]\ntest_y = test.iloc[:,-1]\ntrain_X = train_X.values\ntrain_y = train_y.values\ntest_X = test_X.values\ntest_y = test_y.values\ntrain_X = train_X.reshape(train_X.shape[0],train_X.shape[1],1)\ntest_X = test_X.reshape(test_X.shape[0],test_X.shape[1],1)\n\n# Define the LSTM model\nmodel = Sequential()\nmodel.add(LSTM(input_shape=(window_size,1), output_dim=window_size, return_sequences=True))\nmodel.add(Dropout(0.5))\nmodel.add(LSTM(256))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(1))\nmodel.add(Activation(\"linear\"))\nmodel.compile(loss=\"mse\", 
optimizer=\"adam\")\nmodel.summary()\n# Train\nstart = time.time()\nmodel.fit(train_X, train_y, batch_size=100, epochs=epoch, validation_split=0.1)\nprint(\"> Compilation Time : \", time.time() - start)\n\ndef moving_test_window_preds(n_future_preds):\n ''' n_future_preds - Represents the number of future predictions we want to make\n This coincides with the number of windows that we will move forward\n on the test data\n '''\n preds_moving = [] # Use this to store the prediction made on each test window\n moving_test_window = [test_X[0,:].tolist()] # Creating the first test window\n moving_test_window = np.array(moving_test_window) # Making it an numpy array\n\n for i in range(n_future_preds):\n preds_one_step = model.predict(moving_test_window) # Note that this is already a scaled prediction so no need to rescale this\n preds_moving.append(preds_one_step[0,0]) # get the value from the numpy 2D array and append to predictions\n preds_one_step = preds_one_step.reshape(1,1,1) # Reshaping the prediction to 3D array for concatenation with moving test window\n moving_test_window = np.concatenate((moving_test_window[:,1:,:], preds_one_step), axis=1) # This is the new moving test window, where the first element from the window has been removed and the prediction has been appended to the end\n\n preds_moving = scaler.inverse_transform(np.array(preds_moving).reshape(-1, 1))\n\n return preds_moving\n\npreds_moving = moving_test_window_preds(500)\nactuals = scaler.inverse_transform(test_y.reshape(-1, 1))\nmse = mean_squared_error(actuals[74:150], preds_moving[74:150])\nmae = mean_absolute_error(actuals[74:150], preds_moving[74:150])\n\n# Save data\nwith open('f_%s_%s_%s.txt' % (epoch, mse, mae), 'w') as f:\n for i in preds_moving:\n f.write(\"%s\\n\" % i)", "c:\\users\\kuno\\miniconda3\\lib\\site-packages\\sklearn\\utils\\validation.py:429: DataConversionWarning: Data with input dtype int64 was converted to float64 by MinMaxScaler.\n warnings.warn(msg, 
_DataConversionWarning)\nc:\\users\\kuno\\miniconda3\\lib\\site-packages\\ipykernel_launcher.py:40: UserWarning: Update your `LSTM` call to the Keras 2 API: `LSTM(return_sequences=True, input_shape=(78, 1), units=78)`\n" ], [ "from matplotlib import pyplot\n\npyplot.figure(figsize=(20,6))\npyplot.plot(actuals[:600])\npyplot.plot(preds_moving[:600])\npyplot.title(\"200 epochs\")\npyplot.show()", "_____no_output_____" ], [ "with open(\"f_30_191360.34899787782_369.8666947640871.txt\") as f:\n pyplot.figure(figsize=(20,6))\n pyplot.plot(actuals[:600])\n pyplot.plot(np.array([l[1:-2] for l in f.readlines()]))\n pyplot.title(\"30 epochs\")\n pyplot.show()", "_____no_output_____" ], [ "with open(\"f_50_23261.834132086205_116.21821095441517.txt\") as f:\n pyplot.figure(figsize=(20,6))\n pyplot.plot(actuals[:600])\n pyplot.plot(np.array([l[1:-2] for l in f.readlines()]))\n pyplot.title(\"50 epochs\")\n pyplot.show()", "_____no_output_____" ], [ "with open(\"f_100_8694.5463661338_66.40398304085983.txt\") as f:\n pyplot.figure(figsize=(20,6))\n pyplot.plot(actuals[:600])\n pyplot.plot(np.array([l[1:-2] for l in f.readlines()]))\n pyplot.title(\"100 epochs\")\n pyplot.show()", "_____no_output_____" ], [ "model.predict(np.array([scaler.fit_transform(actuals[:78])]))", "_____no_output_____" ], [ "pyplot.figure(figsize=(20,6))\npyplot.plot(actuals[:156])\npyplot.plot(np.concatenate(([[0] for i in range(78)], scaler.inverse_transform(res))))\npyplot.show()", "_____no_output_____" ], [ "def predict_next(info, n):\n res = []\n for i in range(n):\n base = np.concatenate((scaler.fit_transform(info[i:78]), res)) if res else scaler.fit_transform(info[:78])\n pred = model.predict(np.array([base]))\n res.append(pred[0])\n return res", "_____no_output_____" ], [ "data_temblor = np.array([[int(l.strip())] for l in open(\"trips_19_sept.csv\").readlines() if l.strip()])", "_____no_output_____" ], [ 
"pyplot.figure(figsize=(20,6))\npyplot.plot(data_temblor[:156])\npyplot.plot(np.concatenate(([[0] for i in range(78)], scaler.inverse_transform(predict_next(data_temblor, 50)))))\npyplot.show()", "c:\\users\\kuno\\miniconda3\\lib\\site-packages\\sklearn\\utils\\validation.py:429: DataConversionWarning: Data with input dtype int32 was converted to float64 by MinMaxScaler.\n warnings.warn(msg, _DataConversionWarning)\n" ], [ "n = 40\npyplot.figure(figsize=(20,6))\npyplot.plot(data_temblor[:560])\npyplot.plot(np.concatenate(([[0] for i in range(78 + n)], scaler.inverse_transform(predict_next(data_temblor[n:], 20)))))\npyplot.show()", "c:\\users\\kuno\\miniconda3\\lib\\site-packages\\sklearn\\utils\\validation.py:429: DataConversionWarning: Data with input dtype int32 was converted to float64 by MinMaxScaler.\n warnings.warn(msg, _DataConversionWarning)\n" ], [ "pyplot.figure(figsize=(20,6))\npyplot.plot(actuals[0:76])\npyplot.plot(actuals[76:152])\npyplot.plot(actuals[152:228])\npyplot.plot(actuals[228:304])\n# for d in range(5):\n# pyplot.plot(actuals[78*d:78*(d+1)])\npyplot.show()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a2cc48d5628580c3252409590b0be7cdcee5221
30,837
ipynb
Jupyter Notebook
Transformer/Swin Transformer- Hierarchical Vision Transformer using Shifted Windows/SwinTransformer-review-with-practice.ipynb
koreaunitas/Deep-Learning-Paper-Reviews_
72a256935fd5154cfcef60fdf5dad447dd1e5c87
[ "MIT" ]
2
2022-01-03T07:21:47.000Z
2022-01-03T08:07:58.000Z
Transformer/Swin Transformer- Hierarchical Vision Transformer using Shifted Windows/SwinTransformer-review-with-practice.ipynb
koreaunitas/Deep-Learning-Paper-Reviews_
72a256935fd5154cfcef60fdf5dad447dd1e5c87
[ "MIT" ]
null
null
null
Transformer/Swin Transformer- Hierarchical Vision Transformer using Shifted Windows/SwinTransformer-review-with-practice.ipynb
koreaunitas/Deep-Learning-Paper-Reviews_
72a256935fd5154cfcef60fdf5dad447dd1e5c87
[ "MIT" ]
1
2022-01-15T23:14:48.000Z
2022-01-15T23:14:48.000Z
48.409733
128
0.510847
[ [ [ "# **Swin Transformer: Hierarchical Vision Transformer using Shifted Windows**\n\n**Swin Transformer (ICCV 2021 best paper award (Marr Prize))**\n\n**Authors {v-zeliu1,v-yutlin,yuecao,hanhu,v-yixwe,zhez,stevelin,bainguo}@microsoft.com**\n\n**Official Github**: https://github.com/microsoft/Swin-Transformer\n\n---\n\n**Edited By Su Hyung Choi - [Computer Vision Paper Reviews]**\n\n**[Github: @JonyChoi]** https://github.com/jonychoi/Computer-Vision-Paper-Reviews\n\nEdited Jan 4 2022\n\n---", "_____no_output_____" ], [ "## **About Swin Transformer**", "_____no_output_____" ] ], [ [ "# --------------------------------------------------------\n# Swin Transformer\n# Copyright (c) 2021 Microsoft\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Ze Liu\n# --------------------------------------------------------\n\nimport torch\nimport torch.nn as nn\nimport torch.utils.checkpoint as checkpoint\nfrom timm.models.layers import DropPath, to_2tuple, trunc_normal_\n\n\nclass Mlp(nn.Module):\n def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):\n super().__init__()\n out_features = out_features or in_features\n hidden_features = hidden_features or in_features\n self.fc1 = nn.Linear(in_features, hidden_features)\n self.act = act_layer()\n self.fc2 = nn.Linear(hidden_features, out_features)\n self.drop = nn.Dropout(drop)\n\n def forward(self, x):\n x = self.fc1(x)\n x = self.act(x)\n x = self.drop(x)\n x = self.fc2(x)\n x = self.drop(x)\n return x\n\n\ndef window_partition(x, window_size):\n \"\"\"\n Args:\n x: (B, H, W, C)\n window_size (int): window size\n Returns:\n windows: (num_windows*B, window_size, window_size, C)\n \"\"\"\n B, H, W, C = x.shape\n x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)\n windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)\n return windows\n\n\ndef window_reverse(windows, window_size, H, W):\n \"\"\"\n 
Args:\n windows: (num_windows*B, window_size, window_size, C)\n window_size (int): Window size\n H (int): Height of image\n W (int): Width of image\n Returns:\n x: (B, H, W, C)\n \"\"\"\n B = int(windows.shape[0] / (H * W / window_size / window_size))\n x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)\n x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)\n return x\n\n\nclass WindowAttention(nn.Module):\n r\"\"\" Window based multi-head self attention (W-MSA) module with relative position bias.\n It supports both of shifted and non-shifted window.\n Args:\n dim (int): Number of input channels.\n window_size (tuple[int]): The height and width of the window.\n num_heads (int): Number of attention heads.\n qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True\n qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set\n attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0\n proj_drop (float, optional): Dropout ratio of output. 
Default: 0.0\n \"\"\"\n\n def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.):\n\n super().__init__()\n self.dim = dim\n self.window_size = window_size # Wh, Ww\n self.num_heads = num_heads\n head_dim = dim // num_heads\n self.scale = qk_scale or head_dim ** -0.5\n\n # define a parameter table of relative position bias\n self.relative_position_bias_table = nn.Parameter(\n torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH\n\n # get pair-wise relative position index for each token inside the window\n coords_h = torch.arange(self.window_size[0])\n coords_w = torch.arange(self.window_size[1])\n coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww\n coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww\n relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww\n relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2\n relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0\n relative_coords[:, :, 1] += self.window_size[1] - 1\n relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1\n relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww\n self.register_buffer(\"relative_position_index\", relative_position_index)\n\n self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)\n self.attn_drop = nn.Dropout(attn_drop)\n self.proj = nn.Linear(dim, dim)\n self.proj_drop = nn.Dropout(proj_drop)\n\n trunc_normal_(self.relative_position_bias_table, std=.02)\n self.softmax = nn.Softmax(dim=-1)\n\n def forward(self, x, mask=None):\n \"\"\"\n Args:\n x: input features with shape of (num_windows*B, N, C)\n mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None\n \"\"\"\n B_, N, C = x.shape\n qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)\n q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as 
tuple)\n\n q = q * self.scale\n attn = (q @ k.transpose(-2, -1))\n\n relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(\n self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH\n relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww\n attn = attn + relative_position_bias.unsqueeze(0)\n\n if mask is not None:\n nW = mask.shape[0]\n attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)\n attn = attn.view(-1, self.num_heads, N, N)\n attn = self.softmax(attn)\n else:\n attn = self.softmax(attn)\n\n attn = self.attn_drop(attn)\n\n x = (attn @ v).transpose(1, 2).reshape(B_, N, C)\n x = self.proj(x)\n x = self.proj_drop(x)\n return x\n\n def extra_repr(self) -> str:\n return f'dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}'\n\n def flops(self, N):\n # calculate flops for 1 window with token length of N\n flops = 0\n # qkv = self.qkv(x)\n flops += N * self.dim * 3 * self.dim\n # attn = (q @ k.transpose(-2, -1))\n flops += self.num_heads * N * (self.dim // self.num_heads) * N\n # x = (attn @ v)\n flops += self.num_heads * N * N * (self.dim // self.num_heads)\n # x = self.proj(x)\n flops += N * self.dim * self.dim\n return flops\n\n\nclass SwinTransformerBlock(nn.Module):\n r\"\"\" Swin Transformer Block.\n Args:\n dim (int): Number of input channels.\n input_resolution (tuple[int]): Input resulotion.\n num_heads (int): Number of attention heads.\n window_size (int): Window size.\n shift_size (int): Shift size for SW-MSA.\n mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.\n qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True\n qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.\n drop (float, optional): Dropout rate. 
Default: 0.0\n attn_drop (float, optional): Attention dropout rate. Default: 0.0\n drop_path (float, optional): Stochastic depth rate. Default: 0.0\n act_layer (nn.Module, optional): Activation layer. Default: nn.GELU\n norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm\n \"\"\"\n\n def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0,\n mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0.,\n act_layer=nn.GELU, norm_layer=nn.LayerNorm):\n super().__init__()\n self.dim = dim\n self.input_resolution = input_resolution\n self.num_heads = num_heads\n self.window_size = window_size\n self.shift_size = shift_size\n self.mlp_ratio = mlp_ratio\n if min(self.input_resolution) <= self.window_size:\n # if window size is larger than input resolution, we don't partition windows\n self.shift_size = 0\n self.window_size = min(self.input_resolution)\n assert 0 <= self.shift_size < self.window_size, \"shift_size must in 0-window_size\"\n\n self.norm1 = norm_layer(dim)\n self.attn = WindowAttention(\n dim, window_size=to_2tuple(self.window_size), num_heads=num_heads,\n qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)\n\n self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity()\n self.norm2 = norm_layer(dim)\n mlp_hidden_dim = int(dim * mlp_ratio)\n self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)\n\n if self.shift_size > 0:\n # calculate attention mask for SW-MSA\n H, W = self.input_resolution\n img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1\n h_slices = (slice(0, -self.window_size),\n slice(-self.window_size, -self.shift_size),\n slice(-self.shift_size, None))\n w_slices = (slice(0, -self.window_size),\n slice(-self.window_size, -self.shift_size),\n slice(-self.shift_size, None))\n cnt = 0\n for h in h_slices:\n for w in w_slices:\n img_mask[:, h, w, :] = cnt\n cnt += 1\n\n mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1\n mask_windows = mask_windows.view(-1, self.window_size * self.window_size)\n attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)\n attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))\n else:\n attn_mask = None\n\n self.register_buffer(\"attn_mask\", attn_mask)\n\n def forward(self, x):\n H, W = self.input_resolution\n B, L, C = x.shape\n assert L == H * W, \"input feature has wrong size\"\n\n shortcut = x\n x = self.norm1(x)\n x = x.view(B, H, W, C)\n\n # cyclic shift\n if self.shift_size > 0:\n shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))\n else:\n shifted_x = x\n\n # partition windows\n x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C\n x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C\n\n # W-MSA/SW-MSA\n attn_windows = self.attn(x_windows, mask=self.attn_mask) # nW*B, window_size*window_size, C\n\n # merge windows\n attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)\n shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C\n\n # reverse cyclic shift\n if 
self.shift_size > 0:\n x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))\n else:\n x = shifted_x\n x = x.view(B, H * W, C)\n\n # FFN\n x = shortcut + self.drop_path(x)\n x = x + self.drop_path(self.mlp(self.norm2(x)))\n\n return x\n\n def extra_repr(self) -> str:\n return f\"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, \" \\\n f\"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}\"\n\n def flops(self):\n flops = 0\n H, W = self.input_resolution\n # norm1\n flops += self.dim * H * W\n # W-MSA/SW-MSA\n nW = H * W / self.window_size / self.window_size\n flops += nW * self.attn.flops(self.window_size * self.window_size)\n # mlp\n flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio\n # norm2\n flops += self.dim * H * W\n return flops\n\n\nclass PatchMerging(nn.Module):\n r\"\"\" Patch Merging Layer.\n Args:\n input_resolution (tuple[int]): Resolution of input feature.\n dim (int): Number of input channels.\n norm_layer (nn.Module, optional): Normalization layer. 
Default: nn.LayerNorm\n \"\"\"\n\n def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):\n super().__init__()\n self.input_resolution = input_resolution\n self.dim = dim\n self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)\n self.norm = norm_layer(4 * dim)\n\n def forward(self, x):\n \"\"\"\n x: B, H*W, C\n \"\"\"\n H, W = self.input_resolution\n B, L, C = x.shape\n assert L == H * W, \"input feature has wrong size\"\n assert H % 2 == 0 and W % 2 == 0, f\"x size ({H}*{W}) are not even.\"\n\n x = x.view(B, H, W, C)\n\n x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C\n x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C\n x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C\n x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C\n x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C\n x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C\n\n x = self.norm(x)\n x = self.reduction(x)\n\n return x\n\n def extra_repr(self) -> str:\n return f\"input_resolution={self.input_resolution}, dim={self.dim}\"\n\n def flops(self):\n H, W = self.input_resolution\n flops = H * W * self.dim\n flops += (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim\n return flops\n\n\nclass BasicLayer(nn.Module):\n \"\"\" A basic Swin Transformer layer for one stage.\n Args:\n dim (int): Number of input channels.\n input_resolution (tuple[int]): Input resolution.\n depth (int): Number of blocks.\n num_heads (int): Number of attention heads.\n window_size (int): Local window size.\n mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.\n qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True\n qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.\n drop (float, optional): Dropout rate. Default: 0.0\n attn_drop (float, optional): Attention dropout rate. Default: 0.0\n drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0\n norm_layer (nn.Module, optional): Normalization layer. 
Default: nn.LayerNorm\n downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None\n use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.\n \"\"\"\n\n def __init__(self, dim, input_resolution, depth, num_heads, window_size,\n mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,\n drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False):\n\n super().__init__()\n self.dim = dim\n self.input_resolution = input_resolution\n self.depth = depth\n self.use_checkpoint = use_checkpoint\n\n # build blocks\n self.blocks = nn.ModuleList([\n SwinTransformerBlock(dim=dim, input_resolution=input_resolution,\n num_heads=num_heads, window_size=window_size,\n shift_size=0 if (i % 2 == 0) else window_size // 2,\n mlp_ratio=mlp_ratio,\n qkv_bias=qkv_bias, qk_scale=qk_scale,\n drop=drop, attn_drop=attn_drop,\n drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,\n norm_layer=norm_layer)\n for i in range(depth)])\n\n # patch merging layer\n if downsample is not None:\n self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer)\n else:\n self.downsample = None\n\n def forward(self, x):\n for blk in self.blocks:\n if self.use_checkpoint:\n x = checkpoint.checkpoint(blk, x)\n else:\n x = blk(x)\n if self.downsample is not None:\n x = self.downsample(x)\n return x\n\n def extra_repr(self) -> str:\n return f\"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}\"\n\n def flops(self):\n flops = 0\n for blk in self.blocks:\n flops += blk.flops()\n if self.downsample is not None:\n flops += self.downsample.flops()\n return flops\n\n\nclass PatchEmbed(nn.Module):\n r\"\"\" Image to Patch Embedding\n Args:\n img_size (int): Image size. Default: 224.\n patch_size (int): Patch token size. Default: 4.\n in_chans (int): Number of input image channels. Default: 3.\n embed_dim (int): Number of linear projection output channels. 
Default: 96.\n norm_layer (nn.Module, optional): Normalization layer. Default: None\n \"\"\"\n\n def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):\n super().__init__()\n img_size = to_2tuple(img_size)\n patch_size = to_2tuple(patch_size)\n patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]]\n self.img_size = img_size\n self.patch_size = patch_size\n self.patches_resolution = patches_resolution\n self.num_patches = patches_resolution[0] * patches_resolution[1]\n\n self.in_chans = in_chans\n self.embed_dim = embed_dim\n\n self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)\n if norm_layer is not None:\n self.norm = norm_layer(embed_dim)\n else:\n self.norm = None\n\n def forward(self, x):\n B, C, H, W = x.shape\n # FIXME look at relaxing size constraints\n assert H == self.img_size[0] and W == self.img_size[1], \\\n f\"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]}).\"\n x = self.proj(x).flatten(2).transpose(1, 2) # B Ph*Pw C\n if self.norm is not None:\n x = self.norm(x)\n return x\n\n def flops(self):\n Ho, Wo = self.patches_resolution\n flops = Ho * Wo * self.embed_dim * self.in_chans * (self.patch_size[0] * self.patch_size[1])\n if self.norm is not None:\n flops += Ho * Wo * self.embed_dim\n return flops\n\n\nclass SwinTransformer(nn.Module):\n r\"\"\" Swin Transformer\n A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` -\n https://arxiv.org/pdf/2103.14030\n Args:\n img_size (int | tuple(int)): Input image size. Default 224\n patch_size (int | tuple(int)): Patch size. Default: 4\n in_chans (int): Number of input image channels. Default: 3\n num_classes (int): Number of classes for classification head. Default: 1000\n embed_dim (int): Patch embedding dimension. 
Default: 96\n depths (tuple(int)): Depth of each Swin Transformer layer.\n num_heads (tuple(int)): Number of attention heads in different layers.\n window_size (int): Window size. Default: 7\n mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4\n qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True\n qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None\n drop_rate (float): Dropout rate. Default: 0\n attn_drop_rate (float): Attention dropout rate. Default: 0\n drop_path_rate (float): Stochastic depth rate. Default: 0.1\n norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.\n ape (bool): If True, add absolute position embedding to the patch embedding. Default: False\n patch_norm (bool): If True, add normalization after patch embedding. Default: True\n use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False\n \"\"\"\n\n def __init__(self, img_size=224, patch_size=4, in_chans=3, num_classes=1000,\n embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24],\n window_size=7, mlp_ratio=4., qkv_bias=True, qk_scale=None,\n drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,\n norm_layer=nn.LayerNorm, ape=False, patch_norm=True,\n use_checkpoint=False, **kwargs):\n super().__init__()\n\n self.num_classes = num_classes\n self.num_layers = len(depths)\n self.embed_dim = embed_dim\n self.ape = ape\n self.patch_norm = patch_norm\n self.num_features = int(embed_dim * 2 ** (self.num_layers - 1))\n self.mlp_ratio = mlp_ratio\n\n # split image into non-overlapping patches\n self.patch_embed = PatchEmbed(\n img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim,\n norm_layer=norm_layer if self.patch_norm else None)\n num_patches = self.patch_embed.num_patches\n patches_resolution = self.patch_embed.patches_resolution\n self.patches_resolution = patches_resolution\n\n # absolute position embedding\n if self.ape:\n 
self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))\n trunc_normal_(self.absolute_pos_embed, std=.02)\n\n self.pos_drop = nn.Dropout(p=drop_rate)\n\n # stochastic depth\n dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule\n\n # build layers\n self.layers = nn.ModuleList()\n for i_layer in range(self.num_layers):\n layer = BasicLayer(dim=int(embed_dim * 2 ** i_layer),\n input_resolution=(patches_resolution[0] // (2 ** i_layer),\n patches_resolution[1] // (2 ** i_layer)),\n depth=depths[i_layer],\n num_heads=num_heads[i_layer],\n window_size=window_size,\n mlp_ratio=self.mlp_ratio,\n qkv_bias=qkv_bias, qk_scale=qk_scale,\n drop=drop_rate, attn_drop=attn_drop_rate,\n drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],\n norm_layer=norm_layer,\n downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,\n use_checkpoint=use_checkpoint)\n self.layers.append(layer)\n\n self.norm = norm_layer(self.num_features)\n self.avgpool = nn.AdaptiveAvgPool1d(1)\n self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()\n\n self.apply(self._init_weights)\n\n def _init_weights(self, m):\n if isinstance(m, nn.Linear):\n trunc_normal_(m.weight, std=.02)\n if isinstance(m, nn.Linear) and m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.LayerNorm):\n nn.init.constant_(m.bias, 0)\n nn.init.constant_(m.weight, 1.0)\n\n @torch.jit.ignore\n def no_weight_decay(self):\n return {'absolute_pos_embed'}\n\n @torch.jit.ignore\n def no_weight_decay_keywords(self):\n return {'relative_position_bias_table'}\n\n def forward_features(self, x):\n x = self.patch_embed(x)\n if self.ape:\n x = x + self.absolute_pos_embed\n x = self.pos_drop(x)\n\n for layer in self.layers:\n x = layer(x)\n\n x = self.norm(x) # B L C\n x = self.avgpool(x.transpose(1, 2)) # B C 1\n x = torch.flatten(x, 1)\n return x\n\n def forward(self, x):\n x = 
self.forward_features(x)\n x = self.head(x)\n return x\n\n def flops(self):\n flops = 0\n flops += self.patch_embed.flops()\n for i, layer in enumerate(self.layers):\n flops += layer.flops()\n flops += self.num_features * self.patches_resolution[0] * self.patches_resolution[1] // (2 ** self.num_layers)\n flops += self.num_features * self.num_classes\n return flops", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ] ]
4a2cc9718fd75cbcf00e629d0d832676018cae6f
458,045
ipynb
Jupyter Notebook
spatial_filters-gw-chap3/power-law.ipynb
arve0/FY3490
d9b97857d25d48b1bd2b3c0c945b4d77b1a956fe
[ "MIT" ]
2
2020-12-25T12:57:34.000Z
2021-10-07T04:30:53.000Z
spatial_filters-gw-chap3/power-law.ipynb
arve0/FY3490
d9b97857d25d48b1bd2b3c0c945b4d77b1a956fe
[ "MIT" ]
null
null
null
spatial_filters-gw-chap3/power-law.ipynb
arve0/FY3490
d9b97857d25d48b1bd2b3c0c945b4d77b1a956fe
[ "MIT" ]
1
2020-12-25T12:57:37.000Z
2020-12-25T12:57:37.000Z
4,202.247706
204,525
0.953662
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
4a2cdfb8f401a2bc05842461fce5fdfab3851af9
242,760
ipynb
Jupyter Notebook
notebooks/rc_nfsp_training_v1.ipynb
armandli/ReconChessRL
3f3f018fd347ee17452ef6ad725d82f2f11678c6
[ "MIT" ]
4
2021-08-19T14:06:01.000Z
2021-12-24T06:34:23.000Z
notebooks/rc_nfsp_training_v1.ipynb
armandli/ReconChessRL
3f3f018fd347ee17452ef6ad725d82f2f11678c6
[ "MIT" ]
2
2021-09-18T08:34:01.000Z
2022-03-23T07:06:05.000Z
notebooks/rc_nfsp_training_v1.ipynb
armandli/ReconChessRL
3f3f018fd347ee17452ef6ad725d82f2f11678c6
[ "MIT" ]
1
2021-09-18T08:30:23.000Z
2021-09-18T08:30:23.000Z
55.197817
78
0.716432
[ [ [ "import torch\nfrom senseis.learning.rc_qconfig import NFSPConfig\nfrom senseis.learning.rc_nfsp_trainer1 import RCNFSPTrainer1\nfrom senseis.reporters.rc_reporter import RCEpisodicReporter", "_____no_output_____" ], [ "use_cuda = torch.cuda.is_available()\ndevice = torch.device('cuda' if use_cuda else 'cpu')\ncpu = torch.device('cpu')", "_____no_output_____" ], [ "config = NFSPConfig(\n device=device,\n action_alpha_model_filename='../models/rc_action_alpha_model_v7',\n action_beta_model_filename='../models/rc_action_beta_model_v7',\n sense_model_filename='../models/rc_sense_model_v7',\n episodes=4098,\n iterations=2,\n eb_size=256,\n batchsize=128,\n learning_rate=0.0001,\n weight_decay=0.000001,\n pg_epsilon=0.00000001,\n mu=0.2,\n action_alpha_hidden_size=384,\n action_beta_hidden_size=384,\n sense_hidden_size=384,\n)", "_____no_output_____" ], [ "reporter = RCEpisodicReporter(config.batchsize, 1, 1)", "_____no_output_____" ], [ "trainer = RCNFSPTrainer1(config, reporter)", "_____no_output_____" ], [ "trainer.train()", "winner True win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner True win reason 
WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner True win reason 
WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner True win reason 
WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner True win reason 
WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner False win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\nwinner True win reason WinReason.KING_CAPTURE\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code" ] ]
4a2ce13ca4316cae76917d38771b14255bac6b10
295,220
ipynb
Jupyter Notebook
Elevation_prediction.ipynb
twcmchang/colorful-moth
08eb89a3843df4d81961752a8c11f8c546018fb7
[ "MIT" ]
11
2019-09-02T06:22:54.000Z
2022-02-22T07:43:44.000Z
Elevation_prediction.ipynb
twcmchang/colorful-moth
08eb89a3843df4d81961752a8c11f8c546018fb7
[ "MIT" ]
null
null
null
Elevation_prediction.ipynb
twcmchang/colorful-moth
08eb89a3843df4d81961752a8c11f8c546018fb7
[ "MIT" ]
null
null
null
167.263456
107,022
0.812641
[ [ [ "import os\nos.environ['CUDA_VISIBLE_DEVICES'] = '2'\nimport pickle\nimport numpy as np\nimport pandas as pd\nimport skimage.io as io\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\nimport tensorflow as tf\nimport keras\nfrom keras.applications import ResNet50\nfrom keras.applications.resnet50 import preprocess_input\nfrom keras.models import Model\nfrom keras.layers import GlobalAveragePooling2D, Dense, Dropout, Activation, Input, Lambda, BatchNormalization\nfrom keras.optimizers import Adam\nfrom keras.utils import to_categorical\n\nfrom imgaug import augmenters as iaa\nfrom datetime import datetime", "/usr/local/lib/python3.5/dist-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n from ._conv import register_converters as _register_converters\nUsing TensorFlow backend.\n" ], [ "# %load keras_utils.py\nimport keras\nimport numpy as np\nimport skimage.io as io\n\nclass DataGenerator(keras.utils.Sequence):\n 'Generates data for Keras'\n def __init__(self, list_IDs, labels, center_IDs=None, batch_size=32, dim=(256,256,3), shuffle=True, img_preprocess=None, img_aug = None):\n 'Initialization'\n self.dim = dim\n self.batch_size = batch_size\n self.labels = labels\n self.list_IDs = list_IDs\n self.center_IDs = center_IDs\n self.n_classes = labels.shape[1]\n self.shuffle = shuffle\n self.on_epoch_end()\n self.indexes = list(range(0, len(self.list_IDs)))\n self.img_aug = img_aug\n self.img_preprocess = img_preprocess\n\n def __len__(self):\n 'Denotes the number of batches per epoch'\n return int(np.floor(len(self.list_IDs) / self.batch_size))\n\n def __getitem__(self, index):\n 'Generate one batch of data'\n # Generate indexes of the batch\n indexes = self.indexes[index*self.batch_size:min((index+1)*self.batch_size, len(self.list_IDs))]\n \n 'Generates data containing batch_size samples' # X : 
(n_samples, *dim, n_channels)\n # Initialization\n X = np.empty((self.batch_size, *self.dim))\n Y = np.empty((self.batch_size, self.n_classes), dtype=int)\n M = np.empty((self.batch_size), dtype=int)\n\n # Generate data\n for i, ID in enumerate(indexes):\n\n # Store sample\n X[i,] = io.imread(self.list_IDs[ID]).astype(float)\n \n # Store class\n Y[i,] = self.labels[ID]\n \n if self.img_aug is not None:\n X = self.img_aug.augment_images(X.astype(np.uint8))\n \n X = self.__data_preprocess(X.astype(float))\n \n if self.center_IDs is None:\n return X, Y\n else:\n for i, ID in enumerate(indexes):\n M[i] = self.center_IDs[ID]\n return [X,M], [Y,M]\n\n def on_epoch_end(self):\n 'Updates indexes after each epoch'\n self.indexes = np.arange(len(self.list_IDs))\n if self.shuffle == True:\n np.random.shuffle(self.indexes)\n \n def __data_preprocess(self, img):\n if self.img_preprocess is None:\n processed_img = img/255.0\n else:\n processed_img = self.img_preprocess(img)\n return processed_img \n", "_____no_output_____" ], [ "FLAG_savedir = '/home/put_data/moth/metadata/5_fold/'\nFLAG_sfold = 5\n\nidx_fold = 4\n\nFLAG_hidden = 1024\nFLAG_dropout = 0.0\nFLAG_base_model = 'ResNet50'\nFLAG_batch_size = 32", "_____no_output_____" ], [ "X = pd.read_csv('/home/put_data/moth/metadata/1121_updated_metadata_flickr_summary_used_final.csv',index_col=0)", "_____no_output_____" ], [ "X.head()", "_____no_output_____" ], [ "with open(os.path.join('/home/put_data/moth/metadata/1121_Y_mean_dict.pickle'), 'rb') as handle:\n Y_dict = pickle.load(handle)", "_____no_output_____" ], [ "FLAG_model_save = '/home/put_data/moth/code/cmchang/regression/fullcrop_dp{0}_newaug-rmhue+old_species_keras_resnet_fold_{1}_{2}'.format(int(FLAG_dropout*100), datetime.now().strftime('%Y%m%d'), \n idx_fold)\nif not os.path.exists(FLAG_model_save):\n os.makedirs(FLAG_model_save)\nprint('directory: {}'.format(FLAG_model_save))", "make a directory: 
/home/put_data/moth/code/cmchang/regression/fullcrop_dp0_newaug-rmhue+old_species_keras_resnet_fold_20181122_4\n" ], [ "X['img_rmbg_path'] = X.Number.apply(lambda x: '/home/put_data/moth/data/whole_crop/'+str(x)+'.png')", "_____no_output_____" ], [ "plt.imshow(io.imread(X.img_rmbg_path[0]))", "_____no_output_____" ], [ "sel = list()\nfor i in range(X.shape[0]):\n if os.path.exists(X['img_rmbg_path'][i]):\n sel.append(True)\n else:\n sel.append(False)", "_____no_output_____" ], [ "X = X[sel]", "_____no_output_____" ], [ "Xtrain = X[(X.Species.duplicated() == False)]\nXsplit = X[(X.Species.duplicated() == True)]\n\nprint(\"Unique: {0}; Duplicate: {1}\".format(Xtrain.shape, Xsplit.shape))", "Unique: (1951, 36); Duplicate: (21243, 36)\n" ], [ "from sklearn.model_selection import train_test_split\nXmerge, Xtest = train_test_split(Xsplit, test_size = 0.2, random_state=0)\nXtrain = pd.concat([Xtrain, Xmerge])", "_____no_output_____" ], [ "Ytrain = np.vstack(Xtrain['Species'].apply(lambda x: Y_dict[x]))\nYtest = np.vstack(Xtest['Species'].apply(lambda x: Y_dict[x]))\n\nprint('Xtrain.shape: {0}, Ytrain.shape: {1}'.format(Xtrain.shape, Ytrain.shape))\nprint('Xtest.shape: {0}, Ytest.shape: {1}'.format(Xtest.shape, Ytest.shape))", "Xtrain.shape: (18945, 36), Ytrain.shape: (18945, 1)\nXtest.shape: (4249, 36), Ytest.shape: (4249, 1)\n" ], [ "Xtrain.to_csv(os.path.join(FLAG_model_save,'train.csv'), index=False)", "_____no_output_____" ], [ "Xtest.to_csv(os.path.join(FLAG_model_save,'test.csv'), index=False)", "_____no_output_____" ], [ "sometimes = lambda aug: iaa.Sometimes(0.5, aug)\n\naugseq = iaa.Sequential([\n iaa.Fliplr(0.5)\n ,sometimes(iaa.Affine(\n scale={\"x\": (0.9, 1.1), \"y\": (0.9, 1.1)}, # scale images to 80-120% of their size, individually per axis\n translate_percent={\"x\": (-0.1, 0.1), \"y\": (-0.1, 0.1)}, # translate by -20 to +20 percent (per axis)\n rotate=(-30, 30), # rotate by -45 to +45 degrees\n cval=255 # if mode is constant, use a cval between 0 and 
255\n ))\n])\n", "_____no_output_____" ], [ "# Parameters\ninput_shape = (256, 256, 3)\nn_classes = Ytest.shape[1]\nbatch_size = FLAG_batch_size\nisCenterloss = False", "_____no_output_____" ], [ "from keras.regularizers import l2", "_____no_output_____" ], [ "img_input = Input(shape=input_shape)\nextractor = ResNet50(input_tensor=img_input, include_top=False, weights='imagenet', pooling='avg')\nx1 = Dense(FLAG_hidden)(extractor.output)\nx1 = BatchNormalization()(x1)\nx1 = Activation(activation='relu')(x1)\noutput = Dense(n_classes, activation='linear', name='output_layer')(x1)", "_____no_output_____" ], [ "train_params = {'dim': input_shape,\n 'batch_size': FLAG_batch_size,\n 'shuffle': True,\n 'img_aug': augseq,\n 'img_preprocess': tf.contrib.keras.applications.resnet50.preprocess_input}\n\nvalid_params = {'dim': input_shape,\n 'batch_size': FLAG_batch_size,\n 'shuffle': False,\n 'img_aug': None,\n 'img_preprocess': tf.contrib.keras.applications.resnet50.preprocess_input}\n\n\nmodel = Model(inputs=img_input, outputs=output)\nmodel.compile(optimizer=Adam(lr=5e-5, beta_1=0.5), \n loss=\"mean_squared_error\")\n# Generators\ntraining_generator = DataGenerator(list_IDs = list(Xtrain['img_rmbg_path']), labels = Ytrain, center_IDs = None, **train_params)\nvalidation_generator = DataGenerator(list_IDs = list(Xtest['img_rmbg_path']), labels = Ytest, center_IDs = None, **valid_params)\n", "_____no_output_____" ], [ "model.summary()", "____________________________________________________________________________________________________\nLayer (type) Output Shape Param # Connected to \n====================================================================================================\ninput_2 (InputLayer) (None, 256, 256, 3) 0 \n____________________________________________________________________________________________________\nzero_padding2d_2 (ZeroPadding2D) (None, 262, 262, 3) 0 input_2[0][0] 
\n____________________________________________________________________________________________________\nconv1 (Conv2D) (None, 128, 128, 64) 9472 zero_padding2d_2[0][0] \n____________________________________________________________________________________________________\nbn_conv1 (BatchNormalization) (None, 128, 128, 64) 256 conv1[0][0] \n____________________________________________________________________________________________________\nactivation_51 (Activation) (None, 128, 128, 64) 0 bn_conv1[0][0] \n____________________________________________________________________________________________________\nmax_pooling2d_2 (MaxPooling2D) (None, 63, 63, 64) 0 activation_51[0][0] \n____________________________________________________________________________________________________\nres2a_branch2a (Conv2D) (None, 63, 63, 64) 4160 max_pooling2d_2[0][0] \n____________________________________________________________________________________________________\nbn2a_branch2a (BatchNormalizatio (None, 63, 63, 64) 256 res2a_branch2a[0][0] \n____________________________________________________________________________________________________\nactivation_52 (Activation) (None, 63, 63, 64) 0 bn2a_branch2a[0][0] \n____________________________________________________________________________________________________\nres2a_branch2b (Conv2D) (None, 63, 63, 64) 36928 activation_52[0][0] \n____________________________________________________________________________________________________\nbn2a_branch2b (BatchNormalizatio (None, 63, 63, 64) 256 res2a_branch2b[0][0] \n____________________________________________________________________________________________________\nactivation_53 (Activation) (None, 63, 63, 64) 0 bn2a_branch2b[0][0] \n____________________________________________________________________________________________________\nres2a_branch2c (Conv2D) (None, 63, 63, 256) 16640 activation_53[0][0] 
\n____________________________________________________________________________________________________\nres2a_branch1 (Conv2D) (None, 63, 63, 256) 16640 max_pooling2d_2[0][0] \n____________________________________________________________________________________________________\nbn2a_branch2c (BatchNormalizatio (None, 63, 63, 256) 1024 res2a_branch2c[0][0] \n____________________________________________________________________________________________________\nbn2a_branch1 (BatchNormalization (None, 63, 63, 256) 1024 res2a_branch1[0][0] \n____________________________________________________________________________________________________\nadd_17 (Add) (None, 63, 63, 256) 0 bn2a_branch2c[0][0] \n bn2a_branch1[0][0] \n____________________________________________________________________________________________________\nactivation_54 (Activation) (None, 63, 63, 256) 0 add_17[0][0] \n____________________________________________________________________________________________________\nres2b_branch2a (Conv2D) (None, 63, 63, 64) 16448 activation_54[0][0] \n____________________________________________________________________________________________________\nbn2b_branch2a (BatchNormalizatio (None, 63, 63, 64) 256 res2b_branch2a[0][0] \n____________________________________________________________________________________________________\nactivation_55 (Activation) (None, 63, 63, 64) 0 bn2b_branch2a[0][0] \n____________________________________________________________________________________________________\nres2b_branch2b (Conv2D) (None, 63, 63, 64) 36928 activation_55[0][0] \n____________________________________________________________________________________________________\nbn2b_branch2b (BatchNormalizatio (None, 63, 63, 64) 256 res2b_branch2b[0][0] \n____________________________________________________________________________________________________\nactivation_56 (Activation) (None, 63, 63, 64) 0 bn2b_branch2b[0][0] 
\n____________________________________________________________________________________________________\nres2b_branch2c (Conv2D) (None, 63, 63, 256) 16640 activation_56[0][0] \n____________________________________________________________________________________________________\nbn2b_branch2c (BatchNormalizatio (None, 63, 63, 256) 1024 res2b_branch2c[0][0] \n____________________________________________________________________________________________________\nadd_18 (Add) (None, 63, 63, 256) 0 bn2b_branch2c[0][0] \n activation_54[0][0] \n____________________________________________________________________________________________________\nactivation_57 (Activation) (None, 63, 63, 256) 0 add_18[0][0] \n____________________________________________________________________________________________________\nres2c_branch2a (Conv2D) (None, 63, 63, 64) 16448 activation_57[0][0] \n____________________________________________________________________________________________________\nbn2c_branch2a (BatchNormalizatio (None, 63, 63, 64) 256 res2c_branch2a[0][0] \n____________________________________________________________________________________________________\nactivation_58 (Activation) (None, 63, 63, 64) 0 bn2c_branch2a[0][0] \n____________________________________________________________________________________________________\nres2c_branch2b (Conv2D) (None, 63, 63, 64) 36928 activation_58[0][0] \n____________________________________________________________________________________________________\nbn2c_branch2b (BatchNormalizatio (None, 63, 63, 64) 256 res2c_branch2b[0][0] \n____________________________________________________________________________________________________\nactivation_59 (Activation) (None, 63, 63, 64) 0 bn2c_branch2b[0][0] \n____________________________________________________________________________________________________\nres2c_branch2c (Conv2D) (None, 63, 63, 256) 16640 activation_59[0][0] 
\n____________________________________________________________________________________________________\nbn2c_branch2c (BatchNormalizatio (None, 63, 63, 256) 1024 res2c_branch2c[0][0] \n____________________________________________________________________________________________________\nadd_19 (Add) (None, 63, 63, 256) 0 bn2c_branch2c[0][0] \n activation_57[0][0] \n____________________________________________________________________________________________________\nactivation_60 (Activation) (None, 63, 63, 256) 0 add_19[0][0] \n____________________________________________________________________________________________________\nres3a_branch2a (Conv2D) (None, 32, 32, 128) 32896 activation_60[0][0] \n____________________________________________________________________________________________________\nbn3a_branch2a (BatchNormalizatio (None, 32, 32, 128) 512 res3a_branch2a[0][0] \n____________________________________________________________________________________________________\nactivation_61 (Activation) (None, 32, 32, 128) 0 bn3a_branch2a[0][0] \n____________________________________________________________________________________________________\nres3a_branch2b (Conv2D) (None, 32, 32, 128) 147584 activation_61[0][0] \n____________________________________________________________________________________________________\nbn3a_branch2b (BatchNormalizatio (None, 32, 32, 128) 512 res3a_branch2b[0][0] \n____________________________________________________________________________________________________\nactivation_62 (Activation) (None, 32, 32, 128) 0 bn3a_branch2b[0][0] \n____________________________________________________________________________________________________\nres3a_branch2c (Conv2D) (None, 32, 32, 512) 66048 activation_62[0][0] \n____________________________________________________________________________________________________\nres3a_branch1 (Conv2D) (None, 32, 32, 512) 131584 activation_60[0][0] 
\n____________________________________________________________________________________________________\nbn3a_branch2c (BatchNormalizatio (None, 32, 32, 512) 2048 res3a_branch2c[0][0] \n____________________________________________________________________________________________________\nbn3a_branch1 (BatchNormalization (None, 32, 32, 512) 2048 res3a_branch1[0][0] \n____________________________________________________________________________________________________\nadd_20 (Add) (None, 32, 32, 512) 0 bn3a_branch2c[0][0] \n bn3a_branch1[0][0] \n____________________________________________________________________________________________________\nactivation_63 (Activation) (None, 32, 32, 512) 0 add_20[0][0] \n____________________________________________________________________________________________________\nres3b_branch2a (Conv2D) (None, 32, 32, 128) 65664 activation_63[0][0] \n____________________________________________________________________________________________________\nbn3b_branch2a (BatchNormalizatio (None, 32, 32, 128) 512 res3b_branch2a[0][0] \n____________________________________________________________________________________________________\nactivation_64 (Activation) (None, 32, 32, 128) 0 bn3b_branch2a[0][0] \n____________________________________________________________________________________________________\nres3b_branch2b (Conv2D) (None, 32, 32, 128) 147584 activation_64[0][0] \n____________________________________________________________________________________________________\nbn3b_branch2b (BatchNormalizatio (None, 32, 32, 128) 512 res3b_branch2b[0][0] \n____________________________________________________________________________________________________\nactivation_65 (Activation) (None, 32, 32, 128) 0 bn3b_branch2b[0][0] \n____________________________________________________________________________________________________\nres3b_branch2c (Conv2D) (None, 32, 32, 512) 66048 activation_65[0][0] 
\n____________________________________________________________________________________________________\nbn3b_branch2c (BatchNormalizatio (None, 32, 32, 512) 2048 res3b_branch2c[0][0] \n____________________________________________________________________________________________________\nadd_21 (Add) (None, 32, 32, 512) 0 bn3b_branch2c[0][0] \n activation_63[0][0] \n____________________________________________________________________________________________________\nactivation_66 (Activation) (None, 32, 32, 512) 0 add_21[0][0] \n____________________________________________________________________________________________________\nres3c_branch2a (Conv2D) (None, 32, 32, 128) 65664 activation_66[0][0] \n____________________________________________________________________________________________________\nbn3c_branch2a (BatchNormalizatio (None, 32, 32, 128) 512 res3c_branch2a[0][0] \n____________________________________________________________________________________________________\nactivation_67 (Activation) (None, 32, 32, 128) 0 bn3c_branch2a[0][0] \n____________________________________________________________________________________________________\nres3c_branch2b (Conv2D) (None, 32, 32, 128) 147584 activation_67[0][0] \n____________________________________________________________________________________________________\nbn3c_branch2b (BatchNormalizatio (None, 32, 32, 128) 512 res3c_branch2b[0][0] \n____________________________________________________________________________________________________\nactivation_68 (Activation) (None, 32, 32, 128) 0 bn3c_branch2b[0][0] \n____________________________________________________________________________________________________\nres3c_branch2c (Conv2D) (None, 32, 32, 512) 66048 activation_68[0][0] \n____________________________________________________________________________________________________\nbn3c_branch2c (BatchNormalizatio (None, 32, 32, 512) 2048 res3c_branch2c[0][0] 
\n____________________________________________________________________________________________________\nadd_22 (Add) (None, 32, 32, 512) 0 bn3c_branch2c[0][0] \n activation_66[0][0] \n____________________________________________________________________________________________________\nactivation_69 (Activation) (None, 32, 32, 512) 0 add_22[0][0] \n____________________________________________________________________________________________________\nres3d_branch2a (Conv2D) (None, 32, 32, 128) 65664 activation_69[0][0] \n____________________________________________________________________________________________________\nbn3d_branch2a (BatchNormalizatio (None, 32, 32, 128) 512 res3d_branch2a[0][0] \n____________________________________________________________________________________________________\nactivation_70 (Activation) (None, 32, 32, 128) 0 bn3d_branch2a[0][0] \n____________________________________________________________________________________________________\nres3d_branch2b (Conv2D) (None, 32, 32, 128) 147584 activation_70[0][0] \n____________________________________________________________________________________________________\nbn3d_branch2b (BatchNormalizatio (None, 32, 32, 128) 512 res3d_branch2b[0][0] \n____________________________________________________________________________________________________\nactivation_71 (Activation) (None, 32, 32, 128) 0 bn3d_branch2b[0][0] \n____________________________________________________________________________________________________\nres3d_branch2c (Conv2D) (None, 32, 32, 512) 66048 activation_71[0][0] \n____________________________________________________________________________________________________\nbn3d_branch2c (BatchNormalizatio (None, 32, 32, 512) 2048 res3d_branch2c[0][0] \n____________________________________________________________________________________________________\nadd_23 (Add) (None, 32, 32, 512) 0 bn3d_branch2c[0][0] \n activation_69[0][0] 
\n____________________________________________________________________________________________________\nactivation_72 (Activation) (None, 32, 32, 512) 0 add_23[0][0] \n____________________________________________________________________________________________________\nres4a_branch2a (Conv2D) (None, 16, 16, 256) 131328 activation_72[0][0] \n____________________________________________________________________________________________________\nbn4a_branch2a (BatchNormalizatio (None, 16, 16, 256) 1024 res4a_branch2a[0][0] \n____________________________________________________________________________________________________\nactivation_73 (Activation) (None, 16, 16, 256) 0 bn4a_branch2a[0][0] \n____________________________________________________________________________________________________\nres4a_branch2b (Conv2D) (None, 16, 16, 256) 590080 activation_73[0][0] \n____________________________________________________________________________________________________\nbn4a_branch2b (BatchNormalizatio (None, 16, 16, 256) 1024 res4a_branch2b[0][0] \n____________________________________________________________________________________________________\nactivation_74 (Activation) (None, 16, 16, 256) 0 bn4a_branch2b[0][0] \n____________________________________________________________________________________________________\nres4a_branch2c (Conv2D) (None, 16, 16, 1024) 263168 activation_74[0][0] \n____________________________________________________________________________________________________\nres4a_branch1 (Conv2D) (None, 16, 16, 1024) 525312 activation_72[0][0] \n____________________________________________________________________________________________________\nbn4a_branch2c (BatchNormalizatio (None, 16, 16, 1024) 4096 res4a_branch2c[0][0] \n____________________________________________________________________________________________________\nbn4a_branch1 (BatchNormalization (None, 16, 16, 1024) 4096 res4a_branch1[0][0] 
\n____________________________________________________________________________________________________\nadd_24 (Add) (None, 16, 16, 1024) 0 bn4a_branch2c[0][0] \n bn4a_branch1[0][0] \n____________________________________________________________________________________________________\nactivation_75 (Activation) (None, 16, 16, 1024) 0 add_24[0][0] \n____________________________________________________________________________________________________\nres4b_branch2a (Conv2D) (None, 16, 16, 256) 262400 activation_75[0][0] \n____________________________________________________________________________________________________\nbn4b_branch2a (BatchNormalizatio (None, 16, 16, 256) 1024 res4b_branch2a[0][0] \n____________________________________________________________________________________________________\nactivation_76 (Activation) (None, 16, 16, 256) 0 bn4b_branch2a[0][0] \n____________________________________________________________________________________________________\nres4b_branch2b (Conv2D) (None, 16, 16, 256) 590080 activation_76[0][0] \n____________________________________________________________________________________________________\nbn4b_branch2b (BatchNormalizatio (None, 16, 16, 256) 1024 res4b_branch2b[0][0] \n____________________________________________________________________________________________________\nactivation_77 (Activation) (None, 16, 16, 256) 0 bn4b_branch2b[0][0] \n____________________________________________________________________________________________________\nres4b_branch2c (Conv2D) (None, 16, 16, 1024) 263168 activation_77[0][0] \n____________________________________________________________________________________________________\nbn4b_branch2c (BatchNormalizatio (None, 16, 16, 1024) 4096 res4b_branch2c[0][0] \n____________________________________________________________________________________________________\nadd_25 (Add) (None, 16, 16, 1024) 0 bn4b_branch2c[0][0] \n activation_75[0][0] 
\n____________________________________________________________________________________________________\nactivation_78 (Activation) (None, 16, 16, 1024) 0 add_25[0][0] \n____________________________________________________________________________________________________\nres4c_branch2a (Conv2D) (None, 16, 16, 256) 262400 activation_78[0][0] \n____________________________________________________________________________________________________\nbn4c_branch2a (BatchNormalizatio (None, 16, 16, 256) 1024 res4c_branch2a[0][0] \n____________________________________________________________________________________________________\nactivation_79 (Activation) (None, 16, 16, 256) 0 bn4c_branch2a[0][0] \n____________________________________________________________________________________________________\nres4c_branch2b (Conv2D) (None, 16, 16, 256) 590080 activation_79[0][0] \n____________________________________________________________________________________________________\nbn4c_branch2b (BatchNormalizatio (None, 16, 16, 256) 1024 res4c_branch2b[0][0] \n____________________________________________________________________________________________________\nactivation_80 (Activation) (None, 16, 16, 256) 0 bn4c_branch2b[0][0] \n____________________________________________________________________________________________________\nres4c_branch2c (Conv2D) (None, 16, 16, 1024) 263168 activation_80[0][0] \n____________________________________________________________________________________________________\nbn4c_branch2c (BatchNormalizatio (None, 16, 16, 1024) 4096 res4c_branch2c[0][0] \n____________________________________________________________________________________________________\nadd_26 (Add) (None, 16, 16, 1024) 0 bn4c_branch2c[0][0] \n activation_78[0][0] \n____________________________________________________________________________________________________\nactivation_81 (Activation) (None, 16, 16, 1024) 0 add_26[0][0] 
\n____________________________________________________________________________________________________\nres4d_branch2a (Conv2D) (None, 16, 16, 256) 262400 activation_81[0][0] \n____________________________________________________________________________________________________\nbn4d_branch2a (BatchNormalizatio (None, 16, 16, 256) 1024 res4d_branch2a[0][0] \n____________________________________________________________________________________________________\nactivation_82 (Activation) (None, 16, 16, 256) 0 bn4d_branch2a[0][0] \n____________________________________________________________________________________________________\nres4d_branch2b (Conv2D) (None, 16, 16, 256) 590080 activation_82[0][0] \n____________________________________________________________________________________________________\nbn4d_branch2b (BatchNormalizatio (None, 16, 16, 256) 1024 res4d_branch2b[0][0] \n____________________________________________________________________________________________________\nactivation_83 (Activation) (None, 16, 16, 256) 0 bn4d_branch2b[0][0] \n____________________________________________________________________________________________________\nres4d_branch2c (Conv2D) (None, 16, 16, 1024) 263168 activation_83[0][0] \n____________________________________________________________________________________________________\nbn4d_branch2c (BatchNormalizatio (None, 16, 16, 1024) 4096 res4d_branch2c[0][0] \n____________________________________________________________________________________________________\nadd_27 (Add) (None, 16, 16, 1024) 0 bn4d_branch2c[0][0] \n activation_81[0][0] \n____________________________________________________________________________________________________\nactivation_84 (Activation) (None, 16, 16, 1024) 0 add_27[0][0] \n____________________________________________________________________________________________________\nres4e_branch2a (Conv2D) (None, 16, 16, 256) 262400 activation_84[0][0] 
\n____________________________________________________________________________________________________\nbn4e_branch2a (BatchNormalizatio (None, 16, 16, 256) 1024 res4e_branch2a[0][0] \n____________________________________________________________________________________________________\nactivation_85 (Activation) (None, 16, 16, 256) 0 bn4e_branch2a[0][0] \n____________________________________________________________________________________________________\nres4e_branch2b (Conv2D) (None, 16, 16, 256) 590080 activation_85[0][0] \n____________________________________________________________________________________________________\nbn4e_branch2b (BatchNormalizatio (None, 16, 16, 256) 1024 res4e_branch2b[0][0] \n____________________________________________________________________________________________________\nactivation_86 (Activation) (None, 16, 16, 256) 0 bn4e_branch2b[0][0] \n____________________________________________________________________________________________________\nres4e_branch2c (Conv2D) (None, 16, 16, 1024) 263168 activation_86[0][0] \n____________________________________________________________________________________________________\nbn4e_branch2c (BatchNormalizatio (None, 16, 16, 1024) 4096 res4e_branch2c[0][0] \n____________________________________________________________________________________________________\nadd_28 (Add) (None, 16, 16, 1024) 0 bn4e_branch2c[0][0] \n activation_84[0][0] \n____________________________________________________________________________________________________\nactivation_87 (Activation) (None, 16, 16, 1024) 0 add_28[0][0] \n____________________________________________________________________________________________________\nres4f_branch2a (Conv2D) (None, 16, 16, 256) 262400 activation_87[0][0] \n____________________________________________________________________________________________________\nbn4f_branch2a (BatchNormalizatio (None, 16, 16, 256) 1024 res4f_branch2a[0][0] 
\n____________________________________________________________________________________________________\nactivation_88 (Activation) (None, 16, 16, 256) 0 bn4f_branch2a[0][0] \n____________________________________________________________________________________________________\nres4f_branch2b (Conv2D) (None, 16, 16, 256) 590080 activation_88[0][0] \n____________________________________________________________________________________________________\nbn4f_branch2b (BatchNormalizatio (None, 16, 16, 256) 1024 res4f_branch2b[0][0] \n____________________________________________________________________________________________________\nactivation_89 (Activation) (None, 16, 16, 256) 0 bn4f_branch2b[0][0] \n____________________________________________________________________________________________________\nres4f_branch2c (Conv2D) (None, 16, 16, 1024) 263168 activation_89[0][0] \n____________________________________________________________________________________________________\nbn4f_branch2c (BatchNormalizatio (None, 16, 16, 1024) 4096 res4f_branch2c[0][0] \n____________________________________________________________________________________________________\nadd_29 (Add) (None, 16, 16, 1024) 0 bn4f_branch2c[0][0] \n activation_87[0][0] \n____________________________________________________________________________________________________\nactivation_90 (Activation) (None, 16, 16, 1024) 0 add_29[0][0] \n____________________________________________________________________________________________________\nres5a_branch2a (Conv2D) (None, 8, 8, 512) 524800 activation_90[0][0] \n____________________________________________________________________________________________________\nbn5a_branch2a (BatchNormalizatio (None, 8, 8, 512) 2048 res5a_branch2a[0][0] \n____________________________________________________________________________________________________\nactivation_91 (Activation) (None, 8, 8, 512) 0 bn5a_branch2a[0][0] 
\n____________________________________________________________________________________________________\nres5a_branch2b (Conv2D) (None, 8, 8, 512) 2359808 activation_91[0][0] \n____________________________________________________________________________________________________\nbn5a_branch2b (BatchNormalizatio (None, 8, 8, 512) 2048 res5a_branch2b[0][0] \n____________________________________________________________________________________________________\nactivation_92 (Activation) (None, 8, 8, 512) 0 bn5a_branch2b[0][0] \n____________________________________________________________________________________________________\nres5a_branch2c (Conv2D) (None, 8, 8, 2048) 1050624 activation_92[0][0] \n____________________________________________________________________________________________________\nres5a_branch1 (Conv2D) (None, 8, 8, 2048) 2099200 activation_90[0][0] \n____________________________________________________________________________________________________\nbn5a_branch2c (BatchNormalizatio (None, 8, 8, 2048) 8192 res5a_branch2c[0][0] \n____________________________________________________________________________________________________\nbn5a_branch1 (BatchNormalization (None, 8, 8, 2048) 8192 res5a_branch1[0][0] \n____________________________________________________________________________________________________\nadd_30 (Add) (None, 8, 8, 2048) 0 bn5a_branch2c[0][0] \n bn5a_branch1[0][0] \n____________________________________________________________________________________________________\nactivation_93 (Activation) (None, 8, 8, 2048) 0 add_30[0][0] \n____________________________________________________________________________________________________\nres5b_branch2a (Conv2D) (None, 8, 8, 512) 1049088 activation_93[0][0] \n____________________________________________________________________________________________________\nbn5b_branch2a (BatchNormalizatio (None, 8, 8, 512) 2048 res5b_branch2a[0][0] 
\n____________________________________________________________________________________________________\nactivation_94 (Activation) (None, 8, 8, 512) 0 bn5b_branch2a[0][0] \n____________________________________________________________________________________________________\nres5b_branch2b (Conv2D) (None, 8, 8, 512) 2359808 activation_94[0][0] \n____________________________________________________________________________________________________\nbn5b_branch2b (BatchNormalizatio (None, 8, 8, 512) 2048 res5b_branch2b[0][0] \n____________________________________________________________________________________________________\nactivation_95 (Activation) (None, 8, 8, 512) 0 bn5b_branch2b[0][0] \n____________________________________________________________________________________________________\nres5b_branch2c (Conv2D) (None, 8, 8, 2048) 1050624 activation_95[0][0] \n____________________________________________________________________________________________________\nbn5b_branch2c (BatchNormalizatio (None, 8, 8, 2048) 8192 res5b_branch2c[0][0] \n____________________________________________________________________________________________________\nadd_31 (Add) (None, 8, 8, 2048) 0 bn5b_branch2c[0][0] \n activation_93[0][0] \n____________________________________________________________________________________________________\nactivation_96 (Activation) (None, 8, 8, 2048) 0 add_31[0][0] \n____________________________________________________________________________________________________\nres5c_branch2a (Conv2D) (None, 8, 8, 512) 1049088 activation_96[0][0] \n____________________________________________________________________________________________________\nbn5c_branch2a (BatchNormalizatio (None, 8, 8, 512) 2048 res5c_branch2a[0][0] \n____________________________________________________________________________________________________\nactivation_97 (Activation) (None, 8, 8, 512) 0 bn5c_branch2a[0][0] 
\n____________________________________________________________________________________________________\nres5c_branch2b (Conv2D) (None, 8, 8, 512) 2359808 activation_97[0][0] \n____________________________________________________________________________________________________\nbn5c_branch2b (BatchNormalizatio (None, 8, 8, 512) 2048 res5c_branch2b[0][0] \n____________________________________________________________________________________________________\nactivation_98 (Activation) (None, 8, 8, 512) 0 bn5c_branch2b[0][0] \n____________________________________________________________________________________________________\nres5c_branch2c (Conv2D) (None, 8, 8, 2048) 1050624 activation_98[0][0] \n____________________________________________________________________________________________________\nbn5c_branch2c (BatchNormalizatio (None, 8, 8, 2048) 8192 res5c_branch2c[0][0] \n____________________________________________________________________________________________________\nadd_32 (Add) (None, 8, 8, 2048) 0 bn5c_branch2c[0][0] \n activation_96[0][0] \n____________________________________________________________________________________________________\nactivation_99 (Activation) (None, 8, 8, 2048) 0 add_32[0][0] \n____________________________________________________________________________________________________\navg_pool (AveragePooling2D) (None, 1, 1, 2048) 0 activation_99[0][0] \n____________________________________________________________________________________________________\nglobal_average_pooling2d_2 (Glob (None, 2048) 0 avg_pool[0][0] \n____________________________________________________________________________________________________\ndense_2 (Dense) (None, 1024) 2098176 global_average_pooling2d_2[0][0] \n____________________________________________________________________________________________________\nbatch_normalization_2 (BatchNorm (None, 1024) 4096 dense_2[0][0] 
\n____________________________________________________________________________________________________\nactivation_100 (Activation) (None, 1024) 0 batch_normalization_2[0][0] \n____________________________________________________________________________________________________\noutput_layer (Dense) (None, 1) 1025 activation_100[0][0] \n====================================================================================================\nTotal params: 25,691,009\nTrainable params: 25,635,841\nNon-trainable params: 55,168\n____________________________________________________________________________________________________\n" ], [ "csv_logger = keras.callbacks.CSVLogger(os.path.join(FLAG_model_save, 'training.log'))\ncheckpoint = keras.callbacks.ModelCheckpoint(os.path.join(FLAG_model_save, 'model.h5'), \n monitor='val_loss', \n verbose=1, \n save_best_only=True,\n save_weights_only=False,\n mode='min',\n period=1)\n\nearlystop = keras.callbacks.EarlyStopping(monitor='val_loss', mode='min',patience=20,min_delta=0.01)", "_____no_output_____" ], [ "# Train model on dataset\nmodel.fit_generator(generator=training_generator,\n validation_data=validation_generator,\n steps_per_epoch=Xtrain.shape[0]/FLAG_batch_size, \n validation_steps=Xtest.shape[0]/FLAG_batch_size,\n epochs=200,\n callbacks=[csv_logger, checkpoint, earlystop])", "Epoch 1/200\n592/592 [============================>.] - ETA: 0s - loss: 1021389.9641Epoch 00000: val_loss improved from inf to 928932.13816, saving model to /home/put_data/moth/code/cmchang/regression/fullcrop_dp0_newaug-rmhue+old_species_keras_resnet_fold_20181122_4/model.h5\n593/592 [==============================] - 305s - loss: 1020976.4823 - val_loss: 928932.1382\nEpoch 2/200\n592/592 [============================>.] 
- ETA: 0s - loss: 987279.9959Epoch 00001: val_loss improved from 928932.13816 to 886246.27373, saving model to /home/put_data/moth/code/cmchang/regression/fullcrop_dp0_newaug-rmhue+old_species_keras_resnet_fold_20181122_4/model.h5\n593/592 [==============================] - 261s - loss: 988873.8030 - val_loss: 886246.2737\nEpoch 3/200\n592/592 [============================>.] - ETA: 0s - loss: 945664.8391Epoch 00002: val_loss improved from 886246.27373 to 827497.64709, saving model to /home/put_data/moth/code/cmchang/regression/fullcrop_dp0_newaug-rmhue+old_species_keras_resnet_fold_20181122_4/model.h5\n593/592 [==============================] - 263s - loss: 946230.9317 - val_loss: 827497.6471\nEpoch 4/200\n592/592 [============================>.] - ETA: 0s - loss: 893814.2986Epoch 00003: val_loss improved from 827497.64709 to 794859.04887, saving model to /home/put_data/moth/code/cmchang/regression/fullcrop_dp0_newaug-rmhue+old_species_keras_resnet_fold_20181122_4/model.h5\n593/592 [==============================] - 262s - loss: 894498.4770 - val_loss: 794859.0489\nEpoch 5/200\n592/592 [============================>.] - ETA: 0s - loss: 829496.6133Epoch 00004: val_loss improved from 794859.04887 to 725501.36889, saving model to /home/put_data/moth/code/cmchang/regression/fullcrop_dp0_newaug-rmhue+old_species_keras_resnet_fold_20181122_4/model.h5\n593/592 [==============================] - 260s - loss: 830308.2893 - val_loss: 725501.3689\nEpoch 6/200\n592/592 [============================>.] - ETA: 0s - loss: 768280.1698Epoch 00005: val_loss improved from 725501.36889 to 651158.40414, saving model to /home/put_data/moth/code/cmchang/regression/fullcrop_dp0_newaug-rmhue+old_species_keras_resnet_fold_20181122_4/model.h5\n593/592 [==============================] - 261s - loss: 768687.0224 - val_loss: 651158.4041\nEpoch 7/200\n592/592 [============================>.] 
- ETA: 0s - loss: 705974.0706Epoch 00006: val_loss improved from 651158.40414 to 597306.56720, saving model to /home/put_data/moth/code/cmchang/regression/fullcrop_dp0_newaug-rmhue+old_species_keras_resnet_fold_20181122_4/model.h5\n593/592 [==============================] - 260s - loss: 707308.0505 - val_loss: 597306.5672\nEpoch 8/200\n592/592 [============================>.] - ETA: 0s - loss: 645959.3379Epoch 00007: val_loss did not improve\n593/592 [==============================] - 263s - loss: 646440.5412 - val_loss: 664547.7185\nEpoch 9/200\n592/592 [============================>.] - ETA: 0s - loss: 586438.7499Epoch 00008: val_loss improved from 597306.56720 to 591501.20771, saving model to /home/put_data/moth/code/cmchang/regression/fullcrop_dp0_newaug-rmhue+old_species_keras_resnet_fold_20181122_4/model.h5\n593/592 [==============================] - 262s - loss: 586138.8162 - val_loss: 591501.2077\nEpoch 10/200\n592/592 [============================>.] - ETA: 0s - loss: 531268.4547Epoch 00009: val_loss improved from 591501.20771 to 454551.45477, saving model to /home/put_data/moth/code/cmchang/regression/fullcrop_dp0_newaug-rmhue+old_species_keras_resnet_fold_20181122_4/model.h5\n593/592 [==============================] - 261s - loss: 531660.1813 - val_loss: 454551.4548\nEpoch 11/200\n592/592 [============================>.] - ETA: 0s - loss: 475919.8845Epoch 00010: val_loss improved from 454551.45477 to 402489.10221, saving model to /home/put_data/moth/code/cmchang/regression/fullcrop_dp0_newaug-rmhue+old_species_keras_resnet_fold_20181122_4/model.h5\n593/592 [==============================] - 261s - loss: 477242.3431 - val_loss: 402489.1022\nEpoch 12/200\n592/592 [============================>.] 
- ETA: 0s - loss: 427811.5922Epoch 00011: val_loss improved from 402489.10221 to 357057.59904, saving model to /home/put_data/moth/code/cmchang/regression/fullcrop_dp0_newaug-rmhue+old_species_keras_resnet_fold_20181122_4/model.h5\n593/592 [==============================] - 261s - loss: 428720.7632 - val_loss: 357057.5990\nEpoch 13/200\n592/592 [============================>.] - ETA: 0s - loss: 379211.8473Epoch 00012: val_loss improved from 357057.59904 to 347139.36161, saving model to /home/put_data/moth/code/cmchang/regression/fullcrop_dp0_newaug-rmhue+old_species_keras_resnet_fold_20181122_4/model.h5\n593/592 [==============================] - 261s - loss: 379700.1536 - val_loss: 347139.3616\nEpoch 14/200\n592/592 [============================>.] - ETA: 0s - loss: 331157.9216Epoch 00013: val_loss did not improve\n593/592 [==============================] - 260s - loss: 331626.3514 - val_loss: 368596.1980\nEpoch 15/200\n592/592 [============================>.] - ETA: 0s - loss: 291580.8387Epoch 00014: val_loss improved from 347139.36161 to 300951.35115, saving model to /home/put_data/moth/code/cmchang/regression/fullcrop_dp0_newaug-rmhue+old_species_keras_resnet_fold_20181122_4/model.h5\n593/592 [==============================] - 260s - loss: 292493.9678 - val_loss: 300951.3512\nEpoch 16/200\n592/592 [============================>.] - ETA: 0s - loss: 254458.6986Epoch 00015: val_loss did not improve\n593/592 [==============================] - 259s - loss: 255034.2527 - val_loss: 340188.9515\nEpoch 17/200\n592/592 [============================>.] - ETA: 0s - loss: 219353.5441Epoch 00016: val_loss improved from 300951.35115 to 243496.13675, saving model to /home/put_data/moth/code/cmchang/regression/fullcrop_dp0_newaug-rmhue+old_species_keras_resnet_fold_20181122_4/model.h5\n593/592 [==============================] - 260s - loss: 219789.2736 - val_loss: 243496.1367\nEpoch 18/200\n592/592 [============================>.] 
- ETA: 0s - loss: 190077.4751Epoch 00017: val_loss improved from 243496.13675 to 222578.81414, saving model to /home/put_data/moth/code/cmchang/regression/fullcrop_dp0_newaug-rmhue+old_species_keras_resnet_fold_20181122_4/model.h5\n593/592 [==============================] - 260s - loss: 190248.7648 - val_loss: 222578.8141\nEpoch 19/200\n592/592 [============================>.] - ETA: 0s - loss: 164850.4522Epoch 00018: val_loss improved from 222578.81414 to 168758.82484, saving model to /home/put_data/moth/code/cmchang/regression/fullcrop_dp0_newaug-rmhue+old_species_keras_resnet_fold_20181122_4/model.h5\n593/592 [==============================] - 261s - loss: 165073.1409 - val_loss: 168758.8248\nEpoch 20/200\n592/592 [============================>.] - ETA: 0s - loss: 141033.2835Epoch 00019: val_loss improved from 168758.82484 to 122362.11733, saving model to /home/put_data/moth/code/cmchang/regression/fullcrop_dp0_newaug-rmhue+old_species_keras_resnet_fold_20181122_4/model.h5\n593/592 [==============================] - 260s - loss: 142451.5218 - val_loss: 122362.1173\nEpoch 21/200\n592/592 [============================>.] - ETA: 0s - loss: 118870.7812Epoch 00020: val_loss did not improve\n593/592 [==============================] - 259s - loss: 119068.5552 - val_loss: 129877.9428\nEpoch 22/200\n592/592 [============================>.] - ETA: 0s - loss: 101134.6848Epoch 00021: val_loss did not improve\n593/592 [==============================] - 258s - loss: 101379.8927 - val_loss: 137084.0702\nEpoch 23/200\n592/592 [============================>.] - ETA: 0s - loss: 77093.2650Epoch 00022: val_loss improved from 122362.11733 to 86649.04218, saving model to /home/put_data/moth/code/cmchang/regression/fullcrop_dp0_newaug-rmhue+old_species_keras_resnet_fold_20181122_4/model.h5\n593/592 [==============================] - 260s - loss: 77406.6322 - val_loss: 86649.0422\nEpoch 24/200\n592/592 [============================>.] 
- ETA: 0s - loss: 57366.8848Epoch 00023: val_loss improved from 86649.04218 to 74596.38792, saving model to /home/put_data/moth/code/cmchang/regression/fullcrop_dp0_newaug-rmhue+old_species_keras_resnet_fold_20181122_4/model.h5\n593/592 [==============================] - 260s - loss: 57620.0576 - val_loss: 74596.3879\nEpoch 25/200\n592/592 [============================>.] - ETA: 0s - loss: 48020.1256Epoch 00024: val_loss did not improve\n593/592 [==============================] - 258s - loss: 48066.2446 - val_loss: 81196.4952\nEpoch 26/200\n592/592 [============================>.] - ETA: 0s - loss: 42734.0391Epoch 00025: val_loss improved from 74596.38792 to 63920.46079, saving model to /home/put_data/moth/code/cmchang/regression/fullcrop_dp0_newaug-rmhue+old_species_keras_resnet_fold_20181122_4/model.h5\n593/592 [==============================] - 260s - loss: 42841.8718 - val_loss: 63920.4608\nEpoch 27/200\n592/592 [============================>.] - ETA: 0s - loss: 37903.8291Epoch 00026: val_loss improved from 63920.46079 to 61340.73101, saving model to /home/put_data/moth/code/cmchang/regression/fullcrop_dp0_newaug-rmhue+old_species_keras_resnet_fold_20181122_4/model.h5\n593/592 [==============================] - 260s - loss: 37980.4478 - val_loss: 61340.7310\nEpoch 28/200\n592/592 [============================>.] - ETA: 0s - loss: 34357.5324Epoch 00027: val_loss improved from 61340.73101 to 60484.66414, saving model to /home/put_data/moth/code/cmchang/regression/fullcrop_dp0_newaug-rmhue+old_species_keras_resnet_fold_20181122_4/model.h5\n593/592 [==============================] - 260s - loss: 34352.9054 - val_loss: 60484.6641\nEpoch 29/200\n592/592 [============================>.] - ETA: 0s - loss: 31392.0784Epoch 00028: val_loss did not improve\n593/592 [==============================] - 258s - loss: 31527.5584 - val_loss: 63791.9741\nEpoch 30/200\n592/592 [============================>.] 
- ETA: 0s - loss: 30213.7762Epoch 00029: val_loss did not improve\n593/592 [==============================] - 259s - loss: 30336.9423 - val_loss: 66174.2203\nEpoch 31/200\n592/592 [============================>.] - ETA: 0s - loss: 27392.7433Epoch 00030: val_loss did not improve\n593/592 [==============================] - 260s - loss: 27406.7464 - val_loss: 70780.1101\nEpoch 32/200\n592/592 [============================>.] - ETA: 0s - loss: 25234.2949Epoch 00031: val_loss did not improve\n593/592 [==============================] - 258s - loss: 25228.3057 - val_loss: 72485.6886\nEpoch 33/200\n592/592 [============================>.] - ETA: 0s - loss: 22899.1423Epoch 00032: val_loss did not improve\n593/592 [==============================] - 260s - loss: 22960.6996 - val_loss: 75663.0199\nEpoch 34/200\n592/592 [============================>.] - ETA: 0s - loss: 21748.4423Epoch 00033: val_loss improved from 60484.66414 to 54262.66071, saving model to /home/put_data/moth/code/cmchang/regression/fullcrop_dp0_newaug-rmhue+old_species_keras_resnet_fold_20181122_4/model.h5\n593/592 [==============================] - 260s - loss: 21791.8323 - val_loss: 54262.6607\nEpoch 35/200\n592/592 [============================>.] - ETA: 0s - loss: 20561.9067Epoch 00034: val_loss did not improve\n593/592 [==============================] - 259s - loss: 20598.8052 - val_loss: 59210.7183\nEpoch 36/200\n592/592 [============================>.] - ETA: 0s - loss: 18934.5306Epoch 00035: val_loss improved from 54262.66071 to 51871.77372, saving model to /home/put_data/moth/code/cmchang/regression/fullcrop_dp0_newaug-rmhue+old_species_keras_resnet_fold_20181122_4/model.h5\n593/592 [==============================] - 261s - loss: 18949.8359 - val_loss: 51871.7737\nEpoch 37/200\n592/592 [============================>.] 
- ETA: 0s - loss: 18685.3810Epoch 00036: val_loss did not improve\n593/592 [==============================] - 259s - loss: 18700.4971 - val_loss: 59058.0998\nEpoch 38/200\n592/592 [============================>.] - ETA: 0s - loss: 16216.7455Epoch 00037: val_loss improved from 51871.77372 to 49378.85138, saving model to /home/put_data/moth/code/cmchang/regression/fullcrop_dp0_newaug-rmhue+old_species_keras_resnet_fold_20181122_4/model.h5\n593/592 [==============================] - 260s - loss: 16218.9279 - val_loss: 49378.8514\nEpoch 39/200\n592/592 [============================>.] - ETA: 0s - loss: 15794.8985Epoch 00038: val_loss improved from 49378.85138 to 45696.31615, saving model to /home/put_data/moth/code/cmchang/regression/fullcrop_dp0_newaug-rmhue+old_species_keras_resnet_fold_20181122_4/model.h5\n593/592 [==============================] - 261s - loss: 15814.2744 - val_loss: 45696.3161\nEpoch 40/200\n592/592 [============================>.] - ETA: 0s - loss: 14708.3457Epoch 00039: val_loss did not improve\n593/592 [==============================] - 258s - loss: 14744.2422 - val_loss: 48029.6605\nEpoch 41/200\n592/592 [============================>.] - ETA: 0s - loss: 14208.2287Epoch 00040: val_loss did not improve\n593/592 [==============================] - 260s - loss: 14218.2464 - val_loss: 45941.6203\nEpoch 42/200\n592/592 [============================>.] - ETA: 0s - loss: 14308.6616Epoch 00041: val_loss did not improve\n593/592 [==============================] - 258s - loss: 14390.9555 - val_loss: 59479.5414\nEpoch 43/200\n592/592 [============================>.] - ETA: 0s - loss: 13610.0854Epoch 00042: val_loss did not improve\n593/592 [==============================] - 259s - loss: 13655.8182 - val_loss: 70847.7207\nEpoch 44/200\n592/592 [============================>.] 
- ETA: 0s - loss: 12791.0953Epoch 00043: val_loss did not improve\n593/592 [==============================] - 260s - loss: 12877.9284 - val_loss: 87924.2782\nEpoch 45/200\n592/592 [============================>.] - ETA: 0s - loss: 11914.6242Epoch 00044: val_loss did not improve\n593/592 [==============================] - 258s - loss: 11934.3358 - val_loss: 49184.1183\nEpoch 46/200\n592/592 [============================>.] - ETA: 0s - loss: 11487.9445Epoch 00045: val_loss did not improve\n593/592 [==============================] - 258s - loss: 11505.3586 - val_loss: 58369.4991\nEpoch 47/200\n592/592 [============================>.] - ETA: 0s - loss: 11815.8111Epoch 00046: val_loss did not improve\n593/592 [==============================] - 258s - loss: 11872.4387 - val_loss: 50686.6825\nEpoch 48/200\n592/592 [============================>.] - ETA: 0s - loss: 10696.1990Epoch 00047: val_loss did not improve\n593/592 [==============================] - 259s - loss: 10729.3381 - val_loss: 57157.5842\nEpoch 49/200\n592/592 [============================>.] - ETA: 0s - loss: 11822.7583Epoch 00048: val_loss did not improve\n593/592 [==============================] - 259s - loss: 11826.3895 - val_loss: 64670.5583\nEpoch 50/200\n592/592 [============================>.] - ETA: 0s - loss: 11471.5338Epoch 00049: val_loss did not improve\n593/592 [==============================] - 259s - loss: 11488.6025 - val_loss: 48798.5368\nEpoch 51/200\n592/592 [============================>.] - ETA: 0s - loss: 9760.4770Epoch 00050: val_loss improved from 45696.31615 to 41200.96470, saving model to /home/put_data/moth/code/cmchang/regression/fullcrop_dp0_newaug-rmhue+old_species_keras_resnet_fold_20181122_4/model.h5\n593/592 [==============================] - 260s - loss: 9784.1161 - val_loss: 41200.9647\nEpoch 52/200\n592/592 [============================>.] 
- ETA: 0s - loss: 9249.0570Epoch 00051: val_loss did not improve\n593/592 [==============================] - 258s - loss: 9282.2065 - val_loss: 44926.1924\nEpoch 53/200\n592/592 [============================>.] - ETA: 0s - loss: 10779.0742Epoch 00052: val_loss did not improve\n593/592 [==============================] - 259s - loss: 10816.0023 - val_loss: 63058.1681\nEpoch 54/200\n592/592 [============================>.] - ETA: 0s - loss: 9844.1734Epoch 00053: val_loss did not improve\n593/592 [==============================] - 259s - loss: 9855.9891 - val_loss: 43366.9217\nEpoch 55/200\n592/592 [============================>.] - ETA: 0s - loss: 9919.9319Epoch 00054: val_loss did not improve\n593/592 [==============================] - 258s - loss: 9948.0691 - val_loss: 45820.8189\nEpoch 56/200\n592/592 [============================>.] - ETA: 0s - loss: 8287.6093Epoch 00055: val_loss did not improve\n593/592 [==============================] - 259s - loss: 8288.7636 - val_loss: 42919.6668\nEpoch 57/200\n592/592 [============================>.] - ETA: 0s - loss: 7766.9636Epoch 00056: val_loss did not improve\n" ], [ "loss = pd.read_table(csv_logger.filename, delimiter=',')\n\nplt.plot(loss.epoch, loss.loss, label='loss')\nplt.plot(loss.epoch, loss.val_loss, label='val_loss')\nplt.legend()\nplt.xlabel('epoch')\nplt.ylabel('MSE')\nplt.savefig(os.path.join(FLAG_model_save, 'loss.png'))", "_____no_output_____" ], [ "best_epoch = np.argmin(loss.val_loss)", "/home/cmchang/.local/lib/python3.5/site-packages/numpy/core/fromnumeric.py:52: FutureWarning: 'argmin' is deprecated, use 'idxmin' instead. 
The behavior of 'argmin'\nwill be corrected to return the positional minimum in the future.\nUse 'series.values.argmin' to get the position of the minimum now.\n return getattr(obj, method)(*args, **kwds)\n" ], [ "header = 'model_save,base_model,batch_size,hidden,dropout,epoch,loss,val_loss\\n'\nrow = '{0},{1},{2},{3},{4},{5},{6:.4f},{7:.4f}\\n'.format(FLAG_model_save, \n FLAG_base_model,\n FLAG_batch_size,\n FLAG_hidden,\n FLAG_dropout,\n best_epoch,\n loss.iloc[best_epoch]['loss'],\n loss.iloc[best_epoch]['val_loss'])", "_____no_output_____" ], [ "if os.path.exists('result_summary.csv'):\n with open('result_summary.csv','a') as myfile:\n myfile.write(row)\nelse:\n with open('result_summary.csv','w') as myfile:\n myfile.write(header)\n myfile.write(row)", "_____no_output_____" ] ], [ [ "### evaluation over validation dataset", "_____no_output_____" ] ], [ [ "from keras.models import load_model\nmodel = load_model(os.path.join(FLAG_model_save,'model.h5'))", "_____no_output_____" ], [ "TestImg = list()\nfor i in range(Xtest.shape[0]):\n img = io.imread(list(Xtest['img_rmbg_path'])[i])\n TestImg.append(img)\nTestImg = np.stack(TestImg)\n\nTestInput = preprocess_input(TestImg.astype(float))", "_____no_output_____" ], [ "Pred = model.predict(TestInput)", "_____no_output_____" ], [ "from sklearn.metrics import mean_squared_error, mean_absolute_error\nfrom scipy.stats import pearsonr\n\nplt.scatter(Ytest, Pred, s=0.7)\nplt.xlabel('true')\nplt.ylabel('pred')\nplt.title('rmse={0:.4f}, cor={1:.4f}'.format(np.sqrt(mean_squared_error(y_true=Ytest, y_pred=Pred)),\n pearsonr(Ytest, Pred)[0][0]))\nplt.savefig(os.path.join(FLAG_model_save, 'scatter_per_img.png'))", "_____no_output_____" ], [ "result = pd.DataFrame({'Species':Xtest.Species,\n 'pred':Pred.reshape(-1),\n 'true':Ytest.reshape(-1)})", "_____no_output_____" ], [ "result.to_csv(os.path.join(FLAG_model_save, 'predictions.csv'), index=False)", "_____no_output_____" ] ], [ [ "### by species", "_____no_output_____" ] ], [ [ 
"Xtest = Xtest.reset_index()\nXtest.drop(columns='index', inplace=True)\n\nYtest = np.vstack(Xtest['Species'].apply(lambda x: Y_dict[x]))", "_____no_output_____" ], [ "df_species_group = Xtest.groupby('Species').apply(\n lambda g: pd.Series({\n 'indices': g.index.tolist(),\n # 'Alt_class': g['Alt_class'].unique().tolist()[0]\n }))\ndf_species_group = df_species_group.sample(frac=1).reset_index()\ndisplay(df_species_group.head())", "_____no_output_____" ], [ "species_ypred = list()\nspecies_ytest = list()\nfor i in range(len(df_species_group)):\n tidx = df_species_group.iloc[i][1]\n species_ypred.append(np.mean(Pred[tidx], axis=0))\n species_ytest.append(np.mean(Ytest[tidx], axis=0))\n \nspecies_ypred = np.stack(species_ypred)\nspecies_ytest = np.stack(species_ytest)", "_____no_output_____" ], [ "plt.scatter(species_ytest, species_ypred, s=0.7)\nplt.xlabel('true')\nplt.ylabel('pred')\nplt.title('rmse={0:.4f}, cor={1:.4f}'.format(mean_squared_error(y_true=species_ytest, y_pred=species_ypred)**0.5,\n pearsonr(species_ytest, species_ypred)[0][0]))\nplt.savefig(os.path.join(FLAG_model_save, 'scatter_per_species.png'))", "_____no_output_____" ], [ "result = pd.DataFrame({'Species':df_species_group.Species,\n 'pred':species_ypred.reshape(-1),\n 'true':species_ytest.reshape(-1)})", "_____no_output_____" ], [ "result.to_csv(os.path.join(FLAG_model_save, 'predictions_species.csv'), index=False)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
4a2cfb5bc801e5e86bad87a19a4adc1480dff3a0
3,119
ipynb
Jupyter Notebook
notebooks/01_AM_Transform_Covid_Tweets.ipynb
Madhour/CovaxAnalytica
6cd5d2ea990dacdb2e35a8fac9ed0d5429ff99a1
[ "MIT" ]
1
2021-07-07T11:22:58.000Z
2021-07-07T11:22:58.000Z
notebooks/01_AM_Transform_Covid_Tweets.ipynb
Madhour/CovaxAnalytica
6cd5d2ea990dacdb2e35a8fac9ed0d5429ff99a1
[ "MIT" ]
null
null
null
notebooks/01_AM_Transform_Covid_Tweets.ipynb
Madhour/CovaxAnalytica
6cd5d2ea990dacdb2e35a8fac9ed0d5429ff99a1
[ "MIT" ]
null
null
null
22.766423
171
0.549535
[ [ [ "import pandas as pd\nimport numpy as np\nimport reverse_geocode", "_____no_output_____" ] ], [ [ "# [OUTDATED!] Transform overall COVID-19 tweets\n\n- add country based on coordinates\n- store as csv", "_____no_output_____" ] ], [ [ "hydrated_json = pd.read_json(\"../data/raw/Hydrated_Tweets.jsonl\", lines=True)", "_____no_output_____" ] ], [ [ "Drop faulty rows (i.e. rows without coordinates):", "_____no_output_____" ] ], [ [ "hydrated_json = hydrated_json.drop(hydrated_json[hydrated_json['coordinates'].isnull()].index).reset_index(drop=True)", "_____no_output_____" ] ], [ [ "Add country name based on coordinates:", "_____no_output_____" ] ], [ [ "hydrated_json[\"country\"] = \"\"\nfor i in range(len(hydrated_json)):\n hydrated_json[\"country\"][i] = reverse_geocode.search(tuple([hydrated_json[\"geo\"][i][\"coordinates\"],(1,1)]))[0]['country'] #second tuple is to avoid error", "<ipython-input-4-8763949fbb50>:5: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n hydrated_json[\"country\"][i] = reverse_geocode.search(tuple([hydrated_json[\"geo\"][i][\"coordinates\"],(1,1)]))[0]['country'] #second tuple is to avoid error\n" ], [ "len(hydrated_json[\"country\"].unique())", "_____no_output_____" ] ], [ [ "Export dataset as CSV:", "_____no_output_____" ] ], [ [ "hydrated_json.to_csv(\"../data/interim/Hydrated_Tweets_with_countries.csv\")", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
4a2d068bc5f247004c62611fdbb75671a909217c
64,078
ipynb
Jupyter Notebook
session/summarization/bigbird/export-bigbird-summary-small.ipynb
ahmed3991/malaya
d90be6d5b2a1393a3f8b8b1ffa8ae676cdaa083c
[ "MIT" ]
1
2021-03-19T22:42:34.000Z
2021-03-19T22:42:34.000Z
session/summarization/bigbird/export-bigbird-summary-small.ipynb
ahmed3991/malaya
d90be6d5b2a1393a3f8b8b1ffa8ae676cdaa083c
[ "MIT" ]
null
null
null
session/summarization/bigbird/export-bigbird-summary-small.ipynb
ahmed3991/malaya
d90be6d5b2a1393a3f8b8b1ffa8ae676cdaa083c
[ "MIT" ]
null
null
null
44.935484
3,274
0.644418
[ [ [ "import os\n\nos.environ['GOOGLE_APPLICATION_CREDENTIALS'] = '/home/husein/t5/prepare/mesolitica-tpu.json'\nos.environ['CUDA_VISIBLE_DEVICES'] = ''", "_____no_output_____" ], [ "from bigbird import modeling\nfrom bigbird import utils\nimport tensorflow as tf\nimport numpy as np", "_____no_output_____" ], [ "import sentencepiece as spm\n\nvocab = '/home/husein/b2b/sp10m.cased.t5.model'\nsp = spm.SentencePieceProcessor()\nsp.Load(vocab)\n\nclass Encoder:\n def __init__(self, sp):\n self.sp = sp\n self.vocab_size = sp.GetPieceSize() + 100\n \n def encode(self, s):\n return self.sp.EncodeAsIds(s)\n \n def decode(self, ids, strip_extraneous=False):\n return self.sp.DecodeIds(list(ids))\n \nencoder = Encoder(sp)", "_____no_output_____" ], [ "top_p = tf.placeholder(tf.float32, None, name = 'top_p')\ntemperature = tf.placeholder(tf.float32, None, name = 'temperature')", "_____no_output_____" ], [ "bert_config = {\n # transformer basic configs\n 'attention_probs_dropout_prob': 0.1,\n 'hidden_act': 'relu',\n 'hidden_dropout_prob': 0.1,\n 'hidden_size': 512,\n 'initializer_range': 0.02,\n 'intermediate_size': 3072,\n 'max_position_embeddings': 4096,\n 'max_encoder_length': 2048,\n 'max_decoder_length': 512,\n 'num_attention_heads': 8,\n 'num_hidden_layers': 6,\n 'type_vocab_size': 2,\n 'scope': 'pegasus',\n 'use_bias': False,\n 'rescale_embedding': True,\n 'vocab_model_file': None,\n # sparse mask configs\n 'attention_type': 'block_sparse',\n 'norm_type': 'prenorm',\n 'block_size': 64,\n 'num_rand_blocks': 3,\n 'vocab_size': 32128,\n 'beam_size': 1,\n 'alpha': 1.0,\n 'couple_encoder_decoder': False,\n 'num_warmup_steps': 10000,\n 'learning_rate': 0.1,\n 'label_smoothing': 0.0,\n 'optimizer': 'Adafactor',\n 'use_tpu': True,\n 'top_p': top_p,\n 'temperature': temperature\n}", "_____no_output_____" ], [ "model = modeling.TransformerModel(bert_config)", "_____no_output_____" ], [ "X = tf.placeholder(tf.int32, [None, None])", "_____no_output_____" ], [ "r = model(X, training 
= False)\nr", "WARNING:tensorflow:From /home/husein/malaya/Malaya/pretrained-model/bigbird/bigbird/modeling.py:226: where (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse tf.where in 2.0, which has the same broadcast rule as np.where\nWARNING:tensorflow:From /home/husein/.local/lib/python3.6/site-packages/tensorflow_core/python/autograph/converters/directives.py:119: The name tf.random_uniform is deprecated. Please use tf.random.uniform instead.\n\n" ], [ "logits = tf.identity(r[0][2], name = 'logits')\nlogits", "_____no_output_____" ], [ "files = tf.gfile.Glob('gs://mesolitica-tpu-general/t2t-summarization-v2/data/seq2*')", "_____no_output_____" ], [ "batch_size = 4\ndata_fields = {\n 'inputs': tf.VarLenFeature(tf.int64),\n 'targets': tf.VarLenFeature(tf.int64),\n}\ndata_len = {\n 'inputs': 2048,\n 'targets': 1024,\n}\n\ndef parse(serialized_example):\n\n features = tf.parse_single_example(\n serialized_example, features = data_fields\n )\n for k in features.keys():\n features[k] = features[k].values\n features[k] = tf.pad(\n features[k], [[0, data_len[k] - tf.shape(features[k])[0]]]\n )\n features[k].set_shape((data_len[k]))\n\n return features\n\ndef _decode_record(example, name_to_features):\n \"\"\"Decodes a record to a TensorFlow example.\"\"\"\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example\n\nd = tf.data.TFRecordDataset(files)\nd = d.map(parse, num_parallel_calls = 32)\nd = d.apply(\n tf.contrib.data.map_and_batch(\n lambda record: _decode_record(record, data_fields),\n batch_size = batch_size,\n num_parallel_batches = 4,\n drop_remainder = True,\n )\n)\nd = d.make_one_shot_iterator().get_next()\nd", "WARNING:tensorflow:From 
/home/husein/.local/lib/python3.6/site-packages/tensorflow_core/python/autograph/converters/directives.py:119: The name tf.parse_single_example is deprecated. Please use tf.io.parse_single_example instead.\n\n" ], [ "import tensorflow as tf\n\nckpt_path = tf.train.latest_checkpoint('gs://mesolitica-tpu-general/bigbird-summarization-small')\nckpt_path", "_____no_output_____" ], [ "sess = tf.InteractiveSession()\nsess.run(tf.global_variables_initializer())", "_____no_output_____" ], [ "r_ = sess.run(d)", "_____no_output_____" ], [ "encoder.decode(r_['inputs'][0].tolist())", "_____no_output_____" ], [ "encoder.decode(r_['targets'][0].tolist())", "_____no_output_____" ], [ "# import re\n# import collections\n\n# def get_assignment_map_from_checkpoint(tvars, init_checkpoint):\n# \"\"\"Compute the union of the current variables and checkpoint variables.\"\"\"\n# assignment_map = {}\n# initialized_variable_names = {}\n\n# name_to_variable = collections.OrderedDict()\n# for var in tvars:\n# name = var.name\n# m = re.match('^(.*):\\\\d+$', name)\n# if m is not None:\n# name = m.group(1)\n# name_to_variable[name] = var\n\n# init_vars = tf.train.list_variables(init_checkpoint)\n# assignment_map = collections.OrderedDict()\n# for x in init_vars:\n# (name, var) = (x[0], x[1])\n\n# l = 'pegasus/' + name\n# l = l.replace('embeddings/weights', 'embeddings/word_embeddings')\n# l = l.replace('self/output', 'output')\n# l = l.replace('ffn/dense_1', 'output/dense')\n# l = l.replace('ffn', 'intermediate')\n# l = l.replace('memory_attention/output', 'attention/encdec_output')\n# l = l.replace('memory_attention', 'attention/encdec')\n\n# if l not in name_to_variable:\n# continue\n# assignment_map[name] = name_to_variable[l]\n# initialized_variable_names[l + ':0'] = 1\n\n# return (assignment_map, initialized_variable_names)", "_____no_output_____" ], [ "# t = tf.trainable_variables()\n# assignment_map, initialized_variable_names = get_assignment_map_from_checkpoint(t, ckpt_path)", 
"_____no_output_____" ], [ "saver = tf.train.Saver()\nsaver.restore(sess, ckpt_path)", "INFO:tensorflow:Restoring parameters from gs://mesolitica-tpu-general/bigbird-summarization-small/model.ckpt-389700\n" ], [ "# var_lists = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)\n# saver = tf.train.Saver(var_list = var_lists)\n# saver.restore(sess, 'gs://mesolitica-tpu-general/bigbird-summarization-small/model.ckpt-0')", "_____no_output_____" ], [ "import re\nfrom unidecode import unidecode\n\ndef cleaning(string):\n return re.sub(r'[ ]+', ' ', unidecode(string.replace('\\n', ' '))).strip()", "_____no_output_____" ], [ "string = \"\"\"\nKUALA LUMPUR: Hakim Mahkamah Tinggi, Mohd Nazlan Mohd Ghazali menyifatkan kes penyelewengan dana RM42 juta milik SRC International Sdn Bhd dihadapi Datuk Seri Najib Razak adalah kesalahan salah guna kedudukan, pecah amanah jenayah dan pengubahan wang haram yang paling teruk.\n\nMohd Nazlan yang mensabitkan Najib terhadap kesemua tujuh tuduhan dan memerintahkan bekas Perdana Menteri itu dipenjara 12 tahun, dan didenda RM210 juta, berkata ia bukan sahaja disebabkan oleh alasan bagaimana jenayah itu dilakukan, malah kes berprofil tinggi berkenaan turut membabitkan sejumlah wang yang sangat besar.\n\nMelalui alasan penghakiman penuh setebal 801 muka surat itu, Mohd Nazlan, berkata kes terbabit mempunyai elemen yang memberikan kesan ke atas kepentingan awam kerana dana RM42 juta itu adalah milik Kementerian Kewangan (Diperbadankan) (MKD) yang berkemungkinan berasal daripada dana pencen Kumpulan Wang Persaraan (Diperbadankan) (KWAP) berjumlah RM4 bilion.\n\n\"Dan yang paling penting ia membabitkan individu yang pada ketika itu berada dalam pada tertinggi dalam kerajaan,\" katanya.\n\nPada 28 Julai lalu, Mohd Nazlan memerintahkan Najib dipenjarakan 10 tahun masing-masing bagi tiga tuduhan pecah amanah wang RM42 juta milik SRC.\n\nHakim turut memerintahkan Najib dipenjara 12 tahun dan denda RM210 juta (jika gagal bayar, lima tahun penjara) 
bagi tuduhan menyalahgunakan kedudukan.\n\nBagi tuduhan pengubahan wang haram pula, Mohd Nazlan memerintahkan Najib dipenjara 10 tahun bagi setiap tuduhan.\n\nSementara itu, Mohd Nazlan berkata, Najib selaku tertuduh tidak menunjukkan penyesalan, malah mempertahankan pembelaan beliau tidak mengetahui mengenai wang RM42 juta milik SRC itu dalam rayuannya bagi diringankan hukuman.\n\n\"Tetapi saya tidak boleh menafikan beliau adalah Perdana Menteri negara ini dan tidak boleh mempersoalkan sumbangannya untuk kebaikan dan kesejahteraan masyarakat dalam pelbagai cara kerana beliau adalah Perdana Menteri selama sembilan tahun.\n\n\"Sejarah politik akan terus diperdebatkan sama ada dari segi keseimbangan, beliau melakukan lebih banyak kebaikan daripada keburukan.\n\n\"Walau apa pun, ia adalah tidak selari dengan idea sesebuah pentadbiran negara yang bersih daripada rasuah yang tidak boleh bertolak ansur dengan sebarang penyalahgunaan kuasa,\" katanya.\n\nMahkamah Rayuan menetapkan pada 15 Oktober ini bagi pengurusan kes rayuan Najib terhadap sabitan dan hukuman terhadapnya.\n\"\"\"", "_____no_output_____" ], [ "string2 = \"\"\"\nGabungan parti Warisan, Pakatan Harapan, dan Upko hari ini mendedahkan calon-calon masing-masing untuk pilihan raya negeri Sabah, tetapi ketika pengumuman itu berlangsung, perwakilan PKR di dewan itu dilihat ‘gelisah’ seperti ‘tidak senang duduk’.\n\nSekumpulan anggota PKR kemudian dilihat meninggalkan dewan di Pusat Konvensyen Antarabangsa Sabah di Kota Kinabalu selepas berbincang dengan ketua PKR Sabah Christina Liew.\n\nSemakan senarai-senarai calon berkenaan mendapati PKR hanya memperolehi separuh daripada jumlah kerusi yang diharapkan.\n\nSemalam, PKR Sabah mengumumkan akan bertanding di 14 kerusi tetapi ketika Presiden Warisan Shafie Apdal mengumumkan calon gabungan tersebut hari ini, PKR hanya diberikan tujuh kerusi untuk bertanding.\n\nKerusi yang diberikan adalah Api-Api, Inanam, Tempasuk, Tamparuli, Matunggong, Klias, dan Sook.\n\nKlias 
dan Sook adalah dua kerusi yang diberikan kepada PKR, sementara lima kerusi selebihnya pernah ditandingi oleh PKR pada pilihan raya umum 2018.\n\nDalam pengumuman PKR Sabah semalam, parti itu menjangkakan Warisan akan turut menyerahkan kerusi Kemabong, Membakut, dan Petagas kepada mereka.\n\nWalau bagaimanapun, Warisan menyerahkan kerusi Kemabong kepada Upko dan mengekalkan bertanding untuk kerusi Membakut dan Petagas.\n\nPKR juga menuntut empat daripada 13 kerusi baru yang diperkenalkan iaitu Segama, Limbahau, Sungai Manila, dan Pintasan tetapi Warisan membolot semua kerusi itu.\n\nSebagai pertukaran untuk kerusi yang diintainya, PKR bersedia untuk menyerahkan kerusi Kadaimaian, Kuala Penyu, dan Karanaan. Namun, ini dijangka tidak akan berlaku memandangkan parti tersebut tidak berpuas hati dengan agihan kerusi seperti yang diharapkan itu.\n\nSelepas perwakilan dari PKR dan Liew keluar dari dewan tersebut, wartawan kemudian menyusuri Liew untuk mendapatkan penjelasannya.\n\nWalau bagaimanapun, Liew enggan memberikan sebarang komen dan berkata bahawa dia ingin ke tandas.\n\nLiew dan perwakilan PKR kemudian tidak kembali ke dalam dewan tersebut.\n\nApabila calon pilihan raya yang diumumkan diminta naik ke atas pentas untuk sesi bergambar, Liew tidak kelihatan.\n\nBilangan kerusi yang ditandingi oleh PKR kali ini hanya kurang satu kerusi daripada yang ditandingi parti itu pada PRU 2018.\n\nDalam perkembangan berkaitan, DAP dan Amanah dikatakan tidak mempunyai sebarang masalah dengan kerusi yang diberikan untuk PRN Sabah.\n\nSementara itu, Presiden Upko Madius Tangau enggan mengulas adakah dia berpuas hati dengan agihan kerusi tersebut. 
Madius kekal di majlis tersebut sehingga ia berakhir.\n\nPartinya diberikan 12 kerusi, iaitu lebih tujuh kerusi berbanding PRU lalu.\n\nDAP dan Amanah akan bertanding di bawah logo Warisan sementara PKR dan Upko akan menggunakan logo masing-masing.\n\nDAP akan bertanding di tujuh kerusi, jumlah yang sama seperti yang mereka tandingi pada PRU lalu, sementara Amanah diberi satu kerusi.\n\nWarisan akan bertanding sebanyak 54 kerusi.\n\nPerkembangan terbaru ini mungkin mencetuskan pergeseran di antara PKR dan Warisan. PKR boleh memilih untuk bertanding di lebih banyak kerusi daripada 14 yang dituntutnya manakala Warisan juga boleh bertanding di kerusi sekutunya.\n\nBarisan pemimpin tertinggi PKR dan Warisan hanya mempunyai dua hari sebelum hari penamaan calon pada Sabtu untuk mengurangkan pergeseran.\n\"\"\"", "_____no_output_____" ], [ "string3 = \"\"\"\nPenubuhan universiti sukan seperti diutarakan Ketua Unit Sukan Kementerian Pengajian Tinggi, Dr Pekan Ramli dan disokong Pakar Pembangunan Sukan dan Reakreasi Luar, Universiti Pendidikan Sultan Idris (UPSI), Prof Dr Md Amin Md Taaf seperti disiarkan akhbar ini, memberikan sinar harapan kepada kewujudan institusi sedemikian.\n\nIa menjadi impian atlet negara untuk mengejar kejayaan dalam bidang sukan dan kecemerlangan dalam akademik untuk menjamin masa depan lebih baik apabila bersara daripada arena sukan kelak.\n\nPelbagai pandangan, idea, kaedah, bukti dan cadangan dilontarkan pakar berikutan pentingnya universiti sukan yang akan memberi impak besar sama ada pada peringkat kebangsaan mahupun antarabangsa.\n\nNegara lain sudah lama meraih laba dengan kewujudan universiti sukan seperti China, Korea, Japan, Taiwan, India dan Vietnam. Mereka menghasilkan atlet universiti yang mempamerkan keputusan cemerlang pada peringkat tinggi seperti Sukan Olimpik, Kejohanan Dunia dan Sukan Asia.\n\nJusteru, kejayaan mereka perlu dijadikan rujukan demi memajukan sukan tanah air. 
Jika kita merujuk pendekatan Asia, kewujudan universiti sukan penting dan memberi kesan positif dalam melonjakkan prestasi sukan lebih optimum.\n\nNamun, jika kita melihat pendekatan Eropah, universiti sukan bukan antara organisasi atau institusi penting yang diberi perhatian dalam menyumbang kepada pemenang pingat.\n\nAntara isu dalam universiti sukan ialah kos tinggi, lokasi, prasarana sukan, pertindihan kursus dengan universiti sedia ada dan impak terhadap dunia sukan negara hingga mengundang persoalan kewajaran dan kerelevanan penubuhannya.\n\nNamun sebagai bekas atlet memanah negara dan Olympian (OLY) di Sukan Olimpik 2004 di Athens, Greece serta bekas pelajar Sekolah Sukan Bukit Jalil hingga berjaya dalam dunia akademik, saya mendapati terdapat beberapa faktor sering menjadi halangan dalam rutin harian mereka.\n\nAntaranya, faktor masa yang terpaksa bergegas menghadiri kuliah selepas tamat sesi latihan yang mengambil masa 15 hingga 20 minit dengan menunggang motosikal; kereta (20-30 minit) atau pengangkutan disediakan Majlis Sukan Negara (MSN) ke Universiti Putra Malaysia (UPM).\n\nJika mereka menuntut di Universiti Teknologi MARA (UiTM) atau Universiti Malaya (UM), ia mungkin lebih lama.\n\nWalaupun di universiti tersedia dengan kemudahan kolej dan kemudahan sukan, mereka memilih pulang ke MSN untuk menjalani latihan bersama pasukan dan jurulatih di padang atau gelanggang latihan rasmi.\n\nIni berlanjutan selagi bergelar atlet negara yang perlu memastikan prestasi sentiasa meningkat dari semasa ke semasa tanpa mengabaikan tugas sebagai pelajar.\n\nAlangkah baiknya jika sebahagian Sekolah Sukan Bukit Jalil itu sendiri dijadikan Kolej atau Universiti Sukan Malaysia kerana lengkap dari segi kemudahan prasarana sukannya dan proses pengajaran dan pembelajaran (PdP) dalam bidang Sains Sukan, Kejurulatihan, Pendidikan Jasmani dan setaraf dengannya.\n\nPengambilan setiap semester pula hanya terhad kepada atlet berstatus kebangsaan dan antarabangsa sahaja supaya 
hasrat melahirkan lebih ramai atlet bertaraf Olimpik mudah direalisasikan.\n\nContohnya, bekas atlet lompat bergalah negara, Roslinda Samsu yang juga pemenang pingat perak Sukan Asia Doha 2006 dan Penerima Anugerah Khas Majlis Anugerah Sukan KPT 2012, terpaksa mengambil masa lebih kurang sembilan tahun untuk menamatkan ijazah Sarjana Muda Pendidikan Jasmani di UPM sepanjang 14 tahun terbabit dalam sukan olahraga.\n\nSepanjang tempoh bergelar atlet kebangsaan dan mahasiswa, beliau juga memenangi pingat Emas Sukan SEA empat siri berturut-turut pada 2005, 2007, 2009 dan 2011.\n\nBegitu juga atlet kebangsaan seperti Leong Mun Yee (UPM); Pandalela Renong (UM); Bryan Nickson Lomas (UM); Cheng Chu Sian (UPM); Marbawi Sulaiman (UiTM) dan Norasheela Khalid (UPM).\n\nJika disenaraikan, mungkin lebih ramai lagi. Namun, pernah terlintas di fikiran mengapa hanya atlet dari sukan terjun yang dapat memenangi pingat di Sukan Olimpik? Bagaimana dengan atlet lain yang juga layak secara merit? Apakah kekangan atau masalah dihadapi sebagai atlet dan mahasiswa?\n\nAdakah kewujudan universiti sukan akan memberi impak besar kepada kemajuan sukan negara? 
Jika dirancang dan diatur dengan cekap dan sistematik, ia perkara tidak mustahil dicapai.\n\"\"\"", "_____no_output_____" ], [ "cleaning(string2)", "_____no_output_____" ], [ "pad_sequences = tf.keras.preprocessing.sequence.pad_sequences", "_____no_output_____" ], [ "encoded = encoder.encode(f'ringkasan: {cleaning(string2)}') + [1]\ns = pad_sequences([encoded], padding='post', maxlen = 2048)", "_____no_output_____" ], [ "%%time\nl = sess.run(r[0][2], feed_dict = {X: s, top_p: 0.0, temperature: 0.0})", "CPU times: user 48.9 s, sys: 3.77 s, total: 52.7 s\nWall time: 3.93 s\n" ], [ "encoder.decode(l[0].tolist())", "_____no_output_____" ], [ "saver = tf.train.Saver(tf.trainable_variables())\nsaver.save(sess, 'output/model.ckpt')", "_____no_output_____" ], [ "strings = ','.join(\n [\n n.name\n for n in tf.get_default_graph().as_graph_def().node\n if ('Variable' in n.op\n or 'Placeholder' in n.name\n or 'top_p' in n.name\n or 'temperature' in n.name\n or 'logits' in n.name\n or 'alphas' in n.name\n or 'self/Softmax' in n.name)\n and 'adam' not in n.name\n and 'beta' not in n.name\n and 'global_step' not in n.name\n and 'gradients' not in n.name\n ]\n)\nstrings.split(',')", "_____no_output_____" ], [ "def freeze_graph(model_dir, output_node_names):\n\n if not tf.gfile.Exists(model_dir):\n raise AssertionError(\n \"Export directory doesn't exists. 
Please specify an export \"\n 'directory: %s' % model_dir\n )\n\n checkpoint = tf.train.get_checkpoint_state(model_dir)\n input_checkpoint = checkpoint.model_checkpoint_path\n\n absolute_model_dir = '/'.join(input_checkpoint.split('/')[:-1])\n output_graph = absolute_model_dir + '/frozen_model.pb'\n clear_devices = True\n with tf.Session(graph = tf.Graph()) as sess:\n saver = tf.train.import_meta_graph(\n input_checkpoint + '.meta', clear_devices = clear_devices\n )\n saver.restore(sess, input_checkpoint)\n output_graph_def = tf.graph_util.convert_variables_to_constants(\n sess,\n tf.get_default_graph().as_graph_def(),\n output_node_names.split(','),\n )\n with tf.gfile.GFile(output_graph, 'wb') as f:\n f.write(output_graph_def.SerializeToString())\n print('%d ops in the final graph.' % len(output_graph_def.node))", "_____no_output_____" ], [ "freeze_graph('output', strings)", "INFO:tensorflow:Restoring parameters from output/model.ckpt\n" ], [ "from tensorflow.tools.graph_transforms import TransformGraph", "_____no_output_____" ], [ "transforms = ['add_default_attributes',\n 'remove_nodes(op=Identity, op=CheckNumerics, op=Dropout)',\n 'fold_batch_norms',\n 'fold_old_batch_norms',\n 'quantize_weights(fallback_min=-10, fallback_max=10)',\n 'strip_unused_nodes',\n 'sort_by_execution_order']", "_____no_output_____" ], [ "pb = 'output/frozen_model.pb'\n\ninput_graph_def = tf.GraphDef()\nwith tf.gfile.FastGFile(pb, 'rb') as f:\n input_graph_def.ParseFromString(f.read())\n \ninputs = ['Placeholder', 'top_p', 'temperature']\ntransformed_graph_def = TransformGraph(input_graph_def, \n inputs,\n ['logits'], transforms)\n\nwith tf.gfile.GFile(f'{pb}.quantized', 'wb') as f:\n f.write(transformed_graph_def.SerializeToString())", "WARNING:tensorflow:From <ipython-input-38-f9c2d7850f78>:4: FastGFile.__init__ (from tensorflow.python.platform.gfile) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse tf.gfile.GFile.\n" ], [ "def 
load_graph(frozen_graph_filename, **kwargs):\n with tf.gfile.GFile(frozen_graph_filename, 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n\n # https://github.com/onnx/tensorflow-onnx/issues/77#issuecomment-445066091\n # to fix import T5\n for node in graph_def.node:\n if node.op == 'RefSwitch':\n node.op = 'Switch'\n for index in xrange(len(node.input)):\n if 'moving_' in node.input[index]:\n node.input[index] = node.input[index] + '/read'\n elif node.op == 'AssignSub':\n node.op = 'Sub'\n if 'use_locking' in node.attr:\n del node.attr['use_locking']\n elif node.op == 'AssignAdd':\n node.op = 'Add'\n if 'use_locking' in node.attr:\n del node.attr['use_locking']\n elif node.op == 'Assign':\n node.op = 'Identity'\n if 'use_locking' in node.attr:\n del node.attr['use_locking']\n if 'validate_shape' in node.attr:\n del node.attr['validate_shape']\n if len(node.input) == 2:\n node.input[0] = node.input[1]\n del node.input[1]\n\n with tf.Graph().as_default() as graph:\n tf.import_graph_def(graph_def)\n return graph\n", "_____no_output_____" ], [ "g = load_graph('output/frozen_model.pb')\nx = g.get_tensor_by_name('import/Placeholder:0')\ntop_p = g.get_tensor_by_name('import/top_p:0')\ntemperature = g.get_tensor_by_name('import/temperature:0')\nlogits = g.get_tensor_by_name('import/logits:0')\ntest_sess = tf.InteractiveSession(graph = g)", "_____no_output_____" ], [ "%%time\nl = test_sess.run(logits, feed_dict = {x: s, top_p: 0.0, temperature: 0.0})\nencoder.decode([i for i in l[0].tolist() if i > 0])", "CPU times: user 49.4 s, sys: 3.73 s, total: 53.1 s\nWall time: 5.24 s\n" ], [ "g = load_graph('output/frozen_model.pb.quantized')\nx = g.get_tensor_by_name('import/Placeholder:0')\ntop_p = g.get_tensor_by_name('import/top_p:0')\ntemperature = g.get_tensor_by_name('import/temperature:0')\nlogits = g.get_tensor_by_name('import/logits:0')\ntest_sess = tf.InteractiveSession(graph = g)", "_____no_output_____" ], [ "%%time\nl = 
test_sess.run(logits, feed_dict = {x: s, top_p: 0.0, temperature: 0.0})\nencoder.decode([i for i in l[0].tolist() if i > 0])", "CPU times: user 45.8 s, sys: 3.32 s, total: 49.1 s\nWall time: 7.56 s\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a2d0d3526fe37ea02030397fb8cbd497bf31cf9
404,987
ipynb
Jupyter Notebook
notebooks/EnsembleSkeletonsBeta1IPR2.ipynb
lazarusA/betaSkeletons
afcaeba1c449bb2f4e72f79661846776f07012a3
[ "MIT" ]
null
null
null
notebooks/EnsembleSkeletonsBeta1IPR2.ipynb
lazarusA/betaSkeletons
afcaeba1c449bb2f4e72f79661846776f07012a3
[ "MIT" ]
null
null
null
notebooks/EnsembleSkeletonsBeta1IPR2.ipynb
lazarusA/betaSkeletons
afcaeba1c449bb2f4e72f79661846776f07012a3
[ "MIT" ]
null
null
null
77.912082
230
0.594456
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
4a2d24cd2f5ee21910a79140659d13f993ddca6a
10,279
ipynb
Jupyter Notebook
Importing FAF matrices.ipynb
AequilibraE/examples_api
5d9652274b53a442a9d0a98542fdcc86b0741e32
[ "Apache-2.0" ]
2
2018-07-19T18:12:01.000Z
2020-08-14T08:04:25.000Z
Importing FAF matrices.ipynb
AequilibraE/examples_api
5d9652274b53a442a9d0a98542fdcc86b0741e32
[ "Apache-2.0" ]
null
null
null
Importing FAF matrices.ipynb
AequilibraE/examples_api
5d9652274b53a442a9d0a98542fdcc86b0741e32
[ "Apache-2.0" ]
null
null
null
34.844068
121
0.45987
[ [ [ "# Objective\nImport the FAF freight matrices provided with FAF into AequilibraE's matrix format\n\n## Input data\n\n* FAF: https://faf.ornl.gov/fafweb/\n* Matrices: https://faf.ornl.gov/fafweb/Data/FAF4.4_HiLoForecasts.zip\n* Zones System: http://www.census.gov/econ/cfs/AboutGeographyFiles/CFS_AREA_shapefile_010215.zip\n* FAF User Guide: https://faf.ornl.gov/fafweb/data/FAF4%20User%20Guide.pdf\n* The blog post (with data): http://www.xl-optim.com/matrix-api-and-multi-class-assignment", "_____no_output_____" ], [ "# The code\nWe import all libraries we will need, including the AequilibraE, after putting it in our Python path ", "_____no_output_____" ] ], [ [ "import sys\n# On Linux\n# sys.path.append('/home/pedro/.qgis2/python/plugins/AequilibraE')\n# On Windows\nsys.path.append('C:/Users/Pedro/.qgis2/python/plugins/AequilibraE')\nimport pandas as pd\nimport numpy as np\nimport os\nfrom aequilibrae.matrix import AequilibraeMatrix\nfrom scipy.sparse import coo_matrix", "_____no_output_____" ] ], [ [ "Now we set all the paths for files and parameters we need", "_____no_output_____" ] ], [ [ "data_folder = 'Y:/ALL DATA/DATA/Pedro/Professional/Data/USA/FAF/4.4'\ndata_file = 'FAF4.4_HiLoForecasts.csv'\nsctg_names_file = 'sctg_codes.csv' # Simplified to 50 characters, which is AequilibraE's limit\noutput_folder = data_folder", "_____no_output_____" ] ], [ [ "We import the the matrices", "_____no_output_____" ] ], [ [ "matrices = pd.read_csv(os.path.join(data_folder, data_file), low_memory=False)\nprint matrices.head(10)\n", " fr_orig dms_orig dms_dest fr_dest fr_inmode dms_mode fr_outmode sctg2 \\\n0 11 11 1 1 \n1 11 19 1 1 \n2 11 129 1 1 \n3 11 131 1 1 \n4 11 139 1 1 \n5 11 280 1 1 \n6 11 379 1 1 \n7 11 472 1 1 \n8 11 479 1 1 \n9 12 12 1 1 \n\n trade_type tons_2012 ... tmiles_2015 tmiles_2020 tmiles_2025 \\\n0 1 17.3922 ... 0.3817 0.4318 0.4791 \n1 1 220.0650 ... 25.7995 27.4014 28.5980 \n2 1 1.0854 ... 0.5211 0.5033 0.4917 \n3 1 6.1655 ... 
1.4469 1.3902 1.4049 \n4 1 2.7483 ... 0.7550 0.6872 0.6569 \n5 1 69.7733 ... 16.8310 16.5863 15.8050 \n6 1 7.1994 ... 3.0364 2.8807 2.6488 \n7 1 2.0029 ... 0.3240 0.3106 0.2884 \n8 1 23.1675 ... 4.8585 4.6936 4.3909 \n9 1 13.2773 ... 0.3417 0.3951 0.4391 \n\n tmiles_2030 tmiles_2035 tmiles_2040 tmiles_2045 curval_2013 \\\n0 0.5265 0.5854 0.6449 0.6656 36.7205 \n1 29.6591 30.9511 32.5587 33.8550 283.6623 \n2 0.4819 0.4688 0.4860 0.5272 1.3439 \n3 1.4619 1.5058 1.5015 1.6357 10.7475 \n4 0.6488 0.6353 0.6072 0.6476 4.5875 \n5 15.2127 14.8790 14.8032 15.5691 113.2926 \n6 2.5256 2.5153 2.4165 2.5699 6.5644 \n7 0.2698 0.2553 0.2457 0.2521 4.1936 \n8 4.2165 4.1382 4.0512 4.2904 17.5325 \n9 0.4800 0.5261 0.5729 0.6069 51.1500 \n\n curval_2014 curval_2015 \n0 46.3411 42.7443 \n1 333.8676 301.8007 \n2 1.4783 1.3083 \n3 10.9878 9.6642 \n4 4.5780 3.9517 \n5 98.2313 84.4300 \n6 7.2946 6.4304 \n7 4.4779 3.9434 \n8 19.1832 16.9555 \n9 62.3887 57.5727 \n\n[10 rows x 66 columns]\n" ] ], [ [ "We import the sctg codes", "_____no_output_____" ] ], [ [ "sctg_names = pd.read_csv(os.path.join(data_folder, sctg_names_file), low_memory=False)\nsctg_names.set_index('Code', inplace=True)\nsctg_descr = list(sctg_names['Commodity Description'])\nprint sctg_names.head(5)\n\n", " Commodity Description\nCode \n1 Animals and Fish (live)\n2 Cereal Grains (includes seed)\n3 Agricultural Products\n4 Animal Feed, Eggs, Honey, Prod of Animal Origin\n5 Meat, Poultry, Fish, Seafood, and Preparations\n" ] ], [ [ "We now process the matrices to collect all the data we need, such as:\n* the list of zones\n* CSTG codes\n* Matrices/scenarios we are importing", "_____no_output_____" ] ], [ [ "# lists the zones\nall_zones = np.array(sorted(list(set( list(matrices.dms_orig.unique()) + list(matrices.dms_dest.unique())))))\n\n# Count them and create a 0-based index\nnum_zones = all_zones.shape[0]\nidx = np.arange(num_zones)\n\n# Creates the indexing dataframes\norigs = pd.DataFrame({\"from_index\": all_zones, 
\"from\":idx})\ndests = pd.DataFrame({\"to_index\": all_zones, \"to\":idx})\n\n# adds the new index columns to the pandas dataframe\nmatrices = matrices.merge(origs, left_on='dms_orig', right_on='from_index', how='left')\nmatrices = matrices.merge(dests, left_on='dms_dest', right_on='to_index', how='left')\n\n# Lists sctg codes and all the years/scenarios we have matrices for\nmat_years = [x for x in matrices.columns if 'tons' in x]\nsctg_codes = matrices.sctg2.unique()\n\n", "_____no_output_____" ] ], [ [ "We now import one matrix for each year, saving all the SCTG codes as different matrix cores in our zoning system\n\n", "_____no_output_____" ] ], [ [ "# aggregate the matrix according to the relevant criteria\nagg_matrix = matrices.groupby(['from', 'to', 'sctg2'])[mat_years].sum()\n\n# returns the indices\nagg_matrix.reset_index(inplace=True)\n\n\nfor y in mat_years:\n mat = AequilibraeMatrix()\n \n kwargs = {'file_name': os.path.join(output_folder, y + '.aem'),\n 'zones': num_zones,\n 'matrix_names': sctg_descr}\n \n mat.create_empty(**kwargs)\n mat.index[:] = all_zones[:]\n # for all sctg codes\n for i in sctg_names.index:\n prod_name = sctg_names['Commodity Description'][i]\n mat_filtered_sctg = agg_matrix[agg_matrix.sctg2 == i]\n \n m = coo_matrix((mat_filtered_sctg[y], (mat_filtered_sctg['from'], mat_filtered_sctg['to'])),\n shape=(num_zones, num_zones)).toarray().astype(np.float64)\n \n mat.matrix[prod_name][:,:] = m[:,:]\n \n ", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
4a2d3412116f3657076931418eff445c7cf4ae0f
170,759
ipynb
Jupyter Notebook
House Prices_Test_1.ipynb
malavika8/HousePricePrediction
aacf984e80c5bddf4f0876eb9e369d2b2bfad5d9
[ "BSD-2-Clause" ]
null
null
null
House Prices_Test_1.ipynb
malavika8/HousePricePrediction
aacf984e80c5bddf4f0876eb9e369d2b2bfad5d9
[ "BSD-2-Clause" ]
null
null
null
House Prices_Test_1.ipynb
malavika8/HousePricePrediction
aacf984e80c5bddf4f0876eb9e369d2b2bfad5d9
[ "BSD-2-Clause" ]
null
null
null
83.41915
58,096
0.755691
[ [ [ "## House Prices: Advanced Regression Techniques : Kaggle Competition", "_____no_output_____" ], [ "### Import Libraries", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns", "_____no_output_____" ] ], [ [ "### Import Data", "_____no_output_____" ] ], [ [ "test_df=pd.read_csv('test.csv')", "_____no_output_____" ], [ "test_df.head()", "_____no_output_____" ], [ "test_df.shape", "_____no_output_____" ] ], [ [ "### Step1: Check for missing values", "_____no_output_____" ] ], [ [ "fig, ax = plt.subplots(figsize=(20,5)) # To change fig shape for better representation\nsns.heatmap(test_df.isnull(),yticklabels=False,cbar=False, ax=ax)", "_____no_output_____" ], [ "def missing_zero_values_table(dataframe):\n zero_val = (dataframe == 0.00).astype(int).sum(axis=0)\n mis_val = dataframe.isnull().sum()\n mis_val_percent = 100 * dataframe.isnull().sum() / len(dataframe)\n mz_table = pd.concat([zero_val, mis_val, mis_val_percent], axis=1)\n mz_table = mz_table.rename(\n columns = {0 : 'Zero Values', 1 : 'Missing Values', 2 : '% of Total Values'})\n zero_val = (dataframe == 0.00).astype(int).sum(axis=0)\n mis_val = dataframe.isnull().sum()\n mis_val_percent = 100 * dataframe.isnull().sum() / len(dataframe)\n mz_table = pd.concat([zero_val, mis_val, mis_val_percent], axis=1)\n mz_table = mz_table.rename(\n columns = {0 : 'Zero Values', 1 : 'Missing Values', 2 : '% of Total Values'})\n mz_table['Data Type'] = dataframe.dtypes\n mz_table = mz_table[\n mz_table.iloc[:,1] != 0].sort_values(\n '% of Total Values', ascending=False).round(1)\n print (\"Your selected dataframe has \" + str(dataframe.shape[1]) + \" columns and \" + str(dataframe.shape[0]) + \" Rows.\\n\" \n \"There are \" + str(mz_table.shape[0]) +\n \" columns that have missing values.\")\n return mz_table", "_____no_output_____" ], [ "missing_zero_values_table(test_df)", "Your selected dataframe has 80 columns and 1459 Rows.\nThere are 33 
columns that have missing values.\n" ] ], [ [ "### Step 2: Filling Missing values and droping columns whose missing >70%", "_____no_output_____" ] ], [ [ "# droping columns whose missing >70%\n# this will remain same as in Train\ntest_df.drop(['PoolQC','MiscFeature','Alley','Fence'],axis=1,inplace=True)", "_____no_output_____" ], [ "missing_zero_values_table(test_df)", "Your selected dataframe has 76 columns and 1459 Rows.\nThere are 29 columns that have missing values.\n" ] ], [ [ "#### Handling Missing data : categorical data with MODE & numerical data with MEAN", "_____no_output_____" ] ], [ [ "test_df['FireplaceQu'].value_counts()", "_____no_output_____" ], [ "test_df['FireplaceQu'].fillna(value='Gd', inplace=True) ", "_____no_output_____" ], [ "test_df['LotFrontage'].mean()", "_____no_output_____" ], [ "test_df['LotFrontage'].fillna(value=70.05, inplace=True)", "_____no_output_____" ], [ "test_df['GarageType'].value_counts()", "_____no_output_____" ], [ "test_df['GarageType'].fillna(value='Attchd', inplace=True) ", "_____no_output_____" ], [ "test_df['GarageYrBlt'].value_counts()", "_____no_output_____" ], [ "test_df['GarageYrBlt'].fillna(value=2005, inplace=True) ", "_____no_output_____" ], [ "test_df['GarageFinish'].value_counts()", "_____no_output_____" ], [ "test_df['GarageFinish'].fillna(value='Unf', inplace=True) ", "_____no_output_____" ], [ "test_df['GarageQual'].value_counts()", "_____no_output_____" ], [ "test_df['GarageQual'].fillna(value='TA', inplace=True) ", "_____no_output_____" ], [ "test_df['GarageCond'].value_counts()", "_____no_output_____" ], [ "test_df['GarageCond'].fillna(value='TA', inplace=True) ", "_____no_output_____" ], [ "test_df['BsmtExposure'].value_counts()", "_____no_output_____" ], [ "test_df['BsmtExposure'].fillna(value='No', inplace=True)", "_____no_output_____" ], [ "test_df['BsmtFinType1'].value_counts()", "_____no_output_____" ], [ "test_df['BsmtFinType1'].fillna(value='Unf', inplace=True)", "_____no_output_____" ], [ 
"test_df['BsmtFinType2'].value_counts()", "_____no_output_____" ], [ "test_df['BsmtFinType2'].fillna(value='Unf', inplace=True)", "_____no_output_____" ], [ "test_df['BsmtCond'].value_counts()", "_____no_output_____" ], [ "test_df['BsmtCond'].fillna(value='TA', inplace=True)", "_____no_output_____" ], [ "test_df['BsmtQual'].value_counts()", "_____no_output_____" ], [ "test_df['BsmtQual'].fillna(value='TA', inplace=True)", "_____no_output_____" ], [ "test_df['MasVnrArea'].mean()", "_____no_output_____" ], [ "test_df['MasVnrArea'].fillna(value=103.6, inplace=True)", "_____no_output_____" ], [ "test_df['MasVnrType'].value_counts()", "_____no_output_____" ], [ "test_df['MasVnrType'].fillna(value='None', inplace=True)", "_____no_output_____" ], [ "test_df['Electrical'].value_counts()", "_____no_output_____" ], [ "test_df['Electrical'].fillna(value='SBrkr', inplace=True)", "_____no_output_____" ], [ "test_df.shape", "_____no_output_____" ], [ "missing_zero_values_table(test_df)", "Your selected dataframe has 76 columns and 1459 Rows.\nThere are 15 columns that have missing values.\n" ], [ "#df.drop(['Id'],axis=1,inplace=True)", "_____no_output_____" ], [ "missing_zero_values_table(test_df)", "Your selected dataframe has 76 columns and 1459 Rows.\nThere are 15 columns that have missing values.\n" ], [ 
"test_df['Utilities']=test_df['Utilities'].fillna(test_df['Utilities'].mode()[0])\ntest_df['Exterior1st']=test_df['Exterior1st'].fillna(test_df['Exterior1st'].mode()[0])\ntest_df['Exterior2nd']=test_df['Exterior2nd'].fillna(test_df['Exterior2nd'].mode()[0])\ntest_df['BsmtFinType1']=test_df['BsmtFinType1'].fillna(test_df['BsmtFinType1'].mode()[0])\ntest_df['BsmtFinSF1']=test_df['BsmtFinSF1'].fillna(test_df['BsmtFinSF1'].mean())\ntest_df['BsmtFinSF2']=test_df['BsmtFinSF2'].fillna(test_df['BsmtFinSF2'].mean())\ntest_df['BsmtUnfSF']=test_df['BsmtUnfSF'].fillna(test_df['BsmtUnfSF'].mean())\ntest_df['TotalBsmtSF']=test_df['TotalBsmtSF'].fillna(test_df['TotalBsmtSF'].mean())\ntest_df['BsmtFullBath']=test_df['BsmtFullBath'].fillna(test_df['BsmtFullBath'].mode()[0])\ntest_df['BsmtHalfBath']=test_df['BsmtHalfBath'].fillna(test_df['BsmtHalfBath'].mode()[0])\ntest_df['KitchenQual']=test_df['KitchenQual'].fillna(test_df['KitchenQual'].mode()[0])\ntest_df['Functional']=test_df['Functional'].fillna(test_df['Functional'].mode()[0])\ntest_df['GarageCars']=test_df['GarageCars'].fillna(test_df['GarageCars'].mean())\ntest_df['GarageArea']=test_df['GarageArea'].fillna(test_df['GarageArea'].mean())\ntest_df['SaleType']=test_df['SaleType'].fillna(test_df['SaleType'].mode()[0])", "_____no_output_____" ], [ "fig, ax = plt.subplots(figsize=(20,5)) \nsns.heatmap(test_df.isnull(),yticklabels=False,cbar=False,cmap='YlGnBu',ax=ax)", "_____no_output_____" ], [ "missing_zero_values_table(test_df)", "Your selected dataframe has 76 columns and 1459 Rows.\nThere are 1 columns that have missing values.\n" ], [ "test_df.shape", "_____no_output_____" ], [ "test_df.to_csv('cleaned_test.csv',index=False)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a2d35bf0961f83c4cc924dc05dfedfa28e1c755
232,866
ipynb
Jupyter Notebook
tutorial/notebooks/intro_task_state.ipynb
effigies/pydra
f5c43ec2dfeafad78c3cd8361afaa9658b95da4e
[ "Apache-2.0" ]
null
null
null
tutorial/notebooks/intro_task_state.ipynb
effigies/pydra
f5c43ec2dfeafad78c3cd8361afaa9658b95da4e
[ "Apache-2.0" ]
null
null
null
tutorial/notebooks/intro_task_state.ipynb
effigies/pydra
f5c43ec2dfeafad78c3cd8361afaa9658b95da4e
[ "Apache-2.0" ]
null
null
null
326.142857
37,588
0.93142
[ [ [ "## Introduction to Tasks with States\n\nTask might be run for a single set of input values or we can generate multiple sets, that will be called \"states\". If we want to run our `Task` multiple times we have to provide input that is iterable and specify the way we want to map values of the inputs to the specific states. In order to do it, we set so-called `splitter`. \n\nLet's start from a simple `FunctionTask` that takes a list as an input:", "_____no_output_____" ] ], [ [ "import pydra\n\[email protected]\ndef add_two(x):\n return x + 2\n\ntask1 = add_two(x=[1, 2, 3])", "_____no_output_____" ] ], [ [ "Before we set any splitter, the task's `state` should be `None`", "_____no_output_____" ] ], [ [ "task1.state is None", "_____no_output_____" ] ], [ [ "We can now check the results of our task:", "_____no_output_____" ], [ "Now, we can set the `splitter` by using the `split` method. Since our task has only one input, there is only one option to create set of inputs, i.e. `splitter=\"x\"`:", "_____no_output_____" ] ], [ [ "task1.split(splitter=\"x\")", "_____no_output_____" ] ], [ [ "Now, we can check that our task has a `state`:", "_____no_output_____" ] ], [ [ "task1.state", "_____no_output_____" ] ], [ [ "And if we can print information about our state", "_____no_output_____" ] ], [ [ "print(task1.state)", "_____no_output_____" ] ], [ [ "within the `state` information about the splitter has been stored: ", "_____no_output_____" ] ], [ [ "task1.state.splitter", "_____no_output_____" ] ], [ [ "Note, that *pydra* adds name of the function to the name of the input.\n\nNow, we can run the task and check results:", "_____no_output_____" ] ], [ [ "task1()\ntask1.result()", "_____no_output_____" ] ], [ [ "This time, we got a list that contains three `Result` objects, one for each value of `x`.\n\nFor tasks with a state *pydra* prepare all sets of inputs and run the task for each of the set. 
We could simply represent this by the following figure:", "_____no_output_____" ], [ "![nd_spl_1.png](attachment:nd_spl_1.png)", "_____no_output_____" ], [ "### Multiple inputs\n\nWe can also use `State` for functions with multiple inputs:", "_____no_output_____" ] ], [ [ "@pydra.mark.task\ndef add_var(a, b):\n return a + b", "_____no_output_____" ] ], [ [ "Now we have more options to define `splitter`, it depends on the type of inputs and on our application. For example, we could have `a` that is a list and `b` that is a single value:", "_____no_output_____" ] ], [ [ "task2 = add_var(a=[1, 2, 3], b=10).split(splitter=\"a\")\ntask2()\ntask2.result()", "_____no_output_____" ] ], [ [ "Now we have three results for each element from the `a` list and the value of `b` is always the same. ", "_____no_output_____" ], [ "![nd_spl_2.png](attachment:nd_spl_2.png)", "_____no_output_____" ], [ "But we can have lists for both inputs, let's assume that `a` and `b` are two elements lists.", "_____no_output_____" ] ], [ [ "task3 = add_var(a=[1, 2], b=[10, 100])", "_____no_output_____" ] ], [ [ "Now, we have two options to map the input values, we might want to run the task for two sets of values: (`a`=1, `b`=10) and (`a`=2, `b`=100), or we might want to run the task for four sets: (`a`=1, `b`=10), (`a`=1, `b`=100), (`a`=2, `b`=10) and (`a`=2, `b`=100). \n\nThe first situation will be represented by the so-called \"scalar\" splitter, the later by the so-called \"outer\" splitter. ", "_____no_output_____" ], [ "#### Scalar splitter\n\nLet's start from the scalar splitter, that uses parentheses in the syntax:", "_____no_output_____" ] ], [ [ "task3.split(splitter=(\"a\", \"b\"))\ntask3()\ntask3.result()", "_____no_output_____" ] ], [ [ "As we expected, we have two outputs: `1+10=11` and `2+100=102`. 
\n\nWe can represent the execution by the graph:", "_____no_output_____" ], [ "![nd_spl_4.png](attachment:nd_spl_4.png)", "_____no_output_____" ], [ "#### Outer splitter\n\nFor the outer splitter we will use brackets:", "_____no_output_____" ] ], [ [ "task4 = add_var(a=[1, 2], b=[10, 100])\ntask4.split(splitter=[\"a\", \"b\"])\ntask4()\ntask4.result()", "_____no_output_____" ] ], [ [ "Now, we have results for all of the combinations of values from `a` and `b`.", "_____no_output_____" ], [ "![nd_spl_3.png](attachment:nd_spl_3.png)", "_____no_output_____" ], [ "For more inputs we can create more complex splitter, and use scalar and outer splitters together. Note, that the scalar splitter can only work for lists that have the same length, but the outer splitter doesn't have this limitation. \n\nLet's run one more example that takes four inputs, `x` and `y` components of two vectors, and calculates all possible sums of vectors. `x` components should be kept together with corresponding `y` components (i.e. 
scalar splitters: `(\"x1\", \"y1\")` and `(\"x2\", \"y2\")`), but we should use outer splitter for two vectors to get all combinations.", "_____no_output_____" ] ], [ [ "@pydra.mark.task\ndef add_vector(x1, y1, x2, y2):\n return (x1 + x2, y1 + y2)\n\ntask5 = add_vector(name=\"add_vect\", output_names=[\"x\", \"y\"], \n x1=[10, 20], y1=[1, 2], x2=[10, 20, 30], y2=[10, 20, 30])\ntask5.split(splitter=[(\"x1\", \"y1\"), (\"x2\", \"y2\")])\ntask5()\ntask5.result()", "_____no_output_____" ] ], [ [ "We should get six outputs: two elements for vector1 times three elements for vector2.", "_____no_output_____" ], [ "### Combining the output\n\nWhen we use `splitter`, we can also define `combiner`, if we want to combine together the results.\n\nIf we take the `task4` as an example and combine all results for each element of the input `b`, we can modify the task as follows:", "_____no_output_____" ] ], [ [ "task5 = add_var(a=[1, 2], b=[10, 100])\ntask5.split(splitter=[\"a\", \"b\"])\n# adding combiner\ntask5.combine(combiner=\"b\")\ntask5()\ntask5.result()", "_____no_output_____" ] ], [ [ "Now our result contains two elements, each one is a list. 
The first one contains results for `a=1` and both values of `b`, and the second contains results for `a=2` and both values of `b`.", "_____no_output_____" ], [ "![nd_spl_3_comb1.png](attachment:nd_spl_3_comb1.png)", "_____no_output_____" ], [ "But we could also group all elements from the input `a` and have a different combined output:", "_____no_output_____" ] ], [ [ "task6 = add_var(a=[1, 2], b=[10, 100])\ntask6.split(splitter=[\"a\", \"b\"])\n# changing the combiner\ntask6.combine(combiner=\"a\")\ntask6()\ntask6.result()", "_____no_output_____" ] ], [ [ "We still have two elements in our results, but this time the first element contains results for `b=10` and both values of `a`, and the second contains results for `b=100` and both values of `a`.", "_____no_output_____" ], [ "![nd_spl_3_comb2.png](attachment:nd_spl_3_comb2.png)", "_____no_output_____" ], [ "We can also combine all elements by providing a list of all inputs to the `combiner`:", "_____no_output_____" ] ], [ [ "task7 = add_var(a=[1, 2], b=[10, 100])\ntask7.split(splitter=[\"a\", \"b\"])\n# combining all inputs\ntask7.combine(combiner=[\"a\", \"b\"])\ntask7()\ntask7.result()", "_____no_output_____" ] ], [ [ "This time the output contains one element that is a list of all outputs:", "_____no_output_____" ], [ "![nd_spl_3_comb3.png](attachment:nd_spl_3_comb3.png)", "_____no_output_____" ], [ "### Exercise 1\n\nLet's say we want to calculate squares and cubes of integers from 2 to 5, and combine separately all squares and all cubes:", "_____no_output_____" ], [ "First we will define a function that returns powers:", "_____no_output_____" ] ], [ [ "@pydra.mark.task\ndef power(x, n):\n return x**n", "_____no_output_____" ] ], [ [ "Now we can create a task that takes two lists as its input, outer splitter for `x` and `n`, and combine all `x`: ", "_____no_output_____" ] ], [ [ "task_ex1 = power(x=[2, 3, 4, 5], n=[2, 3]).split(splitter=[\"x\", 
\"n\"]).combine(\"x\")\ntask_ex1(plugin=\"cf\")\ntask_ex1.result()", "_____no_output_____" ] ], [ [ "The result should contain two list, the first one is for squares, the second for cubes.", "_____no_output_____" ] ], [ [ "squares_list = [el.output.out for el in task_ex1.result()[0]]\ncubes_list = [el.output.out for el in task_ex1.result()[1]]\nprint(f\"squares: {squares_list}\")\nprint(f\"cubes: {cubes_list}\")", "_____no_output_____" ] ], [ [ "### Parallel execution\n\nWe run task multiple times for multiple sets of input, but we didn't talk about the execution time. Let's create a function that sleeps for a second and run for four values:", "_____no_output_____" ] ], [ [ "import time\n\[email protected]\ndef add_two_sleep(x):\n time.sleep(1)\n return x + 2\n\ntask8 = add_two_sleep(x=[1, 2, 3, 4]).split(splitter=\"x\")\nt0 = time.time()\ntask8()\nprint(f'total time: {time.time() - t0}')\ntask8.result()", "_____no_output_____" ] ], [ [ "The total time will depend on the machine you are using, but it could be below `1.1s`, so clearly the tasks are running in parallel!\n\nIf we run `Task` that has a `State`, pydra will automatically create a `Submitter` with a default `Worker` that is `cf`, i.e. 
`ConcurrentFutures`.\n\nWe could also create a `Submitter` first, and than use it to run the task:", "_____no_output_____" ] ], [ [ "task9 = add_two_sleep(x=[1, 2, 3, 4]).split(splitter=\"x\")\n\nt0 = time.time()\nwith pydra.Submitter(plugin=\"cf\") as sub:\n task9(submitter=sub)\nprint(f'total time: {time.time() - t0}')\nprint(f\"results: {task9.result()}\")\n", "_____no_output_____" ] ], [ [ "or we can provide the name of the plugin:", "_____no_output_____" ] ], [ [ "task10 = add_two_sleep(x=[1, 2, 3, 4]).split(splitter=\"x\")\n\nt0 = time.time()\ntask10(plugin=\"cf\")\nprint(f'total time: {time.time() - t0}')\nprint(f\"results: {task10.result()}\")\n", "_____no_output_____" ] ], [ [ "The last option for running the task is to create a `Submitter` first and run the submitter (`Submitter` is also a callable object) with the task as a `runnable`:", "_____no_output_____" ] ], [ [ "task11 = add_two_sleep(x=[1, 2, 3, 4]).split(splitter=\"x\")\n\nt0 = time.time()\nwith pydra.Submitter(plugin=\"cf\") as sub:\n sub(runnable=task11)\nprint(f'total time: {time.time() - t0}')\nprint(f\"results: {task11.result()}\")\n", "_____no_output_____" ] ], [ [ "All of the execution time should be similar, since all taska are run by *pydra* in the same way, i.e. *pydra* creates a submitter with `ConcurrentFutures` worker, if a number of processors is not provided, `ConcurrentFutures` takes all available processors as `max_workers`. However, if we want to set a specific number of processors, we can set it using `n_procs` when creating a `Submitter`. Let's see how the execution time changes when we use `n_procs=2`.\n", "_____no_output_____" ] ], [ [ "task12 = add_two_sleep(x=[1, 2, 3, 4]).split(splitter=\"x\")\n\nt0 = time.time()\nwith pydra.Submitter(plugin=\"cf\", n_procs=2) as sub:\n sub(runnable=task12)\nprint(f'total time: {time.time() - t0}')\nprint(f\"results: {task11.result()}\")\n", "_____no_output_____" ] ], [ [ "Now, the total time could be significantly different. 
For example, if your machine has at least 4 processors, the previous `tasks8` - `task11` took around 1s to run, but the task12 took around 2s.\nIf you have 2 processors or less, you should not see any difference in the execution time.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
4a2d48446487bb45d988197879cff9c69a0da16c
5,711
ipynb
Jupyter Notebook
Text Classification/Yelp review classification using NLTK.ipynb
bitaan/ML-Projects-101
197262a4223a1c1d4dc9e7efc18d8964d64f4f59
[ "MIT" ]
null
null
null
Text Classification/Yelp review classification using NLTK.ipynb
bitaan/ML-Projects-101
197262a4223a1c1d4dc9e7efc18d8964d64f4f59
[ "MIT" ]
null
null
null
Text Classification/Yelp review classification using NLTK.ipynb
bitaan/ML-Projects-101
197262a4223a1c1d4dc9e7efc18d8964d64f4f59
[ "MIT" ]
null
null
null
24.722944
158
0.545088
[ [ [ "import json as j\nimport pandas as pd\nimport re\nimport numpy as np\nfrom nltk.corpus import stopwords\nfrom nltk.stem import SnowballStemmer\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.svm import LinearSVC\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.feature_selection import SelectKBest, chi2", "_____no_output_____" ], [ "json_data = None\n\nwith open('yelp_academic_dataset_review.json') as data_file:\n\n lines = data_file.readlines()\n joined_lines = \"[\" + \",\".join(lines) + \"]\"\n\n json_data = j.loads(joined_lines)", "_____no_output_____" ], [ "import nltk\nnltk.download('stopwords')", "[nltk_data] Downloading package stopwords to\n[nltk_data] C:\\Users\\Bitaan\\AppData\\Roaming\\nltk_data...\n[nltk_data] Unzipping corpora\\stopwords.zip.\n" ], [ "data = pd.DataFrame(json_data)\n\nstemmer = SnowballStemmer('english')\nwords = stopwords.words(\"english\")", "_____no_output_____" ], [ "data['cleaned'] = data['text'].apply(lambda x: \" \".join([stemmer.stem(i) for i in re.sub(\"[^a-zA-Z]\", \" \", x).split() if i not in words]).lower())", "_____no_output_____" ], [ "X_train, X_test, y_train, y_test = train_test_split(data['cleaned'], data.stars, test_size=0.2)", "_____no_output_____" ], [ "pipeline = Pipeline([('vect', TfidfVectorizer(ngram_range=(1, 2), stop_words=\"english\", sublinear_tf=True)),\n ('chi', SelectKBest(chi2, k=10000)),\n ('clf', LinearSVC(C=1.0, penalty='l1', max_iter=3000, dual=False))])", "_____no_output_____" ], [ "model = pipeline.fit(X_train, y_train)\n\nvectorizer = model.named_steps['vect']\nchi = model.named_steps['chi']\nclf = model.named_steps['clf']", "_____no_output_____" ], [ "feature_names = vectorizer.get_feature_names()\nfeature_names = [feature_names[i] for i in chi.get_support(indices=True)]\nfeature_names = np.asarray(feature_names)", "_____no_output_____" ], [ "target_names = ['1', '2', '3', '4', '5']\nprint(\"top 10 
keywords per class:\")", "top 10 keywords per class:\n" ], [ "for i, label in enumerate(target_names):\n top10 = np.argsort(clf.coef_[i])[-10:]\n print(\"%s: %s\" % (label, \" \".join(feature_names[top10])))", "1: disgust bogus revolt singl star place joke horribl nose dive zero star worst negat star\n2: nd star particular clean respond state half experi experienc better meh experienc chase waiter turn faucet left movi second star\n3: mean ok overal ok unfortun bread just expect fell littl torn place noth blew border close cool unfortun definit ok\n4: lose star th star knock star margarita nice reason star took star thing star yes fan fifth star yay fan\n5: guy rock woohoo good equal amaz penzey redseven amaz favor tri best worth everi stop read\n" ], [ "print(\"accuracy score: \" + str(model.score(X_test, y_test)))", "accuracy score: 0.5814449132269148\n" ], [ "print(model.predict(['that was an awesome place. Great food!']))", "[5]\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a2d498c3a2acacc87b184c3e2f5f23d5296a6b0
3,558
ipynb
Jupyter Notebook
src/graph_notebook/notebooks/01-Getting-Started/03-Using-RDF-and-SPARQL-to-Access-the-Graph.ipynb
joywa/graph-notebook
2c55b4fb5b6fb3c3205d0786a45a9101a44288a4
[ "ISC", "Apache-2.0", "CC0-1.0" ]
null
null
null
src/graph_notebook/notebooks/01-Getting-Started/03-Using-RDF-and-SPARQL-to-Access-the-Graph.ipynb
joywa/graph-notebook
2c55b4fb5b6fb3c3205d0786a45a9101a44288a4
[ "ISC", "Apache-2.0", "CC0-1.0" ]
null
null
null
src/graph_notebook/notebooks/01-Getting-Started/03-Using-RDF-and-SPARQL-to-Access-the-Graph.ipynb
joywa/graph-notebook
2c55b4fb5b6fb3c3205d0786a45a9101a44288a4
[ "ISC", "Apache-2.0", "CC0-1.0" ]
null
null
null
31.486726
379
0.61973
[ [ [ "Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\nSPDX-License-Identifier: Apache-2.0", "_____no_output_____" ], [ "# Using RDF and SPARQL to Access the Graph\n\nSPARQL is a query language for the Resource Description Framework (RDF), which is a graph data format designed for the web. Amazon Neptune is compatible with SPARQL 1.1. This means that you can connect to a Neptune DB instance and query the graph using the query language described in the [SPARQL 1.1 Query Language](https://www.w3.org/TR/sparql11-query/) specification.\n\nA query in SPARQL consists of a SELECT clause to specify the variables to return and a WHERE clause to specify which data to match in the graph. If you are unfamiliar with SPARQL queries, see [Writing Simple Queries](https://www.w3.org/TR/sparql11-query/#WritingSimpleQueries) in the [SPARQL 1.1 Query Language](https://www.w3.org/TR/sparql11-query/).", "_____no_output_____" ], [ "The HTTP endpoint for SPARQL queries to a Neptune DB instance is https://your-neptune-endpoint:port/sparql.\n\nIssue the below SPARQL UPDATE using the `%%sparql` magic ", "_____no_output_____" ] ], [ [ "%%sparql\n\nINSERT DATA { <https://test.com/s> <https://test.com/p> <https://test.com/o> . }", "_____no_output_____" ] ], [ [ "Enter the following to submit a SPARQL QUERY using the `%%sparql` magic:", "_____no_output_____" ] ], [ [ "%%sparql\nSELECT ?s ?p ?o WHERE {?s ?p ?o} LIMIT 10", "_____no_output_____" ] ], [ [ "The preceding example returns up to 10 of the triples (subject-predicate-object) in the graph by using the ?s ?p ?o query with a limit of 10. 
To query for something else, replace it with another SPARQL query.\n\n**Note**\n\nThe default MIME type of a response is application/sparql-results+json for SELECT and ASK queries.\n\nThe default MIME type of a response is application/n-quads for CONSTRUCT and DESCRIBE queries.\n\nFor a list of all available MIME types, see [SPARQL HTTP API](https://docs.aws.amazon.com/neptune/latest/userguide/sparql-api-reference.html).", "_____no_output_____" ], [ "**What's next?**\n\n\nNow that you've tried your hand at SPARQL queries, take your learning to the next level with these datasets:\n\n\n[Explore English Premier League using SPARQL](/notebooks/02-Visualization/EPL-SPARQL.ipynb)\n\n\n[Explore global air route data using SPARQL](/notebooks/02-Visualization/Air-Routes-SPARQL.ipynb)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
4a2d4ebe511a2b04a75e70ec7335e24dc5ccdca2
871,104
ipynb
Jupyter Notebook
notebooks/Skewnormal.ipynb
jschiavon/optispd
fb3f904a1f1099d31cbcaf27dfc63e5a9e77c9f5
[ "MIT" ]
null
null
null
notebooks/Skewnormal.ipynb
jschiavon/optispd
fb3f904a1f1099d31cbcaf27dfc63e5a9e77c9f5
[ "MIT" ]
null
null
null
notebooks/Skewnormal.ipynb
jschiavon/optispd
fb3f904a1f1099d31cbcaf27dfc63e5a9e77c9f5
[ "MIT" ]
null
null
null
1,014.090803
350,820
0.950729
[ [ [ "import jax.numpy as jnp\nfrom jax import jit, grad, jvp, random\nfrom jax.scipy.stats import multivariate_normal as mvn\nfrom jax.scipy.stats import norm\n\nfrom scipy.optimize import minimize, NonlinearConstraint\nfrom itertools import product\n\nfrom jax.config import config\nconfig.update('jax_enable_x64', True)\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set_theme(\"poster\", \"darkgrid\")\n%matplotlib inline\nplt.rcParams[\"figure.figsize\"] = (16,12)\n\nfrom optispd.manifold import SPD, Product, Euclidean\nfrom optispd.minimizer import minimizer", "_____no_output_____" ], [ "n = 1000\np = 2\ntol = 1e-6\nseed = 0\nrng = random.PRNGKey(seed)", "WARNING:absl:No GPU/TPU found, falling back to CPU. (Set TF_CPP_MIN_LOG_LEVEL=0 and rerun for more info.)\n" ], [ "def compute_delta(sl, cor):\n bilin = jnp.einsum('i,ij,j', sl, cor, sl)\n return jnp.matmul(cor, sl) / jnp.sqrt(1 + bilin)\n\ndef compute_omega(cor, sl):\n delta = compute_delta(sl, cor)\n omega = jnp.append(\n jnp.append(cor, jnp.expand_dims(delta, 1), axis=1),\n jnp.expand_dims(jnp.append(delta, 1.), 0),\n axis=0\n )\n return omega\n\ndef sample_skew(key, loc, cov, sl, shape=(1,)):\n p = loc.shape[-1]\n scale = jnp.sqrt(jnp.diag(cov))\n cor = jnp.einsum('i,ij,j->ij', 1./scale, cov, 1./scale)\n omega = compute_omega(cor, sl)\n X = random.multivariate_normal(\n key=key, shape=shape,\n mean=jnp.zeros(shape=(p + 1),),\n cov=omega,\n )\n X0 = jnp.expand_dims(X[:, -1], 1)\n X = X[:, :-1]\n Z = jnp.where(X0 > 0, X, - X)\n return loc + jnp.einsum('i,ji->ji', scale, Z)\n\ndef capital_phi(alpha, y):\n return jnp.sum(norm.logcdf(jnp.matmul(alpha, y)))\n\ndef small_phi(cov, y):\n mean = jnp.zeros(shape=(cov.shape[-1],))\n return jnp.sum(mvn.logpdf(y, mean=mean, cov=cov))\n\ndef loglik_skew(cov, slant, data):\n scale = jnp.sqrt(jnp.diag(cov))\n al = jnp.einsum('i,i->i', 1./scale, slant)\n Phi = capital_phi(al, data.T)\n phi = small_phi(cov, data)\n return 2 + phi + 
Phi\n\ndef pdf_skew(y, cov, slant):\n scale = jnp.sqrt(jnp.diag(cov))\n al = jnp.einsum('i,i->i', 1./scale, slant)\n Phi = norm.logcdf(jnp.matmul(al, y.T))\n phi = mvn.logpdf(y, mean=jnp.zeros_like(slant), cov=cov)\n return jnp.exp(2 + phi + Phi)", "_____no_output_____" ], [ "rng, *key = random.split(rng, 3)\n\ntmean = jnp.zeros(shape=(p,))\n\ntcov = random.normal(key[0], shape=(p, p))\ntcov = jnp.matmul(tcov, tcov.T)\n\ntslant = random.uniform(key[1], shape=(p,), maxval=5)\n\nprint(\"True values:\")\nprint(\"\\tMean: {}\".format(tmean))\nprint(\"\\tSigma: {} (Eigs: {})\".format(tcov.ravel(), jnp.linalg.eigvalsh(tcov)))\nprint(\"\\tSkew: {}\".format(tslant))", "True values:\n\tMean: [0. 0.]\n\tSigma: [4.27065481 1.21378081 1.21378081 1.53117271] (Eigs: [1.07076229 4.73106523])\n\tSkew: [2.72181592 1.29104493]\n" ], [ "rng, key = random.split(rng)\ndata = sample_skew(key, tmean, tcov, tslant, shape=(n,))\n\nnloglik = jit(lambda x, y: - loglik_skew(x, y, data))\n\ntrue_loglik = nloglik(tcov, tslant)\nprint(\"\\tLoglik: {:.2f}\".format(true_loglik))", "\tLoglik: 3809.62\n" ], [ "plot_data = pd.DataFrame(\n data,\n columns=['x', 'y']\n )\nl = 100\nx = jnp.linspace(jnp.min(data[:, 0]), jnp.max(data[:, 0]), l)\ny = jnp.linspace(jnp.min(data[:, 1]), jnp.max(data[:, 1]), l)\nxy = jnp.array(list(product(x, y)))\n\nZ_skew = pdf_skew(xy, tcov, tslant).reshape(l, l).T\nZ_norm = mvn.pdf(xy, jnp.zeros(p,), cov=tcov).reshape(l, l).T\n\ng = sns.jointplot(data=plot_data, x='x', y='y', alpha=0.4, height=16)\ng.ax_joint.contour(x, y, Z_norm, colors='k', alpha=0.7, levels=4, linestyles='dashed')\ng.ax_joint.contour(x, y, Z_skew, colors='k', levels=5)\nplt.show();", "_____no_output_____" ], [ "k = 0\nmaxit = 100\n\nman = SPD(p=p)\n\noptimizer = minimizer(\n man, method='rsd',\n maxiter=1, mingradnorm=tol,\n verbosity=0, logverbosity=False\n )\n\nrng, *key = random.split(rng, 5)\nsig = random.normal(key[0], shape=(p,p))\nsig = jnp.matmul(sig, sig.T)\n\nth = random.uniform(key[1], 
shape=(p,), maxval=10)\n\nlogl = [nloglik(sig, th)]\nprint(logl)", "[Buffer(31860.80040701, dtype=float64)]\n" ], [ "while True:\n print(\"Iteration {} starts from:\".format(k))\n print(\"\\tSigma : {}\".format(sig.ravel()))\n print(\"\\t(Eigs: {})\".format(jnp.linalg.eigvalsh(sig)))\n print(\"\\tTheta: {} (norm: {})\".format(th, jnp.linalg.norm(th)))\n print(\"\\tLoglik : {:.2f}\".format(logl[-1]))\n\n loglik_sig = jit(lambda x: nloglik(x, th))\n gradient_sig = jit(grad(loglik_sig))\n\n res = optimizer.solve(loglik_sig, gradient_sig, x=sig)\n\n sig = res.x\n\n print('\\t...')\n\n loglik_th = jit(lambda x: nloglik(sig, x))\n gradient_psi = jit(grad(loglik_th))\n\n res = minimize(loglik_th, th,\n method=\"cg\",\n jac=gradient_psi,\n tol=tol,\n options={'maxiter':10}\n )\n th = res.x\n\n logl.append(nloglik(sig, th))\n k += 1\n\n print(\"And ends at:\")\n print(\"\\tSigma : {}\".format(sig.ravel()))\n print(\"\\t(Eigs: {})\".format(jnp.linalg.eigvalsh(sig)))\n print(\"\\tTheta: {} (norm: {})\".format(th, jnp.linalg.norm(th)))\n print(\"\\tLoglik : {:.2f}\".format(logl[-1]))\n\n if jnp.isclose(logl[-2], logl[-1], rtol=tol) or k == maxit:\n break\n\n if jnp.isnan(logl[-1]).any():\n print(\"PANIC! 
NAN APPEARS\")\n break\n \n print(\"\\n---\\n\")\n", "Iteration 0 starts from:\n\tSigma : [0.48867063 0.04752427 0.04752427 0.02829955]\n\t(Eigs: [0.02344479 0.49352538])\n\tTheta: [5.95399676 1.12798829] (norm: 6.059903879997522)\n\tLoglik : 31860.80\n\t...\nAnd ends at:\n\tSigma : [389080.5582915 317073.78926896 317073.78926896 435329.68317158]\n\t(Eigs: [ 94289.19909593 730121.04236715])\n\tTheta: [865.4105095 696.65443168] (norm: 1110.973783278624)\n\tLoglik : 14510.47\n\n---\n\nIteration 1 starts from:\n\tSigma : [389080.5582915 317073.78926896 317073.78926896 435329.68317158]\n\t(Eigs: [ 94289.19909593 730121.04236715])\n\tTheta: [865.4105095 696.65443168] (norm: 1110.973783278624)\n\tLoglik : 14510.47\n\t...\nAnd ends at:\n\tSigma : [206530.5876424 168308.86223203 168308.86223203 231081.40340902]\n\t(Eigs: [ 50050.08083072 387561.91022069])\n\tTheta: [630.51431152 507.56382199] (norm: 809.4253087386695)\n\tLoglik : 13877.14\n\n---\n\nIteration 2 starts from:\n\tSigma : [206530.5876424 168308.86223203 168308.86223203 231081.40340902]\n\t(Eigs: [ 50050.08083072 387561.91022069])\n\tTheta: [630.51431152 507.56382199] (norm: 809.4253087386695)\n\tLoglik : 13877.14\n\t...\nAnd ends at:\n\tSigma : [109628.76312645 89340.93950778 89340.93950778 122661.62555474]\n\t(Eigs: [ 26566.91936334 205723.46931786])\n\tTheta: [459.37241808 369.79683001] (norm: 589.7225737345193)\n\tLoglik : 13243.81\n\n---\n\nIteration 3 starts from:\n\tSigma : [109628.76312645 89340.93950778 89340.93950778 122661.62555474]\n\t(Eigs: [ 26566.91936334 205723.46931786])\n\tTheta: [459.37241808 369.79683001] (norm: 589.7225737345193)\n\tLoglik : 13243.81\n\t...\nAnd ends at:\n\tSigma : [58191.00713181 47422.91483963 47422.91483963 65109.8622837 ]\n\t(Eigs: [ 14101.50736928 109199.36204623])\n\tTheta: [334.68141234 269.42170071] (norm: 429.6506727267973)\n\tLoglik : 12610.48\n\n---\n\nIteration 4 starts from:\n\tSigma : [58191.00713181 47422.91483963 47422.91483963 65109.8622837 ]\n\t(Eigs: [ 
14101.50736928 109199.36204623])\n\tTheta: [334.68141234 269.42170071] (norm: 429.6506727267973)\n\tLoglik : 12610.48\n\t...\nAnd ends at:\n\tSigma : [30886.63068441 25171.83852413 25171.83852413 34560.03471653]\n\t(Eigs: [ 7484.57423298 57962.09116796])\n\tTheta: [243.83054998 196.28871928] (norm: 313.0217219640108)\n\tLoglik : 11977.14\n\n---\n\nIteration 5 starts from:\n\tSigma : [30886.63068441 25171.83852413 25171.83852413 34560.03471653]\n\t(Eigs: [ 7484.57423298 57962.09116796])\n\tTheta: [243.83054998 196.28871928] (norm: 313.0217219640108)\n\tLoglik : 11977.14\n\t...\nAnd ends at:\n\tSigma : [16392.83946027 13360.45573263 13360.45573263 18343.48157976]\n\t(Eigs: [ 3972.15258656 30764.16845347])\n\tTheta: [177.6355482 143.00429196] (norm: 228.04520494994978)\n\tLoglik : 11343.78\n\n---\n\nIteration 6 starts from:\n\tSigma : [16392.83946027 13360.45573263 13360.45573263 18343.48157976]\n\t(Eigs: [ 3972.15258656 30764.16845347])\n\tTheta: [177.6355482 143.00429196] (norm: 228.04520494994978)\n\tLoglik : 11343.78\n\t...\nAnd ends at:\n\tSigma : [8699.19830901 7090.69834266 7090.69834266 9735.35686301]\n\t(Eigs: [ 2107.67779953 16326.87737249])\n\tTheta: [129.4025048 104.17999153] (norm: 166.12789916818588)\n\tLoglik : 10710.40\n\n---\n\nIteration 7 starts from:\n\tSigma : [8699.19830901 7090.69834266 7090.69834266 9735.35686301]\n\t(Eigs: [ 2107.67779953 16326.87737249])\n\tTheta: [129.4025048 104.17999153] (norm: 166.12789916818588)\n\tLoglik : 10710.40\n\t...\nAnd ends at:\n\tSigma : [4615.23702191 3762.56536313 3762.56536313 5165.96414793]\n\t(Eigs: [1117.97242889 8663.22874095])\n\tTheta: [94.25398919 75.88989834] (norm: 121.00864079667825)\n\tLoglik : 10076.98\n\n---\n\nIteration 8 starts from:\n\tSigma : [4615.23702191 3762.56536313 3762.56536313 5165.96414793]\n\t(Eigs: [1117.97242889 8663.22874095])\n\tTheta: [94.25398919 75.88989834] (norm: 121.00864079667825)\n\tLoglik : 10076.98\n\t...\nAnd ends at:\n\tSigma : [2447.38285081 1995.91753593 
1995.91753593 2740.42727779]\n\t(Eigs: [ 592.61658702 4595.19354157])\n\tTheta: [68.63640955 55.27362159] (norm: 88.12564870641373)\n\tLoglik : 9443.48\n\n---\n\nIteration 9 starts from:\n\tSigma : [2447.38285081 1995.91753593 1995.91753593 2740.42727779]\n\t(Eigs: [ 592.61658702 4595.19354157])\n\tTheta: [68.63640955 55.27362159] (norm: 88.12564870641373)\n\tLoglik : 9443.48\n\t...\nAnd ends at:\n\tSigma : [1296.64893788 1058.14420874 1058.14420874 1452.90162596]\n\t(Eigs: [ 313.75082785 2435.79973599])\n\tTheta: [49.95904341 40.24633847] (norm: 64.15351726985041)\n\tLoglik : 8809.83\n\n---\n\nIteration 10 starts from:\n\tSigma : [1296.64893788 1058.14420874 1058.14420874 1452.90162596]\n\t(Eigs: [ 313.75082785 2435.79973599])\n\tTheta: [49.95904341 40.24633847] (norm: 64.15351726985041)\n\tLoglik : 8809.83\n\t...\nAnd ends at:\n\tSigma : [685.83963821 560.36197112 560.36197112 769.4652635 ]\n\t(Eigs: [ 165.73266122 1289.57224049])\n\tTheta: [36.32855562 29.28058468] (norm: 46.65958200195696)\n\tLoglik : 8175.95\n\n---\n\nIteration 11 starts from:\n\tSigma : [685.83963821 560.36197112 560.36197112 769.4652635 ]\n\t(Eigs: [ 165.73266122 1289.57224049])\n\tTheta: [36.32855562 29.28058468] (norm: 46.65958200195696)\n\tLoglik : 8175.95\n\t...\nAnd ends at:\n\tSigma : [361.66733574 296.15506706 296.15506706 406.71631523]\n\t(Eigs: [ 87.18142772 681.20222325])\n\tTheta: [26.38501926 21.29383925] (norm: 33.90570499407643)\n\tLoglik : 7541.80\n\n---\n\nIteration 12 starts from:\n\tSigma : [361.66733574 296.15506706 296.15506706 406.71631523]\n\t(Eigs: [ 87.18142772 681.20222325])\n\tTheta: [26.38501926 21.29383925] (norm: 33.90570499407643)\n\tLoglik : 7541.80\n\t...\nAnd ends at:\n\tSigma : [189.67499051 155.93236911 155.93236911 214.19202824]\n\t(Eigs: [ 45.52003483 358.34698393])\n\tTheta: [19.10767358 15.45289585] (norm: 24.574278829155954)\n\tLoglik : 6907.56\n\n---\n\nIteration 13 starts from:\n\tSigma : [189.67499051 155.93236911 155.93236911 
214.19202824]\n\t(Eigs: [ 45.52003483 358.34698393])\n\tTheta: [19.10767358 15.45289585] (norm: 24.574278829155954)\n\tLoglik : 6907.56\n\t...\nAnd ends at:\n\tSigma : [ 98.55488065 81.56373282 81.56373282 112.07859282]\n\t(Eigs: [ 23.47319569 187.16027777])\n\tTheta: [13.77342187 11.17814046] (norm: 17.738601247368912)\n\tLoglik : 6274.94\n\n---\n\nIteration 14 starts from:\n\tSigma : [ 98.55488065 81.56373282 81.56373282 112.07859282]\n\t(Eigs: [ 23.47319569 187.16027777])\n\tTheta: [13.77342187 11.17814046] (norm: 17.738601247368912)\n\tLoglik : 6274.94\n\t...\nAnd ends at:\n\tSigma : [50.51204288 42.20999712 42.20999712 58.03222246]\n\t(Eigs: [11.89499106 96.64927429])\n\tTheta: [9.86052978 8.04345815] (norm: 12.725064497672442)\n\tLoglik : 5651.41\n\n---\n\nIteration 15 starts from:\n\tSigma : [50.51204288 42.20999712 42.20999712 58.03222246]\n\t(Eigs: [11.89499106 96.64927429])\n\tTheta: [9.86052978 8.04345815] (norm: 12.725064497672442)\n\tLoglik : 5651.41\n\t...\nAnd ends at:\n\tSigma : [25.57184417 21.54364886 21.54364886 29.6321754 ]\n\t(Eigs: [ 5.96291603 49.24110354])\n\tTheta: [7.01590663 5.74764537] (norm: 9.06964018838929)\n\tLoglik : 5063.27\n\n---\n\nIteration 16 starts from:\n\tSigma : [25.57184417 21.54364886 21.54364886 29.6321754 ]\n\t(Eigs: [ 5.96291603 49.24110354])\n\tTheta: [7.01590663 5.74764537] (norm: 9.06964018838929)\n\tLoglik : 5063.27\n\t...\nAnd ends at:\n\tSigma : [13.12761243 10.91357552 10.91357552 15.00028663]\n\t(Eigs: [ 3.11028084 25.01761821])\n\tTheta: [5.02684682 4.08938478] (norm: 6.4801432675903525)\n\tLoglik : 4573.07\n\n---\n\nIteration 17 starts from:\n\tSigma : [13.12761243 10.91357552 10.91357552 15.00028663]\n\t(Eigs: [ 3.11028084 25.01761821])\n\tTheta: [5.02684682 4.08938478] (norm: 6.4801432675903525)\n\tLoglik : 4573.07\n\t...\nAnd ends at:\n\tSigma : [7.52122191 5.4992573 5.4992573 7.50222214]\n\t(Eigs: [ 2.01245652 13.01098753])\n\tTheta: [3.80493268 2.89203241] (norm: 4.779263980981762)\n\tLoglik : 
4229.62\n\n---\n\nIteration 18 starts from:\n\tSigma : [7.52122191 5.4992573 5.4992573 7.50222214]\n\t(Eigs: [ 2.01245652 13.01098753])\n\tTheta: [3.80493268 2.89203241] (norm: 4.779263980981762)\n\tLoglik : 4229.62\n\t...\nAnd ends at:\n\tSigma : [5.36930144 2.83507812 2.83507812 3.77502336]\n\t(Eigs: [1.6271499 7.5171749])\n\tTheta: [3.21485706 2.05148343] (norm: 3.8136452570871757)\n\tLoglik : 3979.92\n\n---\n\nIteration 19 starts from:\n\tSigma : [5.36930144 2.83507812 2.83507812 3.77502336]\n\t(Eigs: [1.6271499 7.5171749])\n\tTheta: [3.21485706 2.05148343] (norm: 3.8136452570871757)\n\tLoglik : 3979.92\n\t...\nAnd ends at:\n\tSigma : [4.57065682 1.63973735 1.63973735 2.09237217]\n\t(Eigs: [1.27622564 5.38680335])\n\tTheta: [2.96614299 1.52731165] (norm: 3.3362681385380735)\n\tLoglik : 3838.57\n\n---\n\nIteration 20 starts from:\n\tSigma : [4.57065682 1.63973735 1.63973735 2.09237217]\n\t(Eigs: [1.27622564 5.38680335])\n\tTheta: [2.96614299 1.52731165] (norm: 3.3362681385380735)\n\tLoglik : 3838.57\n" ], [ "def nloglik(X):\n y = jnp.concatenate([data.T, jnp.ones(shape=(1, n))], axis=0)\n datapart = jnp.trace(jnp.linalg.solve(X, jnp.matmul(y, y.T)))\n return 0.5 * (n * jnp.linalg.slogdet(X)[1] + datapart)\n\nfun_rep = jit(nloglik)\ngra_rep = jit(grad(fun_rep))\n\nman_norm = SPD(p+1)\nopt = minimizer(man_norm, method='rlbfgs', verbosity=1)\nres = opt.solve(fun_rep, gra_rep, x=jnp.identity(p+1))\n\nmuhat = res.x[-1, :-1]\ncovhat = res.x[:-1, :-1] - jnp.outer(muhat, muhat)", "Starting Riemannian Limited memory BFGS on manifold of (3 x 3) positive definite matrices\nOptimization completed.\n\t12 iterations in 1.225 s\n" ], [ "plt.plot(jnp.array(logl), label=\"Estimated loglikelihood\")\nplt.hlines(y=true_loglik, xmin=0, xmax=k, colors='k', linestyles='--', label=\"Loglikelihood of true values\")\nplt.yscale('log')\nplt.legend(loc='best')\nplt.show()", "_____no_output_____" ], [ "print(\"True values:\")\nprint(\"\\tSigma: {} (Eigs: {})\".format(tcov.ravel(), 
jnp.linalg.eigvalsh(tcov)))\nprint(\"\\tTheta: {} (norm: {})\".format(tslant, jnp.linalg.norm(tslant)))\nprint(\"\\tLoglik: {:.2f}\".format(true_loglik))\n\nprint(\"Estimated values:\")\nprint(\"\\tSigma: {} (Eigs: {})\".format(sig.ravel(), jnp.linalg.eigvalsh(sig)))\nprint(\"\\tTheta: {} (norm: {})\".format(th, jnp.linalg.norm(th)))\nprint(\"\\tLoglik: {:.2f}\".format(logl[-1]))\n\nprint(\"Estimated normal values:\")\nprint(\"\\tCovariance: {}\".format(covhat.ravel()))\nprint(\"\\tMean: {}\".format(muhat))", "True values:\n\tSigma: [4.27065481 1.21378081 1.21378081 1.53117271] (Eigs: [1.07076229 4.73106523])\n\tTheta: [2.72181592 1.29104493] (norm: 3.0124871648569727)\n\tLoglik: 3809.62\nEstimated values:\n\tSigma: [4.2159309 1.18082108 1.18082108 1.44935544] (Eigs: [1.01390092 4.65138542])\n\tTheta: [2.84871838 1.27114663] (norm: 3.119456709677313)\n\tLoglik: 3808.40\nEstimated normal values:\n\tCovariance: [1.98317827 0.18625879 0.18625879 1.00633833]\n\tMean: [1.49424251 0.66559847]\n" ], [ "l = 100\nx = jnp.linspace(jnp.min(data[:, 0]), jnp.max(data[:, 0]), l)\ny = jnp.linspace(jnp.min(data[:, 1]), jnp.max(data[:, 1]), l)\nxy = jnp.array(list(product(x, y)))\nZ_est = pdf_skew(xy, sig, th).reshape(l, l).T\nZ_tru = pdf_skew(xy, tcov, tslant).reshape(l, l).T\nZ_norm = mvn.pdf(xy, muhat.ravel(), covhat).reshape(l, l).T\n\ng = sns.jointplot(data=plot_data, x='x', y='y', alpha=0.3, height=16)\ng.ax_joint.contour(x, y, Z_tru, colors='r', alpha=0.5, levels=5, linestyles='dashed')\ng.ax_joint.contour(x, y, Z_est, colors='r', levels=5)\ng.ax_joint.contour(x, y, Z_norm, colors='g', levels=5)\nplt.show();", "_____no_output_____" ], [ "sns.kdeplot(data=plot_data, x='x', y='y', alpha=0.4, fill=True)\n#sns.scatterplot(data=plot_data, x='x', y='y', alpha=0.3)\nplt.contour(x, y, Z_est, colors='r', levels=4)\nplt.contour(x, y, Z_norm, colors='k', levels=4, linestyles='dashed')\nplt.show()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a2d555ba0d5ffe5070ad5abacc4fbb1a2d02d76
9,125
ipynb
Jupyter Notebook
Lessons/06_Feature_Selection.ipynb
Bluelord/ML_Mastery_Python
f406cc7c996e3a100a8cbf0f923572a5f8970c13
[ "MIT" ]
null
null
null
Lessons/06_Feature_Selection.ipynb
Bluelord/ML_Mastery_Python
f406cc7c996e3a100a8cbf0f923572a5f8970c13
[ "MIT" ]
null
null
null
Lessons/06_Feature_Selection.ipynb
Bluelord/ML_Mastery_Python
f406cc7c996e3a100a8cbf0f923572a5f8970c13
[ "MIT" ]
null
null
null
32.358156
351
0.493808
[ [ [ "<a href=\"https://colab.research.google.com/github/Bluelord/ML_Mastery_Python/blob/main/06_Feature_Selection.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "# Feature Selction\n\n---\n", "_____no_output_____" ] ], [ [ "from google.colab import drive\ndrive.mount('/content/drive')", "Mounted at /content/drive\n" ] ], [ [ "Features in the dataset are very important for our model perfomance accuracy, it can also decrese performance of many ML models, especially for linear ML model. Feture selction is a crusial step befor modeling your ML model, the advantages of selecting relavent features will **Reduce Overfitting, Improves Accuracy, Reduces Traning Time**. ", "_____no_output_____" ], [ "## Univariable Selection\n\nStatistical test can be done on the data to find the stringest relation with the out, **SelectKBest** class use to select the best feature from the test, in this example **Chi-Square** test is used non-negative features.\n\n\n", "_____no_output_____" ] ], [ [ "# Feature Extraction with univariable Statictical test (Chi-Squaare for classification)\nfrom pandas import read_csv\nfrom numpy import set_printoptions\nfrom sklearn.feature_selection import SelectKBest\nfrom sklearn.feature_selection import chi2\n# Load Data\nfilename = \"/content/drive/MyDrive/Colab Notebooks/ML Mastery python/Dataset/pima-indians-diabetes.csv\"\nnames = ['preg', 'plas','pres','skin','test','mass','pedi','age','class']\ndataframe = read_csv(filename, names=names)\narray = dataframe.values\n# separate array into input & output \nX = array[:,0:8]\nY = array[:,8]\n# Feature extraction\n##########################################\ntest = SelectKBest(score_func=chi2, k = 4) # we can change the score fun & k is the no of feature we want to use\nfit = test.fit(X, Y)\n###########################################\n#Summarize scores 
\nset_printoptions(precision=3)\nprint(fit.scores_)\nfeatures = fit.transform(X)\n# Summarizing the selected features\nprint(features[:,:])", "[ 111.52 1411.887 17.605 53.108 2175.565 127.669 5.393 181.304]\n[[148. 0. 33.6 50. ]\n [ 85. 0. 26.6 31. ]\n [183. 0. 23.3 32. ]\n ...\n [121. 112. 26.2 30. ]\n [126. 0. 30.1 47. ]\n [ 93. 0. 30.4 23. ]]\n" ] ], [ [ "## Recursive Features Elimination\n\nRFE uses the model accuracy to identify which features are important by recursivly removing the features and building new model dataset again and again. **RFE** class from sklearn is used for this.\n", "_____no_output_____" ] ], [ [ "# Feature extraction wtih RFE\nfrom sklearn.feature_selection import RFE\nfrom sklearn.linear_model import LogisticRegression\n# Load Data\n# Feature extraction\n##########################################\nmodel = LogisticRegression(solver='liblinear') # In this we need a model for recurrently select the feature and test the dataset.\nrfe = RFE(model, 3)\nfit = rfe.fit(X,Y)\n###########################################\n# Summarizing the selected features\nprint(\"Num Features: %d:\" % fit.n_features_)\nprint(\"Selected Features:\" % fit.support_)\nprint(\"Feature Ranking:\" % fit.ranking_)", "Num Features: 3:\nSelected Features:\nFeature Ranking:\n" ] ], [ [ "## Principal Component Analysis\n\nPCA is one if the data comprasion technique, it choose the number of features or the principal components and result into new dataset. 
\n", "_____no_output_____" ] ], [ [ "# Feature Extraction with PCA \nfrom sklearn.decomposition import PCA\n# Load Data\n\n# Feature extraction\n##########################################\npca = PCA(n_components= 3)\nfit = pca.fit(X)\n# Summarize components\nprint(\"Explained Varience: %s\" % fit.explained_variance_ratio_)\nprint(fit.components_)", "Explained Varience: [0.889 0.062 0.026]\n[[-2.022e-03 9.781e-02 1.609e-02 6.076e-02 9.931e-01 1.401e-02\n 5.372e-04 -3.565e-03]\n [-2.265e-02 -9.722e-01 -1.419e-01 5.786e-02 9.463e-02 -4.697e-02\n -8.168e-04 -1.402e-01]\n [-2.246e-02 1.434e-01 -9.225e-01 -3.070e-01 2.098e-02 -1.324e-01\n -6.400e-04 -1.255e-01]]\n" ] ], [ [ "## Feature Importance \n\nDecision trees like Random forest & Extra Trees can be used to estimate the importance of Features. Score given to the features by this class higlight the importance of the features in the dataset.\n", "_____no_output_____" ] ], [ [ "# Feature Extraction with Extra Trees Classifier\nfrom sklearn.ensemble import ExtraTreesClassifier\n# Load Data\n\n# Feature extraction\nmodel = ExtraTreesClassifier(n_estimators=100)\nmodel.fit(X,Y)\nprint(model.feature_importances_)", "[0.112 0.236 0.099 0.078 0.077 0.141 0.119 0.138]\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
4a2d5affe9a470bac1868d694a21c3406ba090a2
94,081
ipynb
Jupyter Notebook
FFA_Model_Training/ensemble_model.ipynb
GunnerStone/AlgonautsChallenge_Fall2021
c9d0b288dd3896ffeac4934423f47e43eea85c17
[ "MIT" ]
null
null
null
FFA_Model_Training/ensemble_model.ipynb
GunnerStone/AlgonautsChallenge_Fall2021
c9d0b288dd3896ffeac4934423f47e43eea85c17
[ "MIT" ]
null
null
null
FFA_Model_Training/ensemble_model.ipynb
GunnerStone/AlgonautsChallenge_Fall2021
c9d0b288dd3896ffeac4934423f47e43eea85c17
[ "MIT" ]
null
null
null
97.493264
44,914
0.810355
[ [ [ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nfrom torch.utils.data import TensorDataset, DataLoader\nimport os\nimport timm\nfrom tqdm.notebook import tqdm\nimport matplotlib.pyplot as plt\nimport torch.nn as nn\n\n#define variables specific to this model\nsubject = 'sub01'\nroi = 'FFA'\nensemble_model_name = 'model_weights_{}_{}_ensemble.pt'.format(subject, roi)\n\nRAW_model_name = 'model_weights_{}_{}_RAW.pt'.format(subject, roi)\nRAFT_model_name = 'model_weights_{}_{}_RAFT.pt'.format(subject, roi)\nBDCN_model_name = 'model_weights_{}_{}_BDCN.pt'.format(subject, roi)\nMFCC_model_name = 'model_weights_{}_{}_MFCC.pt'.format(subject, roi)\n\n# define global variables / hyperparameters\nbatch_size = 16\nnum_epochs = 20\nlearning_rate = 0.001\n\n# detect if GPU/CPU device\nuse_cuda = torch.cuda.is_available()\nprint('CUDA available:', use_cuda)\n\n# set RNG seed for reproducibility\nseed = 1\ntorch.manual_seed(seed)\nnp.random.seed(seed)\n\n# setup gpu things\ndtype = 'float32' if use_cuda else 'float64' # GPU does better with float32 numbers\ntorchtype = {'float32': torch.float32, 'float64': torch.float64}[dtype]\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n# flush out the cache\ntorch.cuda.empty_cache()", "CUDA available: True\n" ], [ "# Function that takes arrays x1, y1, x2, y2\n# each y has dimensions (num_samples, num_classes) \n# finds the similar labels in y1 and y2\n# uses the indices of similar labels to find the corresponding x1 and x2\n# returns the x1, x2, y1\ndef find_similar_labels(x1, y1, x2, y2):\n # for every label in y1, see if there is a similar label in y2\n # if there is, add the corresponding x1 and x2 to the list\n # return the list of x1, x2, y1, y2\n x1_list = []\n x2_list = []\n y1_list = []\n for i, label in enumerate(y1):\n # labels are both floats, so check if they are close enough\n # if they are close enough, add the corresponding x1 and x2 to the list\n 
# if they are not close enough, do nothing\n # if there is no similar label, do nothing\n similar_label_idx = -1\n similar_label = False\n for j, label2 in enumerate(y2):\n if np.allclose(label, label2, atol=0.1):\n similar_label = True\n similar_label_idx = j\n break\n if similar_label:\n x1_list.append(x1[i])\n x2_list.append(x2[j])\n y1_list.append(label)\n return np.array(x1_list), np.array(x2_list), np.array(y1_list)", "_____no_output_____" ], [ "# Training a RNN with PyTorch for roi V1 with RAW data\n\n# Load the entire training data into main memory\n# This is a huge dataset, so we need to do this in chunks\n\n#isolate subject of interests' data\nnum_subjects = 10\nsoi = subject\nnum_classes = -1\n\n# read in every npy file in the directory Gunners_training_data/V1/RAW and store them in a list \nRAW_training_data = []\nRAW_training_labels = []\n\nRAFT_training_data = []\nRAFT_training_labels = []\n\nBDCN_training_data = []\nBDCN_training_labels = []\n\nMFCC_training_data = []\nMFCC_training_labels = []\n\n# load in every Nth file in the directory\nculling_scale = 1\n\npreprocessing = 'MFCC'\ninput_data_dims = (39,261)\ninput_channels = 1\n# for each file in the MFCC directory\nfor i, file in enumerate(os.listdir('../Gunners_training_data/{}/{}'.format(roi, preprocessing))):\n # if the file name contains the soi string\n if not soi in file:\n continue\n # if the file is a .npy file\n if file.endswith('.npy'):\n # read in the file\n data = np.load('../Gunners_training_data/{}/{}/'.format(roi,preprocessing) + file, allow_pickle=True)\n \n # print out first voxel label\n # if i == 0:\n # print(data[0][1][0])\n\n # for each sample, make sure its dimensions are correct, if not then skip it\n if data[0][0].shape != input_data_dims:\n # if the shape is larger than the input_data_dims, then crop it\n if data[0][0].shape[1] > input_data_dims[1]:\n data[0][0] = data[0][0][:,:input_data_dims[1]]\n if data[0][0].shape != input_data_dims:\n continue\n\n # for each sample, 
add the data to the training_data list\n data[0][0] = np.expand_dims(data[0][0],axis=0)\n MFCC_training_data.append(data[0][0])\n # for each sample, add the label to the training_labels list\n MFCC_training_labels.append(data[0][1])\n\nnum_classes = len(MFCC_training_labels[0])\n\npreprocessing = 'RAW'\ninput_data_dims = (32,32,225)\ninput_channels = 225\n# for each file in the RAW directory\nfor i, file in enumerate(os.listdir('../Gunners_training_data/{}/{}'.format(roi, preprocessing))):\n # if the file name contains the soi string\n if not soi in file:\n continue\n # if the file is a .npy file\n if file.endswith('.npy'):\n # read in the file\n data = np.load('../Gunners_training_data/{}/{}/'.format(roi,preprocessing) + file, allow_pickle=True)\n \n # print out first voxel label\n # if i == 0:\n # print(data[0][1][0])\n\n # for each sample, make sure its dimensions are 32x32x225, if not then skip it\n if data[0][0].shape != input_data_dims:\n continue\n \n # for each sample, add the data to the training_data list\n RAW_training_data.append(data[0][0])\n\n # for each label, add the data to the training_data list\n RAW_training_labels.append(data[0][1])\n\n# cull MFCCS that dont have RAW data and vice versa\nMFCC_training_data = np.array(MFCC_training_data)\nMFCC_training_labels = np.array(MFCC_training_labels)\nRAW_training_data = np.array(RAW_training_data)\nRAW_training_labels = np.array(RAW_training_labels)\n\nprint(\"Finding similar labels between MFCC and RAW\")\nMFCC_training_data, RAW_training_data, MFCC_training_labels = find_similar_labels(MFCC_training_data, MFCC_training_labels, RAW_training_data, RAW_training_labels)\n\npreprocessing = 'RAFT'\ninput_data_dims = (32,32,222)\ninput_channels = 222\n# for each file in the RAW directory\nfor i, file in enumerate(os.listdir('../Gunners_training_data/{}/{}'.format(roi, preprocessing))):\n # if the file name contains the soi string\n if not soi in file:\n continue\n # if the file is a .npy file\n if 
file.endswith('.npy'):\n # read in the file\n data = np.load('../Gunners_training_data/{}/{}/'.format(roi,preprocessing) + file, allow_pickle=True)\n \n \n # print out first voxel label\n # if i == 0:\n # print(data[0][1][0])\n\n # for each sample, make sure its dimensions are 32x32x225, if not then skip it\n if data[0][0].shape != input_data_dims:\n continue\n \n # for each sample, add the data to the training_data list\n RAFT_training_data.append(data[0][0])\n\n # for each label, add the data to the training_data list\n RAFT_training_labels.append(data[0][1])\n\n\n# cull MFCCs/RAW that dont have RAFT data and vice versa\nRAFT_training_data = np.array(RAFT_training_data)\nRAFT_training_labels = np.array(RAFT_training_labels)\n\nprint(\"Finding similar labels between MFCC and RAFT\")\nRAFT_training_data, MFCC_training_data, MFCC_training_labels = find_similar_labels(RAFT_training_data, RAFT_training_labels, MFCC_training_data, MFCC_training_labels)\n\npreprocessing = 'BDCN'\ninput_data_dims = (32,32,75)\ninput_channels = 75\n# for each file in the RAW directory\nfor i, file in enumerate(os.listdir('../Gunners_training_data/{}/{}'.format(roi, preprocessing))):\n # if the file name contains the soi string\n if not soi in file:\n continue\n # if the file is a .npy file\n if file.endswith('.npy'):\n # read in the file\n data = np.load('../Gunners_training_data/{}/{}/'.format(roi,preprocessing) + file, allow_pickle=True)\n \n \n # print out first voxel label\n # if i == 0:\n # print(data[0][1][0])\n\n # for each sample, make sure its dimensions are 32x32x225, if not then skip it\n if data[0][0].shape != input_data_dims:\n continue\n \n # for each sample, add the data to the training_data list\n BDCN_training_data.append(data[0][0])\n\n # for each label, add the data to the training_data list\n BDCN_training_labels.append(data[0][1])\n\n\n# cull MFCCs/RAW that dont have RAFT data and vice versa\nBDCN_training_data = np.array(BDCN_training_data)\nBDCN_training_labels = 
np.array(BDCN_training_labels)\n\nprint(\"Finding similar labels between MFCC and BDCN\")\nBDCN_training_data, MFCC_training_data, MFCC_training_labels = find_similar_labels(BDCN_training_data, BDCN_training_labels, MFCC_training_data, MFCC_training_labels)\n\nprint('Number of RAW training samples: ', len(RAW_training_data))\nprint('Number of MFCC training samples: ', len(MFCC_training_data))\nprint('Number of RAFT training samples: ', len(RAFT_training_data))\nprint('Number of BDCN training samples: ', len(BDCN_training_data))\nprint('Number of voxel activations (classes): ', num_classes)\n\n# Only keep MFFC_training_labels; release memory of other label arrays\nRAW_training_labels = None\nRAFT_training_labels = None\nBDCN_training_labels = None\n\n#normalize all labels to be between -1 and 1\nMFCC_training_labels = np.array(MFCC_training_labels)\nMFCC_training_labels = (MFCC_training_labels - np.min(MFCC_training_labels)) / (np.max(MFCC_training_labels) - np.min(MFCC_training_labels))\n#print the value range of the labels\nprint('Value range of labels: ', np.min(MFCC_training_labels), np.max(MFCC_training_labels))\nnum_classes = MFCC_training_labels[0].shape[0]\nprint('Number of voxel activations (classes): ', num_classes)\n \n# verify the data is loaded correctly and is in numpy arrays\nRAW_training_data = np.array(RAW_training_data)\nRAFT_training_data = np.array(RAFT_training_data)\nBDCN_training_data = np.array(BDCN_training_data)\nMFCC_training_data = np.array(MFCC_training_data)\n\n# combine all training data into one array for a pytorch Dataset object\nRAW_training_data = torch.tensor(RAW_training_data).type(torchtype)\nRAFT_training_data = torch.tensor(RAFT_training_data).type(torchtype)\nBDCN_training_data = torch.tensor(BDCN_training_data).type(torchtype)\nMFCC_training_data = torch.tensor(MFCC_training_data).type(torchtype)\ntraining_labels = torch.tensor(MFCC_training_labels).type(torchtype)\n\n# permute the data so that the first dimension is the 
number of samples, the second is the number of channels\n# not viable for MFCC 2d data\nRAW_training_data = RAW_training_data.permute(0,3,1,2)\nRAFT_training_data = RAFT_training_data.permute(0,3,1,2)\nBDCN_training_data = BDCN_training_data.permute(0,3,1,2)\n\n\n#print the dims of training_data tensor\nprint('RAW_training_data tensor dims:', RAW_training_data.shape)\nprint('RAFT_training_data tensor dims:', RAFT_training_data.shape)\nprint('BDCN_training_data tensor dims:', BDCN_training_data.shape)\nprint('MFCC_training_data tensor dims:', MFCC_training_data.shape)\nprint('training_labels tensor dims:', training_labels.shape)\n\n# create a dataset from the tensors\nRAW_dataset = TensorDataset(RAW_training_data,training_labels)\nRAFT_dataset = TensorDataset(RAFT_training_data,training_labels)\nBDCN_dataset = TensorDataset(BDCN_training_data,training_labels)\nMFCC_dataset = TensorDataset(MFCC_training_data,training_labels)\n\n# split the data into training and validation sets\ntrain_size = int(0.8 * len(RAW_training_data))\nvalid_size = len(RAW_training_data) - train_size\n\n# create training and validation sets\nRAW_dataset, RAW_validation_data = torch.utils.data.random_split(RAW_dataset, [train_size, valid_size])\nRAFT_dataset, RAFT_validation_data = torch.utils.data.random_split(RAFT_dataset, [train_size, valid_size])\nBDCN_dataset, BDCN_validation_data = torch.utils.data.random_split(BDCN_dataset, [train_size, valid_size])\nMFCC_dataset, MFCC_validation_data = torch.utils.data.random_split(MFCC_dataset, [train_size, valid_size])\n\n# # combine all the training datasets into a big one\n# train_data = [RAW_dataset, RAFT_dataset, BDCN_dataset, MFCC_dataset]\n# # concatenate all the validation datasets\n# valid_data = torch.utils.data.ConcatDataset([RAW_validation_data, RAFT_validation_data, BDCN_validation_data, MFCC_validation_data])\n\n# print out the range of values for labels\nprint('Value range of labels: ', np.min(MFCC_training_labels), 
np.max(MFCC_training_labels))\n\n\n# create training and validation dataloaders\n\nRAW_train_loader = DataLoader(RAW_dataset, batch_size = batch_size, shuffle=True)\nRAFT_train_loader = DataLoader(RAFT_dataset, batch_size = batch_size, shuffle=True)\nBDCN_train_loader = DataLoader(BDCN_dataset, batch_size = batch_size, shuffle=True)\nMFCC_train_loader = DataLoader(MFCC_dataset, batch_size = batch_size, shuffle=True)\n\nRAW_val_loader = DataLoader(RAW_validation_data, batch_size = batch_size, shuffle=False)\nRAFT_val_loader = DataLoader(RAFT_validation_data, batch_size = batch_size, shuffle=False)\nBDCN_val_loader = DataLoader(BDCN_validation_data, batch_size = batch_size, shuffle=False)\nMFCC_val_loader = DataLoader(MFCC_validation_data, batch_size = batch_size, shuffle=False)\n", "Finding similar labels between MFCC and RAW\nFinding similar labels between MFCC and RAFT\nFinding similar labels between MFCC and BDCN\nNumber of RAW training samples: 598\nNumber of MFCC training samples: 598\nNumber of RAFT training samples: 598\nNumber of BDCN training samples: 598\nNumber of voxel activations (classes): 68\nValue range of labels: 0.0 1.0\nNumber of voxel activations (classes): 68\nRAW_training_data tensor dims: torch.Size([598, 225, 32, 32])\nRAFT_training_data tensor dims: torch.Size([598, 222, 32, 32])\nBDCN_training_data tensor dims: torch.Size([598, 75, 32, 32])\nMFCC_training_data tensor dims: torch.Size([598, 1, 39, 261])\ntraining_labels tensor dims: torch.Size([598, 68])\nValue range of labels: 0.0 1.0\n" ], [ "class MyEnsemble(nn.Module):\n def __init__(self, modelRAW, modelRAFT, modelBDCN, modelMFCC, nb_classes=num_classes):\n super(MyEnsemble, self).__init__()\n self.modelRAW = modelRAW\n self.modelRAFT = modelRAFT\n self.modelBDCN = modelBDCN\n self.modelMFCC = modelMFCC\n # Remove last linear layer\n # self.modelRAW.fc = nn.Identity()\n # self.modelBDCN.fc = nn.Identity()\n # self.modelRAFT.fc = nn.Identity()\n # self.modelMFCC.fc = nn.Identity()\n \n # 
Create new classifier\n self.classifier1 = nn.Linear(num_classes*4, 2048)\n self.classifier2 = nn.Linear(2048, nb_classes)\n \n def forward(self, dataRAW, dataRAFT, dataBDCN, dataMFCC):\n x1 = self.modelRAW(dataRAW)\n x1 = x1.view(x1.size(0), -1)\n x2 = self.modelBDCN(dataBDCN)\n x2 = x2.view(x2.size(0), -1)\n x3 = self.modelRAFT(dataRAFT) \n x3 = x3.view(x3.size(0), -1)\n x4 = self.modelMFCC(dataMFCC) \n x4 = x4.view(x4.size(0), -1)\n \n x = torch.cat((x1, x2, x3, x4), dim=1).to(device)\n # print(x.shape)\n # print(x.shape)\n x = torch.sigmoid(self.classifier2(F.relu(self.classifier1(F.relu(x)))))\n return x", "_____no_output_____" ], [ "# define the RAW model\n# define RNN model with 225 channels\ninput_channels = 225\nmodelRAW = timm.create_model('densenet121', num_classes=num_classes, in_chans=input_channels, pretrained=False).to(device)\nmodelRAW = nn.Sequential(modelRAW, nn.Sigmoid())\n\nmodelRAW.eval()\n# make the model use floats\nmodelRAW.float()\n\n# load pretrained weighst from the file\nmodelRAW.load_state_dict(torch.load('{}'.format(RAW_model_name)))\n\n# define optimizer\noptimizerRAW = torch.optim.Adam(modelRAW.parameters(), lr=learning_rate)\n\n# scheduler for Learning Rate\nschedulerRAW = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizerRAW, 'min', patience=2, verbose=True)", "_____no_output_____" ], [ "# define the RAFT model\n# define RNN model with 222 channels\ninput_channels = 222\nmodelRAFT = timm.create_model('cspresnext50', num_classes=num_classes, in_chans=input_channels, pretrained=False).to(device)\nmodelRAFT = nn.Sequential(modelRAFT, nn.Sigmoid())\n\nmodelRAFT.eval()\n# make the model use floats\nmodelRAFT.float()\n\n# load pretrained weighst from the file\nmodelRAFT.load_state_dict(torch.load('{}'.format(RAFT_model_name)))\n\n# define optimizer\noptimizerRAFT = torch.optim.Adam(modelRAFT.parameters(), lr=learning_rate)\n\n# scheduler for Learning Rate\nschedulerRAFT = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizerRAFT, 'min', 
patience=2, verbose=True)", "_____no_output_____" ], [ "\n# define the BDCN model\n# define RNN model with 222 channels\ninput_channels = 75\nmodelBDCN = timm.create_model('densenet121', num_classes=num_classes, in_chans=input_channels, pretrained=False).to(device)\nmodelBDCN = nn.Sequential(modelBDCN, nn.Sigmoid())\n\nmodelBDCN.eval()\n# make the model use floats\nmodelBDCN.float()\n\n# load pretrained weighst from the file\nmodelBDCN.load_state_dict(torch.load('{}'.format(BDCN_model_name)))\n\n# define optimizer\noptimizerBDCN = torch.optim.Adam(modelBDCN.parameters(), lr=learning_rate)\n\n# scheduler for Learning Rate\nschedulerBDCN = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizerBDCN, 'min', patience=2, verbose=True)\n", "_____no_output_____" ], [ "\n# define the MFCC model\n# define RNN model with 222 channels\ninput_channels = 1\nmodelMFCC = timm.create_model('densenet121', num_classes=num_classes, in_chans=input_channels, pretrained=False).to(device)\nmodelMFCC = nn.Sequential(modelMFCC, nn.Sigmoid())\nmodelMFCC.eval()\n# make the model use floats\nmodelMFCC.float()\n\n# load pretrained weighst from the file\nmodelMFCC.load_state_dict(torch.load('{}'.format(MFCC_model_name)))\n\n# define optimizer\noptimizerMFCC = torch.optim.Adam(modelMFCC.parameters(), lr=learning_rate)\n\n# scheduler for Learning Rate\nschedulerMFCC = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizerMFCC, 'min', patience=2, verbose=True)", "_____no_output_____" ], [ "# create a loss function that is 1 - the correlation coefficient\ndef corrcoef_loss_function(output, target):\n x = output\n y = target\n\n vx = x - torch.mean(x)\n vy = y - torch.mean(y)\n\n cost = torch.sum(vx * vy) / (torch.sqrt(torch.sum(vx ** 2)) * torch.sqrt(torch.sum(vy ** 2)))\n # mse_loss = torch.mean((output - target) ** 2)\n return (1 - cost)**3", "_____no_output_____" ], [ "# define the ensemble model\nmodel = MyEnsemble(modelRAW, modelRAFT, modelBDCN, modelMFCC)\n\n\n# define optimizer\noptimizer = 
torch.optim.Adam(model.parameters(), lr=learning_rate)\n\n# scheduler for Learning Rate\nscheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience=2, verbose=True)\n\n# define loss function for multi-variable regression\nloss_fn = corrcoef_loss_function\n\nif use_cuda:\n #put model on gpu\n model = model.to(device)", "_____no_output_____" ], [ "# keep track of training/validation loss\ntrain_losses = []\nvalid_losses = []\n\n# train the model\n#progress bar for training\npbar = tqdm(range(num_epochs))\n\nfor epoch in pbar:\n # keep track of training and validation loss\n train_loss = 0.0\n valid_loss = 0.0\n \n # keep track of training and validation accuracy\n train_accuracy = 0.0\n valid_accuracy = 0.0\n \n # set the model to training mode\n model.train()\n \n # train the model for one epoch\n for RAW_data, RAFT_data, BDCN_data, MFCC_data in zip(RAW_train_loader, RAFT_train_loader, BDCN_train_loader, MFCC_train_loader):\n image_batch1, labels = RAW_data\n image_batch2, _ = RAFT_data\n image_batch3, _ = BDCN_data\n image_batch4, _ = MFCC_data\n # move tensors to GPU if CUDA is available\n if use_cuda:\n image_batch1, labels = image_batch1.to(device), labels.to(device)\n image_batch2, image_batch3, image_batch4 = image_batch2.to(device), image_batch3.to(device), image_batch4.to(device)\n \n \n # zero out the gradients\n optimizer.zero_grad()\n \n # forward pass\n output = model(dataRAW = image_batch1, dataRAFT = image_batch2, dataBDCN = image_batch3, dataMFCC = image_batch4)\n \n # calculate loss\n loss = loss_fn(output, labels)\n \n # backpropagate\n loss.backward()\n \n # update the weights\n optimizer.step()\n \n # calculate the training loss\n train_loss += loss.item()\n \n # set the model to evaluation mode\n model.eval()\n # evaluate the model on the validation set\n for RAW_data, RAFT_data, BDCN_data, MFCC_data in zip(RAW_val_loader, RAFT_val_loader, BDCN_val_loader, MFCC_val_loader):\n \n image_batch1, labels = RAW_data\n 
image_batch2, _ = RAFT_data\n image_batch3, _ = BDCN_data\n image_batch4, _ = MFCC_data\n # move tensors to GPU if CUDA is available\n if use_cuda:\n image_batch1, labels = image_batch1.to(device), labels.to(device)\n image_batch2, image_batch3, image_batch4 = image_batch2.to(device), image_batch3.to(device), image_batch4.to(device)\n #put model on gpu\n model = model.to(device)\n \n # validation forward pass\n output = model(image_batch1, image_batch2, image_batch3, image_batch4)\n\n # calculate the validation loss\n valid_loss += loss_fn(output, labels).item()\n \n # calculate the average training loss and accuracy\n train_loss = train_loss/len(RAW_train_loader)\n\n # calculate the average validation loss and accuracy\n valid_loss = valid_loss/len(RAW_val_loader)\n\n # ping the learning rate scheduler\n scheduler.step(valid_loss)\n\n # append the training and validation loss and accuracy to the lists\n train_losses.append(train_loss)\n valid_losses.append(valid_loss)\n\n # if current validation loss was best so far, save the model weights in memory\n best_valid_loss = min(valid_losses)\n if valid_loss == best_valid_loss:\n my_best_weights = model.state_dict() \n \n # display the epoch training loss\n pbar.set_postfix({\n 'Epoch':'{}/{}'.format(epoch+1, num_epochs), \n 'Training Loss': '{:.4f}'.format(train_loss) , \n 'Validation loss' : '{:.4f}'.format(valid_loss)})\n# assign the best weights to the model\nmodel.load_state_dict(my_best_weights)\n\n#print the epoch of the best validation loss\nprint('Best validation loss: ', min(valid_losses))\nprint('Epoch of best validation loss: ', valid_losses.index(min(valid_losses))+1)\n# print the model summary\n# print(model)\n", "_____no_output_____" ], [ "# load best model weights to the model\nmodel.load_state_dict(my_best_weights)\n# save the best model weights to a file\ntorch.save(model.state_dict(), '{}'.format(ensemble_model_name))", "_____no_output_____" ], [ "# plot the training and validation loss and accuracy 
and a vertical line on the x-axis at the epoch of the best validation loss\nbest_epoch = valid_losses.index(min(valid_losses))+1\nplt.figure(figsize=(12,8))\nplt.plot(range(1,num_epochs+1), train_losses, label='Training Loss')\nplt.plot(range(1,num_epochs+1), valid_losses, label='Validation Loss')\nplt.axvline(best_epoch, color='r', linestyle='--', label='Best Validation Loss Epoch')\nplt.xlabel('Epoch')\nplt.ylabel('Loss')\nplt.legend()\nplt.show()\n\n", "_____no_output_____" ], [ "# load the ensemble model from the file\nmodel.load_state_dict(torch.load('{}'.format(ensemble_model_name)))", "_____no_output_____" ], [ "model.eval()\n# display a side by side comparison of the original label and the predicted label\ndef display_side_by_side(original, prediction):\n #add title to the figure\n fig = plt.figure(figsize=(15,5))\n ax = fig.add_subplot(1, 3, 1)\n ax.imshow(original)\n ax.set_title('Original')\n ax = fig.add_subplot(1, 3, 2)\n ax.imshow(prediction)\n ax.set_title('Prediction')\n # calculate the mean squared error\n mse = (original - prediction)**2\n # display mse next to the other comparisons\n ax = fig.add_subplot(1, 3, 3)\n ax.set_title('MSE: {:.4f}'.format (mse.mean()))\n ax.imshow(mse)\n plt.show()\n\n# display a figure of the mean squared error between the original label and the predicted label on matplotlib\ndef display_mse(original, prediction):\n mse = np.mean((original - prediction)**2)\n print('Mean Squared Error: ', mse)\n plt.imshow((original - prediction)**2)\n plt.show()\n\n# print(training_labels[0].unsqueeze(0).numpy().shape)\n\n# resized_original = training_labels[0].unsqueeze(0).numpy().reshape(8,29)\n# resized_prediction = model(training_data[0].unsqueeze(0).to(device)).detach().cpu().numpy().reshape(8,29)\n\n#draw a correlation coefficient graph between the original label and the predicted label\ndef draw_correlation_coefficient(original, prediction):\n # calculate the correlation coefficient\n corr_coeff = np.corrcoef(original, 
prediction)[0,1]\n # display the correlation coefficient\n print('Correlation Coefficient: ', corr_coeff)\n # plot the correlation coefficient graph\n plt.plot(original, prediction, 'o')\n plt.xlabel('Original')\n plt.ylabel('Prediction')\n plt.title('Correlation Coefficient: {:.2f}'.format(corr_coeff))\n plt.show()\n\n\n#print out value ranges of prediction\n# print('Prediction Range: ', np.min(resized_prediction), np.max(resized_prediction))\n\n# display a side by side comparison of the original label and the predicted label\n# display_side_by_side(resized_original,resized_prediction)\n# display_mse(resized_original,resized_prediction)\n\n# draw_correlation_coefficient(training_labels[0].unsqueeze(0).numpy(),model(training_data[0].unsqueeze(0).to(device)).detach().cpu().numpy())\n\n#find out the correlation coefficient between the original label and the predicted label for the entire dataset\ndef find_correlation_coeff(model):\n # calculate the correlation coefficient\n corr_coeff_list = []\n # separate the pytorch dataset into the data and labels\n # set the model to evaluation mode\n \n \n # evaluate the model on the validation set\n for RAW_data, RAFT_data, BDCN_data, MFCC_data in zip(RAW_val_loader, RAFT_val_loader, BDCN_val_loader, MFCC_val_loader):\n \n image_batch1, labels = RAW_data\n image_batch2, _ = RAFT_data\n image_batch3, _ = BDCN_data\n image_batch4, _ = MFCC_data\n \n print(labels.cpu().numpy()[0][0])\n # print the range of labels\n\n\n # move tensors to GPU if CUDA is available\n if use_cuda:\n image_batch1, labels = image_batch1.to(device), labels.to(device)\n image_batch2, image_batch3, image_batch4 = image_batch2.to(device), image_batch3.to(device), image_batch4.to(device)\n #put model on gpu\n model = model.to(device)\n model.eval()\n \n # validation forward pass\n output = model(image_batch1, image_batch2, image_batch3, image_batch4)\n # calculate the correlation coefficient for every image in the batch\n # for every image in the batch\n for 
i in range(len(output)):\n # calculate the correlation coefficient\n corr_coeff = np.corrcoef(labels.cpu().numpy()[i].T, output[i].detach().cpu().numpy().T)[0,1]\n # append the correlation coefficient to the list\n corr_coeff_list.append(corr_coeff)\n # calculate the mean correlation coefficient\n mean_corr_coeff = np.mean(corr_coeff_list)\n #print the highest correlation coefficient and lowest correlation coefficient\n print('Highest Correlation Coefficient: ', max(corr_coeff_list))\n print('Lowest Correlation Coefficient: ', min(corr_coeff_list))\n\n # plot a histogram of the correlation coefficients\n plt.hist(corr_coeff_list, bins=20)\n plt.xlabel('Correlation Coefficient')\n plt.ylabel('Frequency')\n plt.title('Histogram of Correlation Coefficients')\n plt.show()\n \n # display the mean correlation coefficient\n print('Mean Correlation Coefficient: ', mean_corr_coeff)\n return mean_corr_coeff\n\nmodel.eval()\nprint ( 'Mean Correlation Coefficient: ', find_correlation_coeff(model))", "0.5342679\n0.46087846\n0.7616386\n0.46232122\n0.6952026\n0.61253744\n0.62646216\n0.6098769\nHighest Correlation Coefficient: 0.5295490680356694\nLowest Correlation Coefficient: -0.5312363793180136\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a2d5b668463c18810e80716402a69fe1df753d6
10,375
ipynb
Jupyter Notebook
amftrack/notebooks/validation/frame_frame_validation.ipynb
Cocopyth/MscThesis
60162bc779a3a668e7447b60bb9a4b2a616b8093
[ "MIT" ]
1
2021-06-10T02:51:53.000Z
2021-06-10T02:51:53.000Z
amftrack/notebooks/validation/frame_frame_validation.ipynb
Cocopyth/MscThesis
60162bc779a3a668e7447b60bb9a4b2a616b8093
[ "MIT" ]
null
null
null
amftrack/notebooks/validation/frame_frame_validation.ipynb
Cocopyth/MscThesis
60162bc779a3a668e7447b60bb9a4b2a616b8093
[ "MIT" ]
null
null
null
37.727273
253
0.539952
[ [ [ "%matplotlib widget\n\nimport os \nimport sys \nsys.path.insert(0, os.getenv('HOME')+'/pycode/MscThesis/')\nimport pandas as pd\nfrom amftrack.util import get_dates_datetime, get_dirname, get_plate_number, get_postion_number,get_begin_index\nimport ast\nfrom amftrack.plotutil import plot_t_tp1\nfrom scipy import sparse\nfrom datetime import datetime\nfrom amftrack.pipeline.functions.node_id import orient\nimport pickle\nimport scipy.io as sio\nfrom pymatreader import read_mat\nfrom matplotlib import colors\nimport cv2\nimport imageio\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom skimage.filters import frangi\nfrom skimage import filters\nfrom random import choice\nimport scipy.sparse\nimport os\nfrom amftrack.pipeline.functions.extract_graph import from_sparse_to_graph, generate_nx_graph, sparse_to_doc\nfrom skimage.feature import hessian_matrix_det\nfrom amftrack.pipeline.functions.experiment_class_surf import Experiment, Edge, Node, Hyphae, plot_raw_plus\nfrom amftrack.pipeline.paths.directory import run_parallel, find_state, directory_scratch, directory_project\nfrom amftrack.notebooks.analysis.util import * \nfrom scipy import stats\nfrom scipy.ndimage.filters import uniform_filter1d\nfrom statsmodels.stats import weightstats as stests\nfrom amftrack.pipeline.functions.hyphae_id_surf import get_pixel_growth_and_new_children\nfrom collections import Counter\nfrom IPython.display import clear_output\nfrom amftrack.notebooks.analysis.data_info import *\nfrom IPython.core.interactiveshell import InteractiveShell\nInteractiveShell.ast_node_interactivity = \"all\"\nplt.rcParams.update({\n \"font.family\": \"verdana\",\n'font.weight' : 'normal',\n'font.size': 20})\nfrom amftrack.plotutil import plot_node_skel", "_____no_output_____" ], [ "lapse = 60\nexp = get_exp((38,131,131+lapse),directory_project)", "begin = 2021-08-18 13:24:00 \n end = 2021-08-23 15:24:00\n" ], [ "exp2 = Experiment(38,directory_project)\nexp2.copy(exp)", "_____no_output_____" 
], [ "exp = exp2", "_____no_output_____" ], [ "def transform_skeleton_final_for_show(skeleton_doc,Rot,trans):\n skeleton_transformed={}\n transformed_keys = np.round(np.transpose(np.dot(Rot,np.transpose(np.array(list(skeleton_doc.keys())))))+trans).astype(np.int)\n i=0\n for pixel in list(transformed_keys):\n i+=1\n skeleton_transformed[(pixel[0],pixel[1])]=1\n skeleton_transformed_sparse=sparse.lil_matrix((27000, 60000))\n for pixel in list(skeleton_transformed.keys()):\n i+=1\n skeleton_transformed_sparse[(pixel[0],pixel[1])]=1\n return(skeleton_transformed_sparse)\n\ndef get_skeleton_non_aligned(exp,boundaries,t,directory):\n i = t\n plate = exp.plate \n listdir=os.listdir(directory) \n dates = exp.dates\n date =dates [i]\n directory_name = get_dirname(date, plate)\n path_snap=directory+directory_name\n skel = read_mat(path_snap+'/Analysis/skeleton.mat')\n skelet = skel['skeleton']\n skelet = sparse_to_doc(skelet)\n# Rot= skel['R']\n# trans = skel['t']\n skel_aligned = transform_skeleton_final_for_show(skelet,np.array([[1,0],[0,1]]),np.array([0,0]))\n output = skel_aligned[boundaries[2]:boundaries[3],boundaries[0]:boundaries[1]].todense()\n kernel = np.ones((5,5),np.uint8)\n output = cv2.dilate(output.astype(np.uint8),kernel,iterations = 1)\n return(output)", "_____no_output_____" ], [ "from amftrack.util import get_skeleton\n\ndef plot_raw_plus_random(exp,compress=5,ranges = 1000):\n t0 = choice(range(exp.ts))\n node_ch = choice([node for node in exp.nodes if node.is_in(t0) and node.degree(t0)==1])\n# node_ch = choice(exp.nodes)\n# t0 = choice(node_ch.ts())\n node_ch.show_source_image(t0,t0+1)\n for index,t in enumerate([t0,t0+1]):\n date = exp.dates[t]\n anchor_time = t0 \n center = node_ch.pos(anchor_time)[1],node_ch.pos(anchor_time)[0]\n window = (center[0]-ranges,center[0]+ranges,center[1]-ranges,center[1]+ranges)\n skelet= get_skeleton_non_aligned(exp,window,t,exp.directory)\n tips = [node.label for node in exp.nodes if t in node.ts() and node.degree(t) 
==1 and node.pos(t)[1]>=window[0]-ranges and node.pos(t)[1]<=window[1]+ranges and node.pos(t)[0]>=window[2]-ranges and node.pos(t)[0]<=window[3]+ranges]\n junction = [node.label for node in exp.nodes if t in node.ts() and node.degree(t) >=2 and node.pos(t)[1]>=window[0]-ranges and node.pos(t)[1]<=window[1]+ranges and node.pos(t)[0]>=window[2]-ranges and node.pos(t)[0]<=window[3]+ranges]\n directory_name = get_dirname(date,exp.plate)\n path_snap = exp.directory + directory_name\n skel = read_mat(path_snap + \"/Analysis/skeleton_pruned_realigned.mat\")\n Rot = skel[\"R\"]\n trans = skel[\"t\"]\n im = read_mat(path_snap+'/Analysis/raw_image.mat')['raw']\n size = 8\n fig = plt.figure(figsize = (9,9))\n ax = fig.add_subplot(111)\n ax.imshow(im[(window[2]//compress):(window[3]//compress),(window[0]//compress):(window[1]//compress)])\n ax.imshow(cv2.resize(skelet,(2*ranges//compress,2*ranges//compress)),alpha = 0.2)\n shift=(window[2],window[0])\n greys = [1,0.5]\n for i,node_list in enumerate([tips,junction]):\n grey = greys[i]\n bbox = dict(boxstyle=\"circle\", fc=colors.rgb2hex((grey, grey, grey)))\n # ax.text(right, top, time,\n # horizontalalignment='right',\n # verticalalignment='bottom',\n # transform=ax.transAxes,color='white')\n for node in node_list:\n # print(self.positions[ts[i]])\n if node in exp.positions[t].keys():\n xs,ys = exp.positions[t][node]\n rottrans = np.dot(np.linalg.inv(Rot), np.array([xs, ys] - trans))\n ys, xs = round(rottrans[0]), round(rottrans[1])\n tex = ax.text(\n (xs - shift[1]) // compress,\n (ys - shift[0]) // compress,\n str(node),\n ha=\"center\",\n va=\"center\",\n size=size,\n bbox=bbox,\n )\n plt.show()", "_____no_output_____" ], [ "plt.close('all')\nplot_raw_plus_random(exp,compress=5,ranges = 700)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code" ] ]
4a2d5ed9b6e1a76c33b871039152efe9e9672f40
452,502
ipynb
Jupyter Notebook
Assignment_5/bank_notes/Code Bak Note.ipynb
KyleLeePiupiupiu/CS677_Assignment
c38278e81f4e58cc6ef020fade2c075e9fc09bf7
[ "MIT" ]
null
null
null
Assignment_5/bank_notes/Code Bak Note.ipynb
KyleLeePiupiupiu/CS677_Assignment
c38278e81f4e58cc6ef020fade2c075e9fc09bf7
[ "MIT" ]
null
null
null
Assignment_5/bank_notes/Code Bak Note.ipynb
KyleLeePiupiupiu/CS677_Assignment
c38278e81f4e58cc6ef020fade2c075e9fc09bf7
[ "MIT" ]
null
null
null
915.995951
202,462
0.951541
[ [ [ "import pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import confusion_matrix", "_____no_output_____" ] ], [ [ "# Question1", "_____no_output_____" ] ], [ [ "bk = pd.read_csv('./data_banknote_authentication.csv')\nbk = bk.rename({'class':'Class'}, axis='columns')\n\ncolor = []\nfor c in bk.Class:\n if c == 0:\n color.append('Green')\n else:\n color.append('red')\n\nbk['Color'] = color", "_____no_output_____" ], [ "bk0 = bk[bk.Class == 0]\nbk1 = bk[bk.Class == 1]\nprint(bk0.describe())\nprint()\nprint(bk1.describe())\nprint()\nprint(bk.describe())", " variance skewness curtosis entropy Class\ncount 762.000000 762.000000 762.000000 762.000000 762.0\nmean 2.276686 4.256627 0.796718 -1.147640 0.0\nstd 2.019348 5.138792 3.239894 2.125077 0.0\nmin -4.285900 -6.932100 -4.941700 -8.548200 0.0\n25% 0.883345 0.450063 -1.709700 -2.228250 0.0\n50% 2.553100 5.668800 0.700605 -0.552380 0.0\n75% 3.884450 8.691975 2.652925 0.423257 0.0\nmax 6.824800 12.951600 8.829400 2.449500 0.0\n\n variance skewness curtosis entropy Class\ncount 610.000000 610.000000 610.000000 610.000000 610.0\nmean -1.868443 -0.993576 2.148271 -1.246641 1.0\nstd 1.881183 5.404884 5.261811 2.070984 0.0\nmin -7.042100 -13.773100 -5.286100 -7.588700 1.0\n25% -3.061450 -5.810025 -1.357500 -2.458375 1.0\n50% -1.806100 0.172775 0.373720 -0.661650 1.0\n75% -0.541770 3.189275 5.626350 0.341790 1.0\nmax 2.391700 9.601400 17.927400 2.135300 1.0\n\n variance skewness curtosis entropy Class\ncount 1372.000000 1372.000000 1372.000000 1372.000000 1372.000000\nmean 0.433735 1.922353 1.397627 -1.191657 0.444606\nstd 2.842763 5.869047 4.310030 2.101013 0.497103\nmin -7.042100 -13.773100 -5.286100 -8.548200 0.000000\n25% -1.773000 -1.708200 -1.574975 -2.413450 
0.000000\n50% 0.496180 2.319650 0.616630 -0.586650 0.000000\n75% 2.821475 6.814625 3.179250 0.394810 1.000000\nmax 6.824800 12.951600 17.927400 2.449500 1.000000\n" ] ], [ [ "# Question2", "_____no_output_____" ] ], [ [ "# Split data and pairplot\n\n## bk0\nx0 = bk0[['variance', 'skewness', 'curtosis', 'entropy']]\ny0 = bk0[['Class']]\nx0Train, x0Test, y0Train, y0Test = train_test_split(x0, y0, test_size=0.5, random_state=0)\n\nf0 = sns.pairplot(x0Train)\nf0.fig.suptitle(\"class 0\")\n\n## bk1\nx1 = bk1[['variance', 'skewness', 'curtosis', 'entropy']]\ny1 = bk1[['Class']]\nx1Train, x1Test, y1Train, y1Test = train_test_split(x1, y1, test_size=0.5, random_state=0)\n\nf1 = sns.pairplot(x1Train)\nf1.fig.suptitle(\"class 1\")", "_____no_output_____" ], [ "# easy model\nf = plt.figure()\nf.set_size_inches(12,24)\n\n## variance\nva = f.add_subplot(4,2,1)\na0 = x0Train.variance\na1 = x1Train.variance\nva.plot(a0, np.zeros_like(a0) + 0, '.', color = 'green')\nva.plot(a1, np.zeros_like(a1) + 0.1, '.', color = 'red')\nva.set_title('variance')\n\nvah = f.add_subplot(4,2,2)\nvah.hist(a0, color='green')\nvah.hist(a1, color = 'red', alpha=0.3)\nvah.set_title('variance')\n\n## skewness \nsk = f.add_subplot(4,2,3)\na0 = x0Train.skewness\na1 = x1Train.skewness\nsk.plot(a0, np.zeros_like(a0) + 0, '.', color = 'green')\nsk.plot(a1, np.zeros_like(a1) + 0.1, '.', color = 'red')\nsk.set_title('skewness')\n\nskh = f.add_subplot(4,2,4)\nskh.hist(a0, color='green')\nskh.hist(a1, color = 'red', alpha=0.3)\nskh.set_title('skewness')\n\n## curtosis \ncu = f.add_subplot(4,2,5)\na0 = x0Train.curtosis\na1 = x1Train.curtosis\ncu.plot(a0, np.zeros_like(a0) + 0, '.', color = 'green')\ncu.plot(a1, np.zeros_like(a1) + 0.1, '.', color = 'red')\ncu.set_title('curtosis')\n\ncuh = f.add_subplot(4,2,6)\ncuh.hist(a0, color='green')\ncuh.hist(a1, color = 'red', alpha=0.3)\ncuh.set_title('curtosis')\n\n## entropy \nen = f.add_subplot(4,2,7)\na0 = x0Train.entropy\na1 = x1Train.entropy\nen.plot(a0, 
np.zeros_like(a0) + 0, '.', color = 'green')\nen.plot(a1, np.zeros_like(a1) + 0.1, '.', color = 'red')\nen.set_title('entropy')\n\nenh = f.add_subplot(4,2,8)\nenh.hist(a0, color='green')\nenh.hist(a1, color = 'red', alpha=0.3)\nenh.set_title('entropy')\n\n\n", "_____no_output_____" ], [ "# Predict lable\nx = bk[['variance', 'skewness', 'curtosis', 'entropy']]\ny = bk[['Class']]\nxTrain, xTest, yTrain, yTest = train_test_split(x, y, test_size=0.5, random_state=0)\n\nyPredict = []\nfor v in xTest.variance:\n if v >= 0 :\n yPredict.append(0)\n else:\n yPredict.append(1)\n\n# True False\ntp = 0\ntn = 0\nfp = 0\nfn = 0\nacc = 0\nfor (p, t) in zip(yPredict, yTest.Class):\n if p == 0 and t == 0:\n tp += 1\n elif p == 1 and t == 1:\n tn += 1\n elif p == 0 and t == 1:\n fp += 1\n elif p == 1 and t == 0:\n fn += 1\n \n if p == t:\n acc = acc + 1\n\nprint(\"TP:{} FP:{} TN:{} FN:{} TPR:{} TNR:{} Accuracy:{}\".format(tp, fp, tn, fn, tp/(tp + fn), tn/(tn + fp), acc / len(yPredict)))\n\n## Confusion Matrix I choose \n# 0 is good 1 is bad\ntemp = confusion_matrix(yTest, yPredict)\nprint(temp)\n\ntn = temp[0][0]\nfn = temp[1][0]\ntp = temp[1][1]\nfp = temp[0][1]\n\ntpr = tp / (tp + fn)\ntnr = tn / (tn + fp)\n\nprint('TPR = {}, TNR = {}, tp fp tn fn = {} {} {} {}'.format(tpr, tnr, tp, fp, tn, fn))\n", "_____no_output_____" ] ], [ [ "# Qestion3", "_____no_output_____" ] ], [ [ "# KNN\nkList = [3,5,7,9,11]\naccuracy = []\nfor k in kList:\n knn = KNeighborsClassifier(n_neighbors=k)\n knn.fit(xTrain, yTrain)\n yPredict = knn.predict(xTest)\n accuracy.append(accuracy_score(yTest, yPredict))\n \nplt.plot(kList, accuracy)\nprint(accuracy)", "[0.9985422740524781, 0.9985422740524781, 1.0, 0.9941690962099126, 0.9941690962099126]\n" ], [ "# k = 7 is optimal\nknn = KNeighborsClassifier(n_neighbors=7)\nknn.fit(xTrain, yTrain)\nyPredict = knn.predict(xTest)\n# True False\ntp = 0\ntn = 0\nfp = 0\nfn = 0\nacc = 0\nfor (p, t) in zip(yPredict, yTest.Class):\n if p == 0 and t == 0:\n tp += 1\n elif p 
== 1 and t == 1:\n tn += 1\n elif p == 0 and t == 1:\n fp += 1\n elif p == 1 and t == 0:\n fn += 1\n \n if p == t:\n acc = acc + 1\n\nprint(\"TP:{} FP:{} TN:{} FN:{} TPR:{} TNR:{} Accuracy:{}\".format(tp, fp, tn, fn, tp/(tp + fn), tn/(tn + fp), acc / len(yPredict)))\n\n", "TP:385 FP:0 TN:301 FN:0 TPR:1.0 TNR:1.0 Accuracy:1.0\n" ], [ "# BU ID 64501194\n# Take 1 1 9 4\nx = {'variance':[1], 'skewness':[1], 'curtosis':[9], 'entropy':[4]}\nx = pd.DataFrame.from_dict(x)\n## my simple classifier\nyPredict = 1\nprint(\"my simple classifier: {}\".format(yPredict))\n## for best knn\nknn = KNeighborsClassifier(n_neighbors=7)\nknn.fit(xTrain, yTrain)\nyPredict = knn.predict(x)\nprint(\"knn(n=7): {}\".format(yPredict))\n\n\n", "my simple classifier: 1\nknn(n=7): [0]\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
4a2d5ff43efb9aa51a5d5ae5cde45104b3467ba2
48,513
ipynb
Jupyter Notebook
Analyzing the Stroop Effect.ipynb
Harveye2/Statistics-with-Python-Analysing-the-Stroop-Effect
c4983f1f06942e9584740171d3e6b3efa0dc3cce
[ "MIT" ]
null
null
null
Analyzing the Stroop Effect.ipynb
Harveye2/Statistics-with-Python-Analysing-the-Stroop-Effect
c4983f1f06942e9584740171d3e6b3efa0dc3cce
[ "MIT" ]
null
null
null
Analyzing the Stroop Effect.ipynb
Harveye2/Statistics-with-Python-Analysing-the-Stroop-Effect
c4983f1f06942e9584740171d3e6b3efa0dc3cce
[ "MIT" ]
null
null
null
114.148235
14,820
0.834725
[ [ [ "# Analysing the Stroop Effect", "_____no_output_____" ], [ "## Introduction", "_____no_output_____" ], [ "The aim of this project was to investigate a classic phenomenon from experimental psychology called the Stroop Effect. The Stroop Effect is a demonstration of interference in the reaction time of a task. The Stroop task investigated for this project was a list of congruent and incongruent coloured words. The congruent words were colour words whose names matched the colors in which they were printed (eg. the word 'blue' is printed in blue ink). The incongruent words were colour words whose names did not match the colors in which they were printed (eg. the word 'blue' is printed in red ink). \n\nFor the purpose of this project, a data set was provided, containing the reaction times of 24 paricipants, to name the congruent and incongruent words outloud. The aim of this project was to identify if, on average, indivduals have a longer reaction time to name the colours of incongruent words compared to congruent words.", "_____no_output_____" ], [ "## Methods", "_____no_output_____" ], [ "The aim was to investigate if, on average, indivduals have a longer reaction time to name the colours of the incongruent words compared to congruent words. However, we are dealing with a small sample set of just 24 data points for each test, and any diffreence observed between the reaction times from this dataset could just be by random chance. Therefore, we would like to determine the liklihood of observing a difference in the mean reaction times for the population at large. To do this, we can perform a hypothesis test to assess whether the sample means (x_bar) are different because the two population means (µ) are different, or just by chance. The hypothesis for this task is as follows:\n\nThe null hypothesis: There is no difference between the average reaction time of naming the colours of the congruent words and the incongrunet words. 
\n\nThe alternative hypothesis: There is a difference between the average reaction time of naming the colours of the congruent words and the incongrunet words. \n\nIn summary: Ho: µ_diff = 0 and HA: µdiff is not equal to 0 (where µdiff is the difference in the mean reaction times of naming the colours for the congruent and incongruent words). \n\nIn order to test if the null hypothesis is true, a paired t-test can be implemented. In this case a t-test is preferred over a z-test, due to the fact that the sample size is small (24 individuals) and a t-test addresses the uncertainty of the standard error estimate for small sample sizes. The t-test is considered quite robust to violations of the normal distribution. This means that, without knowing the population paramters, we can assume a normal population distribution, without serious error being introduced into the test. Importantly, the reaction time results between the two tests are not independent of each other. The reaction speed of one individual in the congruent test is likely not independent of their reaction speed in the incongruent test eg. if a person is good at such tests, they are likely to have relatively quick reaction times for both the congruent and incongruent words. Therefore, these data sets are said to be paired. Thus, a paired t-test will be performed in order to asses the hypothesis.\n\nBootstrapping is another method which can be implemented here to test if the null hypothesis is true. Bootstrapping is a simulation based method, which takes samples from the original sample, with replacement. Sample statistics can be derived from the bootstrap samples, and can be replicated multiple times to build-up a sampling distribution which gives us an idea of what the population sampling distribution would look like if we had access to it. In this case, we are dealing with a very small sample size of 24, which makes it difficult to assume normality. 
Therefore, bootstrap simulations allow us a build up a sampling distribution, rather than just assuming normality, from which we can measure confidence intervals and determine p-values to test the validity of the null hypothesis.", "_____no_output_____" ], [ "## Descriptive Statistics", "_____no_output_____" ] ], [ [ "import pandas as pd\ndata = pd.read_csv('stroopdata.csv')\ndata.describe()", "_____no_output_____" ] ], [ [ "From the descriptive statistics table, the measures of centre of the data are provided by the mean and median (the 50th percentile) values. These data give an indication of how the reaction times between the two data sets differ. The mean of the congruent data set is lower than the mean of the incongruent data set (14.05 vs. 22.02. respectively), which suggests that, on average, the incongruent test takes a longer time to complete than the congruent test. The standard deviation (std) value provides an insight into the variability in the 2 data sets. The standard deviation is higher for the incongruent reaction times (4.80) than the congruent reaction times (3.56). This suggests that the individual results for the incongruent data are more varied. 
However, further exploratory data analysis and statistical analysis is required to gain better insights from these descriptive statistics.", "_____no_output_____" ], [ "## Data Visualisation", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\nimport pandas as pd\ndata = pd.read_csv('stroopdata.csv')\n\n_ = data.boxplot()\n_ = plt.ylabel('reaction time (s)')\nplt.show()\n_ = plt.hist(data['Congruent'], bins = 24)\n_ = plt.xlabel('reaction times')\n_ = plt.ylabel('counts')\n_ = plt.title('Congruent Test')\n_ = plt.xlim(5, 40)\nplt.show()\n_ = plt.hist(data['Incongruent'], bins=24)\n_ = plt.xlabel('reaction times')\n_ = plt.ylabel('counts')\n_ = plt.title('Incongruent Test')\n_ = plt.xlim(5, 40)\nplt.show()", "_____no_output_____" ] ], [ [ "The boxplot clearly shows the difference in the interquartile range (IQR) of the congruent test reaction times comared to the incongruent test reaction times i.e. the middle 50% of the congruent test reaction times are lower than the middle 50% of the incongruent test reaction times. In the case of the boxplot for the incongruent data, two data points are visible at approximately 35 seconds, which is more than 2 IQR away from the median (2 * 5.33 + 21.02 = 31.68) and therefore, these points can be denoted as outliers. These outliers will increase the mean of the incongruent data and thus result in a right-skewed distribution of the reaction times, as illustrated in the histogram. 
Although, outliers are not present in the congruent data boxplot, the distribution of reaction times is also right-skewed, as shown in the histogram of the data set.", "_____no_output_____" ], [ "## Statistical Tests", "_____no_output_____" ] ], [ [ "import scipy.stats as stats\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n#set random number generator for reproducibility\nnp.random.seed(123456789)\n\n#load data and get arrays of both data sets\ndata = pd.read_csv('stroopdata.csv')\ncon = np.array(data['Congruent'])\nincon = np.array(data['Incongruent'])\n\n#Define bootsrap replicate function\ndef bootstrap_replicate(data, func):\n '''Generate bootstrap replicate of 1D data'''\n bs_sample = np.random.choice(data, len(data))\n return func(bs_sample)\n\n#Define multiple bootsrap replicates function\ndef draw_bs_reps(data, func, size=1):\n '''draw bootstrap replicates'''\n bs_replicates = np.empty(size)\n for i in range(size):\n bs_replicates[i] = bootstrap_replicate(data, func)\n return bs_replicates\n\n# GENERATE BOOTSTRAP REPLICATES FOR CONFIDENCE INTERVALS\n# 1) Prepare data - flatten con and incon to 1D arrays\ndiff_obs = np.mean(con) - np.mean(incon)\ncon_1D = con.flatten()\nincon_1D = incon.flatten()\n\n# 2) compute bs_replicates for confidence interval\nbs_replicates_con = draw_bs_reps(con_1D, np.mean, size=10000)\nbs_replicates_incon = draw_bs_reps(incon_1D, np.mean, size=10000)\nbs_replicates_diff_mean = bs_replicates_con - bs_replicates_incon\nconf_int = np.percentile(bs_replicates_diff_mean, [2.5, 97.5])\n\n# 3) plot the difference in mean from bs_replicates \n_=plt.hist(bs_replicates_diff_mean, bins=30, normed=True, edgecolor = 'black')\n_=plt.title('Bootstrap replicates for the difference in mean between Congruent and Incongruent data')\n_=plt.xlabel('Difference in Mean (s)')\n_=plt.ylabel('PDF')\nplt.show()\n\n# 4) print diff mean & confidence intervals\nprint('Difference in observed mean =', diff_obs, 's')\nprint('95% 
Confidence Interaval from bs_reps =', conf_int, 's')\n\n# GENERATE BOOTSTRAP RELICATES FOR p-VALUES\n# 1) Concatenate data for bootstrap samples\ndata_concat = np.concatenate((con, incon))\nconcat_mean = np.mean(data_concat)\n\n# 2) shift arrays & flatten shifted arrays to 1D \ncon_shift = con - np.mean(con) + concat_mean\nincon_shift = incon - np.mean(incon) + concat_mean\ncon_shift_1D = con_shift.flatten()\nincon_shift_1D = incon_shift.flatten()\n\n# 3) compute multiple bs_replicates of shifted arrays\nbs_replicates_con_shift = draw_bs_reps(con_shift_1D, np.mean, size=10000)\nbs_replicates_incon_shift = draw_bs_reps(incon_shift_1D, np.mean, size=10000)\n\n# 4) get difference of means of replicates\nbs_replicates_diff_mean_shift = bs_replicates_con_shift - bs_replicates_incon_shift\n\n# 4) plot the difference of means of bs_replicates for null hypothesis\n_=plt.hist(bs_replicates_diff_mean_shift, bins=30, normed=True, edgecolor = 'black')\n_=plt.title('Bootstrap replicates for the difference in mean between shifted Congruent and Incongruent data')\n_=plt.xlabel('Difference in Mean (s)')\n_=plt.ylabel('PDF under null')\nplt.show()\n\n# 5) compute p-value: p_bs\np_bs = np.sum(bs_replicates_diff_mean_shift <= diff_obs) / len(bs_replicates_diff_mean_shift)\n\n# 6) print p-value and average bootstrap diff in mean\nprint('p-value bs_rep = ', p_bs)\nprint('Difference in bs_replicates mean =', np.sum(bs_replicates_diff_mean_shift) / len(bs_replicates_diff_mean_shift))\n\n# METHOD 2: Use t-test on data set to find p-values \nt_test = stats.ttest_rel(con, incon)\nprint(t_test)", "_____no_output_____" ] ], [ [ "## Results & Discussion", "_____no_output_____" ], [ "The differnece between the average time taken to complete the congruent test and the incongruent test was -7.96 seconds. This result was calculated from a sample size of just 24 individuals, and indicates that on average, indivduals take almost 8 seconds longer to complete the incongruent test. 
Due to the small sample size, the results were simulated 10000 times, using bootstrap simulations. The 95% confidence intervals computed from the bootstrap samples show that we are 95% confident that it takes between 5.64 and 10.36 seconds longer to complete the incongruent test compared to the congruent test. \n\nIn order to determine that this result was not obtained by random chance, a hypothesis test was carried out. The null hypothesis states that the mean times taken to complete the congruent test and the incongruent test are equal. In order to test this hypothesis, both datasets were shifted to have the same mean, and then 10000 bootstrap simulations were carried out on the shifted data. The difference in mean was calulated from the boostrap replicates and the results were plotted. As shown in the plot, the distribution of the difference in means is centred at around 0, and the average difference in mean, from the boostrap replicates, was calculated as -0.01 seconds. \nThe p-value was calulated as approximately 0, which indicates that the probability of getting the observed difference of mean from the experiment (-7.96 s) is almost 0, given that the null hypothesis is true, which states that the mean difference between the time taken to complete both tests is 0. Therefore, given that the p-value is approximately 0, we can reject the null hypothesis and conclude that there is a statistically significant difference between the time taken to complete the congruent test and incongruent test, and that on average, individuals take longer to complete the inconguent test. \n\nAs an alternative approach, a t-test was carried out on the paired data, using a t-test function from the Scipy package. This function computed a test statistic of -8.02 and a p-value of approximately 0. 
Therefore, these results are inline with the results obtained from the boostrap simulations method.\n", "_____no_output_____" ], [ "## Conclusion", "_____no_output_____" ], [ "Overall, these results matched with the expected results. In the congruent test the colours of the words match the spelling of the words. However, in the incongruent test, the colour of the words do not match the spelling of the words, which results in confusion of the information your brain is receiving. As a result, it is expected that the incongruent test will take a longer time to complete than the congruent test (ref: https://faculty.washington.edu/chudler/words.html#seffect).", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ] ]
4a2d776d96abd62fe1abe27e76f4777bdb7031f4
9,554
ipynb
Jupyter Notebook
notebooks/devel/speed_test_bayes_opt.ipynb
Joshuaalbert/bayes_tec_screens
655c4ec29427c7bb0616d5752c34207714a0151c
[ "Apache-2.0" ]
null
null
null
notebooks/devel/speed_test_bayes_opt.ipynb
Joshuaalbert/bayes_tec_screens
655c4ec29427c7bb0616d5752c34207714a0151c
[ "Apache-2.0" ]
null
null
null
notebooks/devel/speed_test_bayes_opt.ipynb
Joshuaalbert/bayes_tec_screens
655c4ec29427c7bb0616d5752c34207714a0151c
[ "Apache-2.0" ]
null
null
null
40.483051
287
0.511409
[ [ [ "import tensorflow as tf\nfrom bayes_tec.bayes_opt.maximum_likelihood_tec import *\nimport numpy as np\nfloat_type = tf.float64\n\ndef test_solve():\n \n import numpy as np\n from seaborn import jointplot\n import pylab as plt\n plt.style.use('ggplot')\n freqs = np.linspace(120e6,160e6,20)\n tec_conversion = -8.448e9/freqs\n true_tec = np.random.uniform(-0.2,0.2,size=int(1e3))#np.array([0.004]*1000)\n noise_rads = np.random.uniform(0.05,0.8,size=int(1e3))#np.array([0.3]*1000)# a lot of noise on almost flat TEC is hard\n true_phase = true_tec[...,None] * tec_conversion\n phase = true_phase + noise_rads[...,None]*np.random.normal(size=true_phase.shape)\n\n tec_min, phase_sigma = solve_ml_tec(phase,freqs,batch_size=int(1e3),verbose=True)\n plt.scatter(true_tec,tec_min)\n plt.xlabel(\"True tec\")\n plt.ylabel(\"Pred tec\")\n plt.show()\n \n \n jointplot(true_tec,tec_min,kind='hex')\n plt.show()\n jointplot(true_tec,tec_min,kind='kde',alpha=0.6,marker='+',color='k')\n plt.show()\n \n plt.scatter(noise_rads, phase_sigma)\n plt.xlabel(\"Pred phase noise\")\n plt.ylabel(\"True phase noise\")\n plt.show()\n jointplot(noise_rads, phase_sigma,kind='hex')\n plt.show()\n jointplot(noise_rads, phase_sigma,kind='kde',alpha=0.6,marker='+',color='k')\n plt.show()\n\ndef diagnostics():\n \n import numpy as np\n import pylab as plt\n plt.style.use('ggplot')\n freqs = np.linspace(120e6,160e6,20)\n tec_conversion = -8.448e9/freqs\n true_tec = np.random.uniform(-0.3,0.3,size=1000)#np.array([0.004]*1000)\n noise_rads = np.array([0.3]*1000)# a lot of noise on almost flat TEC is hard\n true_phase = true_tec[...,None] * tec_conversion\n phase = true_phase + noise_rads[...,None]*np.random.normal(size=true_phase.shape)\n \n _tec = true_tec[0]\n \n with tf.Session(graph=tf.Graph()) as sess:\n t_pl = tf.placeholder(float_type)\n phase_pl = tf.placeholder(float_type)\n tec_conversion_pl = tf.placeholder(float_type)\n X_init, Y_init = init_population(phase_pl,tec_conversion_pl,N=5)\n Xcur, 
Ycur = X_init, Y_init\n X_,Y_,aq_,fmean_,fvar_ = [],[],[],[],[]\n for i in range(21):\n res = bayes_opt_iter(phase_pl, tec_conversion_pl, Xcur, Ycur, max_tec=0.4, t = t_pl)\n X_.append(res.X)\n Y_.append(res.Y)\n aq_.append(res.aq)\n fmean_.append(res.fmean)\n fvar_.append(res.fvar)\n Xcur = res.X\n Ycur = res.Y\n X, Y, aq, fmean, fvar = sess.run([X_, Y_, aq_, fmean_, fvar_], feed_dict={t_pl:1.,\n phase_pl:phase,\n tec_conversion_pl:tec_conversion})\n \n indices = (np.arange(Y[-1].shape[0],dtype=np.int64), np.argmin(Y[-1][:,:,0],axis=1), np.zeros(Y[-1].shape[0], dtype=np.int64))\n tec_min = X[-1][indices]\n plt.scatter(tec_min, true_tec)\n plt.xlabel(\"pred. tec\")\n plt.ylabel(\"true tec\")\n plt.title(\"Scatter of solutions\")\n plt.show()\n\n plt.hist(indices[1],bins=20)\n plt.title(\"Where was fmin attained\")\n plt.xlabel(\"iteration including random init pop\")\n plt.show()\n\n scatter = []\n for j in range(Y[-1].shape[1]):\n indices = (np.arange(Y[-1].shape[0],dtype=np.int64), np.argmin(Y[-1][:,:j+1,0],axis=1), np.zeros(Y[-1].shape[0], dtype=np.int64))\n tec_j = X[-1][indices]\n scatter.append(np.percentile(np.abs(tec_j - true_tec),95))\n\n plt.plot(scatter)\n plt.title(\"95% conf interval of |true_tec - pred_tec|\")\n plt.xlabel(\"iteration\")\n plt.ylabel(\"mean delta tec\")\n plt.show()\n\n\n tec_array = np.linspace(-0.4, 0.4, 100)\n for i, (x, y, a, f, v) in enumerate(zip(X, Y, aq, fmean, fvar)):\n y = y - y.mean(1,keepdims=True)\n y = y / (np.std(y,axis=1,keepdims=True) + 1e-6)\n \n \n plt.plot(tec_array, f[0,:], label=r'$\\mathbb{E}[f]$')\n plt.fill_between(tec_array, f[0,:] - 2*np.sqrt(v[0,:]), f[0,:] + 2*np.sqrt(v[0,:]),alpha=0.5, label=r'$\\pm 2\\sigma_f$')\n a = a - np.min(a,axis=1,keepdims=True)\n a = 3*a/np.max(a,axis=1,keepdims=True)\n plt.plot(tec_array,a[0,:],label='norm. 
acquisition func.')\n plt.scatter(x[0, :-1, 0], y[0,:-1, 0],c='k',label='sampled points')\n plt.scatter(x[0, -1, 0], y[0,-1, 0],c='red',label='New sample point')\n plt.vlines(_tec,-2,2,label='global. min',linestyles='--')\n plt.xlabel(\"tec\")\n plt.ylabel(\"normalized neg-log-likelihood\")\n plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,\n ncol=2, mode=\"expand\", borderaxespad=0.)\n plt.title(\"Iteration {}\".format(i))\n plt.show()\n \ndef test_speed(N=1e6):\n \n import numpy as np\n from timeit import default_timer\n freqs = np.linspace(120e6,160e6,20)\n tec_conversion = -8.448e9/freqs\n true_tec = np.random.uniform(-0.2,0.2,size=int(N))#np.array([0.004]*1000)\n noise_rads = np.random.uniform(0.05,0.8,size=int(N))#np.array([0.3]*1000)# a lot of noise on almost flat TEC is hard\n true_phase = true_tec[...,None] * tec_conversion\n phase = true_phase + noise_rads[...,None]*np.random.normal(size=true_phase.shape)\n\n t0 = default_timer()\n tec_min, phase_sigma = solve_ml_tec(phase,freqs,batch_size=int(N),verbose=True)\n t1 = default_timer()\n t = t1 - t0\n \n print(\"Time {} [time] {} [samples/s] {} [ms/sample]\".format(t,N/t, t/N*1000))\n \n ", "/net/lofar1/data1/albert/miniconda3/envs/kerastf/lib/python3.6/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. 
In future, it will be treated as `np.float64 == np.dtype(float).type`.\n from ._conv import register_converters as _register_converters\n" ], [ "# test_speed(N=5e6)", "2018-09-15 15:11:39,087 Starting batch 0\n2018-09-15 16:20:39,947 Finished batch 0\nTime 4150.763997003436 [time] 1204.597515929514 [samples/s] 0.8301527994006873 [ms/sample]\n" ], [ "from bayes_tec.datapack import DataPack\nfrom timeit import default_timer\n\nwith DataPack('../../scripts/data/killms_datapack.hdf5') as datapack:\n phase,axes = datapack.phase\n _, freqs = datapack.get_freqs(axes['freq'])\n Npol, Nd, Na, Nf, Nt = phase.shape\n phase = phase.transpose((0,1,2,4,3))\n phase = phase.reshape((-1, Nf))\n t0 = default_timer()\n tec_ml, sigma_ml = solve_ml_tec(phase, freqs, batch_size=int(1e6),max_tec=0.3, n_iter=21, t=1.,num_proposal=75, verbose=True)\n t1 = default_timer()\n print(t1-t0)\n tec_ml = tec_ml.reshape((Npol, Nd, Na, Nt))\n sigma_ml = sigma_ml.reshape((Npol, Nd, Na, Nt))\n with h5py.File('ml_results.hdf5') as f:\n f['tec'] = tec_ml\n f['sigma'] = sigma_ml", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code" ] ]
4a2d7db2866958ddd21bf87b454f38961c8abdbe
4,423
ipynb
Jupyter Notebook
notebooks/4. Pipeline final/5. Armado de poliza_ID.feather.ipynb
Voolkia/ML
9b8de9d87cddeb8d13b3ac757f18e68c110c7408
[ "BSD-3-Clause" ]
null
null
null
notebooks/4. Pipeline final/5. Armado de poliza_ID.feather.ipynb
Voolkia/ML
9b8de9d87cddeb8d13b3ac757f18e68c110c7408
[ "BSD-3-Clause" ]
null
null
null
notebooks/4. Pipeline final/5. Armado de poliza_ID.feather.ipynb
Voolkia/ML
9b8de9d87cddeb8d13b3ac757f18e68c110c7408
[ "BSD-3-Clause" ]
null
null
null
21.681373
80
0.417364
[ [ [ "import pandas as pd", "_____no_output_____" ], [ "table = \"TB_POLIZAS_1a.tsv\"\nlocation = \"../../data/raw\"\nsep = '\\t'\nencoding = 'latin1'\ndecimal = ','", "_____no_output_____" ], [ "df_polizas = pd.read_csv(f\"{location}/{table}\",\n sep=sep,\n encoding=encoding,\n decimal=decimal,\n usecols = [\"NUM_SECU_POL\", \"CIF_ID\"])", "_____no_output_____" ], [ "df_polizas.head()", "_____no_output_____" ], [ "df_polizas.drop_duplicates(inplace=True)", "_____no_output_____" ], [ "df_polizas.reset_index(drop=True).to_feather(\"poliza_x_cliente.feather\")", "_____no_output_____" ], [ "df_polizas.shape", "_____no_output_____" ], [ "df_polizas['CIF_ID'].nunique()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a2da9e18d6bae92b1695869f96fc6d560af1d66
566,966
ipynb
Jupyter Notebook
module4-sequence-your-narrative/LS_DS_124_Sequence_your_narrative_Assignment.ipynb
strangelycutlemon/DS-Unit-1-Sprint-2-Data-Wrangling-and-Storytelling
040edc65c4430fe07be64f978e5e4a1cb4ea0a99
[ "MIT" ]
null
null
null
module4-sequence-your-narrative/LS_DS_124_Sequence_your_narrative_Assignment.ipynb
strangelycutlemon/DS-Unit-1-Sprint-2-Data-Wrangling-and-Storytelling
040edc65c4430fe07be64f978e5e4a1cb4ea0a99
[ "MIT" ]
null
null
null
module4-sequence-your-narrative/LS_DS_124_Sequence_your_narrative_Assignment.ipynb
strangelycutlemon/DS-Unit-1-Sprint-2-Data-Wrangling-and-Storytelling
040edc65c4430fe07be64f978e5e4a1cb4ea0a99
[ "MIT" ]
null
null
null
92.853914
102,870
0.678732
[ [ [ "<a href=\"https://colab.research.google.com/github/strangelycutlemon/DS-Unit-1-Sprint-2-Data-Wrangling-and-Storytelling/blob/master/module4-sequence-your-narrative/LS_DS_124_Sequence_your_narrative_Assignment.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "_Lambda School Data Science_\n\n# Sequence Your Narrative - Assignment\n\nToday we will create a sequence of visualizations inspired by [Hans Rosling's 200 Countries, 200 Years, 4 Minutes](https://www.youtube.com/watch?v=jbkSRLYSojo).\n\nUsing this [data from Gapminder](https://github.com/open-numbers/ddf--gapminder--systema_globalis/):\n- [Income Per Person (GDP Per Capital, Inflation Adjusted) by Geo & Time](https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--datapoints--income_per_person_gdppercapita_ppp_inflation_adjusted--by--geo--time.csv)\n- [Life Expectancy (in Years) by Geo & Time](https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--datapoints--life_expectancy_years--by--geo--time.csv)\n- [Population Totals, by Geo & Time](https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--datapoints--population_total--by--geo--time.csv)\n- [Entities](https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--entities--geo--country.csv)\n- [Concepts](https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--concepts.csv)", "_____no_output_____" ], [ "Objectives\n- sequence multiple visualizations\n- combine qualitative anecdotes with quantitative aggregates\n\nLinks\n- [Hans Rosling’s TED talks](https://www.ted.com/speakers/hans_rosling)\n- [Spiralling global temperatures from 1850-2016](https://twitter.com/ed_hawkins/status/729753441459945474)\n- \"[The Pudding](https://pudding.cool/) explains ideas debated in culture with 
visual essays.\"\n- [A Data Point Walks Into a Bar](https://lisacharlotterost.github.io/2016/12/27/datapoint-in-bar/): a thoughtful blog post about emotion and empathy in data storytelling", "_____no_output_____" ], [ "# ASSIGNMENT\n \n\n1. Replicate the Lesson Code\n2. Take it further by using the same gapminder dataset to create a sequence of visualizations that combined tell a story of your choosing.\n\nGet creative! Use text annotations to call out specific countries, maybe: change how the points are colored, change the opacity of the points, change their sized, pick a specific time window. Maybe only work with a subset of countries, change fonts, change background colors, etc. make it your own!", "_____no_output_____" ] ], [ [ "# TODO\n!pip freeze\n!pip install --upgrade seaborn\nimport seaborn as sns\n%matplotlib inline\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd", "absl-py==0.7.1\nalabaster==0.7.12\nalbumentations==0.1.12\naltair==3.2.0\nastor==0.8.0\nastropy==3.0.5\natari-py==0.1.15\natomicwrites==1.3.0\nattrs==19.1.0\naudioread==2.1.8\nautograd==1.3\nBabel==2.7.0\nbackcall==0.1.0\nbackports.tempfile==1.0\nbackports.weakref==1.0.post1\nbeautifulsoup4==4.6.3\nbleach==3.1.0\nblis==0.2.4\nbokeh==1.0.4\nboto==2.49.0\nboto3==1.9.205\nbotocore==1.12.205\nBottleneck==1.2.1\nbranca==0.3.1\nbs4==0.0.1\nbz2file==0.98\ncachetools==3.1.1\ncertifi==2019.6.16\ncffi==1.12.3\nchainer==5.4.0\nchardet==3.0.4\nClick==7.0\ncloudpickle==0.6.1\ncmake==3.12.0\ncolorlover==0.3.0\ncommunity==1.0.0b1\ncontextlib2==0.5.5\nconvertdate==2.1.3\ncoverage==3.7.1\ncoveralls==0.5\ncrcmod==1.7\ncufflinks==0.14.6\ncvxopt==1.2.3\ncvxpy==1.0.24\ncycler==0.10.0\ncymem==2.0.2\nCython==0.29.13\ndaft==0.0.4\ndask==1.1.5\ndataclasses==0.6\ndatascience==0.10.6\ndecorator==4.4.0\ndefusedxml==0.6.0\ndescartes==1.1.0\ndill==0.3.0\ndistributed==1.25.3\nDjango==2.2.4\ndlib==19.16.0\ndm-sonnet==1.34\ndocopt==0.6.2\ndocutils==0.14\ndopamine-rl==1.0.5\neasydict==1.9\necos==2.0.7.
post1\neditdistance==0.5.3\nen-core-web-sm==2.1.0\nentrypoints==0.3\nephem==3.7.6.0\net-xmlfile==1.0.1\nfa2==0.3.5\nfancyimpute==0.4.3\nfastai==1.0.57\nfastdtw==0.3.2\nfastprogress==0.1.21\nfastrlock==0.4\nfbprophet==0.5\nfeather-format==0.4.0\nfeaturetools==0.4.1\nfilelock==3.0.12\nfix-yahoo-finance==0.0.22\nFlask==1.1.1\nfolium==0.8.3\nfsspec==0.4.1\nfuture==0.16.0\ngast==0.2.2\nGDAL==2.2.2\ngdown==3.6.4\ngensim==3.6.0\ngeographiclib==1.49\ngeopy==1.17.0\ngevent==1.4.0\ngin-config==0.2.0\nglob2==0.7\ngoogle==2.0.2\ngoogle-api-core==1.14.2\ngoogle-api-python-client==1.7.10\ngoogle-auth==1.4.2\ngoogle-auth-httplib2==0.0.3\ngoogle-auth-oauthlib==0.4.0\ngoogle-cloud-bigquery==1.14.0\ngoogle-cloud-core==1.0.3\ngoogle-cloud-datastore==1.8.0\ngoogle-cloud-language==1.2.0\ngoogle-cloud-storage==1.16.1\ngoogle-cloud-translate==1.5.0\ngoogle-colab==1.0.0\ngoogle-pasta==0.1.7\ngoogle-resumable-media==0.3.2\ngoogleapis-common-protos==1.6.0\ngoogledrivedownloader==0.4\ngraph-nets==1.0.4\ngraphviz==0.10.1\ngreenlet==0.4.15\ngrpcio==1.15.0\ngspread==3.0.1\ngspread-dataframe==3.0.3\ngunicorn==19.9.0\ngym==0.10.11\nh5py==2.8.0\nHeapDict==1.0.0\nholidays==0.9.11\nhtml5lib==1.0.1\nhttpimport==0.5.16\nhttplib2==0.11.3\nhumanize==0.5.1\nhyperopt==0.1.2\nideep4py==2.0.0.post3\nidna==2.8\nimage==1.5.27\nimageio==2.4.1\nimagesize==1.1.0\nimbalanced-learn==0.4.3\nimblearn==0.0\nimgaug==0.2.9\nimportlib-metadata==0.19\nimutils==0.5.2\ninflect==2.1.0\nintel-openmp==2019.0\nintervaltree==2.1.0\nipykernel==4.6.1\nipython==5.5.0\nipython-genutils==0.2.0\nipython-sql==0.3.9\nipywidgets==7.5.1\nitsdangerous==1.1.0\njdcal==1.4.1\njedi==0.15.1\njieba==0.39\nJinja2==2.10.1\njmespath==0.9.4\njoblib==0.13.2\njpeg4py==0.1.4\njsonschema==2.6.0\njupyter==1.0.0\njupyter-client==5.3.1\njupyter-console==5.2.0\njupyter-core==4.5.0\nkaggle==1.5.5\nkapre==0.1.3.1\nKeras==2.2.4\nKeras-Applications==1.0.8\nKeras-Preprocessing==1.1.0\nkeras-vis==0.4.1\nkiwisolver==1.1.0\nknnimpute==0.1.0\nlibrosa==0.6.3\nlightgb
m==2.2.3\nllvmlite==0.29.0\nlmdb==0.96\nlucid==0.3.8\nlunardate==0.2.0\nlxml==4.2.6\nmagenta==0.3.19\nMarkdown==3.1.1\nMarkupSafe==1.1.1\nmatplotlib==3.0.3\nmatplotlib-venn==0.11.5\nmesh-tensorflow==0.0.5\nmido==1.2.6\nmir-eval==0.5\nmissingno==0.4.2\nmistune==0.8.4\nmizani==0.5.4\nmkl==2019.0\nmlxtend==0.14.0\nmore-itertools==7.2.0\nmoviepy==0.2.3.5\nmpi4py==3.0.2\nmpmath==1.1.0\nmsgpack==0.5.6\nmultiprocess==0.70.8\nmultitasking==0.0.9\nmurmurhash==1.0.2\nmusic21==5.5.0\nnatsort==5.5.0\nnbconvert==5.6.0\nnbformat==4.4.0\nnetworkx==2.3\nnibabel==2.3.3\nnltk==3.2.5\nnose==1.3.7\nnotebook==5.2.2\nnp-utils==0.5.10.0\nnumba==0.40.1\nnumexpr==2.6.9\nnumpy==1.16.4\nnvidia-ml-py3==7.352.0\noauth2client==4.1.3\noauthlib==3.1.0\nokgrade==0.4.3\nolefile==0.46\nopencv-contrib-python==3.4.3.18\nopencv-python==3.4.5.20\nopenpyxl==2.5.9\nosqp==0.5.0\npackaging==19.1\npalettable==3.2.0\npandas==0.24.2\npandas-datareader==0.7.4\npandas-gbq==0.4.1\npandas-profiling==1.4.1\npandocfilters==1.4.2\nparso==0.5.1\npathlib==1.0.1\npatsy==0.5.1\npexpect==4.7.0\npickleshare==0.7.5\nPillow==4.3.0\npip-tools==3.9.0\nplac==0.9.6\nplotly==3.6.1\nplotnine==0.5.1\npluggy==0.7.1\nportpicker==1.2.0\nprefetch-generator==1.0.1\npreshed==2.0.1\npretty-midi==0.2.8\nprettytable==0.7.2\nprogressbar2==3.38.0\nprometheus-client==0.7.1\npromise==2.2.1\nprompt-toolkit==1.0.16\nprotobuf==3.7.1\npsutil==5.4.8\npsycopg2==2.7.6.1\nptyprocess==0.6.0\npy==1.8.0\npyarrow==0.14.1\npyasn1==0.4.6\npyasn1-modules==0.2.6\npycocotools==2.0.0\npycparser==2.19\npydot==1.3.0\npydot-ng==2.0.0\npydotplus==2.0.2\npyemd==0.5.1\npyglet==1.4.1\nPygments==2.1.3\npygobject==3.26.1\npymc3==3.7\npymongo==3.8.0\npymystem3==0.2.0\nPyOpenGL==3.1.0\npyparsing==2.4.2\npyrsistent==0.15.4\npysndfile==1.3.7\nPySocks==1.7.0\npystan==2.19.0.0\npytest==3.6.4\npython-apt==1.6.4\npython-chess==0.23.11\npython-dateutil==2.5.3\npython-louvain==0.13\npython-rtmidi==1.3.0\npython-slugify==3.0.3\npython-utils==2.3.0\npytz==2018.9\nPyWavelets==1.0.3\nP
yYAML==3.13\npyzmq==17.0.0\nqtconsole==4.5.2\nrequests==2.21.0\nrequests-oauthlib==1.2.0\nresampy==0.2.1\nretrying==1.3.3\nrpy2==2.9.5\nrsa==4.0\ns3fs==0.3.3\ns3transfer==0.2.1\nscikit-image==0.15.0\nscikit-learn==0.21.3\nscipy==1.3.1\nscreen-resolution-extra==0.0.0\nscs==2.1.1.post2\nseaborn==0.9.0\nsemantic-version==2.6.0\nSend2Trash==1.5.0\nsetuptools-git==1.2\nShapely==1.6.4.post2\nsimplegeneric==0.8.1\nsix==1.12.0\nsklearn==0.0\nsklearn-pandas==1.8.0\nsmart-open==1.8.4\nsnowballstemmer==1.9.0\nsortedcontainers==2.1.0\nspacy==2.1.8\nSphinx==1.8.5\nsphinxcontrib-websupport==1.1.2\nSQLAlchemy==1.3.6\nsqlparse==0.3.0\nsrsly==0.0.7\nstable-baselines==2.2.1\nstatsmodels==0.10.1\nsympy==1.1.1\ntables==3.4.4\ntabulate==0.8.3\ntblib==1.4.0\ntensor2tensor==1.11.0\ntensorboard==1.14.0\ntensorboardcolab==0.0.22\ntensorflow==1.14.0\ntensorflow-estimator==1.14.0\ntensorflow-hub==0.5.0\ntensorflow-metadata==0.14.0\ntensorflow-probability==0.7.0\ntermcolor==1.1.0\nterminado==0.8.2\ntestpath==0.4.2\ntext-unidecode==1.2\ntextblob==0.15.3\ntextgenrnn==1.4.1\ntfds-nightly==1.1.0.dev201908090105\ntflearn==0.3.2\nTheano==1.0.4\nthinc==7.0.8\ntoolz==0.10.0\ntorch==1.1.0\ntorchsummary==1.5.1\ntorchtext==0.3.1\ntorchvision==0.3.0\ntornado==4.5.3\ntqdm==4.28.1\ntraitlets==4.3.2\ntweepy==3.6.0\ntyping==3.7.4\ntzlocal==1.5.1\numap-learn==0.3.9\nuritemplate==3.0.0\nurllib3==1.24.3\nvega-datasets==0.7.0\nwasabi==0.2.2\nwcwidth==0.1.7\nwebencodings==0.5.1\nWerkzeug==0.15.5\nwidgetsnbextension==3.5.1\nwordcloud==1.5.0\nwrapt==1.11.2\nxarray==0.11.3\nxgboost==0.90\nxkit==0.0.0\nxlrd==1.1.0\nxlwt==1.3.0\nyellowbrick==0.9.1\nzict==1.0.0\nzipp==0.5.2\nzmq==0.0.0\nRequirement already up-to-date: seaborn in /usr/local/lib/python3.6/dist-packages (0.9.0)\nRequirement already satisfied, skipping upgrade: pandas>=0.15.2 in /usr/local/lib/python3.6/dist-packages (from seaborn) (0.24.2)\nRequirement already satisfied, skipping upgrade: numpy>=1.9.3 in /usr/local/lib/python3.6/dist-packages (from 
seaborn) (1.16.4)\nRequirement already satisfied, skipping upgrade: matplotlib>=1.4.3 in /usr/local/lib/python3.6/dist-packages (from seaborn) (3.0.3)\nRequirement already satisfied, skipping upgrade: scipy>=0.14.0 in /usr/local/lib/python3.6/dist-packages (from seaborn) (1.3.1)\nRequirement already satisfied, skipping upgrade: pytz>=2011k in /usr/local/lib/python3.6/dist-packages (from pandas>=0.15.2->seaborn) (2018.9)\nRequirement already satisfied, skipping upgrade: python-dateutil>=2.5.0 in /usr/local/lib/python3.6/dist-packages (from pandas>=0.15.2->seaborn) (2.5.3)\nRequirement already satisfied, skipping upgrade: cycler>=0.10 in /usr/local/lib/python3.6/dist-packages (from matplotlib>=1.4.3->seaborn) (0.10.0)\nRequirement already satisfied, skipping upgrade: kiwisolver>=1.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib>=1.4.3->seaborn) (1.1.0)\nRequirement already satisfied, skipping upgrade: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib>=1.4.3->seaborn) (2.4.2)\nRequirement already satisfied, skipping upgrade: six>=1.5 in /usr/local/lib/python3.6/dist-packages (from python-dateutil>=2.5.0->pandas>=0.15.2->seaborn) (1.12.0)\nRequirement already satisfied, skipping upgrade: setuptools in /usr/local/lib/python3.6/dist-packages (from kiwisolver>=1.0.1->matplotlib>=1.4.3->seaborn) (41.0.1)\n" ], [ "income = pd.read_csv('https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--datapoints--income_per_person_gdppercapita_ppp_inflation_adjusted--by--geo--time.csv')\nlifespan = pd.read_csv('https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--datapoints--life_expectancy_years--by--geo--time.csv')\npopulation = pd.read_csv('https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--datapoints--population_total--by--geo--time.csv')\nentities = 
pd.read_csv('https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--entities--geo--country.csv')\nconcepts = pd.read_csv('https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--concepts.csv')\nincome.shape, lifespan.shape, population.shape, entities.shape, concepts.shape\n\n", "_____no_output_____" ], [ "", "_____no_output_____" ], [ "lifespan.head()", "_____no_output_____" ], [ "population.head()", "_____no_output_____" ], [ "pd.options.display.max_columns = 500\nentities.head()", "_____no_output_____" ], [ "concepts[concepts['name_short'].str.contains('trade', na=False)]", "_____no_output_____" ] ], [ [ "https://github.com/pandas-dev/pandas/blob/master/doc/cheatsheet/Pandas_Cheat_Sheet.pdf", "_____no_output_____" ] ], [ [ "print(income.shape)\nprint(lifespan.shape)", "(46513, 3)\n(44370, 3)\n" ] ], [ [ " ## Explore data", "_____no_output_____" ] ], [ [ "# Inner merge is default, which is just what we want\n# However, a historian might like to do a left merge and look at missing data\ndf = pd.merge(income, lifespan)\nprint(df.shape)\ndf.head()", "(40437, 4)\n" ], [ "\n# df1 = pd.merge(income, lifespan, how='outer',\n# left_index=True, right_on=['income', 'lifespan'],\n# indicator=True)\n\n# df1.query('_merge != \"both\"')\n# df1.isna().sum()", "_____no_output_____" ], [ "df.isna().sum()", "_____no_output_____" ], [ "df= pd.merge(df, population)\n\ndf.head()", "_____no_output_____" ], [ "entities['world_6region'].value_counts()", "_____no_output_____" ], [ "entities['world_4region'].value_counts()", "_____no_output_____" ], [ "entities.head()", "_____no_output_____" ], [ "entity_columns_to_keep = ['country', 'name', 'world_6region']\n\nentities = entities[entity_columns_to_keep]\n\nentities.head()", "_____no_output_____" ], [ "merged = pd.merge(df, entities, left_on=\"geo\", right_on=\"country\")\nmerged.head()", "_____no_output_____" ], [ "merged = merged.drop('geo', 
axis='columns')\nmerged.head()", "_____no_output_____" ], [ "merged = merged.rename(columns = {\n 'country': 'country_code',\n 'time': 'year', \n 'income_per_person_gdppercapita_ppp_inflation_adjusted': 'income', \n 'life_expectancy_years': 'lifespan',\n 'population_total': 'population',\n 'name': 'country',\n 'world_6region': '6region',\n 'world_4region': '4region'\n})", "_____no_output_____" ], [ "merged.country.unique()", "_____no_output_____" ], [ "usa = merged[merged.country == 'United States']\nchina = merged[merged.country == 'China']\n", "_____no_output_____" ], [ "usa[usa.year.isin([1818, 1918, 2018])]", "_____no_output_____" ], [ "import seaborn as sns\n\nnow = merged[merged.year == 2018]\nnow.head()\n# sns.relplot(x=now['income'], y=now['lifespan'])", "_____no_output_____" ], [ "\nsns.relplot(x='income', y='lifespan', hue='world_6region', size='population', sizes=(10, 400), data=now);\nplt.xscale('log')\nplt.ylim(0,85)", "_____no_output_____" ], [ "now.sort_values('income', ascending=False)", "_____no_output_____" ], [ "now_qatar = now[now.country=='Qatar']\nnow_qatar.head()", "_____no_output_____" ], [ "sns.relplot(x='income', y='lifespan', hue='6region', size='population', sizes=(10, 400), data=now);\n\nplt.xscale('log')\nplt.ylim(0,85)\nplt.title(\"The World in 2018\")\nplt.text(x=now_qatar.income-5000, y=now_qatar.lifespan+1, s='Qatar')\nplt.show()", "_____no_output_____" ], [ "", "_____no_output_____" ] ], [ [ "## Plot visualization", "_____no_output_____" ] ], [ [ "", "_____no_output_____" ] ], [ [ "## Analyze outliers", "_____no_output_____" ] ], [ [ "", "_____no_output_____" ] ], [ [ "## Plot multiple years", "_____no_output_____" ] ], [ [ "years = [1818, 1918, 2018]\ncenturies = merged[merged.year.isin(years)]\n\n\n\nfig=sns.relplot(x='income', y='lifespan', hue='6region', size='population', sizes=(10, 400), col='year', data=centuries);\n# fig.set(facecolor='grey')\n# plt.set_facecolor(\"blue\")\nplt.xscale('log')\n# plt.ylim(0,85)\n# 
plt.title(\"The World in three centuries\")\nplt.text(x=now_qatar.income-5000, y=now_qatar.lifespan+1, s='Qatar')\nplt.show()", "_____no_output_____" ], [ "", "_____no_output_____" ], [ "fig=sns.relplot(x='income',y='lifespan', hue='6region', size='population', sizes=(30,400), data=now);\nfig.set(facecolor='b')\nplt.xscale('log')\nplt.ylim(20,85)\nplt.title(\"The World in 2018\")\nplt.show()", "_____no_output_____" ], [ "", "_____no_output_____" ] ], [ [ "## Point out a story", "_____no_output_____" ] ], [ [ "years = [1918, 1938, 1978, 1998, 2018]\n\ndecades = merged[merged.year.isin(years)]", "_____no_output_____" ], [ "sns.relplot(x='income', y='lifespan', hue='6region', size='population', col='year', data=decades)\nplt.show()", "_____no_output_____" ], [ "for year in years:\n fig=sns.relplot(x='income', \n y='lifespan', \n hue='6region', \n size='population', \n sizes=(10, 400), \n data=merged[merged.year==year]);\n plt.xscale('log')\n plt.xlim((150,150000))\n plt.ylim((0,90))\n plt.title(year)\n plt.text(x=now_qatar.income-5000, y=now_qatar.lifespan+1, s='Qatar')\n plt.show()", "_____no_output_____" ] ], [ [ "# **Exploration**", "_____no_output_____" ] ], [ [ "concepts.head()", "_____no_output_____" ], [ "# search for keyword in concepts\nconcepts[concepts['name_short'].str.contains('health', na=False)]", "_____no_output_____" ], [ "from google.colab import files\nuploaded = files.upload()", "_____no_output_____" ], [ "health = pd.read_csv('/content/indicator_government share of total health spending.csv')\nhealth", "_____no_output_____" ], [ "remove_me = 'General government expenditure on health as percentage of total expenditure on health'\nhealth.columns = [column.replace(remove_me, 'Country') for column in health] \nhealth\nhealth = health.melt(id_vars='Country').dropna()\n", "_____no_output_____" ], [ "lifespan.head()", "_____no_output_____" ], [ "health.head()", "_____no_output_____" ], [ "# FINALLY. 
Renaming columns is the most annoying part of these exercises,\n# because I can never remember what I did during the previous week\n# to make it work.\n\nentity_columns_to_keep = ['country', 'name', 'world_6region']\n\nentities = entities[entity_columns_to_keep]\n\nentities.head()", "_____no_output_____" ], [ "entities.columns = ['geo', 'Country', '6region']\n\ndf = pd.merge(lifespan, entities)\n\ndf.head()", "_____no_output_____" ], [ "health.columns = ['Country', 'time', 'spending']\nhealth.head()", "_____no_output_____" ], [ "print(health.dtypes, df.dtypes)", "Country object\ntime object\nspending float64\ndtype: object geo object\ntime int64\nlife_expectancy_years float64\nCountry object\n6region object\ndtype: object\n" ], [ "s = health.time\nhealth.time = pd.to_numeric(s, errors='raise')\nhealth.dtypes", "_____no_output_____" ], [ "df = pd.merge(df, health)\ndf = pd.merge(df, population)\ndf.head()", "_____no_output_____" ] ], [ [ "# **Ready to plot**", "_____no_output_____" ] ], [ [ "sns.relplot(x='spending', y='life_expectancy_years', hue='6region', size='population_total', data=df);\n\nplt.ylim(0,100)\nplt.title(\"Lifespan and Gov share of health spending\")\nplt.show()", "_____no_output_____" ] ], [ [ "", "_____no_output_____" ] ], [ [ "df_now = df[df.time == 2010]\ndf_now.head()", "_____no_output_____" ], [ "sns.relplot(x='spending', y='life_expectancy_years', hue='6region', size='population_total', data=df_now);\n\nplt.ylim(0,100)\nplt.title(\"Lifespan and Gov share of health spending\")\nplt.show()", "_____no_output_____" ] ], [ [ "# Plotting multiple **years**", "_____no_output_____" ] ], [ [ "years = [1995, 2000, 2005, 2010]", "_____no_output_____" ], [ "for year in years:\n fig=sns.relplot(x='spending', \n y='life_expectancy_years', \n hue='6region', \n size='population_total', \n sizes=(10, 100), \n data=df[df.time==year]);\n\n plt.xlim(0,100)\n plt.ylim(0,90)\n plt.title((year, \"Lifespan and Gov share of health spending\"))\n plt.show()", 
"_____no_output_____" ] ], [ [ "# STRETCH OPTIONS\n\n## 1. Animate!\n\n- [How to Create Animated Graphs in Python](https://towardsdatascience.com/how-to-create-animated-graphs-in-python-bb619cc2dec1)\n- Try using [Plotly](https://plot.ly/python/animations/)!\n- [The Ultimate Day of Chicago Bikeshare](https://chrisluedtke.github.io/divvy-data.html) (Lambda School Data Science student)\n- [Using Phoebe for animations in Google Colab](https://colab.research.google.com/github/phoebe-project/phoebe2-docs/blob/2.1/tutorials/animations.ipynb)\n\n## 2. Study for the Sprint Challenge\n\n- Concatenate DataFrames\n- Merge DataFrames\n- Reshape data with `pivot_table()` and `.melt()`\n- Be able to reproduce a FiveThirtyEight graph using Matplotlib or Seaborn.\n\n## 3. Work on anything related to your portfolio site / Data Storytelling Project", "_____no_output_____" ] ], [ [ "# TODO", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
4a2db3a950316a5e8dc6f4e7f23717ff3670a34b
766,226
ipynb
Jupyter Notebook
0001/dot-syntax.ipynb
genkuroki/public
339ea5dfd424492a6b21d1df299e52d48902de18
[ "MIT" ]
10
2021-06-06T00:33:49.000Z
2022-01-24T06:56:08.000Z
0001/dot-syntax.ipynb
genkuroki/public
339ea5dfd424492a6b21d1df299e52d48902de18
[ "MIT" ]
null
null
null
0001/dot-syntax.ipynb
genkuroki/public
339ea5dfd424492a6b21d1df299e52d48902de18
[ "MIT" ]
3
2021-08-02T11:58:34.000Z
2021-12-11T11:46:05.000Z
91.840585
7,792
0.813565
[ [ [ "(1:9)' .* (1:9)", "_____no_output_____" ], [ "using Plots\nf(x, y) = exp(-x^2 + x*y/2 - y^2)\nx = y = range(-3, 3; length=301)\nz = f.(x', y)\nsurface(x, y, z; colorbar=false, size=(720, 540), camera=(60, 60), color=:CMRmap)", "_____no_output_____" ], [ "using Plots\nf(x, y) = exp(-x^2 + x*y/2 - y^2)\nx = y = range(-3, 3; length=301)\nz = f.(x', y)\nsurface(x, y, z; colorbar=false, size=(720, 540), camera=(60, 60), color=:gist_earth)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code" ] ]
4a2db4e19ca48cfee76d46b00f33f32c8fd8af9b
51,558
ipynb
Jupyter Notebook
notebooks/Dataset C - Obesity Level Estimation/Synthetic data evaluation/Resemblance/4_Data_Labelling_Resemblance_DatasetC.ipynb
Vicomtech/STDG-evaluation-metrics
4662c2cc60f7941723a876a6032b411e40f5ec62
[ "MIT" ]
4
2021-08-20T18:21:09.000Z
2022-01-12T09:30:29.000Z
notebooks/Dataset C - Obesity Level Estimation/Synthetic data evaluation/Resemblance/4_Data_Labelling_Resemblance_DatasetC.ipynb
Vicomtech/STDG-evaluation-metrics
4662c2cc60f7941723a876a6032b411e40f5ec62
[ "MIT" ]
null
null
null
notebooks/Dataset C - Obesity Level Estimation/Synthetic data evaluation/Resemblance/4_Data_Labelling_Resemblance_DatasetC.ipynb
Vicomtech/STDG-evaluation-metrics
4662c2cc60f7941723a876a6032b411e40f5ec62
[ "MIT" ]
null
null
null
61.52506
9,000
0.540808
[ [ [ "# Data Labelling Analysis (DLA) Dataset C", "_____no_output_____" ] ], [ [ "#import libraries\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nimport numpy as np\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nimport os\nprint('Libraries imported!!')", "Libraries imported!!\n" ], [ "#define directory of functions and actual directory\nHOME_PATH = '' #home path of the project\nFUNCTIONS_DIR = 'EVALUATION FUNCTIONS/RESEMBLANCE'\nACTUAL_DIR = os.getcwd()\n\n#change directory to functions directory\nos.chdir(HOME_PATH + FUNCTIONS_DIR)\n\n#import functions for data labelling analisys\nfrom data_labelling import mix_data\nfrom data_labelling import split_data\nfrom data_labelling import DataPreProcessor\nfrom data_labelling import ClassificationModels\n\n#change directory to actual directory\nos.chdir(ACTUAL_DIR)\nprint('Functions imported!!')", "Functions imported!!\n" ] ], [ [ "## 1. Read real and synthetic datasets\nIn this part real and synthetic datasets are read.", "_____no_output_____" ] ], [ [ "#Define global variables\nDATA_TYPES = ['Real','GM','SDV','CTGAN','WGANGP']\nSYNTHESIZERS = ['GM','SDV','CTGAN','WGANGP']\nFILEPATHS = {'Real' : HOME_PATH + 'REAL DATASETS/TRAIN DATASETS/C_Obesity_Data_Real_Train.csv',\n 'GM' : HOME_PATH + 'SYNTHETIC DATASETS/GM/C_Obesity_Data_Synthetic_GM.csv',\n 'SDV' : HOME_PATH + 'SYNTHETIC DATASETS/SDV/C_Obesity_Data_Synthetic_SDV.csv',\n 'CTGAN' : HOME_PATH + 'SYNTHETIC DATASETS/CTGAN/C_Obesity_Data_Synthetic_CTGAN.csv',\n 'WGANGP' : HOME_PATH + 'SYNTHETIC DATASETS/WGANGP/C_Obesity_Data_Synthetic_WGANGP.csv'}\ncategorical_columns = ['Gender','family_history_with_overweight','FAVC','CAEC','SMOKE','SCC','CALC','MTRANS','Obesity_level']\ndata = dict()", "_____no_output_____" ], [ "#iterate over all datasets filepaths and read each dataset\nfor name, path in FILEPATHS.items() :\n data[name] = pd.read_csv(path)\n for col in categorical_columns :\n data[name][col] = data[name][col].astype('category')\ndata", 
"_____no_output_____" ] ], [ [ "## 2. Mix real data with synthetic data", "_____no_output_____" ] ], [ [ "mixed_data = dict()\nfor name in SYNTHESIZERS :\n mixed_data[name] = mix_data(data['Real'], data[name])\nmixed_data", "_____no_output_____" ] ], [ [ "- 0 for real data\n- 1 for synthetic data", "_____no_output_____" ], [ "## 2. Split train and test data", "_____no_output_____" ] ], [ [ "train_len = 0.8\ntrain_data = dict()\ntest_data = dict()\nfor name in SYNTHESIZERS :\n print(name)\n train_data[name], test_data[name] = split_data(mixed_data[name], train_len)\n print(train_data[name].shape, test_data[name].shape)\n print('Train data', train_data[name].groupby('Label').size())\n print('Test data', test_data[name].groupby('Label').size())\n print('##############################################')", "GM\n(2700, 18) (675, 18)\nTrain data Label\n0.0 1345\n1.0 1355\ndtype: int64\nTest data Label\n0.0 342\n1.0 333\ndtype: int64\n##############################################\nSDV\n(2700, 18) (675, 18)\nTrain data Label\n0.0 1342\n1.0 1358\ndtype: int64\nTest data Label\n0.0 345\n1.0 330\ndtype: int64\n##############################################\nCTGAN\n(2700, 18) (675, 18)\nTrain data Label\n0.0 1348\n1.0 1352\ndtype: int64\nTest data Label\n0.0 339\n1.0 336\ndtype: int64\n##############################################\nWGANGP\n(2700, 18) (675, 18)\nTrain data Label\n0.0 1373\n1.0 1327\ndtype: int64\nTest data Label\n0.0 315\n1.0 360\ndtype: int64\n##############################################\n" ] ], [ [ "## 3. 
Train Classifiers", "_____no_output_____" ] ], [ [ "categorical_columns = ['Gender','family_history_with_overweight','FAVC','CAEC','SMOKE','SCC','CALC','MTRANS','Obesity_level']\nnumerical_columns = ['Age','Height','Weight','FCVC','NCP','CH2O','FAF','TUE']\ncategories = [np.array([0, 1]), np.array([0, 1]), np.array([0, 1]), np.array([0, 1, 2, 3]), np.array([0, 1]), \n np.array([0, 1]), np.array([0, 1, 2, 3]), np.array([0, 1, 2, 3, 4]), np.array([0, 1, 2, 3, 4, 5, 6])]\n\n#initialize classifiers\nclassifiers_all = dict()\ndata_preprocessors = dict()\n\ntarget = 'Label'\n\nfor name in SYNTHESIZERS : \n print(name)\n \n classifiers_all[name] = ClassificationModels()\n \n data_preprocessors[name] = DataPreProcessor(categorical_columns, numerical_columns, categories)\n x_train = data_preprocessors[name].preprocess_train_data(train_data[name].iloc[:, train_data[name].columns != target])\n y_train = train_data[name].loc[:, target]\n \n classifiers_all[name].train_classifiers(x_train, y_train)\n \n print('####################################################')", "GM\nRF Trained\nKNN Trained\nDT Trained\nSVM Trained\nMLP Trained\n####################################################\nSDV\nRF Trained\nKNN Trained\nDT Trained\nSVM Trained\nMLP Trained\n####################################################\nCTGAN\nRF Trained\nKNN Trained\nDT Trained\nSVM Trained\nMLP Trained\n####################################################\nWGANGP\nRF Trained\nKNN Trained\nDT Trained\nSVM Trained\nMLP Trained\n####################################################\n" ] ], [ [ "## 5. 
Evaluate Classifiers", "_____no_output_____" ] ], [ [ "results_all = dict()\n\nfor name in SYNTHESIZERS : \n print(name)\n \n x_test = data_preprocessors[name].preprocess_test_data(test_data[name].loc[:, test_data[name].columns != target])\n print(x_test.shape)\n y_test = test_data[name].loc[:, target]\n \n classifiers_all[name].evaluate_classifiers(x_test, y_test)\n print('####################################################')", "GM\n(675, 38)\nRF Tested\n model accuracy precision recall f1\n0 RF 0.9644 0.9585 0.97 0.9642\nKNN Tested\n model accuracy precision recall f1\n0 KNN 0.7067 0.9091 0.4505 0.6024\nDT Tested\n model accuracy precision recall f1\n0 DT 0.9422 0.9509 0.9309 0.9408\nSVM Tested\n model accuracy precision recall f1\n0 SVM 0.5022 0.4969 0.7297 0.5912\nMLP Tested\n model accuracy precision recall f1\n0 MLP 0.7807 0.7761 0.7808 0.7784\n####################################################\nSDV\n(675, 38)\nRF Tested\n model accuracy precision recall f1\n0 RF 0.9674 0.9611 0.9727 0.9669\nKNN Tested\n model accuracy precision recall f1\n0 KNN 0.7896 0.895 0.6455 0.75\nDT Tested\n model accuracy precision recall f1\n0 DT 0.9067 0.887 0.9273 0.9067\nSVM Tested\n model accuracy precision recall f1\n0 SVM 0.563 0.5552 0.5333 0.544\nMLP Tested\n model accuracy precision recall f1\n0 MLP 0.8578 0.912 0.7848 0.8436\n####################################################\nCTGAN\n(675, 38)\nRF Tested\n model accuracy precision recall f1\n0 RF 0.9941 0.994 0.994 0.994\nKNN Tested\n model accuracy precision recall f1\n0 KNN 0.8489 0.95 0.7351 0.8289\nDT Tested\n model accuracy precision recall f1\n0 DT 0.963 0.967 0.9583 0.9626\nSVM Tested\n model accuracy precision recall f1\n0 SVM 0.6385 0.6292 0.6667 0.6474\nMLP Tested\n model accuracy precision recall f1\n0 MLP 0.9511 0.9662 0.9345 0.9501\n####################################################\nWGANGP\n(675, 38)\nRF Tested\n model accuracy precision recall f1\n0 RF 0.9659 1.0 0.9361 0.967\nKNN Tested\n model 
accuracy precision recall f1\n0 KNN 0.96 0.937 0.9917 0.9636\nDT Tested\n model accuracy precision recall f1\n0 DT 0.5526 0.9028 0.1806 0.3009\nSVM Tested\n model accuracy precision recall f1\n0 SVM 0.5511 0.6913 0.2861 0.4047\nMLP Tested\n model accuracy precision recall f1\n0 MLP 0.5674 1.0 0.1889 0.3178\n####################################################\n" ] ], [ [ "## 6. Analyse models results", "_____no_output_____" ] ], [ [ "fig, axs = plt.subplots(nrows=1, ncols=4, figsize=(8, 2.5))\naxs_idxs = [[0,0], [0,1], [1,0], [1,1]]\naxs_idxs = [0, 1, 2, 3]\nidx = dict(zip(SYNTHESIZERS,axs_idxs))\n\nfor name in SYNTHESIZERS :\n ax_plot = axs[idx[name]]\n classifiers_all[name].plot_classification_metrics(ax_plot)\n ax_plot.set_title(name, fontsize=10)\n\nplt.tight_layout()\nfig.savefig('DATA LABELLING RESULTS/CLASSIFICATION_METRICS.svg', bbox_inches='tight')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
4a2db9dc17c8ecd1a2c1b09a44f272bee937a79e
6,520
ipynb
Jupyter Notebook
data_structure/Even-After-Odd-Nodes.ipynb
chensuhui03/udacity_python_practice
01f909acb657c4555bfd64c6430494541f643686
[ "MIT" ]
null
null
null
data_structure/Even-After-Odd-Nodes.ipynb
chensuhui03/udacity_python_practice
01f909acb657c4555bfd64c6430494541f643686
[ "MIT" ]
null
null
null
data_structure/Even-After-Odd-Nodes.ipynb
chensuhui03/udacity_python_practice
01f909acb657c4555bfd64c6430494541f643686
[ "MIT" ]
null
null
null
23.537906
284
0.469018
[ [ [ "### Problem Statement\n\nGiven a linked list with integer data, arrange the elements in such a manner that all nodes with even numbers are placed after odd numbers. **Do not create any new nodes and avoid using any other data structure. The relative order of even and odd elements must not change.** \n\n**Example:**\n* `linked list = 1 2 3 4 5 6`\n* `output = 1 3 5 2 4 6`", "_____no_output_____" ] ], [ [ "class Node:\n def __init__(self, data):\n self.data = data\n self.next = None", "_____no_output_____" ] ], [ [ "### Exercise - Write the function definition here\n", "_____no_output_____" ] ], [ [ "def even_after_odd(head):\n \"\"\"\n :param - head - head of linked list\n return - updated list with all even elements are odd elements \n \"\"\"\n if head is None:\n return head\n \n odd_head = None\n odd_tail = None\n \n even_head = None\n even_tail = None\n \n current = head\n \n while current:\n if current.data % 2 == 1:\n if odd_head is None:\n odd_head = current\n odd_tail = odd_head\n else: \n odd_tail.next = current\n odd_tail = odd_tail.next\n else:\n if even_head is None:\n even_head = current\n even_tail = even_head\n else:\n even_tail.next = current\n even_tail = even_tail.next\n \n current = current.next\n \n if odd_head is None: \n return even_head\n \n odd_tail.next = even_head\n \n return odd_head", "_____no_output_____" ] ], [ [ "<span class=\"graffiti-highlight graffiti-id_xpuflcm-id_9q4n7o8\"><i></i><button>Show Solution</button></span>", "_____no_output_____" ], [ "### Test - Let's test your function", "_____no_output_____" ] ], [ [ "# helper functions for testing purpose\ndef create_linked_list(arr):\n if len(arr)==0:\n return None\n head = Node(arr[0])\n tail = head\n for data in arr[1:]:\n tail.next = Node(data)\n tail = tail.next\n return head\n\ndef print_linked_list(head):\n while head:\n print(head.data, end=' ')\n head = head.next\n print()", "_____no_output_____" ], [ "def test_function(test_case):\n head = test_case[0]\n solution = 
test_case[1]\n \n node_tracker = dict({})\n node_tracker['nodes'] = list()\n temp = head\n while temp:\n node_tracker['nodes'].append(temp)\n temp = temp.next\n\n head = even_after_odd(head) \n temp = head\n index = 0\n try:\n while temp:\n if temp.data != solution[index] or temp not in node_tracker['nodes']:\n print(\"Fail\")\n return\n temp = temp.next\n index += 1\n print(\"Pass\") \n except Exception as e:\n print(\"Fail\")", "_____no_output_____" ], [ "arr = [1, 2, 3, 4, 5, 6]\nsolution = [1, 3, 5, 2, 4, 6]\n\nhead = create_linked_list(arr)\ntest_case = [head, solution]\ntest_function(test_case)", "Pass\n" ], [ "arr = [1, 3, 5, 7]\nsolution = [1, 3, 5, 7]\n\nhead = create_linked_list(arr)\ntest_case = [head, solution]\ntest_function(test_case)", "Pass\n" ], [ "arr = [2, 4, 6, 8]\nsolution = [2, 4, 6, 8]\nhead = create_linked_list(arr)\ntest_case = [head, solution]\ntest_function(test_case)", "Pass\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ] ]
4a2dbb96bf057a34671dfa6e8e969ba9ca3dbef6
209,572
ipynb
Jupyter Notebook
docs/source/validation.ipynb
kerkelae/disimpy
51e7cbc7880a9f9eb80f2c351c98f67b8890e304
[ "MIT" ]
15
2020-10-06T18:17:14.000Z
2022-02-11T14:33:06.000Z
docs/source/validation.ipynb
kerkelae/disimpy
51e7cbc7880a9f9eb80f2c351c98f67b8890e304
[ "MIT" ]
6
2020-08-07T13:50:48.000Z
2022-03-11T13:26:58.000Z
docs/source/validation.ipynb
kerkelae/disimpy
51e7cbc7880a9f9eb80f2c351c98f67b8890e304
[ "MIT" ]
7
2020-01-20T19:04:34.000Z
2022-03-09T09:51:53.000Z
433.89648
104,988
0.941624
[ [ [ "# Validation\n\nThis notebook contains examples of some of the simulations that have been used to validate Disimpy's functionality by comparing the simulated signals to analytical solutions and signals generated by other simulators. Here, we simulate free diffusion and restricted diffusion inside cylinders and spheres.", "_____no_output_____" ] ], [ [ "# Import the required packages and modules\n\nimport os\nimport pickle\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom disimpy import gradients, simulations, substrates, utils\nfrom disimpy.gradients import GAMMA", "_____no_output_____" ], [ "# Define the simulation parameters\n\nn_walkers = int(1e6) # Number of random walkers\nn_t = int(1e3) # Number of time points\ndiffusivity = 2e-9 # In SI units (m^2/s)", "_____no_output_____" ] ], [ [ "## Free diffusion\n\nIn the case of free diffusion, the analytical expression for the signal is $S = S_0 \\exp(-bD)$, where $S_0$ is the signal without diffusion-weighting, $b$ is the b-value, and $D$ is the diffusivity.", "_____no_output_____" ] ], [ [ "# Create a Stejskal-Tanner gradient array with ∆ = 40 ms and δ = 30 ms\n\nT = 70e-3\ngradient = np.zeros((1, 700, 3))\ngradient[0, 1:300, 0] = 1\ngradient[0, -300:-1, 0] = -1\nbs = np.linspace(1, 3e9, 100)\ngradient = np.concatenate([gradient for _ in bs], axis=0)\ndt = T / (gradient.shape[1] - 1)\ngradient, dt = gradients.interpolate_gradient(gradient, dt, n_t)\ngradient = gradients.set_b(gradient, dt, bs)\n\n\n# Show the waveform of the measurement with the highest b-value\n\nfig, ax = plt.subplots(1, figsize=(7, 4))\nfor i in range(3):\n ax.plot(np.linspace(0, T, n_t), gradient[-1, :, i])\nax.legend(['G$_x$', 'G$_y$', 'G$_z$'])\nax.set_xlabel('Time (s)')\nax.set_ylabel('Gradient magnitude (T/m)')\nplt.show()", "_____no_output_____" ], [ "# Run the simulation\n\nsubstrate = substrates.free()\nsignals = simulations.simulation(\n n_walkers, diffusivity, gradient, dt, substrate)\n\n\n# Plot the results\n\nfig, 
ax = plt.subplots(1, figsize=(7, 4))\nax.plot(bs, np.exp(-bs * diffusivity), color='tab:orange')\nax.scatter(bs, signals / n_walkers, s=10, marker='o')\nax.legend(['Analytical signal', 'Simulated signal'])\nax.set_xlabel('b (ms/μm$^2$)')\nax.set_ylabel('S/S$_0$')\nax.set_yscale('log')\nplt.show()", "Starting simulation\nNumber of random walkers = 1000000\nNumber of steps = 1000\nStep length = 9.171704949125858e-07 m\nStep duration = 7.010014306151645e-05 s\nSimulation finished\n" ] ], [ [ "## Restricted diffusion and comparison to MISST\n\nHere, diffusion inside cylinders and spheres is simulated and the signals are compared to those calculated with [MISST](http://mig.cs.ucl.ac.uk/index.php?n=Tutorial.MISST) that uses matrix operators to calculate the time evolution of the diffusion signal inside simple geometries. The cylinder is simulated using a triangular mesh and the sphere as an analytically defined surface.", "_____no_output_____" ] ], [ [ "# Load and show the cylinder mesh used in the simulations\n\nmesh_path = os.path.join(\n os.path.dirname(simulations.__file__), 'tests', 'cylinder_mesh_closed.pkl')\nwith open(mesh_path, 'rb') as f:\n example_mesh = pickle.load(f)\nfaces = example_mesh['faces']\nvertices = example_mesh['vertices']\ncylinder_substrate = substrates.mesh(\n vertices, faces, periodic=True, init_pos='intra')\nutils.show_mesh(cylinder_substrate)", "Aligning the corner of the simulated voxel with the origin\nMoved the vertices by [0. 0. 
0.]\nDividing mesh into subvoxels\nFinished dividng mesh into subvoxels\n" ], [ "# Run the simulation\n\nsignals = simulations.simulation(\n n_walkers, diffusivity, gradient, dt, cylinder_substrate)\n\n\n# Load MISST signals\n\ntests_dir = os.path.join(os.path.dirname(gradients.__file__), 'tests')\nmisst_signals = np.loadtxt(os.path.join(tests_dir,\n 'misst_cylinder_signal_smalldelta_30ms_bigdelta_40ms_radius_5um.txt'))\n\n\n# Plot the results\n\nfig, ax = plt.subplots(1, figsize=(7, 4))\nax.scatter(bs, signals / n_walkers, s=10, marker='o')\nax.scatter(bs, misst_signals, s=10, marker='.')\nax.set_xlabel('b (ms/μm$^2$)')\nax.set_ylabel('S/S$_0$')\nax.legend(['Disimpy', 'MISST'])\nax.set_title('Diffusion in a cylinder')\nax.set_yscale('log')\nplt.show()", "Starting simulation\nNumber of random walkers = 1000000\nNumber of steps = 1000\nStep length = 9.171704949125858e-07 m\nStep duration = 7.010014306151645e-05 s\nCalculating initial positions\nFinished calculating initial positions\nSimulation finished\n" ], [ "# Run the simulation\n\nsphere_substrate = substrates.sphere(5e-6)\nsignals = simulations.simulation(\n n_walkers, diffusivity, gradient, dt, sphere_substrate)\n\n\n# Load MISST signals\n\ntests_dir = os.path.join(os.path.dirname(gradients.__file__), 'tests')\nmisst_signals = np.loadtxt(os.path.join(tests_dir,\n 'misst_sphere_signal_smalldelta_30ms_bigdelta_40ms_radius_5um.txt'))\n\n\n# Plot the results\n\nfig, ax = plt.subplots(1, figsize=(7, 4))\nax.scatter(bs, signals / n_walkers, s=10, marker='o')\nax.scatter(bs, misst_signals, s=10, marker='.')\nax.set_xlabel('b (ms/μm$^2$)')\nax.set_ylabel('S/S$_0$')\nax.legend(['Disimpy', 'MISST'])\nax.set_title('Diffusion in a sphere')\nax.set_yscale('log')\nplt.show()", "Starting simulation\nNumber of random walkers = 1000000\nNumber of steps = 1000\nStep length = 9.171704949125858e-07 m\nStep duration = 7.010014306151645e-05 s\nSimulation finished\n" ] ], [ [ "## Signal diffraction pattern\n\nIn the case of 
restricted diffusion in a cylinder perpendicular to the direction of the diffusion encoding gradient with short pulses and long diffusion time, the signal minimum occurs at $0.61 · 2 · \\pi/r$, where $r$ is the cylinder radius. Details are provided by [Avram et al](https://doi.org/10.1002/nbm.1277), for example.", "_____no_output_____" ] ], [ [ "# Create a Stejskal-Tanner gradient array with ∆ = 0.5 s and δ = 0.1 ms\n\nT = 501e-3\ngradient = np.zeros((1, n_t, 3))\ngradient[0, 1:2, 0] = 1\ngradient[0, -2:-1, 0] = -1\ndt = T / (gradient.shape[1] - 1)\nbs = np.linspace(1, 1e11, 250)\ngradient = np.concatenate([gradient for _ in bs], axis=0)\ngradient = gradients.set_b(gradient, dt, bs)\nq = gradients.calc_q(gradient, dt)\nqs = np.max(np.linalg.norm(q, axis=2), axis=1)\n\n\n# Show the waveform of the measurement with the highest b-value\n\nfig, ax = plt.subplots(1, figsize=(7, 4))\nfor i in range(3):\n ax.plot(np.linspace(0, T, n_t), gradient[-1, :, i])\nax.legend(['G$_x$', 'G$_y$', 'G$_z$'])\nax.set_xlabel('Time (s)')\nax.set_ylabel('Gradient magnitude (T/m)')\nplt.show()\n\n\n# Run the simulation\n\nradius = 10e-6\nsubstrate = substrates.cylinder(\n radius=radius, orientation=np.array([0., 0., 1.]))\nsignals = simulations.simulation(\n n_walkers, diffusivity, gradient, dt, substrate)\n\n\n# Plot the results\n\nfig, ax = plt.subplots(1, figsize=(7, 4))\nax.scatter(1e-6 * qs, signals / n_walkers, s=10, marker='o')\nminimum = 1e-6 * .61 * 2 * np.pi / radius\nax.plot([minimum, minimum], [0, 1], ls='--', lw=2, color='tab:orange')\nax.legend(['Analytical minimum', 'Simulated signal'])\nax.set_xlabel('q (μm$^{-1}$)')\nax.set_ylabel('S/S$_0$')\nax.set_yscale('log')\nax.set_ylim([1e-4, 1])\nax.set_xlim([0, max(1e-6 * qs)])\nplt.show()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
4a2dbd578c34c6fb67c7c3a0cb0dc2b8ce1960b0
9,749
ipynb
Jupyter Notebook
4thru12daysSwift.ipynb
der0pa/der0pa.github.io
72f825728c2b1e2f68958afa9de4465b9871e95a
[ "MIT" ]
null
null
null
4thru12daysSwift.ipynb
der0pa/der0pa.github.io
72f825728c2b1e2f68958afa9de4465b9871e95a
[ "MIT" ]
null
null
null
4thru12daysSwift.ipynb
der0pa/der0pa.github.io
72f825728c2b1e2f68958afa9de4465b9871e95a
[ "MIT" ]
null
null
null
25.454308
238
0.397579
[ [ [ "<a href=\"https://colab.research.google.com/github/der0pa/der0pa.github.io/blob/master/4thru12daysSwift.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "This workbook is for days 4 thru 12 summary.\nDays 1 thru 3 are in a .swift file on github\n", "_____no_output_____" ] ], [ [ "// for loops ... runs code over arrays and ranges untill exhausted\n\nlet count = 1...5\n\nfor _ in count {\n print(\"count@\")\n}", "count@\r\ncount@\r\ncount@\r\ncount@\r\ncount@\r\n" ], [ "// while loops ... runs loop while bool is 'true'\n\nvar number = 1 // give var 'number' its starting value\n\nwhile number <= 5 {\n print(number)\n number += 1 // add 1 at each iterations as long as 'true'\n}\n\nprint(\"Ready or not, here I come\") ", "1\r\n2\r\n3\r\n4\r\n5\r\nReady or not, here I come\r\n" ], [ "// Repeat loop -not common - bool is last with repeat\n\nvar number = 1\n\nrepeat {\n print(number)\n number += 1\n} while number <= 5\n\nprint(\"Ready or not, repeat is complete\")", "1\r\n2\r\n3\r\n4\r\n5\r\nReady or not, repeat is complete\r\n" ], [ "// this will not print \"this is false\" using 'while'\nwhile false {\n print(\"this is false\")\n}", "_____no_output_____" ], [ "// this will print \"this is false\" using 'repeat'\nrepeat {\n print(\"this is false\")\n} while false", "this is false\r\n" ], [ "// exit a loop early using 'break'\n\nvar countDown = 5\n\nwhile countDown >= 0 {\n print(countDown)\n \n if countDown == 2 {\n print(\"break out of loop at 2 not 0\")\n break\n }\n countDown -= 1\n}\n\nprint(\"Blast off!\")", "5\r\n4\r\n3\r\n2\r\nbreak out of loop at 2 not 0\r\nBlast off!\r\n" ], [ "// this will print multiplication table up to 5 * 5\n// loops can be nested inside 'outerLoop' ie: 'loop label'\n\nfor i in 1...5 {\n for j in 1...5 {\n let product = i * j\n print(\"\\(i) * \\(j) is: \\(product)\")\n }\n}", "_____no_output_____" ], [ "outerLoopLabel: for i in 
1...5 {\n for j in 1...5 { // inner loop start\n let product = i * j\n print(\"\\(i) * \\(j) is: \\(product)\")\n \n if product == 20 {\n print(\"maximum product 'break' at 20\")\n break outerLoopLabel\n }\n }\n}", "_____no_output_____" ], [ "// use 'continue' to skip over or not include values\n// divide an int by 2 and 0 remainder will be an even int\n// divide an int by 2 with 1 as remainder will be odd int\n\nfor i in 1...10 {\n if i % 2 == 1 {\n continue // will 'skip' odd ints \n }\n print(i) // and will print only even ints\n}", "2\r\n4\r\n6\r\n8\r\n10\r\n" ], [ "// Infinite loop are used in applications \n// app will run 'loop' until you select 'exit'\n\nvar myAppValue = 0\nwhile true {\n print(\"running...\") // infinite loop without 'if' clause\n myAppValue += 1\n // if break is your 'exit' button on your app\n if myAppValue == 10 {\n break // app will run for 10 loops and exit\n } \n}\n \nprint(\"exit has been selected\")\n", "running...\r\nrunning...\r\nrunning...\r\nrunning...\r\nrunning...\r\nrunning...\r\nrunning...\r\nrunning...\r\nrunning...\r\nrunning...\r\nexit has been selected\r\n" ] ], [ [ "Day 5 summary below:", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ] ]
4a2dbe661269f6ae65794906d0a9ef42762cd4e5
28,233
ipynb
Jupyter Notebook
assignment_1/Assignment.ipynb
Rexhaif/hse-compling
98d839fa44831a1d867e80515907b122affd8494
[ "MIT" ]
null
null
null
assignment_1/Assignment.ipynb
Rexhaif/hse-compling
98d839fa44831a1d867e80515907b122affd8494
[ "MIT" ]
null
null
null
assignment_1/Assignment.ipynb
Rexhaif/hse-compling
98d839fa44831a1d867e80515907b122affd8494
[ "MIT" ]
null
null
null
28.604863
631
0.463111
[ [ [ "!wget -q https://raw.githubusercontent.com/mannefedov/compling_nlp_hse_course/master/data/zhivago.txt", "_____no_output_____" ], [ "!ls -lh", "total 1.9M\n-rw-r--r-- 1 root root 778 Nov 8 20:08 Assignment.ipynb\n-rw-r--r-- 1 root root 1.9M Nov 8 20:08 zhivago.txt\n" ], [ "import re\nimport string\n\nfrom collections import Counter\n\nimport razdel\nimport nltk\nimport rusenttokenize\n\nfrom pymystem3 import Mystem\nfrom pymorphy2 import MorphAnalyzer\nfrom nltk.stem.snowball import SnowballStemmer\nfrom nltk.corpus import stopwords\nfrom tqdm.auto import tqdm", "_____no_output_____" ], [ "with open(\"./zhivago.txt\", 'r', encoding='utf-8') as f:\n text = f.read()", "_____no_output_____" ] ], [ [ "## Задание 1 - Очистка", "_____no_output_____" ] ], [ [ "## удаляем xml-like теги\n_text = re.sub('(\\<(/?[^>]+)>)', ' ', text)\n## строки из логов загрузчика\n_text = re.sub('\\d{2}.\\d{2}.\\d{4}', '', _text)\n_text = re.sub('[^\\S]*\\.(ru)\\S*', '', _text)\n_text = re.sub('\\d{1}.\\d{1}', '', _text)\n## Цифры, Латиницу(логи загрузчика), скобочки(ибо зачем)\n_text = re.sub('[0-9a-zA-Z«»]', '', _text)\n## Странные штуки в конце\n_text = re.sub(\"[/+]\", '', _text)\n## -\n_text = re.sub('\\s–\\s', '', _text)\n## лишние пробелы\ntext = re.sub(\"\\s+\", ' ', _text)", "_____no_output_____" ] ], [ [ "## Задание 2 - токенизация/разделение", "_____no_output_____" ] ], [ [ "punctuation = \"\".join((set(string.punctuation) - set(\".\")))", "_____no_output_____" ], [ "text = text.translate(str.maketrans('', '', punctuation)).strip()", "_____no_output_____" ], [ "sentences = rusenttokenize.ru_sent_tokenize(text)", "_____no_output_____" ], [ "## Приводим к нижнему регистру после токенизации, т.к отсутствие регистра может повлиять на корректность токенизации\ntokenized_sentences = [\n tuple(token.text.lower() for token in razdel.tokenize(sentence)) for sentence in tqdm(sentences)\n]", "_____no_output_____" ] ], [ [ "### 2.1 - Повторяющиеся предложения", "_____no_output_____" ] 
], [ [ "counter = Counter(tokenized_sentences)", "_____no_output_____" ], [ "repeating_sentences = list(map(\n lambda x: (\" \".join(x[0]), x[1]), # детокенизируем для отображения\n filter(\n lambda x: x[1] >= 2 and x[0][0] != '–', # встречающиеся два и более раз, не являющиеся прямой речью (начинается с -)\n counter.most_common()\n )\n))", "_____no_output_____" ] ], [ [ "Повторяющиеся предложения есть, всего их(без учета прямой речи) -", "_____no_output_____" ] ], [ [ "print(len(repeating_sentences))", "58\n" ] ], [ [ "Примеры таких предложений:", "_____no_output_____" ] ], [ [ "repeating_sentences[:10]", "_____no_output_____" ] ], [ [ "### 2.2 - Самый частотный токен", "_____no_output_____" ] ], [ [ "frequencies = Counter()\nfor sentence in tokenized_sentences:\n frequencies.update(sentence)", "_____no_output_____" ], [ "most_frequent = list(filter(lambda x: len(x[0]) > 6, frequencies.most_common()))[0]", "_____no_output_____" ], [ "print(f\"Самый частотный токен длинее 6 символов - {most_frequent[0]}, он встречается {most_frequent[1]} раз\")", "Самый частотный токен длинее 6 символов - андреевич, он встречается 285 раз\n" ] ], [ [ "## 3 - Стемминг", "_____no_output_____" ] ], [ [ "stemmer = SnowballStemmer('russian')", "_____no_output_____" ], [ "all_words = list([word for sentence in tokenized_sentences for word in sentence])", "_____no_output_____" ], [ "stemmed_words = list(map(stemmer.stem, all_words))", "_____no_output_____" ] ], [ [ "### 3.2 Слово не изменилось после стеммизации\nЕсли интерпретировать такую ошибку как сказано в условии - то таких ошибок очень много", "_____no_output_____" ] ], [ [ "## ошибки не-стеммизации\nnon_stemmed_idx = list(filter(lambda x: len(x[1]) > 4 and x[1] == stemmed_words[x[0]], enumerate(tqdm(all_words))))", "_____no_output_____" ], [ "len(non_stemmed_idx)", "_____no_output_____" ] ], [ [ "Однако я думаю что большинство из них не представляют собой действительно ошибки, просто слово является само себе \"стеммой\"", 
"_____no_output_____" ] ], [ [ "for i, w in non_stemmed_idx[:30]:\n print(f\"{i:^5}|{all_words[i]:^20} == {stemmed_words[i]:^20}\")", " 0 | борис == борис \n 1 | леонидович == леонидович \n 2 | пастернак == пастернак \n 3 | доктор == доктор \n 5 | доктор == доктор \n 18 | принес == принес \n 45 | доктор == доктор \n 63 | человек == человек \n 66 | пишет == пишет \n 107 | жертв == жертв \n 114 | перед == перед \n 136 | строк == строк \n 140 | могут == могут \n 162 | борис == борис \n 163 | пастернак == пастернак \n 164 | доктор == доктор \n 167 | дышат == дышат \n 177 | доктор == доктор \n 179 | борис == борис \n 180 | пастернак == пастернак \n 192 | перед == перед \n 215 | период == период \n 223 | покамест == покамест \n 238 | будет == будет \n 250 | подскажет == подскажет \n 253 | чисел == чисел \n 257 | сейчас == сейчас \n 260 | будет == будет \n 269 | будут == будут \n 297 | конец == конец \n" ] ], [ [ "Но и ошибки тоже есть, см. слова #260 и #269 - две словоформы одной лексемы (\"будет\"), и они не изменились после стеммирования, хотя нужно было бы", "_____no_output_____" ], [ "### 3.1 Одна стемма для разных слов", "_____no_output_____" ] ], [ [ "stem2words = {}\nfor i, word in enumerate(all_words):\n stemm = stemmed_words[i]\n if stemm not in stem2words:\n stem2words[stemm] = set()\n \n stem2words[stemm].add(word)", "_____no_output_____" ] ], [ [ "Будем смотреть такие слова, длина формы которых очень сильно отличается от длины стеммы", "_____no_output_____" ] ], [ [ "import numpy as np\nerror_pairs = {}\nfor key, forms in stem2words.items():\n if len(key) <= 6 and np.mean([abs(len(key) - len(form)) for form in forms]) >= 5 and len(forms) > 2:\n error_pairs[key] = forms", "_____no_output_____" ], [ "list(error_pairs.items())", "_____no_output_____" ] ], [ [ "Среди таких слов можно найти несколько примеров ошибок, удовлетворяющих условию\n- 'пузыр': пузырившегося(деепричастие?) и пузырями(сущ.)\n- 'выси': выси(сущ.) 
и высившаяся(деепричастие?)", "_____no_output_____" ], [ "## 4 - список стоп-слов из nltk", "_____no_output_____" ] ], [ [ "stop_words = stopwords.words('russian')", "_____no_output_____" ] ], [ [ "Посмотрим на самые частотные слова в нашем тексте, которые не встречаются в stopwords и посмотрим какие из них можно туда добавить", "_____no_output_____" ] ], [ [ "freq_words = {k:v for k, v in frequencies.most_common(200)}", "_____no_output_____" ], [ "dissset = set(freq_words.keys()) - set(stop_words)\nprint(dissset)", "{'это', 'время', 'поезд', 'тебе', 'андреевич', 'люди', 'доктор', 'жизни', 'своим', 'доме', 'которых', 'наверное', 'глаза', 'дома', 'точно', 'которой', 'вместе', 'стали', 'кроме', 'дом', 'сама', 'доктора', 'эта', 'всем', 'человек', 'часть', 'правда', 'нам', 'знаю', '...', 'словно', 'друг', 'ночь', 'очень', 'юрия', 'юрий', 'чтото', '.', 'день', 'нем', 'живаго', 'стало', 'окна', 'говорит', 'времени', 'кругом', 'оно', 'этим', 'лара', 'свете', 'который', 'андреевича', 'этих', 'юра', 'руки', 'которые', 'пока', 'своей', 'стал', 'ними', 'несколько', 'голову', 'дело', 'своих', 'жизнь', 'минуту', 'конца', 'сторону', 'весь'}\n" ] ], [ [ "Первые четыре слов, которые можно добавить в стоп-слова - это вариации слова 'это': \n- это\n- эта\n- этим\n- этих\n\nПочему:\n1. В стоп словах уже есть несколько вариаций слова 'это': этот, этого, этом, эти, эту, этой. Поэтому, следуя той же логике, можно добавить отсутствующие вариации, которые мы видим в нашем тексте", "_____no_output_____" ] ], [ [ "list(filter(lambda x: x.startswith('э'), stop_words))", "_____no_output_____" ] ], [ [ "2. Они очень часто встречаются в тексте, например 'это' встречается 1001 раз", "_____no_output_____" ] ], [ [ "freq_words['это']", "_____no_output_____" ] ], [ [ "Пятое слово - это 'оно'\n\nПочему его стоит добавить в список стоп-слов: в списке уже есть аналогичные слова для м.р, ж.р, и мн. числа. 
Выглядит как ошибка, что в списке нет формы для среднего рода.", "_____no_output_____" ] ], [ [ "list(filter(lambda x: x.startswith('он'), stop_words))", "_____no_output_____" ] ], [ [ "## Задание 5 - лемматизация", "_____no_output_____" ] ], [ [ "mystem = Mystem()\npymorhy = MorphAnalyzer()\nvocab = list(set(all_words))", "_____no_output_____" ], [ "mystem_lemms = list(map(lambda x: mystem.lemmatize(x)[0], tqdm(vocab)))", "_____no_output_____" ], [ "pymorphy_lemms = list(map(lambda x: pymorhy.normal_forms(x)[0], tqdm(vocab)))", "_____no_output_____" ], [ "mismatch = [\n (frequencies[vocab[i]], vocab[i], mystem_lemms[i], pymorphy_lemms[i]) \n for i in range(len(vocab)) \n if (mystem_lemms[i] != pymorphy_lemms[i])\n]", "_____no_output_____" ], [ "mismatch = sorted(mismatch, key=lambda x: x[0], reverse=True)", "_____no_output_____" ], [ "print(\"|# occurs| word | mystem3 | pymorphy2 |\")\nprint(\"-------------------------------------------------------\")\nfor count, vocab, lemma_mystem, lemma_pymorphy in mismatch[:50]:\n print(f\"|{count:^8}|{vocab:^13}|{lemma_mystem:^14}|{lemma_pymorphy:^15}|\")", "|# occurs| word | mystem3 | pymorphy2 |\n-------------------------------------------------------\n| 753 | все | все | всё |\n| 349 | еще | еще | ещё |\n| 234 | со | со | с |\n| 206 | во | во | в |\n| 202 | чем | что | чем |\n| 161 | ним | он | они |\n| 161 | может | может | мочь |\n| 141 | больше | больше | большой |\n| 132 | того | то | тот |\n| 122 | чтото | чтото | чтоть |\n| 114 | есть | быть | есть |\n| 114 | всех | все | весь |\n| 108 | тем | то | тем |\n| 103 | стал | становиться | стать |\n| 97 | всем | все | весь |\n| 94 | дома | дома | дом |\n| 88 | об | об | о |\n| 83 | стало | становиться | стать |\n| 76 | том | том | тот |\n| 72 | всего | всего | весь |\n| 67 | стали | становиться | стать |\n| 62 | свете | света | свет |\n| 62 | лучше | хорошо | хороший |\n| 59 | дальше | далеко | далёкий |\n| 52 | чтонибудь | чтонибудь | чтонибыть |\n| 47 | кажется | кажется 
| казаться |\n| 45 | стала | становиться | стать |\n| 43 | федоровна | федоровна | фёдорович |\n| 43 | какойто | какойто | какойтый |\n| 42 | всеми | все | весь |\n| 41 | ночью | ночь | ночью |\n| 37 | вперед | вперед | вперёд |\n| 37 | раньше | рано | ранний |\n| 37 | свое | свое | свой |\n| 36 | изза | изза | изз |\n| 36 | всему | все | весь |\n| 36 | тому | то | тот |\n| 35 | гдето | гдето | гдеть |\n| 30 |александровна|александровна | александрович |\n| 30 | антонина | антонина | антонин |\n| 29 | этому | это | этот |\n| 29 | ко | ко | к |\n| 29 | когдато | когдатый | когдато |\n| 29 | скорее | скоро | скорее |\n| 29 | вышел | выходить | выйти |\n| 28 | антипов | антипов | антип |\n| 28 | отчего | отчего | отчий |\n| 28 | антипова | антипова | антипов |\n| 28 | лары | лары | лара |\n| 28 | ктото | ктото | ктоть |\n" ] ], [ [ "Из результатов анализа мисматчей для наиболее частотных слов, видно, что каждая библиотека имеет свои проблемы:\n1. Pymorphy2 подвержен Gender Bias: имена/отчества/фамилии в ж.р нормализуются в аналогичные, но в м.р. \n\nMystem так не ошибается, скорее всего из-за того что у него словарь меньшего размера и он обрабатывает такие слова как ошибки\n\n2. Mystem некорректно лемматизирует некоторые устаревшие формы предлогов, см. 'ко', 'об', 'со', 'во'.\n3. Pymorphy2 исправляет ошибки в ходе лемматизации слов, которые должны быть написаны через 'ё', но написаны с 'е', см. вперед, еще, все(вероятно).\n4. Иногда это исправление некорректно, см. дальше. В т.ч исправления ошибок другого типа тоже могут быть некорректны, см. ктото, изза, гдето, какойто, чтото. В тоже время Mystem не делает ничего с словами, написанными с ошибкой.\n\nВ итоге можно сказать, что для анализа текстов такого рода больше подходит mystem.", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ] ]
4a2dc1ea96d5926aebe4035bee5fb0b80302b825
34,619
ipynb
Jupyter Notebook
Model backlog/Train/15-jigsaw-train-1fold-xlm-roberta-large.ipynb
dimitreOliveira/Jigsaw-Multilingual-Toxic-Comment-Classification
44422e6aeeff227e22dbb5c05101322e9d4aabbe
[ "MIT" ]
4
2020-06-23T02:31:07.000Z
2020-07-04T11:50:08.000Z
Model backlog/Train/15-jigsaw-train-1fold-xlm-roberta-large.ipynb
dimitreOliveira/Jigsaw-Multilingual-Toxic-Comment-Classification
44422e6aeeff227e22dbb5c05101322e9d4aabbe
[ "MIT" ]
null
null
null
Model backlog/Train/15-jigsaw-train-1fold-xlm-roberta-large.ipynb
dimitreOliveira/Jigsaw-Multilingual-Toxic-Comment-Classification
44422e6aeeff227e22dbb5c05101322e9d4aabbe
[ "MIT" ]
null
null
null
30.772444
159
0.554089
[ [ [ "## Dependencies", "_____no_output_____" ] ], [ [ "import json, warnings, shutil\nfrom jigsaw_utility_scripts import *\nfrom transformers import TFXLMRobertaModel, XLMRobertaConfig\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras import optimizers, metrics, losses, layers\nfrom tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint\n\n\nfrom transformers import TFAutoModel, AutoTokenizer\n\nwarnings.filterwarnings(\"ignore\")", "_____no_output_____" ] ], [ [ "## TPU configuration", "_____no_output_____" ] ], [ [ "strategy, tpu = set_up_strategy()\nprint(\"REPLICAS: \", strategy.num_replicas_in_sync)\nAUTO = tf.data.experimental.AUTOTUNE", "Running on TPU grpc://10.0.0.2:8470\nREPLICAS: 8\n" ] ], [ [ "# Load data", "_____no_output_____" ] ], [ [ "# database_base_path = '/kaggle/input/jigsaw-dataset-split-pb-roberta-large-192/'\n# k_fold = pd.read_csv(database_base_path + '5-fold.csv')\n# valid_df = pd.read_csv(\"/kaggle/input/jigsaw-multilingual-toxic-comment-classification/validation.csv\", usecols=['comment_text', 'toxic', 'lang'])\n\n# print('Train set samples: %d' % len(k_fold))\n# print('Validation set samples: %d' % len(valid_df))\n# display(k_fold.head())\n\n# # Unzip files\n# !tar -xvf /kaggle/input/jigsaw-dataset-split-pb-roberta-large-192/fold_1.tar.gz\n# # !tar -xvf /kaggle/input/jigsaw-dataset-split-pb-roberta-large-192/fold_2.tar.gz\n# # !tar -xvf /kaggle/input/jigsaw-dataset-split-pb-roberta-large-192/fold_3.tar.gz\n# # !tar -xvf /kaggle/input/jigsaw-dataset-split-pb-roberta-large-192/fold_4.tar.gz\n# # !tar -xvf /kaggle/input/jigsaw-dataset-split-pb-roberta-large-192/fold_5.tar.gz", "_____no_output_____" ] ], [ [ "# Model parameters", "_____no_output_____" ] ], [ [ "base_path = '/kaggle/input/jigsaw-transformers/XLM-RoBERTa/'\n\nconfig = {\n \"MAX_LEN\": 192,\n \"BATCH_SIZE\": 16 * strategy.num_replicas_in_sync,\n \"EPOCHS\": 2,\n \"LEARNING_RATE\": 1e-5, \n \"ES_PATIENCE\": 1,\n \"N_FOLDS\": 1,\n \"base_model_path\": 
base_path + 'tf-xlm-roberta-large-tf_model.h5',\n \"config_path\": base_path + 'xlm-roberta-large-config.json'\n}\n\nwith open('config.json', 'w') as json_file:\n json.dump(json.loads(json.dumps(config)), json_file)", "_____no_output_____" ] ], [ [ "# Model", "_____no_output_____" ] ], [ [ "# module_config = XLMRobertaConfig.from_pretrained(config['config_path'], output_hidden_states=False)\n\n# def model_fn(MAX_LEN):\n# input_ids = layers.Input(shape=(MAX_LEN,), dtype=tf.int32, name='input_ids')\n \n# base_model = TFXLMRobertaModel.from_pretrained(config['base_model_path'], config=module_config)\n# sequence_output = base_model({'input_ids': input_ids})\n \n# last_state = sequence_output[0]\n# cls_token = last_state[:, 0, :]\n \n# output = layers.Dense(1, activation='sigmoid', name='output')(cls_token)\n \n# model = Model(inputs=input_ids, outputs=output)\n# model.compile(optimizers.Adam(lr=config['LEARNING_RATE']), \n# loss=losses.BinaryCrossentropy(), \n# metrics=[metrics.BinaryAccuracy(), metrics.AUC()])\n \n# return model", "_____no_output_____" ] ], [ [ "# Train", "_____no_output_____" ] ], [ [ "# history_list = []\n\n# for n_fold in range(config['N_FOLDS']):\n# tf.tpu.experimental.initialize_tpu_system(tpu)\n# print('\\nFOLD: %d' % (n_fold+1))\n# # Load data\n# base_data_path = 'fold_%d/' % (n_fold+1)\n# x_train = np.load(base_data_path + 'x_train.npy')\n# y_train = np.load(base_data_path + 'y_train.npy')\n# x_valid = np.load(base_data_path + 'x_valid.npy')\n# x_valid_ml = np.load(database_base_path + 'x_valid.npy')\n# y_valid_ml = np.load(database_base_path + 'y_valid.npy')\n \n# step_size = x_train.shape[0] // config['BATCH_SIZE']\n\n# ### Delete data dir\n# shutil.rmtree(base_data_path)\n\n# # Train model\n# model_path = 'model_fold_%d.h5' % (n_fold+1)\n# es = EarlyStopping(monitor='val_loss', mode='min', patience=config['ES_PATIENCE'], \n# restore_best_weights=True, verbose=1)\n# checkpoint = ModelCheckpoint(model_path, monitor='val_loss', mode='min', \n# 
save_best_only=True, save_weights_only=True, verbose=1)\n \n# with strategy.scope():\n# model = model_fn(config['MAX_LEN'])\n# history = model.fit(get_training_dataset(x_train, y_train, config['BATCH_SIZE'], AUTO),\n# validation_data=(get_validation_dataset(x_valid_ml, y_valid_ml, config['BATCH_SIZE'], AUTO)),\n# callbacks=[checkpoint, es],\n# epochs=config['EPOCHS'], \n# steps_per_epoch=step_size,\n# verbose=1).history\n \n# history_list.append(history)\n \n# # Make predictions\n# train_preds = model.predict(get_test_dataset(x_train, config['BATCH_SIZE'], AUTO))\n# valid_preds = model.predict(get_test_dataset(x_valid, config['BATCH_SIZE'], AUTO))\n# valid_ml_preds = model.predict(get_test_dataset(x_valid_ml, config['BATCH_SIZE'], AUTO))\n \n# k_fold.loc[k_fold['fold_%d' % (n_fold+1)] == 'train', 'pred_%d' % (n_fold+1)] = np.round(train_preds)\n# k_fold.loc[k_fold['fold_%d' % (n_fold+1)] == 'validation', 'pred_%d' % (n_fold+1)] = np.round(valid_preds)\n# valid_df['pred_%d' % (n_fold+1)] = np.round(valid_ml_preds)", "_____no_output_____" ] ], [ [ "## Model loss graph", "_____no_output_____" ] ], [ [ "# sns.set(style=\"whitegrid\")\n# for n_fold in range(config['N_FOLDS']):\n# print('Fold: %d' % (n_fold+1))\n# plot_metrics(history_list[n_fold])", "_____no_output_____" ] ], [ [ "# Model evaluation", "_____no_output_____" ] ], [ [ "# display(evaluate_model(k_fold, config['N_FOLDS']).style.applymap(color_map))", "_____no_output_____" ] ], [ [ "# Confusion matrix", "_____no_output_____" ] ], [ [ "# for n_fold in range(config['N_FOLDS']):\n# print('Fold: %d' % (n_fold+1))\n# train_set = k_fold[k_fold['fold_%d' % (n_fold+1)] == 'train']\n# validation_set = k_fold[k_fold['fold_%d' % (n_fold+1)] == 'validation'] \n# plot_confusion_matrix(train_set['toxic'], train_set['pred_%d' % (n_fold+1)], \n# validation_set['toxic'], validation_set['pred_%d' % (n_fold+1)])", "_____no_output_____" ] ], [ [ "# Model evaluation by language", "_____no_output_____" ] ], [ [ "# 
display(evaluate_model_lang(valid_df, config['N_FOLDS']).style.applymap(color_map))", "_____no_output_____" ] ], [ [ "# Visualize predictions", "_____no_output_____" ] ], [ [ "# pd.set_option('max_colwidth', 120)\n# display(k_fold[['comment_text', 'toxic'] + [c for c in k_fold.columns if c.startswith('pred')]].head(15))", "_____no_output_____" ], [ "def regular_encode(texts, tokenizer, maxlen=512):\n enc_di = tokenizer.batch_encode_plus(\n texts, \n return_attention_masks=False, \n return_token_type_ids=False,\n pad_to_max_length=True,\n max_length=maxlen\n )\n \n return np.array(enc_di['input_ids'])\n\ndef build_model(transformer, max_len=512):\n input_word_ids = layers.Input(shape=(max_len,), dtype=tf.int32, name=\"input_word_ids\")\n sequence_output = transformer(input_word_ids)[0]\n cls_token = sequence_output[:, 0, :]\n out = layers.Dense(1, activation='sigmoid')(cls_token)\n \n model = Model(inputs=input_word_ids, outputs=out)\n model.compile(optimizers.Adam(lr=1e-5), loss='binary_crossentropy', metrics=['accuracy'])\n \n return model", "_____no_output_____" ], [ "AUTO = tf.data.experimental.AUTOTUNE\n\n# Configuration\nEPOCHS = 2\nBATCH_SIZE = 16 * strategy.num_replicas_in_sync\nMAX_LEN = 192\nMODEL = 'jplu/tf-xlm-roberta-large'", "_____no_output_____" ], [ "tokenizer = AutoTokenizer.from_pretrained(MODEL)", "_____no_output_____" ], [ "train1 = pd.read_csv(\"/kaggle/input/jigsaw-multilingual-toxic-comment-classification/jigsaw-toxic-comment-train.csv\")\ntrain2 = pd.read_csv(\"/kaggle/input/jigsaw-multilingual-toxic-comment-classification/jigsaw-unintended-bias-train.csv\")\ntrain2.toxic = train2.toxic.round().astype(int)\n\nvalid = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/validation.csv')\ntest = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/test.csv')\nsub = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/sample_submission.csv')\n\n# Combine train1 with a subset of 
train2\ntrain = pd.concat([\n train1[['comment_text', 'toxic']],\n train2[['comment_text', 'toxic']].query('toxic==1'),\n train2[['comment_text', 'toxic']].query('toxic==0').sample(n=100000, random_state=0)\n])", "_____no_output_____" ], [ "x_train = regular_encode(train.comment_text.values, tokenizer, maxlen=MAX_LEN)\nx_valid = regular_encode(valid.comment_text.values, tokenizer, maxlen=MAX_LEN)\nx_test = regular_encode(test.content.values, tokenizer, maxlen=MAX_LEN)\n\ny_train = train.toxic.values\ny_valid = valid.toxic.values", "_____no_output_____" ], [ "train_dataset = (\n tf.data.Dataset\n .from_tensor_slices((x_train, y_train))\n .repeat()\n .shuffle(2048)\n .batch(BATCH_SIZE)\n .prefetch(AUTO)\n)\n\nvalid_dataset = (\n tf.data.Dataset\n .from_tensor_slices((x_valid, y_valid))\n .batch(BATCH_SIZE)\n .cache()\n .prefetch(AUTO)\n)\n\ntest_dataset = (\n tf.data.Dataset\n .from_tensor_slices(x_test)\n .batch(BATCH_SIZE)\n)", "_____no_output_____" ], [ "module_config = XLMRobertaConfig.from_pretrained(config['config_path'], output_hidden_states=False)\n\nwith strategy.scope():\n# transformer_layer = TFAutoModel.from_pretrained(config['base_model_path'])\n transformer_layer = TFXLMRobertaModel.from_pretrained(config['base_model_path'], config=module_config)\n model = build_model(transformer_layer, max_len=MAX_LEN)\nmodel.summary()", "Model: \"model\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ninput_word_ids (InputLayer) [(None, 192)] 0 \n_________________________________________________________________\ntfxlm_roberta_model (TFXLMRo ((None, 192, 1024), (None 559890432 \n_________________________________________________________________\ntf_op_layer_strided_slice (T [(None, 1024)] 0 \n_________________________________________________________________\ndense (Dense) (None, 1) 1025 
\n=================================================================\nTotal params: 559,891,457\nTrainable params: 559,891,457\nNon-trainable params: 0\n_________________________________________________________________\n" ], [ "n_steps = x_train.shape[0] // BATCH_SIZE\ntrain_history = model.fit(\n train_dataset,\n steps_per_epoch=n_steps,\n validation_data=valid_dataset,\n epochs=EPOCHS\n)", "Train for 3404 steps, validate for 63 steps\nEpoch 1/2\n3404/3404 [==============================] - 1825s 536ms/step - loss: 0.0679 - accuracy: 0.9737 - val_loss: 0.5837 - val_accuracy: 0.8469\nEpoch 2/2\n3404/3404 [==============================] - 1615s 475ms/step - loss: 0.0543 - accuracy: 0.9785 - val_loss: 0.3064 - val_accuracy: 0.8671\n" ], [ "n_steps = x_valid.shape[0] // BATCH_SIZE\ntrain_history_2 = model.fit(\n valid_dataset.repeat(),\n steps_per_epoch=n_steps,\n epochs=EPOCHS\n)", "Train for 62 steps\nEpoch 1/2\n62/62 [==============================] - 68s 1s/step - loss: 0.2344 - accuracy: 0.8943\nEpoch 2/2\n62/62 [==============================] - 140s 2s/step - loss: 0.1617 - accuracy: 0.9270\n" ], [ "sub['toxic'] = model.predict(test_dataset, verbose=1)\nsub.to_csv('submission.csv', index=False)", "499/499 [==============================] - 110s 221ms/step\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a2dd45440316d8cadf214ba22d4219bc61fa3be
28,868
ipynb
Jupyter Notebook
Lab-04-14/MovieLens.ipynb
wileong/data_science_projects_directory
1a2e018bd6e8e0b97a8b6df1fa074f1a369d4318
[ "MIT" ]
null
null
null
Lab-04-14/MovieLens.ipynb
wileong/data_science_projects_directory
1a2e018bd6e8e0b97a8b6df1fa074f1a369d4318
[ "MIT" ]
null
null
null
Lab-04-14/MovieLens.ipynb
wileong/data_science_projects_directory
1a2e018bd6e8e0b97a8b6df1fa074f1a369d4318
[ "MIT" ]
null
null
null
32.656109
409
0.464875
[ [ [ "# MovieLens\n\n##DUE APRIL 21, 2016\n\n[MovieLens](http://www.movielens.org/) is a website where users can submit ratings for movies that they watch and receive recommendations for other movies they might enjoy. The data is collected and made publicly available for research. We will be working with a data set of 1 million user ratings of movies. You can find this data set and even larger ones at http://grouplens.org/datasets/movielens/.\n", "_____no_output_____" ], [ "## Reading in the Data\n\nNote that the data consists of three data frames: one with information about the users, another containing the ratings, and yet another with information about the movies. See the readme file (/data/movielens/README) for more information.", "_____no_output_____" ] ], [ [ "import pandas as pd\n\nunames = ['user_id', 'gender', 'age', 'occupation', 'zip']\nusers = pd.read_table('/data/movielens/users.dat', sep='::', header=None,\n names=unames, engine=\"python\")\n\nrnames = ['user_id', 'movie_id', 'rating', 'timestamp']\nratings = pd.read_table('/data/movielens/ratings.dat', sep='::', header=None,\n names=rnames, engine=\"python\")\n\nmnames = ['movie_id', 'title', 'genres']\nmovies = pd.read_table('/data/movielens/movies.dat', sep='::', header=None,\n names=mnames, engine=\"python\")", "_____no_output_____" ], [ "ratings", "_____no_output_____" ] ], [ [ "## Question 1\n\nDevelop a way to rank movies. Why is it not a good idea to simply sort the movies by average rating? (You may want to try calculating this first.) 
Then, explain your methodology and use it to produce a list of the Top 10 movies of all time.", "_____no_output_____" ] ], [ [ "#ratings", "_____no_output_____" ], [ "# YOUR CODE HERE\n#raise NotImplementedError()\n\n# ratings_movie_id_avg = ratings.groupby([\"movie_id\" ]).mean()#.get([\"rating\"])\n# #This would calculate the mean of each column, including userid and timestamp for ea movieid\n# ratings_movie_id_avg\n\n\n\n\n#Another way to rank movies:\n# A 'top' movie is a movie with not only THOUSANDS of ratings, but on average, people gave it a 5 star rating.\n\nnum_ratings_per_movie = ratings.groupby([\"movie_id\"])[['rating']].count()\nnum_ratings_per_movie = num_ratings_per_movie.ix[num_ratings_per_movie['rating'] >= 2000 ] # get movies with >= # of ratings\nnum_ratings_per_movie = num_ratings_per_movie.reset_index() # call this after groupby\nnum_ratings_per_movie = num_ratings_per_movie.merge( ratings, on= 'movie_id')\n\ntop_movies = pd.merge(num_ratings_per_movie,users, on = \"user_id\")\ntop_movies = top_movies.ix[ top_movies['rating_y'] == 5].groupby('movie_id')[['rating_y']].mean()\ntop_movies.columns = ['average_rating']\ntop_movies.head(n=10)\n\n\n\n\n# avg_ratings = ratings.ix[ratings['rating'] >= 4, :].groupby('movie_id')[ ['rating'] ].mean()\n# # don't use get('rating') as that will use all cols when calling .mean() on it\n# avg_ratings", "_____no_output_____" ] ], [ [ "It doesn't make sense to sort the movies by its average ratings alone because there may be some movies where only a few people gave it a rating. A movie with a rating defined by only a few reviewers is not a good rating because a few reviewers is not reprsentative of the entire audience at all.\n\nThe table outputted shows the top 10 movies of all time.", "_____no_output_____" ], [ "## Question 2\n\nRestrict to movies with at least 200 ratings. For each movie, calculate the difference between the average male rating and the average female rating. 
Based on these differences between average male ratings and average female ratings, what movies were the most male-friendly? What movies were the most female-friendly?", "_____no_output_____" ] ], [ [ "# YOUR CODE HERE\n#raise NotImplementedError()\n\n\nnum_ratings_per_movie = ratings.groupby([\"movie_id\"])[['rating']].count()\nnum_ratings_per_movie = num_ratings_per_movie.ix[num_ratings_per_movie['rating'] >= 200]\nnum_ratings_per_movie = num_ratings_per_movie.reset_index()\nnum_ratings_per_movie = num_ratings_per_movie.merge( ratings, on= 'movie_id')\n#num_ratings_per_movie\n#next step: merge num_ratings_per_movie_200 with original ratings table and then filter out.\n\n\ndata_merged_rate = pd.merge(num_ratings_per_movie,users, on = \"user_id\")\n\n# # inner join:\ndata_merged_rate['Avg Female Rating'] = data_merged_rate.ix[ data_merged_rate['gender'] == 'F' ].groupby(['movie_id']).mean()['rating_y']\ndata_merged_rate['Avg Female Rating'] = data_merged_rate['Avg Female Rating'].fillna(0)\ndata_merged_rate['Avg Male Rating'] = data_merged_rate.ix[ data_merged_rate['gender'] == 'M' ].groupby( ['movie_id'] ).mean()['rating_y']\ndata_merged_rate['Avg Male Rating'] = data_merged_rate['Avg Male Rating'].fillna(0)\n# #['rating'] is necessary because I only want to add a column\n\ndata_merged_rate['Diff Between Females\\' and Males\\' Avg Ratings' ] = data_merged_rate['Avg Female Rating']\\\n - data_merged_rate['Avg Male Rating']\ndata_merged_rate = data_merged_rate.sort_values(by = 'Diff Between Females\\' and Males\\' Avg Ratings' ,ascending = True)\n\n#data_merged_rate\n\nprint(data_merged_rate.head(n = 3))\n#for most male friendly\nprint(\"=========================================================================\")\nprint(data_merged_rate.tail(n = 3))\n# for most female friendly", " movie_id rating_x user_id rating_y timestamp gender age \\\n1201 3744 421 19 4 978147373 M 1 \n3760 1784 1424 65 5 977888204 M 35 \n1194 3624 791 19 5 978146984 M 1 \n\n occupation zip 
Avg Female Rating Avg Male Rating \\\n1201 10 48073 3.494949 4.221300 \n3760 12 55803 2.878788 3.555147 \n1194 10 48073 2.944444 3.585227 \n\n Diff Between Females' and Males' Avg Ratings \n1201 -0.726351 \n3760 -0.676359 \n1194 -0.640783 \n=========================================================================\n movie_id rating_x user_id rating_y timestamp gender age \\\n2468 1721 1546 38 4 978044771 F 18 \n203 2699 1367 8 5 978229347 M 25 \n1088 1792 448 19 3 978147395 M 1 \n\n occupation zip Avg Female Rating Avg Male Rating \\\n2468 4 02215 3.254717 2.578358 \n203 12 11413 3.486842 2.795276 \n1088 10 48073 3.790378 2.959596 \n\n Diff Between Females' and Males' Avg Ratings \n2468 0.676359 \n203 0.691567 \n1088 0.830782 \n" ] ], [ [ "Movies(movie_id) that were most male friendly : 3744, 1784, 3624\n\nMovies(move_id) that were most female friendly: 1721, 2699, 1792\n\n*note: If difference between females' and males' average ratings is positive, that means that on average, females gave a particular movie higher rating than males did (female_friendly). If difference is negative, males gave it higher ratings on average.", "_____no_output_____" ], [ "## Question 3\n\nCalculate the average rating by genre. Note that a movie can belong to multiple genres. You will have to write some code that parses the `genres` column of the `movies` table. What genre had the highest average rating? 
What genre had the lowest?", "_____no_output_____" ] ], [ [ "# YOUR CODE HERE\n#raise NotImplementedError()\n\n# merge the movies to the ratings\ndata = movies.merge(ratings, on=\"movie_id\")\n\n# get all occurrences of genres in a list\ngenres = movies[\"genres\"].str.cat(sep=\"|\").split(\"|\")\n# get unique genres, convert back to list\ngenres = list(set(genres))\n\n# add a boolean column to DataFrame for each genre\nfor genre in genres:\n data[genre] = data[\"genres\"].str.contains(genre)\n\n# multiply each genre column by the ratings column,\n# then .sum() to get the total of ratings for that genre\ntotal_rating = data[genres].multiply(data['rating'], axis=0).sum()\n# adding the booleans should give the number of ratings for each genre\nnum_rating = data[genres].sum()\n# divide to get the average rating\n#(total_rating / num_rating).sort_values()\nprint((total_rating / num_rating).sort_values().tail())\nprint((total_rating / num_rating).sort_values().head())\n\n\n\n# split_genres = movies[ 'genres'].str.cat(sep = '|') # returns a string\n# #split_genres\n\n# split_genres = split_genres.split(\"|\") # must concat before doing this step\n# split_genres\n\n# distinct_genres_list = []\n\n# #create list of distinct genres\n# # get unique genres, convert back to list\n# #enres = list(set(genres))\n\n# for genre in split_genres:\n \n \n# if genre not in distinct_genres_list:\n \n# distinct_genres_list.append(genre)\n\n\n\n# data_merged_genre = pd.merge(ratings, movies, on = 'movie_id')\n# data_merged_genre = data_merged_genre.sort_index()\n\n\n# for genre in distinct_genres_list:\n \n# data_merged_genre[genre] = data_merged_genre['genres'].str.contains(genre)\n \n# data_merged_genre\n\n# # multiply each genre column by the ratings column,\n# # then .sum() to get the total of ratings for that genre\n# total_rating = data_merged_genre[split_genres].multiply(data_merged_genre['rating'], axis=0).sum()\n# # adding the booleans should give the number of ratings for each 
genre\n# num_rating = data_merged_genre[genres].sum()\n# # divide to get the average rating\n# (total_rating / num_rating).sort_values()\n \n# #.str.contains('Drama')\n# #data_merged_genre['genres'].str.contains('Drama')\n# #use .split?\n\n# # data_merged_genre = data_merged_genre.groupby('genres' )[ ['rating'] ].mean()\n# # print(data_merged_genre.sort_values(by = 'rating').tail())\n# # print(data_merged_genre.sort_values(by = 'rating').head(n = 50))", "Crime 3.708679\nDrama 3.766332\nWar 3.893327\nDocumentary 3.933123\nFilm-Noir 4.075188\ndtype: float64\nHorror 3.215013\nChildren's 3.422035\nFantasy 3.447371\nSci-Fi 3.466521\nAdventure 3.477257\ndtype: float64\n" ] ], [ [ "Ignoring mixed genres, the Animation genre had the highest average ratings. Also, ignoring mixed genres, the Children genre had\nthe lowest average ratings.\n\nCORRECTION: Horror genre had lowest avg ratings. Film-Noi has highest.", "_____no_output_____" ], [ "## Question 4\n\nFormulate a question of your own that you can answer using this MovieLens data. State clearly what your question is and what your findings are. Bonus points are available if you find something interesting!\n\n**Tip:** You may find the `occupation` column of `users` to be a rich source of interesting questions. See the README file (/data/movielens/README) for information about how `occupation` is coded in the data.", "_____no_output_____" ] ], [ [ "# YOUR CODE HERE\n#raise NotImplementedError()\n\nnum_ratings_by_occupation = users.merge(ratings, on = 'user_id').groupby('occupation')[['rating']].count()\nnum_ratings_by_occupation.columns = ['number of ratings']\nnum_ratings_by_occupation", "_____no_output_____" ] ], [ [ "Q: How many ratings did each group of people by occupation give? Which occupation had people that gave the most ratings? \n\nUsers(people) with occupation 4 rated the most amount of movies as a group. One could speculate that those with occupation 4 were more willing to rate a movie that they watched. 
It is also possible that the movies those in occupation 4 chose to watch tended to provoke strong feelings.", "_____no_output_____" ], [ "## Submitting this Lab\n\nNow, restart your kernel and re-run your entire notebook from beginning to end. Make sure there are no errors or bugs. When you have verified this, open the Terminal on JupyterHub and type \n\n`nbgrader submit Lab-04-14 --course dlsun`\n\nto submit this lab.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
4a2de4ded28daad001e7473ba330d4977c81d94d
24,144
ipynb
Jupyter Notebook
notebooks/Figure - potential density rotation curve.ipynb
adrn/ophiuchus
fe7e937bf421d506ec252165f044d514f571667b
[ "MIT" ]
1
2015-09-25T10:12:52.000Z
2015-09-25T10:12:52.000Z
notebooks/Figure - potential density rotation curve.ipynb
adrn/ophiuchus
fe7e937bf421d506ec252165f044d514f571667b
[ "MIT" ]
null
null
null
notebooks/Figure - potential density rotation curve.ipynb
adrn/ophiuchus
fe7e937bf421d506ec252165f044d514f571667b
[ "MIT" ]
null
null
null
30.562025
207
0.481196
[ [ [ "from __future__ import division, print_function\nimport os\nimport sys\nfrom collections import OrderedDict\n\n# Third-party\nimport astropy.coordinates as coord\nimport astropy.units as u\nimport matplotlib as mpl\nimport matplotlib.pyplot as pl\nimport numpy as np\npl.style.use('apw-notebook')\n%matplotlib inline\n\n# Custom\nimport gala.dynamics as gd\nimport gala.integrate as gi\nimport gala.potential as gp\nfrom gala.units import galactic\nfrom scipy.misc import factorial\n\n# from ophiuchus import barred_mw, static_mw\nimport ophiuchus.potential as op\n\nplotpath = \"/Users/adrian/projects/ophiuchus-paper/figures/\"\nif not os.path.exists(plotpath):\n os.mkdir(plotpath)", "_____no_output_____" ], [ "barred_mw = op.load_potential(\"barred_mw_4\")\nstatic_mw = op.load_potential(\"static_mw\")", "_____no_output_____" ], [ "# transform from H&O 1992 coefficients to Lowing 2011 coefficients\nnlms = np.array([[0,0,0],\n [1,0,0],\n [2,0,0],\n [3,0,0],\n [0,2,0],\n [1,2,0],\n [2,2,0],\n [0,2,2],\n [1,2,2],\n [2,2,2],\n [0,4,0],\n [1,4,0],\n [0,4,2],\n [1,4,2],\n [0,4,4],\n [1,4,4],\n [0,6,0],\n [0,6,2],\n [0,6,4],\n [0,6,6]])\n\n_Snlm = np.array([1.509,-0.086,-0.033,-0.02,-2.606,\n -0.221,-0.001,0.665,0.129,0.006,6.406,\n 1.295,-0.66,-0.14,0.044,-0.012,-5.859,\n 0.984,-0.03,0.001])\nNEW_S = _Snlm.copy()\n\nfor i,(n,l,m) in zip(range(len(_Snlm)), nlms):\n if l != 0:\n fac = np.sqrt(4*np.pi) * np.sqrt((2*l+1) / (4*np.pi) * factorial(l-m) / factorial(l+m))\n NEW_S[i] /= fac", "_____no_output_____" ], [ "nmax = 3\nlmax = 6\n\nSnlm = np.zeros((nmax+1,lmax+1,lmax+1))\nfor (n,l,m),A in zip(nlms,NEW_S):\n Snlm[n,l,m] = A", "_____no_output_____" ], [ "static_mw", "_____no_output_____" ], [ "barred_mw", "_____no_output_____" ], [ "# barpars = barred_mw.parameters.copy()\n# barpars['halo']['q_z'] = 1.\n# barpars['spheroid']['c'] = 0.2\n# barpars['spheroid']['m'] = 5E9\n# barpars['disk']['m'] = 4E10\n# barpars['bar']['r_s'] = 1.2\n# barpars['bar']['m'] = 
barpars['bar']['m']\n# barred_mw = op.OphiuchusPotential(**barpars)\n\n# stapars = static_mw.parameters.copy()\n# stapars['halo']['q_z'] = 1.\n# stapars['spheroid']['c'] = 0.3\n# stapars['spheroid']['m'] = 1.2E10\n# stapars['disk']['m'] = 6E10\n# static_mw = op.OphiuchusPotential(**stapars)", "_____no_output_____" ], [ "potential_classes = OrderedDict()\npotential_classes['disk'] = gp.MiyamotoNagaiPotential\npotential_classes['halo'] = gp.FlattenedNFWPotential\npotential_classes['bar'] = op.WangZhaoBarPotential\npotential_classes['spheroid'] = gp.HernquistPotential", "_____no_output_____" ], [ "(0.19*u.kpc/u.Myr).to(u.km/u.s)", "_____no_output_____" ] ], [ [ "---\n\n### Mass profile", "_____no_output_____" ] ], [ [ "ix = 0\n\nxyz = np.zeros((3,128))\nxyz[ix] = np.linspace(0.,10.,xyz.shape[1])\n\nfor pot in [static_mw, barred_mw]:\n Menc = pot.mass_enclosed(xyz)\n pl.loglog(xyz[ix], Menc, marker='')\npl.axvline(1)\npl.axhline(1E10)", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ] ], [ [ "def density_on_grid(potential, t=0., grid_lim=(-15,15), ngrid=128):\n grid = np.linspace(grid_lim[0], grid_lim[1], ngrid)\n xyz = np.vstack(map(np.ravel, np.meshgrid(grid,grid,grid)))\n\n# val = np.zeros((ngrid*ngrid*ngrid,))\n val = potential.density(xyz, t=t).value\n val[np.isnan(val)] = val[np.isfinite(val)].min()\n val[val < 0] = 1.\n \n gridx = xyz[0].reshape(ngrid,ngrid,ngrid)[:,:,0]\n gridy = xyz[1].reshape(ngrid,ngrid,ngrid)[:,:,0]\n \n return gridx, gridy, val", "_____no_output_____" ], [ "ngrid = 128\nxx,yy,barred_dens = density_on_grid(barred_mw, ngrid=ngrid)\nxx,yy,static_dens = density_on_grid(static_mw, ngrid=ngrid)", "_____no_output_____" ] ], [ [ "## Surface density plots", "_____no_output_____" ] ], [ [ "def side_by_side_surface_dens(xx, yy, dens):\n ngrid = xx.shape[0]\n \n fig,axes = pl.subplots(1, 2, figsize=(8,4), \n sharex=True, sharey=True)\n \n axes[0].pcolormesh(xx, yy, dens.reshape(ngrid,ngrid,ngrid).sum(axis=2), \n cmap='Greys_r',\n 
norm=mpl.colors.LogNorm(),\n vmin=1E7, vmax=5E9)\n axes[0].text(-8., 0, r\"$\\odot$\", ha='center', va='center', fontsize=18, color='w')\n\n axes[1].pcolormesh(xx, yy, dens.reshape(ngrid,ngrid,ngrid).sum(axis=0).T, \n cmap='Greys_r',\n norm=mpl.colors.LogNorm(),\n vmin=1E7, vmax=5E9)\n\n axes[0].set_xlim(xx.min(), xx.max())\n axes[0].set_ylim(yy.min(), yy.max())\n\n # TODO: fix the damn aspect ratio\n# for ax in axes:\n# ax.set_aspect('equal')\n fig.tight_layout()\n \n return fig", "_____no_output_____" ], [ "fig = side_by_side_surface_dens(xx, yy, barred_dens)\nfig = side_by_side_surface_dens(xx, yy, static_dens)", "_____no_output_____" ] ], [ [ "## Contour plots", "_____no_output_____" ] ], [ [ "def side_by_side_contour_plots(xx, yy, dens, levels=10**np.arange(7,12,0.25)):\n ngrid = xx.shape[0]\n \n fig,axes = pl.subplots(1,2,figsize=(7.8,4),sharex=True,sharey=True)\n\n im = axes[0].contour(xx, yy, dens.reshape(ngrid,ngrid,ngrid).sum(axis=2), \n colors='k',\n levels=levels,\n rasterized=True)\n axes[0].text(-8., 0, r\"$\\odot$\", ha='center', va='center', fontsize=18)\n\n _ = axes[1].contour(xx, yy, dens.reshape(ngrid,ngrid,ngrid).sum(axis=1).T, \n colors='k',\n levels=levels,\n rasterized=True)\n\n\n # fig.subplots_adjust(bottom=0.2, right=0.85, wspace=0.25)\n\n for ax in axes:\n ax.xaxis.set_ticks([-10,0,10])\n ax.yaxis.set_ticks([-10,0,10])\n\n axes[0].set_xlabel(\"$x$ [kpc]\")\n axes[0].set_ylabel(\"$y$ [kpc]\")\n axes[1].set_xlabel(\"$y$ [kpc]\")\n axes[1].set_ylabel(\"$z$ [kpc]\")\n\n axes[0].set_xlim(xx.min(), xx.max())\n axes[0].set_ylim(yy.min(), yy.max())\n\n fig.tight_layout()\n \n return fig", "_____no_output_____" ], [ "barred_fig = side_by_side_contour_plots(xx, yy, barred_dens)\nstatic_fig = side_by_side_contour_plots(xx, yy, static_dens)\n\n# barred_fig.savefig(os.path.join(plotpath, \"barred-surface-density-contour.pdf\"), bbox_inches='tight')\n# barred_fig.savefig(os.path.join(plotpath, \"barred-surface-density-contour.png\"), dpi=400, 
bbox_inches='tight')\n\n# static_fig.savefig(os.path.join(plotpath, \"static-surface-density-contour.pdf\"), bbox_inches='tight')\n# static_fig.savefig(os.path.join(plotpath, \"static-surface-density-contour.png\"), dpi=400, bbox_inches='tight')", "_____no_output_____" ] ], [ [ "## Portail et al. (2015)", "_____no_output_____" ] ], [ [ "ngrid = 65\ngrid = np.linspace(-2,2,ngrid)\nxyz = np.vstack(map(np.ravel, np.meshgrid(grid,grid,grid)))\n\nval2 = np.zeros((ngrid*ngrid*ngrid,))\n# for k in potentials.keys():\n# val += potentials[k].density(xyz)\nval2 += potentials['bar'].density(xyz)\nval2[np.isnan(val2)] = val2[np.isfinite(val2)].max()", "_____no_output_____" ], [ "surf_dens = (val2.reshape(ngrid,ngrid,ngrid).sum(axis=1).T*u.Msun/(u.kpc**2)/ngrid).to(u.Msun/u.pc**2)\n\npl.figure(figsize=(6,3))\npl.contourf(xyz[0].reshape(ngrid,ngrid,ngrid)[:,:,0],\n xyz[1].reshape(ngrid,ngrid,ngrid)[:,:,0],\n surf_dens.value,\n norm=mpl.colors.LogNorm(),\n levels=np.logspace(1., 4, 8),\n cmap='Blues')\n# cmap='Greys_r',\n# norm=mpl.colors.LogNorm(),\n# vmin=5E8, vmax=5E10)\npl.xlim(-2,2)\npl.ylim(-1.1,1.1)\npl.colorbar()\npl.tight_layout()", "_____no_output_____" ] ], [ [ "## Circular velocity curve", "_____no_output_____" ] ], [ [ "def circ_vel_plot(potential, name): \n \"\"\" name = barred, static \"\"\"\n rr = np.linspace(0.1, 20., 1024)\n xyz = np.zeros((3, len(rr)))\n xyz[0] = rr\n \n potentials = OrderedDict()\n for k,P in potential_classes.items():\n potentials[k] = P(units=galactic, **potential.parameters[k])\n\n # vcirc = (np.sqrt(potential.G * potential.mass_enclosed(xyz) / rr)*u.kpc/u.Myr).to(u.km/u.s).value\n vcirc = (np.sqrt(potential.G * np.sum([p.mass_enclosed(xyz) for p in potentials.values()], axis=0) / rr)*u.kpc/u.Myr).to(u.km/u.s).value\n\n fig,ax = pl.subplots(1,1,figsize=(6,5))\n ax.plot(rr, vcirc, marker='', lw=3.)\n\n styles = dict(\n halo=dict(lw=2, ls='-.'),\n bar=dict(lw=3., ls=':'),\n spheroid=dict(lw=3., ls=':'),\n disk=dict(lw=2., ls='--')\n )\n for 
k,p in potentials.items():\n if k != 'halo' and potential.parameters[k]['m'] == 0:\n continue\n \n if k == 'bar':\n continue\n \n if name == 'static':\n disk_other = 'Spher'\n elif name == 'barred':\n disk_other = 'Bar+Spher'\n\n vc = (np.sqrt(potential.G * p.mass_enclosed(xyz).value / rr)*u.kpc/u.Myr).to(u.km/u.s).value\n if name == 'barred' and k == 'spheroid':\n menc_sph = p.mass_enclosed(xyz)\n p = potentials['bar']\n vc = (np.sqrt(potential.G * (menc_sph + p.mass_enclosed(xyz)).value / rr)*u.kpc/u.Myr).to(u.km/u.s).value\n label = 'Bar+Spheroid'\n else:\n label = k.capitalize()\n ax.plot(rr, vc, marker='', label=label, **styles[k])\n \n if name == 'barred':\n vc = (np.sqrt(potential.G * (potentials['spheroid'].mass_enclosed(xyz)+potentials['bar'].mass_enclosed(xyz)+potentials['disk'].mass_enclosed(xyz)).value / rr)*u.kpc/u.Myr).to(u.km/u.s).value\n ax.plot(rr, vc, marker='', label='Disk+Bar+Spher', lw=2.)\n else:\n vc = (np.sqrt(potential.G * (potentials['spheroid'].mass_enclosed(xyz)+potentials['disk'].mass_enclosed(xyz)).value / rr)*u.kpc/u.Myr).to(u.km/u.s).value\n\n ax.set_xlabel(\"$R$ [kpc]\")\n ax.set_ylabel(r\"$v_c$ [${\\rm km}\\,{\\rm s}^{-1}$]\")\n\n ax.legend(loc='upper right', fontsize=12)\n ax.set_ylim(0,300)\n # ax.set_ylim(150,300)\n # ax.axhline(220, alpha=0.2, lw=1.)\n # ax.axvline(8., color='#cccccc', lw=2., zorder=-100)\n\n rcolor = '#dddddd'\n rect = mpl.patches.Rectangle((0.,215), rr.max(), 20., zorder=-100, color=rcolor)\n ax.add_patch(rect)\n rect2 = mpl.patches.Rectangle((8.,0), 0.3, ax.get_ylim()[1], zorder=-100, color=rcolor)\n ax.add_patch(rect2)\n\n fig.tight_layout()\n \n return fig", "_____no_output_____" ], [ "fig = circ_vel_plot(barred_mw, 'barred')\n# fig.savefig(os.path.join(plotpath, \"barred-circ-vel.pdf\"))\n# fig.savefig(os.path.join(plotpath, \"barred-circ-vel.png\"), dpi=400)", "_____no_output_____" ], [ "fig = circ_vel_plot(static_mw, name='static')\n# fig.savefig(os.path.join(plotpath, \"static-circ-vel.pdf\"))\n# 
fig.savefig(os.path.join(plotpath, \"static-circ-vel.png\"), dpi=400)", "_____no_output_____" ] ], [ [ "## A new figure with all four panels", "_____no_output_____" ] ], [ [ "fig,axes = pl.subplots(2,2,figsize=(9,8.5),sharex='col')\n\n# Circular velocity\nstyles = dict(\n halo=dict(lw=2, ls='-.'),\n bar=dict(lw=3., ls=':'),\n spheroid=dict(lw=3., ls=':'),\n disk=dict(lw=2., ls='--')\n)\n\n# Contour\nlevels = 10**np.arange(7,12,0.25)\n\nrr = np.linspace(0.1, 22., 1024)\nfac = static_mw.G / rr\nxyz = np.zeros((3, len(rr)))\nxyz[0] = rr\nfor i,(name,pot,dens) in enumerate(zip(['barred','static'], [barred_mw, static_mw],[barred_dens, static_dens])):\n \n # Circular velocity\n ax = axes[i,0]\n \n potentials = OrderedDict()\n for k,P in potential_classes.items():\n potentials[k] = P(units=galactic, **pot.parameters[k])\n\n # vcirc = (np.sqrt(potential.G * potential.mass_enclosed(xyz) / rr)*u.kpc/u.Myr).to(u.km/u.s).value\n vcirc = (np.sqrt(pot.G * np.sum([p.mass_enclosed(xyz) for p in potentials.values()], axis=0) / rr)*u.kpc/u.Myr)\\\n .to(u.km/u.s).value\n\n ax.plot(rr, vcirc, marker='', lw=3.)\n \n menc = dict()\n for k,p in potentials.items():\n menc[k] = p.mass_enclosed(xyz)\n \n # Halo\n vc = np.sqrt(fac * menc['halo'].value)\n ax.plot(rr, (vc*u.kpc/u.Myr).to(u.km/u.s),\n marker='', label='Halo', **styles['halo'])\n \n # disk, etc.\n if name == 'static':\n vc = np.sqrt(fac * (menc['disk']+menc['spheroid']).value)\n ax.plot(rr, (vc*u.kpc/u.Myr).to(u.km/u.s), \n marker='', label='Disk+Sph', **styles['disk'])\n elif name == 'barred':\n vc = np.sqrt(fac * (menc['disk']+menc['spheroid']+menc['bar']).value)\n ax.plot(rr, (vc*u.kpc/u.Myr).to(u.km/u.s), \n marker='', label='Disk+Sph+Bar', **styles['disk'])\n\n ax.legend(loc='upper right', fontsize=12)\n ax.set_ylim(0,300)\n # ax.set_ylim(150,300)\n # ax.axhline(220, alpha=0.2, lw=1.)\n # ax.axvline(8., color='#cccccc', lw=2., zorder=-100)\n\n rcolor = '#dddddd'\n rect = mpl.patches.Rectangle((0.,215), rr.max(), 22., 
zorder=-100, color=rcolor)\n ax.add_patch(rect)\n rect2 = mpl.patches.Rectangle((8.,0), 0.3, ax.get_ylim()[1], zorder=-100, color=rcolor)\n ax.add_patch(rect2)\n \n # Surface density\n ngrid = xx.shape[0]\n ax = axes[i,1]\n im = ax.contour(xx, yy, dens.reshape(ngrid,ngrid,ngrid).sum(axis=2), \n colors='k', levels=levels, rasterized=True)\n ax.text(-8., 0, r\"$\\odot$\", ha='center', va='center', fontsize=18)\n ax.xaxis.set_ticks([-10,0,10])\n ax.yaxis.set_ticks([-10,0,10])\n\n ax.set_xlim(xx.min(), xx.max())\n ax.set_ylim(yy.min(), yy.max())\n \n if i == 0:\n ax = axes[0,0]\n ax.text(8.4, 40, r'$R_\\odot$', fontsize=18, color='#666666')\n# ax.annotate(r'$R_\\odot$', xy=(8.3, 50), xytext=(12, 75.), \n# fontsize=18,\n# xycoords='data', textcoords='data',\n# arrowprops=dict(arrowstyle=\"fancy\",\n# fc=\"0.6\", ec=\"none\",\n# patchB=rect2,\n# connectionstyle=\"angle3,angleA=0,angleB=90\"),\n# )\n\naxes[0,0].text(1, 260, \"Barred\", fontsize=24, fontstyle='italic', ha='left')\naxes[1,0].text(1, 260, \"Static\", fontsize=24, fontstyle='italic', ha='left')\n \naxes[1,0].set_xlabel(\"$R$ [kpc]\")\naxes[1,1].set_xlabel(\"$x$ [kpc]\")\naxes[0,0].set_ylabel(r\"$v_c$ [${\\rm km}\\,{\\rm s}^{-1}$]\")\naxes[1,0].set_ylabel(r\"$v_c$ [${\\rm km}\\,{\\rm s}^{-1}$]\")\n\naxes[0,0].set_xlim(0,22)\n\naxes[0,1].set_ylabel(\"$y$ [kpc]\")\naxes[1,1].set_ylabel(\"$y$ [kpc]\")\naxes[0,1].yaxis.set_label_position('right')\naxes[1,1].yaxis.set_label_position('right')\naxes[0,1].yaxis.tick_right()\naxes[1,1].yaxis.tick_right()\naxes[1,1].set_aspect('equal')\nfig.tight_layout()\n\n# fig.savefig(os.path.join(plotpath, \"potentials-four.pdf\"))\n# fig.savefig(os.path.join(plotpath, \"potentials-four.png\"), dpi=400)", "_____no_output_____" ] ], [ [ "---\n\n### What direction is it rotating? 
I hope clockwise...", "_____no_output_____" ] ], [ [ "pot = op.WangZhaoBarPotential(**barred_mw.parameters['bar'])\n\nT = (2*np.pi/(60*u.km/u.s/u.kpc)).to(u.Myr).value\nfor time in np.linspace(0.,T/4,4):\n xx,yy,_dens = density_on_grid(pot, t=time, ngrid=64)\n fig = side_by_side_surface_dens(xx, yy, _dens)", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ] ], [ [ "pars = barred_mw.parameters['bar'].copy()\npars['alpha'] = 0.\npot = op.WangZhaoBarPotential(**pars)\n\nX = np.linspace(-15,15,256)\n\n_xyz = np.zeros((X.size,3))\n_xyz[:,0] = X\nalong_x = pot.acceleration(_xyz)[:,0]\n\n_xyz = np.zeros((X.size,3))\n_xyz[:,1] = X\nalong_y = pot.acceleration(_xyz)[:,1]", "_____no_output_____" ], [ "pl.plot(X, np.abs(along_x))\npl.plot(X, np.abs(along_y))", "_____no_output_____" ], [ "engrid = 32\nderp = np.linspace(-15,15,engrid)\nxy = np.vstack(map(np.ravel, np.meshgrid(derp,derp))).T\nxyz = np.zeros((len(xy),3))\nxyz[:,[0,2]] = xy\n\ndens = pot.density(xyz, t=0)\ndens[np.isnan(dens)] = dens[np.isfinite(dens)].max()\n \nxx = xyz[:,0].reshape(engrid,engrid)\nyy = xyz[:,2].reshape(engrid,engrid)\n\npl.figure(figsize=(5,5))\npl.contour(xx, yy, dens.reshape(engrid,engrid),\n colors='k', rasterized=True)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
4a2dec2d325662b67ac4fd80288ca3d0b826f1c4
15,317
ipynb
Jupyter Notebook
notebooks/Untitled.ipynb
kemaltulum/hack-covid-with-nlp
992d237e3eb750a1d572ddd17043bbd8d4be8097
[ "MIT" ]
null
null
null
notebooks/Untitled.ipynb
kemaltulum/hack-covid-with-nlp
992d237e3eb750a1d572ddd17043bbd8d4be8097
[ "MIT" ]
null
null
null
notebooks/Untitled.ipynb
kemaltulum/hack-covid-with-nlp
992d237e3eb750a1d572ddd17043bbd8d4be8097
[ "MIT" ]
null
null
null
38.581864
399
0.474114
[ [ [ "import pandas as pd\nimport numpy as np", "_____no_output_____" ], [ "tweet_df = pd.read_csv('data/tweet_df_clustered.csv')\nsummary_df_tfidf = pd.read_csv('data/summary_df_tf_idf.csv')\nsummary_df_w2vec = pd.read_csv('data/summary_df_w2vec.csv')", "_____no_output_____" ], [ "tweet_df.head()", "_____no_output_____" ], [ "def get_cluster_summary(cluster_id, method='w2vec'):\n if method == 'w2vec':\n return summary_df_w2vec[summary_df_w2vec['cluster_id'] == cluster_id]['keywords'].values[0]\n elif method == 'tfidf':\n return summary_df_tfidf[summary_df_tfidf['cluster_id'] == cluster_id]['keywords'].values[0]\n else:\n print(\"Invalid method\")", "_____no_output_____" ], [ "get_cluster_summary(0, method=\"tfidf\")", "_____no_output_____" ], [ "def get_random_tweets_from_cluster(c_id, n=5, method='w2vec'):\n if method == 'w2vec':\n tweets = tweet_df[tweet_df['y_w2vec'] == c_id][['full_text', 'processed_text']].sample(n).values\n for tweet in tweets:\n print(tweet[0])\n print(tweet[1])\n print(\"----------------------------\")\n elif method == 'tfidf':\n tweets = tweet_df[tweet_df['y'] == c_id][['full_text', 'processed_text']].sample(n).values\n for tweet in tweets:\n print(tweet[0])\n print(tweet[1])\n print(\"----------------------------\")\n else:\n print('Invalid method')", "_____no_output_____" ], [ "get_random_tweets_from_cluster(4, method='tfidf')", "Ümmet kafayı yiyeceğim\nHani ŞEHİR hastaneleri yaptık\nSahra hastanesine ne gerek var\nVatan hainleri Şehir hastanelerini \nÇekemiyor diyorduk\nŞimdi sahra hastaneleri kuruluyor\nBu yalanımızda götümüzde patladı \nNe olacak bizim bu zavallı halimiz?..konuşun! 
😎\n\n#neyinnesi\n#TamZamanı\nümmet kafa yiyecek şehir hastane sahra hastane vatan hain şehir hastane çekmek demek sahra hastane kurmak yalan göt patlamak zavallı hal konuşmak\n----------------------------\n27 mart'ta @ekrem_imamoglu'nun \" atatürk havalimanı 'na sahra hastanesi kurulsun \" önerisi 11 gün sonra değerlendirmeye alındı ama tabii gene çok geç kalındı. bir de 45 gün'de yapacaklarmış. 45 güne hepimiz ölürüz herhalde. #neyinnesi #pazartesi #vaka30217\nmart atatürk havalimanı na sahra hastane kurul öneri gün değer almak tabiî gen geç kalmak gün gün ölmek herhâlde\n----------------------------\nYıllarca hastane yapımını gereksiz gördünüz şimdi de hastane istemekte #neyinnesi . Vaka sayısına bakarak\nAtatürk havalimanı er geç hastane olacaktı \nEkrem önce hastane yolunu yapsın\nBuyrun hizmet görün\nTabi gözünüz varsa https://t.co/xwf82hQKyT\nyıllarca hastane yapım görmek hastane istemek vaka sayı bakmak atatürk havalimanı er geç hastane ekrem hastane yol buyru hizmet görmek tâbi göz varmak\n----------------------------\n1 hafta önce \"Atatürk havalimanı hastane olarak kullanılabilir\" diyen Ekrem İmamoğlu'nu tebrik etmek lazım..\nAynı şekilde Cumhurbaşkanı Recep Tayyip Erdoğanı da..\nÜlkeye ortak akıl lazım bu günlerde...\n#neyinnesi\nhafta atatürk havalimanı hastane kullanmak demek ekrem ımamoğlu nu tebrik etmek lazım aynı şekil cumhurbaşkanı receptayyip erdoğan ülke ortak akıl lazım gün\n----------------------------\n@fatihportakal Fatih bey bir sağlık çalışanı olarak atatürk havaalanını hastane yapma fikri gibi geçtiğimiz yıllarda camileri yapmadan önce hastane fakülte yapsaydı şimdi bu.#neyinnesi\nsağlık çalışmak atatürk havaalanı hastane fikir geçmek yıl cami hastane fakülte\n----------------------------\n" ], [ "get_cluster_summary(4, method='tfidf')", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a2e055403d16f8ed6bc51eb7e8988d0b9579eb4
108,694
ipynb
Jupyter Notebook
docs_src/core.ipynb
sqbislam/fastai
c03648dfc0c51daa0d5b1da7f87f03d5d523bae8
[ "Apache-2.0" ]
1
2022-03-20T17:00:21.000Z
2022-03-20T17:00:21.000Z
docs_src/core.ipynb
sqbislam/fastai
c03648dfc0c51daa0d5b1da7f87f03d5d523bae8
[ "Apache-2.0" ]
null
null
null
docs_src/core.ipynb
sqbislam/fastai
c03648dfc0c51daa0d5b1da7f87f03d5d523bae8
[ "Apache-2.0" ]
null
null
null
32.808331
1,737
0.535145
[ [ [ "## Basic core", "_____no_output_____" ], [ "This module contains all the basic functions we need in other modules of the fastai library (split with [`torch_core`](/torch_core.html#torch_core) that contains the ones requiring pytorch). Its documentation can easily be skipped at a first read, unless you want to know what a given function does.", "_____no_output_____" ] ], [ [ "from fastai.gen_doc.nbdoc import *\nfrom fastai.core import * ", "_____no_output_____" ] ], [ [ "## Global constants", "_____no_output_____" ], [ "`default_cpus = min(16, num_cpus())` <div style=\"text-align: right\"><a href=\"https://github.com/fastai/fastai/blob/master/fastai/core.py#L45\">[source]</a></div>", "_____no_output_____" ], [ "## Check functions", "_____no_output_____" ] ], [ [ "show_doc(has_arg)", "_____no_output_____" ] ], [ [ "Examples for two [`fastai.core`](/core.html#core) functions. Docstring shown before calling [`has_arg`](/core.html#has_arg) for reference\n", "_____no_output_____" ] ], [ [ "has_arg(download_url,'url')", "_____no_output_____" ], [ "has_arg(index_row,'x')", "_____no_output_____" ], [ "has_arg(index_row,'a')", "_____no_output_____" ], [ "show_doc(ifnone)", "_____no_output_____" ], [ "param,alt_param = None,5\nifnone(param,alt_param)", "_____no_output_____" ], [ "param,alt_param = None,[1,2,3]\nifnone(param,alt_param)", "_____no_output_____" ], [ "show_doc(is1d)", "_____no_output_____" ], [ "two_d_array = np.arange(12).reshape(6,2)\nprint( two_d_array )\nprint( is1d(two_d_array) )", "[[ 0 1]\n [ 2 3]\n [ 4 5]\n [ 6 7]\n [ 8 9]\n [10 11]]\nFalse\n" ], [ "is1d(two_d_array.flatten())", "_____no_output_____" ], [ "show_doc(is_listy)", "_____no_output_____" ] ], [ [ "Check if `x` is a `Collection`. 
`Tuple` or `List` qualify", "_____no_output_____" ] ], [ [ "some_data = [1,2,3]\nis_listy(some_data)", "_____no_output_____" ], [ "some_data = (1,2,3)\nis_listy(some_data)", "_____no_output_____" ], [ "some_data = 1024\nprint( is_listy(some_data) )", "False\n" ], [ "print( is_listy( [some_data] ) )", "True\n" ], [ "some_data = dict([('a',1),('b',2),('c',3)])\nprint( some_data )\nprint( some_data.keys() )", "{'a': 1, 'b': 2, 'c': 3}\ndict_keys(['a', 'b', 'c'])\n" ], [ "print( is_listy(some_data) )\nprint( is_listy(some_data.keys()) )", "False\nFalse\n" ], [ "print( is_listy(list(some_data.keys())) )", "True\n" ], [ "show_doc(is_tuple)", "_____no_output_____" ] ], [ [ "Check if `x` is a `tuple`.", "_____no_output_____" ] ], [ [ "print( is_tuple( [1,2,3] ) )", "False\n" ], [ "print( is_tuple( (1,2,3) ) )", "True\n" ] ], [ [ "## Collection related functions", "_____no_output_____" ] ], [ [ "show_doc(arange_of)", "_____no_output_____" ], [ "arange_of([5,6,7])", "_____no_output_____" ], [ "type(arange_of([5,6,7]))", "_____no_output_____" ], [ "show_doc(array)", "_____no_output_____" ], [ "array([1,2,3])", "_____no_output_____" ] ], [ [ "Note that after we call the generator, we do not reset. 
So the [`array`](/core.html#array) call has 5 less entries than it would if we ran from the start of the generator.", "_____no_output_____" ] ], [ [ "def data_gen():\n i = 100.01\n while i<200:\n yield i\n i += 1.\n\nex_data_gen = data_gen()\nfor _ in range(5):\n print(next(ex_data_gen))", "100.01\n101.01\n102.01\n103.01\n104.01\n" ], [ "array(ex_data_gen)", "_____no_output_____" ], [ "ex_data_gen_int = data_gen()\n\narray(ex_data_gen_int,dtype=int) #Cast output to int array", "_____no_output_____" ], [ "show_doc(arrays_split)", "_____no_output_____" ], [ "data_a = np.arange(15)\ndata_b = np.arange(15)[::-1]\n\nmask_a = (data_a > 10)\nprint(data_a)\nprint(data_b)\nprint(mask_a)", "[ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14]\n[14 13 12 11 10 9 8 7 6 5 4 3 2 1 0]\n[False False False False False False False False False False False True True True True]\n" ], [ "arrays_split(mask_a,data_a)", "_____no_output_____" ], [ "np.vstack([data_a,data_b]).transpose().shape", "_____no_output_____" ], [ "arrays_split(mask_a,np.vstack([data_a,data_b]).transpose()) #must match on dimension 0", "_____no_output_____" ], [ "show_doc(chunks)", "_____no_output_____" ] ], [ [ "You can transform a `Collection` into an `Iterable` of 'n' sized chunks by calling [`chunks`](/core.html#chunks):", "_____no_output_____" ] ], [ [ "data = [0,1,2,3,4,5,6,7,8,9]\nfor chunk in chunks(data, 2):\n print(chunk)", "[0, 1]\n[2, 3]\n[4, 5]\n[6, 7]\n[8, 9]\n" ], [ "for chunk in chunks(data, 3):\n print(chunk)", "[0, 1, 2]\n[3, 4, 5]\n[6, 7, 8]\n[9]\n" ], [ "show_doc(df_names_to_idx)", "_____no_output_____" ], [ "ex_df = pd.DataFrame.from_dict({\"a\":[1,1,1],\"b\":[2,2,2]})\nprint(ex_df)", " a b\n0 1 2\n1 1 2\n2 1 2\n" ], [ "df_names_to_idx('b',ex_df)", "_____no_output_____" ], [ "show_doc(extract_kwargs)", "_____no_output_____" ], [ "key_word_args = {\"a\":2,\"some_list\":[1,2,3],\"param\":'mean'}\nkey_word_args", "_____no_output_____" ], [ "(extracted_val,remainder) = 
extract_kwargs(['param'],key_word_args)\nprint( extracted_val,remainder )", "{'param': 'mean'} {'a': 2, 'some_list': [1, 2, 3]}\n" ], [ "show_doc(idx_dict)", "_____no_output_____" ], [ "idx_dict(['a','b','c'])", "_____no_output_____" ], [ "show_doc(index_row)", "_____no_output_____" ], [ "data = [0,1,2,3,4,5,6,7,8,9]\nindex_row(data,4)", "_____no_output_____" ], [ "index_row(pd.Series(data),7)", "_____no_output_____" ], [ "data_df = pd.DataFrame([data[::-1],data]).transpose()\ndata_df", "_____no_output_____" ], [ "index_row(data_df,7)", "_____no_output_____" ], [ "show_doc(listify)", "_____no_output_____" ], [ "to_match = np.arange(12)\nlistify('a',to_match)", "_____no_output_____" ], [ "listify('a',5)", "_____no_output_____" ], [ "listify(77.1,3)", "_____no_output_____" ], [ "listify( (1,2,3) )", "_____no_output_____" ], [ "listify((1,2,3),('a','b','c'))", "_____no_output_____" ], [ "show_doc(random_split)", "_____no_output_____" ] ], [ [ "Splitting is done here with `random.uniform()` so you may not get the exact split percentage for small data sets", "_____no_output_____" ] ], [ [ "data = np.arange(20).reshape(10,2)\ndata.tolist()", "_____no_output_____" ], [ "random_split(0.20,data.tolist())", "_____no_output_____" ], [ "random_split(0.20,pd.DataFrame(data))", "_____no_output_____" ], [ "show_doc(range_of)", "_____no_output_____" ], [ "range_of([5,4,3])", "_____no_output_____" ], [ "range_of(np.arange(10)[::-1])", "_____no_output_____" ], [ "show_doc(series2cat)", "_____no_output_____" ], [ "data_df = pd.DataFrame.from_dict({\"a\":[1,1,1,2,2,2],\"b\":['f','e','f','g','g','g']})\ndata_df", "_____no_output_____" ], [ "data_df['b']", "_____no_output_____" ], [ "series2cat(data_df,'b')\ndata_df['b']", "_____no_output_____" ], [ "series2cat(data_df,'a')\ndata_df['a']", "_____no_output_____" ], [ "show_doc(split_kwargs_by_func)", "_____no_output_____" ], [ "key_word_args = 
{'url':'http://fast.ai','dest':'./','new_var':[1,2,3],'testvalue':42}\nsplit_kwargs_by_func(key_word_args,download_url)", "_____no_output_____" ], [ "show_doc(to_int)", "_____no_output_____" ], [ "to_int(3.1415)", "_____no_output_____" ], [ "data = [1.2,3.4,7.25]\nto_int(data)", "_____no_output_____" ], [ "show_doc(uniqueify)", "_____no_output_____" ], [ "uniqueify( pd.Series(data=['a','a','b','b','f','g']) )", "_____no_output_____" ] ], [ [ "## Files management and downloads", "_____no_output_____" ] ], [ [ "show_doc(download_url)", "_____no_output_____" ], [ "show_doc(find_classes)", "_____no_output_____" ], [ "show_doc(join_path)", "_____no_output_____" ], [ "show_doc(join_paths)", "_____no_output_____" ], [ "show_doc(loadtxt_str)", "_____no_output_____" ], [ "show_doc(save_texts)", "_____no_output_____" ] ], [ [ "## Multiprocessing", "_____no_output_____" ] ], [ [ "show_doc(num_cpus)", "_____no_output_____" ], [ "show_doc(parallel)", "_____no_output_____" ], [ "show_doc(partition)", "_____no_output_____" ], [ "show_doc(partition_by_cores)", "_____no_output_____" ] ], [ [ "## Data block API", "_____no_output_____" ] ], [ [ "show_doc(ItemBase, title_level=3)", "_____no_output_____" ] ], [ [ "All items used in fastai should subclass this. Must have a [`data`](/tabular.data.html#tabular.data) field that will be used when collating in mini-batches.", "_____no_output_____" ] ], [ [ "show_doc(ItemBase.apply_tfms)", "_____no_output_____" ], [ "show_doc(ItemBase.show)", "_____no_output_____" ] ], [ [ "The default behavior is to set the string representation of this object as title of `ax`.", "_____no_output_____" ] ], [ [ "show_doc(Category, title_level=3)", "_____no_output_____" ] ], [ [ "Create a [`Category`](/core.html#Category) with an `obj` of index [`data`](/tabular.data.html#tabular.data) in a certain classes list. 
", "_____no_output_____" ] ], [ [ "show_doc(EmptyLabel, title_level=3)", "_____no_output_____" ], [ "show_doc(MultiCategory, title_level=3)", "_____no_output_____" ] ], [ [ "Create a [`MultiCategory`](/core.html#MultiCategory) with an `obj` that is a collection of labels. [`data`](/tabular.data.html#tabular.data) corresponds to the one-hot encoded labels and `raw` is a list of associated string.", "_____no_output_____" ] ], [ [ "show_doc(FloatItem)", "_____no_output_____" ] ], [ [ "## Others", "_____no_output_____" ] ], [ [ "show_doc(camel2snake)", "_____no_output_____" ], [ "camel2snake('DeviceDataLoader')", "_____no_output_____" ], [ "show_doc(even_mults)", "_____no_output_____" ] ], [ [ "In linear scales each element is equidistant from its neighbors:", "_____no_output_____" ] ], [ [ "# from 1 to 10 in 5 steps\nt = np.linspace(1, 10, 5)\nt", "_____no_output_____" ], [ "for i in range(len(t) - 1):\n print(t[i+1] - t[i])", "2.25\n2.25\n2.25\n2.25\n" ] ], [ [ "In logarithmic scales, each element is a multiple of the previous entry:", "_____no_output_____" ] ], [ [ "t = even_mults(1, 10, 5)\nt", "_____no_output_____" ], [ "# notice how each number is a multiple of its predecessor\nfor i in range(len(t) - 1):\n print(t[i+1] / t[i])", "1.7782794100389228\n1.7782794100389228\n1.7782794100389228\n1.7782794100389228\n" ], [ "show_doc(func_args)", "_____no_output_____" ], [ "func_args(download_url)", "_____no_output_____" ] ], [ [ "Additionally, [`func_args`](/core.html#func_args) can be used with functions that do not belong to the fastai library", "_____no_output_____" ] ], [ [ "func_args(np.linspace)", "_____no_output_____" ], [ "show_doc(noop)", "_____no_output_____" ] ], [ [ "Return `x`.", "_____no_output_____" ] ], [ [ "# object is returned as-is\nnoop([1,2,3])", "_____no_output_____" ], [ "show_doc(one_hot)", "_____no_output_____" ] ], [ [ "One-hot encoding is a standard machine learning technique. 
Assume we are dealing with a 10-class classification problem and we are supplied a list of labels:", "_____no_output_____" ] ], [ [ "y = [1, 4, 4, 5, 7, 9, 2, 4, 0]", "_____no_output_____" ], [ "jekyll_note(\"\"\"y is zero-indexed, therefore its first element (1) belongs to class 2, its second element (4) to class 5 and so on.\"\"\")", "_____no_output_____" ], [ "len(y)", "_____no_output_____" ] ], [ [ "y can equivalently be expressed as a matrix of 9 rows and 10 columns, where each row represents one element of the original y. ", "_____no_output_____" ] ], [ [ "for label in y:\n print(one_hot(label, 10))", "[0. 1. 0. 0. 0. 0. 0. 0. 0. 0.]\n[0. 0. 0. 0. 1. 0. 0. 0. 0. 0.]\n[0. 0. 0. 0. 1. 0. 0. 0. 0. 0.]\n[0. 0. 0. 0. 0. 1. 0. 0. 0. 0.]\n[0. 0. 0. 0. 0. 0. 0. 1. 0. 0.]\n[0. 0. 0. 0. 0. 0. 0. 0. 0. 1.]\n[0. 0. 1. 0. 0. 0. 0. 0. 0. 0.]\n[0. 0. 0. 0. 1. 0. 0. 0. 0. 0.]\n[1. 0. 0. 0. 0. 0. 0. 0. 0. 0.]\n" ], [ "show_doc(show_some)", "_____no_output_____" ], [ "# select 3 elements from a list\nsome_data = show_some([10, 20, 30, 40, 50], 3) \nsome_data", "_____no_output_____" ], [ "type(some_data) ", "_____no_output_____" ], [ "# the separator can be changed\nsome_data = show_some([10, 20, 30, 40, 50], 3, sep = '---') \nsome_data", "_____no_output_____" ], [ "some_data[:-3]", "_____no_output_____" ] ], [ [ "[`show_some`](/core.html#show_some) can take as input any class with \\_\\_len\\_\\_ and \\_\\_getitem\\_\\_ ", "_____no_output_____" ] ], [ [ "class Any(object):\n def __init__(self, data):\n self.data = data\n def __len__(self):\n return len(self.data)\n def __getitem__(self,i):\n return self.data[i]\n \nsome_other_data = Any('nice')\nshow_some(some_other_data, 2)", "_____no_output_____" ], [ "show_doc(subplots)", "_____no_output_____" ], [ "show_doc(text2html_table)", "_____no_output_____" ] ], [ [ "## Undocumented Methods - Methods moved below this line will intentionally be hidden", "_____no_output_____" ], [ "## New Methods - Please document or move to the 
undocumented section", "_____no_output_____" ] ], [ [ "show_doc(is_dict)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ] ]
4a2e0dbec719783f7b7b2b13689a31bca7b0cc97
5,185
ipynb
Jupyter Notebook
structural/3-tier.ipynb
n4m4g/python-design-patterns
fda6531d8b70edd6e30a856849442707c11fb3e2
[ "MIT" ]
null
null
null
structural/3-tier.ipynb
n4m4g/python-design-patterns
fda6531d8b70edd6e30a856849442707c11fb3e2
[ "MIT" ]
null
null
null
structural/3-tier.ipynb
n4m4g/python-design-patterns
fda6531d8b70edd6e30a856849442707c11fb3e2
[ "MIT" ]
null
null
null
28.489011
107
0.481581
[ [ [ "# 3-tier\n\nSeparates presentation, application processing, and data management functions.\n\nreference: https://shunnien.github.io/2017/07/29/3-tier-and-mvc-introduction/", "_____no_output_____" ] ], [ [ "class Data(object):\n products = {\n 'milk': {'price': 1.5, 'quantity': 10},\n 'eggs': {'price': 0.2, 'quantity': 100},\n 'cheese': {'price': 2.0, 'quantity': 50},\n }\n \n def __get__(self, obj, klas):\n print(\"(Fetching from Data Store)\")\n return {'products': self.products}\n \n\nclass BusinessLogic(object):\n data = Data()\n \n def product_list(self):\n return self.data['products'].keys()\n \n def product_information(self, product):\n return self.data['products'].get(product, None)\n \n\nclass Ui(object):\n def __init__(self):\n self.business_logic = BusinessLogic()\n \n def get_product_list(self):\n print('PRODUCT LIST:')\n for product in self.business_logic.product_list():\n print(product)\n print('')\n \n def get_product_information(self, product):\n product_info = self.business_logic.product_information(product)\n if product_info:\n print('PRODUCT INFORMATION:')\n print(\n \"Name: {0}, Price: {1:.2f}, Quantity: {2}\".format(\n product.title(), product_info.get('price', 0), product_info.get('quantity', 0)\n )\n )\n else:\n print('That product \"{0}\" does not exist in the records'.format(product))\n\n \nui = Ui()\nui.get_product_list()\nui.get_product_information('cheese')\nui.get_product_information('eggs')\nui.get_product_information('milk')\nui.get_product_information('arepas')", "PRODUCT LIST:\n(Fetching from Data Store)\nmilk\neggs\ncheese\n\n(Fetching from Data Store)\nPRODUCT INFORMATION:\nName: Cheese, Price: 2.00, Quantity: 50\n(Fetching from Data Store)\nPRODUCT INFORMATION:\nName: Eggs, Price: 0.20, Quantity: 100\n(Fetching from Data Store)\nPRODUCT INFORMATION:\nName: Milk, Price: 1.50, Quantity: 10\n(Fetching from Data Store)\nThat product \"arepas\" does not exist in the records\n" ], [ "class Data(object):\n products = {\n 'milk': 
{'price': 20, 'quantity': 1},\n 'egg': {'price': 30, 'quantity': 2}\n }\n def __get__(self, instance, owner):\n return {'products': self.products}\n \nclass BusinessLogic(object):\n data = Data()\n \n def product_list(self):\n return self.data['products'].keys()\n \n def product_info(self, product):\n return self.data['products'].get(product, None)\n \nclass Ui(object):\n def __init__(self):\n self.business_logic = BusinessLogic()\n \n def get_product_list(self):\n for i in self.business_logic.product_list():\n print(i, end=' ')\n print()\n \n def get_product_info(self, product):\n product_info = self.business_logic.product_info(product)\n print(product)\n for k, v in product_info.items():\n print(k, v)\n \n \nui = Ui()\nui.get_product_list()\nui.get_product_info('egg')", "milk egg \negg\nprice 30\nquantity 2\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ] ]
4a2e165087d902b9ff59c84936d686b69bce27dd
22,852
ipynb
Jupyter Notebook
Week 02 - Perceptrons/Week02_Homework_02.ipynb
denikn/Machine-Learning-MIT-Assignment
5733980e4724e7a4bd47965c4bd3b26be70529f3
[ "MIT" ]
3
2020-12-17T13:22:26.000Z
2022-03-15T19:20:36.000Z
Week 02 - Perceptrons/Week02_Homework_02.ipynb
denikn/Machine-Learning-MIT-Assignment
5733980e4724e7a4bd47965c4bd3b26be70529f3
[ "MIT" ]
null
null
null
Week 02 - Perceptrons/Week02_Homework_02.ipynb
denikn/Machine-Learning-MIT-Assignment
5733980e4724e7a4bd47965c4bd3b26be70529f3
[ "MIT" ]
1
2021-11-18T05:07:10.000Z
2021-11-18T05:07:10.000Z
37.037277
596
0.512734
[ [ [ "<a href=\"https://colab.research.google.com/github/denikn/Machine-Learning-MIT-Assignment/blob/main/Week%2002%20-%20Perceptrons/Week02_Homework_02.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "#MIT 6.036 Spring 2019: Homework 2#\n\nThis colab notebook provides code and a framework for problems 7-10 of [the homework](https://openlearninglibrary.mit.edu/courses/course-v1:MITx+6.036+1T2019/courseware/Week2/week2_homework/1). You can work out your solutions here, then submit your results back on the homework page when ready.\n\n## <section>**Setup**</section>\n\nFirst, download the code distribution for this homework that contains test cases and helper functions (such as `positive`).\n\nRun the next code block to download and import the code for this lab.\n", "_____no_output_____" ] ], [ [ "!rm -f code_for_hw02.py*\n!wget --no-check-certificate --quiet https://introml_oll.odl.mit.edu/6.036/static/homework/hw02/code_for_hw02.py\nfrom code_for_hw02 import *", "_____no_output_____" ], [ "help(tidy_plot)", "Help on function tidy_plot in module code_for_hw02:\n\ntidy_plot(xmin, xmax, ymin, ymax, center=False, title=None, xlabel=None, ylabel=None)\n Set up axes for plotting\n xmin, xmax, ymin, ymax = (float) plot extents\n Return matplotlib axes\n\n" ] ], [ [ "", "_____no_output_____" ] ], [ [ "def test(a):\n return a + 53", "_____no_output_____" ], [ "def methodB(a):\n return test(a)", "_____no_output_____" ], [ "def someMethod():\n test = 7\n return methodB(test + 3)", "_____no_output_____" ], [ "someMethod()", "_____no_output_____" ] ], [ [ "# <section>**7) Implement perceptron**</section>\n\nImplement [the perceptron algorithm](https://lms.mitx.mit.edu/courses/course-v1:MITx+6.036+2019_Spring/courseware/Week2/perceptron/2), where\n\n* `data` is a numpy array of dimension $d$ by $n$\n* `labels` is numpy array of dimension $1$ by $n$\n* `params` is a 
dictionary specifying extra parameters to this algorithm; your algorithm should run a number of iterations equal to $T$\n* `hook` is either None or a function that takes the tuple `(th, th0)` as an argument and displays the separator graphically. We won't be testing this in the Tutor, but it will help you in debugging on your own machine.\n\nIt should return a tuple of $\\theta$ (a $d$ by 1 array) and $\\theta_0$ (a 1 by 1 array).\n\nWe have given you some data sets in the code file for you to test your implementation.\n\nYour function should initialize all parameters to 0, then run through the data, in the order it is given, performing an update to the parameters whenever the current parameters would make a mistake on that data point. Perform $T$ iterations through the data. ", "_____no_output_____" ] ], [ [ "import numpy as np", "_____no_output_____" ], [ "import numpy as np\n\n\n# x is dimension d by 1\n# th is dimension d by 1\n# th0 is dimension 1 by 1\n# return 1 by 1 matrix of +1, 0, -1\ndef positive(x, th, th0):\n return np.sign(th.T@x + th0)\n\n# Perceptron algorithm with offset.\n# data is dimension d by n\n# labels is dimension 1 by n\n# T is a positive integer number of steps to run\n# Perceptron algorithm with offset.\n# data is dimension d by n\n# labels is dimension 1 by n\n# T is a positive integer number of steps to run\ndef perceptron(data, labels, params = {}, hook = None):\n # if T not in params, default to 100\n T = params.get('T', 100)\n (d, n) = data.shape\n\n theta = np.zeros((d, 1)); theta_0 = np.zeros((1, 1))\n for t in range(T):\n for i in range(n):\n x = data[:,i:i+1]\n y = labels[:,i:i+1]\n if y * positive(x, theta, theta_0) <= 0.0:\n theta = theta + y * x\n theta_0 = theta_0 + y\n if hook: hook((theta, theta_0))\n return theta, theta_0", "_____no_output_____" ], [ "test_perceptron(perceptron)", "-----------Test Perceptron 0-----------\nPassed! \n\n-----------Test Perceptron 1-----------\nPassed! 
\n\n" ] ], [ [ "# <section>8) Implement averaged perceptron</section>\n\nRegular perceptron can be somewhat sensitive to the most recent examples that it sees. Instead, averaged perceptron produces a more stable output by outputting the average value of `th` and `th0` across all iterations.\n\nImplement averaged perceptron with the same spec as regular perceptron, and using the pseudocode below as a guide.\n<pre>\nprocedure averaged_perceptron({(x^(i), y^(i)), i=1,...n}, T)\n th = 0 (d by 1); th0 = 0 (1 by 1)\n ths = 0 (d by 1); th0s = 0 (1 by 1)\n for t = 1,...,T do:\n for i = 1,...,n do:\n\t if y^(i)(th . x^(i) + th0) <= 0 then\n th = th + y^(i)x^(i)\n th0 = th0 + y^(i)\n\t ths = ths + th\n\t th0s = th0s + th0\n return ths/(nT), th0s/(nT)\n</pre>", "_____no_output_____" ] ], [ [ "import numpy as np\n\n\n# x is dimension d by 1\n# th is dimension d by 1\n# th0 is dimension 1 by 1\n# return 1 by 1 matrix of +1, 0, -1\ndef positive(x, th, th0):\n return np.sign(th.T@x + th0)\n\ndef averaged_perceptron(data, labels, params = {}, hook = None):\n T = params.get('T', 100)\n (d, n) = data.shape\n\n theta = np.zeros((d, 1)); theta_0 = np.zeros((1, 1))\n theta_sum = theta.copy() \n theta_0_sum = theta_0.copy()\n for t in range(T):\n for i in range(n):\n x = data[:,i:i+1]\n y = labels[:,i:i+1]\n if y * positive(x, theta, theta_0) <= 0.0:\n theta = theta + y * x\n theta_0 = theta_0 + y\n if hook: hook((theta, theta_0))\n theta_sum = theta_sum + theta\n theta_0_sum = theta_0_sum + theta_0\n theta_avg = theta_sum / (T*n)\n theta_0_avg = theta_0_sum / (T*n)\n if hook: hook((theta_avg, theta_0_avg))\n return theta_avg, theta_0_avg", "_____no_output_____" ], [ "test_averaged_perceptron(averaged_perceptron)", "-----------Test Averaged Perceptron 0-----------\nPassed! \n\n-----------Test Averaged Perceptron 1-----------\nPassed! 
\n\n" ] ], [ [ "# 9) Implement evaluation strategies\n \n## 9.1) Evaluating a classifier\n\nTo evaluate a classifier, we are interested in how well it performs on data that it wasn't trained on. Construct a testing procedure that uses a training data set, calls a learning algorithm to get a linear separator (a tuple of $\\theta, \\theta_0$), and then reports the percentage correct on a new testing set as a float between 0. and 1..\n\nThe learning algorithm is passed as a function that takes a data array and a labels vector. Your evaluator should be able to interchangeably evaluate `perceptron` or `averaged_perceptron` (or future algorithms with the same spec), depending on what is passed through the `learner` parameter.\n\nThe `eval_classifier` function should accept the following parameters:\n\n* <tt>learner</tt> - a function, such as perceptron or averaged_perceptron\n* <tt>data_train</tt> - training data\n* <tt>labels_train</tt> - training labels\n* <tt>data_test</tt> - test data\n* <tt>labels_test</tt> - test labels\n\nAssume that you have available the function `score` from HW 1, which takes inputs:\n\n* <tt>data</tt>: a <tt>d</tt> by <tt>n</tt> array of floats (representing <tt>n</tt> data points in <tt>d</tt> dimensions)\n* <tt>labels</tt>: a <tt>1</tt> by <tt>n</tt> array of elements in <tt>(+1, -1)</tt>, representing target labels\n* <tt>th</tt>: a <tt>d</tt> by <tt>1</tt> array of floats that together with\n* <tt>th0</tt>: a single scalar or 1 by 1 array, represents a hyperplane\n\nand returns 1 by 1 matrix with an integer indicating number of data points correct for the separator.", "_____no_output_____" ] ], [ [ "import numpy as np\ndef eval_classifier(learner, data_train, labels_train, data_test, labels_test):\n th, th0 = learner(data_train, labels_train)\n return score(data_test, labels_test, th, th0)/data_test.shape[1]", "_____no_output_____" ], [ "test_eval_classifier(eval_classifier,perceptron)", "-----------Test Eval Classifier 
0-----------\nPassed! \n\n-----------Test Eval Classifier 1-----------\nPassed! \n\n" ] ], [ [ "## <subsection>9.2) Evaluating a learning algorithm using a data source</subsection>\n\nConstruct a testing procedure that takes a learning algorithm and a data source as input and runs the learning algorithm multiple times, each time evaluating the resulting classifier as above. It should report the overall average classification accuracy.\n\nYou can use our implementation of `eval_classifier` as above.\n\nWrite the function `eval_learning_alg` that takes:\n\n* <tt>learner</tt> - a function, such as perceptron or averaged_perceptron\n* <tt>data_gen</tt> - a data generator, call it with a desired data set size; returns a tuple (data, labels)\n* <tt>n_train</tt> - the size of the learning sets\n* <tt>n_test</tt> - the size of the test sets\n* <tt>it</tt> - the number of iterations to average over\n\nand returns the average classification accuracy as a float between 0. and 1.. \n\n** Note: Be sure to generate your training data and then testing data in that order, to ensure that the pseudorandomly generated data matches that in the test code. **", "_____no_output_____" ] ], [ [ "import numpy as np\ndef eval_learning_alg(learner, data_gen, n_train, n_test, it):\n score_sum = 0\n for i in range(it):\n data_train, labels_train = data_gen(n_train)\n data_test, labels_test = data_gen(n_test)\n score_sum += eval_classifier(learner, data_train, labels_train,\n data_test, labels_test)\n return score_sum/it ", "_____no_output_____" ], [ "test_eval_learning_alg(eval_learning_alg,perceptron)", "-----------Test Eval Learning Algo-----------\nPassed! \n\n" ] ], [ [ "## <subsection>9.3) Evaluating a learning algorithm with a fixed dataset</subsection>\n\nCross-validation is a strategy for evaluating a learning algorithm, using a single training set of size $n$. Cross-validation takes in a learning algorithm $L$, a fixed data set $\\mathcal{D}$, and a parameter $k$. 
It will run the learning algorithm $k$ different times, then evaluate the accuracy of the resulting classifier, and ultimately return the average of the accuracies over each of the $k$ \"runs\" of $L$. It is structured like this:\n\n<pre><code>divide D into k parts, as equally as possible; call them D_i for i == 0 .. k-1\n# be sure the data is shuffled in case someone put all the positive examples first in the data!\nfor j from 0 to k-1:\n D_minus_j = union of all the datasets D_i, except for D_j\n h_j = L(D_minus_j)\n score_j = accuracy of h_j measured on D_j\nreturn average(score0, ..., score(k-1))\n</code></pre>\n\nSo, each time, it trains on $k−1$ of the pieces of the data set and tests the resulting hypothesis on the piece that was not used for training.\n\nWhen $k=n$, it is called *leave-one-out cross validation*.\n\nImplement cross validation **assuming that the input data is shuffled already** so that the positives and negatives are distributed randomly. If the size of the data does not evenly divide by k, split the data into n % k sub-arrays of size n//k + 1 and the rest of size n//k. (Hint: You can use <a href=\"https://docs.scipy.org/doc/numpy/reference/generated/numpy.array_split.html\">numpy.array_split</a>\nand <a href=\"https://docs.scipy.org/doc/numpy/reference/generated/numpy.concatenate.html\">numpy.concatenate</a> with axis arguments to split and rejoin the data as you desire.)\n\nNote: In Python, n//k indicates integer division, e.g. 
2//3 gives 0 and 4//3 gives 1.", "_____no_output_____" ] ], [ [ "import numpy as np\ndef xval_learning_alg(learner, data, labels, k):\n s_data = np.array_split(data, k, axis=1)\n s_labels = np.array_split(labels, k, axis=1)\n\n score_sum = 0\n for i in range(k):\n data_train = np.concatenate(s_data[:i] + s_data[i+1:], axis=1)\n labels_train = np.concatenate(s_labels[:i] + s_labels[i+1:], axis=1)\n data_test = np.array(s_data[i])\n labels_test = np.array(s_labels[i])\n score_sum += eval_classifier(learner, data_train, labels_train,\n data_test, labels_test)\n return score_sum/k", "_____no_output_____" ], [ "test_xval_learning_alg(xval_learning_alg,perceptron)", "-----------Test Cross-eval Learning Algo-----------\nPassed! \n\n" ] ], [ [ "## 10) Testing\n\nIn this section, we compare the effectiveness of perceptron and averaged perceptron on some data that are not necessarily linearly separable.\n\nUse your `eval_learning_alg` and the `gen_flipped_lin_separable` generator in the code file to evaluate the accuracy of `perceptron` vs. a`veraged_perceptron`. `gen_flipped_lin_separable` can be called with an integer to return a data set and labels. Note that this generates linearly separable data and then \"flips\" the labels with some specified probability (the argument pflip); so most of the results will not be linearly separable. You can also specifiy pflip in the call to the generator. You should use the default values of th and th_0 to retain consistency with the Tutor.\n\nRun enough trials so that you can confidently predict the accuracy of these algorithms on new data from that same generator; assume training/test sets on the order of 20 points. The Tutor will check that your answer is within 0.025 of the answer we got using the same generator.", "_____no_output_____" ] ], [ [ "print(eval_learning_alg(perceptron, gen_flipped_lin_separable(pflip=.1), 20, 20, 5))", "0.82\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
4a2e2c6b8a68e5a4bb33bf99f5f955fa38187620
9,140
ipynb
Jupyter Notebook
checkpoint2/.ipynb_checkpoints/Checkpoint2-1-divergence-checkpoint.ipynb
liyuxuan48/MAE250H
7ea98451772ee8a199ba9f5ac0c01a1b9e256c30
[ "MIT" ]
null
null
null
checkpoint2/.ipynb_checkpoints/Checkpoint2-1-divergence-checkpoint.ipynb
liyuxuan48/MAE250H
7ea98451772ee8a199ba9f5ac0c01a1b9e256c30
[ "MIT" ]
null
null
null
checkpoint2/.ipynb_checkpoints/Checkpoint2-1-divergence-checkpoint.ipynb
liyuxuan48/MAE250H
7ea98451772ee8a199ba9f5ac0c01a1b9e256c30
[ "MIT" ]
null
null
null
25.674157
195
0.364114
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
4a2e2f74f853e40fffcf30e9fcd88b7a199269f6
5,372
ipynb
Jupyter Notebook
Shape_Detection/Untitled.ipynb
RobEn-AAST/AI-UAVC
732683fd5821d492b772cc5f966e86aed164a68c
[ "MIT" ]
16
2022-02-05T15:51:13.000Z
2022-02-05T17:38:54.000Z
Shape_Detection/Untitled.ipynb
RobEn-AAST/AI-UAVC
732683fd5821d492b772cc5f966e86aed164a68c
[ "MIT" ]
null
null
null
Shape_Detection/Untitled.ipynb
RobEn-AAST/AI-UAVC
732683fd5821d492b772cc5f966e86aed164a68c
[ "MIT" ]
null
null
null
54.816327
2,336
0.625652
[ [ [ "!ls", " 117.mkv\t\t DarknetConfig.cmake.in\t predictions.jpg\r\n 3rdparty\t\t darknet_images.py\t __pycache__\r\n backup\t\t\t darknet_n.py\t\t README.md\r\n backup1\t\t darknetn.py\t\t results\r\n'backup (copy)'\t\t darknet.py\t\t scripts\r\n bad.list\t\t darknet_video.py\t src\r\n build\t\t\t data\t\t\t Untitled.ipynb\r\n build.ps1\t\t image_yolov3.sh\t\t uselib\r\n cfg\t\t\t image_yolov4.sh\t\t vcpkg.json\r\n chart.png\t\t include\t\t\t video_yolov3.sh\r\n chartx.png\t\t json_mjpeg_streams.sh\t video_yolov4.sh\r\n chart_yolov4-custom.png libdarknet.so\t\t x.avi\r\n chart_yolov4-tiny-3l.png LICENSE\t\t\t yolov4.conv.137\r\n chart_yolov4-tiny-3lx.png Makefile\t\t yolov4-p6.conv.289\r\n chart_yolov4-tiny-custom.png net_cam_v3.sh\t\t yolov4-tiny.conv.29\r\n cmake\t\t\t net_cam_v4.sh\t\t yolov4.weights\r\n CMakeLists.txt\t\t new\r\n darknet\t\t obj\r\n" ], [ "from darknet_n import *", "_____no_output_____" ], [ "Y = YoloModel()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code" ] ]
4a2e3acbfd78fab427da9f4a7d564ebfae11dbc2
323,002
ipynb
Jupyter Notebook
experiments/pytorch_cascadenet.ipynb
starcksophie/fastmri-reproducible-benchmark
9d1f17011f0be911f2da5300063bfeecea86876d
[ "MIT" ]
null
null
null
experiments/pytorch_cascadenet.ipynb
starcksophie/fastmri-reproducible-benchmark
9d1f17011f0be911f2da5300063bfeecea86876d
[ "MIT" ]
null
null
null
experiments/pytorch_cascadenet.ipynb
starcksophie/fastmri-reproducible-benchmark
9d1f17011f0be911f2da5300063bfeecea86876d
[ "MIT" ]
null
null
null
29.819239
2,399
0.581953
[ [ [ "# # this just to make sure we are using only on CPU\n# import os\n# os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"-1\"", "_____no_output_____" ], [ "%cd ..", "/volatile/home/Zaccharie/workspace/fastmri-reproducible-benchmark\n" ], [ "import time\nimport os.path as op\n\nimport numpy as np\nimport torch\nfrom torch.optim import Adam\nfrom torch.nn import functional as F\nfrom torch.utils.data import DataLoader\nfrom torch.utils.tensorboard import SummaryWriter\nfrom tqdm import tqdm_notebook\n\nfrom cascading import CascadeNet\nfrom data_torch import MaskedUntouched2DDataset, MaskedUntouched2DAllLoadedDataset\nfrom torch_training import fit_torch, torch_psnr", "/volatile/home/Zaccharie/workspace/fastmri-reproducible-benchmark/venv/lib/python3.6/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n from ._conv import register_converters as _register_converters\nUsing TensorFlow backend.\n" ], [ "# paths\ntrain_path = '/media/Zaccharie/UHRes/singlecoil_train/singlecoil_train/'\nval_path = '/media/Zaccharie/UHRes/singlecoil_val/'\ntest_path = '/media/Zaccharie/UHRes/singlecoil_test/'\n\nn_samples_train = 34742\nn_samples_val = 7135\n\nn_volumes_train = 973\nn_volumes_val = 199", "_____no_output_____" ], [ "# data loader\n# generators\nAF = 4\n# train_gen = MaskedUntouched2DAllLoadedDataset(train_path, af=AF, inner_slices=1)\ntrain_gen = MaskedUntouched2DDataset(train_path, af=AF, inner_slices=1)\nval_gen = MaskedUntouched2DDataset(val_path, af=AF)", "_____no_output_____" ], [ "run_params = {\n 'n_cascade': 5, \n 'n_convs': 5, \n 'n_filters': 48,\n}\nn_epochs = 500\nrun_id = f'cascadenet_torch_af{AF}_{int(time.time())}'\nchkpt_path = f'checkpoints/{run_id}' + '-{epoch:02d}.hdf5'\nlog_dir = op.join('logs', run_id)\nprint(run_id)", "cascadenet_torch_af4_1568279302\n" ], [ "model = 
CascadeNet(**run_params)\noptimizer = Adam(model.parameters(), lr=1e-3, weight_decay=1e-7)\nwriter = SummaryWriter(log_dir=log_dir)\n\nmodel.cuda();", "_____no_output_____" ], [ "model_parameters = filter(lambda p: p.requires_grad, model.parameters())\nparams = sum([np.prod(p.size()) for p in model_parameters])", "_____no_output_____" ], [ "params", "_____no_output_____" ], [ "# def overfit_epoch(model, data, optimizer, device):\n# model.train()\n# kspace, mask, image_gt = data\n# kspace = kspace[0] * 1e6\n# mask = mask[0]\n# image_gt = image_gt[0] * 1e6\n# kspace = kspace.to(device)\n# mask = mask.to(device)\n# image_gt = image_gt.to(device)\n# image_pred = model(kspace, mask)\n\n\n# loss = F.l1_loss(image_pred, image_gt)\n# optimizer.zero_grad()\n# loss.backward()\n# optimizer.step()\n# psnr = torch_psnr(image_pred, image_gt)\n# print('Training PSNR:', psnr)", "_____no_output_____" ], [ "# %%time\n# i, data = next(enumerate(train_loader))\n# for _ in tqdm_notebook(range(500)):\n# overfit_epoch(model, data, optimizer, 'cuda')", "_____no_output_____" ], [ "%%time\ntrain_gen.filenames = train_gen.filenames[:10]\nval_gen.filenames = val_gen.filenames[:1]\ntrain_loader = DataLoader(\n dataset=train_gen,\n batch_size=1,\n shuffle=False,\n num_workers=10,\n pin_memory=True,\n)\nval_loader = DataLoader(\n dataset=val_gen,\n batch_size=1,\n# num_workers=35,\n pin_memory=True,\n shuffle=False,\n)\nfit_torch(\n model, \n train_loader, \n val_loader, \n n_epochs,\n writer, \n optimizer, \n chkpt_path, \n run_id=run_id, \n device='cuda', \n save_freq=500, \n tqdm_wrapper=tqdm_notebook,\n)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a2e4988598be24cd04f9d899720ca518cd55e1c
31,408
ipynb
Jupyter Notebook
solutions by participants/ex1/ex1-EliottRosenberg-71cost.ipynb
fazliberkordek/ibm-quantum-challenge-2021
2206a364e354965b749dcda7c5d62631f571d718
[ "Apache-2.0" ]
136
2021-05-20T14:07:53.000Z
2022-03-19T17:19:31.000Z
solutions by participants/ex1/ex1-EliottRosenberg-71cost.ipynb
fazliberkordek/ibm-quantum-challenge-2021
2206a364e354965b749dcda7c5d62631f571d718
[ "Apache-2.0" ]
106
2021-05-21T15:41:13.000Z
2021-11-08T08:29:25.000Z
solutions by participants/ex1/ex1-EliottRosenberg-71cost.ipynb
fazliberkordek/ibm-quantum-challenge-2021
2206a364e354965b749dcda7c5d62631f571d718
[ "Apache-2.0" ]
190
2021-05-20T14:02:09.000Z
2022-03-27T16:31:20.000Z
184.752941
13,848
0.909195
[ [ [ "import numpy as np\n# Importing standard Qiskit libraries\nfrom qiskit import QuantumCircuit, transpile, Aer, IBMQ\nfrom qiskit.tools.jupyter import *\nfrom qiskit.visualization import *\nfrom ibm_quantum_widgets import *\n\n# Loading your IBM Quantum account(s)\nprovider = IBMQ.load_account()", "_____no_output_____" ] ], [ [ "I began with the decomposition of the Toffoli gate given here:\nhttps://qiskit.org/textbook/ch-gates/more-circuit-identities.html\n\nThen I substituted the decomposition of the Hadamard gate worked out earlier in this exercise. Then I combined two adjacent Rz rotations.", "_____no_output_____" ] ], [ [ "from math import pi\n\nqc = QuantumCircuit(3)\n\nqc.rz(pi/2,2)\nqc.sx(2)\nqc.rz(pi/2,2)\nqc.cx(1,2)\nqc.rz(-pi/4,2)\nqc.cx(0,2)\nqc.rz(pi/4,2)\nqc.cx(1,2)\nqc.rz(-pi/4,2)\nqc.cx(0,2)\nqc.rz(pi/4,1)\nqc.rz(3*pi/4,2)\nqc.sx(2)\nqc.rz(pi/2,2)\nqc.cx(0,1)\nqc.rz(pi/4,0)\nqc.rz(-pi/4,1)\nqc.cx(0,1)\n\nqc.draw()", "_____no_output_____" ] ], [ [ "Notice that an Rz can be commuted past two CX gates if the rotation is on the target qubit. (If the control bit is 0, this is trivial; if the control bit is 1, this negates the angle twice.) Therefore, we can commute the Rz(3pi/4) gate on qubit 2 to the left past the two pairs of CNOT gates. The result is:", "_____no_output_____" ] ], [ [ "from math import pi\n\nqc = QuantumCircuit(3)\n\nqc.rz(pi/2,2)\nqc.sx(2)\nqc.rz(5*pi/4,2)\nqc.cx(1,2)\nqc.rz(-pi/4,2)\nqc.cx(0,2)\nqc.rz(pi/4,2)\nqc.cx(1,2)\nqc.rz(-pi/4,2)\nqc.cx(0,2)\nqc.rz(pi/4,1)\nqc.sx(2)\nqc.rz(pi/2,2)\nqc.cx(0,1)\nqc.rz(pi/4,0)\nqc.rz(-pi/4,1)\nqc.cx(0,1)\n\nqc.draw()", "_____no_output_____" ], [ "from qc_grader import grade_ex1\ngrade_ex1(qc)", "Grading your answer for ex1. Please wait...\n\nCongratulations 🎉! Your answer is correct.\nYour cost is 71.\nFeel free to submit your answer.\n\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
4a2e51be1ce9310f5693940de9bd2e69a42d30ac
83,734
ipynb
Jupyter Notebook
labs/lab18/flights.ipynb
ludmilask/informatics2020
f955826c6c9211f22ab702e4d42879b54560ec17
[ "MIT" ]
null
null
null
labs/lab18/flights.ipynb
ludmilask/informatics2020
f955826c6c9211f22ab702e4d42879b54560ec17
[ "MIT" ]
null
null
null
labs/lab18/flights.ipynb
ludmilask/informatics2020
f955826c6c9211f22ab702e4d42879b54560ec17
[ "MIT" ]
1
2021-05-21T12:30:41.000Z
2021-05-21T12:30:41.000Z
126.295626
14,194
0.76706
[ [ [ "import pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport csv\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "path_to_file = 'flight_delays.csv'\ndf = pd.read_csv(path_to_file, sep=',')\nf = df[(df['dep_delayed_15min'] == 'N')]\nf", "_____no_output_____" ] ], [ [ "1.Доля всех задержек ко всем вылетам", "_____no_output_____" ] ], [ [ "df.groupby('dep_delayed_15min')['UniqueCarrier'].count()", "_____no_output_____" ], [ "import plotly.express as px\n\nfig = px.pie(df.groupby('dep_delayed_15min')['UniqueCarrier'].count(), values='UniqueCarrier', names='UniqueCarrier', title='Delays')\nfig.show()", "_____no_output_____" ] ], [ [ "2. зависимость количества задержек от длины пути, который предстоит пролететь самолёту", "_____no_output_____" ] ], [ [ "from_distance = f.groupby('Distance')['dep_delayed_15min'].count()\nfrom_distance = from_distance.plot(kind=\"bar\", rot=5, fontsize=10, color = 'seagreen')\nfrom_distance.set_ylabel(\"Number of delay\")\nfrom_distance.set_xlabel(\"Distance\")\nplt.ylim([0, 340])", "_____no_output_____" ] ], [ [ "3. Tоп 5 направлений, для которых чаще всего происходят задержки", "_____no_output_____" ] ], [ [ "five_bad_directions = f.groupby('Dest')['dep_delayed_15min'].count().sort_values(ascending=False).head(5)\nfive_bad_directions = five_bad_directions.plot(kind=\"bar\", rot=5, fontsize=10, color = 'purple')\nfive_bad_directions.set_ylabel(\"Number of delay\")\nfive_bad_directions.set_xlabel(\"Direction\")\n", "_____no_output_____" ] ], [ [ "4. 
В какие времена года чаще всего происходят задержки рейсов", "_____no_output_____" ] ], [ [ "per_month = f.groupby('Month')['dep_delayed_15min'].count()\nseasons = ['Winter','Spring', 'Summer', 'Autumn']\nstarts_of_seasons = [1, 4, 7, 9]\ndata = []\nper_season = 0\nfor i in range(1, 13):\n per_season += per_month[f'c-{i}'] \n if i%3==0:\n data.append(per_season)\n per_season = 0\n\nseasons_data = pd.DataFrame(data, index = seasons, columns = ['delay'])\ntemp = seasons_data.plot(kind='bar', rot=75, color='maroon');\ntemp.set_xlabel(\"Season\")\ntemp.set_ylabel(\"Number of delays\")\nplt.ylim([19100, 21000])\n\n", "_____no_output_____" ], [ "", "_____no_output_____" ] ], [ [ "\n5. Топ 10 самых хороших перевозчиков, которые реже всего задерживают свои рейсы\n", "_____no_output_____" ] ], [ [ "tail_ten_comp = f.groupby('UniqueCarrier')['dep_delayed_15min'].count().sort_values(ascending=True).tail(10)\ntail_ten_comp = tail_ten_comp.plot(kind=\"bar\", rot=10, fontsize=10, color = 'rebeccapurple')\ntail_ten_comp.set_ylabel(\"Number of delay\")\ntail_ten_comp.set_xlabel(\"Company\")", "_____no_output_____" ] ], [ [ "6. Топ 10 самых безответственных аэропортов, в которых чаще всего происходят задержки\n", "_____no_output_____" ] ], [ [ "bad_airports = f.groupby('Origin')['dep_delayed_15min'].count().sort_values(ascending=False).head(10)\nSmth_after_that = bad_airports.plot(x=\"airport\", y=\"Delay\", kind=\"bar\", rot=10, fontsize=10, color = 'darkkhaki')\nSmth_after_that.set_ylabel(\"Number of delay\")\nSmth_after_that.set_xlabel(\"Airport\")", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
4a2e55ba3beaacb5e58a555e471ff614ed54cd1f
17,692
ipynb
Jupyter Notebook
Financial Word2Vec Infer.ipynb
dsatchkov/NLPfin
f2bb63b7510417ca59b16debeb40b90552750018
[ "Apache-2.0" ]
1
2020-08-26T23:20:40.000Z
2020-08-26T23:20:40.000Z
Financial Word2Vec Infer.ipynb
dsatchkov/NLPfin
f2bb63b7510417ca59b16debeb40b90552750018
[ "Apache-2.0" ]
null
null
null
Financial Word2Vec Infer.ipynb
dsatchkov/NLPfin
f2bb63b7510417ca59b16debeb40b90552750018
[ "Apache-2.0" ]
1
2020-09-01T13:49:50.000Z
2020-09-01T13:49:50.000Z
36.403292
126
0.489713
[ [ [ "import gensim\nimport pandas as pd\nimport numpy as np\nimport csv\nimport re\nimport nltk\nfrom nltk.corpus import stopwords\nnltk.download('stopwords')\nimport operator\nimport gensim.models", "[nltk_data] Downloading package stopwords to /home/user/nltk_data...\n[nltk_data] Package stopwords is already up-to-date!\n" ], [ "#load pre-existing model\nmodel = gensim.models.Word2Vec.load(\"/media/user/Data/wellai/models/word2vec_financial_new2.model\")", "_____no_output_____" ], [ "#file contains mappings from some tickers to some names\ndf=pd.read_excel('Assets.xlsx')", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ], [ "#Create mapping from ticker to name and from name to ticker to bring order to final prediction dictionary\ntickname={}\nnametick={}\nfor index, row in df.iterrows():\n #print(row['Ticker'])\n ticker=row['Ticker'].split()[0].title()\n name=row['name'].split()[0].title()\n tickname[ticker]=name\n nametick[name]=ticker\n ", "_____no_output_____" ], [ "#find most similar stocks & companies\npr=model.wv.most_similar(['Tesla'],topn=200)", "_____no_output_____" ], [ "finalist={}\n#go through model predictions, remove duplicates and create a final dictionary {Name: (Probability, Ticker if avail)}\nfor p in pr:\n if len(p[0])>2:\n clean=p[0].replace(\"’s\",\"\")\n clean=clean.replace(\"'s\",\"\").title()\n #print(clean)\n cleanalias=clean\n tickeralias=\"\"\n # a bit of a trick that in text we could see the name or the ticker being mentioned. 
Properly, it is best to \n #combine those at the text cleaning stage, so that 'Tesla' and 'TSLA' get put into one entity\n #here we are simply taking the first available in the list with highest probability and using that\n if clean in tickname:\n cleanalias=tickname[clean]\n if clean in nametick or cleanalias in nametick:\n tickeralias=nametick[cleanalias]\n if cleanalias not in finalist :\n finalist[cleanalias]=(p[1],tickeralias)\n #finalist[entity]=(p[1])\nfinalist", "_____no_output_____" ], [ " #correlation between stocks or companies\nmodel.wv.similarity('Tesla','Nio')", "_____no_output_____" ], [ "#correlation between two concepts\npr2=model.predict_output_word(['Tesla','Nvidia','Recession'], topn=100)\n", "_____no_output_____" ], [ "#thematic correlations\npr2=model.predict_output_word(['Biden','Amazon'], topn=100)\nfinal_prob,final=ret_ner(pr2)\nfinal_prob\n", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a2e60803482e06aa2b075a7073d6da4f8dae1a5
339,359
ipynb
Jupyter Notebook
Neural NetWorks and Deep Learning/Week 2 Neural Networks Basics/Logistic_Regression_with_a_Neural_Network_mindset_v6a.ipynb
joyfinder/Deep_Learning_Specialisation
e8dd50b6f3eeda73509e690981b8818120c1dcd0
[ "MIT" ]
null
null
null
Neural NetWorks and Deep Learning/Week 2 Neural Networks Basics/Logistic_Regression_with_a_Neural_Network_mindset_v6a.ipynb
joyfinder/Deep_Learning_Specialisation
e8dd50b6f3eeda73509e690981b8818120c1dcd0
[ "MIT" ]
null
null
null
Neural NetWorks and Deep Learning/Week 2 Neural Networks Basics/Logistic_Regression_with_a_Neural_Network_mindset_v6a.ipynb
joyfinder/Deep_Learning_Specialisation
e8dd50b6f3eeda73509e690981b8818120c1dcd0
[ "MIT" ]
null
null
null
252.687267
226,946
0.896752
[ [ [ "# Logistic Regression with a Neural Network mindset\n\nWelcome to your first (required) programming assignment! You will build a logistic regression classifier to recognize cats. This assignment will step you through how to do this with a Neural Network mindset, and so will also hone your intuitions about deep learning.\n\n**Instructions:**\n- Do not use loops (for/while) in your code, unless the instructions explicitly ask you to do so.\n\n**You will learn to:**\n- Build the general architecture of a learning algorithm, including:\n - Initializing parameters\n - Calculating the cost function and its gradient\n - Using an optimization algorithm (gradient descent) \n- Gather all three functions above into a main model function, in the right order.", "_____no_output_____" ], [ "## <font color='darkblue'>Updates</font>\nThis notebook has been updated over the past few months. The prior version was named \"v5\", and the current versionis now named '6a'\n\n#### If you were working on a previous version:\n* You can find your prior work by looking in the file directory for the older files (named by version name).\n* To view the file directory, click on the \"Coursera\" icon in the top left corner of this notebook.\n* Please copy your work from the older versions to the new version, in order to submit your work for grading.\n\n#### List of Updates\n* Forward propagation formula, indexing now starts at 1 instead of 0.\n* Optimization function comment now says \"print cost every 100 training iterations\" instead of \"examples\".\n* Fixed grammar in the comments.\n* Y_prediction_test variable name is used consistently.\n* Plot's axis label now says \"iterations (hundred)\" instead of \"iterations\".\n* When testing the model, the test image is normalized by dividing by 255.", "_____no_output_____" ], [ "## 1 - Packages ##\n\nFirst, let's run the cell below to import all the packages that you will need during this assignment. 
\n- [numpy](www.numpy.org) is the fundamental package for scientific computing with Python.\n- [h5py](http://www.h5py.org) is a common package to interact with a dataset that is stored on an H5 file.\n- [matplotlib](http://matplotlib.org) is a famous library to plot graphs in Python.\n- [PIL](http://www.pythonware.com/products/pil/) and [scipy](https://www.scipy.org/) are used here to test your model with your own picture at the end.", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as plt\nimport h5py\nimport scipy\nfrom PIL import Image\nfrom scipy import ndimage\nfrom lr_utils import load_dataset\n\n%matplotlib inline", "_____no_output_____" ] ], [ [ "## 2 - Overview of the Problem set ##\n\n**Problem Statement**: You are given a dataset (\"data.h5\") containing:\n - a training set of m_train images labeled as cat (y=1) or non-cat (y=0)\n - a test set of m_test images labeled as cat or non-cat\n - each image is of shape (num_px, num_px, 3) where 3 is for the 3 channels (RGB). Thus, each image is square (height = num_px) and (width = num_px).\n\nYou will build a simple image-recognition algorithm that can correctly classify pictures as cat or non-cat.\n\nLet's get more familiar with the dataset. Load the data by running the following code.", "_____no_output_____" ] ], [ [ "# Loading the data (cat/non-cat)\ntrain_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = load_dataset()", "_____no_output_____" ] ], [ [ "We added \"_orig\" at the end of image datasets (train and test) because we are going to preprocess them. After preprocessing, we will end up with train_set_x and test_set_x (the labels train_set_y and test_set_y don't need any preprocessing).\n\nEach line of your train_set_x_orig and test_set_x_orig is an array representing an image. You can visualize an example by running the following code. Feel free also to change the `index` value and re-run to see other images. 
", "_____no_output_____" ] ], [ [ "# Example of a picture\nindex = 25\nplt.imshow(train_set_x_orig[index])\nprint (\"y = \" + str(train_set_y[:, index]) + \", it's a '\" + classes[np.squeeze(train_set_y[:, index])].decode(\"utf-8\") + \"' picture.\")", "y = [1], it's a 'cat' picture.\n" ] ], [ [ "Many software bugs in deep learning come from having matrix/vector dimensions that don't fit. If you can keep your matrix/vector dimensions straight you will go a long way toward eliminating many bugs. \n\n**Exercise:** Find the values for:\n - m_train (number of training examples)\n - m_test (number of test examples)\n - num_px (= height = width of a training image)\nRemember that `train_set_x_orig` is a numpy-array of shape (m_train, num_px, num_px, 3). For instance, you can access `m_train` by writing `train_set_x_orig.shape[0]`.", "_____no_output_____" ] ], [ [ "### START CODE HERE ### (≈ 3 lines of code)\nm_train = train_set_x_orig.shape[0]\nm_test = test_set_x_orig.shape[0]\nnum_px = train_set_x_orig.shape[1]\n### END CODE HERE ###\n\nprint (\"Number of training examples: m_train = \" + str(m_train))\nprint (\"Number of testing examples: m_test = \" + str(m_test))\nprint (\"Height/Width of each image: num_px = \" + str(num_px))\nprint (\"Each image is of size: (\" + str(num_px) + \", \" + str(num_px) + \", 3)\")\nprint (\"train_set_x shape: \" + str(train_set_x_orig.shape))\nprint (\"train_set_y shape: \" + str(train_set_y.shape))\nprint (\"test_set_x shape: \" + str(test_set_x_orig.shape))\nprint (\"test_set_y shape: \" + str(test_set_y.shape))", "Number of training examples: m_train = 209\nNumber of testing examples: m_test = 50\nHeight/Width of each image: num_px = 64\nEach image is of size: (64, 64, 3)\ntrain_set_x shape: (209, 64, 64, 3)\ntrain_set_y shape: (1, 209)\ntest_set_x shape: (50, 64, 64, 3)\ntest_set_y shape: (1, 50)\n" ] ], [ [ "**Expected Output for m_train, m_test and num_px**: \n<table style=\"width:15%\">\n <tr>\n <td>**m_train**</td>\n <td> 209 
</td> \n </tr>\n \n <tr>\n <td>**m_test**</td>\n <td> 50 </td> \n </tr>\n \n <tr>\n <td>**num_px**</td>\n <td> 64 </td> \n </tr>\n \n</table>\n", "_____no_output_____" ], [ "For convenience, you should now reshape images of shape (num_px, num_px, 3) in a numpy-array of shape (num_px $*$ num_px $*$ 3, 1). After this, our training (and test) dataset is a numpy-array where each column represents a flattened image. There should be m_train (respectively m_test) columns.\n\n**Exercise:** Reshape the training and test data sets so that images of size (num_px, num_px, 3) are flattened into single vectors of shape (num\\_px $*$ num\\_px $*$ 3, 1).\n\nA trick when you want to flatten a matrix X of shape (a,b,c,d) to a matrix X_flatten of shape (b$*$c$*$d, a) is to use: \n```python\nX_flatten = X.reshape(X.shape[0], -1).T # X.T is the transpose of X\n```", "_____no_output_____" ] ], [ [ "# Reshape the training and test examples\n\n### START CODE HERE ### (≈ 2 lines of code)\ntrain_set_x_flatten = train_set_x_orig.reshape(train_set_x_orig.shape[0], -1).T\ntest_set_x_flatten = test_set_x_orig.reshape(test_set_x_orig.shape[0], -1).T\n### END CODE HERE ###\n\nprint (\"train_set_x_flatten shape: \" + str(train_set_x_flatten.shape))\nprint (\"train_set_y shape: \" + str(train_set_y.shape))\nprint (\"test_set_x_flatten shape: \" + str(test_set_x_flatten.shape))\nprint (\"test_set_y shape: \" + str(test_set_y.shape))\nprint (\"sanity check after reshaping: \" + str(train_set_x_flatten[0:5,0]))", "train_set_x_flatten shape: (12288, 209)\ntrain_set_y shape: (1, 209)\ntest_set_x_flatten shape: (12288, 50)\ntest_set_y shape: (1, 50)\nsanity check after reshaping: [17 31 56 22 33]\n" ] ], [ [ "**Expected Output**: \n\n<table style=\"width:35%\">\n <tr>\n <td>**train_set_x_flatten shape**</td>\n <td> (12288, 209)</td> \n </tr>\n <tr>\n <td>**train_set_y shape**</td>\n <td>(1, 209)</td> \n </tr>\n <tr>\n <td>**test_set_x_flatten shape**</td>\n <td>(12288, 50)</td> \n </tr>\n <tr>\n 
<td>**test_set_y shape**</td>\n <td>(1, 50)</td> \n </tr>\n <tr>\n <td>**sanity check after reshaping**</td>\n <td>[17 31 56 22 33]</td> \n </tr>\n</table>", "_____no_output_____" ], [ "To represent color images, the red, green and blue channels (RGB) must be specified for each pixel, and so the pixel value is actually a vector of three numbers ranging from 0 to 255.\n\nOne common preprocessing step in machine learning is to center and standardize your dataset, meaning that you substract the mean of the whole numpy array from each example, and then divide each example by the standard deviation of the whole numpy array. But for picture datasets, it is simpler and more convenient and works almost as well to just divide every row of the dataset by 255 (the maximum value of a pixel channel).\n\n<!-- During the training of your model, you're going to multiply weights and add biases to some initial inputs in order to observe neuron activations. Then you backpropogate with the gradients to train the model. But, it is extremely important for each feature to have a similar range such that our gradients don't explode. You will see that more in detail later in the lectures. !--> \n\nLet's standardize our dataset.", "_____no_output_____" ] ], [ [ "train_set_x = train_set_x_flatten/255.\ntest_set_x = test_set_x_flatten/255.", "_____no_output_____" ] ], [ [ "<font color='blue'>\n**What you need to remember:**\n\nCommon steps for pre-processing a new dataset are:\n- Figure out the dimensions and shapes of the problem (m_train, m_test, num_px, ...)\n- Reshape the datasets such that each example is now a vector of size (num_px \\* num_px \\* 3, 1)\n- \"Standardize\" the data", "_____no_output_____" ], [ "## 3 - General Architecture of the learning algorithm ##\n\nIt's time to design a simple algorithm to distinguish cat images from non-cat images.\n\nYou will build a Logistic Regression, using a Neural Network mindset. 
The following Figure explains why **Logistic Regression is actually a very simple Neural Network!**\n\n<img src=\"images/LogReg_kiank.png\" style=\"width:650px;height:400px;\">\n\n**Mathematical expression of the algorithm**:\n\nFor one example $x^{(i)}$:\n$$z^{(i)} = w^T x^{(i)} + b \\tag{1}$$\n$$\\hat{y}^{(i)} = a^{(i)} = sigmoid(z^{(i)})\\tag{2}$$ \n$$ \\mathcal{L}(a^{(i)}, y^{(i)}) = - y^{(i)} \\log(a^{(i)}) - (1-y^{(i)} ) \\log(1-a^{(i)})\\tag{3}$$\n\nThe cost is then computed by summing over all training examples:\n$$ J = \\frac{1}{m} \\sum_{i=1}^m \\mathcal{L}(a^{(i)}, y^{(i)})\\tag{6}$$\n\n**Key steps**:\nIn this exercise, you will carry out the following steps: \n - Initialize the parameters of the model\n - Learn the parameters for the model by minimizing the cost \n - Use the learned parameters to make predictions (on the test set)\n - Analyse the results and conclude", "_____no_output_____" ], [ "## 4 - Building the parts of our algorithm ## \n\nThe main steps for building a Neural Network are:\n1. Define the model structure (such as number of input features) \n2. Initialize the model's parameters\n3. Loop:\n - Calculate current loss (forward propagation)\n - Calculate current gradient (backward propagation)\n - Update parameters (gradient descent)\n\nYou often build 1-3 separately and integrate them into one function we call `model()`.\n\n### 4.1 - Helper functions\n\n**Exercise**: Using your code from \"Python Basics\", implement `sigmoid()`. As you've seen in the figure above, you need to compute $sigmoid( w^T x + b) = \\frac{1}{1 + e^{-(w^T x + b)}}$ to make predictions. 
Use np.exp().", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: sigmoid\n\ndef sigmoid(z):\n \"\"\"\n Compute the sigmoid of z\n\n Arguments:\n z -- A scalar or numpy array of any size.\n\n Return:\n s -- sigmoid(z)\n \"\"\"\n\n ### START CODE HERE ### (≈ 1 line of code)\n s = 1 / (1 + np.exp(-z))\n ### END CODE HERE ###\n \n return s", "_____no_output_____" ], [ "print (\"sigmoid([0, 2]) = \" + str(sigmoid(np.array([0,2]))))", "sigmoid([0, 2]) = [ 0.5 0.88079708]\n" ] ], [ [ "**Expected Output**: \n\n<table>\n <tr>\n <td>**sigmoid([0, 2])**</td>\n <td> [ 0.5 0.88079708]</td> \n </tr>\n</table>", "_____no_output_____" ], [ "### 4.2 - Initializing parameters\n\n**Exercise:** Implement parameter initialization in the cell below. You have to initialize w as a vector of zeros. If you don't know what numpy function to use, look up np.zeros() in the Numpy library's documentation.", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: initialize_with_zeros\n\ndef initialize_with_zeros(dim):\n \"\"\"\n This function creates a vector of zeros of shape (dim, 1) for w and initializes b to 0.\n \n Argument:\n dim -- size of the w vector we want (or number of parameters in this case)\n \n Returns:\n w -- initialized vector of shape (dim, 1)\n b -- initialized scalar (corresponds to the bias)\n \"\"\"\n \n ### START CODE HERE ### (≈ 1 line of code)\n w = np.zeros([dim, 1])\n b = 0\n ### END CODE HERE ###\n\n assert(w.shape == (dim, 1))\n assert(isinstance(b, float) or isinstance(b, int))\n \n return w, b", "_____no_output_____" ], [ "dim = 2\nw, b = initialize_with_zeros(dim)\nprint (\"w = \" + str(w))\nprint (\"b = \" + str(b))", "w = [[ 0.]\n [ 0.]]\nb = 0\n" ] ], [ [ "**Expected Output**: \n\n\n<table style=\"width:15%\">\n <tr>\n <td> ** w ** </td>\n <td> [[ 0.]\n [ 0.]] </td>\n </tr>\n <tr>\n <td> ** b ** </td>\n <td> 0 </td>\n </tr>\n</table>\n\nFor image inputs, w will be of shape (num_px $\\times$ num_px $\\times$ 3, 1).", "_____no_output_____" ], [ "### 4.3 - Forward 
and Backward propagation\n\nNow that your parameters are initialized, you can do the \"forward\" and \"backward\" propagation steps for learning the parameters.\n\n**Exercise:** Implement a function `propagate()` that computes the cost function and its gradient.\n\n**Hints**:\n\nForward Propagation:\n- You get X\n- You compute $A = \\sigma(w^T X + b) = (a^{(1)}, a^{(2)}, ..., a^{(m-1)}, a^{(m)})$\n- You calculate the cost function: $J = -\\frac{1}{m}\\sum_{i=1}^{m}y^{(i)}\\log(a^{(i)})+(1-y^{(i)})\\log(1-a^{(i)})$\n\nHere are the two formulas you will be using: \n\n$$ \\frac{\\partial J}{\\partial w} = \\frac{1}{m}X(A-Y)^T\\tag{7}$$\n$$ \\frac{\\partial J}{\\partial b} = \\frac{1}{m} \\sum_{i=1}^m (a^{(i)}-y^{(i)})\\tag{8}$$", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: propagate\n\ndef propagate(w, b, X, Y):\n \"\"\"\n Implement the cost function and its gradient for the propagation explained above\n\n Arguments:\n w -- weights, a numpy array of size (num_px * num_px * 3, 1)\n b -- bias, a scalar\n X -- data of size (num_px * num_px * 3, number of examples)\n Y -- true \"label\" vector (containing 0 if non-cat, 1 if cat) of size (1, number of examples)\n\n Return:\n cost -- negative log-likelihood cost for logistic regression\n dw -- gradient of the loss with respect to w, thus same shape as w\n db -- gradient of the loss with respect to b, thus same shape as b\n \n Tips:\n - Write your code step by step for the propagation. 
np.log(), np.dot()\n \"\"\"\n \n m = X.shape[1]\n \n # FORWARD PROPAGATION (FROM X TO COST)\n ### START CODE HERE ### (≈ 2 lines of code)\n A = sigmoid(np.dot(w.T,X) + b) # compute activation\n cost = -1 / m * (np.dot(Y,np.log(A).T) + np.dot((1-Y),np.log(1 - A).T)) # compute cost\n ### END CODE HERE ###\n \n # BACKWARD PROPAGATION (TO FIND GRAD)\n ### START CODE HERE ### (≈ 2 lines of code)\n dw = 1 / m * (np.dot(X,(A- Y).T))\n db = 1 / m * (np.sum(A - Y))\n ### END CODE HERE ###\n\n assert(dw.shape == w.shape)\n assert(db.dtype == float)\n cost = np.squeeze(cost)\n assert(cost.shape == ())\n \n grads = {\"dw\": dw,\n \"db\": db}\n \n return grads, cost", "_____no_output_____" ], [ "w, b, X, Y = np.array([[1.],[2.]]), 2., np.array([[1.,2.,-1.],[3.,4.,-3.2]]), np.array([[1,0,1]])\ngrads, cost = propagate(w, b, X, Y)\nprint (\"dw = \" + str(grads[\"dw\"]))\nprint (\"db = \" + str(grads[\"db\"]))\nprint (\"cost = \" + str(cost))", "dw = [[ 0.99845601]\n [ 2.39507239]]\ndb = 0.00145557813678\ncost = 5.801545319394553\n" ] ], [ [ "**Expected Output**:\n\n<table style=\"width:50%\">\n <tr>\n <td> ** dw ** </td>\n <td> [[ 0.99845601]\n [ 2.39507239]]</td>\n </tr>\n <tr>\n <td> ** db ** </td>\n <td> 0.00145557813678 </td>\n </tr>\n <tr>\n <td> ** cost ** </td>\n <td> 5.801545319394553 </td>\n </tr>\n\n</table>", "_____no_output_____" ], [ "### 4.4 - Optimization\n- You have initialized your parameters.\n- You are also able to compute a cost function and its gradient.\n- Now, you want to update the parameters using gradient descent.\n\n**Exercise:** Write down the optimization function. The goal is to learn $w$ and $b$ by minimizing the cost function $J$. 
For a parameter $\\theta$, the update rule is $ \\theta = \\theta - \\alpha \\text{ } d\\theta$, where $\\alpha$ is the learning rate.", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: optimize\n\ndef optimize(w, b, X, Y, num_iterations, learning_rate, print_cost = False):\n \"\"\"\n This function optimizes w and b by running a gradient descent algorithm\n \n Arguments:\n w -- weights, a numpy array of size (num_px * num_px * 3, 1)\n b -- bias, a scalar\n X -- data of shape (num_px * num_px * 3, number of examples)\n Y -- true \"label\" vector (containing 0 if non-cat, 1 if cat), of shape (1, number of examples)\n num_iterations -- number of iterations of the optimization loop\n learning_rate -- learning rate of the gradient descent update rule\n print_cost -- True to print the loss every 100 steps\n \n Returns:\n params -- dictionary containing the weights w and bias b\n grads -- dictionary containing the gradients of the weights and bias with respect to the cost function\n costs -- list of all the costs computed during the optimization, this will be used to plot the learning curve.\n \n Tips:\n You basically need to write down two steps and iterate through them:\n 1) Calculate the cost and the gradient for the current parameters. 
Use propagate().\n 2) Update the parameters using gradient descent rule for w and b.\n \"\"\"\n \n costs = []\n \n for i in range(num_iterations):\n \n \n # Cost and gradient calculation (≈ 1-4 lines of code)\n ### START CODE HERE ### \n grads, cost = propagate(w,b,X,Y)\n ### END CODE HERE ###\n \n # Retrieve derivatives from grads\n dw = grads[\"dw\"]\n db = grads[\"db\"]\n \n # update rule (≈ 2 lines of code)\n ### START CODE HERE ###\n w = w - learning_rate * dw\n b = b - learning_rate * db\n ### END CODE HERE ###\n \n # Record the costs\n if i % 100 == 0:\n costs.append(cost)\n \n # Print the cost every 100 training iterations\n if print_cost and i % 100 == 0:\n print (\"Cost after iteration %i: %f\" %(i, cost))\n \n params = {\"w\": w,\n \"b\": b}\n \n grads = {\"dw\": dw,\n \"db\": db}\n \n return params, grads, costs", "_____no_output_____" ], [ "params, grads, costs = optimize(w, b, X, Y, num_iterations= 100, learning_rate = 0.009, print_cost = False)\n\nprint (\"w = \" + str(params[\"w\"]))\nprint (\"b = \" + str(params[\"b\"]))\nprint (\"dw = \" + str(grads[\"dw\"]))\nprint (\"db = \" + str(grads[\"db\"]))", "w = [[ 0.19033591]\n [ 0.12259159]]\nb = 1.92535983008\ndw = [[ 0.67752042]\n [ 1.41625495]]\ndb = 0.219194504541\n" ] ], [ [ "**Expected Output**: \n\n<table style=\"width:40%\">\n <tr>\n <td> **w** </td>\n <td>[[ 0.19033591]\n [ 0.12259159]] </td>\n </tr>\n \n <tr>\n <td> **b** </td>\n <td> 1.92535983008 </td>\n </tr>\n <tr>\n <td> **dw** </td>\n <td> [[ 0.67752042]\n [ 1.41625495]] </td>\n </tr>\n <tr>\n <td> **db** </td>\n <td> 0.219194504541 </td>\n </tr>\n\n</table>", "_____no_output_____" ], [ "**Exercise:** The previous function will output the learned w and b. We are able to use w and b to predict the labels for a dataset X. Implement the `predict()` function. There are two steps to computing predictions:\n\n1. Calculate $\\hat{Y} = A = \\sigma(w^T X + b)$\n\n2. 
Convert the entries of a into 0 (if activation <= 0.5) or 1 (if activation > 0.5), stores the predictions in a vector `Y_prediction`. If you wish, you can use an `if`/`else` statement in a `for` loop (though there is also a way to vectorize this). ", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: predict\n\ndef predict(w, b, X):\n '''\n Predict whether the label is 0 or 1 using learned logistic regression parameters (w, b)\n \n Arguments:\n w -- weights, a numpy array of size (num_px * num_px * 3, 1)\n b -- bias, a scalar\n X -- data of size (num_px * num_px * 3, number of examples)\n \n Returns:\n Y_prediction -- a numpy array (vector) containing all predictions (0/1) for the examples in X\n '''\n \n m = X.shape[1]\n Y_prediction = np.zeros((1,m))\n w = w.reshape(X.shape[0], 1)\n \n # Compute vector \"A\" predicting the probabilities of a cat being present in the picture\n ### START CODE HERE ### (≈ 1 line of code)\n A = sigmoid(np.dot(w.T,X) + b)\n ### END CODE HERE ###\n \n for i in range(A.shape[1]):\n \n # Convert probabilities A[0,i] to actual predictions p[0,i]\n ### START CODE HERE ### (≈ 4 lines of code)\n if(A[0][i] <= 0.5):\n Y_prediction[0][i] = 0\n else:\n Y_prediction[0][i] = 1\n pass\n ### END CODE HERE ###\n \n assert(Y_prediction.shape == (1, m))\n \n return Y_prediction", "_____no_output_____" ], [ "w = np.array([[0.1124579],[0.23106775]])\nb = -0.3\nX = np.array([[1.,-1.1,-3.2],[1.2,2.,0.1]])\nprint (\"predictions = \" + str(predict(w, b, X)))", "predictions = [[ 1. 1. 0.]]\n" ] ], [ [ "**Expected Output**: \n\n<table style=\"width:30%\">\n <tr>\n <td>\n **predictions**\n </td>\n <td>\n [[ 1. 1. 
0.]]\n </td> \n </tr>\n\n</table>\n", "_____no_output_____" ], [ "<font color='blue'>\n**What to remember:**\nYou've implemented several functions that:\n- Initialize (w,b)\n- Optimize the loss iteratively to learn parameters (w,b):\n - computing the cost and its gradient \n - updating the parameters using gradient descent\n- Use the learned (w,b) to predict the labels for a given set of examples", "_____no_output_____" ], [ "## 5 - Merge all functions into a model ##\n\nYou will now see how the overall model is structured by putting together all the building blocks (functions implemented in the previous parts) together, in the right order.\n\n**Exercise:** Implement the model function. Use the following notation:\n - Y_prediction_test for your predictions on the test set\n - Y_prediction_train for your predictions on the train set\n - w, costs, grads for the outputs of optimize()", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: model\n\ndef model(X_train, Y_train, X_test, Y_test, num_iterations = 2000, learning_rate = 0.5, print_cost = False):\n \"\"\"\n Builds the logistic regression model by calling the function you've implemented previously\n \n Arguments:\n X_train -- training set represented by a numpy array of shape (num_px * num_px * 3, m_train)\n Y_train -- training labels represented by a numpy array (vector) of shape (1, m_train)\n X_test -- test set represented by a numpy array of shape (num_px * num_px * 3, m_test)\n Y_test -- test labels represented by a numpy array (vector) of shape (1, m_test)\n num_iterations -- hyperparameter representing the number of iterations to optimize the parameters\n learning_rate -- hyperparameter representing the learning rate used in the update rule of optimize()\n print_cost -- Set to true to print the cost every 100 iterations\n \n Returns:\n d -- dictionary containing information about the model.\n \"\"\"\n \n ### START CODE HERE ###\n \n # initialize parameters with zeros (≈ 1 line of code)\n w, b = 
initialize_with_zeros(X_train.shape[0])\n\n # Gradient descent (≈ 1 line of code)\n parameters, grads, costs = optimize(w, b, X_train, Y_train, num_iterations, learning_rate, print_cost)\n \n # Retrieve parameters w and b from dictionary \"parameters\"\n w = parameters[\"w\"]\n b = parameters[\"b\"]\n \n # Predict test/train set examples (≈ 2 lines of code)\n Y_prediction_test = predict(w, b, X_test)\n Y_prediction_train = predict(w, b, X_train)\n\n ### END CODE HERE ###\n\n # Print train/test Errors\n print(\"train accuracy: {} %\".format(100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100))\n print(\"test accuracy: {} %\".format(100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100))\n\n \n d = {\"costs\": costs,\n \"Y_prediction_test\": Y_prediction_test, \n \"Y_prediction_train\" : Y_prediction_train, \n \"w\" : w, \n \"b\" : b,\n \"learning_rate\" : learning_rate,\n \"num_iterations\": num_iterations}\n \n return d", "_____no_output_____" ] ], [ [ "Run the following cell to train your model.", "_____no_output_____" ] ], [ [ "d = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 2000, learning_rate = 0.005, print_cost = True)", "Cost after iteration 0: 0.693147\nCost after iteration 100: 0.584508\nCost after iteration 200: 0.466949\nCost after iteration 300: 0.376007\nCost after iteration 400: 0.331463\nCost after iteration 500: 0.303273\nCost after iteration 600: 0.279880\nCost after iteration 700: 0.260042\nCost after iteration 800: 0.242941\nCost after iteration 900: 0.228004\nCost after iteration 1000: 0.214820\nCost after iteration 1100: 0.203078\nCost after iteration 1200: 0.192544\nCost after iteration 1300: 0.183033\nCost after iteration 1400: 0.174399\nCost after iteration 1500: 0.166521\nCost after iteration 1600: 0.159305\nCost after iteration 1700: 0.152667\nCost after iteration 1800: 0.146542\nCost after iteration 1900: 0.140872\ntrain accuracy: 99.04306220095694 %\ntest accuracy: 70.0 %\n" ] ], [ [ "**Expected 
Output**: \n\n<table style=\"width:40%\"> \n\n <tr>\n <td> **Cost after iteration 0 ** </td> \n <td> 0.693147 </td>\n </tr>\n <tr>\n <td> <center> $\\vdots$ </center> </td> \n <td> <center> $\\vdots$ </center> </td> \n </tr> \n <tr>\n <td> **Train Accuracy** </td> \n <td> 99.04306220095694 % </td>\n </tr>\n\n <tr>\n <td>**Test Accuracy** </td> \n <td> 70.0 % </td>\n </tr>\n</table> \n\n\n", "_____no_output_____" ], [ "**Comment**: Training accuracy is close to 100%. This is a good sanity check: your model is working and has high enough capacity to fit the training data. Test accuracy is 68%. It is actually not bad for this simple model, given the small dataset we used and that logistic regression is a linear classifier. But no worries, you'll build an even better classifier next week!\n\nAlso, you see that the model is clearly overfitting the training data. Later in this specialization you will learn how to reduce overfitting, for example by using regularization. Using the code below (and changing the `index` variable) you can look at predictions on pictures of the test set.", "_____no_output_____" ] ], [ [ "# Example of a picture that was wrongly classified.\nindex = 1\nplt.imshow(test_set_x[:,index].reshape((num_px, num_px, 3)))\nprint (\"y = \" + str(test_set_y[0,index]) + \", you predicted that it is a \\\"\" + classes[d[\"Y_prediction_test\"][0,index]].decode(\"utf-8\") + \"\\\" picture.\")", "y = 1, you predicted that it is a \"cat\" picture.\n" ] ], [ [ "Let's also plot the cost function and the gradients.", "_____no_output_____" ] ], [ [ "# Plot learning curve (with costs)\ncosts = np.squeeze(d['costs'])\nplt.plot(costs)\nplt.ylabel('cost')\nplt.xlabel('iterations (per hundreds)')\nplt.title(\"Learning rate =\" + str(d[\"learning_rate\"]))\nplt.show()", "_____no_output_____" ] ], [ [ "**Interpretation**:\nYou can see the cost decreasing. It shows that the parameters are being learned. 
However, you see that you could train the model even more on the training set. Try to increase the number of iterations in the cell above and rerun the cells. You might see that the training set accuracy goes up, but the test set accuracy goes down. This is called overfitting. ", "_____no_output_____" ], [ "## 6 - Further analysis (optional/ungraded exercise) ##\n\nCongratulations on building your first image classification model. Let's analyze it further, and examine possible choices for the learning rate $\\alpha$. ", "_____no_output_____" ], [ "#### Choice of learning rate ####\n\n**Reminder**:\nIn order for Gradient Descent to work you must choose the learning rate wisely. The learning rate $\\alpha$ determines how rapidly we update the parameters. If the learning rate is too large we may \"overshoot\" the optimal value. Similarly, if it is too small we will need too many iterations to converge to the best values. That's why it is crucial to use a well-tuned learning rate.\n\nLet's compare the learning curve of our model with several choices of learning rates. Run the cell below. This should take about 1 minute. Feel free also to try different values than the three we have initialized the `learning_rates` variable to contain, and see what happens. 
", "_____no_output_____" ] ], [ [ "learning_rates = [0.01, 0.001, 0.0001]\nmodels = {}\nfor i in learning_rates:\n print (\"learning rate is: \" + str(i))\n models[str(i)] = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 1500, learning_rate = i, print_cost = False)\n print ('\\n' + \"-------------------------------------------------------\" + '\\n')\n\nfor i in learning_rates:\n plt.plot(np.squeeze(models[str(i)][\"costs\"]), label= str(models[str(i)][\"learning_rate\"]))\n\nplt.ylabel('cost')\nplt.xlabel('iterations (hundreds)')\n\nlegend = plt.legend(loc='upper center', shadow=True)\nframe = legend.get_frame()\nframe.set_facecolor('0.90')\nplt.show()", "learning rate is: 0.01\ntrain accuracy: 100.0 %\ntest accuracy: 74.0 %\n\n-------------------------------------------------------\n\nlearning rate is: 0.001\ntrain accuracy: 100.0 %\ntest accuracy: 74.0 %\n\n-------------------------------------------------------\n\nlearning rate is: 0.0001\ntrain accuracy: 100.0 %\ntest accuracy: 74.0 %\n\n-------------------------------------------------------\n\n" ] ], [ [ "**Interpretation**: \n- Different learning rates give different costs and thus different predictions results.\n- If the learning rate is too large (0.01), the cost may oscillate up and down. It may even diverge (though in this example, using 0.01 still eventually ends up at a good value for the cost). \n- A lower cost doesn't mean a better model. You have to check if there is possibly overfitting. It happens when the training accuracy is a lot higher than the test accuracy.\n- In deep learning, we usually recommend that you: \n - Choose the learning rate that better minimizes the cost function.\n - If your model overfits, use other techniques to reduce overfitting. (We'll talk about this in later videos.) \n", "_____no_output_____" ], [ "## 7 - Test with your own image (optional/ungraded exercise) ##\n\nCongratulations on finishing this assignment. 
You can use your own image and see the output of your model. To do that:\n 1. Click on \"File\" in the upper bar of this notebook, then click \"Open\" to go on your Coursera Hub.\n 2. Add your image to this Jupyter Notebook's directory, in the \"images\" folder\n 3. Change your image's name in the following code\n 4. Run the code and check if the algorithm is right (1 = cat, 0 = non-cat)!", "_____no_output_____" ] ], [ [ "## START CODE HERE ## (PUT YOUR IMAGE NAME) \nmy_image = \"my_image.jpg\" # change this to the name of your image file \n## END CODE HERE ##\n\n# We preprocess the image to fit your algorithm.\nfname = \"images/\" + my_image\nimage = np.array(ndimage.imread(fname, flatten=False))\nimage = image/255.\nmy_image = scipy.misc.imresize(image, size=(num_px,num_px)).reshape((1, num_px*num_px*3)).T\nmy_predicted_image = predict(d[\"w\"], d[\"b\"], my_image)\n\nplt.imshow(image)\nprint(\"y = \" + str(np.squeeze(my_predicted_image)) + \", your algorithm predicts a \\\"\" + classes[int(np.squeeze(my_predicted_image)),].decode(\"utf-8\") + \"\\\" picture.\")", "y = 0.0, your algorithm predicts a \"non-cat\" picture.\n" ] ], [ [ "<font color='blue'>\n**What to remember from this assignment:**\n1. Preprocessing the dataset is important.\n2. You implemented each function separately: initialize(), propagate(), optimize(). Then you built a model().\n3. Tuning the learning rate (which is an example of a \"hyperparameter\") can make a big difference to the algorithm. You will see more examples of this later in this course!", "_____no_output_____" ], [ "Finally, if you'd like, we invite you to try different things on this Notebook. Make sure you submit before trying anything. 
Once you submit, things you can play with include:\n - Play with the learning rate and the number of iterations\n - Try different initialization methods and compare the results\n - Test other preprocessings (center the data, or divide each row by its standard deviation)", "_____no_output_____" ], [ "Bibliography:\n- http://www.wildml.com/2015/09/implementing-a-neural-network-from-scratch/\n- https://stats.stackexchange.com/questions/211436/why-do-we-normalize-images-by-subtracting-the-datasets-image-mean-and-not-the-c", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ] ]
4a2e7b9061cdb7fd022296fc484befac61421ffe
36,148
ipynb
Jupyter Notebook
LaneLines-P1.ipynb
demigecko/CarND-LaneLines-P1
4044fff07fdcd47d528de0577ed0402d8944f363
[ "MIT" ]
null
null
null
LaneLines-P1.ipynb
demigecko/CarND-LaneLines-P1
4044fff07fdcd47d528de0577ed0402d8944f363
[ "MIT" ]
null
null
null
LaneLines-P1.ipynb
demigecko/CarND-LaneLines-P1
4044fff07fdcd47d528de0577ed0402d8944f363
[ "MIT" ]
null
null
null
44.029233
647
0.607973
[ [ [ "# Self-Driving Car Engineer Nanodegree\n\n\n## Project: **Finding Lane Lines on the Road** \n***\nIn this project, you will use the tools you learned about in the lesson to identify lane lines on the road. You can develop your pipeline on a series of individual images, and later apply the result to a video stream (really just a series of images). Check out the video clip \"raw-lines-example.mp4\" (also contained in this repository) to see what the output should look like after using the helper functions below. \n\nOnce you have a result that looks roughly like \"raw-lines-example.mp4\", you'll need to get creative and try to average and/or extrapolate the line segments you've detected to map out the full extent of the lane lines. You can see an example of the result you're going for in the video \"P1_example.mp4\". Ultimately, you would like to draw just one line for the left side of the lane, and one for the right.\n\nIn addition to implementing code, there is a brief writeup to complete. The writeup should be completed in a separate file, which can be either a markdown file or a pdf document. There is a [write up template](https://github.com/udacity/CarND-LaneLines-P1/blob/master/writeup_template.md) that can be used to guide the writing process. Completing both the code in the Ipython notebook and the writeup template will cover all of the [rubric points](https://review.udacity.com/#!/rubrics/322/view) for this project.\n\n---\nLet's have a look at our first image called 'test_images/solidWhiteRight.jpg'. 
Run the 2 cells below (hit Shift-Enter or the \"play\" button above) to display the image.\n\n**Note: If, at any point, you encounter frozen display windows or other confounding issues, you can always start again with a clean slate by going to the \"Kernel\" menu above and selecting \"Restart & Clear Output\".**\n\n---", "_____no_output_____" ], [ "**The tools you have are color selection, region of interest selection, grayscaling, Gaussian smoothing, Canny Edge Detection and Hough Tranform line detection. You are also free to explore and try other techniques that were not presented in the lesson. Your goal is piece together a pipeline to detect the line segments in the image, then average/extrapolate them and draw them onto the image for display (as below). Once you have a working pipeline, try it out on the video stream below.**\n\n---\n\n<figure>\n <img src=\"examples/line-segments-example.jpg\" width=\"380\" alt=\"Combined Image\" />\n <figcaption>\n <p></p> \n <p style=\"text-align: center;\"> Your output should look something like this (above) after detecting line segments using the helper functions below </p> \n </figcaption>\n</figure>\n <p></p> \n<figure>\n <img src=\"examples/laneLines_thirdPass.jpg\" width=\"380\" alt=\"Combined Image\" />\n <figcaption>\n <p></p> \n <p style=\"text-align: center;\"> Your goal is to connect/average/extrapolate line segments to get output like this</p> \n </figcaption>\n</figure>", "_____no_output_____" ], [ "**Run the cell below to import some packages. If you get an `import error` for a package you've already installed, try changing your kernel (select the Kernel menu above --> Change Kernel). Still have problems? Try relaunching Jupyter Notebook from the terminal prompt. 
Also, consult the forums for more troubleshooting tips.** ", "_____no_output_____" ], [ "## Import Packages", "_____no_output_____" ] ], [ [ "#importing some useful packages\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport numpy as np\nimport cv2\n%matplotlib inline", "_____no_output_____" ] ], [ [ "## Read in an Image", "_____no_output_____" ] ], [ [ "#reading in an image\nimage = mpimg.imread('test_images/solidWhiteCurve.jpg')\nysize = image.shape[0] # 540\nxsize = image.shape[1] # 960\n#printing out some stats and plotting\nprint('This image is:', type(image), 'with dimensions:', image.shape)\nplt.imshow(image) # if you wanted to show a single color channel image called 'gray', for example, call as plt.imshow(gray, cmap='gray')", "_____no_output_____" ] ], [ [ "## Ideas for Lane Detection Pipeline\n\n**Some OpenCV functions (beyond those introduced in the lesson) that might be useful for this project are:**\n\n`cv2.inRange()` for color selection \n`cv2.fillPoly()` for regions selection \n`cv2.line()` to draw lines on an image given endpoints \n`cv2.addWeighted()` to coadd / overlay two images\n`cv2.cvtColor()` to grayscale or change color\n`cv2.imwrite()` to output images to file \n`cv2.bitwise_and()` to apply a mask to an image\n\n**Check out the OpenCV documentation to learn about these and discover even more awesome functionality!**", "_____no_output_____" ], [ "## My Thought Process \n\n***\nIn the project report, I will present two methods that I developed. \n\n### Method 1 \n\nImage Pre-processing: grayscale -> canny edge detection -> excessive gaussian blur to provide the single-line lane marks. \n\nMethodology: using the maximum brightness detection in each rows in the defined regions of interest, which are left and right. After finding the points in (row, column) that indicating the lanes, I used linear regression to interpolate/extrapolate the lanes. I found that method 1 is more stable than the Method 2. 
\n\n### Method 2 \nImage Pre-processing: grayscale -> canny edge detection -> gentle gaussian blur to outline the lanes. \n\nMethodology: Use hough transformation to find all the lines in the defined regions of interest, which are left and right. To filter the slope of lines in a reasonable range, and interpolate/extrapolate selected lines to the top and bottom highlights. Average the top and bottom in x-position and link them as two single lines. \n\nTest sample images: this method is much more sensitive to the parameters I used. s\nTest sample videos: The segmented lane is shaky. \n", "_____no_output_____" ], [ "## Helper Functions\n\nBelow are some helper functions to help get you started. They should look familiar from the lesson!", "_____no_output_____" ] ], [ [ "import math\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport numpy as np\nimport cv2\n%matplotlib inline\n\ndef grayscale(img):\n return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n # Or use BGR2GRAY if you read an image with cv2.imread()\n # return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n \ndef canny(img, low_threshold, high_threshold):\n return cv2.Canny(img, low_threshold, high_threshold)\n\ndef gaussian_blur(img, kernel_size):\n return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)\n\ndef region_of_interest(img, vertices):\n #defining a blank mask to start with\n mask = np.zeros_like(img) \n #defining a 3 channel or 1 channel color to fill the mask with depending on the input image\n if len(img.shape) > 2:\n channel_count = img.shape[2] # i.e. 
3 or 4 depending on your image\n ignore_mask_color = (255,) * channel_count\n else:\n ignore_mask_color = 255\n #filling pixels inside the polygon defined by \"vertices\" with the fill color \n cv2.fillPoly(mask, vertices, ignore_mask_color)\n #returning the image only where mask pixels are nonzero\n masked_image = cv2.bitwise_and(img, mask)\n return masked_image\n\ndef draw_lines(img, lines, color=[255, 0, 0], thickness=2):\n for line in lines:\n for x1,y1,x2,y2 in line:\n cv2.line(img, (x1, y1), (x2, y2), color, thickness)\n\ndef hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):\n lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)\n line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)\n draw_lines(line_img, lines)\n return line_img\n\n# Python 3 has support for cool math symbols.\n\ndef weighted_img(img, initial_img, α=0.8, β=1., γ=0.):\n return cv2.addWeighted(initial_img, α, img, β, γ) \n\ndef plot(img, cmap='gray'):\n plt.imshow(img, cmap=cmap)\n plt.show()", "_____no_output_____" ] ], [ [ "## Test Images\n\nBuild your pipeline to work on the images in the directory \"test_images\" \n**You should make sure your pipeline works well on these images before you try the videos.**", "_____no_output_____" ] ], [ [ "import os\ntest_image_list= os.listdir(\"test_images/\")\n\nfor img_name in test_image_list:\n print (img_name[:-4])\n test_img = mpimg.imread(\"test_images/\" + img_name[:-4]+'.jpg') ", "_____no_output_____" ] ], [ [ "## Build a Lane Finding Pipeline - Method 1\n\n\n\nBuild the pipeline and run your solution on all test_images. 
Make copies into the `test_images_output` directory, and you can use the images in your writeup report.\n\nTry tuning the various parameters, especially the low and high Canny thresholds as well as the Hough lines parameters.", "_____no_output_____" ] ], [ [ "# TODO: Build your pipeline that will draw lane lines on the test_images\n# then save them to the test_images_output directory.\n\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport numpy as np\nimport cv2\n%matplotlib inline\n\nimport os\ntest_img_list= os.listdir(\"test_images/\")\n\nfor img_name in test_img_list:\n print (img_name)\n test_img = mpimg.imread(\"test_images/\" + img_name)\n\n# Grab the x and y size and make a copy of the image\n ysize = test_img.shape[0] # 540\n xsize = test_img.shape[1] # 960\n \n# Applies the Grayscale transform \n gray_img=grayscale(test_img)\n #print(\"Convert to grayscale:\")\n #plot(gray_img)\n #plt.imsave(\"test_images_output/\" + img_name[:-4] + \"_gray.jpg\", gray_img, cmap='gray')\n \n# Applies the Canny transform\n low_threshold=200\n high_threshold=250\n #print(\"Apply Canny transform\")\n canny_img=canny(gray_img, low_threshold, high_threshold)\n #plot(canny_img, cmap='gray')\n #plt.imsave(\"test_images_output/\" + img_name[:-4] + \"_canny.jpg\", canny_img, cmap='gray')\n \n# Apply a Gaussian Noise kernel\n #print(\"Apply Gaussian Noise\")\n blurred_img=gaussian_blur(canny_img, 51)\n #plot(blurred_img)\n #plt.imsave(\"test_images_output/\" + img_name[:-4] + \"_blurred.jpg\", blurred_img, cmap='gray')\n \n# Apply an image mask = left and right \n vertices_right= np.array([[[xsize*0.5,ysize*0.6],[xsize*0.55,ysize*0.6],[xsize*0.95,ysize*0.9],[xsize*0.5,ysize*0.9]]], dtype=np.int32)\n vertices_left= np.array([[[xsize*0.1,ysize*0.9],[xsize*0.45,ysize*0.6],[xsize*0.5,ysize*0.6],[xsize*0.5,ysize*0.9]]], dtype=np.int32)\n region_right=region_of_interest(blurred_img,vertices_right)\n region_left=region_of_interest(blurred_img,vertices_left)\n\n# Find 
the max brightness after left_mask image\n data_left=np.argmax(region_left,axis=1)\n x_left=np.arange(len(data_left));\n data_left_extracted= data_left[data_left > 10]\n x_left_extracted=x_left[data_left > 10]\n fitplot_left=np.poly1d(np.polyfit(x_left_extracted,data_left_extracted, 1))\n\n# Find the max brightness after right_mask image\n data_right=np.argmax(region_right,axis=1)\n x_right=np.arange(len(data_right));\n data_right_extracted= data_right[data_right > 10]\n x_right_extracted=x_right[data_right > 10]\n fitplot_right=np.poly1d(np.polyfit(x_right_extracted,data_right_extracted, 1))\n\n# extroploate the points of interest\n line_image = np.copy(test_img)*0 # creating a blank to draw lines on\n lines=[[[int(fitplot_left(ysize)),int(ysize),int(fitplot_left(ysize*0.6)),int(ysize*0.6)],[int(fitplot_right(ysize)),int(ysize),int(fitplot_right(ysize*0.6)),int(ysize*0.6)]]]\n draw_lines(line_image, lines, color=[255, 0, 0], thickness=10)\n lines_edges = weighted_img(line_image, test_img, 0.9, 1, 0) \n #plt.imshow(lines_edges, cmap='gray')\n #plt.show()\n plot(lines_edges, cmap='gray')\n #plt.imsave(\"test_images_output/\" + img_name[:-4] + \"_final.jpg\", lines_edges, cmap='gray')\n", "_____no_output_____" ] ], [ [ "## Build a Lane Finding Pipeline - Method 2", "_____no_output_____" ] ], [ [ "# TODO: Build your pipeline that will draw lane lines on the test_images\n# then save them to the test_images_output directory.\n\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport numpy as np\nimport cv2\n%matplotlib inline\n\nimport os\ntest_img_list= os.listdir(\"test_images/\")\n\nfor img_name in test_img_list:\n print (img_name)\n test_img = mpimg.imread(\"test_images/\" + img_name)\n test_image = mpimg.imread('test_images/solidWhiteCurve.jpg')\n\n# Grab the x and y size and make a copy of the image\n ysize = test_img.shape[0] # 540\n xsize = test_img.shape[1] # 960\n\n# Applies the Grayscale transform \n gray_img=grayscale(test_img)\n 
#print(\"Convert to grayscale:\")\n #plot(gray_img)\n #plt.imsave(\"test_images_output/\" + img_name[:-4] + \"_gray_2.jpg\", gray_img, cmap='gray')\n \n# Applies the Canny transform\n low_threshold=200\n high_threshold=240\n #print(\"Apply Canny transform\")\n canny_img=canny(gray_img, low_threshold, high_threshold)\n #plot(canny_img, cmap='gray')\n #plt.imsave(\"test_images_output/\" + img_name[:-4] + \"_canny.jpg_2\", canny_img, cmap='gray')\n\n# Apply a Gaussian Noise kernel\n print(\"Apply Gaussian Blur\")\n blurred_img=gaussian_blur(canny_img, 11)\n #plot(blurred_img)\n #plt.imsave(\"test_images_output/\" + img_name[:-4] + \"_blurred.jpg_2\", blurred_img, cmap='gray')\n \n# Applies an image mask.\n vertices_right= np.array([[[xsize*0.5,ysize*0.55],[xsize*0.55,ysize*0.55],[xsize*0.9,ysize],[xsize*0.5,ysize]]], dtype=np.int32)\n vertices_left= np.array([[[xsize*0.1,ysize],[xsize*0.45,ysize*0.55],[xsize*0.5,ysize*0.55],[xsize*0.5,ysize]]], dtype=np.int32)\n region_right=region_of_interest(blurred_img,vertices_right)\n region_left=region_of_interest(blurred_img,vertices_left)\n\n# Apply Hough Lines\n rho = 1 # distance resolution in pixels of the Hough grid\n theta = np.pi/180 # angular resolution in radians of the Hough grid\n threshold = 5 # minimum number of votes (intersections in Hough grid cell)\n min_line_length = 100 #minimum number of pixels making up a line\n max_line_gap = 100 # maximum gap in pixels between connectable line segments\n line_image = np.copy(test_img)*0 # creating a blank to draw lines on\n\n# Average the position of each of the lines and extrapolate to the top and bottom of the lane. 
\n lines_right = cv2.HoughLinesP(region_right, rho, theta, threshold, np.array([]),min_line_length, max_line_gap)\n lines_left = cv2.HoughLinesP(region_left, rho, theta, threshold, np.array([]),min_line_length, max_line_gap)\n line_correct_right=[]\n line_correct_left=[]\n slope_right=[]\n slope_left=[]\n x1_right=[]\n x2_right=[]\n x1_left=[]\n x2_left=[]\n\n for line in lines_right:\n for x1,y1,x2,y2 in line:\n #slope= (y1-y2)/(x1-x2)\n [slope,b] = np.polyfit([y1, y2],[x1, x2], 1)\n if slope < 2 and slope > 1: \n #line_correct_right.append(line) \n slope_right.append(slope) \n f=np.poly1d([slope,b])\n x1_right.append(f(ysize*0.6))\n x2_right.append(f(ysize))\n for line in lines_left:\n for x1,y1,x2,y2 in line:\n #slope= (y1-y2)/(x1-x2)\n [slope,b] = np.polyfit([y1, y2],[x1, x2],1)\n if slope < -1 and slope > -2: \n #line_correct_left.append(line) \n slope_left.append(slope)\n f=np.poly1d([slope,b])\n x1_left.append(f(ysize*0.6))\n x2_left.append(f(ysize))\n \n line_correct=[[[int(np.mean(x1_right)),int(ysize*0.6),int(np.mean(x2_right)),int(ysize)],[int(np.mean(x1_left)),int(ysize*0.6),int(np.mean(x2_left)),int(ysize)]]]\n draw_plot=draw_lines(line_image, line_correct, color=[255, 0, 0], thickness=10)\n lines_edges = cv2.addWeighted(test_img, 0.8, line_image, 1, 0) \n \n #print(line_correct)\n #print(slope_right)\n #print(slope_left)\n plot(lines_edges, cmap='gray')\n #plt.imsave(\"test_images_output/\" + img_name[:-4] + \"_final_2.jpg\", lines_edges, cmap='gray')", "_____no_output_____" ] ], [ [ "## Test on Videos\n\nYou know what's cooler than drawing lanes over images? Drawing lanes over video!\n\nWe can test our solution on two provided videos:\n\n`solidWhiteRight.mp4`\n\n`solidYellowLeft.mp4`\n\n**Note: if you get an import error when you run the next cell, try changing your kernel (select the Kernel menu above --> Change Kernel). Still have problems? Try relaunching Jupyter Notebook from the terminal prompt. 
Also, consult the forums for more troubleshooting tips.**\n\n**If you get an error that looks like this:**\n```\nNeedDownloadError: Need ffmpeg exe. \nYou can download it by calling: \nimageio.plugins.ffmpeg.download()\n```\n**Follow the instructions in the error message and check out [this forum post](https://discussions.udacity.com/t/project-error-of-test-on-videos/274082) for more troubleshooting tips across operating systems.**", "_____no_output_____" ] ], [ [ "# Import everything needed to edit/save/watch video clips\nfrom moviepy.editor import VideoFileClip\nfrom IPython.display import HTML", "_____no_output_____" ] ], [ [ "## Pipeline 1 based on Method 1", "_____no_output_____" ] ], [ [ "# Pipeline 1 \n\ndef process_image(image):\n # NOTE: The output you return should be a color image (3 channel) for processing video below\n # TODO: put your pipeline here,\n # you should return the final output (image where lines are drawn on lanes)\n \n t = 60\n low_threshold=200 #100\n high_threshold=250\n \n # make a copy\n test_img = np.copy(image)\n ysize = test_img.shape[0] # 540\n xsize = test_img.shape[1] # 960\n # PIPELINE\n # 0 convert to grayscale\n gray_img=grayscale(test_img) \n # 1 apply the Canny transform\n canny_img=canny(gray_img, low_threshold, high_threshold)\n # 2 apply blur\n blurred_img = gaussian_blur(canny_img, 51)\n # 3 apply ROI mask\n # Apply an image mask = left and right \n vertices_right= np.array([[[xsize*0.5,ysize*0.65],[xsize*0.55,ysize*0.65],[xsize*0.95,ysize*0.85],[xsize*0.5,ysize*0.85]]], dtype=np.int32)\n vertices_left= np.array([[[xsize*0.1,ysize*0.85],[xsize*0.45,ysize*0.65],[xsize*0.5,ysize*0.65],[xsize*0.5,ysize*0.85]]], dtype=np.int32)\n region_right=region_of_interest(blurred_img,vertices_right)\n region_left=region_of_interest(blurred_img,vertices_left)\n\n # Find the max brightness after left_mask image\n data_left=np.argmax(region_left,axis=1)\n x_left=np.arange(len(data_left));\n data_left_extracted= data_left[data_left > 10]\n 
x_left_extracted=x_left[data_left > 10]\n fitplot_left=np.poly1d(np.polyfit(x_left_extracted,data_left_extracted, 1))\n\n # Find the max brightness after right_mask image\n data_right=np.argmax(region_right,axis=1)\n x_right=np.arange(len(data_right));\n data_right_extracted= data_right[data_right > 10]\n x_right_extracted=x_right[data_right > 10]\n fitplot_right=np.poly1d(np.polyfit(x_right_extracted,data_right_extracted, 1))\n \n # 4 get lines image \n # extroploate the points of interest\n line_image = np.copy(test_img)*0 # creating a blank to draw lines on\n lines=[[[int(fitplot_left(ysize)),int(ysize),int(fitplot_left(ysize*0.65)),int(ysize*0.65)],[int(fitplot_right(ysize)),int(ysize),int(fitplot_right(ysize*0.65)),int(ysize*0.65)]]]\n draw_lines(line_image, lines, color=[255, 0, 0], thickness=10)\n \n # 5 superimpose result on top of original image\n final_img = weighted_img(line_image, test_img, 0.9, 1, 0)\n \n #final_img = weighted_img(lines_edges, test_img)\n return final_img", "_____no_output_____" ] ], [ [ "## Pipeline 2 based on Method 2", "_____no_output_____" ] ], [ [ "# Pipeline 2\n\ndef process_image(image):\n # NOTE: The output you return should be a color image (3 channel) for processing video below\n # TODO: put your pipeline here,\n # you should return the final output (image where lines are drawn on lanes)\n \n t = 60\n low_threshold=200\n high_threshold=240\n \n # make a copy\n test_img = np.copy(image)\n ysize = test_img.shape[0] # 540\n xsize = test_img.shape[1] # 960\n # PIPELINE\n # 0 convert to grayscale\n gray_img=grayscale(test_img) \n # 1 apply the Canny transform\n canny_img=canny(gray_img, low_threshold, high_threshold)\n # 2 apply blur\n blurred_img = gaussian_blur(canny_img, 11)\n # 3 apply ROI mask\n vertices_right= np.array([[[xsize*0.5,ysize*0.55],[xsize*0.55,ysize*0.55],[xsize*0.9,ysize],[xsize*0.5,ysize]]], dtype=np.int32)\n vertices_left= 
np.array([[[xsize*0.1,ysize],[xsize*0.45,ysize*0.55],[xsize*0.5,ysize*0.55],[xsize*0.5,ysize]]], dtype=np.int32)\n region_right=region_of_interest(blurred_img,vertices_right)\n region_left=region_of_interest(blurred_img,vertices_left)\n\n # 4 apply Hough Lines\n rho = 1 # distance resolution in pixels of the Hough grid\n theta = np.pi/180 # angular resolution in radians of the Hough grid\n threshold = 5 # minimum number of votes (intersections in Hough grid cell)\n min_line_length = 100 #minimum number of pixels making up a line\n max_line_gap = 100 # maximum gap in pixels between connectable line segments\n \n # 5 Average the position of each of the lines and extrapolate to the top and bottom of the lane. \n lines_right = cv2.HoughLinesP(region_right, rho, theta, threshold, np.array([]),min_line_length, max_line_gap)\n lines_left = cv2.HoughLinesP(region_left, rho, theta, threshold, np.array([]),min_line_length, max_line_gap)\n line_correct_right=[]\n line_correct_left=[]\n slope_right=[]\n slope_left=[]\n x1_right=[]\n x2_right=[]\n x1_left=[]\n x2_left=[]\n\n for line in lines_right:\n for x1,y1,x2,y2 in line:\n #slope= (y1-y2)/(x1-x2)\n [slope,b] = np.polyfit([y1, y2],[x1, x2], 1)\n if slope < 2 and slope > 1: \n #line_correct_right.append(line) \n slope_right.append(slope) \n f=np.poly1d([slope,b])\n x1_right.append(f(ysize*0.6))\n x2_right.append(f(ysize))\n for line in lines_left:\n for x1,y1,x2,y2 in line:\n #slope= (y1-y2)/(x1-x2)\n [slope,b] = np.polyfit([y1, y2],[x1, x2],1)\n if slope < -1 and slope > -2: \n #line_correct_left.append(line) \n slope_left.append(slope)\n f=np.poly1d([slope,b])\n x1_left.append(f(ysize*0.6))\n x2_left.append(f(ysize))\n \n # 6 get lines image \n line_image = np.copy(image)*0 # creating a blank to draw lines on\n line_correct=[[[int(np.mean(x1_right)),int(ysize*0.6),int(np.mean(x2_right)),int(ysize)],[int(np.mean(x1_left)),int(ysize*0.6),int(np.mean(x2_left)),int(ysize)]]]\n draw_plot=draw_lines(line_image, line_correct, 
color=[255, 0, 0], thickness=10)\n \n # 7 superimpose result on top of original image\n final_img = weighted_img(line_image, test_img, 0.9, 1, 0)\n \n #final_img = weighted_img(lines_edges, test_img)\n return final_img", "_____no_output_____" ] ], [ [ "Let's try the one with the solid white lane on the right first ...", "_____no_output_____" ] ], [ [ "image = mpimg.imread('test_images/solidWhiteCurve.jpg')\nabc=process_image(image)\nplt.imshow(abc)", "_____no_output_____" ], [ "white_output = 'test_videos_output/solidWhiteRight.mp4'\n## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video\n## To do so add .subclip(start_second,end_second) to the end of the line below\n## Where start_second and end_second are integer values representing the start and end of the subclip\n## You may also uncomment the following line for a subclip of the first 5 seconds\n##clip1 = VideoFileClip(\"test_videos/solidWhiteRight.mp4\").subclip(0,5)\nclip1 = VideoFileClip(\"test_videos/solidWhiteRight.mp4\")\nwhite_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!!\n%time white_clip.write_videofile(white_output, audio=False)", "_____no_output_____" ] ], [ [ "Play the video inline, or if you prefer find the video in your filesystem (should be in the same directory) and play it in your video player of choice.", "_____no_output_____" ] ], [ [ "HTML(\"\"\"\n<video width=\"960\" height=\"540\" controls>\n <source src=\"{0}\">\n</video>\n\"\"\".format(white_output))", "_____no_output_____" ] ], [ [ "## Improve the draw_lines() function\n\n**At this point, if you were successful with making the pipeline and tuning parameters, you probably have the Hough line segments drawn onto the road, but what about identifying the full extent of the lane and marking it clearly as in the example video (P1_example.mp4)? 
Think about defining a line to run the full length of the visible lane based on the line segments you identified with the Hough Transform. As mentioned previously, try to average and/or extrapolate the line segments you've detected to map out the full extent of the lane lines. You can see an example of the result you're going for in the video \"P1_example.mp4\".**\n\n**Go back and modify your draw_lines function accordingly and try re-running your pipeline. The new output should draw a single, solid line over the left lane line and a single, solid line over the right lane line. The lines should start from the bottom of the image and extend out to the top of the region of interest.**", "_____no_output_____" ], [ "Now for the one with the solid yellow lane on the left. This one's more tricky!", "_____no_output_____" ] ], [ [ "yellow_output = 'test_videos_output/solidYellowLeft.mp4'\n## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video\n## To do so add .subclip(start_second,end_second) to the end of the line below\n## Where start_second and end_second are integer values representing the start and end of the subclip\n## You may also uncomment the following line for a subclip of the first 5 seconds\n##clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4').subclip(0,5)\nclip2 = VideoFileClip('test_videos/solidYellowLeft.mp4')\nyellow_clip = clip2.fl_image(process_image)\n%time yellow_clip.write_videofile(yellow_output, audio=False)", "_____no_output_____" ], [ "HTML(\"\"\"\n<video width=\"960\" height=\"540\" controls>\n <source src=\"{0}\">\n</video>\n\"\"\".format(yellow_output))", "_____no_output_____" ] ], [ [ "## Optional Challenge\n\nTry your lane finding pipeline on the video below. Does it still work? Can you figure out a way to make it more robust? 
If you're up for the challenge, modify your pipeline so it works with this video and submit it along with the rest of your project!", "_____no_output_____" ] ], [ [ "challenge_output = 'test_videos_output/challenge.mp4'\n## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video\n## To do so add .subclip(start_second,end_second) to the end of the line below\n## Where start_second and end_second are integer values representing the start and end of the subclip\n## You may also uncomment the following line for a subclip of the first 5 seconds\n##clip3 = VideoFileClip('test_videos/challenge.mp4').subclip(0,5)\nclip3 = VideoFileClip('test_videos/challenge.mp4')\nchallenge_clip = clip3.fl_image(process_image)\n%time challenge_clip.write_videofile(challenge_output, audio=False)", "_____no_output_____" ] ], [ [ "## Reflection\n\nPlease see all 3 videos in the folder incluing the challenge. \n\nMethod 1 is more robust than Method 2, as you can see that most of the time, method 1 can find the lanes accurately in both solidWhiteRight.mp4 and solidYellowLeft.mp4. \n\nTo manage this challenge.mp4 is the best practice to validate the robustness of the pipeline. There are a few improvement I made to make finding the lanes in the challenge video based on Method 1. Still it is not preferct. \n\nHere are some ideas of improvement. \n\n(1) The bottom of images was covered by the body of vehicle that certainly disturb the pipeline process. To avoid that, simply to make the region of interest smaller and avoid that area of image.\n\n(2) I also noticed that the pipeline failed when car was driven under tree shade, so the next step is to sharpen the lanes in such image. \n\n(3) When the road condition changes, from old gray road to new black asphalt road, there are the horizontal lines/features to interfere the original algorithm. Therefore, if I combine method 1 and method 2, then I could have avoid this. 
\n\n(4) Method 1 can be exptended to curved lane by making the trajectory. \n\nOverall, this is a fun project, and I sharpen my basic python skills in a short time.\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
4a2e82ff2090e022de9381008becd772f3c33b6d
4,633
ipynb
Jupyter Notebook
PIN/PIN.ipynb
nicolezattarin/LOB-feature-analysis
c73735d887c146a85f24267de7789d689a6c4311
[ "Apache-2.0" ]
1
2022-03-17T14:58:37.000Z
2022-03-17T14:58:37.000Z
PIN/PIN.ipynb
nicolezattarin/LOB-feature-analysis
c73735d887c146a85f24267de7789d689a6c4311
[ "Apache-2.0" ]
null
null
null
PIN/PIN.ipynb
nicolezattarin/LOB-feature-analysis
c73735d887c146a85f24267de7789d689a6c4311
[ "Apache-2.0" ]
1
2022-03-21T06:00:09.000Z
2022-03-21T06:00:09.000Z
34.574627
511
0.561407
[ [ [ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt", "_____no_output_____" ] ], [ [ "## PIN computation\nTo compute the PIN of a given day, we need to optimize the product of the likelihood computed on each time interval in the day.\nIn particular we fix a time interval of 5 minutes to discretize time, and since we are dealing with the dta of a single trade day we only comppute the corresponding PIN, without further analysis of its time evolution.\n\nNote that this problem must be approached by taking particular care about the optimization method choosen. We tested all the methods from scipy.optimize.minimize for bounded problems, both gradient-based and gredient-free, but most of the results exhibited high dependence on the initial guess for the set of parameters. We then choose to apply powell method, which is a gradient-free method, since it is the only one which actually exhibits an evolution and results to be unbiased by the initial point.\n", "_____no_output_____" ] ], [ [ "def likelihood(x, bid, ask, T): #x = [alpha, delta, eps, mu]\n \"\"\"\n likelihood function for the model\n args:\n x: parameters of the model\n bid: observation of the bid side\n ask: observation of the ask side\n T: time bins\n \"\"\"\n #compute likelihood with Ealsy's (15) notation\n from scipy.stats import poisson\n likelihood = (1-x[0])*poisson.pmf(k=bid,mu=x[2]*T)*poisson.pmf(k=ask,mu=x[2]*T)+\\\n +x[0]*x[1]*poisson.pmf(k=bid,mu=x[2]*T)*poisson.pmf(k=ask,mu=(x[2]+x[3])*T)+\\\n +x[0]*(1-x[1])*poisson.pmf(k=bid,mu=(x[2]+x[3])*T)*poisson.pmf(k=ask,mu=x[2]*T)\n return likelihood\n\ndef loss (x, bid, ask, T):\n \"\"\"\n loss function for the model\n args:\n x: parameters of the model (to train)\n bid: list of observations of the bid side\n ask: list of observations of the ask side\n T: time bin width (assumed the same for each bin)\n \"\"\"\n prod=[]\n #restricting the loss function to values which do not kill the output\n for b, a in zip(bid, ask):\n 
l=likelihood(x, b, a, T)\n if l>0: prod.append(l)\n else: continue\n return -np.prod(prod)", "_____no_output_____" ], [ "from scipy.optimize import minimize\nfrom tqdm import tqdm\nfrom datetime import timedelta\ntime_delta = timedelta(minutes=1)\n\noccurrences = pd.read_csv(\"../data_cleaned/occurrences.csv\")\nnp.random.seed(0)\nr=minimize(loss, x0=np.random.uniform(size=4),#\n args=(occurrences['bid_observations'], occurrences['ask_observations'], time_delta.total_seconds()),\n method='powell', bounds=[(0, 1), (0, 1), (0, None), (0, None)])", "_____no_output_____" ], [ "params = {'alpha': r.x[0], 'delta': r.x[0], 'eps': r.x[0], 'mu': r.x[0]}\nPIN = params['alpha']*params['mu']/(params['alpha']*params['mu']+2*params['eps'])\n\nprint('PIN: {:.2f}'.format(PIN))\nprint('alpha: {:.2f}'.format(params['alpha']))\nprint('delta: {:.2f}'.format(params['delta']))", "PIN: 0.24\nalpha: 0.64\ndelta: 0.64\n" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
4a2e83c2f16743cc0e6c86e792ef6c03bf15e8ad
15,295
ipynb
Jupyter Notebook
4_2_Robot_Localization/6_1. Move Function, exercise.ipynb
ramanpreet9/CVND_udacity
d70e3dd897c463eb594e7804bc4c323cc7b4a704
[ "MIT" ]
null
null
null
4_2_Robot_Localization/6_1. Move Function, exercise.ipynb
ramanpreet9/CVND_udacity
d70e3dd897c463eb594e7804bc4c323cc7b4a704
[ "MIT" ]
5
2021-03-19T01:13:24.000Z
2022-03-11T23:49:57.000Z
4_2_Robot_Localization/6_1. Move Function, exercise.ipynb
ramanpreet9/CVND_udacity
d70e3dd897c463eb594e7804bc4c323cc7b4a704
[ "MIT" ]
null
null
null
78.435897
9,628
0.806146
[ [ [ "# Move Function\n\nNow that you know how a robot uses sensor measurements to update its idea of its own location, let's see how we can incorporate motion into this location. In this notebook, let's go over the steps a robot takes to help localize itself from an initial, uniform distribution to sensing, moving and updating that distribution.\n\nWe include the `sense` function that you've seen, which updates an initial distribution based on whether a robot senses a grid color: red or green. \n\nNext, you're tasked with writing a function `move` that incorporates motion into the distribution. As seen below, **one motion `U= 1` to the right, causes all values in a distribution to shift one grid cell to the right.**\n\n<img src='images/motion_1.png' width=50% height=50% />\n", "_____no_output_____" ], [ "First let's include our usual resource imports and display function.", "_____no_output_____" ] ], [ [ "# importing resources\nimport matplotlib.pyplot as plt\nimport numpy as np", "_____no_output_____" ] ], [ [ "A helper function for visualizing a distribution.", "_____no_output_____" ] ], [ [ "def display_map(grid, bar_width=0.9):\n if(len(grid) > 0):\n x_labels = range(len(grid))\n plt.bar(x_labels, height=grid, width=bar_width, color='b')\n plt.xlabel('Grid Cell')\n plt.ylabel('Probability')\n plt.ylim(0, 1) # range of 0-1 for probability values \n plt.title('Probability of the robot being at each cell in the grid')\n plt.xticks(np.arange(min(x_labels), max(x_labels)+1, 1))\n plt.show()\n else:\n print('Grid is empty')\n", "_____no_output_____" ] ], [ [ "You are given the initial variables and the complete `sense` function, below.", "_____no_output_____" ] ], [ [ "# given initial variables\np=[0, 1, 0, 0, 0]\n# the color of each grid cell in the 1D world\nworld=['green', 'red', 'red', 'green', 'green']\n# Z, the sensor reading ('red' or 'green')\nZ = 'red'\npHit = 0.6\npMiss = 0.2\n\n# You are given the complete sense function\ndef sense(p, Z):\n ''' Takes in 
a current probability distribution, p, and a sensor reading, Z.\n Returns a *normalized* distribution after the sensor measurement has been made, q.\n This should be accurate whether Z is 'red' or 'green'. '''\n q=[]\n # loop through all grid cells\n for i in range(len(p)):\n # check if the sensor reading is equal to the color of the grid cell\n # if so, hit = 1\n # if not, hit = 0\n hit = (Z == world[i])\n q.append(p[i] * (hit * pHit + (1-hit) * pMiss))\n \n # sum up all the components\n s = sum(q)\n # divide all elements of q by the sum to normalize\n for i in range(len(p)):\n q[i] = q[i] / s\n return q\n\n# Commented out code for measurements\n# for k in range(len(measurements)):\n# p = sense(p, measurements)\n", "_____no_output_____" ] ], [ [ "### QUIZ: Program a function that returns a new distribution q, shifted to the right by the motion (U) units. \n\nThis function should shift a distribution with the motion, U. Keep in mind that this world is cyclic and that if U=0, q should be the same as the given p. You should see all the values in `p` are moved to the right by 1, for U=1.", "_____no_output_____" ] ], [ [ "\n## TODO: Complete this move function so that it shifts a probability distribution, p\n## by a given motion, U\ndef move(p, U):\n q=[]\n # Your code here\n if U == 0:\n return p\n q = p[-U:]\n q.extend(p[:-U])\n return q\n\np = move(p,4)\nprint(p)\ndisplay_map(p)", "[1, 0, 0, 0, 0]\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
4a2e8d5dc1506a667a1a2187d4c4098e4858315f
10,020
ipynb
Jupyter Notebook
DevelopmentNotebooks/ds1000de-KISS.ipynb
jed-frey/python-ds1000de
039800c84f683123bafde2cffe7948263543c7f7
[ "BSD-3-Clause" ]
null
null
null
DevelopmentNotebooks/ds1000de-KISS.ipynb
jed-frey/python-ds1000de
039800c84f683123bafde2cffe7948263543c7f7
[ "BSD-3-Clause" ]
null
null
null
DevelopmentNotebooks/ds1000de-KISS.ipynb
jed-frey/python-ds1000de
039800c84f683123bafde2cffe7948263543c7f7
[ "BSD-3-Clause" ]
null
null
null
17.860963
79
0.44491
[ [ [ "from ds1000de import DS1000DE\nscope=DS1000DE(\"USB0\")\nr = scope.inst.query\nw = scope.inst.write", "_____no_output_____" ], [ "def Q(*args):\n return r(\":\"+\":\".join(args)+\"?\")", "_____no_output_____" ], [ "Q(\"MEASURE\", \"VPP\")", "_____no_output_____" ], [ "Q(\"TIMEBASE\", \"MODE\")", "_____no_output_____" ], [ "Q(\"TIMEBASE\", \"SCALE\")", "_____no_output_____" ], [ "from mhs5200 import MHS5200\nport = \"COM12\" # Serial port.\nsignal_gen = MHS5200(port=port)\n\nchan1 = signal_gen.channels[0]", "_____no_output_____" ], [ "chan1.frequency = 2\nchan1.amplitude = 20", "_____no_output_____" ], [ "grid_t = 12\ngrid_v = 8\ncycles = 2", "_____no_output_____" ], [ "w(\":TIMEBASE:MODE MAIN\")", "_____no_output_____" ], [ "period = 1/chan1.frequency # Seconds", "_____no_output_____" ], [ "scale_val = period*cycles/grid_t", "_____no_output_____" ], [ "w(\":TIMEBASE:SCALE {}\".format(scale_val))", "_____no_output_____" ], [ "w(\":CHAN2:DISP OFF\")", "_____no_output_____" ], [ "w(\":CHAN2:PROB 1\")", "_____no_output_____" ], [ "w(\":CHAN2:PROB 1\")", "_____no_output_____" ], [ "scale_pct = 0.8\nscale = chan1.amplitude/scale_pct/grid_v\nw(\":CHAN1:SCALE {}\".format(scale))", "_____no_output_____" ], [ "w(\":CHAN1:OFFSET 0\")", "_____no_output_____" ], [ "class Measure(object):\n def __init__(self, scope):\n self.scope = scope\n \n @property\n def inst(self):\n return self.scope.inst\n \n @property\n def _kw_(self):\n return self.__class__.__name__.upper()\n \n \n def _write_(self, cmd, arg=None):\n if arg is None:\n self.inst.write(\":{}:{}\".format(self._kw_, cmd))\n else:\n self.inst.write(\":{}:{} {}\".format(self._kw_, cmd, arg))\n \n def _query_(self, cmd):\n return self.inst.query(\":{}:{}?\".format(self._kw_, cmd))\n \n def clear(self):\n self._write_(\"clear\")", "_____no_output_____" ], [ "measurements = [\n 'vpp',\n 'vmax',\n 'vmin',\n 'vamplitude',\n 'vtop',\n 'vbase',\n 'vaverage',\n 'vrms',\n 'overshoot',\n 'preshoot',\n 'frequency',\n 
'risetime',\n 'falltime',\n 'period',\n 'pwidth',\n 'nwidth',\n 'pdutycycle',\n 'ndutycycle',\n 'pdelay',\n 'ndelay',\n]", "_____no_output_____" ], [ "def measurement_factory(measurement):\n def measure(self):\n value_raw = self._query_(measurement)\n return float(value_raw)\n return measure", "_____no_output_____" ], [ "for measurement in measurements:\n fcn = measurement_factory(measurement)\n prop = property(fcn)\n setattr(Measure, measurement, prop)", "_____no_output_____" ], [ "m = Measure(scope)", "_____no_output_____" ], [ "m.vmax", "_____no_output_____" ], [ "m.frequency", "_____no_output_____" ], [ "m.ndutycycle", "_____no_output_____" ], [ "m.period", "_____no_output_____" ], [ "chan1.amplitude=10", "_____no_output_____" ], [ "m.vpp", "_____no_output_____" ], [ "m.vamplitude", "_____no_output_____" ], [ "signal_gen.serial.close()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a2e8d7c5b5950ddfd7541d57bf5a3fd8723d633
551,458
ipynb
Jupyter Notebook
notebooks/ml_experiments/001-base_experiment.ipynb
openclimatefix/predict_pv_yield_OLD
e569e240915ba17b8f6274814f8ceaeebb8ab32b
[ "Apache-2.0" ]
1
2021-07-10T17:48:46.000Z
2021-07-10T17:48:46.000Z
notebooks/ml_experiments/001-base_experiment.ipynb
merq2019/predict_pv_yield
7db72e6d16bed90927a3e1b28037c66c3dabe307
[ "Apache-2.0" ]
null
null
null
notebooks/ml_experiments/001-base_experiment.ipynb
merq2019/predict_pv_yield
7db72e6d16bed90927a3e1b28037c66c3dabe307
[ "Apache-2.0" ]
null
null
null
183.574567
175,184
0.876743
[ [ [ "import cartopy.crs as ccrs\nimport xarray as xr\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom itertools import product\nimport pandas as pd\nimport os\nimport time\nfrom datetime import timedelta\nimport rasterio.warp as rasteriowarp", "_____no_output_____" ], [ "SATELLITE_DATA_PATH = os.path.expanduser('~/data/EUMETSAT/reprojected_subsetted/')\nPV_DATA_FILENAME = os.path.expanduser('~/data/pvoutput.org/UK_PV_timeseries_batch.nc')\nPV_METADATA_FILENAME = os.path.expanduser('~/data/pvoutput.org/UK_PV_metadata.csv')\n\nDST_CRS = {\n 'ellps': 'WGS84',\n 'proj': 'tmerc', # Transverse Mercator\n 'units': 'm' # meters\n}\n\n# Geospatial boundary in Transverse Mercator projection (meters)\nSOUTH = 5513500\nNORTH = 6613500\nWEST = -889500\nEAST = 410500", "_____no_output_____" ] ], [ [ "## Load and convert PV metadata", "_____no_output_____" ] ], [ [ "pv_metadata = pd.read_csv(PV_METADATA_FILENAME, index_col='system_id')\npv_metadata.dropna(subset=['longitude', 'latitude'], how='any', inplace=True)", "_____no_output_____" ], [ "# Convert lat lons to Transverse Mercator\npv_metadata['x'], pv_metadata['y'] = rasteriowarp.transform(\n src_crs={'init': 'EPSG:4326'},\n dst_crs=DST_CRS,\n xs=pv_metadata['longitude'].values,\n ys=pv_metadata['latitude'].values)\n\n# Filter 3 PV systems which apparently aren't in the UK!\npv_metadata = pv_metadata[\n (pv_metadata.x >= WEST) &\n (pv_metadata.x <= EAST) &\n (pv_metadata.y <= NORTH) &\n (pv_metadata.y >= SOUTH)]\n\nlen(pv_metadata)", "_____no_output_____" ] ], [ [ "## Load and normalise PV power data", "_____no_output_____" ] ], [ [ "%%time\npv_power = xr.load_dataset(PV_DATA_FILENAME)", "CPU times: user 13.2 s, sys: 1.76 s, total: 15 s\nWall time: 15.3 s\n" ], [ "pv_power_selected = pv_power.loc[dict(datetime=slice('2018-06-01', '2019-07-01'))]", "_____no_output_____" ], [ "pv_power_df = pv_power_selected.to_dataframe().dropna(axis='columns', how='all')\npv_power_df = pv_power_df.clip(lower=0, 
upper=5E7)\npv_power_df.columns = [np.int64(col) for col in pv_power_df.columns]\npv_power_df = pv_power_df.tz_localize('Europe/London').tz_convert('UTC')", "_____no_output_____" ], [ "del pv_power\ndel pv_power_selected", "_____no_output_____" ], [ "# A bit of hand-crafted cleaning\n# TODO: Is this still relevant?\npv_power_df[30248][:'2019-01-03'] = np.NaN", "_____no_output_____" ], [ "# Scale to the range [0, 1]\npv_power_df -= pv_power_df.min()\npv_power_df /= pv_power_df.max()", "_____no_output_____" ], [ "# Drop systems which are producing over night\nNIGHT_YIELD_THRESHOLD = 0.4\nnight_hours = list(range(21, 24)) + list(range(0, 4))\nbad_systems = np.where(\n (pv_power_df[pv_power_df.index.hour.isin(night_hours)] > NIGHT_YIELD_THRESHOLD).sum()\n)[0]\nbad_systems = pv_power_df.columns[bad_systems]\nprint(len(bad_systems), 'bad systems found.')\n\n#ax = pv_power_df[bad_systems].plot(figsize=(40, 10), alpha=0.5)\n#ax.set_title('Bad PV systems');", "35 bad systems found.\n" ], [ "pv_power_df.drop(bad_systems, axis='columns', inplace=True)", "_____no_output_____" ], [ "%%time\n# Interpolate up to 15 minutes ahead.\npv_power_df = pv_power_df.interpolate(limit=3)", "CPU times: user 22 s, sys: 247 ms, total: 22.2 s\nWall time: 22.2 s\n" ], [ "# Sort the columns\npv_power_df = pv_power_df[np.sort(pv_power_df.columns)]", "_____no_output_____" ], [ "len(pv_power_df.columns)", "_____no_output_____" ], [ "#pv_power_df.plot(figsize=(40, 10), alpha=0.5, legend=False);", "_____no_output_____" ], [ "# Sort the metadata in the same order as the PV power data\npv_metadata = pv_metadata.reindex(pv_power_df.columns, axis='index')", "_____no_output_____" ], [ "pv_power_df.head()", "_____no_output_____" ] ], [ [ "## Load satellite data", "_____no_output_____" ] ], [ [ "from glob import glob\nfrom torch.utils.data import Dataset", "_____no_output_____" ], [ "RECTANGLE_WIDTH = 128000 # in meters\nRECTANGLE_HEIGHT = RECTANGLE_WIDTH\n\n\ndef get_rectangle(data_array, time, centre_x, 
centre_y, width=RECTANGLE_WIDTH, height=RECTANGLE_HEIGHT):\n half_width = width / 2\n half_height = height / 2\n\n north = centre_y + half_height\n south = centre_y - half_height\n east = centre_x + half_width\n west = centre_x - half_width\n\n data = data_array.loc[dict(\n x=slice(west, east), \n y=slice(north, south))]\n\n MEAN = 20.444992\n STD = 8.766013\n data = data - MEAN\n data = data / STD \n\n return data\n\n\nclass SatelliteLoader(Dataset):\n \"\"\"\n Attributes:\n index: pd.Series which maps from UTC datetime to full filename of satellite data.\n _data_array_cache: The last lazily opened xr.DataArray that __getitem__ was asked to open.\n Useful so that we don't have to re-open the DataArray if we're asked to get\n data from the same file on several different calls.\n \"\"\"\n def __init__(self, file_pattern):\n self._load_sat_index(file_pattern)\n self._data_array_cache = None\n self._last_filename_requested = None\n \n def __getitem__(self, dt):\n sat_filename = self.index[dt]\n if self._data_array_cache is None or sat_filename != self._last_filename_requested:\n self._data_array_cache = xr.open_dataarray(sat_filename)\n self._last_filename_requested = sat_filename\n return self._data_array_cache\n \n def close(self):\n if self._data_array_cache is not None:\n self._data_array_cache.close()\n \n def __len__(self):\n return len(self.index)\n \n def _load_sat_index(self, file_pattern):\n \"\"\"Opens all satellite files in `file_pattern` and loads all their datetime indicies into self.index.\"\"\"\n sat_filenames = glob(file_pattern)\n sat_filenames.sort()\n \n n_filenames = len(sat_filenames)\n sat_index = []\n for i_filename, sat_filename in enumerate(sat_filenames):\n if i_filename % 10 == 0 or i_filename == (n_filenames - 1):\n print('\\r {:5d} of {:5d}'.format(i_filename + 1, n_filenames), end='', flush=True)\n data_array = xr.open_dataarray(sat_filename, drop_variables=['x', 'y'])\n sat_index.extend([(sat_filename, t) for t in 
data_array.time.values])\n\n sat_index = pd.DataFrame(sat_index, columns=['filename', 'datetime']).set_index('datetime').squeeze()\n self.index = sat_index.tz_localize('UTC')\n \n def get_rectangles_for_all_data(self, centre_x, centre_y, width=RECTANGLE_WIDTH, height=RECTANGLE_HEIGHT):\n \"\"\"Iterate through all satellite filenames and load rectangle of imagery.\"\"\"\n sat_filenames = np.sort(np.unique(self.index.values))\n for sat_filename in sat_filenames:\n data_array = xr.open_dataarray(sat_filename)\n yield get_rectangle(data_array, time, centre_x, centre_y, width, height)\n \n def get_rectangle(self, time, centre_x, centre_y, width=RECTANGLE_WIDTH, height=RECTANGLE_HEIGHT):\n data_array = self[time]\n return get_rectangle(data_array, time, centre_x, centre_y, width, height)", "_____no_output_____" ], [ "%%time\nsat_loader = SatelliteLoader(os.path.join(SATELLITE_DATA_PATH, '*.nc'))\nprint()", " 3814 of 3815\nCPU times: user 14.2 s, sys: 474 ms, total: 14.7 s\nWall time: 16.5 s\n" ], [ "len(sat_loader)", "_____no_output_____" ], [ "# Test get rectangle\ndt = pd.Timestamp('2019-02-21 10:15')\npv_system_id = pv_metadata.index[1]\nx, y = pv_metadata.loc[pv_system_id][['x', 'y']]", "_____no_output_____" ], [ "%%time\nsat_data = sat_loader.get_rectangle(time=dt, centre_x=x, centre_y=y) #, width=512000, height=512000)", "CPU times: user 56.5 ms, sys: 329 µs, total: 56.8 ms\nWall time: 64.1 ms\n" ], [ "fig = plt.figure(figsize=(10, 10))\ncrs = ccrs.TransverseMercator()\nax = plt.axes(projection=crs)\nax.coastlines(resolution='10m', alpha=0.5, color='pink')\n\nimg = sat_data.isel(time=10).plot.imshow(ax=ax, cmap='gray', origin='upper', add_colorbar=True)\npath_collection = ax.scatter(x=x, y=y, alpha=0.7)", "_____no_output_____" ], [ "import pvlib\nfrom pvlib.location import Location", "_____no_output_____" ], [ "location = Location(\n latitude=pv_metadata['latitude'][pv_system_id],\n longitude=pv_metadata['longitude'][pv_system_id],\n tz='UTC',\n 
name=pv_metadata['system_name'][pv_system_id])\nlocation", "_____no_output_____" ], [ "fig, ax = plt.subplots(figsize=(20, 7))\npv_data_to_plot = pv_power_df[pv_system_id][dt - timedelta(hours=48):dt + timedelta(hours=48)]\nax.plot(pv_data_to_plot, label='PV yield')\n#ax.plot((dt, dt), (0, 1), linewidth=1, color='black', label='datetime of image above')\nax.set_title(dt)\nax.set_ylim((0, 1))\n\nax2 = ax.twinx()\nclearsky = location.get_clearsky(pv_data_to_plot.index)\nlines = ax2.plot(clearsky)\nfor line, label in zip(lines, clearsky.columns):\n line.set_label(label);\nax2.legend(loc='upper left');", "_____no_output_____" ], [ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nfrom collections import OrderedDict", "_____no_output_____" ], [ "%%time\n# Load all satellite data rectangles into RAM\ndims = OrderedDict()\ndims['time'] = sat_loader.index.index.values\ndims['y'] = sat_data.y\ndims['x'] = sat_data.x\n\nshape = [len(values) for values in dims.values()]\nprint('Creating huge numpy array!', flush=True)\ndata = np.zeros(shape, dtype=np.float16)\nprint('Setting to NaN', flush=True)\ndata[:, :, :] = np.NaN\nprint('Creating huge DataArray!', flush=True)\nsat_data_master = xr.DataArray(\n data,\n coords=dims,\n dims=dims.keys(),\n name='HRV')\ndel data, dims, shape\n\nfor data_array in sat_loader.get_rectangles_for_all_data(centre_x=x, centre_y=y):\n print('\\r', data_array.time.values[0], flush=True, end='')\n sat_data_master.loc[data_array.time.values, :, :] = data_array\n \nprint()", "Creating huge numpy array!\nSetting to NaN\nCreating huge DataArray!\n 2019-06-30T20:00:00.0000000002019-04-05T10:00:00.0000000002019-04-12T13:00:00.000000000CPU times: user 3min 3s, sys: 25.1 s, total: 3min 28s\nWall time: 3min 59s\n" ], [ "sat_data_master = sat_data_master.dropna(dim='time', how='any')", "_____no_output_____" ], [ "# Align with PV\npv_data = 
pv_power_df[pv_system_id].dropna()\nsat_data_index = pd.DatetimeIndex(sat_data_master.time.values, tz='UTC')\ndatetime_index = pv_data.index.intersection(sat_data_index)", "_____no_output_____" ], [ "len(datetime_index)", "_____no_output_____" ], [ "datetime_index.tz", "_____no_output_____" ], [ "sat_data_master = sat_data_master.loc[datetime_index.tz_convert(None)]", "_____no_output_____" ], [ "pv_data = pv_data[datetime_index]", "_____no_output_____" ], [ "pv_data_cuda = torch.cuda.HalfTensor(pv_data.values[:, np.newaxis])\npv_data_cuda.shape", "_____no_output_____" ], [ "sat_data_master_cuda = torch.cuda.HalfTensor(sat_data_master.values[:, np.newaxis])\nsat_data_master_cuda.shape", "_____no_output_____" ], [ "torch.cuda.get_device_name(0)", "_____no_output_____" ], [ "print('{:,.0f} MB CUDA memory allocated.'.format(torch.cuda.memory_allocated() / 1E6))", "3,784 MB CUDA memory allocated.\n" ], [ "# Split train & test by days\ndays = np.unique(datetime_index.date)\nlen(days)", "_____no_output_____" ], [ "# Use every 5th day for testing\ntesting_days = days[::5]\nlen(testing_days)", "_____no_output_____" ], [ "training_days = np.array(list(set(days) - set(testing_days)))\ntraining_days = np.sort(training_days)\nlen(training_days)", "_____no_output_____" ], [ "def get_index_into_datetime_index(training_or_testing_days):\n return np.where(pd.Series(datetime_index.date).isin(training_or_testing_days))[0]\n\ntraining_index = get_index_into_datetime_index(training_days)\ntesting_index = get_index_into_datetime_index(testing_days)\nassert not set(training_index).intersection(testing_index)\n\nlen(training_index), len(testing_index)", "_____no_output_____" ], [ "hours_of_day = datetime_index.hour.values.astype(np.float32)\nhours_of_day -= hours_of_day.mean()\nhours_of_day /= hours_of_day.std()\nhours_of_day = torch.cuda.HalfTensor(hours_of_day[:, np.newaxis])", "_____no_output_____" ], [ "clearsky = location.get_clearsky(datetime_index)\nclearsky -= 
clearsky.mean()\nclearsky /= clearsky.std()\nclearsky = torch.cuda.HalfTensor(clearsky.values)", "_____no_output_____" ], [ "class Net(nn.Module):\n def __init__(self, dropout_proportion=0.1):\n super(Net, self).__init__()\n self.conv1 = nn.Conv2d(in_channels=1, out_channels=12, kernel_size=5)\n self.pool = nn.MaxPool2d(kernel_size=2, stride=2)\n self.conv2 = nn.Conv2d(in_channels=12, out_channels=16, kernel_size=5)\n HOURS_OF_DAY_CHANNELS = 1\n CLEARSKY_CHANNELS = 3\n self.fc1 = nn.Linear(16 * 29 * 29, 120)\n self.fc2 = nn.Linear(120 + HOURS_OF_DAY_CHANNELS + CLEARSKY_CHANNELS, 84)\n self.fc3 = nn.Linear(84, 1)\n self.dropout_layer = nn.Dropout(p=dropout_proportion)\n\n def forward(self, x, hour_of_day, clearsky):\n #x = self.dropout_layer(x)\n x = self.pool(F.relu(self.conv1(x)))\n # x is now <batch_size>, 6, 62, 62. \n # 62 is 124 / 2. 124 is the 128-dim input - 4\n x = self.dropout_layer(x)\n x = self.pool(F.relu(self.conv2(x)))\n # x is now <batch_size>, 16, 29, 29\n x = x.view(-1, 16 * 29 * 29)\n # x is now <batch_size>, 16 x 29 x 29\n x = self.dropout_layer(x)\n x = F.relu(self.fc1(x))\n x = self.dropout_layer(x)\n x = torch.cat((x, hour_of_day, clearsky), dim=1)\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n return x\n \nnet = Net().cuda().half()", "_____no_output_____" ], [ "optimizer = optim.SGD(net.parameters(), lr=0.01)\nloss_func = nn.MSELoss()\nmae_loss_func = nn.L1Loss()", "_____no_output_____" ], [ "train_losses = []\ntrain_mae_losses = []\ntest_losses = []\ntest_mae_losses = []", "_____no_output_____" ], [ "%%time\nSTATS_PERIOD = 1000\nTRAINING_BATCH_SIZE = 128\n\nTESTING_BATCH_SIZE = 256\nTESTING_BATCH_INDEX = testing_index[:TESTING_BATCH_SIZE]\nTESTING_INPUTS = sat_data_master_cuda[TESTING_BATCH_INDEX]\nTESTING_TARGET = pv_data_cuda[TESTING_BATCH_INDEX]\nTESTING_HOURS_OF_DAY = hours_of_day[TESTING_BATCH_INDEX]\nTESTING_CLEARSKY = clearsky[TESTING_BATCH_INDEX]\n\nrunning_train_loss = 0.0\nrunning_train_mae = 0.0\nt0 = 
time.time()\ntraining_index_len_minus_1 = len(training_index)-1\n\nfor i_batch in range(20000 * 4 * 3):\n print('\\rBatch: {:4d}'.format(i_batch + 1), end='', flush=True)\n \n # Create batch\n batch_index = np.random.randint(low=0, high=training_index_len_minus_1, size=TRAINING_BATCH_SIZE)\n batch_index = training_index[batch_index]\n inputs = sat_data_master_cuda[batch_index]\n hours_of_day_for_batch = hours_of_day[batch_index]\n clearsky_for_batch = clearsky[batch_index]\n target = pv_data_cuda[batch_index]\n \n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward + backward + optimize\n net.train()\n outputs = net(inputs, hours_of_day_for_batch, clearsky_for_batch)\n train_loss = loss_func(outputs, target)\n train_loss.backward()\n optimizer.step()\n running_train_loss += train_loss.item()\n \n # MAE\n train_mae = mae_loss_func(outputs, target)\n running_train_mae += train_mae.item()\n\n # print statistics\n if i_batch % STATS_PERIOD == STATS_PERIOD - 1: # print every STATS_PERIOD mini-batches\n t1 = time.time()\n \n # Train loss\n mean_train_loss = running_train_loss / STATS_PERIOD\n train_losses.append(mean_train_loss)\n mean_train_mae = running_train_mae / STATS_PERIOD\n train_mae_losses.append(mean_train_mae)\n \n # Test loss\n net.eval()\n test_outputs = net(TESTING_INPUTS, TESTING_HOURS_OF_DAY, TESTING_CLEARSKY)\n test_loss = loss_func(test_outputs, TESTING_TARGET).item()\n test_losses.append(test_loss)\n test_mae = mae_loss_func(test_outputs, TESTING_TARGET).item()\n test_mae_losses.append(test_mae)\n \n print(\n '\\n time = {:.2f} milli seconds per batch.\\n'\n ' train loss = {:8.5f}\\n'\n ' train MAE = {:8.5f}\\n'\n ' test loss = {:8.5f}\\n'\n ' test MAE = {:8.5f}'.format(\n ((t1 - t0) / STATS_PERIOD) * 1000,\n mean_train_loss, \n mean_train_mae,\n test_loss,\n test_mae\n ))\n running_train_loss = 0.0\n running_train_mae = 0.0\n t0 = time.time()\n\nprint('Finished Training')", "Batch: 1000\n time = 46.80 milli seconds per batch.\n train 
loss = 0.00929\n train MAE = 0.06451\n test loss = 0.00714\n test MAE = 0.06134\nBatch: 2000\n time = 47.99 milli seconds per batch.\n train loss = 0.00917\n train MAE = 0.06404\n test loss = 0.00624\n test MAE = 0.05862\nBatch: 3000\n time = 48.93 milli seconds per batch.\n train loss = 0.00889\n train MAE = 0.06296\n test loss = 0.00564\n test MAE = 0.05609\nBatch: 4000\n time = 49.53 milli seconds per batch.\n train loss = 0.00881\n train MAE = 0.06273\n test loss = 0.00568\n test MAE = 0.05457\nBatch: 5000\n time = 49.65 milli seconds per batch.\n train loss = 0.00863\n train MAE = 0.06204\n test loss = 0.00621\n test MAE = 0.05682\nBatch: 6000\n time = 49.78 milli seconds per batch.\n train loss = 0.00842\n train MAE = 0.06133\n test loss = 0.00573\n test MAE = 0.05542\nBatch: 7000\n time = 48.36 milli seconds per batch.\n train loss = 0.00827\n train MAE = 0.06084\n test loss = 0.00573\n test MAE = 0.05499\nBatch: 8000\n time = 48.05 milli seconds per batch.\n train loss = 0.00814\n train MAE = 0.06027\n test loss = 0.00634\n test MAE = 0.05829\nBatch: 9000\n time = 48.04 milli seconds per batch.\n train loss = 0.00797\n train MAE = 0.05956\n test loss = 0.00562\n test MAE = 0.05417\nBatch: 10000\n time = 48.10 milli seconds per batch.\n train loss = 0.00774\n train MAE = 0.05875\n test loss = 0.00679\n test MAE = 0.05933\nBatch: 11000\n time = 48.05 milli seconds per batch.\n train loss = 0.00771\n train MAE = 0.05851\n test loss = 0.00573\n test MAE = 0.05411\nBatch: 12000\n time = 48.22 milli seconds per batch.\n train loss = 0.00752\n train MAE = 0.05783\n test loss = 0.00628\n test MAE = 0.05634\nBatch: 13000\n time = 48.90 milli seconds per batch.\n train loss = 0.00746\n train MAE = 0.05769\n test loss = 0.00597\n test MAE = 0.05554\nBatch: 14000\n time = 49.69 milli seconds per batch.\n train loss = 0.00728\n train MAE = 0.05704\n test loss = 0.00512\n test MAE = 0.05222\nBatch: 15000\n time = 50.07 milli seconds per batch.\n train loss = 0.00721\n 
train MAE = 0.05667\n test loss = 0.00695\n test MAE = 0.05881\nBatch: 16000\n time = 47.33 milli seconds per batch.\n train loss = 0.00716\n train MAE = 0.05652\n test loss = 0.00565\n test MAE = 0.05322\nBatch: 17000\n time = 45.57 milli seconds per batch.\n train loss = 0.00701\n train MAE = 0.05585\n test loss = 0.00564\n test MAE = 0.05362\nBatch: 18000\n time = 45.74 milli seconds per batch.\n train loss = 0.00688\n train MAE = 0.05534\n test loss = 0.00662\n test MAE = 0.05804\nBatch: 19000\n time = 45.61 milli seconds per batch.\n train loss = 0.00676\n train MAE = 0.05493\n test loss = 0.00579\n test MAE = 0.05341\nBatch: 20000\n time = 45.57 milli seconds per batch.\n train loss = 0.00675\n train MAE = 0.05475\n test loss = 0.00608\n test MAE = 0.05524\nBatch: 21000\n time = 45.60 milli seconds per batch.\n train loss = 0.00670\n train MAE = 0.05476\n test loss = 0.00614\n test MAE = 0.05533\nBatch: 22000\n time = 45.66 milli seconds per batch.\n train loss = 0.00655\n train MAE = 0.05396\n test loss = 0.00680\n test MAE = 0.05719\nBatch: 23000\n time = 45.63 milli seconds per batch.\n train loss = 0.00639\n train MAE = 0.05342\n test loss = 0.00629\n test MAE = 0.05548\nBatch: 24000\n time = 45.59 milli seconds per batch.\n train loss = 0.00625\n train MAE = 0.05288\n test loss = 0.00637\n test MAE = 0.05493\nBatch: 25000\n time = 45.67 milli seconds per batch.\n train loss = 0.00618\n train MAE = 0.05255\n test loss = 0.00544\n test MAE = 0.05167\nBatch: 26000\n time = 45.60 milli seconds per batch.\n train loss = 0.00622\n train MAE = 0.05266\n test loss = 0.00621\n test MAE = 0.05429\nBatch: 27000\n time = 45.57 milli seconds per batch.\n train loss = 0.00606\n train MAE = 0.05204\n test loss = 0.00592\n test MAE = 0.05310\nBatch: 28000\n time = 45.55 milli seconds per batch.\n train loss = 0.00594\n train MAE = 0.05142\n test loss = 0.00584\n test MAE = 0.05246\nBatch: 29000\n time = 45.63 milli seconds per batch.\n train loss = 0.00594\n train MAE = 
0.05149\n test loss = 0.00615\n test MAE = 0.05371\nBatch: 30000\n time = 45.57 milli seconds per batch.\n train loss = 0.00582\n train MAE = 0.05098\n test loss = 0.00628\n test MAE = 0.05371\nBatch: 31000\n time = 45.61 milli seconds per batch.\n train loss = 0.00573\n train MAE = 0.05068\n test loss = 0.00594\n test MAE = 0.05328\nBatch: 32000\n time = 45.65 milli seconds per batch.\n train loss = 0.00566\n train MAE = 0.05040\n test loss = 0.00604\n test MAE = 0.05307\nBatch: 33000\n time = 45.57 milli seconds per batch.\n train loss = 0.00558\n train MAE = 0.04996\n test loss = 0.00688\n test MAE = 0.05634\nBatch: 34000\n time = 45.65 milli seconds per batch.\n train loss = 0.00555\n train MAE = 0.04984\n test loss = 0.00644\n test MAE = 0.05466\nBatch: 35000\n time = 45.59 milli seconds per batch.\n train loss = 0.00544\n train MAE = 0.04938\n test loss = 0.00589\n test MAE = 0.05164\nBatch: 36000\n time = 45.67 milli seconds per batch.\n train loss = 0.00544\n train MAE = 0.04942\n test loss = 0.00578\n test MAE = 0.05252\nBatch: 37000\n time = 45.61 milli seconds per batch.\n train loss = 0.00537\n train MAE = 0.04913\n test loss = 0.00629\n test MAE = 0.05469\nBatch: 38000\n time = 45.55 milli seconds per batch.\n train loss = 0.00530\n train MAE = 0.04868\n test loss = 0.00599\n test MAE = 0.05270\nBatch: 39000\n time = 45.62 milli seconds per batch.\n train loss = 0.00525\n train MAE = 0.04855\n test loss = 0.00599\n test MAE = 0.05310\nBatch: 40000\n time = 45.68 milli seconds per batch.\n train loss = 0.00517\n train MAE = 0.04810\n test loss = 0.00570\n test MAE = 0.05173\nBatch: 41000\n time = 45.63 milli seconds per batch.\n train loss = 0.00506\n train MAE = 0.04779\n test loss = 0.00597\n test MAE = 0.05191\nBatch: 42000\n time = 45.67 milli seconds per batch.\n train loss = 0.00514\n train MAE = 0.04803\n test loss = 0.00602\n test MAE = 0.05292\nBatch: 43000\n time = 45.62 milli seconds per batch.\n train loss = 0.00500\n train MAE = 0.04742\n 
test loss = 0.00593\n test MAE = 0.05222\nBatch: 44000\n time = 45.59 milli seconds per batch.\n train loss = 0.00503\n train MAE = 0.04747\n test loss = 0.00590\n test MAE = 0.05173\nBatch: 45000\n time = 45.60 milli seconds per batch.\n train loss = 0.00500\n train MAE = 0.04733\n test loss = 0.00639\n test MAE = 0.05331\nBatch: 46000\n time = 45.67 milli seconds per batch.\n train loss = 0.00482\n train MAE = 0.04657\n test loss = 0.00652\n test MAE = 0.05399\nBatch: 47000\n time = 45.66 milli seconds per batch.\n train loss = 0.00488\n train MAE = 0.04684\n test loss = 0.00600\n test MAE = 0.05188\nBatch: 48000\n time = 45.60 milli seconds per batch.\n train loss = 0.00477\n train MAE = 0.04645\n test loss = 0.00569\n test MAE = 0.05063\nBatch: 49000\n time = 45.55 milli seconds per batch.\n train loss = 0.00479\n train MAE = 0.04637\n test loss = 0.00571\n test MAE = 0.05093\nBatch: 50000\n time = 45.57 milli seconds per batch.\n train loss = 0.00468\n train MAE = 0.04587\n test loss = 0.00678\n test MAE = 0.05496\nBatch: 51000\n time = 45.56 milli seconds per batch.\n train loss = 0.00468\n train MAE = 0.04588\n test loss = 0.00583\n test MAE = 0.05124\nBatch: 52000\n time = 45.64 milli seconds per batch.\n train loss = 0.00464\n train MAE = 0.04587\n test loss = 0.00537\n test MAE = 0.04950\nBatch: 53000\n time = 45.64 milli seconds per batch.\n train loss = 0.00448\n train MAE = 0.04498\n test loss = 0.00602\n test MAE = 0.05151\nBatch: 54000\n time = 45.52 milli seconds per batch.\n train loss = 0.00454\n train MAE = 0.04526\n test loss = 0.00642\n test MAE = 0.05325\nBatch: 55000\n time = 45.61 milli seconds per batch.\n train loss = 0.00444\n train MAE = 0.04474\n test loss = 0.00651\n test MAE = 0.05374\nBatch: 56000\n time = 45.61 milli seconds per batch.\n train loss = 0.00443\n train MAE = 0.04470\n test loss = 0.00599\n test MAE = 0.05127\nBatch: 57000\n time = 45.59 milli seconds per batch.\n train loss = 0.00448\n train MAE = 0.04479\n test loss = 
0.00638\n test MAE = 0.05249\nBatch: 58000\n time = 45.59 milli seconds per batch.\n train loss = 0.00438\n train MAE = 0.04446\n test loss = 0.00624\n test MAE = 0.05258\nBatch: 59000\n time = 45.60 milli seconds per batch.\n train loss = 0.00433\n train MAE = 0.04424\n test loss = 0.00587\n test MAE = 0.05096\nBatch: 60000\n time = 45.67 milli seconds per batch.\n train loss = 0.00427\n train MAE = 0.04400\n test loss = 0.00576\n test MAE = 0.05023\nBatch: 61000\n time = 45.62 milli seconds per batch.\n train loss = 0.00428\n train MAE = 0.04407\n test loss = 0.00603\n test MAE = 0.05112\nBatch: 62000\n time = 45.60 milli seconds per batch.\n train loss = 0.00428\n train MAE = 0.04400\n test loss = 0.00660\n test MAE = 0.05338\nBatch: 63000\n time = 45.74 milli seconds per batch.\n train loss = 0.00421\n train MAE = 0.04359\n test loss = 0.00622\n test MAE = 0.05200\nBatch: 64000\n time = 45.70 milli seconds per batch.\n train loss = 0.00413\n train MAE = 0.04329\n test loss = 0.00591\n test MAE = 0.05139\nBatch: 65000\n time = 45.66 milli seconds per batch.\n train loss = 0.00411\n train MAE = 0.04318\n test loss = 0.00645\n test MAE = 0.05234\nBatch: 66000\n time = 45.65 milli seconds per batch.\n train loss = 0.00409\n train MAE = 0.04296\n test loss = 0.00615\n test MAE = 0.05209\nBatch: 67000\n time = 45.80 milli seconds per batch.\n train loss = 0.00408\n train MAE = 0.04296\n test loss = 0.00568\n test MAE = 0.04999\nBatch: 68000\n time = 45.61 milli seconds per batch.\n train loss = 0.00400\n train MAE = 0.04264\n test loss = 0.00589\n test MAE = 0.05048\nBatch: 69000\n time = 45.64 milli seconds per batch.\n train loss = 0.00405\n train MAE = 0.04287\n test loss = 0.00621\n test MAE = 0.05173\nBatch: 70000\n time = 45.61 milli seconds per batch.\n train loss = 0.00400\n train MAE = 0.04261\n test loss = 0.00644\n test MAE = 0.05267\nBatch: 71000\n time = 45.70 milli seconds per batch.\n train loss = 0.00393\n train MAE = 0.04225\n test loss = 0.00653\n 
test MAE = 0.05298\nBatch: 72000\n time = 45.51 milli seconds per batch.\n train loss = 0.00394\n train MAE = 0.04228\n test loss = 0.00602\n test MAE = 0.05069\nBatch: 73000\n time = 45.51 milli seconds per batch.\n train loss = 0.00389\n train MAE = 0.04202\n test loss = 0.00577\n test MAE = 0.04999\nBatch: 74000\n time = 45.59 milli seconds per batch.\n train loss = 0.00387\n train MAE = 0.04190\n test loss = 0.00649\n test MAE = 0.05304\nBatch: 75000\n time = 45.49 milli seconds per batch.\n train loss = 0.00382\n train MAE = 0.04158\n test loss = 0.00587\n test MAE = 0.04984\nBatch: 76000\n time = 45.62 milli seconds per batch.\n train loss = 0.00378\n train MAE = 0.04150\n test loss = 0.00592\n test MAE = 0.05002\nBatch: 77000\n time = 45.50 milli seconds per batch.\n train loss = 0.00382\n train MAE = 0.04174\n test loss = 0.00598\n test MAE = 0.05124\nBatch: 78000\n time = 45.47 milli seconds per batch.\n train loss = 0.00378\n train MAE = 0.04138\n test loss = 0.00591\n test MAE = 0.05029\nBatch: 79000\n time = 45.51 milli seconds per batch.\n train loss = 0.00376\n train MAE = 0.04127\n test loss = 0.00597\n test MAE = 0.05051\nBatch: 80000\n time = 45.66 milli seconds per batch.\n train loss = 0.00368\n train MAE = 0.04089\n test loss = 0.00640\n test MAE = 0.05276\nBatch: 81000\n time = 45.53 milli seconds per batch.\n train loss = 0.00370\n train MAE = 0.04103\n test loss = 0.00592\n test MAE = 0.05057\nBatch: 82000\n time = 45.60 milli seconds per batch.\n train loss = 0.00366\n train MAE = 0.04087\n test loss = 0.00603\n test MAE = 0.05078\nBatch: 83000\n time = 45.55 milli seconds per batch.\n train loss = 0.00367\n train MAE = 0.04092\n test loss = 0.00604\n test MAE = 0.05081\nBatch: 84000\n time = 45.69 milli seconds per batch.\n train loss = 0.00359\n train MAE = 0.04049\n test loss = 0.00565\n test MAE = 0.04880\nBatch: 85000\n time = 45.49 milli seconds per batch.\n train loss = 0.00360\n train MAE = 0.04065\n test loss = 0.00604\n test MAE = 
0.05035\nBatch: 86000\n time = 45.47 milli seconds per batch.\n train loss = 0.00360\n train MAE = 0.04049\n test loss = 0.00602\n test MAE = 0.05109\nBatch: 87000\n time = 45.50 milli seconds per batch.\n train loss = 0.00359\n train MAE = 0.04043\n test loss = 0.00618\n test MAE = 0.05182\nBatch: 88000\n time = 45.53 milli seconds per batch.\n train loss = 0.00358\n train MAE = 0.04031\n test loss = 0.00605\n test MAE = 0.05103\nBatch: 89000\n time = 45.57 milli seconds per batch.\n train loss = 0.00354\n train MAE = 0.04021\n test loss = 0.00626\n test MAE = 0.05182\nBatch: 90000\n time = 45.60 milli seconds per batch.\n train loss = 0.00350\n train MAE = 0.03998\n test loss = 0.00591\n test MAE = 0.04962\nBatch: 91000\n time = 49.01 milli seconds per batch.\n train loss = 0.00351\n train MAE = 0.04004\n test loss = 0.00583\n test MAE = 0.05002\nBatch: 92000\n time = 45.28 milli seconds per batch.\n train loss = 0.00345\n train MAE = 0.03976\n test loss = 0.00618\n test MAE = 0.05048\nBatch: 93000\n time = 45.17 milli seconds per batch.\n train loss = 0.00345\n train MAE = 0.03970\n test loss = 0.00586\n test MAE = 0.04968\nBatch: 94000\n time = 45.09 milli seconds per batch.\n train loss = 0.00343\n train MAE = 0.03956\n test loss = 0.00612\n test MAE = 0.05078\nBatch: 95000\n time = 45.13 milli seconds per batch.\n train loss = 0.00340\n train MAE = 0.03930\n test loss = 0.00616\n test MAE = 0.05084\nBatch: 96000\n time = 45.19 milli seconds per batch.\n train loss = 0.00334\n train MAE = 0.03911\n test loss = 0.00616\n test MAE = 0.05112\nBatch: 97000\n time = 45.02 milli seconds per batch.\n train loss = 0.00336\n train MAE = 0.03918\n test loss = 0.00623\n test MAE = 0.05151\nBatch: 98000\n time = 45.22 milli seconds per batch.\n train loss = 0.00335\n train MAE = 0.03901\n test loss = 0.00616\n test MAE = 0.05118\nBatch: 99000\n time = 45.21 milli seconds per batch.\n train loss = 0.00333\n train MAE = 0.03897\n test loss = 0.00585\n test MAE = 
0.04956\nBatch: 100000\n time = 45.22 milli seconds per batch.\n train loss = 0.00331\n train MAE = 0.03900\n test loss = 0.00629\n test MAE = 0.05154\nBatch: 101000\n time = 45.18 milli seconds per batch.\n train loss = 0.00329\n train MAE = 0.03879\n test loss = 0.00587\n test MAE = 0.04987\nBatch: 102000\n time = 45.11 milli seconds per batch.\n train loss = 0.00328\n train MAE = 0.03879\n test loss = 0.00635\n test MAE = 0.05176\nBatch: 103000\n time = 45.21 milli seconds per batch.\n train loss = 0.00329\n train MAE = 0.03882\n test loss = 0.00608\n test MAE = 0.05048\nBatch: 104000\n time = 45.22 milli seconds per batch.\n train loss = 0.00323\n train MAE = 0.03845\n test loss = 0.00622\n test MAE = 0.05115\nBatch: 105000\n time = 45.21 milli seconds per batch.\n train loss = 0.00322\n train MAE = 0.03835\n test loss = 0.00550\n test MAE = 0.04788\nBatch: 106000\n time = 45.23 milli seconds per batch.\n train loss = 0.00326\n train MAE = 0.03865\n test loss = 0.00572\n test MAE = 0.04883\nBatch: 107000\n time = 45.23 milli seconds per batch.\n train loss = 0.00322\n train MAE = 0.03840\n test loss = 0.00576\n test MAE = 0.04904\nBatch: 108000\n time = 45.19 milli seconds per batch.\n train loss = 0.00319\n train MAE = 0.03822\n test loss = 0.00618\n test MAE = 0.05081\nBatch: 109000\n time = 45.19 milli seconds per batch.\n train loss = 0.00319\n train MAE = 0.03824\n test loss = 0.00658\n test MAE = 0.05222\nBatch: 110000\n time = 45.18 milli seconds per batch.\n train loss = 0.00315\n train MAE = 0.03806\n test loss = 0.00612\n test MAE = 0.04993\nBatch: 111000\n time = 45.29 milli seconds per batch.\n train loss = 0.00314\n train MAE = 0.03801\n test loss = 0.00615\n test MAE = 0.05011\nBatch: 112000\n time = 45.16 milli seconds per batch.\n train loss = 0.00319\n train MAE = 0.03808\n test loss = 0.00628\n test MAE = 0.05124\nBatch: 113000\n time = 45.08 milli seconds per batch.\n train loss = 0.00314\n train MAE = 0.03785\n test loss = 0.00607\n test MAE 
= 0.05005\nBatch: 114000\n time = 45.31 milli seconds per batch.\n train loss = 0.00311\n train MAE = 0.03769\n test loss = 0.00632\n test MAE = 0.05075\nBatch: 115000\n time = 45.18 milli seconds per batch.\n train loss = 0.00309\n train MAE = 0.03772\n test loss = 0.00594\n test MAE = 0.04968\nBatch: 116000\n time = 45.24 milli seconds per batch.\n train loss = 0.00306\n train MAE = 0.03747\n test loss = 0.00625\n test MAE = 0.05045\nBatch: 117000\n time = 45.18 milli seconds per batch.\n train loss = 0.00304\n train MAE = 0.03734\n test loss = 0.00638\n test MAE = 0.05164\nBatch: 118000\n time = 45.22 milli seconds per batch.\n train loss = 0.00307\n train MAE = 0.03746\n test loss = 0.00570\n test MAE = 0.04868\nBatch: 119000\n time = 44.93 milli seconds per batch.\n train loss = 0.00301\n train MAE = 0.03732\n test loss = 0.00645\n test MAE = 0.05154\nBatch: 120000\n time = 45.12 milli seconds per batch.\n train loss = 0.00305\n train MAE = 0.03740\n test loss = 0.00598\n test MAE = 0.04971\nBatch: 121000\n time = 45.15 milli seconds per batch.\n train loss = 0.00303\n train MAE = 0.03721\n test loss = 0.00607\n test MAE = 0.05029\nBatch: 122000\n time = 45.29 milli seconds per batch.\n train loss = 0.00299\n train MAE = 0.03702\n test loss = 0.00616\n test MAE = 0.05048\nBatch: 123000\n time = 45.41 milli seconds per batch.\n train loss = 0.00299\n train MAE = 0.03717\n test loss = 0.00607\n test MAE = 0.05032\nBatch: 124000\n time = 45.49 milli seconds per batch.\n train loss = 0.00298\n train MAE = 0.03710\n test loss = 0.00585\n test MAE = 0.04944\nBatch: 125000\n time = 45.29 milli seconds per batch.\n train loss = 0.00298\n train MAE = 0.03701\n test loss = 0.00626\n test MAE = 0.05127\nBatch: 126000\n time = 45.29 milli seconds per batch.\n train loss = 0.00296\n train MAE = 0.03691\n test loss = 0.00600\n test MAE = 0.04990\nBatch: 127000\n time = 45.26 milli seconds per batch.\n train loss = 0.00292\n train MAE = 0.03665\n test loss = 0.00621\n test 
MAE = 0.05087\nBatch: 128000\n time = 45.29 milli seconds per batch.\n train loss = 0.00292\n train MAE = 0.03662\n test loss = 0.00626\n test MAE = 0.05069\nBatch: 129000\n time = 45.42 milli seconds per batch.\n train loss = 0.00291\n train MAE = 0.03661\n test loss = 0.00644\n test MAE = 0.05170\nBatch: 130000\n time = 45.46 milli seconds per batch.\n train loss = 0.00292\n train MAE = 0.03662\n test loss = 0.00607\n test MAE = 0.05023\nBatch: 131000\n time = 45.36 milli seconds per batch.\n train loss = 0.00290\n train MAE = 0.03646\n test loss = 0.00616\n test MAE = 0.05072\nBatch: 132000\n time = 45.51 milli seconds per batch.\n train loss = 0.00284\n train MAE = 0.03621\n test loss = 0.00587\n test MAE = 0.04922\nBatch: 133000\n time = 45.44 milli seconds per batch.\n train loss = 0.00288\n train MAE = 0.03634\n test loss = 0.00612\n test MAE = 0.05048\nBatch: 134000\n time = 45.40 milli seconds per batch.\n train loss = 0.00288\n train MAE = 0.03638\n test loss = 0.00604\n test MAE = 0.05014\nBatch: 135000\n time = 45.37 milli seconds per batch.\n train loss = 0.00287\n train MAE = 0.03634\n test loss = 0.00607\n test MAE = 0.05026\nBatch: 136000\n time = 45.30 milli seconds per batch.\n train loss = 0.00286\n train MAE = 0.03618\n test loss = 0.00631\n test MAE = 0.05096\nBatch: 137000\n time = 45.30 milli seconds per batch.\n train loss = 0.00283\n train MAE = 0.03612\n test loss = 0.00628\n test MAE = 0.05084\nBatch: 138000\n time = 45.43 milli seconds per batch.\n train loss = 0.00287\n train MAE = 0.03628\n test loss = 0.00634\n test MAE = 0.05170\nBatch: 139000\n time = 45.50 milli seconds per batch.\n train loss = 0.00277\n train MAE = 0.03574\n test loss = 0.00607\n test MAE = 0.05032\nBatch: 140000\n time = 45.51 milli seconds per batch.\n train loss = 0.00280\n train MAE = 0.03599\n test loss = 0.00646\n test MAE = 0.05173\nBatch: 141000\n time = 45.48 milli seconds per batch.\n train loss = 0.00277\n train MAE = 0.03573\n test loss = 0.00675\n 
test MAE = 0.05276\nBatch: 142000\n time = 45.46 milli seconds per batch.\n train loss = 0.00278\n train MAE = 0.03585\n test loss = 0.00633\n test MAE = 0.05151\nBatch: 143000\n time = 45.53 milli seconds per batch.\n train loss = 0.00274\n train MAE = 0.03563\n test loss = 0.00629\n test MAE = 0.05096\nBatch: 144000\n time = 45.66 milli seconds per batch.\n train loss = 0.00272\n train MAE = 0.03552\n test loss = 0.00646\n test MAE = 0.05179\nBatch: 145000\n time = 45.28 milli seconds per batch.\n train loss = 0.00277\n train MAE = 0.03568\n test loss = 0.00620\n test MAE = 0.05075\nBatch: 146000\n time = 45.24 milli seconds per batch.\n train loss = 0.00271\n train MAE = 0.03545\n test loss = 0.00642\n test MAE = 0.05185\nBatch: 147000\n time = 45.11 milli seconds per batch.\n train loss = 0.00274\n train MAE = 0.03559\n test loss = 0.00645\n test MAE = 0.05188\nBatch: 148000\n time = 45.25 milli seconds per batch.\n train loss = 0.00276\n train MAE = 0.03565\n test loss = 0.00634\n test MAE = 0.05118\nBatch: 149000\n time = 45.21 milli seconds per batch.\n train loss = 0.00270\n train MAE = 0.03536\n test loss = 0.00632\n test MAE = 0.05148\nBatch: 150000\n time = 45.25 milli seconds per batch.\n train loss = 0.00271\n train MAE = 0.03541\n test loss = 0.00608\n test MAE = 0.05054\nBatch: 151000\n time = 45.02 milli seconds per batch.\n train loss = 0.00270\n train MAE = 0.03534\n test loss = 0.00583\n test MAE = 0.04935\nBatch: 152000\n time = 45.22 milli seconds per batch.\n train loss = 0.00268\n train MAE = 0.03506\n test loss = 0.00601\n test MAE = 0.04984\nBatch: 153000\n time = 45.15 milli seconds per batch.\n train loss = 0.00267\n train MAE = 0.03514\n test loss = 0.00593\n test MAE = 0.04962\nBatch: 154000\n time = 45.19 milli seconds per batch.\n train loss = 0.00264\n train MAE = 0.03505\n test loss = 0.00594\n test MAE = 0.04965\nBatch: 155000\n time = 45.31 milli seconds per batch.\n train loss = 0.00265\n train MAE = 0.03508\n test loss = 
0.00628\n test MAE = 0.05121\nBatch: 156000\n time = 45.22 milli seconds per batch.\n train loss = 0.00268\n train MAE = 0.03510\n test loss = 0.00622\n test MAE = 0.05130\nBatch: 157000\n time = 45.31 milli seconds per batch.\n train loss = 0.00265\n train MAE = 0.03502\n test loss = 0.00593\n test MAE = 0.04974\nBatch: 158000\n time = 45.33 milli seconds per batch.\n train loss = 0.00264\n train MAE = 0.03501\n test loss = 0.00626\n test MAE = 0.05112\nBatch: 159000\n time = 45.40 milli seconds per batch.\n train loss = 0.00267\n train MAE = 0.03509\n test loss = 0.00631\n test MAE = 0.05127\nBatch: 160000\n time = 45.24 milli seconds per batch.\n train loss = 0.00260\n train MAE = 0.03466\n test loss = 0.00605\n test MAE = 0.05014\nBatch: 161000\n time = 45.05 milli seconds per batch.\n train loss = 0.00263\n train MAE = 0.03486\n test loss = 0.00596\n test MAE = 0.04968\nBatch: 162000\n time = 45.29 milli seconds per batch.\n train loss = 0.00261\n train MAE = 0.03471\n test loss = 0.00577\n test MAE = 0.04922\nBatch: 163000\n time = 45.22 milli seconds per batch.\n train loss = 0.00256\n train MAE = 0.03453\n test loss = 0.00578\n test MAE = 0.04919\nBatch: 164000\n time = 45.29 milli seconds per batch.\n train loss = 0.00260\n train MAE = 0.03473\n test loss = 0.00632\n test MAE = 0.05133\nBatch: 165000\n time = 45.14 milli seconds per batch.\n train loss = 0.00256\n train MAE = 0.03453\n test loss = 0.00631\n test MAE = 0.05109\nBatch: 166000\n time = 45.19 milli seconds per batch.\n train loss = 0.00256\n train MAE = 0.03438\n test loss = 0.00624\n test MAE = 0.05087\nBatch: 167000\n time = 45.47 milli seconds per batch.\n train loss = 0.00256\n train MAE = 0.03455\n test loss = 0.00618\n test MAE = 0.05087\nBatch: 168000\n time = 45.40 milli seconds per batch.\n train loss = 0.00256\n train MAE = 0.03445\n test loss = 0.00607\n test MAE = 0.05008\nBatch: 169000\n time = 45.20 milli seconds per batch.\n train loss = 0.00257\n train MAE = 0.03445\n test loss 
= 0.00636\n test MAE = 0.05103\nBatch: 170000\n time = 45.15 milli seconds per batch.\n train loss = 0.00254\n train MAE = 0.03422\n test loss = 0.00572\n test MAE = 0.04861\nBatch: 171000\n time = 45.30 milli seconds per batch.\n train loss = 0.00253\n train MAE = 0.03426\n test loss = 0.00587\n test MAE = 0.04926\nBatch: 172000\n time = 45.25 milli seconds per batch.\n train loss = 0.00250\n train MAE = 0.03410\n test loss = 0.00581\n test MAE = 0.04926\nBatch: 173000\n time = 45.27 milli seconds per batch.\n train loss = 0.00254\n train MAE = 0.03427\n test loss = 0.00563\n test MAE = 0.04849\nBatch: 174000\n time = 45.23 milli seconds per batch.\n train loss = 0.00249\n train MAE = 0.03402\n test loss = 0.00599\n test MAE = 0.05020\nBatch: 175000\n time = 45.29 milli seconds per batch.\n train loss = 0.00250\n train MAE = 0.03405\n test loss = 0.00586\n test MAE = 0.04999\nBatch: 176000\n time = 45.17 milli seconds per batch.\n train loss = 0.00249\n train MAE = 0.03406\n test loss = 0.00571\n test MAE = 0.04904\nBatch: 177000\n time = 45.18 milli seconds per batch.\n train loss = 0.00250\n train MAE = 0.03405\n test loss = 0.00598\n test MAE = 0.05005\nBatch: 178000\n time = 45.12 milli seconds per batch.\n train loss = 0.00249\n train MAE = 0.03398\n test loss = 0.00605\n test MAE = 0.04996\nBatch: 179000\n time = 45.07 milli seconds per batch.\n train loss = 0.00248\n train MAE = 0.03398\n test loss = 0.00577\n test MAE = 0.04892\nBatch: 180000\n time = 45.25 milli seconds per batch.\n train loss = 0.00248\n train MAE = 0.03383\n test loss = 0.00604\n test MAE = 0.05023\nBatch: 181000\n time = 45.17 milli seconds per batch.\n train loss = 0.00246\n train MAE = 0.03384\n test loss = 0.00600\n test MAE = 0.05020\nBatch: 182000\n time = 45.32 milli seconds per batch.\n train loss = 0.00248\n train MAE = 0.03392\n test loss = 0.00591\n test MAE = 0.04965\nBatch: 183000\n time = 45.08 milli seconds per batch.\n train loss = 0.00245\n train MAE = 0.03373\n test 
loss = 0.00578\n test MAE = 0.04932\nBatch: 184000\n time = 45.22 milli seconds per batch.\n train loss = 0.00246\n train MAE = 0.03373\n test loss = 0.00596\n test MAE = 0.05011\nBatch: 185000\n time = 45.27 milli seconds per batch.\n train loss = 0.00245\n train MAE = 0.03372\n test loss = 0.00592\n test MAE = 0.05023\nBatch: 186000\n time = 45.24 milli seconds per batch.\n train loss = 0.00241\n train MAE = 0.03359\n test loss = 0.00589\n test MAE = 0.04962\nBatch: 187000\n time = 45.23 milli seconds per batch.\n train loss = 0.00244\n train MAE = 0.03364\n test loss = 0.00602\n test MAE = 0.05026\nBatch: 188000\n time = 45.21 milli seconds per batch.\n train loss = 0.00241\n train MAE = 0.03344\n test loss = 0.00582\n test MAE = 0.04916\nBatch: 189000\n time = 45.29 milli seconds per batch.\n train loss = 0.00241\n train MAE = 0.03346\n test loss = 0.00614\n test MAE = 0.05054\nBatch: 190000\n time = 45.18 milli seconds per batch.\n train loss = 0.00240\n train MAE = 0.03352\n test loss = 0.00582\n test MAE = 0.04938\nBatch: 191000\n time = 45.28 milli seconds per batch.\n train loss = 0.00240\n train MAE = 0.03345\n test loss = 0.00608\n test MAE = 0.05011\nBatch: 192000\n time = 45.28 milli seconds per batch.\n train loss = 0.00238\n train MAE = 0.03333\n test loss = 0.00603\n test MAE = 0.05032\nBatch: 193000\n time = 45.17 milli seconds per batch.\n train loss = 0.00241\n train MAE = 0.03338\n test loss = 0.00616\n test MAE = 0.05051\nBatch: 194000\n time = 45.17 milli seconds per batch.\n train loss = 0.00240\n train MAE = 0.03334\n test loss = 0.00577\n test MAE = 0.04950\nBatch: 195000\n time = 45.18 milli seconds per batch.\n train loss = 0.00237\n train MAE = 0.03318\n test loss = 0.00602\n test MAE = 0.05048\nBatch: 196000\n time = 45.27 milli seconds per batch.\n train loss = 0.00240\n train MAE = 0.03344\n test loss = 0.00597\n test MAE = 0.04993\nBatch: 197000\n time = 45.22 milli seconds per batch.\n train loss = 0.00236\n train MAE = 0.03314\n 
test loss = 0.00600\n test MAE = 0.04987\nBatch: 198000\n time = 45.18 milli seconds per batch.\n train loss = 0.00237\n train MAE = 0.03321\n test loss = 0.00591\n test MAE = 0.04947\nBatch: 199000\n time = 45.23 milli seconds per batch.\n train loss = 0.00236\n train MAE = 0.03319\n test loss = 0.00591\n test MAE = 0.04968\nBatch: 200000\n time = 45.33 milli seconds per batch.\n train loss = 0.00235\n train MAE = 0.03312\n test loss = 0.00574\n test MAE = 0.04996\nBatch: 201000\n time = 45.25 milli seconds per batch.\n train loss = 0.00238\n train MAE = 0.03327\n test loss = 0.00602\n test MAE = 0.05060\nBatch: 202000\n time = 45.34 milli seconds per batch.\n train loss = 0.00233\n train MAE = 0.03292\n test loss = 0.00578\n test MAE = 0.04965\nBatch: 203000\n time = 45.23 milli seconds per batch.\n train loss = 0.00232\n train MAE = 0.03288\n test loss = 0.00573\n test MAE = 0.04929\nBatch: 204000\n time = 45.21 milli seconds per batch.\n train loss = 0.00233\n train MAE = 0.03308\n test loss = 0.00588\n test MAE = 0.04947\nBatch: 205000\n time = 45.20 milli seconds per batch.\n train loss = 0.00229\n train MAE = 0.03277\n test loss = 0.00613\n test MAE = 0.05035\nBatch: 206000\n time = 45.32 milli seconds per batch.\n train loss = 0.00235\n train MAE = 0.03297\n test loss = 0.00566\n test MAE = 0.04947\nBatch: 207000\n time = 45.04 milli seconds per batch.\n train loss = 0.00233\n train MAE = 0.03291\n test loss = 0.00574\n test MAE = 0.04916\nBatch: 208000\n time = 45.21 milli seconds per batch.\n train loss = 0.00235\n train MAE = 0.03312\n test loss = 0.00573\n test MAE = 0.04926\nBatch: 209000\n time = 45.26 milli seconds per batch.\n train loss = 0.00231\n train MAE = 0.03279\n test loss = 0.00574\n test MAE = 0.04938\nBatch: 210000\n time = 45.12 milli seconds per batch.\n train loss = 0.00231\n train MAE = 0.03282\n test loss = 0.00590\n test MAE = 0.05032\nBatch: 211000\n time = 45.19 milli seconds per batch.\n train loss = 0.00233\n train MAE = 
0.03296\n test loss = 0.00598\n test MAE = 0.05023\nBatch: 212000\n time = 45.32 milli seconds per batch.\n train loss = 0.00228\n train MAE = 0.03268\n test loss = 0.00574\n test MAE = 0.04913\nBatch: 213000\n time = 45.17 milli seconds per batch.\n train loss = 0.00228\n train MAE = 0.03255\n test loss = 0.00566\n test MAE = 0.04935\nBatch: 214000\n time = 45.19 milli seconds per batch.\n train loss = 0.00227\n train MAE = 0.03258\n test loss = 0.00573\n test MAE = 0.04919\nBatch: 215000\n time = 44.85 milli seconds per batch.\n train loss = 0.00229\n train MAE = 0.03267\n test loss = 0.00565\n test MAE = 0.04892\nBatch: 216000\n time = 45.22 milli seconds per batch.\n train loss = 0.00228\n train MAE = 0.03261\n test loss = 0.00574\n test MAE = 0.04956\nBatch: 217000\n time = 45.25 milli seconds per batch.\n train loss = 0.00225\n train MAE = 0.03248\n test loss = 0.00593\n test MAE = 0.05029\nBatch: 218000\n time = 45.24 milli seconds per batch.\n train loss = 0.00222\n train MAE = 0.03232\n test loss = 0.00587\n test MAE = 0.04987\nBatch: 219000\n time = 45.15 milli seconds per batch.\n train loss = 0.00228\n train MAE = 0.03258\n test loss = 0.00624\n test MAE = 0.05121\nBatch: 220000\n time = 45.18 milli seconds per batch.\n train loss = 0.00227\n train MAE = 0.03257\n test loss = 0.00584\n test MAE = 0.04999\nBatch: 221000\n time = 44.96 milli seconds per batch.\n train loss = 0.00223\n train MAE = 0.03235\n test loss = 0.00552\n test MAE = 0.04883\nBatch: 222000\n time = 45.34 milli seconds per batch.\n train loss = 0.00225\n train MAE = 0.03241\n test loss = 0.00590\n test MAE = 0.04965\nBatch: 223000\n time = 45.24 milli seconds per batch.\n train loss = 0.00223\n train MAE = 0.03231\n test loss = 0.00569\n test MAE = 0.04907\nBatch: 224000\n time = 45.38 milli seconds per batch.\n train loss = 0.00223\n train MAE = 0.03223\n test loss = 0.00560\n test MAE = 0.04898\nBatch: 225000\n time = 45.23 milli seconds per batch.\n train loss = 0.00224\n train MAE 
= 0.03241\n test loss = 0.00584\n test MAE = 0.04980\nBatch: 226000\n time = 45.26 milli seconds per batch.\n train loss = 0.00219\n train MAE = 0.03210\n test loss = 0.00567\n test MAE = 0.04883\nBatch: 227000\n time = 45.26 milli seconds per batch.\n train loss = 0.00218\n train MAE = 0.03199\n test loss = 0.00607\n test MAE = 0.05096\nBatch: 228000\n time = 45.34 milli seconds per batch.\n train loss = 0.00226\n train MAE = 0.03248\n test loss = 0.00576\n test MAE = 0.04974\nBatch: 229000\n time = 45.28 milli seconds per batch.\n train loss = 0.00222\n train MAE = 0.03222\n test loss = 0.00596\n test MAE = 0.05029\nBatch: 230000\n time = 45.26 milli seconds per batch.\n train loss = 0.00220\n train MAE = 0.03212\n test loss = 0.00613\n test MAE = 0.05072\nBatch: 231000\n time = 45.33 milli seconds per batch.\n train loss = 0.00220\n train MAE = 0.03199\n test loss = 0.00595\n test MAE = 0.05029\nBatch: 232000\n time = 45.22 milli seconds per batch.\n train loss = 0.00218\n train MAE = 0.03196\n test loss = 0.00558\n test MAE = 0.04892\nBatch: 233000\n time = 45.22 milli seconds per batch.\n train loss = 0.00221\n train MAE = 0.03215\n test loss = 0.00570\n test MAE = 0.04916\nBatch: 234000\n time = 45.20 milli seconds per batch.\n train loss = 0.00218\n train MAE = 0.03198\n test loss = 0.00587\n test MAE = 0.05014\nBatch: 235000\n time = 45.19 milli seconds per batch.\n train loss = 0.00220\n train MAE = 0.03202\n test loss = 0.00597\n test MAE = 0.05026\nBatch: 236000\n time = 45.21 milli seconds per batch.\n train loss = 0.00219\n train MAE = 0.03204\n test loss = 0.00582\n test MAE = 0.04968\nBatch: 237000\n time = 45.23 milli seconds per batch.\n train loss = 0.00219\n train MAE = 0.03205\n test loss = 0.00577\n test MAE = 0.04956\nBatch: 238000\n time = 45.27 milli seconds per batch.\n train loss = 0.00217\n train MAE = 0.03192\n test loss = 0.00591\n test MAE = 0.05017\nBatch: 239000\n time = 45.19 milli seconds per batch.\n train loss = 0.00217\n train 
MAE = 0.03186\n test loss = 0.00601\n test MAE = 0.05026\nBatch: 240000\n time = 45.22 milli seconds per batch.\n train loss = 0.00218\n train MAE = 0.03197\n test loss = 0.00590\n test MAE = 0.05035\nFinished Training\nCPU times: user 3h 2min 47s, sys: 53.4 s, total: 3h 3min 40s\nWall time: 3h 2min 25s\n" ], [ "fig, (ax1, ax2) = plt.subplots(nrows=2, sharex=True, figsize=(20, 10))\n\nax1.plot(test_losses, label='testing')\nax1.plot(train_losses, label='training')\nax1.set_title('MSE (training objective)')\nax1.set_ylabel('MSE')\nax1.legend()\n\nax2.plot(test_mae_losses, label='testing')\nax2.plot(train_mae_losses, label='training')\nax2.set_title('MAE')\nax2.set_ylabel('MAE')\nax2.legend();", "_____no_output_____" ], [ "# Get MAPE across entire testing dataset :)\nnet.eval()\nstart_i = 0\nmae_on_all_testset = []\nwhile start_i < len(testing_index) - 1:\n end_i = start_i + TESTING_BATCH_SIZE\n test_index_batch = testing_index[start_i:end_i]\n start_i = end_i\n \n inputs = sat_data_master_cuda[test_index_batch]\n testing_hour_of_day = hours_of_day[test_index_batch]\n testing_clearsky = clearsky[test_index_batch]\n target = pv_data_cuda[test_index_batch]\n output = net(inputs, testing_hour_of_day, testing_clearsky)\n mae = mae_loss_func(output, target).item()\n mae_on_all_testset.append(mae)\n \nnp.mean(mae_on_all_testset)", "_____no_output_____" ], [ "%%time\n# Plot some results!\n\n#batch_index = np.random.randint(low=0, high=len(testing_index)-1, size=BATCH_SIZE)\n\nSTART = 500\nbatch_index = range(START, START+TESTING_BATCH_SIZE + 512)\nbatch_index = testing_index[batch_index]\n\ninputs = sat_data_master_cuda[batch_index]\ntesting_hour_of_day = hours_of_day[batch_index]\ntesting_clearsky = clearsky[batch_index]\ntarget = pv_data_cuda[batch_index]", "CPU times: user 1.98 ms, sys: 17 µs, total: 2 ms\nWall time: 1.09 ms\n" ], [ "net.eval()\noutput = net(inputs, testing_hour_of_day, testing_clearsky)", "_____no_output_____" ], [ "i = 30\nplt.imshow(\n inputs[i, 
0].to(device=torch.device('cpu'), dtype=torch.float32), \n origin='upper')", "_____no_output_____" ], [ "output[i, 0].detach().cpu()", "_____no_output_____" ], [ "target[i, 0].detach().cpu()", "_____no_output_____" ], [ "fig, (ax1, ax2, ax3) = plt.subplots(nrows=3, figsize=(13,10))\n#dt_index = datetime_index[batch_index]\n\nax1.set_title('5-minutely data')\nax1.plot(output[:, 0].detach().cpu(), label='net output')\nax1.plot(target[:, 0].detach().cpu(), label='target')\n\nax2.set_title('Hourly rolling means')\nax2.plot(pd.Series(output[:, 0].detach().cpu()).rolling(12, center=True).mean().values, label='net output (hourly rolling mean)')\nax2.plot(pd.Series(target[:, 0].detach().cpu()).rolling(12, center=True).mean().values, label='target (hourly rolling mean)')\n\nax3.plot(testing_clearsky.detach().cpu())\nax3.set_title('Clearsky irradiance (scaled to have mean=0 and std=1)')\n\nax1.legend()\nax2.legend()\nax1.set_ylabel('PV yield')\nax2.set_ylabel('PV yield')\nax3.set_xlabel('timestep (5 minutes between timesteps)')\nfig.tight_layout();", "_____no_output_____" ], [ "np.unique(datetime_index[batch_index].date)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a2e9bc98871e636756df5c1f7598a7c014eccac
866,402
ipynb
Jupyter Notebook
.ipynb_checkpoints/P1-checkpoint.ipynb
fpgholizadeh/CarND-LaneLines-P1
53285f8946ef7bb5af8df7b789b0fd229d827cd2
[ "MIT" ]
null
null
null
.ipynb_checkpoints/P1-checkpoint.ipynb
fpgholizadeh/CarND-LaneLines-P1
53285f8946ef7bb5af8df7b789b0fd229d827cd2
[ "MIT" ]
null
null
null
.ipynb_checkpoints/P1-checkpoint.ipynb
fpgholizadeh/CarND-LaneLines-P1
53285f8946ef7bb5af8df7b789b0fd229d827cd2
[ "MIT" ]
null
null
null
643.208612
128,500
0.941417
[ [ [ "# Self-Driving Car Engineer Nanodegree\n\n\n## Project: **Finding Lane Lines on the Road** \n***\nIn this project, you will use the tools you learned about in the lesson to identify lane lines on the road. You can develop your pipeline on a series of individual images, and later apply the result to a video stream (really just a series of images). Check out the video clip \"raw-lines-example.mp4\" (also contained in this repository) to see what the output should look like after using the helper functions below. \n\nOnce you have a result that looks roughly like \"raw-lines-example.mp4\", you'll need to get creative and try to average and/or extrapolate the line segments you've detected to map out the full extent of the lane lines. You can see an example of the result you're going for in the video \"P1_example.mp4\". Ultimately, you would like to draw just one line for the left side of the lane, and one for the right.\n\nIn addition to implementing code, there is a brief writeup to complete. The writeup should be completed in a separate file, which can be either a markdown file or a pdf document. There is a [write up template](https://github.com/udacity/CarND-LaneLines-P1/blob/master/writeup_template.md) that can be used to guide the writing process. Completing both the code in the Ipython notebook and the writeup template will cover all of the [rubric points](https://review.udacity.com/#!/rubrics/322/view) for this project.\n\n---\nLet's have a look at our first image called 'test_images/solidWhiteRight.jpg'. 
Run the 2 cells below (hit Shift-Enter or the \"play\" button above) to display the image.\n\n**Note: If, at any point, you encounter frozen display windows or other confounding issues, you can always start again with a clean slate by going to the \"Kernel\" menu above and selecting \"Restart & Clear Output\".**\n\n---", "_____no_output_____" ], [ "**The tools you have are color selection, region of interest selection, grayscaling, Gaussian smoothing, Canny Edge Detection and Hough Tranform line detection. You are also free to explore and try other techniques that were not presented in the lesson. Your goal is piece together a pipeline to detect the line segments in the image, then average/extrapolate them and draw them onto the image for display (as below). Once you have a working pipeline, try it out on the video stream below.**\n\n---\n\n<figure>\n <img src=\"examples/line-segments-example.jpg\" width=\"380\" alt=\"Combined Image\" />\n <figcaption>\n <p></p> \n <p style=\"text-align: center;\"> Your output should look something like this (above) after detecting line segments using the helper functions below </p> \n </figcaption>\n</figure>\n <p></p> \n<figure>\n <img src=\"examples/laneLines_thirdPass.jpg\" width=\"380\" alt=\"Combined Image\" />\n <figcaption>\n <p></p> \n <p style=\"text-align: center;\"> Your goal is to connect/average/extrapolate line segments to get output like this</p> \n </figcaption>\n</figure>", "_____no_output_____" ], [ "**Run the cell below to import some packages. If you get an `import error` for a package you've already installed, try changing your kernel (select the Kernel menu above --> Change Kernel). Still have problems? Try relaunching Jupyter Notebook from the terminal prompt. 
Also, consult the forums for more troubleshooting tips.** ", "_____no_output_____" ], [ "## Import Packages", "_____no_output_____" ] ], [ [ "#importing some useful packages\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport numpy as np\nimport cv2\n%matplotlib inline", "_____no_output_____" ] ], [ [ "## Read in an Image", "_____no_output_____" ] ], [ [ "#reading in an image\nimage = mpimg.imread('test_images/solidWhiteRight.jpg')\n\n#printing out some stats and plotting\nprint('This image is:', type(image), 'with dimensions:', image.shape)\nplt.imshow(image) # if you wanted to show a single color channel image called 'gray', for example, call as plt.imshow(gray, cmap='gray')", "This image is: <class 'numpy.ndarray'> with dimensions: (540, 960, 3)\n" ] ], [ [ "## Ideas for Lane Detection Pipeline", "_____no_output_____" ], [ "**Some OpenCV functions (beyond those introduced in the lesson) that might be useful for this project are:**\n\n`cv2.inRange()` for color selection \n`cv2.fillPoly()` for regions selection \n`cv2.line()` to draw lines on an image given endpoints \n`cv2.addWeighted()` to coadd / overlay two images \n`cv2.cvtColor()` to grayscale or change color \n`cv2.imwrite()` to output images to file \n`cv2.bitwise_and()` to apply a mask to an image\n\n**Check out the OpenCV documentation to learn about these and discover even more awesome functionality!**", "_____no_output_____" ], [ "## Helper Functions", "_____no_output_____" ], [ "Below are some helper functions to help get you started. 
They should look familiar from the lesson!", "_____no_output_____" ] ], [ [ "import math\n\ndef grayscale(img):\n \"\"\"Applies the Grayscale transform\n This will return an image with only one color channel\n but NOTE: to see the returned image as grayscale\n (assuming your grayscaled image is called 'gray')\n you should call plt.imshow(gray, cmap='gray')\"\"\"\n return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n # Or use BGR2GRAY if you read an image with cv2.imread()\n # return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n \ndef grayscale_HSV(img):\n \n # img_gray = (img_gray - img_gray.min()) / (img_gray.max() = img_gray.min()) * 255\n img_hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)\n #img_h = img_hsv[:,:,0]\n #img_s = img_hsv[:,:,1]\n #img_v = img_hsv[:,:,2]\n return img_hsv\n \ndef canny(img, low_threshold, high_threshold):\n \"\"\"Applies the Canny transform\"\"\"\n return cv2.Canny(img, low_threshold, high_threshold)\n\ndef gaussian_blur(img, kernel_size):\n \"\"\"Applies a Gaussian Noise kernel\"\"\"\n return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)\n\ndef region_of_interest(img, vertices):\n \"\"\"\n Applies an image mask.\n \n Only keeps the region of the image defined by the polygon\n formed from `vertices`. The rest of the image is set to black.\n `vertices` should be a numpy array of integer points.\n \"\"\"\n #defining a blank mask to start with\n mask = np.zeros_like(img) \n \n #defining a 3 channel or 1 channel color to fill the mask with depending on the input image\n if len(img.shape) > 2:\n channel_count = img.shape[2] # i.e. 
3 or 4 depending on your image\n ignore_mask_color = (255,) * channel_count\n else:\n ignore_mask_color = 255\n \n #filling pixels inside the polygon defined by \"vertices\" with the fill color \n cv2.fillPoly(mask, vertices, ignore_mask_color)\n \n #returning the image only where mask pixels are nonzero\n masked_image = cv2.bitwise_and(img, mask)\n return masked_image\ndef single_line_points(img,lines):\n \"\"\"finds the coordinates of single line on left and right side \"\"\"\n #if (lines[0]!= 0):\n \n y1 = int(img.shape[0])\n y2 = int(0.64*y1)\n x1 =int((y1-lines[1])/lines[0])\n x2 =int((y2-lines[1])/lines[0])\n #print([x1,y1,x2,y2]) \n\n return [[x1, y1, x2, y2]]\ndef remove_outliers(lines):\n data_median = np.median(lines, axis = 0)\n data_std = np.std(lines, axis = 0)\n cut_off = data_std * 2\n lower, upper = data_median - cut_off, data_median + cut_off\n lines_outlier_removed = [[x,y] for x,y in lines if (lower[0]<x<upper[0]) and (lower[1]<y<upper[1])]\n return lines_outlier_removed\n\ndef averaged_line(img,lines):\n \"\"\"Seperates the left and right lane lines and calculates the average slope \"\"\"\n right_lane = []\n left_lane = []\n if lines is None:\n return None\n for line in lines:\n for x1,y1,x2,y2 in line:\n if (x2-x1)!=0:\n slope = (y2-y1)/(x2-x1) # calculate slope\n theta = np.arctan(slope) * 180 / np.pi \n #exclude outlier slopes\n if (15>abs(theta) or 40<abs(theta)):\n continue\n if(math.isnan(theta) or math.isinf(theta)):\n continue\n intercept = y1 - slope*x1 # calculate intercept\n if theta < 0: ## finds slopes less than zero\n left_lane.append([slope,intercept])\n #print(theta)\n else:\n right_lane.append([slope,intercept])\n ## removing any outlier in left and right lane\n if left_lane == [] or right_lane == [] :\n left_lane_clean = [-1.4,650]\n left_single_line = single_line_points(img,left_lane_clean)\n right_lane_clean = [1.7,-50]\n right_single_line = single_line_points(img,right_lane_clean)\n combined_new_line = [left_single_line, 
right_single_line]\n \n else:\n left_lane_clean = remove_outliers(left_lane) \n left_lane_average = np.mean(left_lane_clean, axis = 0)\n right_lane_clean = remove_outliers(right_lane) \n right_lane_average = np.mean(right_lane_clean, axis = 0) \n left_single_line = single_line_points(img,left_lane_average)\n right_single_line = single_line_points(img,right_lane_average)\n combined_new_line = [left_single_line, right_single_line]\n \n #print('right_lane_clean = ', right_lane_clean)\n ### taking average of slope and intercept to form one line\n \n #print('left_lane_average =',left_lane_average)\n #print('right_lane_average =',right_lane_average)\n #print(\"combined_new_line=\",combined_new_line)\n return combined_new_line\n\ndef draw_lines(img, lines):\n \"\"\"\n This function draws `lines` with `color` and `thickness`. \n Lines are drawn on the image inplace (mutates the image).\n If you want to make the lines semi-transparent, think about combining\n this function with the weighted_img() function below\n \"\"\"\n line_image = np.zeros_like(img)\n for line in lines:\n for x1, y1, x2, y2 in line:\n cv2.line(line_image,(x1,y1),(x2,y2),(255,0,0),10)\n return line_image\n \n\ndef weighted_img(img, initial_img, α=0.8, β=1., γ=0.):\n \"\"\"\n `img` is the output of the hough_lines(), An image with lines drawn on it.\n Should be a blank image (all black) with lines drawn on it.\n \n `initial_img` should be the image before any processing.\n \n The result image is computed as follows:\n \n initial_img * α + img * β + γ\n NOTE: initial_img and img must be the same shape!\n \"\"\"\n weighted_image = cv2.addWeighted(initial_img, α, img, β, γ)\n return weighted_image\n\n# Read each image from the directory, then make a copy of each image\nimport os\ntest_image_dir = \"test_images/\"\ntest_images = os.listdir(test_image_dir)\nfor img in test_images:\n image = mpimg.imread(test_image_dir + img)\n image_resize =cv2.resize(image,(960,540)) \n lane_image = np.copy(image_resize)\n 
print(img)\n# grayscale the image\n gray = grayscale(lane_image)\n \n# Define a kernel size and apply Gaussian smoothing\n kernel_size = 5\n blur_gray = gaussian_blur(gray, kernel_size)\n \n# Define our parameters for Canny and apply\n low_threshold = 50\n high_threshold = 150\n canny= cv2.Canny(blur_gray, low_threshold, high_threshold)\n \n# Define region of interest and creat a maked image\n imshape = lane_image.shape\n vertices = np.array([[(150,imshape[0]),(950, imshape[0]), (550, 320), (440,320)]], dtype=np.int32)\n masked_image = region_of_interest(canny, vertices)\n \n# Define the Hough transform parameters\n# Make a blank the same size as our image to draw on\n rho = 1\n theta = np.pi/180\n threshold = 35\n minLineLength = 5\n maxLineGap = 2\n# Run Hough on edge detected image \n lines = cv2.HoughLinesP(masked_image, rho, theta, threshold, np.array([]), minLineLength, maxLineGap)\n print(\"lines =\", lines)\n print(\"line_shape =\", lines.shape)\n print(\"lines_length =\",len(lines))\n \n# find the left and right averaged lines\n combined_new_line = averaged_line(lane_image,lines)\n \n# Iterate over the output \"lines\" and draw lines on the blank \n line_image = draw_lines(lane_image, combined_new_line)\n weighted_image = weighted_img(line_image, lane_image)\n plt.imshow(weighted_image)\n plt.show()\n plt.imsave('test_images_output/' + img, weighted_image)\n\n \n", "solidWhiteCurve.jpg\nlines = [[[527 333 898 538]]\n\n [[590 372 798 492]]\n\n [[524 335 877 538]]\n\n [[288 454 346 410]]\n\n [[327 433 342 420]]\n\n [[290 463 347 417]]\n\n [[280 460 306 441]]\n\n [[312 435 345 410]]\n\n [[451 335 469 322]]\n\n [[449 332 461 322]]\n\n [[512 325 533 337]]\n\n [[842 508 864 520]]]\nline_shape = (12, 1, 4)\nlines_length = 12\n" ] ], [ [ "## Build a Lane Finding Pipeline\n\n", "_____no_output_____" ], [ "Build the pipeline and run your solution on all test_images. 
Make copies into the `test_images_output` directory, and you can use the images in your writeup report.\n\nTry tuning the various parameters, especially the low and high Canny thresholds as well as the Hough lines parameters.", "_____no_output_____" ] ], [ [ "# TODO: Build your pipeline that will draw lane lines on the test_images\n# then save them to the test_images_output directory.\n", "_____no_output_____" ] ], [ [ "## Test on Videos\n\nYou know what's cooler than drawing lanes over images? Drawing lanes over video!\n\nWe can test our solution on two provided videos:\n\n`solidWhiteRight.mp4`\n\n`solidYellowLeft.mp4`\n\n**Note: if you get an import error when you run the next cell, try changing your kernel (select the Kernel menu above --> Change Kernel). Still have problems? Try relaunching Jupyter Notebook from the terminal prompt. Also, consult the forums for more troubleshooting tips.**\n\n**If you get an error that looks like this:**\n```\nNeedDownloadError: Need ffmpeg exe. \nYou can download it by calling: \nimageio.plugins.ffmpeg.download()\n```\n**Follow the instructions in the error message and check out [this forum post](https://discussions.udacity.com/t/project-error-of-test-on-videos/274082) for more troubleshooting tips across operating systems.**", "_____no_output_____" ] ], [ [ "# Import everything needed to edit/save/watch video clips\nfrom moviepy.editor import VideoFileClip\nfrom IPython.display import HTML", "_____no_output_____" ], [ "def process_image(image):\n # NOTE: The output you return should be a color image (3 channel) for processing video below\n # TODO: put your pipeline here,\n # you should return the final output (image where lines are drawn on lanes)\n # grayscale the image\n lane_image = np.copy(image)\n gray = grayscale(lane_image)\n \n# Define a kernel size and apply Gaussian smoothing\n kernel_size = 5\n blur_gray = gaussian_blur(gray, kernel_size)\n \n# Define our parameters for Canny and apply\n low_threshold = 50\n 
high_threshold = 150\n canny= cv2.Canny(blur_gray, low_threshold, high_threshold)\n \n# Define region of interest and creat a maked image\n imshape = lane_image.shape\n vertices = np.array([[(150,imshape[0]),(950, imshape[0]), (550, 320), (440,320)]], dtype=np.int32)\n masked_image = region_of_interest(canny, vertices)\n \n# Define the Hough transform parameters\n# Make a blank the same size as our image to draw on\n rho = 1\n theta = np.pi/180\n threshold = 15\n minLineLength = 20\n maxLineGap = 10\n# Run Hough on edge detected image \n lines = cv2.HoughLinesP(masked_image, rho, theta, threshold, np.array([]), minLineLength, maxLineGap)\n #print(lines.shape)\n# find the left and right averaged lines\n combined_new_line = averaged_line(lane_image,lines)\n# Iterate over the output \"lines\" and draw lines on the blank \n line_image = draw_lines(lane_image, combined_new_line)\n results = weighted_img(line_image, lane_image) \n\n\n return results", "_____no_output_____" ] ], [ [ "Let's try the one with the solid white lane on the right first ...", "_____no_output_____" ] ], [ [ "white_output = 'test_videos_output/solidWhiteRight.mp4'\n## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video\n## To do so add .subclip(start_second,end_second) to the end of the line below\n## Where start_second and end_second are integer values representing the start and end of the subclip\n## You may also uncomment the following line for a subclip of the first 5 seconds\n##clip1 = VideoFileClip(\"test_videos/solidWhiteRight.mp4\").subclip(0,5)\nclip1 = VideoFileClip(\"test_videos/solidWhiteRight.mp4\")\nwhite_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!!\n%time white_clip.write_videofile(white_output, audio=False)", "Moviepy - Building video test_videos_output/solidWhiteRight.mp4.\nMoviepy - Writing video test_videos_output/solidWhiteRight.mp4\n\n" ] ], [ [ "Play the video inline, or if you prefer find the 
video in your filesystem (should be in the same directory) and play it in your video player of choice.", "_____no_output_____" ] ], [ [ "HTML(\"\"\"\n<video width=\"960\" height=\"540\" controls>\n <source src=\"{0}\">\n</video>\n\"\"\".format(white_output))", "_____no_output_____" ] ], [ [ "## Improve the draw_lines() function\n\n**At this point, if you were successful with making the pipeline and tuning parameters, you probably have the Hough line segments drawn onto the road, but what about identifying the full extent of the lane and marking it clearly as in the example video (P1_example.mp4)? Think about defining a line to run the full length of the visible lane based on the line segments you identified with the Hough Transform. As mentioned previously, try to average and/or extrapolate the line segments you've detected to map out the full extent of the lane lines. You can see an example of the result you're going for in the video \"P1_example.mp4\".**\n\n**Go back and modify your draw_lines function accordingly and try re-running your pipeline. The new output should draw a single, solid line over the left lane line and a single, solid line over the right lane line. The lines should start from the bottom of the image and extend out to the top of the region of interest.**", "_____no_output_____" ], [ "Now for the one with the solid yellow lane on the left. 
This one's more tricky!", "_____no_output_____" ] ], [ [ "yellow_output = 'test_videos_output/solidYellowLeft.mp4'\n## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video\n## To do so add .subclip(start_second,end_second) to the end of the line below\n## Where start_second and end_second are integer values representing the start and end of the subclip\n## You may also uncomment the following line for a subclip of the first 5 seconds\n##clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4').subclip(0,5)\nclip2 = VideoFileClip('test_videos/solidYellowLeft.mp4')\nyellow_clip = clip2.fl_image(process_image)\n%time yellow_clip.write_videofile(yellow_output, audio=False)", "Moviepy - Building video test_videos_output/solidYellowLeft.mp4.\nMoviepy - Writing video test_videos_output/solidYellowLeft.mp4\n\n" ], [ "HTML(\"\"\"\n<video width=\"960\" height=\"540\" controls>\n <source src=\"{0}\">\n</video>\n\"\"\".format(yellow_output))", "_____no_output_____" ] ], [ [ "## Optional Challenge\n\nTry your lane finding pipeline on the video below. Does it still work? Can you figure out a way to make it more robust? 
If you're up for the challenge, modify your pipeline so it works with this video and submit it along with the rest of your project!", "_____no_output_____" ] ], [ [ "def process_image_challenge(image):\n # NOTE: The output you return should be a color image (3 channel) for processing video below\n # TODO: put your pipeline here,\n # you should return the final output (image where lines are drawn on lanes)\n # grayscale the image\n lane_image = np.copy(image)\n hsv = grayscale_HSV(lane_image)\n lower_yellow = np.array([20, 100, 100])\n upper_yellow = np.array([40, 255, 255])\n yellow_mask = cv2.inRange(hsv, lower_yellow, upper_yellow)\n lower_white = np.array([0, 0, 215])\n upper_white = np.array([180, 40, 255])\n white_mask = cv2.inRange(hsv, lower_white, upper_white)\n \n color_mask = cv2.bitwise_or(yellow_mask, white_mask)\n \n gray_img = grayscale(lane_image) # convert to gray image \n \n darken = (gray_img / 3).astype(np.uint8)\n color_masked = cv2.bitwise_or(darken, color_mask)\n \n \n gauss_img = gaussian_blur(color_masked, 7) # 低通过滤器,抑制高频部分,从而消除噪点\n\n\n #canny_img = canny(gauss_img, 100, 150) # edge detection\n \n \n \n \n \n# Define a kernel size and apply Gaussian smoothing\n #kernel_size = 5\n #blur_gray = gaussian_blur(gray, kernel_size)\n \n# Define our parameters for Canny and apply\n low_threshold = 50\n high_threshold = 150\n canny= cv2.Canny(gauss_img , low_threshold, high_threshold)\n \n# Define region of interest and creat a maked image\n imshape = lane_image.shape\n vertices = np.array([[(150,imshape[0]),(950, imshape[0]), (550, 320), (440,320)]], dtype=np.int32)\n masked_image = region_of_interest(canny, vertices)\n \n# Define the Hough transform parameters\n# Make a blank the same size as our image to draw on\n rho = 1\n theta = np.pi/180\n threshold = 15\n minLineLength = 20\n maxLineGap = 10\n# Run Hough on edge detected image \n lines = cv2.HoughLinesP(masked_image, rho, theta, threshold, np.array([]), minLineLength, maxLineGap)\n 
#print(lines.shape)\n# find the left and right averaged lines\n combined_new_line = averaged_line(lane_image,lines)\n# Iterate over the output \"lines\" and draw lines on the blank \n line_image = draw_lines(lane_image, combined_new_line)\n results = weighted_img(line_image, lane_image) \n\n\n return results", "_____no_output_____" ], [ "challenge_output = 'test_videos_output/challenge.mp4'\n## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video\n## To do so add .subclip(start_second,end_second) to the end of the line below\n## Where start_second and end_second are integer values representing the start and end of the subclip\n## You may also uncomment the following line for a subclip of the first 5 seconds\n##clip3 = VideoFileClip('test_videos/challenge.mp4').subclip(0,5)\nclip3 = VideoFileClip('test_videos/challenge.mp4')\nchallenge_clip = clip3.fl_image(process_image_challenge)\n%time challenge_clip.write_videofile(challenge_output, audio=False)", "Moviepy - Building video test_videos_output/challenge.mp4.\nMoviepy - Writing video test_videos_output/challenge.mp4\n\n" ], [ "HTML(\"\"\"\n<video width=\"960\" height=\"540\" controls>\n <source src=\"{0}\">\n</video>\n\"\"\".format(challenge_output))", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
4a2eadaa51a9086fd050614d3c082c9c413457d5
151,454
ipynb
Jupyter Notebook
Temperature variations/Temperature variations by city plot.ipynb
project-lovelace/lovelace-code
0eca51da63c61a8366040c9def2b03b0cf5628f5
[ "MIT" ]
3
2019-01-07T00:47:50.000Z
2021-12-25T16:31:31.000Z
Temperature variations/Temperature variations by city plot.ipynb
project-lovelace/lovelace-code-snippets
0eca51da63c61a8366040c9def2b03b0cf5628f5
[ "MIT" ]
null
null
null
Temperature variations/Temperature variations by city plot.ipynb
project-lovelace/lovelace-code-snippets
0eca51da63c61a8366040c9def2b03b0cf5628f5
[ "MIT" ]
null
null
null
1,442.419048
148,972
0.958568
[ [ [ "import matplotlib\nimport matplotlib.pyplot as plt\n\nmatplotlib.rcParams['figure.dpi'] = 200", "_____no_output_____" ], [ "from numpy import mean, var", "_____no_output_____" ], [ "T = {\n 'Saskatoon, Canada': [-13.9, -11.4, -4.9, 5.2, 11.8, 16.1, 19.0, 18.2, 12.0, 4.4, -5.2, -12.4],\n 'Baku, Azerbaijan': [4.4, 4.2, 7.0, 12.9, 18.5, 23.5, 26.4, 26.3, 22.5, 16.6, 11.2, 7.3],\n 'Khartoum, Sudan': [23.2, 25.0, 28.7, 31.9, 34.5, 34.3, 32.1, 31.5, 32.5, 32.4, 28.1, 24.5],\n 'Singapore': [26.5, 27.1, 27.5, 28.0, 28.3, 28.3, 27.9, 27.9, 27.6, 27.6, 27.0, 26.4],\n 'San Juan, Argentina': [27.1, 25.5, 22.8, 17.2, 12.2, 8.3, 7.7, 10.6, 14.4, 19.8, 23.4, 26.3]\n}\n\nfor city in T.keys():\n plt.plot(range(1, 13), T[city], label=city, marker='.')\n \nplt.ylabel(\"Temperature (°C)\")\nplt.xlim([1, 12])\nplt.xticks(range(1, 13), [\"Jan\", \"Feb\", \"Mar\", \"Apr\", \"May\", \"Jun\", \"Jul\", \"Aug\", \"Sep\", \"Oct\", \"Nov\", \"Dec\"], rotation=45)\nplt.legend(frameon=False)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code" ] ]
4a2ed1cedab8ec514d87f258c18fa7bd3c20688d
334,792
ipynb
Jupyter Notebook
nb/plots.ipynb
HW21/TeachSpice
8cf0ba8603dd82eeb35b45e964df9ca69cd09747
[ "BSD-3-Clause" ]
2
2019-10-09T12:26:26.000Z
2019-11-15T16:10:16.000Z
nb/plots.ipynb
HW21/TeachSpice
8cf0ba8603dd82eeb35b45e964df9ca69cd09747
[ "BSD-3-Clause" ]
null
null
null
nb/plots.ipynb
HW21/TeachSpice
8cf0ba8603dd82eeb35b45e964df9ca69cd09747
[ "BSD-3-Clause" ]
1
2019-10-09T12:28:20.000Z
2019-10-09T12:28:20.000Z
1,119.705686
61,236
0.954384
[ [ [ "import numpy as np \nimport matplotlib.pyplot as plt\nplt.rcParams['figure.figsize'] = [16, 8]", "_____no_output_____" ], [ "from spice import Circuit, DcOp, Resistor, Mos, Diode\nfrom spice.analysis import Contour", "_____no_output_____" ], [ "class DutContour:\n \"\"\" Stores a DUT and its contour results \"\"\"\n def __init__(self, dut_cls):\n self.dut_cls = dut_cls\n self.xs = []\n self.ys = []\n self.dxs = []\n \n def run(self): \n for k in range(11):\n vgs = k / 10.0\n dut = self.dut_cls(vgs)\n an = Contour(dut)\n x, y, dx = an.explore(xmin=-1.0, xmax=2.0, xstep=0.1)\n x=np.transpose(x)\n y=np.transpose(y)\n dx=np.transpose(dx)\n self.xs.append(x[0])\n self.ys.append(y[0])\n self.dxs.append(dx[0])", "_____no_output_____" ], [ "from spice.tests.test_nmos_inv import nmos_inv\nn = DutContour(nmos_inv)\nn.run()", "_____no_output_____" ], [ "from spice.tests.test_pmos_inv import pmos_inv\np = DutContour(pmos_inv)\np.run()", "_____no_output_____" ], [ "from spice.tests.test_cmos_inv import cmos_inv\nc = DutContour(cmos_inv)\nc.run()", "_____no_output_____" ], [ "def plots(c):\n fig, ax = plt.subplots()\n for k in range(len(c.xs)):\n ax.plot(c.xs[k], c.ys[k], label=k)\n # ax.set_ylim(-0.001,0.001)\n ax.legend()\n\n fig, ax = plt.subplots()\n for k in range(len(c.xs)):\n dys = np.diff(c.ys[k]) / 1e-2\n ax.plot(c.xs[k][:-1], dys, label=k)\n # ax.set_ylim(-1e-3,1e-3)\n ax.legend()\n\n fig, ax = plt.subplots()\n for k in range(len(c.xs)):\n ax.plot(c.xs[k], c.dxs[k], label=k)\n # ax.set_ylim(-20,20)\n ax.legend()\n\n# fig, ax = plt.subplots()\n# dys = -1* np.diff(c.ys[5]) / 1e-2\n# ax.plot(c.xs[5][:-1], c.ys[5][:-1]/dys, label=3)\n# ax.plot(c.xs[5], c.dxs[5], label=k)\n# ax.plot([-1.0,2.0],[0,0])\n# # plt.xlim(1.1,1.3)\n\n# fig, ax = plt.subplots()\n# dys = -1* np.diff(c.ys[5]) / 1e-2\n# ax.plot(c.xs[5][:-1], c.ys[5][:-1], label=3)\n# ax.plot([-1.0,2.0],[0,0])\n# # plt.plot(xs[5][:-1], ys[5][:-1]/dys, label=3)\n# # plt.xlim(1.1,1.3)\n# # plt.ylim(.001,.002)", 
"_____no_output_____" ], [ "plots(n)", "_____no_output_____" ], [ "plots(p)", "_____no_output_____" ], [ "plots(c)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a2ed9f0be31f9eb5ecd742fdc2f8495acc153dd
57,530
ipynb
Jupyter Notebook
Chapter03/Exercise3.04/Exercise3.04.ipynb
TrainingByPackt/Data-Science-in-Python-the-Simple-Way
28bbdc8489a467081d4d9e9bcf57ffb30f21cb86
[ "MIT" ]
4
2019-06-24T11:40:40.000Z
2019-08-17T05:47:20.000Z
Chapter03/Exercise3.04/Exercise3.04.ipynb
TrainingByPackt/Data-Science-in-Python-the-Simple-Way
28bbdc8489a467081d4d9e9bcf57ffb30f21cb86
[ "MIT" ]
null
null
null
Chapter03/Exercise3.04/Exercise3.04.ipynb
TrainingByPackt/Data-Science-in-Python-the-Simple-Way
28bbdc8489a467081d4d9e9bcf57ffb30f21cb86
[ "MIT" ]
6
2019-10-18T00:42:08.000Z
2022-03-22T04:04:06.000Z
40.514085
483
0.340588
[ [ [ "from google.colab import drive\ndrive.mount('/content/drive')", "Go to this URL in a browser: https://accounts.google.com/o/oauth2/auth?client_id=947318989803-6bn6qk8qdgf4n4g3pfee6491hc0brc4i.apps.googleusercontent.com&redirect_uri=urn%3Aietf%3Awg%3Aoauth%3A2.0%3Aoob&scope=email%20https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fdocs.test%20https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fdrive%20https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fdrive.photos.readonly%20https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fpeopleapi.readonly&response_type=code\n\nEnter your authorization code:\n··········\nMounted at /content/drive\n" ], [ "# Installing the necessary packages\n!pip install dfply\n!pip install ggplot\n!pip install pandas==0.19.2", "Collecting dfply\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/53/91/18ab48c64661252dadff685f8ddbc6f456302923918f488714ee2345d49b/dfply-0.3.3-py3-none-any.whl (612kB)\n\u001b[K |████████████████████████████████| 614kB 2.8MB/s \n\u001b[?25hRequirement already satisfied: numpy in /usr/local/lib/python3.6/dist-packages (from dfply) (1.16.5)\nRequirement already satisfied: pandas in /usr/local/lib/python3.6/dist-packages (from dfply) (0.24.2)\nRequirement already satisfied: pytz>=2011k in /usr/local/lib/python3.6/dist-packages (from pandas->dfply) (2018.9)\nRequirement already satisfied: python-dateutil>=2.5.0 in /usr/local/lib/python3.6/dist-packages (from pandas->dfply) (2.5.3)\nRequirement already satisfied: six>=1.5 in /usr/local/lib/python3.6/dist-packages (from python-dateutil>=2.5.0->pandas->dfply) (1.12.0)\nInstalling collected packages: dfply\nSuccessfully installed dfply-0.3.3\nCollecting ggplot\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/48/04/5c88cc51c6713583f2dc78a5296adb9741505348c323d5875bc976143db2/ggplot-0.11.5-py2.py3-none-any.whl (2.2MB)\n\u001b[K |████████████████████████████████| 2.2MB 2.8MB/s \n\u001b[?25hRequirement already satisfied: patsy>=0.4 in /usr/local/lib/python3.6/dist-packages 
(from ggplot) (0.5.1)\nRequirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from ggplot) (1.12.0)\nCollecting brewer2mpl (from ggplot)\n Downloading https://files.pythonhosted.org/packages/84/57/00c45a199719e617db0875181134fcb3aeef701deae346547ac722eaaf5e/brewer2mpl-1.4.1-py2.py3-none-any.whl\nRequirement already satisfied: cycler in /usr/local/lib/python3.6/dist-packages (from ggplot) (0.10.0)\nRequirement already satisfied: matplotlib in /usr/local/lib/python3.6/dist-packages (from ggplot) (3.0.3)\nRequirement already satisfied: scipy in /usr/local/lib/python3.6/dist-packages (from ggplot) (1.3.1)\nRequirement already satisfied: pandas in /usr/local/lib/python3.6/dist-packages (from ggplot) (0.24.2)\nRequirement already satisfied: statsmodels in /usr/local/lib/python3.6/dist-packages (from ggplot) (0.10.1)\nRequirement already satisfied: numpy in /usr/local/lib/python3.6/dist-packages (from ggplot) (1.16.5)\nRequirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib->ggplot) (2.4.2)\nRequirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib->ggplot) (1.1.0)\nRequirement already satisfied: python-dateutil>=2.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib->ggplot) (2.5.3)\nRequirement already satisfied: pytz>=2011k in /usr/local/lib/python3.6/dist-packages (from pandas->ggplot) (2018.9)\nRequirement already satisfied: setuptools in /usr/local/lib/python3.6/dist-packages (from kiwisolver>=1.0.1->matplotlib->ggplot) (41.2.0)\nInstalling collected packages: brewer2mpl, ggplot\nSuccessfully installed brewer2mpl-1.4.1 ggplot-0.11.5\nCollecting pandas==0.19.2\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/f1/33/b455d0af521b76b1982eac1ed1c30c9e67f9885f54c3349aef0b0c547d85/pandas-0.19.2-cp36-cp36m-manylinux1_x86_64.whl (18.9MB)\n\u001b[K |████████████████████████████████| 18.9MB 2.7MB/s 
\n\u001b[?25hRequirement already satisfied: pytz>=2011k in /usr/local/lib/python3.6/dist-packages (from pandas==0.19.2) (2018.9)\nRequirement already satisfied: numpy>=1.7.0 in /usr/local/lib/python3.6/dist-packages (from pandas==0.19.2) (1.16.5)\nRequirement already satisfied: python-dateutil>=2 in /usr/local/lib/python3.6/dist-packages (from pandas==0.19.2) (2.5.3)\nRequirement already satisfied: six>=1.5 in /usr/local/lib/python3.6/dist-packages (from python-dateutil>=2->pandas==0.19.2) (1.12.0)\n\u001b[31mERROR: plotnine 0.5.1 has requirement pandas>=0.23.4, but you'll have pandas 0.19.2 which is incompatible.\u001b[0m\n\u001b[31mERROR: mizani 0.5.4 has requirement pandas>=0.23.4, but you'll have pandas 0.19.2 which is incompatible.\u001b[0m\n\u001b[31mERROR: google-colab 1.0.0 has requirement pandas~=0.24.0, but you'll have pandas 0.19.2 which is incompatible.\u001b[0m\n\u001b[31mERROR: featuretools 0.4.1 has requirement pandas>=0.23.0, but you'll have pandas 0.19.2 which is incompatible.\u001b[0m\n\u001b[31mERROR: fbprophet 0.5 has requirement pandas>=0.23.4, but you'll have pandas 0.19.2 which is incompatible.\u001b[0m\nInstalling collected packages: pandas\n Found existing installation: pandas 0.24.2\n Uninstalling pandas-0.24.2:\n Successfully uninstalled pandas-0.24.2\nSuccessfully installed pandas-0.19.2\n" ], [ "#Loading data from the google drive to colab notebook\n\n# Please change the filename as per the location where the file is stored\n\nfilename = '/content/drive/My Drive/Packt_Colab/bank-full.csv'", "_____no_output_____" ], [ "# Importing necessary packages\nfrom dfply import *\nfrom ggplot import *", "_____no_output_____" ], [ "bankData = pd.read_csv(filename,sep=\";\")", "_____no_output_____" ], [ "# Normalising data\nfrom sklearn import preprocessing\nx = bankData[['balance']].values.astype(float)\n# Creating the scaling function\nminmaxScaler = preprocessing.MinMaxScaler()\n# Transforming the balance data by normalising it with 
minmaxScalre\nbankData['balanceTran'] = minmaxScaler.fit_transform(x)\n# Printing the head of the data\nbankData.head()", "_____no_output_____" ], [ "# Adding a small numerical constant to eliminate 0 values\n\nbankData['balanceTran'] = bankData['balanceTran'] + 0.00001", "_____no_output_____" ], [ "# Let us transform values for loan data\nbankData['loanTran'] = 1\n# Giving a weight of 5 if there is no loan\nbankData.loanTran[bankData['loan'] == 'no'] = 5\nbankData.head()", "/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:3: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n This is separate from the ipykernel package so we can avoid doing imports until\n" ], [ "# Let us transform values for Housing data\nbankData['houseTran'] = 5\n# Giving a weight of 1 if the customer has a house\nbankData.houseTran[bankData['housing'] == 'no'] = 1\n\nbankData.head()", "/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:3: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n This is separate from the ipykernel package so we can avoid doing imports until\n" ], [ "# Let us now create the new variable which is a product of all these\nbankData['assetIndex'] = bankData['balanceTran'] * bankData['loanTran'] * bankData['houseTran']\nbankData.head()", "_____no_output_____" ], [ "# Finding the quantile\nnp.quantile(bankData['assetIndex'],[0.25,0.5,0.75])", "_____no_output_____" ], [ "# Creating quantiles from the assetindex data\nbankData['assetClass'] = 'Quant1'\n\nbankData.assetClass[(bankData['assetIndex'] > 0.38) & (bankData['assetIndex'] < 0.57)] = 'Quant2'\n\nbankData.assetClass[(bankData['assetIndex'] > 0.57) & 
(bankData['assetIndex'] < 1.9)] = 'Quant3'\n\nbankData.assetClass[bankData['assetIndex'] > 1.9] = 'Quant4'\n\nbankData.head()", "/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:3: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n This is separate from the ipykernel package so we can avoid doing imports until\n/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:5: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n \"\"\"\n/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:7: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n import sys\n" ], [ "# Calculating total of each asset class\nassetTot = (bankData >> group_by(X.assetClass) >> summarise(assetTot = X.y.count()))\n# Calculating the category wise counts\nassetProp = (bankData >> group_by(X.assetClass,X.y) >> summarise(assetCat = X.y.count()))", "_____no_output_____" ], [ "# Merging both the data frames\nassetComb = (pd.merge(assetProp,assetTot,left_on = ['assetClass'],right_on = ['assetClass']) >> mutate(catProp = (X.assetCat/X.assetTot)*100))\nassetComb", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a2ef673e346379da84304ce86309cc70f237083
334,141
ipynb
Jupyter Notebook
10_bayesian_machine_learning/05_stochastic_volatility.ipynb
driscolljt/Machine-Learning-for-Algorithmic-Trading-Second-Edition_Original
2a39f17a6112618bb0fe5455328edd3b2881e4a6
[ "MIT" ]
336
2020-09-24T01:35:33.000Z
2022-03-29T18:35:31.000Z
10_bayesian_machine_learning/05_stochastic_volatility.ipynb
ikamanu/Machine-Learning-for-Algorithmic-Trading-Second-Edition_Original
ca5817ad00890fa6d6321a27277ee9a1a4f2fcf4
[ "MIT" ]
10
2020-12-18T02:45:32.000Z
2021-12-17T19:21:09.000Z
10_bayesian_machine_learning/05_stochastic_volatility.ipynb
ikamanu/Machine-Learning-for-Algorithmic-Trading-Second-Edition_Original
ca5817ad00890fa6d6321a27277ee9a1a4f2fcf4
[ "MIT" ]
143
2020-09-25T08:35:04.000Z
2022-03-31T01:39:34.000Z
526.206299
141,256
0.939606
[ [ [ "# Stochastic Volatility model", "_____no_output_____" ], [ "## Imports & Settings", "_____no_output_____" ] ], [ [ "import warnings\nwarnings.filterwarnings('ignore')", "_____no_output_____" ], [ "%matplotlib inline\n\nfrom pathlib import Path\n\nimport numpy as np\nimport pandas as pd\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import FuncFormatter\nimport seaborn as sns\n\nimport pymc3 as pm\nfrom pymc3.distributions.timeseries import GaussianRandomWalk", "_____no_output_____" ], [ "sns.set_style('whitegrid')\n# model_path = Path('models')", "_____no_output_____" ] ], [ [ "## Model assumptions", "_____no_output_____" ], [ "Asset prices have time-varying volatility (variance of day over day `returns`). In some periods, returns are highly variable, while in others very stable. Stochastic volatility models model this with a latent volatility variable, modeled as a stochastic process. The following model is similar to the one described in the No-U-Turn Sampler paper, Hoffman (2011) p21.\n\n$$\\begin{align*} \n\\sigma &\\sim \\text{Exponential}(50)\\\\\n\\nu &\\sim \\text{Exponential}(.1)\\\\\ns_i &\\sim \\text{Normal}(s_{i-1}, \\sigma^{-2})\\\\\n\\log(r_i) &\\sim t(\\nu, 0, \\exp(-2 s_i))\n\\end{align*}$$\n\nHere, $r$ is the daily return series and $s$ is the latent log volatility process.", "_____no_output_____" ], [ "## Get Return Data", "_____no_output_____" ], [ "First we load some daily returns of the S&P 500.", "_____no_output_____" ] ], [ [ "prices = pd.read_hdf('../data/assets.h5', key='sp500/stooq').loc['2000':, 'close']\nlog_returns = np.log(prices).diff().dropna()", "_____no_output_____" ], [ "ax = log_returns.plot(figsize=(15, 4),\n title='S&P 500 | Daily Log Returns',\n rot=0)\nax.yaxis.set_major_formatter(FuncFormatter(lambda y, _: '{:.0%}'.format(y)))\nsns.despine()\nplt.tight_layout();", "_____no_output_____" ] ], [ [ "As you can see, the volatility seems to change over time quite a bit while clustering around certain 
time-periods, most notably the 2009 financial crash.", "_____no_output_____" ], [ "## Specify Model in PyMC3", "_____no_output_____" ], [ "Specifying the model in `PyMC3` mirrors its statistical specification. ", "_____no_output_____" ] ], [ [ "with pm.Model() as model:\n step_size = pm.Exponential('sigma', 50.)\n s = GaussianRandomWalk('s', sd=step_size, \n shape=len(log_returns))\n nu = pm.Exponential('nu', .1)\n r = pm.StudentT('r', nu=nu, \n lam=pm.math.exp(-2*s), \n observed=log_returns)", "_____no_output_____" ], [ "pm.model_to_graphviz(model)", "_____no_output_____" ] ], [ [ "## Fit Model", "_____no_output_____" ], [ "For this model, the full maximum a posteriori (MAP) point is degenerate and has infinite density. NUTS, however, gives the correct posterior.", "_____no_output_____" ] ], [ [ "with model:\n trace = pm.sample(tune=2000, \n draws=5000,\n chains=4,\n cores=1,\n target_accept=.9)", "Auto-assigning NUTS sampler...\nInitializing NUTS using jitter+adapt_diag...\nSequential sampling (4 chains in 1 job)\nNUTS: [nu, s, sigma]\n" ] ], [ [ "Optionally, persist result as pickle:", "_____no_output_____" ] ], [ [ "# with open('model_vol.pkl', 'wb') as buff:\n# pickle.dump({'model': model, 'trace': trace}, buff)", "_____no_output_____" ] ], [ [ "## Evaluate results", "_____no_output_____" ], [ "### Trace Plot", "_____no_output_____" ] ], [ [ "pm.traceplot(trace, varnames=['sigma', 'nu']);", "_____no_output_____" ] ], [ [ "Looking at the returns over time and overlaying the estimated standard deviation we can see how the model tracks the volatility over time.", "_____no_output_____" ], [ "### In-Sample Predictions", "_____no_output_____" ] ], [ [ "pm.trace_to_dataframe(trace).info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 20000 entries, 0 to 19999\nColumns: 5032 entries, s__0 to nu\ndtypes: float64(5032)\nmemory usage: 767.8 MB\n" ], [ "fig, ax = plt.subplots(figsize=(15, 5))\n\nlog_returns.plot(ax=ax, lw=.5, xlim=('2000', '2020'), rot=0, \n 
title='In-Sample Fit of Stochastic Volatility Model')\n\nax.plot(log_returns.index, np.exp(trace[s]).T, 'r', alpha=.03, lw=.5);\n\nax.set(xlabel='Time', ylabel='Returns')\nax.legend(['S&P 500 (log returns)', 'Stochastic Volatility Model'])\nax.yaxis.set_major_formatter(FuncFormatter(lambda y, _: '{:.0%}'.format(y))) \nsns.despine()\nfig.tight_layout();", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ] ]
4a2f058282a419b96c9fc2ba87a299ce50c2a679
6,810
ipynb
Jupyter Notebook
docs/dialect/graphblas_dialect_tutorials/graphblas_optimize/fuse_multiply_apply.ipynb
vishalbelsare/mlir-graphblas
ae9e5bd97ad63f0182230dbcde2a205b4086e607
[ "Apache-2.0", "MIT" ]
null
null
null
docs/dialect/graphblas_dialect_tutorials/graphblas_optimize/fuse_multiply_apply.ipynb
vishalbelsare/mlir-graphblas
ae9e5bd97ad63f0182230dbcde2a205b4086e607
[ "Apache-2.0", "MIT" ]
null
null
null
docs/dialect/graphblas_dialect_tutorials/graphblas_optimize/fuse_multiply_apply.ipynb
vishalbelsare/mlir-graphblas
ae9e5bd97ad63f0182230dbcde2a205b4086e607
[ "Apache-2.0", "MIT" ]
null
null
null
36.223404
284
0.569163
[ [ [ "# Fusing graphblas.matrix_multiply with graphblas.apply\n\nThis example will go over how to use the `--graphblas-structuralize ` and `--graphblas-optimize` passes from `graphblas-opt` to fuse `graphblas.matrix_multiply` ops with `graphblas.apply` ops into `graphblas.matrix_multiply_generic` ops.\n\nLet's first import some necessary libraries.", "_____no_output_____" ] ], [ [ "import tempfile\nfrom mlir_graphblas.cli import GRAPHBLAS_OPT_EXE", "_____no_output_____" ] ], [ [ "Since [sparse tensor encodings](https://mlir.llvm.org/docs/Dialects/SparseTensorOps/#sparsetensorencodingattr) can be very verbose in MLIR, let's import some helpers to make the MLIR code more readable.", "_____no_output_____" ] ], [ [ "from mlir_graphblas.tools import tersify_mlir", "_____no_output_____" ] ], [ [ "## Fusion Details\n\nRecall that `graphblas.matrix_multiply` ops can lower into `graphblas.matrix_multiply_generic` ops, which take blocks that specify exact behavior at several points during the matrix multiply. 
One of those blocks is a \"transform_out\" block.\n\nSince `graphblas.apply` ops only change tensors in an element-wise fashion, we can perform these element-wise changes in the \"transform_out\" block of a `graphblas.matrix_multiply_generic` op if the `graphblas.apply` op is run on the result of a `graphblas.matrix_multiply` op.", "_____no_output_____" ], [ "## Simple Fusion\n\nHere, we'll show the simplest example of how we can fuse a `graphblas.matrix_multiply` op with a `graphblas.apply` op.", "_____no_output_____" ] ], [ [ "mlir_text = \"\"\"\n#CSR64 = #sparse_tensor.encoding<{\n dimLevelType = [ \"dense\", \"compressed\" ],\n dimOrdering = affine_map<(i,j) -> (i,j)>,\n pointerBitWidth = 64,\n indexBitWidth = 64\n}>\n\n#CSC64 = #sparse_tensor.encoding<{\n dimLevelType = [ \"dense\", \"compressed\" ],\n dimOrdering = affine_map<(i,j) -> (j,i)>,\n pointerBitWidth = 64,\n indexBitWidth = 64\n}>\n\nfunc @fuse_adjacent(%A: tensor<?x?xf64, #CSR64>, %B: tensor<?x?xf64, #CSC64>, %thunk: f64) -> tensor<?x?xf64, #CSR64> {\n %C = graphblas.matrix_multiply %A, %B { semiring = \"plus_plus\" } : (tensor<?x?xf64, #CSR64>, tensor<?x?xf64, #CSC64>) to tensor<?x?xf64, #CSR64> \n %apply_result = graphblas.apply %C, %thunk { apply_operator = \"min\" } : (tensor<?x?xf64, #CSR64>, f64) to tensor<?x?xf64, #CSR64>\n return %apply_result : tensor<?x?xf64, #CSR64>\n}\n\"\"\"\n\nwith tempfile.NamedTemporaryFile() as temp:\n temp_file_name = temp.name\n with open(temp_file_name, 'w') as f:\n f.write(mlir_text)\n temp.flush()\n\n output_mlir = ! 
cat $temp_file_name | $GRAPHBLAS_OPT_EXE --graphblas-structuralize --graphblas-optimize\n output_mlir = \"\\n\".join(output_mlir)\n output_mlir = tersify_mlir(output_mlir)\n\nprint(output_mlir)", "#CSR64 = #sparse_tensor.encoding<{\n dimLevelType = [ \"dense\", \"compressed\" ],\n dimOrdering = affine_map<(d0, d1) -> (d0, d1)>,\n pointerBitWidth = 64,\n indexBitWidth = 64\n}>\n\n#CSC64 = #sparse_tensor.encoding<{\n dimLevelType = [ \"dense\", \"compressed\" ],\n dimOrdering = affine_map<(d0, d1) -> (d1, d0)>,\n pointerBitWidth = 64,\n indexBitWidth = 64\n}>\n\nmodule {\n func @fuse_adjacent(%arg0: tensor<?x?xf64, #CSR64>, %arg1: tensor<?x?xf64, #CSC64>, %arg2: f64) -> tensor<?x?xf64, #CSR64> {\n %cst = arith.constant 0.000000e+00 : f64\n %0 = graphblas.matrix_multiply_generic %arg0, %arg1 {mask_complement = false} : (tensor<?x?xf64, #CSR64>, tensor<?x?xf64, #CSC64>) to tensor<?x?xf64, #CSR64> {\n graphblas.yield add_identity %cst : f64\n }, {\n ^bb0(%arg3: f64, %arg4: f64): // no predecessors\n %1 = arith.addf %arg3, %arg4 : f64\n graphblas.yield add %1 : f64\n }, {\n ^bb0(%arg3: f64, %arg4: f64): // no predecessors\n %1 = arith.addf %arg3, %arg4 : f64\n graphblas.yield mult %1 : f64\n }, {\n ^bb0(%arg3: f64): // no predecessors\n %1 = arith.cmpf olt, %arg3, %arg2 : f64\n %2 = select %1, %arg3, %arg2 : f64\n graphblas.yield transform_out %2 : f64\n }\n return %0 : tensor<?x?xf64, #CSR64>\n }\n}\n\n\n" ] ], [ [ "Note how this function now only has one op from the GraphBLAS dialect. Notice how this one op, i.e. the `graphblas.matrix_multiply_generic`, has a \"transform_out\" block that performs the exact behavior specified by the `graphblas.apply` op in the original code. \n\nIt's noteworthy that this fusion also works if the `graphblas.matrix_multiply` use takes a mask. Rather than explicitly demonstrating this, we'll leave it as an exercise for the reader as it's a fairly straightforward. 
\n\nIf the intermediate result from the `graphblas.matrix_multiply` op is used in other places outside of the `graphblas.apply` op, this fusion cannot apply. ", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ] ]
4a2f074eeeddc34435dcc7d432a21de2826f137a
711,804
ipynb
Jupyter Notebook
sagemaker-debugger/xgboost_census_explanations/xgboost-census-debugger-rules.ipynb
Amirosimani/amazon-sagemaker-examples
bc35e7a9da9e2258e77f98098254c2a8e308041a
[ "Apache-2.0" ]
2,610
2020-10-01T14:14:53.000Z
2022-03-31T18:02:31.000Z
sagemaker-debugger/xgboost_census_explanations/xgboost-census-debugger-rules.ipynb
Amirosimani/amazon-sagemaker-examples
bc35e7a9da9e2258e77f98098254c2a8e308041a
[ "Apache-2.0" ]
1,959
2020-09-30T20:22:42.000Z
2022-03-31T23:58:37.000Z
sagemaker-debugger/xgboost_census_explanations/xgboost-census-debugger-rules.ipynb
Amirosimani/amazon-sagemaker-examples
bc35e7a9da9e2258e77f98098254c2a8e308041a
[ "Apache-2.0" ]
2,052
2020-09-30T22:11:46.000Z
2022-03-31T23:02:51.000Z
634.406417
86,260
0.734744
[ [ [ "<h1>Table of Contents<span class=\"tocSkip\"></span></h1>\n<div class=\"toc\"><ul class=\"toc-item\"><li><span><a href=\"#Explainability-with-Amazon-SageMaker-Debugger\" data-toc-modified-id=\"Explainability-with-Amazon-SageMaker-Debugger-1\">Explainability with Amazon SageMaker Debugger</a></span><ul class=\"toc-item\"><li><span><a href=\"#Introduction-\" data-toc-modified-id=\"Introduction--1.1\">Introduction <a id=\"intro\"></a></a></span><ul class=\"toc-item\"><li><span><a href=\"#Saving-tensors\" data-toc-modified-id=\"Saving-tensors-1.1.1\">Saving model parameters</a></span></li><li><span><a href=\"#Analysis\" data-toc-modified-id=\"Analysis-1.1.2\">Analysis</a></span></li></ul></li><li><span><a href=\"#Section-1---Setup-\" data-toc-modified-id=\"Section-1---Setup--1.2\">Section 1 - Setup <a id=\"setup\"></a></a></span><ul class=\"toc-item\"><li><span><a href=\"#1.1-Import-necessary-libraries\" data-toc-modified-id=\"1.1-Import-necessary-libraries-1.2.1\">1.1 Import necessary libraries</a></span></li><li><span><a href=\"#1.2-AWS-region-and--IAM-Role\" data-toc-modified-id=\"1.2-AWS-region-and--IAM-Role-1.2.2\">1.2 AWS region and IAM Role</a></span></li><li><span><a href=\"#1.3-S3-bucket-and-prefix-to-hold-training-data,-debugger-information-and-model-artifact\" data-toc-modified-id=\"1.3-S3-bucket-and-prefix-to-hold-training-data,-debugger-information-and-model-artifact-1.2.3\">1.3 S3 bucket and prefix to hold training data, debugger information and model artifact</a></span></li></ul></li><li><span><a href=\"#Section-2---Data-preparation-\" data-toc-modified-id=\"Section-2---Data-preparation--1.3\">Section 2 - Data preparation <a id=\"prep-data\"></a></a></span></li><li><span><a href=\"#Section-3---Train-XGBoost-model-in-Amazon-SageMaker-with--debugger-enabled.-\" data-toc-modified-id=\"Section-3---Train-XGBoost-model-in-Amazon-SageMaker-with--debugger-enabled.--1.4\">Section 3 - Train XGBoost model in Amazon SageMaker with debugger enabled. 
<a id=\"train\"></a></a></span><ul class=\"toc-item\"><li><span><a href=\"#3.1-Install-the-'smdebug'-open-source-library\" data-toc-modified-id=\"3.1-Install-the-'smdebug'-open-source-library-1.4.1\">3.1 Install the 'smdebug' open source library</a></span></li><li><span><a href=\"#3.2-Build-the-XGBoost-container\" data-toc-modified-id=\"3.2-Build-the-XGBoost-container-1.4.2\">3.2 Build the XGBoost container</a></span></li><li><span><a href=\"#3.3-Enabling-Debugger-in-Estimator-object\" data-toc-modified-id=\"3.3-Enabling-Debugger-in-Estimator-object-1.4.3\">3.3 Enabling Debugger in Estimator object</a></span><ul class=\"toc-item\"><li><span><a href=\"#DebuggerHookConfig\" data-toc-modified-id=\"DebuggerHookConfig-1.4.3.1\">DebuggerHookConfig</a></span></li><li><span><a href=\"#Rules\" data-toc-modified-id=\"Rules-1.4.3.2\">Rules</a></span></li></ul></li><li><span><a href=\"#3.4-Result\" data-toc-modified-id=\"3.4-Result-1.4.4\">3.4 Result</a></span></li><li><span><a href=\"#3.5-Check-the-status-of-the-Rule-Evaluation-Job\" data-toc-modified-id=\"3.5-Check-the-status-of-the-Rule-Evaluation-Job-1.4.5\">3.5 Check the status of the Rule Evaluation Job</a></span></li></ul></li><li><span><a href=\"#Section-4---Analyze-debugger-output-\" data-toc-modified-id=\"Section-4---Analyze-debugger-output--1.5\">Section 4 - Analyze debugger output <a id=\"analyze-debugger-ouput\"></a></a></span><ul class=\"toc-item\"><li><span><a href=\"#Retrieving-and-Analyzing-tensors\" data-toc-modified-id=\"Retrieving-and-Analyzing-tensors-1.5.1\">Retrieving and Analyzing model parameters</a></span></li><li><span><a href=\"#Plot-Performance-metrics\" data-toc-modified-id=\"Plot-Performance-metrics-1.5.2\">Plot Performance metrics</a></span></li><li><span><a href=\"#Feature-importance\" data-toc-modified-id=\"Feature-importance-1.5.3\">Feature importance</a></span></li><li><span><a href=\"#SHAP\" data-toc-modified-id=\"SHAP-1.5.4\">SHAP</a></span></li><li><span><a href=\"#Global-explanations\" 
data-toc-modified-id=\"Global-explanations-1.5.5\">Global explanations</a></span></li><li><span><a href=\"#Local-explanations\" data-toc-modified-id=\"Local-explanations-1.5.6\">Local explanations</a></span><ul class=\"toc-item\"><li><span><a href=\"#Force-plot\" data-toc-modified-id=\"Force-plot-1.5.6.1\">Force plot</a></span></li><li><span><a href=\"#Stacked-force-plot\" data-toc-modified-id=\"Stacked-force-plot-1.5.6.2\">Stacked force plot</a></span></li></ul></li><li><span><a href=\"#Outliers\" data-toc-modified-id=\"Outliers-1.5.7\">Outliers</a></span></li></ul></li><li><span><a href=\"#Conclusion\" data-toc-modified-id=\"Conclusion-1.6\">Conclusion</a></span></li></ul></li></ul></div>", "_____no_output_____" ], [ "# Explainability with Amazon SageMaker Debugger\n_**Explain a XGBoost model that predicts an individual's income**_\n\nThis notebook demonstrates how to use Amazon SageMaker Debugger to capture the feature importance and SHAP values for a XGBoost model.\n\n*This notebook was created and tested on an ml.t2.medium notebook instance.*", "_____no_output_____" ], [ "## Introduction <a id='intro'></a>\n\nAmazon SageMaker Debugger is the capability of Amazon SageMaker that allows debugging machine learning training. The capability helps you monitor the training jobs in near real time using rules and alert you once it has detected inconsistency in training. \n\nUsing Amazon SageMaker Debugger is a two step process: Saving model parameters and Analysis.\nLet's look at each one of them closely.\n\n### Saving model parameters\n\nIn machine learning process, model parameters are updated every forward and backward pass and can describe the state of the training job at any particular instant in an ML lifecycle. Amazon SageMaker Debugger allows you to capture the model parameters and save them for analysis. 
Although XGBoost is not a deep learning algorithm, Amazon SageMaker Debugger is highly customizable and can help you interpret results by saving insightful metrics. For example, performance metrics or the importance of features at different frequencies. Refer to [SageMaker Debugger documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/debugger-configuration.html) for details on how to save the metrics you want. \n\nThe saved model parameters in this notebook include feature importance and SHAP values for all features in the dataset. The feature importance and SHAP values are what we will use to provide local and global explainability.\n\n\n### Analysis\n\nAfter the model parameters are saved, perform automatic analysis by running debugging ***Rules***.\nOn a very broad level, a rule is Python code used to detect certain conditions during training.\nSome of the conditions that a data scientist training an algorithm may care about are monitoring for gradients getting too large or too small, detecting overfitting, and so on.\nAmazon SageMaker Debugger comes pre-packaged with certain rules that can be invoked on Amazon SageMaker. Users can also write their own rules using the Amazon SageMaker Debugger APIs. \nFor more information about automatic analysis using a rule, see the [rules documentation](https://github.com/awslabs/sagemaker-debugger/blob/master/docs/analysis.md).\n ", "_____no_output_____" ], [ "## Section 1 - Setup <a id='setup'></a>\n\nIn this section, we will import the necessary libraries, setup variables and examine dataset used. 
that was used to train the XGBoost model to predict an individual's income.\n\nLet's start by specifying:\n\n* The AWS region used to host your model.\n* The IAM role associated with this SageMaker notebook instance.\n* The S3 bucket used to store the data used to train the model, save debugger information during training and the trained model artifact.", "_____no_output_____" ], [ "<font color='red'>**Important**</font>: To use the new Debugger features, you need to upgrade the SageMaker Python SDK and the SMDebug libary. In the following cell, change the third line to `install_needed=True` and run to upgrade the libraries.", "_____no_output_____" ] ], [ [ "import sys\nimport IPython\ninstall_needed = False # Set to True to upgrade\nif install_needed:\n print(\"installing deps and restarting kernel\")\n !{sys.executable} -m pip install -U sagemaker\n !{sys.executable} -m pip install -U smdebug\n IPython.Application.instance().kernel.do_shutdown(True)", "_____no_output_____" ] ], [ [ "### 1.1 Import necessary libraries", "_____no_output_____" ] ], [ [ "import boto3\nimport sagemaker\nimport os\nimport pandas as pd\n\nfrom sagemaker import get_execution_role", "_____no_output_____" ] ], [ [ "### 1.2 AWS region and IAM Role", "_____no_output_____" ] ], [ [ "region = boto3.Session().region_name\nprint(\"AWS Region: {}\".format(region))\n\nrole = get_execution_role()\nprint(\"RoleArn: {}\".format(role))", "_____no_output_____" ] ], [ [ "### 1.3 S3 bucket and prefix to hold training data, debugger information, and model artifact", "_____no_output_____" ] ], [ [ "bucket = sagemaker.Session().default_bucket()\nprefix = \"DEMO-smdebug-xgboost-adult-income-prediction\"", "_____no_output_____" ] ], [ [ "## Section 2 - Data preparation <a id='prep-data'></a>", "_____no_output_____" ], [ "We'll be using the [Adult Census dataset](https://archive.ics.uci.edu/ml/datasets/adult) for this exercise. 
\nThis data was extracted from the [1994 Census bureau database](http://www.census.gov/en.html) by Ronny Kohavi and Barry Becker (Data Mining and Visualization, Silicon Graphics), with the task being to predict if an individual person makes over 50K a year. \n\nWe'll be using the [SHAP](https://github.com/slundberg/shap) library to perform visual analysis. The library contains the dataset pre-loaded which we will utilize here. ", "_____no_output_____" ] ], [ [ "!python -m pip install shap", "_____no_output_____" ], [ "import shap\n\nX, y = shap.datasets.adult()\nX_display, y_display = shap.datasets.adult(display=True)\nfeature_names = list(X.columns)", "_____no_output_____" ], [ "feature_names", "_____no_output_____" ], [ "# create a train/test split\nfrom sklearn.model_selection import train_test_split # For splitting the dataset\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=7)\nX_train_display = X_display.loc[X_train.index]", "_____no_output_____" ], [ "train = pd.concat(\n [pd.Series(y_train, index=X_train.index, name=\"Income>50K\", dtype=int), X_train], axis=1\n)\ntest = pd.concat(\n [pd.Series(y_test, index=X_test.index, name=\"Income>50K\", dtype=int), X_test], axis=1\n)\n\n# Use 'csv' format to store the data\n# The first column is expected to be the output column\ntrain.to_csv(\"train.csv\", index=False, header=False)\ntest.to_csv(\"validation.csv\", index=False, header=False)\n\nboto3.Session().resource(\"s3\").Bucket(bucket).Object(\n os.path.join(prefix, \"data/train.csv\")\n).upload_file(\"train.csv\")\nboto3.Session().resource(\"s3\").Bucket(bucket).Object(\n os.path.join(prefix, \"data/validation.csv\")\n).upload_file(\"validation.csv\")", "_____no_output_____" ] ], [ [ "## Section 3 - Train XGBoost model in Amazon SageMaker with debugger enabled. <a id='train'></a>\n\nNow train an XGBoost model with Amazon SageMaker Debugger enabled and monitor the training jobs. 
This is done using the Amazon SageMaker Estimator API. While the training job is running, use Amazon SageMaker Debugger API to access saved model parameters in real time and visualize them. You can rely on Amazon SageMaker Debugger to take care of downloading a fresh set of model parameters every time you query for them.", "_____no_output_____" ], [ "Amazon SageMaker Debugger is available in Amazon SageMaker XGBoost container version 0.90-2 or later. If you want to use XGBoost with Amazon SageMaker Debugger, you have to specify `repo_version='0.90-2'` in the `get_image_uri` function.", "_____no_output_____" ], [ "### 3.2 Build the XGBoost container", "_____no_output_____" ], [ "Amazon SageMaker Debugger is available in Amazon SageMaker XGBoost container version 0.90-2 or later.", "_____no_output_____" ] ], [ [ "container = sagemaker.image_uris.retrieve(\"xgboost\", region, \"0.90-2\")", "_____no_output_____" ], [ "base_job_name = \"demo-smdebug-xgboost-adult-income-prediction-classification\"\nbucket_path = \"s3://{}\".format(bucket)\n\nhyperparameters = {\n \"max_depth\": \"5\",\n \"eta\": \"0.2\",\n \"gamma\": \"4\",\n \"min_child_weight\": \"6\",\n \"subsample\": \"0.7\",\n \"silent\": \"0\",\n \"objective\": \"binary:logistic\",\n \"num_round\": \"51\",\n}\nsave_interval = 5", "_____no_output_____" ] ], [ [ "### 3.3 Enabling Debugger in Estimator object\n\n\n#### DebuggerHookConfig\n\nEnabling Amazon SageMaker Debugger in training job can be accomplished by adding its configuration into Estimator object constructor:\n\n```python\nfrom sagemaker.debugger import DebuggerHookConfig, CollectionConfig\n\nestimator = Estimator(\n ...,\n debugger_hook_config = DebuggerHookConfig(\n s3_output_path=\"s3://{bucket_name}/{location_in_bucket}\", # Required\n collection_configs=[\n CollectionConfig(\n name=\"metrics\",\n parameters={\n \"save_interval\": \"10\"\n }\n )\n ]\n )\n)\n```\nHere, the `DebuggerHookConfig` object instructs `Estimator` what data we are interested 
in.\nTwo parameters are provided in the example:\n\n- `s3_output_path`: Points to an S3 bucket where you intend to store model parameters. Amount of data saved depends on multiple factors, major ones are training job, data set, model, frequency of saving model parameters. This S3 bucket should be in your AWS account so that you have full access to control over the stored data. **Note**: The S3 bucket should be originally created in the same Region where your training job is running, otherwise you might run into problems with cross-Region access.\n\n- `collection_configs`: It enumerates named collections of model parameters to save. Collections are a convenient way to organize relevant model parameters under same umbrella to make it easy to navigate them during analysis. In this particular example, you are interested in a single collection named metrics. You also configured Amazon SageMaker Debugger to save metrics every 10 iterations. See [Collection](https://github.com/awslabs/sagemaker-debugger/blob/master/docs/api.md#collection) documentation for all parameters that are supported by Collections and DebuggerConfig documentation for more details about all parameters DebuggerConfig supports.\n \n#### Rules\n\nEnabling Rules in training job can be accomplished by adding the `rules` configuration into Estimator object constructor.\n\n- `rules`: This parameter will accept a list of rules you want to evaluate against training jobs.\n For rules, Amazon SageMaker Debugger supports two types:\n - SageMaker Rules: These are rules specially curated by the data science and engineering teams in Amazon SageMaker which you can opt to evaluate against your training job.\n - Custom Rules: You can optionally choose to write your own rule as a Python source file and have it evaluated against your training job.\n To provide Amazon SageMaker Debugger to evaluate this rule, you would have to provide the S3 location of the rule source and the evaluator image.\n\nIn this example, you 
will use a Amazon SageMaker's LossNotDecreasing rule, which helps you identify if you are running into a situation where the training loss is not going down.\n\n```python\nfrom sagemaker.debugger import rule_configs, Rule\n\nestimator = Estimator(\n ...,\n rules=[\n Rule.sagemaker(\n rule_configs.loss_not_decreasing(),\n rule_parameters={\n \"collection_names\": \"metrics\",\n \"num_steps\": \"10\",\n },\n ),\n ],\n)\n```\n\n- `rule_parameters`: In this parameter, you provide the runtime values of the parameter in your constructor.\n You can still choose to pass in other values which may be necessary for your rule to be evaluated.\n In this example, you will use Amazon SageMaker's LossNotDecreasing rule to monitor the `metircs` collection.\n The rule will alert you if the loss value in the `metrics` collection has not decreased for more than 10 steps.", "_____no_output_____" ] ], [ [ "from sagemaker.debugger import rule_configs, Rule, DebuggerHookConfig, CollectionConfig\nfrom sagemaker.estimator import Estimator\n\nxgboost_estimator = Estimator(\n role=role,\n base_job_name=base_job_name,\n instance_count=1,\n instance_type=\"ml.m5.4xlarge\",\n image_uri=container,\n hyperparameters=hyperparameters,\n max_run=1800,\n debugger_hook_config=DebuggerHookConfig(\n s3_output_path=bucket_path, # Required\n collection_configs=[\n CollectionConfig(name=\"metrics\", parameters={\"save_interval\": str(save_interval)}),\n CollectionConfig(\n name=\"feature_importance\", parameters={\"save_interval\": str(save_interval)}\n ),\n CollectionConfig(name=\"full_shap\", parameters={\"save_interval\": str(save_interval)}),\n CollectionConfig(name=\"average_shap\", parameters={\"save_interval\": str(save_interval)}),\n ],\n ),\n rules=[\n Rule.sagemaker(\n rule_configs.loss_not_decreasing(),\n rule_parameters={\n \"collection_names\": \"metrics\",\n \"num_steps\": str(save_interval * 2),\n },\n ),\n ],\n)", "_____no_output_____" ] ], [ [ "With the next step, start a training job by 
using the Estimator object you created above. This job is started in an asynchronous, non-blocking way. This means that control is passed back to the notebook and further commands can be run while the training job is progressing.", "_____no_output_____" ] ], [ [ "from sagemaker.session import TrainingInput\n\ntrain_input = TrainingInput(\n \"s3://{}/{}/{}\".format(bucket, prefix, \"data/train.csv\"), content_type=\"csv\"\n)\nvalidation_input = TrainingInput(\n \"s3://{}/{}/{}\".format(bucket, prefix, \"data/validation.csv\"), content_type=\"csv\"\n)\nxgboost_estimator.fit(\n {\"train\": train_input, \"validation\": validation_input},\n # This is a fire and forget event. By setting wait=False, you submit the job to run in the background.\n # Amazon SageMaker starts one training job and release control to next cells in the notebook.\n # Follow this notebook to see status of the training job.\n wait=False,\n)", "_____no_output_____" ] ], [ [ "### 3.4 Result\n\nAs a result of the above command, Amazon SageMaker starts **one training job and one rule job** for you. The first one is the job that produces the model parameters to be analyzed. The second one analyzes the model parameters to check if `train-error` and `validation-error` are not decreasing at any point during training.\n\nCheck the status of the training job below.\nAfter your training job is started, Amazon SageMaker starts a rule-execution job to run the LossNotDecreasing rule. 
\n\nThe cell below will block till the training job is complete.", "_____no_output_____" ] ], [ [ "import time\n\nfor _ in range(36):\n job_name = xgboost_estimator.latest_training_job.name\n client = xgboost_estimator.sagemaker_session.sagemaker_client\n description = client.describe_training_job(TrainingJobName=job_name)\n training_job_status = description[\"TrainingJobStatus\"]\n rule_job_summary = xgboost_estimator.latest_training_job.rule_job_summary()\n rule_evaluation_status = rule_job_summary[0][\"RuleEvaluationStatus\"]\n print(\n \"Training job status: {}, Rule Evaluation Status: {}\".format(\n training_job_status, rule_evaluation_status\n )\n )\n\n if training_job_status in [\"Completed\", \"Failed\"]:\n break\n\n time.sleep(10)", "_____no_output_____" ] ], [ [ "### 3.5 Check the status of the Rule Evaluation Job\n\nTo get the rule evaluation job that Amazon SageMaker started for you, run the command below. The results show you the `RuleConfigurationName`, `RuleEvaluationJobArn`, `RuleEvaluationStatus`, `StatusDetails`, and `RuleEvaluationJobArn`.\nIf the model parameters meet a rule evaluation condition, the rule execution job throws a client error with `RuleEvaluationConditionMet`.\n\nThe logs of the rule evaluation job are available in the Cloudwatch Logstream `/aws/sagemaker/ProcessingJobs` with `RuleEvaluationJobArn`.\n\nYou can see that once the rule execution job starts, it identifies the loss not decreasing situation in the training job, it raises the `RuleEvaluationConditionMet` exception, and it ends the job.", "_____no_output_____" ] ], [ [ "xgboost_estimator.latest_training_job.rule_job_summary()", "_____no_output_____" ] ], [ [ "## Section 4 - Analyze debugger output <a id='analyze-debugger-ouput'></a>\n\nNow that you've trained the system, analyze the data. 
Here, you focus on after-the-fact analysis.\n\nYou import a basic analysis library, which defines the concept of trial, which represents a single training run.\n\n### Retrieving and Analyzing tensors\n\nBefore getting to analysis, here are some notes on concepts being used in Amazon SageMaker Debugger that help with analysis.\n- ***Trial*** - Object that is a centerpiece of the SageMaker Debugger API when it comes to getting access to model parameters. It is a top level abstract that represents a single run of a training job. All model parameters emitted by a training job are associated with its trial.\n- ***Tensor*** - Object that represents model parameters, such as weights, gradients, accuracy, and loss, that are saved during training job.\n\nFor more details on aforementioned concepts as well as on SageMaker Debugger API in general (including examples) see [SageMaker Debugger Analysis API](https://github.com/awslabs/sagemaker-debugger/blob/master/docs/analysis.md) documentation.\n\nIn the following code cell, use a ***Trial*** to access model parameters. You can do that by inspecting currently running training job and extract necessary parameters from its debug configuration to instruct SageMaker Debugger where the data you are looking for is located. Keep in mind the following:\n- model parameters are being stored in your own S3 bucket to which you can navigate and manually inspect its content if desired.\n- You might notice a slight delay before trial object is created. This is normal as SageMaker Debugger monitors the corresponding bucket and waits until model parameters to appear. The delay is introduced by less than instantaneous upload of model parameters from a training container to your S3 bucket. 
", "_____no_output_____" ] ], [ [ "from smdebug.trials import create_trial\n\ns3_output_path = xgboost_estimator.latest_job_debugger_artifacts_path()\ntrial = create_trial(s3_output_path)", "_____no_output_____" ] ], [ [ "You can list all model parameters that you want to analyze. Each one of these names is the name of a model parameter. The name is a combination of the feature name, which in these cases, is auto-assigned by XGBoost, and whether it's an evaluation metric, feature importance, or SHAP value.", "_____no_output_____" ] ], [ [ "trial.tensor_names()", "_____no_output_____" ] ], [ [ "For each model parameter, we can get the values at all saved steps. ", "_____no_output_____" ] ], [ [ "trial.tensor(\"average_shap/f1\").values()", "_____no_output_____" ] ], [ [ "### Plot Performance metrics\n\nYou can also create a simple function that visualizes the training and validation errors as the training progresses.\nThe error should get smaller over time, as the system converges to a good solution.", "_____no_output_____" ] ], [ [ "from itertools import islice\nimport matplotlib.pyplot as plt\nimport re\n\nMAX_PLOTS = 35\n\n\ndef get_data(trial, tname):\n \"\"\"\n For the given tensor name, walks though all the iterations\n for which you have data and fetches the values.\n Returns the set of steps and the values.\n \"\"\"\n tensor = trial.tensor(tname)\n steps = tensor.steps()\n vals = [tensor.value(s) for s in steps]\n return steps, vals\n\n\ndef match_tensor_name_with_feature_name(tensor_name, feature_names=feature_names):\n feature_tag = tensor_name.split(\"/\")\n for ifeat, feature_name in enumerate(feature_names):\n if feature_tag[-1] == \"f{}\".format(str(ifeat)):\n return feature_name\n return tensor_name\n\n\ndef plot_collection(trial, collection_name, regex=\".*\", figsize=(8, 6)):\n \"\"\"\n Takes a `trial` and a collection name, and\n plots all tensors that match the given regex.\n \"\"\"\n fig, ax = plt.subplots(figsize=figsize)\n tensors = 
trial.collection(collection_name).tensor_names\n matched_tensors = [t for t in tensors if re.match(regex, t)]\n for tensor_name in islice(matched_tensors, MAX_PLOTS):\n steps, data = get_data(trial, tensor_name)\n ax.plot(steps, data, label=match_tensor_name_with_feature_name(tensor_name))\n\n ax.legend(loc=\"center left\", bbox_to_anchor=(1, 0.5))\n ax.set_xlabel(\"Iteration\")", "_____no_output_____" ], [ "plot_collection(trial, \"metrics\")", "_____no_output_____" ] ], [ [ "### Feature importance\n\nYou can also visualize the feature priorities as determined by\n[xgboost.get_score()](https://xgboost.readthedocs.io/en/latest/python/python_api.html#xgboost.Booster.get_score).\nIf you instructed Estimator to log the `feature_importance` collection, all importance types supported by `xgboost.get_score()` will be available in the collection.", "_____no_output_____" ] ], [ [ "def plot_feature_importance(trial, importance_type=\"weight\"):\n SUPPORTED_IMPORTANCE_TYPES = [\"weight\", \"gain\", \"cover\", \"total_gain\", \"total_cover\"]\n if importance_type not in SUPPORTED_IMPORTANCE_TYPES:\n raise ValueError(f\"{importance_type} is not one of the supported importance types.\")\n plot_collection(trial, \"feature_importance\", regex=f\"feature_importance/{importance_type}/.*\")", "_____no_output_____" ], [ "plot_feature_importance(trial, importance_type=\"cover\")", "_____no_output_____" ] ], [ [ "### SHAP\n\n[SHAP](https://github.com/slundberg/shap) (SHapley Additive exPlanations) is\nanother approach to explain the output of machine learning models.\nSHAP values represent a feature's contribution to a change in the model output.\nYou instructed Estimator to log the average SHAP values in this example so the SHAP values (as calculated by [xgboost.predict(pred_contribs=True)](https://xgboost.readthedocs.io/en/latest/python/python_api.html#xgboost.Booster.predict)) will be available the `average_shap` collection.", "_____no_output_____" ] ], [ [ "plot_collection(trial, 
\"average_shap\")", "_____no_output_____" ] ], [ [ "### Global explanations", "_____no_output_____" ], [ "Global explanatory methods allow understanding the model and its feature contributions in aggregate over multiple datapoints. Here we show an aggregate bar plot that plots the mean absolute SHAP value for each feature. \n\nSpecifically, the below plot indicates that the value of relationship (Wife=5, Husband=4, Own-child=3, Other-relative=2, Unmarried=1, Not-in-family=0) plays the most important role in predicting the income probability being higher than 50K.", "_____no_output_____" ] ], [ [ "shap_values = trial.tensor(\"full_shap/f0\").value(trial.last_complete_step)\nshap_no_base = shap_values[:, :-1]\nshap_base_value = shap_values[0, -1]\nshap.summary_plot(shap_no_base, plot_type=\"bar\", feature_names=feature_names)", "_____no_output_____" ], [ "shap_base_value", "_____no_output_____" ] ], [ [ "The detailed summary plot below can provide more context over the above bar chart. It tells which features are most important and, in addition, their range of effects over the dataset. The color allows us to match how changes in the value of a feature effect the change in prediction. \n\nThe 'red' indicates higher value of the feature and 'blue' indicates lower (normalized over the features). This allows conclusions such as 'increase in age leads to higher log odds for prediction, eventually leading to `True` predictions more often. ", "_____no_output_____" ] ], [ [ "shap.summary_plot(shap_no_base, X_train)", "_____no_output_____" ] ], [ [ "### Local explanations", "_____no_output_____" ], [ "Local explainability aims to explain model behavior for a fixed input point. This can be used for either auditing models before deployment or to provide explanations for specific inference predictions. 
", "_____no_output_____" ] ], [ [ "shap.initjs()", "_____no_output_____" ] ], [ [ "#### Force plot", "_____no_output_____" ], [ "A force plot explanation shows how features are contributing to push the model output from the base value (the average model output over the dataset) to the model output. Features pushing the prediction higher are shown in **red**, those pushing the prediction lower are in **blue**.\n\nPlot below indicates that for this particular data point the prediction probability (0.48) is higher than the average (~0.2) primarily because this person is in a relationship (`Relationship = Wife`), and to smaller degree because of the higher-than-average age. Similarly the model reduces the probability due specific `Sex` and `Race` values indicating existence of bias in model behavior (possibly due to bias in the data). ", "_____no_output_____" ] ], [ [ "shap.force_plot(\n shap_base_value,\n shap_no_base[100, :],\n X_train_display.iloc[100, :],\n link=\"logit\",\n matplotlib=False,\n)", "_____no_output_____" ] ], [ [ "#### Stacked force plot", "_____no_output_____" ], [ "SHAP allows stacking multiple force-plots after rotating 90 degress to understand the explanations for multiple datapoints. If Javascript is enabled, then in the notebook this plot is interactive, allowing understanding the change in output for each feature independently. This stacking of force plots provides a balance between local and global explainability.", "_____no_output_____" ] ], [ [ "import numpy as np\n\nN_ROWS = shap_no_base.shape[0]\nN_SAMPLES = min(100, N_ROWS)\nsampled_indices = np.random.randint(N_ROWS, size=N_SAMPLES)", "_____no_output_____" ], [ "shap.force_plot(\n shap_base_value,\n shap_no_base[sampled_indices, :],\n X_train_display.iloc[sampled_indices, :],\n link=\"logit\",\n)", "_____no_output_____" ] ], [ [ "### Outliers\n\nOutliers are extreme values that deviate from other observations on data. 
It's useful to understand the influence of various features for outlier predictions to determine if it's a novelty, an experimental error, or a shortcoming in the model.\n\nHere we show force plot for prediction outliers that are on either side of the baseline value.", "_____no_output_____" ] ], [ [ "# top outliers\nfrom scipy import stats\n\nN_OUTLIERS = 3 # number of outliers on each side of the tail\n\nshap_sum = np.sum(shap_no_base, axis=1)\nz_scores = stats.zscore(shap_sum)\noutlier_indices = (np.argpartition(z_scores, -N_OUTLIERS)[-N_OUTLIERS:]).tolist()\noutlier_indices += (np.argpartition(z_scores, N_OUTLIERS)[:N_OUTLIERS]).tolist()", "_____no_output_____" ], [ "for fig_index, outlier_index in enumerate(outlier_indices, start=1):\n shap.force_plot(\n shap_base_value,\n shap_no_base[outlier_index, :],\n X_train_display.iloc[outlier_index, :],\n matplotlib=False,\n link=\"logit\",\n )", "_____no_output_____" ] ], [ [ "## Conclusion", "_____no_output_____" ], [ "This notebook discussed the importance of explainability for improved ML\nadoption and. We introduced the Amazon SageMaker Debugger capability with built-in\nmodel parameter collections to enable model explainability.\nThe notebook walked you through training an ML model for a financial services use case\nof individual income prediction. We further analyzed the global and local\nexplanations of the model by visualizing the captured model parameters.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ] ]
4a2f0c2391b41538b314f37f074c9a3433413728
14,510
ipynb
Jupyter Notebook
lessons/1-variables-and-data-structures.ipynb
malvikasharan/software-carpentry-embl-2019
17d0f4dd1989e8e516044747c2ec7ccf30a03d24
[ "CC-BY-4.0" ]
null
null
null
lessons/1-variables-and-data-structures.ipynb
malvikasharan/software-carpentry-embl-2019
17d0f4dd1989e8e516044747c2ec7ccf30a03d24
[ "CC-BY-4.0" ]
null
null
null
lessons/1-variables-and-data-structures.ipynb
malvikasharan/software-carpentry-embl-2019
17d0f4dd1989e8e516044747c2ec7ccf30a03d24
[ "CC-BY-4.0" ]
2
2019-10-17T08:29:21.000Z
2019-10-18T08:45:12.000Z
24.183333
387
0.54907
[ [ [ "# Programming in Python", "_____no_output_____" ], [ "## Session 1", "_____no_output_____" ], [ "### Aim of the Session\nLearn/review the basics\n- what is ...\n- how to ...", "_____no_output_____" ], [ "### 'Hello World!'", "_____no_output_____" ] ], [ [ "# the culturally-expected introductory statement\n", "_____no_output_____" ] ], [ [ "### Literals", "_____no_output_____" ], [ "Values of a _type_, presented literally", "_____no_output_____" ] ], [ [ "# example name type designation\n42 # integer int\n2.016 # float float*\n\"Homo sapiens\" # string str", "_____no_output_____" ] ], [ [ "- int: whole numbers e.g. 1, 1000, 6000000000\n- float: 'floating point' non-whole numbers e.g. 1.9, 30.01, 10e3, 1e-3\n- string: ordered sequence of characters, enclosed in quotation marks (single, double, _triple_)", "_____no_output_____" ] ], [ [ "# type conversions\n", "_____no_output_____" ] ], [ [ "#### Aside - Comments\n\nComments are preceded by a **#**, and are completely ignored by the python interpreter. \nComments can be on their own line or after a line of code.\n\nComments are an incredibly useful way to keep track of what you are doing in\nyour code. Use comments to document what you do as much as possible, it will\npay off in the long run.\n", "_____no_output_____" ], [ "### Exercises 1", "_____no_output_____" ] ], [ [ "# print some strings\n", "_____no_output_____" ], [ "# print some numbers (ints or floats)\n", "_____no_output_____" ], [ "# print multiple values of different types all at once\n# (hints: use comma to separate values with a space, or + to join strings)\n", "_____no_output_____" ], [ "# print a string containing quote marks\n", "_____no_output_____" ] ], [ [ "### Variables", "_____no_output_____" ], [ "Store values (information) in memory, and (re-)use them. 
We give variables names (identifiers) so that we have a means of referring to the information on demand.", "_____no_output_____" ] ], [ [ "# variable assignment is done with '='\n", "_____no_output_____" ] ], [ [ "#### Variable naming\nRules:\n\n- identifier lookup is case-sensitive\n - `myname` & `MyName` are different\n- must be unique in your working environment\n - existing variable will be __over-written without warning__\n- cannot start with a number, or any special symbol (e.g. $, %, @, -, etc...) except for \"_\" (underscore), which is OK.\n- cannot have any spaces or special characters (except for \"-\" (hyphen) and \"_\" (underscore))\n\nConventions/good practice:\n\n- identifiers (usually) begin with a lowercase letter\n- followed by letters, numbers, underscores\n- use a strategy to make reading easier\n - `myName`\n - `exciting_variable`\n- long, descriptive > short, vague", "_____no_output_____" ], [ "### String Formatting\nCreate formatted strings, with variable values substituted in.", "_____no_output_____" ] ], [ [ "# two ways to do it in Python\nname = 'Florence'\nage = 73\n\nprint('%s is %d years old' % (name, age)) # common amongst many programming languages\n\nprint('{} is {} years old'.format(name, age)) # perhaps more consistent with stardard Python syntax", "_____no_output_____" ] ], [ [ "### Operators & Operands", "_____no_output_____" ], [ "Using Python as a calculator: `+`, `-`, `/`, `*` etc are _operators_, the values/variables that they work on are _operands_.", "_____no_output_____" ] ], [ [ "# standard mathematical operations can be performed in Python\n\n# and some less common ones\n", "_____no_output_____" ] ], [ [ "_Note: check out numpy, scipy, stats modules if you want to do a lot of maths_", "_____no_output_____" ], [ "### Data Structures", "_____no_output_____" ], [ "Programming generally requires building/working with much larger and more complex sets of data than the single values/words/sentences that we have looked at so 
far. In fact, finding ways to operate effectively (and efficiently) on complex structures in order to extract/produce information, _is_ (data) programming.\n\nPython has two most commonly-used structures for storing multiple pieces of data - _lists_ and _dictionaries_. Let's look at these, and a few more, now.", "_____no_output_____" ], [ "#### Lists", "_____no_output_____" ] ], [ [ "# sequence of entries, in order and of any type\nnumbers = [32, 72, 42]\nmixed_list = [1, 'b', 3.0, 'd']\n\nempty_list = []\nanother_empty_list = list()\n\nletters = list('abcdefghi')", "_____no_output_____" ] ], [ [ "#### What more can we do with a list?", "_____no_output_____" ] ], [ [ "# creating a sensible list\n\n# Sugar per person (g per day) in 2004: ref: https://www.gapminder.org/data/\n\ntop_suger_consumers = ['United States', 'Canada', 'Estonia', 'Croatia', 'New Zealand', 'Switzerland']", "_____no_output_____" ], [ "# adding/removing entries\n\n## next 3 top countries is Denmark, can we add that to the list\n\n## how can keep this list with info on only Americas and Europe only\n", "_____no_output_____" ] ], [ [ "#### Objects, Methods, and How To Get Help", "_____no_output_____" ], [ "In Python, everything is an _object_ - some value(s), packaged up with a set of things that can be done with/to it (___methods___), and pieces of information about it (___attributes___). This makes it very easy to perform the most commonly-needed operations for that/those type of value(s). 
The language has a standard syntax for accessing methods:", "_____no_output_____" ] ], [ [ "string_object = 'data for 2004 based on a rough extrapolation'\n\n# methods - object.something()\nprint(string_object.upper())\n\n# more...", "_____no_output_____" ], [ "# help()\n", "_____no_output_____" ], [ "# sets\n", "_____no_output_____" ] ], [ [ "### Exercises 2", "_____no_output_____" ] ], [ [ "# add 'New Zealand' back to the list of top_suger_consumers\n", "_____no_output_____" ], [ "# access the fifth entry of the list\n", "_____no_output_____" ], [ "# access the last entry of the list", "_____no_output_____" ], [ "# join the list with a new list from another 8 countries\n\nnext_high_suger_consumers = ['Barbados', 'Costa Rica', \n 'Saint Kitts and Nevis', 'Trinidad and Tobago', \n 'Brazil', 'Grenada', 'Iceland', 'Belgium']", "_____no_output_____" ], [ "# access the last entry of the list now", "_____no_output_____" ] ], [ [ "### Range\n\nWe can access range of items from the list by defining the start index and stop index, separated by a colon symbol \":\".\n\nFor example, to access the item 3-5 from the a list, we will use the following syntax `list[2:5]`", "_____no_output_____" ] ], [ [ "top_suger_consumers[2:5]", "_____no_output_____" ] ], [ [ "Please note that such ranges in python are defined as **left inclusive and right exclusive** meaning that the right number is excluded while accessing the items. 
Which in this case the 6th item (index 5).", "_____no_output_____" ] ], [ [ "# access top 4 items", "_____no_output_____" ], [ "# access last 4 items", "_____no_output_____" ] ], [ [ "#### Dictionaries", "_____no_output_____" ] ], [ [ "# collection of paired information - keys and values\nstudent_marks = {'United States': 191.78, 'Costa Rica': 156.16, 'Belgium': 150.69}\n\nempty_dict = {}\nanother_empty_dict = dict()\n\n# accessing dict entries\n\n# adding/changing/deleting entries\n", "_____no_output_____" ] ], [ [ "#### Mutable?", "_____no_output_____" ], [ "Object types can be divided into two categories - mutable & immutable. _Mutable_ objects can be changed 'in-place' - their value can be updated, added to, re-ordered etc without the need to create a whole new object every time. _Immutable_ types cannot be changed in place - once they have a value, this value cannot be altered. though, of course, it can __always__ be overwritten.", "_____no_output_____" ] ], [ [ "# lists are mutable\n\ntop_consumers_europe = [\"Estonia\", \"Croatia\", \"Switzerland\", \"Denmark\", \"Belgium\"]\ncities[4] = 'Iceland'", "_____no_output_____" ], [ "# strings are immutable\nfunfact = \"These lessons use: sad cancer examples\"\n\nfunfact[17] = 'd'\nprint(funfact)", "_____no_output_____" ] ], [ [ "### Exercise 3\n\nBelow is a set of commands working with a list of common cancers. \n\n- First, the list is extended by adding 'Liver' onto the end.\n- Then 'Prostate' - the fourth element in the list - is assigned to the variable fourth_common_cancer. \n\nSome of the code has been removed (replaced with ---). 
Fill in the blanks in the code block to make it work.", "_____no_output_____" ] ], [ [ "# Examples of most common cancers worldwide \n# Ref: https://www.wcrf.org/int/cancer-facts-figures/worldwide-data\n\ncommon_cancers = ['Lung', 'Breast', 'Colorectum', 'Prostate', 'Stomach']\n# add 'Liver' onto the end of the list \ncommon_cancers.---('Liver') \n# access the fourth entry in the list\nfourth_common_cancer = common_cancers[---] ", "_____no_output_____" ] ], [ [ "### Debugging Exercise", "_____no_output_____" ] ], [ [ "coffee_break = ['coffee', 'tea'; 'cookies', 'fruits']\ncoffee_break.append['water']\n print(coffee_break)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
4a2f132e5fd1c7384a71e54b0d7770d66fa55101
159,693
ipynb
Jupyter Notebook
06-AccGD/cg.ipynb
amkatrutsa/cet_opt_met
be24ec95c4f4fff4c7e78bb42e9243206b064270
[ "MIT" ]
4
2020-12-26T11:36:07.000Z
2021-06-26T11:14:41.000Z
06-AccGD/cg.ipynb
amkatrutsa/cet_opt_met
be24ec95c4f4fff4c7e78bb42e9243206b064270
[ "MIT" ]
1
2019-08-28T16:09:58.000Z
2019-08-28T16:10:10.000Z
06-AccGD/cg.ipynb
amkatrutsa/cet_opt_met
be24ec95c4f4fff4c7e78bb42e9243206b064270
[ "MIT" ]
2
2020-04-01T11:34:16.000Z
2020-07-13T02:10:50.000Z
140.947043
49,512
0.873739
[ [ [ "# Метод сопряжённых градиентов (Conjugate gradient method): гадкий утёнок ", "_____no_output_____" ], [ "## На прошлом занятии...\n\n1. Методы спуска\n2. Направление убывания\n3. Градиентный метод\n4. Правила выбора шага\n5. Теоремы сходимости\n6. Эксперименты", "_____no_output_____" ], [ "## Система линейных уравнений vs. задача безусловной минимизации\nРассмотрим задачу\n\n$$\n\\min_{x \\in \\mathbb{R}^n} \\frac{1}{2}x^{\\top}Ax - b^{\\top}x,\n$$\n\nгде $A \\in \\mathbb{S}^n_{++}$.\nИз необходимого условия экстремума имеем\n\n$$\nAx^* = b\n$$\n\nТакже обозначим $f'(x_k) = Ax_k - b = r_k$", "_____no_output_____" ], [ "## Как решить систему $Ax = b$?\n\n- Прямые методы основаны на матричных разложениях:\n - Плотная матрица $A$: для размерностей не больше нескольких тысяч\n - Разреженная (sparse) матрица $A$: для размерностей порядка $10^4 - 10^5$\n- Итерационные методы: хороши во многих случаях, единственный подход для задач с размерностью $ > 10^6$", "_____no_output_____" ], [ "## Немного истории...\n\nM. Hestenes и E. Stiefel предложили *метод сопряжённых градиентов* для решения систем линейных уравнений в 1952 году как **прямой** метод. 
\n\nТакже долгое время считалось, что метод представляет только теоретический интерес поскольку\n- метод сопряжённых градиентов не работает на логарифмической линейке\n- метод сопряжённых градиентов имеет небольшое преимущество перед исключением Гаусса при вычислениях на калькуляторе \n- для вычислений на \"human computers\" слишком много обменов данными\n\n<img src=\"./human_computer.jpeg\">\n\nМетод сопряжённых градиентов необходимо рассматривать как **итерационный метод**, то есть останавливаться до точной сходимости!\n\nПодробнее [здесь](https://www.siam.org/meetings/la09/talks/oleary.pdf)", "_____no_output_____" ], [ "## Метод сопряжённых направлений\n\nВ градиентном спуске направления убывания - анти-градиенты, но для функций с плохо обусловленным гессианом сходимость **медленная**.\n\n**Идея:** двигаться вдоль направлений, которые гарантируют сходимость за $n$ шагов.\n\n**Определение.** Множество ненулевых векторов $\\{p_0, \\ldots, p_l\\}$ называется *сопряжённым* относительно матрицы $A \\in \\mathbb{S}^n_{++}$, если \n\n$$\np^{\\top}_iAp_j = 0, \\qquad i \\neq j\n$$", "_____no_output_____" ], [ "**Утверждение.** Для любой $x_0 \\in \\mathbb{R}^n$ последовательность $\\{x_k\\}$, генерируемая методом сопряжённых направлений, сходится к решению системы $Ax = b$ максимум за $n$ шагов.\n\n```python\ndef ConjugateDirections(x0, A, b, p):\n \n x = x0\n \n r = A.dot(x) - b\n \n for i in range(len(p)):\n \n alpha = - (r.dot(p[i])) / (p[i].dot(A.dot(p[i])))\n \n x = x + alpha * p[i]\n \n r = A.dot(x) - b\n \n return x\n\n```", "_____no_output_____" ], [ "### Примеры сопряжённых направлений\n\n- Собственные векторы матрицы $A$\n- Для любого набора из $n$ векторов можно провести аналог ортогонализации Грама-Шмидта и получить сопряжённые направления\n\n**Вопрос:** что такое ортогонализация Грама-Шмидта? 
:)", "_____no_output_____" ], [ "### Геометрическая интерпретация (Mathematics Stack Exchange)\n\n<center><img src=\"./cg.png\" ></center>", "_____no_output_____" ], [ "## Метод сопряжённых градиентов\n\n**Идея:** новое направление $p_k$ ищется в виде $p_k = -r_k + \\beta_k p_{k-1}$, где $\\beta_k$ выбирается, исходя из требования сопряжённости $p_k$ и $p_{k-1}$:\n\n$$\n\\beta_k = \\dfrac{p^{\\top}_{k-1}Ar_k}{p^{\\top}_{k-1}Ap_{k-1}}\n$$\n\nТаким образом, для получения следующего сопряжённого направления $p_k$ необходимо хранить только сопряжённое направление $p_{k-1}$ и остаток $r_k$ с предыдущей итерации. \n\n**Вопрос:** как находить размер шага $\\alpha_k$?", "_____no_output_____" ], [ "## Сопряжённость сопряжённых градиентов\n\n**Теорема**\nПусть после $k$ итераций $x_k \\neq x^*$. Тогда \n\n- $\\langle r_k, r_i \\rangle = 0, \\; i = 1, \\ldots k - 1$\n- $\\mathtt{span}(r_0, \\ldots, r_k) = \\mathtt{span}(r_0, Ar_0, \\ldots, A^kr_0)$\n- $\\mathtt{span}(p_0, \\ldots, p_k) = \\mathtt{span}(r_0, Ar_0, \\ldots, A^kr_0)$\n- $p_k^{\\top}Ap_i = 0$, $i = 1,\\ldots,k-1$", "_____no_output_____" ], [ "### Теоремы сходимости\n\n**Теорема 1.** Если матрица $A$ имеет только $r$ различных собственных значений, то метод сопряжённых градиентов cойдётся за $r$ итераций.\n\n**Теорема 2.** Имеет место следующая оценка сходимости\n\n$$\n\\| x_{k} - x^* \\|_A \\leq 2\\left( \\dfrac{\\sqrt{\\kappa(A)} - 1}{\\sqrt{\\kappa(A)} + 1} \\right)^k \\|x_0 - x^*\\|_A,\n$$\n\nгде $\\|x\\|_A = x^{\\top}Ax$ и $\\kappa(A) = \\frac{\\lambda_1(A)}{\\lambda_n(A)}$ - число обусловленности матрицы $A$, $\\lambda_1(A) \\geq ... 
\\geq \\lambda_n(A)$ - собственные значения матрицы $A$\n\n**Замечание:** сравните коэффициент геометрической прогрессии с аналогом в градиентном спуске.", "_____no_output_____" ], [ "### Интерпретации метода сопряжённых градиентов\n\n- Градиентный спуск в пространстве $y = Sx$, где $S = [p_0, \\ldots, p_n]$, в котором матрица $A$ становится диагональной (или единичной в случае ортонормированности сопряжённых направлений)\n- Поиск оптимального решения в [Крыловском подпространстве](https://stanford.edu/class/ee364b/lectures/conj_grad_slides.pdf) $\\mathcal{K}_k(A) = \\{b, Ab, A^2b, \\ldots A^{k-1}b\\}$\n\n$$\nx_k = \\arg\\min_{x \\in \\mathcal{K}_k} f(x)\n$$\n\n- Однако естественный базис Крыловского пространства неортогональный и, более того, **плохо обусловлен**.\n\n**Упражнение** Проверьте численно, насколько быстро растёт обусловленность матрицы из векторов $\\{b, Ab, ... \\}$\n\n- Поэтому его необходимо ортогонализовать, что и происходит в методе сопряжённых градиентов", "_____no_output_____" ], [ "### Основное свойство\n$$ \nA^{-1}b \\in \\mathcal{K}_n(A)\n$$\n\nДоказательство\n\n- Теорема Гамильтона-Кэли: $p(A) = 0$, где $p(\\lambda) = \\det(A - \\lambda I)$\n- $p(A)b = A^nb + a_1A^{n-1}b + \\ldots + a_{n-1}Ab + a_n b = 0$\n- $A^{-1}p(A)b = A^{n-1}b + a_1A^{n-2}b + \\ldots + a_{n-1}b + a_nA^{-1}b = 0$\n- $A^{-1}b = -\\frac{1}{a_n}(A^{n-1}b + a_1A^{n-2}b + \\ldots + a_{n-1}b)$", "_____no_output_____" ], [ "### Сходимость по функции и по аргументу\n\n- Решение: $x^* = A^{-1}b$\n- Минимум функции: \n\n$$\nf^* = \\frac{1}{2}b^{\\top}A^{-\\top}AA^{-1}b - b^{\\top}A^{-1}b = -\\frac{1}{2}b^{\\top}A^{-1}b = -\\frac{1}{2}\\|x^*\\|^2_A\n$$ \n\n- Оценка сходимости по функции: \n\n$$ \nf(x) - f^* = \\frac{1}{2}x^{\\top}Ax - b^{\\top}x + \\frac{1}{2}\\|x^*\\|_A^2 =\\frac{1}{2}\\|x\\|_A^2 - x^{\\top}Ax^* + \\frac{1}{2}\\|x^*\\|_A^2 = \\frac{1}{2}\\|x - x^*\\|_A^2 \n$$ \n", "_____no_output_____" ], [ "### Доказательство сходимости\n\n- $x_k$ лежит в $\\mathcal{K}_k$\n- 
$x_k = \\sum\\limits_{i=1}^k c_i A^{i-1}b = p(A)b$, где $p(x)$ некоторый полином степени не выше $k-1$\n- $x_k$ минимизирует $f$ на $\\mathcal{K}_k$, отсюда\n\n$$\n2(f_k - f^*) = \\inf_{x \\in \\mathcal{K}_k} \\|x - x^* \\|^2_A = \\inf_{\\mathrm{deg}(p) < k} \\|(p(A) - A^{-1})b\\|^2_A\n$$\n\n- Спектральное разложение $A = U\\Lambda U^*$ даёт\n\n$$\n2(f_k - f^*) = \\inf_{\\mathrm{deg}(p) < k} \\|(p(\\Lambda) - \\Lambda^{-1})d\\|^2_{\\Lambda} = \\inf_{\\mathrm{deg}(p) < k} \\sum_{i=1}^n\\frac{d_i^2 (\\lambda_ip(\\lambda_i) - 1)^2}{\\lambda_i} = \\inf_{\\mathrm{deg}(q) \\leq k, q(0) = 1} \\sum_{i=1}^n\\frac{d_i^2 q(\\lambda_i)^2}{\\lambda_i}\n$$\n\n- Сведём задачу к поиску некоторого многочлена\n$$\nf_k - f^* \\leq \\left(\\sum_{i=1}^n \\frac{d_i^2}{2\\lambda_i}\\right) \\inf_{\\mathrm{deg}(q) \\leq k, q(0) = 1}\\left(\\max_{i=1,\\ldots,n} q(\\lambda_i)^2 \\right) = \\frac{1}{2}\\|x^*\\|^2_A \\inf_{\\mathrm{deg}(q) \\leq k, q(0) = 1}\\left(\\max_{i=1,\\ldots,n} q(\\lambda_i)^2 \\right)\n$$\n\n- Пусть $A$ имеет $m$ различных собственных значений, тогда для \n\n$$\nr(y) = \\frac{(-1)^m}{\\lambda_1 \\cdot \\ldots \\cdot \\lambda_m}(y - \\lambda_i)\\cdot \\ldots \\cdot (y - \\lambda_m)\n$$\n\nвыполнено $\\mathrm{deg}(r) = m$ и $r(0) = 1$\n- Значение для оптимального полинома степени не выше $k$ оценим сверху значением для полинома $r$ степени $m$\n\n$$\n0 \\leq f_k - f^* \\leq \\frac{1}{2}\\|x^*\\|_A^2 \\max_{i=1,\\ldots,m} r(\\lambda_i) = 0\n$$\n- Метод сопряжённых градиентов сошёлся за $m$ итераций\n", "_____no_output_____" ], [ "### Улучшенная версия метода сопряжённых градиентов\nНа практике используются следующие формулы для шага $\\alpha_k$ и коэффициента $\\beta_{k}$:\n\n$$\n\\alpha_k = \\dfrac{r^{\\top}_k r_k}{p^{\\top}_{k}Ap_{k}} \\qquad \\beta_k = \\dfrac{r^{\\top}_k r_k}{r^{\\top}_{k-1} r_{k-1}}\n$$\n\n**Вопрос:** чем они лучше базовой версии?", "_____no_output_____" ], [ "### Псевдокод метода сопряжённых градиентов\n```python\ndef 
ConjugateGradientQuadratic(x0, A, b, eps):\n \n r = A.dot(x0) - b\n \n p = -r\n \n while np.linalg.norm(r) > eps:\n \n alpha = r.dot(r) / p.dot(A.dot(p))\n \n x = x + alpha * p\n \n r_next = r + alpha * A.dot(p)\n \n beta = r_next.dot(r_next) / r.dot(r)\n \n p = -r_next + beta * p\n \n r = r_next\n \n return x\n```", "_____no_output_____" ], [ "## Метод сопряжённых градиентов для неквадратичной функции\n**Идея:** использовать градиенты $f'(x_k)$ неквадратичной функции вместо остатков $r_k$ и линейный поиск шага $\\alpha_k$ вместо аналитического вычисления. Получим метод Флетчера-Ривса.\n\n```python\ndef ConjugateGradientFR(f, gradf, x0, eps):\n \n x = x0\n \n grad = gradf(x)\n \n p = -grad\n \n while np.linalg.norm(gradf(x)) > eps:\n \n alpha = StepSearch(x, f, gradf, **kwargs)\n \n x = x + alpha * p\n \n grad_next = gradf(x)\n \n beta = grad_next.dot(grad_next) / grad.dot(grad)\n \n p = -grad_next + beta * p\n \n grad = grad_next\n \n if restart_condition:\n \n p = -gradf(x)\n \n return x\n```", "_____no_output_____" ], [ "### Теорема сходимости\n\n**Теорема.** Пусть \n- множество уровней $\\mathcal{L}$ ограничено\n- существует $\\gamma > 0$: $\\| f'(x) \\|_2 \\leq \\gamma$ для $x \\in \\mathcal{L}$\nТогда\n\n$$\n\\lim_{j \\to \\infty} \\| f'(x_{k_j}) \\|_2 = 0\n$$", "_____no_output_____" ], [ "### Перезапуск (restart)\n\n1. Для ускорения метода сопряжённых градиентов используют технику перезапусков: удаление ранее накопленной истории и перезапуск метода с текущей точки, как будто это точка $x_0$\n2. Существуют разные условия, сигнализирующие о том, что надо делать перезапуск, например\n - $k = n$\n - $\\dfrac{|\\langle f'(x_k), f'(x_{k-1}) \\rangle |}{\\| f'(x_k) \\|_2^2} \\geq \\nu \\approx 0.1$\n3. Можно показать (см. Nocedal, Wright Numerical Optimization, Ch. 5, p. 125), что запуск метода Флетчера-Ривза без использования перезапусков на некоторых итерациях может приводить к крайне медленной сходимости! \n4. 
Метод Полака-Рибьера и его модификации лишены подобного недостатка.", "_____no_output_____" ], [ "### Комментарии\n- Замечательная методичка \"An Introduction to the Conjugate Gradient Method Without the Agonizing Pain\" размещена [тут](https://www.cs.cmu.edu/~quake-papers/painless-conjugate-gradient.pdf)\n- Помимо метода Флетчера-Ривса существуют другие способы вычисления $\\beta_k$: метод Полака-Рибьера, метод Хестенса-Штифеля...\n- Для метода сопряжённых градиентов требуется 4 вектора: каких?\n- Самой дорогой операцией является умножение матрицы на вектор", "_____no_output_____" ], [ "## Эксперименты\n", "_____no_output_____" ], [ "### Квадратичная целевая функция", "_____no_output_____" ] ], [ [ "import numpy as np\nn = 100\n# Random\nA = np.random.randn(n, n)\nA = A.T.dot(A)\n# Clustered eigenvalues\n# A = np.diagflat([np.ones(n//4), 10 * np.ones(n//4), 100*np.ones(n//4), 1000* np.ones(n//4)])\n# U = np.random.rand(n, n)\n# Q, _ = np.linalg.qr(U)\n# A = Q.dot(A).dot(Q.T)\n# A = (A + A.T) * 0.5\nprint(\"A is normal matrix: ||AA* - A*A|| =\", np.linalg.norm(A.dot(A.T) - A.T.dot(A)))\nb = np.random.randn(n)\n# Hilbert matrix\n# A = np.array([[1.0 / (i+j - 1) for i in range(1, n+1)] for j in range(1, n+1)]) + 1e-3 * np.eye(n)\n# b = np.ones(n)\n\nf = lambda x: 0.5 * x.dot(A.dot(x)) - b.dot(x)\ngrad_f = lambda x: A.dot(x) - b\nx0 = np.zeros(n)", "A is normal matrix: ||AA* - A*A|| = 0.0\n" ] ], [ [ "#### Распределение собственных значений", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport matplotlib.pyplot as plt\nplt.rc(\"text\", usetex=True)\nplt.rc(\"font\", family='serif')\nimport seaborn as sns\nsns.set_context(\"talk\")\n\neigs = np.linalg.eigvalsh(A)\ncond_A = np.linalg.cond(A)\nprint((np.sqrt(cond_A) - 1) / (np.sqrt(cond_A) + 1))\nprint((cond_A - 1) / (cond_A + 1))\nplt.semilogy(np.unique(eigs))\nplt.ylabel(\"Eigenvalues\", fontsize=20)\nplt.xticks(fontsize=18)\n_ = plt.yticks(fontsize=18)", "0.9949220032356589\n0.9999870413374606\n" ] ], [ [ "#### 
Правильный ответ", "_____no_output_____" ] ], [ [ "import scipy.optimize as scopt\n\ndef callback(x, array):\n array.append(x)", "_____no_output_____" ], [ "scopt_cg_array = []\nscopt_cg_callback = lambda x: callback(x, scopt_cg_array)\nx = scopt.minimize(f, x0, method=\"CG\", jac=grad_f, callback=scopt_cg_callback)\nx = x.x\nprint(\"||f'(x*)|| =\", np.linalg.norm(A.dot(x) - b))\nprint(\"f* =\", f(x))", "||f'(x*)|| = 0.0008375645375203281\nf* = -161.08674289217973\n" ] ], [ [ "#### Реализация метода сопряжённых градиентов", "_____no_output_____" ] ], [ [ "def ConjugateGradientQuadratic(x0, A, b, tol=1e-8, callback=None):\n x = x0\n r = A.dot(x0) - b\n p = -r\n while np.linalg.norm(r) > tol:\n alpha = r.dot(r) / p.dot(A.dot(p))\n x = x + alpha * p\n if callback is not None:\n callback(x)\n r_next = r + alpha * A.dot(p)\n beta = r_next.dot(r_next) / r.dot(r)\n p = -r_next + beta * p\n r = r_next\n return x", "_____no_output_____" ], [ "import liboptpy.unconstr_solvers as methods\nimport liboptpy.step_size as ss\n\nprint(\"\\t CG quadratic\")\ncg_quad = methods.fo.ConjugateGradientQuad(A, b)\nx_cg = cg_quad.solve(x0, max_iter=1000, tol=1e-7, disp=True)\n\nprint(\"\\t Gradient Descent\")\ngd = methods.fo.GradientDescent(f, grad_f, ss.ExactLineSearch4Quad(A, b))\nx_gd = gd.solve(x0, tol=1e-7, disp=True)\n\nprint(\"Condition number of A =\", abs(max(eigs)) / abs(min(eigs)))", "\t CG quadratic\nRequired tolerance achieved!\nConvergence in 155 iterations\nFunction value = -161.0867480844567\nNorm of gradient = 8.882504945739075\n\t Gradient Descent\nMaximum iteration exceeds!\nConvergence in 100 iterations\nFunction value = -3.5437507073344374\nNorm of gradient = 2.591961559534102\nCondition number of A = 154335.91508856174\n" ] ], [ [ "#### График сходимости", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(8,6))\nplt.semilogy([np.linalg.norm(grad_f(x)) for x in cg_quad.get_convergence()], label=r\"$\\|f'(x_k)\\|^{CG}_2$\", 
linewidth=2)\nplt.semilogy([np.linalg.norm(grad_f(x)) for x in scopt_cg_array[:5000]], label=r\"$\\|f'(x_k)\\|^{CG_{PR}}_2$\", linewidth=2)\nplt.semilogy([np.linalg.norm(grad_f(x)) for x in gd.get_convergence()], label=r\"$\\|f'(x_k)\\|^{G}_2$\", linewidth=2)\nplt.legend(loc=\"best\", fontsize=20)\nplt.xlabel(r\"Iteration number, $k$\", fontsize=20)\nplt.ylabel(\"Convergence rate\", fontsize=20)\nplt.xticks(fontsize=18)\n_ = plt.yticks(fontsize=18)", "_____no_output_____" ], [ "print([np.linalg.norm(grad_f(x)) for x in cg_quad.get_convergence()])", "[8.882504945739075, 8.02312088569592, 7.82823897372081, 8.228469078569972, 8.02794091828915, 8.545475880079737, 7.7073274445122255, 8.169308667231142, 6.900096159518948, 6.028760821833386, 5.462256914177975, 6.1694709106304595, 6.190481019114486, 7.164609359193713, 5.909704031656815, 6.409751673681231, 7.531317394687884, 8.57642043860904, 8.3063409423551, 8.692753863282514, 9.454506536439668, 11.769568749916493, 12.485662898024287, 12.191538956825184, 12.675141878675703, 11.617683919711972, 10.650472932605874, 12.046746330921751, 11.105597065370535, 9.836420813032703, 10.010939150132543, 10.88325108133763, 8.666008880474333, 7.699269654011459, 6.355503224223968, 5.90443784272244, 6.221287818412301, 8.195589286785, 7.299096585496559, 6.161077145583338, 7.349147178576559, 6.6893396705637445, 8.40946426152716, 9.748632525353658, 7.796774493958513, 6.846679708578971, 7.208869249348138, 8.344352174763728, 6.719521167979252, 7.553812333612303, 7.295050517200499, 7.255100360639423, 9.10176817826305, 7.564137691646654, 5.574934220751499, 5.0689605056064675, 6.64803360144127, 9.082991847266362, 8.91874045104218, 7.630214445117524, 7.875437833929472, 8.077348635654587, 6.878495923978833, 8.618381403241512, 8.60676294144403, 9.383106492261556, 11.630888877902407, 10.156110363881771, 7.934608295859058, 9.757445983633852, 9.892228941626778, 16.3990317468293, 13.203312252399416, 16.354109068634433, 10.969604623502992, 
7.310309983285203, 8.708753249309645, 5.898285732655631, 7.526246807448508, 6.965704898283933, 7.462581571583673, 7.805960221274653, 8.508286459164133, 6.782791617211194, 9.525918981013422, 9.27258932893244, 6.619343831847434, 6.672880385414406, 13.518695334560144, 7.464056244223318, 11.202629039874763, 6.673582480342727, 10.237190398363426, 10.120525319769705, 18.497999554411077, 10.063524026467377, 8.346018612682675, 13.380868050911541, 14.459140976373078, 10.989041043239876, 11.977437925472458, 22.957132321437662, 18.799585215865093, 13.586212572471215, 14.537247509391333, 19.896420997927063, 18.289144794815655, 22.450938275591362, 16.67984956082469, 19.22436706248422, 24.836134745948023, 43.179657919843265, 17.705104807295076, 25.979472552345317, 12.030392663393808, 19.010168855008587, 8.343901894881581, 13.500132216144808, 10.669010006533966, 21.47652108829665, 10.198236869744527, 19.613641749092963, 17.790400875541483, 4.331643129820227, 7.315848711992672, 6.248687509536643, 2.25535701628628, 3.141518264745747, 7.709087249053964, 6.757352443310114, 5.673560225589232, 10.771835819830274, 27.363273351822432, 15.776334053216077, 18.93722967717437, 16.513403275439256, 4.049925728451657, 1.2925323891380547, 0.47386622432041325, 1.584786386721307, 1.6450410867080851, 0.4030636557301404, 0.0599062767817567, 0.08432480446857377, 0.13553825271819156, 0.031917734280972714, 0.010084511770498688, 0.0023004401221770193, 0.00038032197499700027, 0.00015523267357007485, 2.450675882919022e-05, 3.2669330587524425e-06, 9.692125396835116e-07, 3.6674131353981623e-07, 2.08191209877252e-07, 7.511506632641239e-08]\n" ], [ "plt.figure(figsize=(8,6))\nplt.plot([f(x) for x in cg_quad.get_convergence()], label=r\"$f(x^{CG}_k)$\", linewidth=2)\nplt.plot([f(x) for x in scopt_cg_array], label=r\"$f(x^{CG_{PR}}_k)$\", linewidth=2)\nplt.plot([f(x) for x in gd.get_convergence()], label=r\"$f(x^{G}_k)$\", linewidth=2)\nplt.legend(loc=\"best\", fontsize=20)\nplt.xlabel(r\"Iteration number, 
$k$\", fontsize=20)\nplt.ylabel(\"Function value\", fontsize=20)\nplt.xticks(fontsize=18)\n_ = plt.yticks(fontsize=18)", "_____no_output_____" ] ], [ [ "### Неквадратичная функция", "_____no_output_____" ] ], [ [ "import numpy as np\nimport sklearn.datasets as skldata\nimport scipy.special as scspec\n\nn = 300\nm = 1000\n\nX, y = skldata.make_classification(n_classes=2, n_features=n, n_samples=m, n_informative=n//3)\nC = 1\ndef f(w):\n return np.linalg.norm(w)**2 / 2 + C * np.mean(np.logaddexp(np.zeros(X.shape[0]), -y * X.dot(w)))\n\ndef grad_f(w):\n denom = scspec.expit(-y * X.dot(w))\n return w - C * X.T.dot(y * denom) / X.shape[0]\n# f = lambda x: -np.sum(np.log(1 - A.T.dot(x))) - np.sum(np.log(1 - x*x))\n# grad_f = lambda x: np.sum(A.dot(np.diagflat(1 / (1 - A.T.dot(x)))), axis=1) + 2 * x / (1 - np.power(x, 2))\nx0 = np.zeros(n)\nprint(\"Initial function value = {}\".format(f(x0)))\nprint(\"Initial gradient norm = {}\".format(np.linalg.norm(grad_f(x0))))", "Initial function value = 0.6931471805599454\nInitial gradient norm = 2.7311012301205486\n" ] ], [ [ "#### Реализация метода Флетчера-Ривса", "_____no_output_____" ] ], [ [ "def ConjugateGradientFR(f, gradf, x0, num_iter=100, tol=1e-8, callback=None, restart=False):\n x = x0\n grad = gradf(x)\n p = -grad\n it = 0\n while np.linalg.norm(gradf(x)) > tol and it < num_iter:\n alpha = utils.backtracking(x, p, method=\"Wolfe\", beta1=0.1, beta2=0.4, rho=0.5, f=f, grad_f=gradf)\n if alpha < 1e-18:\n break\n x = x + alpha * p\n if callback is not None:\n callback(x)\n grad_next = gradf(x)\n beta = grad_next.dot(grad_next) / grad.dot(grad)\n p = -grad_next + beta * p\n grad = grad_next.copy()\n it += 1\n if restart and it % restart == 0:\n grad = gradf(x)\n p = -grad\n return x", "_____no_output_____" ] ], [ [ "#### График сходимости", "_____no_output_____" ] ], [ [ "import scipy.optimize as scopt\nimport liboptpy.restarts as restarts\n\nn_restart = 60\ntol = 1e-5\nmax_iter = 600\n\nscopt_cg_array = 
[]\nscopt_cg_callback = lambda x: callback(x, scopt_cg_array)\nx = scopt.minimize(f, x0, tol=tol, method=\"CG\", jac=grad_f, callback=scopt_cg_callback, options={\"maxiter\": max_iter})\nx = x.x\nprint(\"\\t CG by Polak-Rebiere\")\nprint(\"Norm of garient = {}\".format(np.linalg.norm(grad_f(x))))\nprint(\"Function value = {}\".format(f(x)))\n\nprint(\"\\t CG by Fletcher-Reeves\")\ncg_fr = methods.fo.ConjugateGradientFR(f, grad_f, ss.Backtracking(\"Wolfe\", rho=0.9, beta1=0.1, beta2=0.4, init_alpha=1.))\nx = cg_fr.solve(x0, tol=tol, max_iter=max_iter, disp=True)\n\nprint(\"\\t CG by Fletcher-Reeves with restart n\")\ncg_fr_rest = methods.fo.ConjugateGradientFR(f, grad_f, ss.Backtracking(\"Wolfe\", rho=0.9, beta1=0.1, beta2=0.4, \n init_alpha=1.), restarts.Restart(n // n_restart))\nx = cg_fr_rest.solve(x0, tol=tol, max_iter=max_iter, disp=True)\n\nprint(\"\\t Gradient Descent\")\ngd = methods.fo.GradientDescent(f, grad_f, ss.Backtracking(\"Wolfe\", rho=0.9, beta1=0.1, beta2=0.4, init_alpha=1.))\nx = gd.solve(x0, max_iter=max_iter, tol=tol, disp=True)", "\t CG by Polak-Rebiere\nNorm of garient = 1.0348293374010706e-05\nFunction value = 0.5084404304041087\n\t CG by Fletcher-Reeves\nRequired tolerance achieved!\nConvergence in 62 iterations\nFunction value = 0.5084404303884263\nNorm of gradient = 5.618955736001479e-06\n\t CG by Fletcher-Reeves with restart n\nRequired tolerance achieved!\nConvergence in 82 iterations\nFunction value = 0.5084404303875517\nNorm of gradient = 9.128024493165525e-06\n\t Gradient Descent\nRequired tolerance achieved!\nConvergence in 391 iterations\nFunction value = 0.5084404304098479\nNorm of gradient = 8.901188370519539e-06\n" ], [ "plt.figure(figsize=(8, 6))\nplt.semilogy([np.linalg.norm(grad_f(x)) for x in cg_fr.get_convergence()], label=r\"$\\|f'(x_k)\\|^{CG_{FR}}_2$ no restart\", linewidth=2)\nplt.semilogy([np.linalg.norm(grad_f(x)) for x in cg_fr_rest.get_convergence()], label=r\"$\\|f'(x_k)\\|^{CG_{FR}}_2$ restart\", 
linewidth=2)\nplt.semilogy([np.linalg.norm(grad_f(x)) for x in scopt_cg_array], label=r\"$\\|f'(x_k)\\|^{CG_{PR}}_2$\", linewidth=2)\n\nplt.semilogy([np.linalg.norm(grad_f(x)) for x in gd.get_convergence()], label=r\"$\\|f'(x_k)\\|^{G}_2$\", linewidth=2)\nplt.legend(loc=\"best\", fontsize=16)\nplt.xlabel(r\"Iteration number, $k$\", fontsize=20)\nplt.ylabel(\"Convergence rate\", fontsize=20)\nplt.xticks(fontsize=18)\n_ = plt.yticks(fontsize=18)", "_____no_output_____" ] ], [ [ "#### Время выполнения", "_____no_output_____" ] ], [ [ "%timeit scopt.minimize(f, x0, method=\"CG\", tol=tol, jac=grad_f, options={\"maxiter\": max_iter})\n%timeit cg_fr.solve(x0, tol=tol, max_iter=max_iter)\n%timeit cg_fr_rest.solve(x0, tol=tol, max_iter=max_iter)\n%timeit gd.solve(x0, tol=tol, max_iter=max_iter)", "18.2 ms ± 1.37 ms per loop (mean ± std. dev. of 7 runs, 100 loops each)\n85.7 ms ± 1.51 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)\n113 ms ± 1.46 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)\n583 ms ± 7.44 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n" ] ], [ [ "## Резюме\n\n1. Сопряжённые направления\n2. Метод сопряжённых градиентов\n3. Сходимость\n4. Эксперименты", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
4a2f1a0bc440dcd6d81f03ea2813414a92574a00
54,867
ipynb
Jupyter Notebook
docs/source/jupyter/Example_02_Read_Data.ipynb
tanimislam/kosh
aba17fd5393090e9fbfb3c6b3e7ab0f4a301ab26
[ "MIT" ]
6
2020-09-28T19:26:29.000Z
2022-03-14T20:28:57.000Z
docs/source/jupyter/Example_02_Read_Data.ipynb
tanimislam/kosh
aba17fd5393090e9fbfb3c6b3e7ab0f4a301ab26
[ "MIT" ]
null
null
null
docs/source/jupyter/Example_02_Read_Data.ipynb
tanimislam/kosh
aba17fd5393090e9fbfb3c6b3e7ab0f4a301ab26
[ "MIT" ]
2
2021-03-02T20:22:54.000Z
2021-06-17T23:57:23.000Z
91.445
24,888
0.855214
[ [ [ "# Reading Data\n\n## Connect to store (using sina local file)\n\nFirst let's create an empty database with you as a single user\n\nIn a real application only admin user should have write permission to the file", "_____no_output_____" ] ], [ [ "import os\nimport sys\nimport shlex\nfrom subprocess import Popen, PIPE\nimport kosh\n\nkosh_example_sql_file = \"kosh_example_read.sql\"\n\n# Create a new store (erase if exists)\nstore = kosh.create_new_db(kosh_example_sql_file)", "_____no_output_____" ] ], [ [ "## Adding datasets to the store\n\nLet's add a dataset and associate hdf5 file with it.", "_____no_output_____" ] ], [ [ "dataset = store.create()\ndataset.associate(\"../tests/baselines/node_extracts2/node_extracts2.hdf5\", mime_type=\"hdf5\", absolute_path=False)", "/g/g19/cdoutrix/miniconda3/envs/kosh/lib/python3.6/site-packages/kosh/sina/core.py:868: UserWarning: \nIn the next version the search function will return a generator.\nYou might need to wrap the result in a list.\n \"\\nIn the next version the search function will return a generator.\\n\"\n" ] ], [ [ "## Querying Data\n\nIn Kosh data retrievable are called \"features\"\n\nLet's see which feature are associated with this dataset:", "_____no_output_____" ] ], [ [ "features = dataset.list_features()\nprint(features)", "['cycles', 'direction', 'elements', 'node', 'node/metrics_0', 'node/metrics_1', 'node/metrics_10', 'node/metrics_11', 'node/metrics_12', 'node/metrics_2', 'node/metrics_3', 'node/metrics_4', 'node/metrics_5', 'node/metrics_6', 'node/metrics_7', 'node/metrics_8', 'node/metrics_9', 'zone', 'zone/metrics_0', 'zone/metrics_1', 'zone/metrics_2', 'zone/metrics_3', 'zone/metrics_4']\n" ] ], [ [ "Let's get more information on a specific features", "_____no_output_____" ] ], [ [ "info = dataset.describe_feature(\"node/metrics_5\")\nprint(info)", "{'size': (2, 18), 'format': 'hdf5', 'type': dtype('<f4'), 'dimensions': [{'name': 'cycles', 'first': 11, 'last': 8, 'length': 2}, {'name': 
'elements', 'first': 17, 'last': 15, 'length': 18}]}\n" ] ], [ [ "## Opening Data\n\nWe might want to simply acces the URI (to add ata to it for example).\n\nfor this we will need the *id* of the associated_uri", "_____no_output_____" ] ], [ [ "associated_id = dataset.search(mime_type=\"hdf5\", ids_only=True)[0]\nh5_file = dataset.open(associated_id)\nh5_file", "/g/g19/cdoutrix/miniconda3/envs/kosh/lib/python3.6/site-packages/kosh/sina/core.py:508: UserWarning: \nIn the next version the search function will return a generator.\nYou might need to wrap the result in a list.\n \"\\nIn the next version the search function will return a generator.\\n\"\n" ] ], [ [ "## Getting Data\n\nLet's access this feature by calling the `get_execution_graph()` function.\nThis returns a Kosh representation of how to get to a feature's data.\nNote that is just a representation (a path) to the data, not the data itself.", "_____no_output_____" ] ], [ [ "feature = dataset.get_execution_graph(\"node/metrics_5\")\nfeature", "_____no_output_____" ] ], [ [ "This can be shorten as:", "_____no_output_____" ] ], [ [ "feature = dataset[\"node/metrics_5\"]\nfeature", "_____no_output_____" ] ], [ [ "This gives us a handle to this feature's data, no data has actually been read yet.\n\nLet's retrieve the data by calling the `traverse` function. 
This will connect the feature's origin (uri) to the data, applying any *transformer* or *operator* to it (see other notebooks to learn about these)", "_____no_output_____" ] ], [ [ "data = feature.traverse()\nprint(data)", "<HDF5 dataset \"metrics_5\": shape (2, 18), type \"<f4\">\n" ] ], [ [ "Which is equivalent to:", "_____no_output_____" ] ], [ [ "data = feature()\nprint(data)", "<HDF5 dataset \"metrics_5\": shape (2, 18), type \"<f4\">\n" ] ], [ [ "This is equivalent of what versions 1.1 and below used to do:", "_____no_output_____" ] ], [ [ "data = dataset.get(\"node/metrics_5\")\nprint(data)", "<HDF5 dataset \"metrics_5\": shape (2, 18), type \"<f4\">\n" ] ], [ [ "Note that you can also slice the feature directly", "_____no_output_____" ] ], [ [ "data = feature[:]\nprint(data)", "<HDF5 dataset \"metrics_5\": shape (2, 18), type \"<f4\">\n" ], [ "# If you know the dims you can select by value and/or indices\nprint(dataset.describe_feature(\"node/metrics_1\"))\nfeature2 = dataset[\"node/metrics_1\"]\ndata2 = feature2(cycles=slice(0,1), elements=[17, 15])\nprint(data2.shape)", "{'size': (2, 18), 'format': 'hdf5', 'type': dtype('<f4'), 'dimensions': [{'name': 'cycles', 'first': 11, 'last': 8, 'length': 2}, {'name': 'elements', 'first': 17, 'last': 15, 'length': 18}]}\n(1, 2)\n" ] ], [ [ "## Associating Multiple Sources\n\nLet's add an image file", "_____no_output_____" ] ], [ [ "dataset.associate(\"../share/icons/png/Kosh_Logo_K_blue.png\", mime_type=\"png\", absolute_path=False)\ndataset.list_features()", "_____no_output_____" ], [ "img = dataset[\"image\"]\nprint(img[:].shape)\ntry:\n import matplotlib.pyplot as plt\n %matplotlib inline\n plt.imshow(img[...,-1]) # Plot last channel\nexcept ImportError:\n print(\"You will need matplotlib to plot the picture\")", "(403, 431, 4)\n" ] ], [ [ "We can also retrieve the png as the raw binary data", "_____no_output_____" ] ], [ [ "raw = img(format=\"bytes\")\nlen(raw), type(raw)", "_____no_output_____" ] ], [ [ "We can 
associate many image files but this leads to duplicate \"image\" feature", "_____no_output_____" ] ], [ [ "# let's remove hdf5 for clarity\ndataset.dissociate(\"../tests/baselines/node_extracts2/node_extracts2.hdf5\", absolute_path=False)\ndataset.list_features()", "_____no_output_____" ] ], [ [ "Now let's associate a second image file", "_____no_output_____" ] ], [ [ "dataset.associate(\"../share/icons/png/Kosh_Logo_K_orange.png\", mime_type=\"png\", absolute_path=False)\ndataset.list_features() # URI is now added to feature to disambiguate them", "_____no_output_____" ], [ "dataset.describe_feature(\"image_@_../share/icons/png/Kosh_Logo_K_orange.png\")", "_____no_output_____" ], [ "try:\n plt.imshow(dataset.get(\"image_@_../share/icons/png/Kosh_Logo_K_orange.png\")) # Plot last channel\nexcept Exception:\n print(\"With matplotlib you would have seen a \")", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
4a2f24151768e05efca27a27bffdd4649f47380d
19,881
ipynb
Jupyter Notebook
field_detector/data/YoloV3_web.ipynb
bhecquet/yolo-training-data
eefcc36b6a583b3244104f462b08fe57eea6bd54
[ "Apache-2.0" ]
1
2021-08-11T01:00:22.000Z
2021-08-11T01:00:22.000Z
field_detector/data/YoloV3_web.ipynb
bhecquet/yolo-training-data
eefcc36b6a583b3244104f462b08fe57eea6bd54
[ "Apache-2.0" ]
null
null
null
field_detector/data/YoloV3_web.ipynb
bhecquet/yolo-training-data
eefcc36b6a583b3244104f462b08fe57eea6bd54
[ "Apache-2.0" ]
null
null
null
72.294545
1,525
0.575072
[ [ [ "import torch\nprint('PyTorch %s %s' % (torch.__version__, torch.cuda.get_device_properties(0) if torch.cuda.is_available() else 'CPU'))", "PyTorch 1.9.0+cu102 _CudaDeviceProperties(name='Tesla T4', major=7, minor=5, total_memory=15109MB, multi_processor_count=40)\n" ], [ "from google.colab import drive\ndrive.mount('/content/drive')\n\n%cd /content/\n\n# import yolo project\n!rm -rf yolov3\n!git clone https://github.com/ultralytics/yolov3/\n\n# import training data project\n!rm -rf yolo-training-data\n!git clone https://github.com/bhecquet/yolo-training-data\n\n%cd yolov3", "Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount(\"/content/drive\", force_remount=True).\n/content\nCloning into 'yolov3'...\nremote: Enumerating objects: 9862, done.\u001b[K\nremote: Total 9862 (delta 0), reused 0 (delta 0), pack-reused 9862\u001b[K\nReceiving objects: 100% (9862/9862), 9.19 MiB | 26.44 MiB/s, done.\nResolving deltas: 100% (6667/6667), done.\nCloning into 'yolo-training-data'...\nremote: Enumerating objects: 9019, done.\u001b[K\nremote: Counting objects: 100% (9019/9019), done.\u001b[K\nremote: Compressing objects: 100% (7349/7349), done.\u001b[K\nremote: Total 9019 (delta 1683), reused 8957 (delta 1637), pack-reused 0\u001b[K\nReceiving objects: 100% (9019/9019), 50.06 MiB | 31.51 MiB/s, done.\nResolving deltas: 100% (1683/1683), done.\n/content/yolov3\n" ], [ "# création des données de test / entrainement\n!python /content/yolo-training-data/generate_train_files.py /content/yolo-training-data/field-detector/dataset_extracted /content/yolo-training-data/field-detector/dataset_generated_small --output /content/yolov3/dataset", "INFO:root:2721 files added to /content/yolov3/dataset/training\nINFO:root:290 files added to /content/yolov3/dataset/testing\n" ], [ "import torch\nimport os\nfrom IPython.display import Image, clear_output \nprint('PyTorch %s %s' % (torch.__version__, torch.cuda.get_device_properties(0) if 
torch.cuda.is_available() else 'CPU'))\nbest_fitness = 0.0\ncfg = '/content/yolo-training-data/field-detector/data/web-generated.yaml'\n#cfg = 'web-generated.yaml'\nmodel = 'yolov3-tiny.pt'\n\nepoch_to_run = 3\n\nname = os.path.basename(cfg).split(\".\")[0] + '-' + model.split('.')[0]\n\n\n!python train.py --img 448 --batch 16 --epochs $epoch_to_run --data $cfg --weights $model --name $name --exist-ok \nweights_file_path = '/content/yolov3/runs/train/%s/weights/best.pt' % (name)\nw = torch.load(weights_file_path)\n\nfitness = w['best_fitness']\nprint(\"epoch completed: \" + str(epoch_to_run))\nprint(\"mAp: \" + str(fitness))\n\n!cp /content/yolov3/runs/train/{name}/weights/best.pt /content/drive/My\\ Drive/best_{name}_{epochs_completed}.pt\n", "PyTorch 1.9.0+cu102 _CudaDeviceProperties(name='Tesla T4', major=7, minor=5, total_memory=15109MB, multi_processor_count=40)\n\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov3/ ✅\nYOLOv3 🚀 v9.5.0-13-g1be3170 torch 1.9.0+cu102 CUDA:0 (Tesla T4, 15109.75MB)\n\nNamespace(adam=False, artifact_alias='latest', batch_size=16, bbox_interval=-1, bucket='', cache_images=False, cfg='', data='/content/yolo-training-data/field-detector/data/web-generated.yaml', device='', entity=None, epochs=3, evolve=False, exist_ok=True, global_rank=-1, hyp='data/hyp.scratch.yaml', image_weights=False, img_size=[448, 448], label_smoothing=0.0, linear_lr=False, local_rank=-1, multi_scale=False, name='web-generated-yolov3-tiny', noautoanchor=False, nosave=False, notest=False, project='runs/train', quad=False, rect=False, resume=False, save_dir='runs/train/web-generated-yolov3-tiny', save_period=-1, single_cls=False, sync_bn=False, total_batch_size=16, upload_dataset=False, weights='yolov3-tiny.pt', workers=8, world_size=1)\n\u001b[34m\u001b[1mtensorboard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n2021-07-29 09:33:29.598269: I 
tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcudart.so.11.0\n\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.2, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0\n\u001b[34m\u001b[1mwandb: \u001b[0mInstall Weights & Biases for YOLOv3 logging with 'pip install wandb' (recommended)\nDownloading https://github.com/ultralytics/yolov3/releases/download/v9.5.0/yolov3-tiny.pt to yolov3-tiny.pt...\n100% 16.9M/16.9M [00:00<00:00, 63.8MB/s]\n\nOverriding model.yaml nc=80 with nc=9\n\n from n params module arguments \n 0 -1 1 464 models.common.Conv [3, 16, 3, 1] \n 1 -1 1 0 torch.nn.modules.pooling.MaxPool2d [2, 2, 0] \n 2 -1 1 4672 models.common.Conv [16, 32, 3, 1] \n 3 -1 1 0 torch.nn.modules.pooling.MaxPool2d [2, 2, 0] \n 4 -1 1 18560 models.common.Conv [32, 64, 3, 1] \n 5 -1 1 0 torch.nn.modules.pooling.MaxPool2d [2, 2, 0] \n 6 -1 1 73984 models.common.Conv [64, 128, 3, 1] \n 7 -1 1 0 torch.nn.modules.pooling.MaxPool2d [2, 2, 0] \n 8 -1 1 295424 models.common.Conv [128, 256, 3, 1] \n 9 -1 1 0 torch.nn.modules.pooling.MaxPool2d [2, 2, 0] \n 10 -1 1 1180672 models.common.Conv [256, 512, 3, 1] \n 11 -1 1 0 torch.nn.modules.padding.ZeroPad2d [[0, 1, 0, 1]] \n 12 -1 1 0 torch.nn.modules.pooling.MaxPool2d [2, 1, 0] \n 13 -1 1 4720640 models.common.Conv [512, 1024, 3, 1] \n 14 -1 1 262656 models.common.Conv [1024, 256, 1, 1] \n 15 -1 1 1180672 models.common.Conv [256, 512, 3, 1] \n 16 -2 1 33024 models.common.Conv [256, 128, 1, 1] \n 17 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n 18 [-1, 8] 1 0 models.common.Concat [1] \n 19 -1 1 885248 models.common.Conv [384, 256, 3, 1] \n 20 [19, 15] 1 32340 
models.yolo.Detect [9, [[10, 14, 23, 27, 37, 58], [81, 82, 135, 169, 344, 319]], [256, 512]]\n/usr/local/lib/python3.7/dist-packages/torch/nn/functional.py:718: UserWarning: Named tensors and all their associated APIs are an experimental feature and subject to change. Please do not use them for anything important until they are released as stable. (Triggered internally at /pytorch/c10/core/TensorImpl.h:1156.)\n return torch.max_pool2d(input, kernel_size, stride, padding, dilation, ceil_mode)\nModel Summary: 59 layers, 8688356 parameters, 8688356 gradients, 13.0 GFLOPS\n\nTransferred 68/72 items from yolov3-tiny.pt\n\nWARNING: Dataset not found, nonexistent paths: ['/content/yolov3/yolo-training-data/dataset/testing/images']\nTraceback (most recent call last):\n File \"train.py\", line 541, in <module>\n train(hyp, opt, device, tb_writer)\n File \"train.py\", line 97, in train\n check_dataset(data_dict) # check\n File \"/content/yolov3/utils/general.py\", line 213, in check_dataset\n raise Exception('Dataset not found.')\nException: Dataset not found.\n" ], [ "from utils import utils; utils.plot_results()\n\nw = torch.load('weights/best.pt')\nepochs_completed = str(w['epoch'])\nfitness = w['best_fitness']\nprint(\"epoch completed: \" + epochs_completed)\nprint(\"mAp: \" + str(fitness))", "_____no_output_____" ], [ "!python3 detect.py --source \"dataset_real/ac3bfc152e8ba5ba0b3423755fe6d234.jpg\" --weights /content/drive/My\\ Drive/best_web-generated_-1.pt --img-size 640 --exist-ok --save-txt", "Namespace(agnostic_nms=False, augment=False, classes=None, conf_thres=0.25, device='', exist_ok=True, img_size=640, iou_thres=0.45, name='exp', nosave=False, project='runs/detect', save_conf=False, save_txt=True, source='dataset_real/ac3bfc152e8ba5ba0b3423755fe6d234.jpg', update=False, view_img=False, weights=['/content/drive/My Drive/best_web-generated_-1.pt'])\nYOLOv3 🚀 bh1-8-g0ae89779 torch 1.8.1+cu101 CUDA:0 (Tesla P100-PCIE-16GB, 16280.875MB)\n\nFusing layers... 
\nModel Summary: 269 layers, 62589598 parameters, 0 gradients, 155.9 GFLOPS\nimage 1/1 /content/yolov3/dataset_real/ac3bfc152e8ba5ba0b3423755fe6d234.jpg: 384x640 8 fields, 2 radios, 3 buttons, 2 radio_with_labels, Done. (0.026s)\nResults saved to runs/detect/exp\n10 labels saved to runs/detect/exp/labels\nDone. (0.066s)\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code" ] ]
4a2f2e5a4a5b7fc3f12f305a90393a3c24814ce8
15,760
ipynb
Jupyter Notebook
4_Rows and Columns/3_Adding_Column_Rows/1_Column_Addition.ipynb
sureshmecad/Pandas
128091e7021158f39eb0ff97e0e63d76e778a52c
[ "CNRI-Python" ]
null
null
null
4_Rows and Columns/3_Adding_Column_Rows/1_Column_Addition.ipynb
sureshmecad/Pandas
128091e7021158f39eb0ff97e0e63d76e778a52c
[ "CNRI-Python" ]
null
null
null
4_Rows and Columns/3_Adding_Column_Rows/1_Column_Addition.ipynb
sureshmecad/Pandas
128091e7021158f39eb0ff97e0e63d76e778a52c
[ "CNRI-Python" ]
null
null
null
26.621622
133
0.352728
[ [ [ "https://www.geeksforgeeks.org/adding-new-column-to-existing-dataframe-in-pandas/", "_____no_output_____" ], [ "<h3 style=\"color:blue\" align=\"left\"> Column Addition </h3>", "_____no_output_____" ] ], [ [ "Adding new column to existing DataFrame in Pandas by below methods:\n\n 1. By declaring a new list as a column\n\n 2. By using DataFrame.insert()\n \n 3. By Using Dataframe.assign()\n \n 4. By using a dictionary", "_____no_output_____" ] ], [ [ "# Import pandas package \nimport pandas as pd", "_____no_output_____" ], [ "# Define a dictionary containing Students data \n\ndata = {'Name': ['Jai', 'Princi', 'Gaurav', 'Anuj'], \n 'Height': [5.1, 6.2, 5.1, 5.2], \n 'Qualification': ['Msc', 'MA', 'Msc', 'Msc']} ", "_____no_output_____" ], [ "# Convert the dictionary into DataFrame\n\ndf = pd.DataFrame(data)\ndf", "_____no_output_____" ] ], [ [ "<h3 style=\"color:blue\" align=\"left\"> 1. By declaring a new list as a column </h3>", "_____no_output_____" ] ], [ [ "# Declare a list that is to be converted into a column \naddress = ['Delhi', 'Bangalore', 'Chennai', 'Patna']", "_____no_output_____" ], [ "# Using 'Address' as the column name \n# and equating it to the list\n\ndf['Address'] = address\ndf", "_____no_output_____" ] ], [ [ "-------------------------------------------------------------------------------------------------------------------------------", "_____no_output_____" ], [ "<h3 style=\"color:blue\" align=\"left\"> 2. By using DataFrame.insert( ) </h3>", "_____no_output_____" ] ], [ [ "# Using DataFrame.insert() to add a column\n\ndf.insert(2, \"Age\", [21, 23, 24, 21], True)\ndf", "_____no_output_____" ] ], [ [ "-------------------------------------------------------------------------------------------------------------------------------", "_____no_output_____" ], [ "<h3 style=\"color:blue\" align=\"left\"> 3. 
By Using Dataframe.assign( ) </h3>", "_____no_output_____" ] ], [ [ "# Using 'Address' as the column name and equating it to the list\n\ndf2 = df.assign(address = ['Delhi', 'Bangalore', 'Chennai', 'Patna'])\ndf2 ", "_____no_output_____" ] ], [ [ "-------------------------------------------------------------------------------------------------------------------------------", "_____no_output_____" ], [ "<h3 style=\"color:blue\" align=\"left\"> 4. By using a dictionary </h3>", "_____no_output_____" ] ], [ [ "# Define a dictionary with key values of \n# an existing column and their respective \n# value pairs as the # values for our new column.\n\naddress = {'Delhi': 'Jai', 'Bangalore': 'Princi', \n 'Patna': 'Gaurav', 'Chennai': 'Anuj'}", "_____no_output_____" ], [ "# Provide 'Address' as the column name\n\ndf['Address'] = address\ndf", "_____no_output_____" ] ] ]
[ "markdown", "raw", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "raw" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ] ]
4a2f341e840aa2cab88badb4f9a78f6f7534b18a
2,121
ipynb
Jupyter Notebook
examples/notebook/sat/boolean_product_sample_sat.ipynb
jspricke/or-tools
45770b833997f827d322e929b1ed4781c4e60d44
[ "Apache-2.0" ]
1
2020-07-18T16:24:09.000Z
2020-07-18T16:24:09.000Z
examples/notebook/sat/boolean_product_sample_sat.ipynb
jspricke/or-tools
45770b833997f827d322e929b1ed4781c4e60d44
[ "Apache-2.0" ]
1
2021-02-23T10:22:55.000Z
2021-02-23T13:57:14.000Z
examples/notebook/sat/boolean_product_sample_sat.ipynb
jspricke/or-tools
45770b833997f827d322e929b1ed4781c4e60d44
[ "Apache-2.0" ]
1
2021-03-16T14:30:59.000Z
2021-03-16T14:30:59.000Z
34.209677
83
0.577558
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
4a2f3c7a5f34c122a2a3becabc435e51c82d9c8b
20,051
ipynb
Jupyter Notebook
notebooks/investigate_intersectional_discrimination.ipynb
mraahemi/Bias_analyzer
3c362976c1e7501b51b277c81c47f0d584c46311
[ "MIT" ]
2
2020-11-01T14:28:32.000Z
2020-11-10T11:39:53.000Z
notebooks/investigate_intersectional_discrimination.ipynb
mraahemi/Bias_analyzer
3c362976c1e7501b51b277c81c47f0d584c46311
[ "MIT" ]
4
2020-11-01T14:22:30.000Z
2021-11-28T11:04:38.000Z
notebooks/investigate_intersectional_discrimination.ipynb
mraahemi/Bias_analyzer
3c362976c1e7501b51b277c81c47f0d584c46311
[ "MIT" ]
1
2020-11-05T17:49:29.000Z
2020-11-05T17:49:29.000Z
24.245466
1,070
0.422323
[ [ [ "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.datasets import fetch_openml\ndata = fetch_openml(data_id=1590, as_frame=True)\nX = pd.get_dummies(data.data)\ny_true = (data.target == '>50K') * 1\nsex = data.data[['sex', 'race']]\nsex.value_counts()", "_____no_output_____" ], [ "from fairlearn.metrics import group_summary\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.tree import DecisionTreeClassifier\n\nclassifier = DecisionTreeClassifier(min_samples_leaf=10, max_depth=4)\nclassifier.fit(X, y_true)\n\ny_pred = classifier.predict(X)\n#group_summary(accuracy_score, y_true, y_pred, sensitive_features=sex)", "_____no_output_____" ], [ "from fairlearn.metrics import selection_rate_group_summary\n#selection_rate_group_summary(y_true, y_pred, sensitive_features=sex)", "_____no_output_____" ], [ "from fairlearn.widget import FairlearnDashboard\nFairlearnDashboard(sensitive_features=sex,\n sensitive_feature_names=['sex', 'race'],\n y_true=y_true,\n y_pred={\"initial model\": y_pred})", "_____no_output_____" ] ], [ [ "Can we find intersectional discrimination with Fairlearn?", "_____no_output_____" ] ], [ [ "import numpy as np", "_____no_output_____" ], [ "X = pd.DataFrame(np.random.randint(0, high=2, size=(100, 3), dtype='l'), columns=['sex', 'race', 'Y'])", "_____no_output_____" ], [ "X['cnt'] = 1", "_____no_output_____" ], [ "counts = X.groupby(['sex', 'race']).Y.count()", "_____no_output_____" ], [ "\nf = lambda x: [np.random.choice([0,1], 17, p=[0.65, 0.35])[0] for _ in range(x)]", "_____no_output_____" ], [ "X.at[(X['sex'] == 1) & (X['race'] == 1),'result'] = f(counts.loc[1,1])", "_____no_output_____" ], [ "X.groupby(['sex', 'race']).agg({'result':'sum', 'Y':['sum', 'count']})", "_____no_output_____" ], [ "# now let's create a biased scoring function", "_____no_output_____" ] ], [ [ "Idea: first sample from the biased distribution p_bias, then calculate the expectancy value of the unbiased distribution p_0 and 
caluculate how much you need to bias p_0 to get the exectancy of value of the unbiased distribution p_0 -> p_correction", "_____no_output_____" ] ], [ [ "X[(X[['sex', 'race']] == 1).all(1)].shape", "_____no_output_____" ], [ "X.groupby(['sex', 'race']).agg({'result':'sum', 'Y':['sum', 'count']}).loc[[1]*len()]", "_____no_output_____" ], [ "a = tuple([1 for _ in range(len(counts.index.levels))])", "_____no_output_____" ], [ "a", "_____no_output_____" ], [ "counts.loc[a]", "_____no_output_____" ], [ "def biased_score(df, sensitive_cols, biased_prob):\n #todo make this agnostic of specific columns\n counts = df.groupby(sensitive_cols).agg({sensitive_cols[0]:'sum'})\n indexer = tuple([1 for _ in range(len(counts.index.levels))])\n df[(df[sensitive_cols] == 1).all(axis=1)]['result'] = np.random.choice([0,1], counts.loc[indexer].values, p=[biased_prob, 1-biased_prob])\n return df", "_____no_output_____" ], [ "type(counts)", "_____no_output_____" ], [ "biased_score(X, ['sex', 'race'], 0.3)", "<ipython-input-47-82b4e2ca2acf>:5: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n df[(df[sensitive_cols] == 1).all(axis=1)]['result'] = np.random.choice([0,1], counts.loc[indexer].values, p=[biased_prob, 1-biased_prob])\n" ], [ "def shift_prop(counts, expected_distribution):\n expected_values = counts.sum() * expected_distribution\n ", "_____no_output_____" ], [ "counts.sum()", "_____no_output_____" ], [ "counts", "_____no_output_____" ], [ "counts.loc[1,:].sum()", "_____no_output_____" ], [ "i = 1000003054", "_____no_output_____" ], [ "i", "_____no_output_____" ], [ "i = i + 1", "_____no_output_____" ], [ "a = i * 3", "_____no_output_____" ], [ "i = 2\ni", "_____no_output_____" ], [ "i == 2", "_____no_output_____" ], [ "type(i)", 
"_____no_output_____" ], [ "type(\"adfaserer\")", "_____no_output_____" ], [ "\"1\" == 1", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a2f427bc4501195e6c048cc77f5157d795d47de
5,007
ipynb
Jupyter Notebook
pure/15.enums.ipynb
majeeddl/python
fbb8b3630fc0a4ca0a3a74909d16c40f461ab61f
[ "Apache-2.0" ]
null
null
null
pure/15.enums.ipynb
majeeddl/python
fbb8b3630fc0a4ca0a3a74909d16c40f461ab61f
[ "Apache-2.0" ]
1
2022-03-01T21:07:42.000Z
2022-03-01T21:07:42.000Z
pure/15.enums.ipynb
majeeddl/python
fbb8b3630fc0a4ca0a3a74909d16c40f461ab61f
[ "Apache-2.0" ]
null
null
null
21.216102
184
0.511883
[ [ [ "## enum in Python\n\nEnumerations in Python are implemented by using the module named “enum“.Enumerations are created using classes. Enums have names and values associated with them.\nProperties of enum:\n\n1. Enums can be displayed as string or repr.\n2. Enums can be checked for their types using type().\n3. “name” keyword is used to display the name of the enum member.\n ", "_____no_output_____" ] ], [ [ "from enum import Enum\n\nclass Animal(Enum):\n dog=1\n cat=2\n lion=3", "_____no_output_____" ], [ "# printing enun number as string\nprint(\"The string representation of enum member is : \", end=\"\")\nprint(Animal.dog)", "The string representation of enum member is : Animal.dog\n" ], [ "# printing enum member as repr\nprint(\"The repr representation of enum member is : \", end=\"\")\nprint(repr(Animal.dog))\n", "The repr representation of enum member is : <Animal.dog: 1>\n" ], [ "# printing the type of enum member using type()\nprint(\"The type of enum member is : \", end=\"\")\nprint(type(Animal.dog))\n", "The type of enum member is : <enum 'Animal'>\n" ], [ "# printing name of enum member using \"name\" keyword\nprint (\"The name of enum member is : \",end =\"\")\nprint(Animal.dog.name)", "The name of enum member is : dog\n" ], [ "# Displaying value\nprint(\"The value associated with dog is : \", end=\"\")\nprint(Animal.dog.value)\n", "The value associated with dog is : 1\n" ], [ "# Accessing enum member using value\nprint(\"The enum member associated with value 2 is : \", end=\"\")\nprint(Animal(2))\n", "The enum member associated with value 2 is : Animal.cat\n" ] ], [ [ "### enum.IntEnum in Python\nWith the help of enum.IntEnum() method, we can get the enumeration based on integer value, if we compare with normal enum based class it will fail by using enum.IntEnum() method.", "_____no_output_____" ] ], [ [ "from enum import IntEnum\n\nclass Author(IntEnum):\n GEEK = 1\n FOR = 2\n GEEKS = 3", "_____no_output_____" ], [ 
"print(Author.FOR)\nprint(Author.FOR == 2)\n", "Author.FOR\nTrue\n" ], [ "class language(Enum):\n Python = 1\n Java = 2\n\n\nprint(Author.GEEK == language.Python)\n", "False\n" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
4a2f4fadef7de755be330ef9dc3e726949dc8818
3,062
ipynb
Jupyter Notebook
Simple_Cleaner.ipynb
adamrossnelson/TxtProcessing
d930aba835f55592446b651a23b7e70fd4fa765b
[ "MIT" ]
null
null
null
Simple_Cleaner.ipynb
adamrossnelson/TxtProcessing
d930aba835f55592446b651a23b7e70fd4fa765b
[ "MIT" ]
null
null
null
Simple_Cleaner.ipynb
adamrossnelson/TxtProcessing
d930aba835f55592446b651a23b7e70fd4fa765b
[ "MIT" ]
null
null
null
22.350365
110
0.535598
[ [ [ "# Simple text prep function\nFor use with natural language processing\n\nReference:\n\ntowardsdatascience.com/a-beginners-guide-to-natural-language-processing-e21e3e016f84", "_____no_output_____" ] ], [ [ "import requests", "_____no_output_____" ], [ "def simple_cleaner(raw_txt):\n import re\n from bs4 import BeautifulSoup\n from nltk.corpus import stopwords\n # Remove HTML (if there is any)\n raw_txt = BeautifulSoup(raw_txt).get_text()\n # Extract letters\n raw_txt = re.sub(\"[^a-zA-Z]\", \" \", raw_txt)\n # Convert to works and parse\n raw_wds = raw_txt.lower().split()\n # Remove stopwords\n raw_txt = [w for w in raw_wds if not w in set(stopwords.words(\"english\"))]\n # Rejoin words to string, return result\n return(' '.join(raw_txt))", "_____no_output_____" ], [ "print(\n simple_cleaner(\n requests.get('https://www.gutenberg.org/files/1112/1112.txt').text)[0:100])", "project gutenberg ebook romeo juliet william shakespeare ebook use anyone anywhere cost almost restr\n" ], [ "rjtxt = requests.get('https://www.gutenberg.org/files/1112/1112.txt')\nprint(simple_cleaner(rjtxt.text)[0:100])", "project gutenberg ebook romeo juliet william shakespeare ebook use anyone anywhere cost almost restr\n" ], [ "print(simple_cleaner('This is some random text to test out.'))", "random text test\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
4a2f5dca17d414b4badc383bd76a37a2f15dac04
8,551
ipynb
Jupyter Notebook
_posts/python-v3/scientific/ternary/ternary.ipynb
arauzo/graphing-library-docs
c99f3ed921727e1dcb321e6a52c5c7f7fe74356d
[ "CC-BY-3.0" ]
43
2020-02-06T00:54:56.000Z
2022-03-24T22:29:33.000Z
_posts/python-v3/scientific/ternary/ternary.ipynb
arauzo/graphing-library-docs
c99f3ed921727e1dcb321e6a52c5c7f7fe74356d
[ "CC-BY-3.0" ]
92
2020-01-31T16:23:50.000Z
2022-03-21T05:31:39.000Z
_posts/python-v3/scientific/ternary/ternary.ipynb
arauzo/graphing-library-docs
c99f3ed921727e1dcb321e6a52c5c7f7fe74356d
[ "CC-BY-3.0" ]
63
2020-02-08T15:16:06.000Z
2022-03-29T17:24:38.000Z
33.665354
325
0.55245
[ [ [ "#### New to Plotly?\nPlotly's Python library is free and open source! [Get started](https://plotly.com/python/getting-started/) by downloading the client and [reading the primer](https://plotly.com/python/getting-started/).\n<br>You can set up Plotly to work in [online](https://plotly.com/python/getting-started/#initialization-for-online-plotting) or [offline](https://plotly.com/python/getting-started/#initialization-for-offline-plotting) mode, or in [jupyter notebooks](https://plotly.com/python/getting-started/#start-plotting-online).\n<br>We also have a quick-reference [cheatsheet](https://images.plot.ly/plotly-documentation/images/python_cheat_sheet.pdf) (new!) to help you get started!", "_____no_output_____" ], [ "#### Version Check\nNote: Ternary Plots are available in version 1.9.10+\nRun pip install plotly --upgrade to update your Plotly version", "_____no_output_____" ] ], [ [ "import plotly \nplotly.__version__", "_____no_output_____" ] ], [ [ "### Basic Ternary Plot with Markers", "_____no_output_____" ] ], [ [ "import plotly.plotly as py\nimport plotly.graph_objs as go\n\nrawData = [\n {'journalist':75,'developer':25,'designer':0,'label':'point 1'},\n {'journalist':70,'developer':10,'designer':20,'label':'point 2'},\n {'journalist':75,'developer':20,'designer':5,'label':'point 3'},\n {'journalist':5,'developer':60,'designer':35,'label':'point 4'},\n {'journalist':10,'developer':80,'designer':10,'label':'point 5'},\n {'journalist':10,'developer':90,'designer':0,'label':'point 6'},\n {'journalist':20,'developer':70,'designer':10,'label':'point 7'},\n {'journalist':10,'developer':20,'designer':70,'label':'point 8'},\n {'journalist':15,'developer':5,'designer':80,'label':'point 9'},\n {'journalist':10,'developer':10,'designer':80,'label':'point 10'},\n {'journalist':20,'developer':10,'designer':70,'label':'point 11'},\n];\n\ndef makeAxis(title, tickangle): \n return {\n 'title': title,\n 'titlefont': { 'size': 20 },\n 'tickangle': tickangle,\n 
'tickfont': { 'size': 15 },\n 'tickcolor': 'rgba(0,0,0,0)',\n 'ticklen': 5,\n 'showline': True,\n 'showgrid': True\n }\n\ndata = [{ \n 'type': 'scatterternary',\n 'mode': 'markers',\n 'a': [i for i in map(lambda x: x['journalist'], rawData)],\n 'b': [i for i in map(lambda x: x['developer'], rawData)],\n 'c': [i for i in map(lambda x: x['designer'], rawData)],\n 'text': [i for i in map(lambda x: x['label'], rawData)],\n 'marker': {\n 'symbol': 100,\n 'color': '#DB7365',\n 'size': 14,\n 'line': { 'width': 2 }\n },\n }]\n\nlayout = {\n 'ternary': {\n 'sum': 100,\n 'aaxis': makeAxis('Journalist', 0),\n 'baxis': makeAxis('<br>Developer', 45),\n 'caxis': makeAxis('<br>Designer', -45)\n },\n 'annotations': [{\n 'showarrow': False,\n 'text': 'Simple Ternary Plot with Markers',\n 'x': 0.5,\n 'y': 1.3,\n 'font': { 'size': 15 }\n }]\n}\n\nfig = {'data': data, 'layout': layout}\npy.iplot(fig, validate=False)", "_____no_output_____" ] ], [ [ "#### Reference\nSee https://plotly.com/python/reference/#scatterternary for more information and chart attribute options!", "_____no_output_____" ] ], [ [ "from IPython.display import display, HTML\n\ndisplay(HTML('<link href=\"//fonts.googleapis.com/css?family=Open+Sans:600,400,300,200|Inconsolata|Ubuntu+Mono:400,700\" rel=\"stylesheet\" type=\"text/css\" />'))\ndisplay(HTML('<link rel=\"stylesheet\" type=\"text/css\" href=\"http://help.plot.ly/documentation/all_static/css/ipython-notebook-custom.css\">'))\n\n! pip install git+https://github.com/plotly/publisher.git --upgrade\nimport publisher\npublisher.publish(\n 'ternary.ipynb', 'python/ternary-plots/', 'Python Ternary Plots | plotly',\n 'How to make Ternary plots in Python with Plotly.',\n name = 'Ternary Plots',\n thumbnail='thumbnail/ternary.jpg', language='python',\n page_type='example_index', has_thumbnail='true', display_as='scientific', order=9,\n ipynb= '~notebook_demo/39') ", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
4a2f639484fd82dba3778927f11488e20cb84f91
5,289
ipynb
Jupyter Notebook
Getting Started with SystemML.ipynb
MadisonJMyers/Journeys
887ee028f16bb340d6b3493cdcb2b73f59dbe5da
[ "Apache-2.0" ]
null
null
null
Getting Started with SystemML.ipynb
MadisonJMyers/Journeys
887ee028f16bb340d6b3493cdcb2b73f59dbe5da
[ "Apache-2.0" ]
null
null
null
Getting Started with SystemML.ipynb
MadisonJMyers/Journeys
887ee028f16bb340d6b3493cdcb2b73f59dbe5da
[ "Apache-2.0" ]
null
null
null
31.861446
486
0.524107
[ [ [ "%load_ext autoreload\n%autoreload 2\n%matplotlib inline\nimport pycurl\nimport os\n\nimport numpy as np \nimport matplotlib.pyplot as plt \nplt.rcParams['figure.figsize'] = (10, 6)\n\nsc.addPyFile(\"https://raw.githubusercontent.com/apache/incubator-systemml/3d5f9b11741f6d6ecc6af7cbaa1069cde32be838/src/main/java/org/apache/sysml/api/python/SystemML.py\")\n\n#%%sh\n\ncurl -O http://snap.stanford.edu/data/amazon0601.txt.gz \n#os.system (\"wget http://snap.stanford.edu/data/amazon0601.txt.gz\")\ngunzip amazon0601.txt.gz \n", "_____no_output_____" ], [ "import pyspark.sql.functions as F \ndataPath = \"amazon0601.txt\"\n\nX_train = (sc.textFile(dataPath) \n .filter(lambda l: not l.startswith(\"#\"))\n .map(lambda l: l.split(\"\\t\"))\n .map(lambda prods: (int(prods[0]), int(prods[1]), 1.0))\n .toDF((\"prod_i\", \"prod_j\", \"x_ij\"))\n .filter(\"prod_i < 500 AND prod_j < 500\")\n .cache())\n\nmax_prod_i = X_train.select(F.max(\"prod_i\")).first()[0] \nmax_prod_j = X_train.select(F.max(\"prod_j\")).first()[0] \nnumProducts = max(max_prod_i, max_prod_j) + 1 \nprint(\"Total number of products: {}\".format(numProducts)) ", "_____no_output_____" ], [ "from SystemML import MLContext \nml = MLContext(sc) ", "_____no_output_____" ], [ "pnmf = \"\"\" \nX = read($X) \nX = X+1 \nV = table(X[,1], X[,2]) \nsize = ifdef($size, -1) \nif(size > -1) { \n V = V[1:size,1:size]\n}\nmax_iteration = as.integer($maxiter) \nrank = as.integer($rank)\n\nn = nrow(V) \nm = ncol(V) \nrange = 0.01 \nW = Rand(rows=n, cols=rank, min=0, max=range, pdf=\"uniform\") \nH = Rand(rows=rank, cols=m, min=0, max=range, pdf=\"uniform\") \nlosses = matrix(0, rows=max_iteration, cols=1) ", "_____no_output_____" ], [ "i=1 \nwhile(i <= max_iteration) {\n\n H = (H * (t(W) %*% (V/(W%*%H))))/t(colSums(W)) \n W = (W * ((V/(W%*%H)) %*% t(H)))/t(rowSums(H))\n\n\n losses[i,] = -1 * (sum(V*log(W%*%H)) - as.scalar(colSums(W)%*%rowSums(H)))\n i = i + 1;\n}\n\nwrite(losses, $lossout) \nwrite(W, $Wout) \nwrite(H, $Hout) 
\n\"\"\"", "_____no_output_____" ], [ "ml.reset() \noutputs = ml.executeScript(pnmf, {\"X\": X_train, \"maxiter\": 100, \"rank\": 10}, [\"W\", \"H\", \"losses\"]) ", "_____no_output_____" ], [ "losses = outputs.getDF(sqlContext, \"losses\") \nxy = losses.sort(losses.ID).map(lambda r: (r[0], r[1])).collect() \nx, y = zip(*xy) \nplt.plot(x, y) \nplt.xlabel('Iteration') \nplt.ylabel('Loss') \nplt.title('PNMF Training Loss')", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code" ] ]
4a2f79ef62168882ebd4f2aa7f73f5fdc33b3580
708,686
ipynb
Jupyter Notebook
archived_lectures/Network-Modeling-Summer-School-2021/Parameter-Fitting/Parameter-Fitting-Part-3-Detailed-Example.ipynb
BioModelTools/topics-course
cd0d73e4056663d170465669ecd699e8e74e35a0
[ "MIT" ]
2
2018-10-24T21:31:30.000Z
2019-10-23T20:29:22.000Z
archived_lectures/Network-Modeling-Summer-School-2021/Parameter-Fitting/Parameter-Fitting-Part-3-Detailed-Example.ipynb
BioModelTools/topics-course
cd0d73e4056663d170465669ecd699e8e74e35a0
[ "MIT" ]
1
2019-11-20T21:46:26.000Z
2019-11-20T22:09:00.000Z
archived_lectures/Network-Modeling-Summer-School-2021/Parameter-Fitting/Parameter-Fitting-Part-3-Detailed-Example.ipynb
BioModelTools/topics-course
cd0d73e4056663d170465669ecd699e8e74e35a0
[ "MIT" ]
9
2018-10-31T20:48:42.000Z
2019-11-20T21:47:43.000Z
708,686
708,686
0.952228
[ [ [ "# **PARAMETER FITTING DETAILED EXAMPLE**\n\nThis provides a detailed example of parameter fitting using the python-based tool ``SBstoat``. \nDetails about the tool can be found at in this [github repository](https://github.com/sys-bio/SBstoat).", "_____no_output_____" ], [ "# Preliminaries", "_____no_output_____" ] ], [ [ "IS_COLAB = True", "_____no_output_____" ], [ "if IS_COLAB:\n !pip install -q SBstoat\n !pip install -q tellurium\n pass", "\u001b[K |████████████████████████████████| 28.1MB 108kB/s \n\u001b[K |████████████████████████████████| 307kB 31.5MB/s \n\u001b[K |████████████████████████████████| 122kB 40.6MB/s \n\u001b[K |████████████████████████████████| 163kB 49.5MB/s \n\u001b[K |████████████████████████████████| 6.2MB 23.9MB/s \n\u001b[K |████████████████████████████████| 102kB 11.0MB/s \n\u001b[K |████████████████████████████████| 36.7MB 84kB/s \n\u001b[K |████████████████████████████████| 5.6MB 28.8MB/s \n\u001b[K |████████████████████████████████| 2.5MB 29.5MB/s \n\u001b[K |████████████████████████████████| 3.2MB 32.2MB/s \n\u001b[K |████████████████████████████████| 2.0MB 24.2MB/s \n\u001b[K |████████████████████████████████| 5.8MB 27.7MB/s \n\u001b[K |████████████████████████████████| 16.6MB 170kB/s \n\u001b[K |████████████████████████████████| 3.1MB 30.8MB/s \n\u001b[?25h Building wheel for SBstoat (setup.py) ... \u001b[?25l\u001b[?25hdone\n Building wheel for lmfit (setup.py) ... \u001b[?25l\u001b[?25hdone\n Building wheel for docstring-expander (setup.py) ... \u001b[?25l\u001b[?25hdone\n Building wheel for asteval (setup.py) ... 
\u001b[?25l\u001b[?25hdone\n\u001b[31mERROR: libroadrunner 2.0.5 has requirement numpy==1.19.3, but you'll have numpy 1.19.5 which is incompatible.\u001b[0m\n\u001b[31mERROR: tellurium 2.2.0 has requirement numpy==1.19.3, but you'll have numpy 1.19.5 which is incompatible.\u001b[0m\n\u001b[31mERROR: tellurium 2.2.0 has requirement scipy>=1.5.1, but you'll have scipy 1.4.1 which is incompatible.\u001b[0m\n\u001b[K |████████████████████████████████| 14.9MB 182kB/s \n\u001b[K |████████████████████████████████| 28.5MB 113kB/s \n\u001b[31mERROR: datascience 0.10.6 has requirement folium==0.2.1, but you'll have folium 0.8.3 which is incompatible.\u001b[0m\n\u001b[31mERROR: albumentations 0.1.12 has requirement imgaug<0.2.7,>=0.2.5, but you'll have imgaug 0.2.9 which is incompatible.\u001b[0m\n\u001b[?25h" ], [ "# Python packages used in this chapter\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport pandas as pd\nimport urllib.request # use this library to download file from GitHub\nimport tellurium as te\nfrom SBstoat.namedTimeseries import NamedTimeseries, TIME\nfrom SBstoat.modelFitter import ModelFitter\nimport SBstoat", "_____no_output_____" ] ], [ [ "# Constants and Helper Functions", "_____no_output_____" ] ], [ [ "def getSharedCodes(moduleName):\n \"\"\"\n Obtains common codes from the github repository.\n\n Parameters\n ----------\n moduleName: str\n name of the python module in the src directory\n \"\"\"\n if IS_COLAB:\n url = \"https://github.com/sys-bio/network-modeling-summer-school-2021/raw/main/src/%s.py\" % moduleName\n local_python = \"python.py\"\n _, _ = urllib.request.urlretrieve(url=url, filename=local_python)\n else:\n local_python = \"../../src/%s.py\" % moduleName\n with open(local_python, \"r\") as fd:\n codeStr = \"\".join(fd.readlines())\n print(codeStr)\n exec(codeStr, globals())\n\n# Acquire codes\ngetSharedCodes(\"util\")\n\n# TESTS\nassert(isinstance(LINEAR_PATHWAY_DF, pd.DataFrame))", "import pandas as pd\nimport 
urllib.request\n\n# Linear pathway data\nBASE_URL = \"https://github.com/vporubsky/network-modeling-summer-school/raw/main/\"\nBASE_DATA_URL = \"%sdata/\" % BASE_URL\nBASE_MODULE_URL = \"%ssrc/\" % BASE_URL\nBASE_MODEL_URL = \"%smodels/\" % BASE_URL\nLOCAL_FILE = \"local_file.txt\"\n\n\ndef getData(csvFilename):\n \"\"\"\n Creates a dataframe from a CSV structured URL file.\n\n Parameters\n ----------\n csvFilename: str\n Name of the CSV file (w/o \".csv\" extension)\n\n Returns\n -------\n pd.DataFrame\n \"\"\"\n url = \"%s%s.csv\" % (BASE_DATA_URL, csvFilename)\n filename, _ = urllib.request.urlretrieve(url, filename=LOCAL_FILE)\n return pd.read_csv(LOCAL_FILE)\n\ndef getModule(moduleName):\n \"\"\"\n Obtains common codes from the github repository.\n \n Parameters\n ----------\n moduleName: str\n name of the python module in the src directory\n \"\"\"\n url = \"%s%s.py\" % (BASE_MODULE_URL, moduleName)\n _, _ = urllib.request.urlretrieve(url, filename=LOCAL_FILE)\n with open(LOCAL_FILE, \"r\") as fd:\n codeStr = \"\".join(fd.readlines())\n return codeStr\n\ndef getModel(modelName):\n \"\"\"\n Creates returns the string for the antimony model.\n\n Parameters\n ----------\n modelName: str\n Name of the model w/o \".ant\"\n\n Returns\n -------\n str\n \"\"\"\n url = \"%s%s.ant\" % (BASE_MODEL_URL, modelName)\n filename, _ = urllib.request.urlretrieve(url, filename=LOCAL_FILE)\n with open(LOCAL_FILE, \"r\") as fd:\n result = \"\".join(fd.readlines())\n return result\n\n# Set models\nWOLF_MODEL = getModel(\"wolf\")\nWOLF_DF = getData(\"wolf\")\nWOLF_ARR = WOLF_DF.to_numpy()\nLINEAR_PATHWAY_DF = getData(\"linear_pathway\")\nLINEAR_PATHWAY_ARR = LINEAR_PATHWAY_DF.to_numpy()\nLINEAR_PATHWAY_MODEL = getModel(\"linear_pathway\")\n\n" ], [ "def plotTS(ts, title=\"\"):\n \"\"\"\n Plots columns in a timeseries.\n \n Parameters\n ----------\n ts: NamedTimeseries\n \"\"\"\n p = plt.plot(ts[TIME], ts[ts.colnames])\n _ = plt.legend(p, ts.colnames, bbox_to_anchor=(1.05, 1), 
loc='upper left')\n _ = plt.title(title)", "_____no_output_____" ] ], [ [ "# Running SBstoat", "_____no_output_____" ], [ "``SBstoat`` is a python package intended to simplify the programmatic aspects of fitting. The package provides handles the programming details\nof the interactions between the optimization codes (``lmfit``) and ``tellurium`` simulations.\n\nThe required inputs to ``SBstoat`` are:\n- the model for which parameter values are being estimated;\n- observational data; and\n- specification of the parameters, their value ranges, and initial values.\n\nFor the linear pathway model, we ``LINEAR_PATHWAY_MODEL`` and ``LINEAR_PATHWAY_DF`` for the model and data, respectively.\nThe description of the paarameters is done using a python dictionary, as shown below.", "_____no_output_____" ] ], [ [ "# Name, minimal value, initial value, and maximum value of each parameter\nLINEAR_PATHWAY_PARAMETERS = [\n SBstoat.Parameter(\"k1\", lower=1, value=50, upper=100),\n SBstoat.Parameter(\"k2\", lower=1, value=50, upper=100),\n SBstoat.Parameter(\"k3\", lower=1, value=50, upper=100),\n SBstoat.Parameter(\"k4\", lower=1, value=50, upper=100),\n]", "_____no_output_____" ] ], [ [ "The python class ``ModelFitter`` does fitting for ``SBstoat``. 
Details of using this can be found below.", "_____no_output_____" ] ], [ [ "?ModelFitter", "_____no_output_____" ], [ "fitter = ModelFitter(LINEAR_PATHWAY_MODEL, \n NamedTimeseries(dataframe=LINEAR_PATHWAY_DF), \n parametersToFit=LINEAR_PATHWAY_PARAMETERS)\nfitter.fitModel()", "_____no_output_____" ] ], [ [ "``SBstoat`` provides a textual report of the results of the fit.", "_____no_output_____" ] ], [ [ "print(fitter.reportFit())", "[[Variables]]\n k1: 1.013537247499141\n k2: 2.15812984596342\n k3: 3.079339266311967\n k4: 4.280401549489099\n[[Fit Statistics]]\n # fitting method = leastsq\n # function evals = 6\n # data points = 500\n # variables = 4\n chi-square = 102.939841\n reduced chi-square = 0.20754000\n Akaike info crit = -782.231674\n Bayesian info crit = -765.373241\n[[Correlations]] (unreported correlations are < 0.100)\n C(k3, k4) = -0.297\n C(k2, k3) = -0.263\n C(k2, k4) = -0.243\n C(k1, k2) = -0.227\n C(k1, k3) = -0.188\n C(k1, k4) = -0.182\n" ] ], [ [ "The report is in three sections.\n\nThe first section contains measures of the fit quality. The most commonly used measures are chi-square and reduced chai-square.\nWe want both of these to be \"small\", although small is relative.\nThese measures are most useful when comparing different fit results.\n\nThe \"Variables\" section gives parameter estimates. We se that the estimates obtained are fairly close to\nthe true values in the original models.\n\nThe final section provides information about the relationships between parameter estimates. 
This can be useful\nin models where the structure of the model makes it difficult to separate one parameter from another.\nIn these cases, there will be a large correlation between parameter (absolute) parameter values.", "_____no_output_____" ], [ "``SBstoat`` provides many plots to aid in understanding the fitting results.\nYou can see these by typing in a Jupyter code cell ``fitter.pl`` and then pressing the tab key.\nArguably the most useful is ``plotFitAll``, which, for each floating species (column in observed data), plots the fitted and observed values.\nThis is shown below.", "_____no_output_____" ] ], [ [ "fitter.plotFitAll(figsize=(20, 5), numCol=5, color=[\"red\", \"blue\"], titlePosition=(0.5, 1.05))", "_____no_output_____" ] ], [ [ "These fits seem to be quite consistent with the observed data, with the possible exception of ``S5``.\nIn the latter case, there is considerable variability that likely makes a good fit more difficult.", "_____no_output_____" ] ], [ [ "# See the options for plotFitAll\n?fitter.plotFitAll", "_____no_output_____" ] ], [ [ "If you are interested in a more complete analysis of the residuals, use ``plotResidualsAll``.", "_____no_output_____" ] ], [ [ "fitter.plotResidualsAll(figsize=(20, 10))", "_____no_output_____" ] ], [ [ "# Workflow for fitting the Linear Pathway Model", "_____no_output_____" ], [ "Although ``SBstoat`` eliminates the burden of programming details, fitting is often complex.\nThis is because of the complexity of the fitting surface, as illustrated earlier in this chaper.\nThis section illustrates how to use ``SBstoat`` to explore complex fitting surfaces.\n\n``SBstoat`` allows you to explore fitting along three dimensions.\n1. **Fitting surface**. The fitting surface changes based on the following:\n a. the selection of float species (columns in the observed data) that we attempt to fit;\n b. the time span we fit over\n1. **Optimization Algorithms**. 
As we noted previously, gradient descent is fast, but it only works well for convex fitting surfaces. We might want to try both gradient descent and differential evolution to see which works best for our model. Also, some optimization algorithms are stochastic, and so the search strategy may also choose to run\nthe same algorithm multiple times. Finally, it may be desirable to do multiple optimizations in succession, using the results of the $n-1$-st to be the starting point for the $n$-th.\n1. **Search start & scope**. This refers to the initial values of parameter values and the range of parameter values that are explored.", "_____no_output_____" ], [ "\nIn the following explorations of the above dimensions of parameter fitting, we use the above workflow that consists of:\n1. Select a subset of the observed data based on a specified time span (in this case, just ending time)\n1. Construct a fitter for the linear pathway, observed data, columns to consider in fitting, the fitting methods, and parameter ranges/initial values.\n1. Fit the model.\n1. Print the fitting report.\n1. 
Plot observed and fitted values.\n\nThis workflow is encapsulated in the the function ``doFit``.\nThe arguments of the function have default that reproduce the\nresults in the previous section.", "_____no_output_____" ] ], [ [ "def doFit(selectedColumns=None, \n endTime=10, \n fitterMethods=[\"differential_evolution\", \"leastsq\"], \n parametersToFit=LINEAR_PATHWAY_PARAMETERS,\n isTest=False):\n \"\"\"\n Encapsulates the workflow to fit the linear pathway model.\n\n Parameters\n ----------\n selectedColumns: list-str\n endTime: int\n fitterMethods: list-str\n parametersToFit: list-SBstoat.Parameter\n isTest: bool\n Test mode\n\n Returns\n -------\n ModelFitter\n \"\"\"\n model = te.loada(LINEAR_PATHWAY_MODEL)\n observedTS = NamedTimeseries(dataframe=LINEAR_PATHWAY_DF)\n # Find the last index to use\n lastIdx = len([t for t in observedTS[TIME] if t <= endTime])\n observedTS = observedTS[:lastIdx]\n # Construct the fitter and do the fit\n fitter = ModelFitter(model, observedTS, selectedColumns=selectedColumns, \n fitterMethods=fitterMethods, \n parametersToFit=parametersToFit)\n fitter.fitModel()\n if not isTest:\n print(fitter.reportFit())\n fitter.plotFitAll(figsize=(20, 5), numCol=5, color=[\"red\", \"blue\"],\n titlePosition=(0.5, 1.05))\n return fitter\n\n# TESTS\nresult = doFit(isTest=True)\nassert(isinstance(fitter, ModelFitter))", "_____no_output_____" ], [ "doFit()", "[[Variables]]\n k1: 1.0135372427877214\n k2: 2.1581298124785184\n k3: 3.0793391146838847\n k4: 4.280402472583022\n[[Fit Statistics]]\n # fitting method = leastsq\n # function evals = 6\n # data points = 500\n # variables = 4\n chi-square = 102.939841\n reduced chi-square = 0.20754000\n Akaike info crit = -782.231674\n Bayesian info crit = -765.373241\n[[Correlations]] (unreported correlations are < 0.100)\n C(k3, k4) = -0.297\n C(k2, k3) = -0.263\n C(k2, k4) = -0.243\n C(k1, k2) = -0.227\n C(k1, k3) = -0.188\n C(k1, k4) = -0.182\n" ] ], [ [ "## Fitting Surface", "_____no_output_____" ], [ "We 
begin by exploring the effect of the fitting surface.\n\nWe can control the fitting surface in two ways. The first is by the selection of columns that are matched with observational data.\n\nFor example, suppose that we only consider ``S5`` and so the fitting surface is residuals\nfrom fitting ``S5``.", "_____no_output_____" ] ], [ [ "doFit(selectedColumns=[\"S5\"])", "[[Variables]]\n k1: 2.8522843151867034\n k2: 2.852370672593212\n k3: 1.0739377508342487\n k4: 2.8524011700392844\n[[Fit Statistics]]\n # fitting method = leastsq\n # function evals = 12\n # data points = 100\n # variables = 4\n chi-square = 90.7507757\n reduced chi-square = 0.94532058\n Akaike info crit = -1.70531652\n Bayesian info crit = 8.71536422\n[[Correlations]] (unreported correlations are < 0.100)\n C(k1, k3) = -0.955\n C(k2, k3) = 0.916\n C(k1, k2) = -0.904\n C(k1, k4) = -0.535\n C(k3, k4) = 0.407\n C(k2, k4) = 0.124\n" ] ], [ [ "We see that we get poor estimates for most of the parameters, something that we can check because we know the true values of the parameters (``k1=1``, ``k2=2``, ``k3=3``, ``k4=4``).", "_____no_output_____" ], [ "Another consideration is to focus on a subset of the dynamics. Below, we only consider through 2 seconds.", "_____no_output_____" ] ], [ [ "doFit(endTime=2)", "[[Variables]]\n k1: 1.0340913918485295\n k2: 2.0933052035872084\n k3: 2.919246677173704\n k4: 3.8265925478967384\n[[Fit Statistics]]\n # fitting method = leastsq\n # function evals = 6\n # data points = 60\n # variables = 4\n chi-square = 9.99250125\n reduced chi-square = 0.17843752\n Akaike info crit = -99.5505775\n Bayesian info crit = -91.1731993\n[[Correlations]] (unreported correlations are < 0.100)\n C(k3, k4) = -0.147\n C(k1, k2) = 0.138\n" ] ], [ [ "This improved the quality of the fit. We see this visually in the above plots and also in the significant reduction in chi-square. 
A lot of this improvement\nis a result of not inluding regions of high variability in observed values for ``S5``.", "_____no_output_____" ], [ "## Optimization Algorithms", "_____no_output_____" ], [ "The main consideration here is the choice of optimization algorithms.\nAny valid ``method`` for ``lmfit`` can be used, and multiple methods can be used in combination. We illustrate this below.", "_____no_output_____" ] ], [ [ "# Fit with Levenberg-Marquardt \ndoFit(fitterMethods=[\"leastsq\"])", "[[Variables]]\n k1: 1.013537272208723\n k2: 2.1581300220005524\n k3: 3.079339166352762\n k4: 4.280404936773545\n[[Fit Statistics]]\n # fitting method = leastsq\n # function evals = 47\n # data points = 500\n # variables = 4\n chi-square = 102.939841\n reduced chi-square = 0.20754000\n Akaike info crit = -782.231674\n Bayesian info crit = -765.373241\n[[Correlations]] (unreported correlations are < 0.100)\n C(k3, k4) = -0.297\n C(k2, k3) = -0.263\n C(k2, k4) = -0.243\n C(k1, k2) = -0.227\n C(k1, k3) = -0.188\n C(k1, k4) = -0.182\n" ], [ "# Fit with differential evolution\ndoFit(fitterMethods=[\"differential_evolution\"])", "[[Variables]]\n k1: 1.0135368131654405\n k2: 2.1581283330109793\n k3: 3.0793355829615363\n k4: 4.280413888605413\n[[Fit Statistics]]\n # fitting method = differential_evolution\n # function evals = 1680\n # data points = 500\n # variables = 4\n chi-square = 102.939841\n reduced chi-square = 0.20754000\n Akaike info crit = -782.231674\n Bayesian info crit = -765.373241\n## Warning: uncertainties could not be estimated:\n this fitting method does not natively calculate uncertainties\n and numdifftools is not installed for lmfit to do this. 
Use\n `pip install numdifftools` for lmfit to estimate uncertainties\n with this fitting method.\n" ], [ "# Fit with differential evolution and then Levenberg-Marquardt \ndoFit(fitterMethods=[\"differential_evolution\", \"leastsq\"])", "[[Variables]]\n k1: 1.0135371221930065\n k2: 2.1581297205926795\n k3: 3.0793394480158964\n k4: 4.280401360560127\n[[Fit Statistics]]\n # fitting method = leastsq\n # function evals = 6\n # data points = 500\n # variables = 4\n chi-square = 102.939841\n reduced chi-square = 0.20754000\n Akaike info crit = -782.231674\n Bayesian info crit = -765.373241\n[[Correlations]] (unreported correlations are < 0.100)\n C(k3, k4) = -0.297\n C(k2, k3) = -0.263\n C(k2, k4) = -0.243\n C(k1, k2) = -0.227\n C(k1, k3) = -0.188\n C(k1, k4) = -0.182\n" ] ], [ [ "For this model, we see that Levenberg-Marquardt works better than differential evolution, and doing the two in combination offers no benefit.", "_____no_output_____" ], [ "## Search Start & Scope", "_____no_output_____" ], [ "Where we start the search and how far we search depends on the ranges of parameter values and the specification of initial values.\nThis is specified by the ``parameterDct`` argument to ``ModelFitter``. This argument defaults to ``LINEAR_PATHWAY_PARAMETER_DCT``.", "_____no_output_____" ], [ "If we create a bad parameter range, then we get very poor fits. 
Below, we start the search with a negative value for each parameter.\nNote that the observed values appear to be constant because of the large scale of the fitted values.", "_____no_output_____" ] ], [ [ "parametersToFit = [\n SBstoat.Parameter(\"k1\", lower=-11, value=-1, upper=1),\n SBstoat.Parameter(\"k2\", lower=-11, value=-1, upper=1),\n SBstoat.Parameter(\"k3\", lower=-11, value=-1, upper=1),\n SBstoat.Parameter(\"k4\", lower=-11, value=-1, upper=1),\n ]\ndoFit(parametersToFit=parametersToFit)", "[[Variables]]\n k1: 1.0\n k2: 1.0\n k3: 1.0\n k4: 1.0\n[[Fit Statistics]]\n # fitting method = leastsq\n # function evals = 13\n # data points = 500\n # variables = 4\n chi-square = 1131.12055\n reduced chi-square = 2.28048498\n Akaike info crit = 416.177979\n Bayesian info crit = 433.036412\n[[Correlations]] (unreported correlations are < 0.100)\n C(k1, k4) = -0.821\n C(k3, k4) = -0.462\n C(k2, k4) = 0.239\n C(k1, k2) = -0.207\n C(k1, k3) = -0.128\n C(k2, k3) = -0.123\n" ] ], [ [ "# Exercise", "_____no_output_____" ], [ "This exercise is about fitting parameters in the Wolf model for glycolytic oscillations.\nThe model is ``WOLF_MODEL`` and the observational data for this model are ``WOLF_DF``.\n\n1. Implement a ``doFit`` function that encapsulates the workflow for the Wolf model.\n1. Try fitting the model using ``WOLF_PARAMETERS``. First try ``leastSquares`` (a graident descent method) and then ``differential_evolution``. How did the two methods differ as to fitting time and quality? Why? What would you try next to get better fits?\n1. Limit the parameter values so that the upper value is twice the true value. Try fits using leastsqs and differential evolution.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
4a2f7c8c2875dee8a56c2840c2b8731a05266e91
23,714
ipynb
Jupyter Notebook
notebooks/basics/java.ipynb
LucienZhang/website-binder
76148742640d77dfa33ed3af34c170fe8145c11d
[ "MIT" ]
null
null
null
notebooks/basics/java.ipynb
LucienZhang/website-binder
76148742640d77dfa33ed3af34c170fe8145c11d
[ "MIT" ]
null
null
null
notebooks/basics/java.ipynb
LucienZhang/website-binder
76148742640d77dfa33ed3af34c170fe8145c11d
[ "MIT" ]
null
null
null
16.287088
141
0.425993
[ [ [ "## Version", "_____no_output_____" ] ], [ [ "System.out.println(System.getProperty(\"java.version\"));", "11.0.7\n" ] ], [ [ "## Identifiers\n\n1. Available characters: lowercase letters (a to z), uppercase letters(A to Z), digits (0 to 9), underscore `_`, and dollar sign `$`\n2. Cannot start with a digit\n3. Case sensitive\n\n## Keywords\n\n- abstract\n- continue\n- for\n- new\n- switch\n- assert\n- default\n- package\n- synchronized\n- boolean\n- do\n- if\n- private\n- this\n- break\n- double\n- implements\n- protected\n- throw\n- byte\n- else\n- import\n- public\n- throws\n- case\n- enum\n- instanceof\n- return\n- transient\n- catch\n- extends\n- int\n- short\n- try\n- char\n- final\n- interface\n- static\n- void\n- class\n- finally\n- long\n- strictfp\n- volatile\n- float\n- native\n- super\n- while", "_____no_output_____" ], [ "## Comments", "_____no_output_____" ] ], [ [ "// this is a single-line comment\n\n/*\n * this is\n * a multi-line\n * comment\n */", "_____no_output_____" ] ], [ [ "## Multi-line statement", "_____no_output_____" ] ], [ [ "String total = \"line 1 \" +\n \"line 2 \" +\n \"line 3\";\nSystem.out.println(total);", "line 1 line 2 line 3\n" ] ], [ [ "## Empty statement", "_____no_output_____" ] ], [ [ "{}", "_____no_output_____" ] ], [ [ "## Basic Data Type", "_____no_output_____" ] ], [ [ "int a = 1;\ndouble b = 1;\nchar c = 97;\nboolean d = false;\nSystem.out.printf(\"%d %f %s %s %s\\n\", a, b, c, d, null);", "1 1.000000 a false null\n" ], [ "Integer.SIZE", "_____no_output_____" ], [ "Integer.MIN_VALUE;", "_____no_output_____" ], [ "Integer.MAX_VALUE;", "_____no_output_____" ], [ "Double.SIZE", "_____no_output_____" ], [ "Double.MIN_VALUE;", "_____no_output_____" ], [ "Double.MAX_VALUE;", "_____no_output_____" ], [ "Character.SIZE", "_____no_output_____" ], [ "Integer.valueOf(Character.MIN_VALUE);", "_____no_output_____" ], [ "Integer.valueOf(Character.MAX_VALUE)", "_____no_output_____" ], [ "Boolean.FALSE;", "_____no_output_____" ], 
[ "Boolean.TRUE;", "_____no_output_____" ], [ "Byte.SIZE", "_____no_output_____" ], [ "Byte.MIN_VALUE;", "_____no_output_____" ], [ "Byte.MAX_VALUE;", "_____no_output_____" ], [ "Short.SIZE", "_____no_output_____" ], [ "Short.MIN_VALUE;", "_____no_output_____" ], [ "Short.MAX_VALUE;", "_____no_output_____" ], [ "Long.SIZE", "_____no_output_____" ], [ "Long.MIN_VALUE;", "_____no_output_____" ], [ "Long.MAX_VALUE;", "_____no_output_____" ], [ "Float.SIZE", "_____no_output_____" ], [ "Float.MIN_VALUE;", "_____no_output_____" ], [ "Float.MAX_VALUE;", "_____no_output_____" ] ], [ [ "## Variable assignment", "_____no_output_____" ] ], [ [ "int a = 1, b = 2, c = 3;\nSystem.out.printf(\"%d %d %d\\n\", a, b, c);\na = b = 1;\nSystem.out.printf(\"%d %d\", a, b);", "1 2 3\n1 1" ] ], [ [ "## Operations", "_____no_output_____" ] ], [ [ "1+2", "_____no_output_____" ], [ "4.3-2", "_____no_output_____" ], [ "3*7", "_____no_output_____" ], [ "2/4", "_____no_output_____" ], [ "4/2", "_____no_output_____" ], [ "2.0/4", "_____no_output_____" ], [ "4/2.0", "_____no_output_____" ], [ "5 % 3", "_____no_output_____" ], [ "-5 % 3", "_____no_output_____" ], [ "5 % -3", "_____no_output_____" ], [ "-5 % -3", "_____no_output_____" ], [ "int a = 1;\na++;\na", "_____no_output_____" ], [ "int a = 1;\na--;\na", "_____no_output_____" ], [ "1 < 2", "_____no_output_____" ], [ "1 == 1.0", "_____no_output_____" ], [ "1 != 2", "_____no_output_____" ], [ "5 >= 1", "_____no_output_____" ], [ "3 <= 5", "_____no_output_____" ], [ "int a = 1;\na += 1;\na -= 1;\na *= 1;\na /= 1;\na %= 1;\na", "_____no_output_____" ], [ "int a = 0b0011;\nint b = 0b0101;\nprintf(\"%d %d\\n\", a, b);\nSystem.out.println(String.format(\"%4s\", Integer.toBinaryString(a)).replace(\" \", \"0\"));\nSystem.out.println(String.format(\"%4s\", Integer.toBinaryString(b)).replace(\" \", \"0\"));", "3 5\n0011\n0101\n" ], [ "System.out.println(String.format(\"%4s\", Integer.toBinaryString(a & b)).replace(\" \", \"0\"));", "0001\n" ], [ 
"System.out.println(String.format(\"%4s\", Integer.toBinaryString(a | b)).replace(\" \", \"0\"));", "0111\n" ], [ "System.out.println(String.format(\"%4s\", Integer.toBinaryString(a ^ b)).replace(\" \", \"0\"));", "0110\n" ], [ "~a", "_____no_output_____" ], [ "a = -1;\nSystem.out.println(String.format(\"%32s\", Integer.toBinaryString(a ^ b)).replace(\" \", \"0\"));\nSystem.out.println(String.format(\"%32s\", Integer.toBinaryString(a >> 1)).replace(\" \", \"0\"));\nSystem.out.println(String.format(\"%32s\", Integer.toBinaryString(a >>> 1)).replace(\" \", \"0\"));\nSystem.out.println(String.format(\"%32s\", Integer.toBinaryString(a << 1)).replace(\" \", \"0\"));", "11111111111111111111111111111010\n11111111111111111111111111111111\n01111111111111111111111111111111\n11111111111111111111111111111110\n" ], [ "a &= 1;\na |= 1;\na ^= 1;\na >>= 1;\na <<= 1;\na >>>= 1;\na", "_____no_output_____" ], [ "true && false", "_____no_output_____" ], [ "true || false", "_____no_output_____" ], [ "!true", "_____no_output_____" ], [ "String a = \"abc\";\nString b = \"ab\";\nb += \"c\";\nSystem.out.println(a);\nSystem.out.println(b);\nSystem.out.println(a == b);\nSystem.out.println(a.equals(b));", "abc\nabc\nfalse\ntrue\n" ], [ "true || false && false", "_____no_output_____" ], [ "true || (false && false)", "_____no_output_____" ], [ "(true || false) && false", "_____no_output_____" ], [ "true ? 1 : 0", "_____no_output_____" ], [ "false ? 1 : 0", "_____no_output_____" ], [ "Integer.valueOf(1) instanceof Integer", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a2f81ea87d38da15dff15b4c68a93cdd02a2654
8,775
ipynb
Jupyter Notebook
nbs/01_wicht_dataset_creation.ipynb
omlnaut/sudoku
9e4dc60c475133447cc81c3bf4c8348019a2fd13
[ "MIT" ]
null
null
null
nbs/01_wicht_dataset_creation.ipynb
omlnaut/sudoku
9e4dc60c475133447cc81c3bf4c8348019a2fd13
[ "MIT" ]
null
null
null
nbs/01_wicht_dataset_creation.ipynb
omlnaut/sudoku
9e4dc60c475133447cc81c3bf4c8348019a2fd13
[ "MIT" ]
null
null
null
23.214286
174
0.437265
[ [ [ "import pandas as pd\nimport numpy as np\nfrom pathlib import Path", "_____no_output_____" ] ], [ [ "# Dataset wicht", "_____no_output_____" ], [ "Goal: Create a dataframe that maps the img filenames to numpy arrays containing the ocr digits.", "_____no_output_____" ] ], [ [ "save_path = Path('../data/wicht')\nraw_dir = save_path/'raw'\n", "_____no_output_____" ] ], [ [ "Images and solutions (OCR) are paired as:\n- \\<name\\>.jpg\n- \\<name\\>.dat", "_____no_output_____" ] ], [ [ "all_files = list(raw_dir.glob('*'))\nall_files[:2], len(all_files)", "_____no_output_____" ], [ "names = [filename.name.split('.')[0] for filename in raw_dir.glob('*.dat')]\nassert len(names)*2==len(all_files)\n\nnames[:5]", "_____no_output_____" ] ], [ [ "### Extract solution from text description", "_____no_output_____" ] ], [ [ "name = names[0]\ndat_path = raw_dir / (name+'.dat')", "_____no_output_____" ], [ "text_description = dat_path.open('r').read()", "_____no_output_____" ], [ "' '.join([row.strip() for row in text_description.split('\\n')[2:-1]])", "_____no_output_____" ], [ "def mat_from_text(text_description):\n return ' '.join([row.strip() for row in text_description.split('\\n')[2:-1]])", "_____no_output_____" ], [ "mat = mat_from_text(text_description)\nassert len(mat.split(' '))==81\nmat", "_____no_output_____" ] ], [ [ "### All files", "_____no_output_____" ] ], [ [ "pairs = []\n\nfor name in names:\n file_path = raw_dir / (name+'.jpg')\n dat_path = raw_dir / (name+'.dat')\n ocr = mat_from_text(dat_path.open('r').read())\n \n pairs.append((file_path, ocr))", "_____no_output_____" ], [ "pairs[0]", "_____no_output_____" ], [ "df = pd.DataFrame(pairs, columns=['path', 'ocr'])\ndf.head()", "_____no_output_____" ], [ "df.to_csv(save_path/'wicht.csv')", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
4a2f8a94e35842288da38f2dfb42ecace436939c
11,036
ipynb
Jupyter Notebook
datasets/iq2_corpus/apply_complexity.ipynb
maubinle/Cornell-Conversational-Analysis-Toolkit
5339c2b9bd3f147d3f44f3d1456607f01f46af54
[ "MIT" ]
1
2021-12-24T13:56:20.000Z
2021-12-24T13:56:20.000Z
datasets/iq2_corpus/apply_complexity.ipynb
maubinle/Cornell-Conversational-Analysis-Toolkit
5339c2b9bd3f147d3f44f3d1456607f01f46af54
[ "MIT" ]
null
null
null
datasets/iq2_corpus/apply_complexity.ipynb
maubinle/Cornell-Conversational-Analysis-Toolkit
5339c2b9bd3f147d3f44f3d1456607f01f46af54
[ "MIT" ]
null
null
null
42.610039
2,040
0.63465
[ [ [ "# import required modules and set up environment\nimport os\n# replace file path below with your own local convokit\nos.chdir('/home/lucas/Cornell-Conversational-Analysis-Toolkit')\nimport convokit\n\nfrom convokit import Corpus, Parser, ComplexityTransformer, Transformer\nimport nltk", "_____no_output_____" ], [ "iq2 = convokit.Corpus(filename='datasets/iq2_corpus/iq2_corpus')", "_____no_output_____" ], [ "iq2.print_summary_stats()", "Number of Users: 471\nNumber of Utterances: 26562\nNumber of Conversations: 108\n" ], [ "complexity_transformer = ComplexityTransformer()", "_____no_output_____" ], [ "compl = complexity_transformer.transform(iq2)", "doing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a 
convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\ndoing a convo\n" ], [ "compl.print_summary_stats()", "Number of Users: 471\nNumber of Utterances: 26562\nNumber of Conversations: 108\n" ], [ "for i in [513, 461, 1436, 8635, 368, 9756]:\n print(compl.utterances[str(i)].text)\n print(compl.utterances[str(i)].meta[\"complexity\"])", "Gentlemen, let me pull something out of the exchange we've just been through and bring to Mark Zandi. Basically, Glenn Hubbard was saying that he understood what your aspirations were for funding certain kinds of government programs that would be restorative of education, et cetera, and he said that you can't get it from the wealthy, that if-- politically, you can't get it, you can't raise rates to that degree for the reason that Bob gave because it's a closed loop. I want to know economically could you get it, because their argument is that economically you can't because the behavior of the wealthy would change in such a way that they just wouldn't-- they wouldn't participate. Number one, they wouldn't participate-- they would move their money offshore, and they wouldn't invest here. And that's kind of the core of their argument. Can you take that on?\n{'gunning_fog': 11.84295302013423, 'flesch_kincaid': 57.08981543624162, 'num_words': 177, 'num_sentences': 6}\nJohn, you asked me, \"Why is it relevant that in two years, that is, 1928, and 2007, the rich took home the highest percentage of total income that they've taken--\n{'gunning_fog': 15.628571428571428, 'flesch_kincaid': 58.47857142857143, 'num_words': 39, 'num_sentences': 1}\n…the Emancipation Proclamation. World War II was followed by the Warren Court. The last point: complacency, I think, is the real enemy. 
Even having this debate is a show, a sign of the confidence we have that our system is working. But we should be aware that we’re fighting a different kind of enemy, a non- nation state that conceals its activities as normal civilian communications, as normal civilian travel. The hard thing is how, as a society do we fight that, fight a war against an enemy who is not following the rules of war and tries to disguise itself as one of us. I’ll close by saying I think so far we have made a successful balance between civil liberties and security. We have reduced attacks and threats on the country. I think those gains have been worth it. And the question is, I think, for all of us today, is whether we would feel comfortable reducing the amount of government powers and by doing so, give up some of the security we’ve bought at such a, I think, dear price since 9/11. Thank you.\n{'gunning_fog': 7.972678132678133, 'flesch_kincaid': 63.70657248157249, 'num_words': 218, 'num_sentences': 11}\nSo I've read a lot of Dr. Barnard's studies, and I'm very impressed by the way that they're designed and the way that they're reported. But if you look at the data, what I think Dr. Barnard has clearly shown is that the metabolic effects and the improvements in almost every health measure are due to weight loss. And what Dr. Barnard has created is a run-of-the-mill, weight-loss diet with mediocre efficacy, where there's lots of positive effects due mostly to the weight loss that occurs in the first three to six months and then starts creeping back up. And if you –\n{'gunning_fog': 11.63106796116505, 'flesch_kincaid': 63.23106796116505, 'num_words': 116, 'num_sentences': 4}\nThank you, thank you. And may I invite one more round of applause for Robert Rosenkranz for bringing this to us. If you ask, \"do the rich pay their fair in taxes?\" you really do have to ask a couple of other questions. You’ve got to ask, well, what do you mean by rich?How much, how much is rich? 
How much money does that take? And, what do we mean by \"enough\"? Now, consider that to be counted in the top 1 percent of earners in America, you need to be making approximately $380,000 a year. Consider, also, that that 1 percent pays more than a third, as of 2009, pays more than a third of all of the federal income tax that comes in to the federal government. Is that too much? Is that too little? Well, let's make a debate of it. True or false, the rich are taxed enough. Another debate from Intelligence Squared U.S., I'm John Donvan, we have four superbly qualified debaters who will be arguing for, and against, this motion: The Rich Are Taxed Enough. They go in three rounds and then the audience votes to choose a winner, and only one side wins. Our debaters include, on the side arguing for the motion, Glenn Hubbard, Dean of Columbia Business School and economic advisor to Mitt Romney. Your partner it Art Laffer, he is best known for the Laffer Curve, one of the main theoretical constructs of supply side economics. On the side arguing against the motion, Robert Reich, he's a professor at UC Berkeley and former Secretary of Labor in the Clinton administration. And his partner, Mark Zandi, he's chief economist of Moody's Analytics. Our motion is this: The Rich Are Taxed Enough. Let's meet our debaters and welcome first, Glenn Hubbard. Glenn, you are dean of the Columbia Business School, you are also-- throughout 2012, you've been advisor to Mitt Romney's campaign. Recently you were profiled in The New York Times, and you were described there as, \"succinct, authoritative, and unabashedly partisan.\"I want to know, is that fair? Are you unabashedly partisan? And could you be succinct and authoritative?\n{'gunning_fog': 6.857606837606838, 'flesch_kincaid': 66.63596153846156, 'num_words': 443, 'num_sentences': 26}\nWell, over half of my ninth graders last year refused to take the Common Core test when they were in eighth grade, with their parents' permission. 
So, I think it's safe to say they're not fans.\n{'gunning_fog': 8.810810810810812, 'flesch_kincaid': 83.89959459459459, 'num_words': 44, 'num_sentences': 2}\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code" ] ]
4a2f9f2c7006aaa18f91ed163f47aeb5b04ab811
9,526
ipynb
Jupyter Notebook
data_science_scripts.ipynb
thomasrmulhern/useful_scripts
b5d42e523ba52d91fc68de5538bc106c11c07da1
[ "MIT" ]
null
null
null
data_science_scripts.ipynb
thomasrmulhern/useful_scripts
b5d42e523ba52d91fc68de5538bc106c11c07da1
[ "MIT" ]
null
null
null
data_science_scripts.ipynb
thomasrmulhern/useful_scripts
b5d42e523ba52d91fc68de5538bc106c11c07da1
[ "MIT" ]
null
null
null
32.735395
125
0.535482
[ [ [ "import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport statsmodels as sm\n%matplotlib inline\nfrom sklearn.preprocessing import MinMaxScaler, StandardScaler, Normalizer, Binarizer", "_____no_output_____" ] ], [ [ "# Prepare data for Machine Learning", "_____no_output_____" ] ], [ [ "#Scripts for preprocessing data for machine learning\n#Borrowed generously from https://machinelearningmastery.com/prepare-data-machine-learning-python-scikit-learn/\n\n\nclass Prepare: \n '''\n Prepare data for machine learning in Python using scikit-learn.\n \n Functions: prepare_data_from_csv, describe_data, rescale_data, standardize_data, normalize_data, binarize_data\n Input: file_path_as_string, y_column_name, column_names=None, header=0\n '''\n \n def __init__(self, file_path_as_string, y_column_name, column_names, header):\n self.file_path_as_string = file_path_as_string \n self.y_column_name = y_column_name\n self.column_names = column_names\n self.header = 0\n\n\n\n def prepare_data_from_csv(self):\n\n '''Import and prepare data'''\n\n dataframe = pd.read_csv(self.file_path_as_string, names=self.column_names, delimiter=',',header=self.header)\n \n if self.header != None:\n dataframe.columns = [x.replace(' ', '_') for x in dataframe.columns]\n print(dataframe.head())\n\n self.X = dataframe.drop(self.y_column_name, axis=1)\n self.y = dataframe[self.y_column_name]\n\n prepared_df = dataframe\n #self.prepared_df = dataframe\n \n return prepared_df, self.X, self.y #self.prepared_df\n\n\n def describe_data(self, prepared_df):\n\n ''' Print shape and descriptive statistics'''\n \n print('\\nColumns: ','\\n'+'--'*25 + f'\\n{prepared_df.columns}')\n print('--'*25, '\\n'+'--'*25)\n print('\\nInfo: ','\\n'+'--'*25 + f'\\n{prepared_df.info()}')\n print('--'*25, '\\n'+'--'*25)\n print('\\nUnique: ','\\n'+'--'*25 + f'\\n{prepared_df.nunique()}')\n print('--'*25, '\\n'+'--'*25)\n print('\\nNulls: ', '\\n'+'--'*25 + f'\\n{prepared_df.isnull().sum()}')\n 
print('--'*25, '\\n'+'--'*25)\n print('\\nDescribe: ', '\\n'+'--'*25 + f'\\n{prepared_df.describe()}')\n print('--'*25, '\\n'+'--'*25)\n print('\\nHead: ', '\\n'+'--'*25 + f'\\n{prepared_df.head()}')\n print('--'*25, '\\n'+'--'*25)\n\n \n def rescale_data(self, X): \n\n '''\n When your data is comprised of attributes with varying scales, many machine learning algorithms \n can benefit from rescaling the attributes to all have the same scale.Often this is referred to as \n normalization and attributes are often rescaled into the range between 0 and 1. This is useful for \n optimization algorithms in used in the core of machine learning algorithms like gradient descent. \n It is also useful for algorithms that weight inputs like regression and neural networks and algorithms \n that use distance measures like K-Nearest Neighbors.\n\n Input: dataframe data to be used for features\n Return: scaled data\n '''\n\n scaler = MinMaxScaler(feature_range=(0, 1))\n rescaledX = scaler.fit_transform(self.X)\n\n # summarize transformed data\n np.set_printoptions(precision=3)\n print(rescaledX[0:5,:])\n\n return rescaledX\n\n\n def standardize_data(self, X):\n\n '''\n Standardize attributes with a Gaussian distribution and differing means and standard deviations\n to a standard Gaussian distribution with a mean of 0 and a standard deviation of 1. 
It is most suitable\n for techniques that assume a Gaussian distribution in the input variables and work better with rescaled \n data, such as linear regression, logistic regression and linear discriminate analysis.\n\n Input: dataframe data to be used for features\n Return: standardized data\n '''\n\n stand_scaler = StandardScaler().fit(X)\n stand_rescaledX = stand_scaler.transform(self.X)\n\n # summarize transformed data\n np.set_printoptions(precision=3)\n print(stand_rescaledX[0:5,:])\n\n return stand_rescaledX\n\n\n def normalize_data(self, X):\n\n '''\n Rescale each observation (row) to have a length of 1 (called a unit norm in linear algebra). This \n preprocessing can be useful for sparse datasets (lots of zeros) with attributes of varying scales when \n using algorithms that weight input values such as neural networks and algorithms that use distance measures \n such as K-Nearest Neighbors.\n\n Input: dataframe data to be used for features\n Return: normalized data\n '''\n\n norm_scaler = Normalizer().fit(self.X)\n normalizedX = norm_scaler.transform(self.X)\n\n # summarize transformed data\n np.set_printoptions(precision=3)\n print(normalizedX[0:5,:])\n\n return normalizedX\n \n\n def binarize_data(self, X):\n\n '''\n Transform data using a binary threshold. All values above the threshold are marked 1 and all\n equal to or below are marked as 0. This is called binarizing your data or threshold your data. It can\n be useful when you have probabilities that you want to make crisp values. 
It is also useful when feature\n engineering and you want to add new features that indicate something meaningful.\n\n Input: dataframe data to be used for features\n Return: binarized data\n '''\n\n binarizer = Binarizer(threshold=0.0).fit(self.X)\n binaryX = binarizer.transform(X)\n\n # summarize transformed data\n np.set_printoptions(precision=3)\n print(binaryX[0:5,:])\n\n return binaryX", "_____no_output_____" ] ], [ [ "### Tests", "_____no_output_____" ] ], [ [ "# names = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']\n# prep_obj = Prepare('data/pima-indians-diabetes.data copy.csv', 'class', names, 0) \n# prepared_df, X, y = prep_obj.prepare_data_from_csv()", "_____no_output_____" ], [ "#prep_obj.describe_data(prepared_df)", "_____no_output_____" ], [ "#resc_x = prep_obj.rescale_data(X)", "_____no_output_____" ], [ "#stand_x = prep_obj.standardize_data(X)", "_____no_output_____" ], [ "#norm_x = prep_obj.normalize_data(X)", "_____no_output_____" ], [ "#bin_x = prep_obj.binarize_data(X)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
4a2fa2ec23df230e8091c2cb7ac3caeaeebcba70
5,116
ipynb
Jupyter Notebook
Altair.ipynb
JuanPabloHA/Altair_data_viz
098cd168b9a1f802fadc5d5d607c307e09da1460
[ "MIT" ]
null
null
null
Altair.ipynb
JuanPabloHA/Altair_data_viz
098cd168b9a1f802fadc5d5d607c307e09da1460
[ "MIT" ]
null
null
null
Altair.ipynb
JuanPabloHA/Altair_data_viz
098cd168b9a1f802fadc5d5d607c307e09da1460
[ "MIT" ]
null
null
null
36.542857
787
0.501955
[ [ [ "# Notebook Setup", "_____no_output_____" ] ], [ [ "import altair as alt\nfrom vega_datasets import data", "_____no_output_____" ], [ "counties = alt.topo_feature(data.us_10m.url, 'counties')\nsource = data.unemployment.url\n\nalt.Chart(counties).mark_geoshape().encode(\n color='rate:Q',\n tooltip='rate:Q'\n).transform_lookup(\n lookup='id',\n from_=alt.LookupData(source, 'id', ['rate'])\n).project(\n type='albersUsa'\n).properties(\n width=500,\n height=300\n)", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ] ]
4a2fa7320a0a46e7087c0fb1c50cccead336ea13
568
ipynb
Jupyter Notebook
jupyterlab/examples/tmp.ipynb
datalayer/jupyter-examples
b279db0de6f63452080cd90f6d2d9648c1db041f
[ "MIT" ]
4
2022-01-31T10:07:49.000Z
2022-02-02T08:53:18.000Z
jupyterlab/examples/test.ipynb
datalayer/jupyter-examples
b279db0de6f63452080cd90f6d2d9648c1db041f
[ "MIT" ]
null
null
null
jupyterlab/examples/test.ipynb
datalayer/jupyter-examples
b279db0de6f63452080cd90f6d2d9648c1db041f
[ "MIT" ]
2
2022-02-23T00:45:38.000Z
2022-03-08T03:15:43.000Z
17.212121
42
0.53169
[]
[]
[]
4a2fb0106a250552103aa20161d693e2953f4ddc
69,615
ipynb
Jupyter Notebook
JupyterNotebookCode/cifar10_model_1.ipynb
DragonYong/Tensorflow
1f2b9fd81916515eb27f76827d31a61c31e03edb
[ "MIT" ]
null
null
null
JupyterNotebookCode/cifar10_model_1.ipynb
DragonYong/Tensorflow
1f2b9fd81916515eb27f76827d31a61c31e03edb
[ "MIT" ]
2
2020-06-01T04:32:46.000Z
2020-06-02T01:10:32.000Z
JupyterNotebookCode/cifar10_model_1.ipynb
DragonYong/Tensorflow
1f2b9fd81916515eb27f76827d31a61c31e03edb
[ "MIT" ]
null
null
null
107.930233
25,880
0.808073
[ [ [ "%matplotlib inline\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport pandas as pd\nimport sklearn\nimport sys\nimport tensorflow as tf\nimport time\n\nfrom tensorflow import keras\n\nprint(tf.__version__)\nprint(sys.version_info)\nfor module in mpl, np, pd, sklearn, tf, keras:\n print(module.__name__, module.__version__)", "1.13.1\nsys.version_info(major=3, minor=5, micro=3, releaselevel='final', serial=0)\nmatplotlib 3.0.3\nnumpy 1.16.4\npandas 0.24.2\nsklearn 0.21.2\ntensorflow 1.13.1\ntensorflow._api.v1.keras 2.2.4-tf\n" ], [ "class_names = [\n 'airplane',\n 'automobile',\n 'bird',\n 'cat',\n 'deer',\n 'dog',\n 'frog',\n 'horse',\n 'ship',\n 'truck',\n]\n\ntrain_lables_file = './cifar10/trainLabels.csv'\ntest_csv_file = './cifar10/sampleSubmission.csv'\ntrain_folder = './cifar10/train/'\ntest_folder = './cifar10/test'\n\ndef parse_csv_file(filepath, folder):\n \"\"\"Parses csv files into (filename(path), label) format\"\"\"\n results = []\n with open(filepath, 'r') as f:\n lines = f.readlines()[1:]\n for line in lines:\n image_id, label_str = line.strip('\\n').split(',')\n image_full_path = os.path.join(folder, image_id + '.png')\n results.append((image_full_path, label_str))\n return results\n\ntrain_labels_info = parse_csv_file(train_lables_file, train_folder)\ntest_csv_info = parse_csv_file(test_csv_file, test_folder)\n\nimport pprint\npprint.pprint(train_labels_info[0:5])\npprint.pprint(test_csv_info[0:5])\nprint(len(train_labels_info), len(test_csv_info))", "[('./cifar10/train/1.png', 'frog'),\n ('./cifar10/train/2.png', 'truck'),\n ('./cifar10/train/3.png', 'truck'),\n ('./cifar10/train/4.png', 'deer'),\n ('./cifar10/train/5.png', 'automobile')]\n[('./cifar10/test/1.png', 'cat'),\n ('./cifar10/test/2.png', 'cat'),\n ('./cifar10/test/3.png', 'cat'),\n ('./cifar10/test/4.png', 'cat'),\n ('./cifar10/test/5.png', 'cat')]\n50000 300000\n" ], [ "# train_df = pd.DataFrame(train_labels_info)\ntrain_df = 
pd.DataFrame(train_labels_info[0:45000])\nvalid_df = pd.DataFrame(train_labels_info[45000:])\ntest_df = pd.DataFrame(test_csv_info)\n\ntrain_df.columns = ['filepath', 'class']\nvalid_df.columns = ['filepath', 'class']\ntest_df.columns = ['filepath', 'class']\n\nprint(train_df.head())\nprint(valid_df.head())\nprint(test_df.head())", " filepath class\n0 ./cifar10/train/1.png frog\n1 ./cifar10/train/2.png truck\n2 ./cifar10/train/3.png truck\n3 ./cifar10/train/4.png deer\n4 ./cifar10/train/5.png automobile\n filepath class\n0 ./cifar10/train/45001.png horse\n1 ./cifar10/train/45002.png automobile\n2 ./cifar10/train/45003.png deer\n3 ./cifar10/train/45004.png automobile\n4 ./cifar10/train/45005.png airplane\n filepath class\n0 ./cifar10/test/1.png cat\n1 ./cifar10/test/2.png cat\n2 ./cifar10/test/3.png cat\n3 ./cifar10/test/4.png cat\n4 ./cifar10/test/5.png cat\n" ], [ "height = 32\nwidth = 32\nchannels = 3\nbatch_size = 32\nnum_classes = 10\n\ntrain_datagen = keras.preprocessing.image.ImageDataGenerator(\n rescale = 1./255,\n rotation_range = 40,\n width_shift_range = 0.2,\n height_shift_range = 0.2,\n shear_range = 0.2,\n zoom_range = 0.2,\n horizontal_flip = True,\n fill_mode = 'nearest',\n)\ntrain_generator = train_datagen.flow_from_dataframe(\n train_df,\n directory = './',\n x_col = 'filepath',\n y_col = 'class',\n classes = class_names,\n target_size = (height, width),\n batch_size = batch_size,\n seed = 7,\n shuffle = True,\n class_mode = 'sparse',\n)\n\nvalid_datagen = keras.preprocessing.image.ImageDataGenerator(\n rescale = 1./255)\nvalid_generator = valid_datagen.flow_from_dataframe(\n valid_df,\n directory = './',\n x_col = 'filepath',\n y_col = 'class',\n classes = class_names,\n target_size = (height, width),\n batch_size = batch_size,\n seed = 7,\n shuffle = False,\n class_mode = \"sparse\")\n\ntrain_num = train_generator.samples\nvalid_num = valid_generator.samples\nprint(train_num, valid_num)", "Found 45000 images belonging to 10 classes.\nFound 5000 
images belonging to 10 classes.\n45000 5000\n" ], [ "for i in range(2):\n x, y = train_generator.next()\n print(x.shape, y.shape)\n print(y)", "(32, 32, 32, 3) (32,)\n[2. 1. 4. 4. 4. 4. 6. 5. 2. 8. 4. 6. 6. 3. 7. 1. 7. 2. 8. 8. 3. 0. 5. 3.\n 9. 1. 4. 5. 6. 7. 9. 2.]\n(32, 32, 32, 3) (32,)\n[0. 7. 2. 7. 5. 5. 7. 0. 5. 4. 9. 7. 6. 3. 0. 4. 4. 4. 6. 3. 5. 4. 6. 6.\n 4. 1. 8. 2. 4. 4. 3. 0.]\n" ], [ "model = keras.models.Sequential([\n keras.layers.Conv2D(filters=128, kernel_size=3, padding='same',\n activation='relu', \n input_shape=[width, height, channels]),\n keras.layers.BatchNormalization(),\n keras.layers.Conv2D(filters=128, kernel_size=3, padding='same',\n activation='relu'),\n keras.layers.BatchNormalization(),\n keras.layers.MaxPool2D(pool_size=2),\n \n keras.layers.Conv2D(filters=256, kernel_size=3, padding='same',\n activation='relu'),\n keras.layers.BatchNormalization(),\n keras.layers.Conv2D(filters=256, kernel_size=3, padding='same',\n activation='relu'),\n keras.layers.BatchNormalization(),\n keras.layers.MaxPool2D(pool_size=2),\n keras.layers.Conv2D(filters=512, kernel_size=3, padding='same',\n activation='relu'),\n keras.layers.BatchNormalization(),\n keras.layers.Conv2D(filters=512, kernel_size=3, padding='same',\n activation='relu'),\n keras.layers.BatchNormalization(),\n keras.layers.MaxPool2D(pool_size=2),\n keras.layers.Flatten(),\n keras.layers.Dense(512, activation='relu'),\n keras.layers.Dense(num_classes, activation='softmax'),\n])\n\nmodel.compile(loss=\"sparse_categorical_crossentropy\",\n optimizer=\"adam\", metrics=['accuracy'])\nmodel.summary()", "WARNING:tensorflow:From /usr/local/lib/python3.5/dist-packages/tensorflow/python/ops/resource_variable_ops.py:435: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\nInstructions for updating:\nColocations handled automatically by placer.\n_________________________________________________________________\nLayer (type) Output Shape Param 
# \n=================================================================\nconv2d (Conv2D) (None, 32, 32, 128) 3584 \n_________________________________________________________________\nbatch_normalization_v1 (Batc (None, 32, 32, 128) 512 \n_________________________________________________________________\nconv2d_1 (Conv2D) (None, 32, 32, 128) 147584 \n_________________________________________________________________\nbatch_normalization_v1_1 (Ba (None, 32, 32, 128) 512 \n_________________________________________________________________\nmax_pooling2d (MaxPooling2D) (None, 16, 16, 128) 0 \n_________________________________________________________________\nconv2d_2 (Conv2D) (None, 16, 16, 256) 295168 \n_________________________________________________________________\nbatch_normalization_v1_2 (Ba (None, 16, 16, 256) 1024 \n_________________________________________________________________\nconv2d_3 (Conv2D) (None, 16, 16, 256) 590080 \n_________________________________________________________________\nbatch_normalization_v1_3 (Ba (None, 16, 16, 256) 1024 \n_________________________________________________________________\nmax_pooling2d_1 (MaxPooling2 (None, 8, 8, 256) 0 \n_________________________________________________________________\nconv2d_4 (Conv2D) (None, 8, 8, 512) 1180160 \n_________________________________________________________________\nbatch_normalization_v1_4 (Ba (None, 8, 8, 512) 2048 \n_________________________________________________________________\nconv2d_5 (Conv2D) (None, 8, 8, 512) 2359808 \n_________________________________________________________________\nbatch_normalization_v1_5 (Ba (None, 8, 8, 512) 2048 \n_________________________________________________________________\nmax_pooling2d_2 (MaxPooling2 (None, 4, 4, 512) 0 \n_________________________________________________________________\nflatten (Flatten) (None, 8192) 0 \n_________________________________________________________________\ndense (Dense) (None, 512) 4194816 
\n_________________________________________________________________\ndense_1 (Dense) (None, 10) 5130 \n=================================================================\nTotal params: 8,783,498\nTrainable params: 8,779,914\nNon-trainable params: 3,584\n_________________________________________________________________\n" ], [ "epochs = 20\nhistory = model.fit_generator(train_generator,\n steps_per_epoch = train_num // batch_size,\n epochs = epochs,\n validation_data = valid_generator,\n validation_steps = valid_num // batch_size)", "Epoch 1/20\n157/157 [==============================] - 5s 35ms/step - loss: 1.6870 - acc: 0.3806\n1407/1407 [==============================] - 631s 449ms/step - loss: 2.5543 - acc: 0.2675 - val_loss: 1.6870 - val_acc: 0.3806\nEpoch 2/20\n157/157 [==============================] - 3s 18ms/step - loss: 1.7154 - acc: 0.4232\n1407/1407 [==============================] - 91s 65ms/step - loss: 1.7151 - acc: 0.3767 - val_loss: 1.7154 - val_acc: 0.4232\nEpoch 3/20\n157/157 [==============================] - 3s 18ms/step - loss: 1.7433 - acc: 0.4360\n1407/1407 [==============================] - 91s 65ms/step - loss: 1.5059 - acc: 0.4543 - val_loss: 1.7433 - val_acc: 0.4360\nEpoch 4/20\n157/157 [==============================] - 3s 18ms/step - loss: 1.1556 - acc: 0.6002\n1407/1407 [==============================] - 91s 65ms/step - loss: 1.3400 - acc: 0.5198 - val_loss: 1.1556 - val_acc: 0.6002\nEpoch 5/20\n157/157 [==============================] - 3s 18ms/step - loss: 1.0857 - acc: 0.6226\n1407/1407 [==============================] - 91s 65ms/step - loss: 1.1878 - acc: 0.5788 - val_loss: 1.0857 - val_acc: 0.6226\nEpoch 6/20\n157/157 [==============================] - 3s 18ms/step - loss: 1.0947 - acc: 0.6430\n1407/1407 [==============================] - 91s 65ms/step - loss: 1.0565 - acc: 0.6303 - val_loss: 1.0947 - val_acc: 0.6430\nEpoch 7/20\n157/157 [==============================] - 3s 18ms/step - loss: 0.7530 - acc: 0.7496\n1407/1407 
[==============================] - 91s 65ms/step - loss: 0.9525 - acc: 0.6676 - val_loss: 0.7530 - val_acc: 0.7496\nEpoch 8/20\n157/157 [==============================] - 3s 18ms/step - loss: 1.2433 - acc: 0.6230\n1407/1407 [==============================] - 91s 65ms/step - loss: 0.8687 - acc: 0.7010 - val_loss: 1.2433 - val_acc: 0.6230\nEpoch 9/20\n157/157 [==============================] - 3s 18ms/step - loss: 0.6869 - acc: 0.7760\n1407/1407 [==============================] - 92s 65ms/step - loss: 0.8051 - acc: 0.7236 - val_loss: 0.6869 - val_acc: 0.7760\nEpoch 10/20\n157/157 [==============================] - 3s 18ms/step - loss: 0.7114 - acc: 0.7798\n1407/1407 [==============================] - 92s 65ms/step - loss: 0.7481 - acc: 0.7428 - val_loss: 0.7114 - val_acc: 0.7798\nEpoch 11/20\n157/157 [==============================] - 3s 18ms/step - loss: 0.6984 - acc: 0.7746\n1407/1407 [==============================] - 91s 65ms/step - loss: 0.7112 - acc: 0.7580 - val_loss: 0.6984 - val_acc: 0.7746\nEpoch 12/20\n157/157 [==============================] - 3s 19ms/step - loss: 0.5960 - acc: 0.8136\n1407/1407 [==============================] - 93s 66ms/step - loss: 0.6698 - acc: 0.7698 - val_loss: 0.5960 - val_acc: 0.8136\nEpoch 13/20\n157/157 [==============================] - 3s 19ms/step - loss: 0.5687 - acc: 0.8196\n1407/1407 [==============================] - 92s 65ms/step - loss: 0.6366 - acc: 0.7813 - val_loss: 0.5687 - val_acc: 0.8196\nEpoch 14/20\n157/157 [==============================] - 3s 18ms/step - loss: 0.7316 - acc: 0.7654\n1407/1407 [==============================] - 91s 65ms/step - loss: 0.6090 - acc: 0.7940 - val_loss: 0.7316 - val_acc: 0.7654\nEpoch 15/20\n157/157 [==============================] - 3s 20ms/step - loss: 0.5415 - acc: 0.8276\n1407/1407 [==============================] - 91s 65ms/step - loss: 0.5821 - acc: 0.8022 - val_loss: 0.5415 - val_acc: 0.8276\nEpoch 16/20\n157/157 [==============================] - 3s 18ms/step - loss: 0.6255 - 
acc: 0.8126\n1407/1407 [==============================] - 92s 65ms/step - loss: 0.5611 - acc: 0.8073 - val_loss: 0.6255 - val_acc: 0.8126\nEpoch 17/20\n157/157 [==============================] - 3s 19ms/step - loss: 0.5124 - acc: 0.8350\n1407/1407 [==============================] - 92s 65ms/step - loss: 0.5346 - acc: 0.8194 - val_loss: 0.5124 - val_acc: 0.8350\nEpoch 18/20\n157/157 [==============================] - 3s 18ms/step - loss: 0.5804 - acc: 0.8248\n1407/1407 [==============================] - 92s 65ms/step - loss: 0.5129 - acc: 0.8261 - val_loss: 0.5804 - val_acc: 0.8248\nEpoch 19/20\n157/157 [==============================] - 3s 20ms/step - loss: 0.5762 - acc: 0.8194\n1407/1407 [==============================] - 99s 71ms/step - loss: 0.4913 - acc: 0.8332 - val_loss: 0.5762 - val_acc: 0.8194\nEpoch 20/20\n157/157 [==============================] - 3s 19ms/step - loss: 0.5128 - acc: 0.8442\n1407/1407 [==============================] - 101s 72ms/step - loss: 0.4836 - acc: 0.8359 - val_loss: 0.5128 - val_acc: 0.8442\n" ], [ "def plot_learning_curves(history, label, epcohs, min_value, max_value):\n data = {}\n data[label] = history.history[label]\n data['val_'+label] = history.history['val_'+label]\n pd.DataFrame(data).plot(figsize=(8, 5))\n plt.grid(True)\n plt.axis([0, epochs, min_value, max_value])\n plt.show()\n \nplot_learning_curves(history, 'acc', epochs, 0, 1)\nplot_learning_curves(history, 'loss', epochs, 0, 2)", "_____no_output_____" ], [ "test_datagen = keras.preprocessing.image.ImageDataGenerator(\n rescale = 1./255)\ntest_generator = valid_datagen.flow_from_dataframe(\n test_df,\n directory = './',\n x_col = 'filepath',\n y_col = 'class',\n classes = class_names,\n target_size = (height, width),\n batch_size = batch_size,\n seed = 7,\n shuffle = False,\n class_mode = \"sparse\")\ntest_num = test_generator.samples\nprint(test_num)", "Found 300000 images belonging to 10 classes.\n300000\n" ], [ "test_predict = 
model.predict_generator(test_generator,\n workers = 10,\n use_multiprocessing = True)", "_____no_output_____" ], [ "print(test_predict.shape)", "(300000, 10)\n" ], [ "print(test_predict[0:5])", "[[1.8115582e-02 3.0195517e-02 9.7707666e-02 2.2199485e-01 9.6216276e-02\n 1.4796969e-02 3.6596778e-01 2.3226894e-02 1.2524511e-02 1.1925392e-01]\n [9.3144512e-01 2.5595291e-04 3.6763612e-02 9.3153082e-03 9.9368917e-04\n 9.1112546e-05 1.5013785e-02 3.5342187e-04 5.2798474e-03 4.8821873e-04]\n [7.2171527e-04 8.8273185e-01 3.1592429e-06 1.9850962e-05 2.2674351e-06\n 1.8648565e-06 1.6326395e-06 1.5337924e-05 6.6775086e-05 1.1643546e-01]\n [1.7911234e-05 7.6694396e-06 7.3977681e-06 1.4877276e-06 1.0498322e-06\n 2.0850619e-07 1.4016325e-06 4.9560447e-07 9.9995601e-01 6.3446323e-06]\n [9.0831274e-01 1.8281976e-04 6.2809147e-02 1.6991662e-02 8.5249258e-04\n 4.1505805e-04 3.8536564e-03 6.0711574e-04 5.2569183e-03 7.1851560e-04]]\n" ], [ "test_predict_class_indices = np.argmax(test_predict, axis = 1)", "_____no_output_____" ], [ "print(test_predict_class_indices[0:5])", "[6 0 1 8 0]\n" ], [ "test_predict_class = [class_names[index] \n for index in test_predict_class_indices]", "_____no_output_____" ], [ "print(test_predict_class[0:5])", "['frog', 'airplane', 'automobile', 'ship', 'airplane']\n" ], [ "def generate_submissions(filename, predict_class):\n with open(filename, 'w') as f:\n f.write('id,label\\n')\n for i in range(len(predict_class)):\n f.write('%d,%s\\n' % (i+1, predict_class[i]))\n\noutput_file = \"./cifar10/submission.csv\"\ngenerate_submissions(output_file, test_predict_class)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a2fbaee44944b9eb81244d1321ee37cce1710d4
67,010
ipynb
Jupyter Notebook
PGI_individual_inversion/GRAV/OnlyHKsignature/.ipynb_checkpoints/Gravity_petro_only_HK-checkpoint.ipynb
simpeg-research/Astic-2020-JointInversion-DO27
fef1c89e21c47d0f629faf9be5726a27e19dc551
[ "MIT" ]
12
2020-03-02T19:03:50.000Z
2022-03-27T02:54:28.000Z
PGI_individual_inversion/GRAV/OnlyHKsignature/.ipynb_checkpoints/Gravity_petro_only_HK-checkpoint.ipynb
thast/Astic-2020-JointInversion
f1931aef577b4d04e4845ea45dca00292e6a23fb
[ "MIT" ]
8
2020-07-26T20:47:58.000Z
2021-11-22T17:35:45.000Z
PGI_individual_inversion/GRAV/OnlyHKsignature/.ipynb_checkpoints/Gravity_petro_only_HK-checkpoint.ipynb
thast/Astic-2020-JointInversion
f1931aef577b4d04e4845ea45dca00292e6a23fb
[ "MIT" ]
8
2020-07-22T17:41:21.000Z
2022-03-18T13:54:22.000Z
119.874776
34,432
0.822146
[ [ [ "\"\"\"\n\nScript of petro-inversion of gravity over TKC\n\nNotes:\nThis version of the script uses data with less noises\nbut still invert with a higher assumed noise level.\nThis is equivalent to increase the chi-factor.\nThis has been needed in order to fit both geophysical\nand petrophysical data set.\n\"\"\"\n\n# Script of petro-inversion of gravity over TKC\nimport SimPEG.PF as PF\nfrom SimPEG import *\nfrom SimPEG.Utils import io_utils\nimport matplotlib\nimport time as tm\nimport mpl_toolkits.mplot3d as a3\nimport matplotlib.colors as colors\nimport matplotlib.pyplot as plt\nimport scipy as sp\nfrom scipy.interpolate import NearestNDInterpolator\nfrom sklearn.mixture import GaussianMixture\nimport numpy as np\nimport copy\nimport pickle\nfrom pymatsolver import PardisoSolver\n%matplotlib inline\nmatplotlib.rcParams['font.size'] = 14\nimport seaborn\nimport sys\nsys.path.append('../../../')\nfrom DO27_Utils import *\n\n# Nicer plot\nseaborn.set()\n# Reproducible Science\nnp.random.seed(518936)\n\n# We first need to create a susceptibility model.\n# Based on a set of parametric surfaces representing TKC,\n# we use VTK to discretize the 3-D space.\n\n# Reproducible Science\nnp.random.seed(518936)\n\n# Load Mesh\nmesh = Mesh.load_mesh('../../../Forward/mesh_inverse')\n\n# Define no-data-value\nndv = -100\n\n# Load topography file in UBC format and find the active cells\n# Import Topo\nmodel_dir = '../../../Geology_Surfaces/'\ntopofile = model_dir + 'TKCtopo.dat'\ntopo = np.genfromtxt(topofile, skip_header=1)\n# Find the active cells\nactv = Utils.surface2ind_topo(mesh, topo, gridLoc='N')\n# Create active map to go from reduce set to full\nactvMap = Maps.InjectActiveCells(mesh, actv, ndv)\nprint(\"Active cells created from topography!\")\n\n# Load data\nsurvey = io_utils.readUBCgravityObservations(\n \"../../../Forward/GRAV_noisydata.obs\"\n)\n\n# Now that we have a survey we can build the linear system ...\nnactv = np.int(np.sum(actv))\n# Creat reduced 
identity map\nidenMap = Maps.IdentityMap(nP=nactv)\n# Create the forward model operator\nprob = PF.Gravity.GravityIntegral(mesh, rhoMap=idenMap, actInd=actv)\n# Pair the survey and problem\nsurvey.pair(prob)\n\n# If you formed the sensitivity gravity matrix before, you can load it here\n#G = np.load('../../../Forward/G_Grav_Inverse.npy')\n#prob._G = G\n\n# Define noise level\nstd = 0.01\neps = 0.\nsurvey.std = std\nsurvey.eps = eps\n\n# **Inverse problem**\n\n# Petro Inversion\n\n# It is potential fields, so we will need to push the inverison down\n# Create distance weights from our linera forward operator\n# rxLoc = survey.srcField.rxList[0].locs\n# wr = PF.Magnetics.get_dist_wgt(mesh, rxLoc, actv, 3., np.min(mesh.hx)/4.)\n# wr = wr**2.\nwr = np.sum(prob.G**2., axis=0)**0.5\nwr = (wr / np.max(wr))\n\n#Initial model\nm0 = np.ones(idenMap.nP) * -1e-4\n\n# Load ground-truth models for comparison\nmodel_grav = mesh.readModelUBC(\n '../../../Forward/model_grav.den'\n)\ngeomodel = mesh.readModelUBC(\n '../../../Forward/geomodel'\n) \nmodel_grav = model_grav[model_grav != -100.]\n\n# Load petrophysics\nclf = pickle.load(open('../../../Petrophysics/gmm_density.p','rb'))\nn = clf.n_components\n\n# wires map\nwires = Maps.Wires(('m', m0.shape[0]))\n# PGI Regularization\nreg = Regularization.MakeSimplePetroRegularization(\n GMmref=clf,\n GMmodel=clf,\n mesh=mesh,\n wiresmap=wires,\n maplist=[idenMap],\n mref=m0,\n indActive=actv,\n alpha_s=1.0, alpha_x=1.0, alpha_y=1.0, alpha_z=1.0,\n alpha_xx=0., alpha_yy=0., alpha_zz=0.,\n cell_weights_list=[wr]\n)\nreg.mrefInSmooth = False\nreg.approx_gradient = True\nreg.objfcts[0].evaltype = 'approx'\n\n\n# Data misfit\ndmis = DataMisfit.l2_DataMisfit(survey)\n# Assign flat uncertainties of 0.01mGal\nwd = np.ones(len(survey.dobs)) * 0.01\ndmis.W = 1 / wd\n\n# Optimization scheme\nopt = Optimization.ProjectedGNCG(\n maxIter=50, lower=-1., upper=0., maxIterLS=20, maxIterCG=100, tolCG=1e-3\n)\n#Create inverse problem\ninvProb = 
InvProblem.BaseInvProblem(dmis, reg, opt)\n\n# Add directives to the inversion\n\n# Smoothness weights\nAlphas = Directives.AlphasSmoothEstimate_ByEig(\n alpha0_ratio=1.,\n ninit=10, verbose=True\n)\n# Beta initialization\nbeta = Directives.BetaEstimate_ByEig(beta0_ratio=1., ninit=10)\n#Beta Schedule\nbetaIt = Directives.PetroBetaReWeighting(\n verbose=True, rateCooling=5., rateWarming=1.,\n tolerance=0.1, UpdateRate=1,\n ratio_in_cooling=False,\n progress=0.2,\n update_prior_confidence=False,\n progress_gamma_cooling=1.,\n ratio_in_gamma_cooling=False,\n alphadir_rateCooling=1.,\n kappa_rateCooling=1.,\n nu_rateCooling=1.,\n)\n# Targets misfits\ntargets = Directives.PetroTargetMisfit(verbose=True)\n# Include mref in Smoothness\nMrefInSmooth = Directives.AddMrefInSmooth(\n wait_till_stable=True,\n verbose=True\n)\n# GMM, mref and Ws updates\npetrodir = Directives.GaussianMixtureUpdateModel(\n keep_ref_fixed_in_Smooth=True,\n verbose=False,\n nu=1e8,\n kappa=1e8,\n alphadir=1e8\n)\n# Pre-conditioner\nupdate_Jacobi = Directives.UpdatePreconditioner()\n\n# Create inversion\ninv = Inversion.BaseInversion(\n invProb,\n directiveList=[\n Alphas, beta,\n petrodir, targets,\n betaIt, MrefInSmooth, update_Jacobi\n ]\n)", "Active cells created from topography!\nBegin linear forward calculation: \nDone 0.0 %\nDone 10.0 %\nDone 20.0 %\nDone 30.0 %\nDone 40.0 %\nDone 50.0 %\nDone 60.0 %\nDone 70.0 %\nDone 80.0 %\nDone 90.0 %\nLinear forward calculation ended in: 1284.959203004837 sec\n" ], [ "vmin, vmax = -1.2,0\nplt.plot(\n np.linspace(vmin, vmax, 100), np.exp(\n clf.score_samples(np.linspace(vmin, vmax, 100)[:, np.newaxis])\n ),\n color='blue'\n)\nplt.plot(\n np.linspace(vmin, vmax, 100), (\n clf.predict(np.linspace(vmin, vmax, 100)[:, np.newaxis])\n ),\n color='red'\n)\nplt.show()", "_____no_output_____" ], [ "# Run inversion...\nmcluster = inv.run(m0)", "\n SimPEG.InvProblem is setting bfgsH0 to the inverse of the eval2Deriv.\n ***Done using same Solver and solverOpts as 
the problem***\nAlpha scales: [1, 1]\nAlpha scales: [1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]\nAlpha scales: [0.0, 6436.967081679338, 6418.893111252958, 6415.428171608268]\nApproximated diag(JtJ) with linear operator\nmodel has any nan: 0\n=============================== Projected GNCG ===============================\n # beta phi_d phi_m f |proj(x-g)-x| LS Comment \n-----------------------------------------------------------------------------\nx0 has any nan: 0\n 0 1.91e+01 4.31e+06 0.00e+00 4.31e+06 6.12e+02 0 \nDM: [768469.5818998] [False] ; CL: 744027.4618429622 False ; DP: True ; All: False\nprogress [768469.5818998] >< [3451606.67703696]\nmref changes in 31322 places\n 1 1.91e+01 7.68e+05 2.12e+04 1.17e+06 1.69e+02 0 \nDM: [260935.70968323] [False] ; CL: 329505.14568252885 False ; DP: True ; All: False\nprogress [260935.70968323] >< [614775.66551984]\nmref changes in 6309 places\n 2 1.91e+01 2.61e+05 1.76e+04 5.98e+05 1.14e+02 0 Skip BFGS \nDM: [226203.35636156] [False] ; CL: 283796.74406946707 False ; DP: True ; All: False\nprogress [226203.35636156] >< [208748.56774658]\nupdate beta for countering plateau\nmref changes in 5145 places\n 3 3.83e+00 2.26e+05 1.68e+04 2.91e+05 5.13e+02 0 \nDM: [68186.94029004] [False] ; CL: 431564.0865545956 False ; DP: True ; All: False\nprogress [68186.94029004] >< [180962.68508925]\nmref changes in 7227 places\n 4 3.83e+00 6.82e+04 2.98e+04 1.82e+05 9.75e+01 0 \nDM: [36890.79198421] [False] ; CL: 294421.6117356196 False ; DP: True ; All: False\nprogress [36890.79198421] >< [54549.55223203]\nmref changes in 3659 places\n 5 3.83e+00 3.69e+04 2.47e+04 1.31e+05 9.08e+01 0 \nDM: [26976.7258769] [False] ; CL: 225641.27097097068 False ; DP: True ; All: False\nprogress [26976.7258769] >< [29512.63358737]\nmref changes in 2098 places\n 6 3.83e+00 2.70e+04 2.10e+04 1.07e+05 7.81e+01 0 \nDM: [22184.57095373] [False] ; CL: 200061.43071407685 False ; DP: True ; All: False\nprogress [22184.57095373] >< [21581.38070152]\nupdate beta for 
countering plateau\nmref changes in 1335 places\n 7 7.66e-01 2.22e+04 1.95e+04 3.71e+04 2.34e+02 0 \nDM: [4915.63459333] [False] ; CL: 232954.5103458208 False ; DP: True ; All: False\nprogress [4915.63459333] >< [17747.65676298]\nmref changes in 5241 places\n 8 7.66e-01 4.92e+03 2.60e+04 2.49e+04 7.04e+01 0 \nDM: [2801.38356866] [False] ; CL: 212108.68142515095 False ; DP: True ; All: False\nprogress [2801.38356866] >< [3932.50767466]\nmref changes in 3163 places\n 9 7.66e-01 2.80e+03 2.52e+04 2.21e+04 3.91e+01 0 \nDM: [2482.10522626] [False] ; CL: 205822.1026925709 False ; DP: True ; All: False\nprogress [2482.10522626] >< [2241.10685492]\nupdate beta for countering plateau\nmref changes in 1653 places\n 10 1.53e-01 2.48e+03 2.49e+04 6.29e+03 1.36e+02 0 Skip BFGS \nDM: [315.28477122] [ True] ; CL: 225216.8870911773 False ; DP: True ; All: False\nprogress [315.28477122] >< [1985.68418101]\nupdate alpha_s for clustering: 1.524019057864131\nmref changes in 3676 places\n 11 1.53e-01 3.15e+02 3.77e+04 6.09e+03 6.73e+01 0 \nDM: [368.45639614] [ True] ; CL: 209493.38103470497 False ; DP: True ; All: False\nprogress [368.45639614] >< [528.55]\nupdate alpha_s for clustering: 1.9874567655087798\nmref changes in 963 places\n 12 1.53e-01 3.68e+02 4.36e+04 7.05e+03 5.30e+01 0 \nDM: [446.69339832] [ True] ; CL: 201862.99429250965 False ; DP: True ; All: False\nprogress [446.69339832] >< [528.55]\nupdate alpha_s for clustering: 2.1378712544468104\nmref changes in 309 places\n 13 1.53e-01 4.47e+02 4.51e+04 7.35e+03 2.83e+01 0 Skip BFGS \nDM: [473.21936548] [ True] ; CL: 200035.1922687637 False ; DP: True ; All: False\nprogress [473.21936548] >< [528.55]\nupdate alpha_s for clustering: 2.170763102055028\nmref changes in 171 places\n 14 1.53e-01 4.73e+02 4.53e+04 7.41e+03 9.06e+00 0 Skip BFGS \nDM: [477.94245624] [ True] ; CL: 199821.71748918452 False ; DP: True ; All: False\nprogress [477.94245624] >< [528.55]\nupdate alpha_s for clustering: 2.182379190068236\nmref changes in 135 
places\n 15 1.53e-01 4.78e+02 4.54e+04 7.44e+03 3.18e+00 0 \nDM: [478.77627364] [ True] ; CL: 199830.88772732127 False ; DP: True ; All: False\nprogress [478.77627364] >< [528.55]\nupdate alpha_s for clustering: 2.1902363558276643\nmref changes in 102 places\n 16 1.53e-01 4.79e+02 4.55e+04 7.46e+03 2.14e+00 0 \nDM: [479.23506402] [ True] ; CL: 199829.69596831413 False ; DP: True ; All: False\nprogress [479.23506402] >< [528.55]\nupdate alpha_s for clustering: 2.196017461971069\nmref changes in 72 places\n 17 1.53e-01 4.79e+02 4.56e+04 7.47e+03 1.64e+00 0 \nDM: [479.61902465] [ True] ; CL: 199850.57549043192 False ; DP: True ; All: False\nprogress [479.61902465] >< [528.55]\nupdate alpha_s for clustering: 2.200051157773665\nmref changes in 63 places\n 18 1.53e-01 4.80e+02 4.57e+04 7.48e+03 1.22e+00 0 \nDM: [479.76956701] [ True] ; CL: 199834.18184593972 False ; DP: True ; All: False\nprogress [479.76956701] >< [528.55]\nupdate alpha_s for clustering: 2.2034006614932498\nmref changes in 39 places\n 19 1.53e-01 4.80e+02 4.57e+04 7.48e+03 9.96e-01 0 \nDM: [480.02247383] [ True] ; CL: 199824.23742957634 False ; DP: True ; All: False\nprogress [480.02247383] >< [528.55]\nupdate alpha_s for clustering: 2.2055926035915485\nmref changes in 24 places\n 20 1.53e-01 4.80e+02 4.57e+04 7.49e+03 6.81e-01 0 \nDM: [480.22265494] [ True] ; CL: 199835.8532578255 False ; DP: True ; All: False\nprogress [480.22265494] >< [528.55]\nupdate alpha_s for clustering: 2.2068664089963415\nmref changes in 21 places\n 21 1.53e-01 4.80e+02 4.58e+04 7.49e+03 4.77e-01 0 \nDM: [480.25914726] [ True] ; CL: 199842.90971666644 False ; DP: True ; All: False\nprogress [480.25914726] >< [528.55]\nupdate alpha_s for clustering: 2.207973165278\nmref changes in 14 places\n 22 1.53e-01 4.80e+02 4.58e+04 7.49e+03 4.02e-01 0 \nDM: [480.33368715] [ True] ; CL: 199860.8240325254 False ; DP: True ; All: False\nprogress [480.33368715] >< [528.55]\nupdate alpha_s for clustering: 2.2087376636469815\nmref changes in 
15 places\n 23 1.53e-01 4.80e+02 4.58e+04 7.49e+03 3.50e-01 0 \nDM: [480.3322294] [ True] ; CL: 199865.47325069612 False ; DP: True ; All: False\nprogress [480.3322294] >< [528.55]\nupdate alpha_s for clustering: 2.209509132288095\nmref changes in 10 places\n 24 1.53e-01 4.80e+02 4.58e+04 7.50e+03 3.08e-01 0 \nDM: [480.38428515] [ True] ; CL: 199868.33679127597 False ; DP: True ; All: False\nprogress [480.38428515] >< [528.55]\nupdate alpha_s for clustering: 2.2100413583342653\nmref changes in 8 places\n 25 1.53e-01 4.80e+02 4.58e+04 7.50e+03 2.52e-01 0 \nDM: [480.40645834] [ True] ; CL: 199868.6269090235 False ; DP: True ; All: False\nprogress [480.40645834] >< [528.55]\nupdate alpha_s for clustering: 2.210471683407769\nmref changes in 5 places\n 26 1.53e-01 4.80e+02 4.58e+04 7.50e+03 1.99e-01 0 \nDM: [480.44360946] [ True] ; CL: 199858.949937939 False ; DP: True ; All: False\nprogress [480.44360946] >< [528.55]\nupdate alpha_s for clustering: 2.210731130470889\nmref changes in 0 places\nadd mref to Smoothness. 
Percent_diff is 0.0\n 27 1.53e-01 4.80e+02 7.50e+04 1.20e+04 6.14e+01 0 \nDM: [760.16880557] [False] ; CL: 140461.97496756294 True ; DP: True ; All: False\nprogress [760.16880557] >< [528.55]\nupdate beta for countering plateau\nmref changes in 0 places\n 28 3.06e-02 7.60e+02 3.17e+04 1.73e+03 6.42e+01 0 \n" ], [ "# Get the final model back to full space\nm_petro = actvMap * mcluster\nm_petro[m_petro == ndv] = np.nan", "_____no_output_____" ], [ "# Plot the recoverd models\nmesh = Mesh.TensorMesh([mesh.hx, mesh.hy, mesh.hz], x0=\"CCN\")\nnpad = 10\nX, Y = np.meshgrid(mesh.vectorCCx[npad:-npad:2], mesh.vectorCCy[npad:-npad:2])\n\nvmin, vmax = -1.2, 0.1\nfig, ax = plt.subplots(3, 1, figsize=(10, 12))\nax = Utils.mkvc(ax)\nmesh.plotSlice(\n m_petro, ax=ax[0], normal='Y',\n clim=np.r_[vmin, vmax], pcolorOpts={'cmap':'viridis'}\n)\nax[0].set_aspect('equal')\nax[0].set_title('Petro model')\n\ndat_true = mesh.plotSlice(\n actvMap*model_grav, ax=ax[1], normal='Y',\n clim=np.r_[vmin, vmax], pcolorOpts={'cmap':'viridis'}\n)\nax[1].set_aspect('equal')\nax[1].set_title('True model')\n\npos = ax[1].get_position()\ncbarax = fig.add_axes(\n [pos.x0 - 0.15, pos.y0, pos.width * 0.1, pos.height * 0.75]\n) # the parameters are the specified position you set\ncb = fig.colorbar(\n dat_true[0], cax=cbarax, orientation=\"vertical\",\n ax=ax[1], ticks=np.linspace(vmin, vmax, 4)\n)\nmcluster = m_petro[~np.isnan(m_petro)]\nax[2].hist(mcluster, bins=100, density=True)\nax[2].plot(\n np.linspace(vmin, vmax, 100), np.exp(\n clf.score_samples(np.linspace(vmin, vmax, 100)[:, np.newaxis])\n ),\n color='blue'\n)\nax[2].plot(\n np.linspace(vmin, vmax, 100), np.exp(\n reg.objfcts[0].GMmodel.score_samples(np.linspace(vmin, vmax, 100)[:, np.newaxis])\n ),\n color='k'\n)\nax[2].set_ylim([0., 5.])\n\nplt.show()\n", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code" ] ]
4a2fcd556ce5168418aa71739866ef42bec9ef10
34,623
ipynb
Jupyter Notebook
PlanspielLL_Teil1.ipynb
vale95ntino/binder_julia_test
a6068c8bf23b753db77c6e89ca8c671486b1ab56
[ "BSD-3-Clause" ]
null
null
null
PlanspielLL_Teil1.ipynb
vale95ntino/binder_julia_test
a6068c8bf23b753db77c6e89ca8c671486b1ab56
[ "BSD-3-Clause" ]
null
null
null
PlanspielLL_Teil1.ipynb
vale95ntino/binder_julia_test
a6068c8bf23b753db77c6e89ca8c671486b1ab56
[ "BSD-3-Clause" ]
null
null
null
29.770421
1,136
0.455073
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
4a2ff028fe7fb61b9a7f47305b7018edcd755fb3
25,183
ipynb
Jupyter Notebook
sklearn_logistic/sklearn_logistic.ipynb
zhenxuanzhang/sklearn
0e9c33009dd719079022efaf15541c9abd8b41c7
[ "MIT" ]
null
null
null
sklearn_logistic/sklearn_logistic.ipynb
zhenxuanzhang/sklearn
0e9c33009dd719079022efaf15541c9abd8b41c7
[ "MIT" ]
null
null
null
sklearn_logistic/sklearn_logistic.ipynb
zhenxuanzhang/sklearn
0e9c33009dd719079022efaf15541c9abd8b41c7
[ "MIT" ]
null
null
null
69.759003
17,388
0.828575
[ [ [ "import time\nimport matplotlib.pyplot as plt\nimport numpy as np\n", "_____no_output_____" ], [ "from sklearn.datasets import fetch_openml", "_____no_output_____" ], [ "#from sklearn.datasets import fetch_openml\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.utils import check_random_state\n", "_____no_output_____" ], [ "print(__doc__)\n# 输出文件开头注释的内容", "Automatically created module for IPython interactive environment\n" ], [ "to = time.time()", "_____no_output_____" ], [ "train_samples = 5000", "_____no_output_____" ] ], [ [ "X, y = fetch_openml('mnist_784', version= 1, return_X_y=True)", "_____no_output_____" ] ], [ [ "mnist_data = fetch_openml(\"mnist_784\")\nX= mnist_data[\"data\"]\ny=mnist_data[\"target\"]", "_____no_output_____" ], [ "type(X)", "_____no_output_____" ], [ "print(X.shape)\nprint(y.shape)", "(70000, 784)\n(70000,)\n" ], [ "random_state = check_random_state(0)\npermutation = random_state.permutation(X.shape[0])", "_____no_output_____" ], [ "X = X[permutation]\ny = y[permutation]\nX = X.reshape((X.shape[0], -1))", "_____no_output_____" ], [ "X_train, X_test, y_train, y_test = train_test_split(\n X, y, train_size = train_samples, test_size = 10000)", "_____no_output_____" ], [ "scaler = StandardScaler()\nX_train = scaler.fit_transform(X_train)\nX_test = scaler.transform(X_test)\n", "_____no_output_____" ] ], [ [ "scikit-learn中fit_transform()与transform() \n二者的功能都是对数据进行某种统一处理(比如标准化~N(0,1),将数据缩放(映射)到某个固定区间,归一化,正则化等) \nfit_transform(partData)对部分数据先拟合fit,找到该part的整体指标,如均值、方差、最大值最小值等等(根据具体转换的目的),然后对该partData进行转换transform,从而实现数据的标准化、归一化等等。 \n根据对之前部分fit的整体指标,对剩余的数据(restData)使用同样的均值、方差、最大最小值等指标进行转换transform(restData),从而保证part、rest处理方式相同。 \n必须先用fit_transform(partData),之后再transform(restData) \n如果直接transform(partData),程序会报错 \n如果fit_transfrom(partData)后,使用fit_transform(restData)而不用transform(restData),虽然也能归一化,但是两个结果不是在同一个“标准”下的,具有明显差异。 
\n", "_____no_output_____" ] ], [ [ "clf = LogisticRegression(C = 50./train_samples,\n multi_class = 'multinomial',\n penalty = 'l1', solver = 'saga' ,tol = 0.1)\n# C 正则化系数,其越小正则化越强\n# penalty 惩罚项 'netton-cg', 'sag', 'lbfgs'只支持'l2',这三种算法需要损失函数的一阶或二阶连续可导。\n# multi_class 决定了我们分类方式的选择 'multinomial'即为'multinomial'即为MvM\n# solver:逻辑回归损失函数的优化方法\n# 'sag':随机平均梯度下降。每次迭代仅仅用一部分的样本来计算梯度,适合于样本数据多的时候。\n# saga:线性收敛的随机优化算法的的变重\n# tol:优化算法停止的条件。当迭代前后的函数差值小于等于tol时就停止。", "_____no_output_____" ], [ "clf.fit(X_train , y_train)\nsparsity = np.mean(clf.coef_ == 0)*100\nscore = clf.score(X_test , y_test)", "_____no_output_____" ], [ "coef = clf.coef_.copy()", "_____no_output_____" ], [ "plt.figure(figsize = (10, 5))\nscale = np.abs(coef).max()\nplt.show()", "_____no_output_____" ], [ "for i in range(10):\n l2_plot = plt.subplot(2, 5, i+1)\n l2_plot.imshow(coef[i].reshape(28,28), interpolation='nearest',\n cmap = plt.cm.RdBu, vmin = -scale ,vmax = scale)\n l2_plot.set_xticks(())\n l2_plot.set_yticks(())\n l2_plot.set_xlabel(('Class %i' % i))\n\n#interpolation代表的是插值运算,'nearest'只是选取了其中的一种插值方式。\n# cmap表示绘图时的样式,这里选择的是RdBu主题。 \nplt.suptitle('Classification vector for ...')", "_____no_output_____" ], [ "run_time = time.time() - to", "_____no_output_____" ], [ "print('Example run in %.3f s' % run_time)", "Example run in 883.234 s\n" ], [ "plt.show()", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a2ff56de9e1ac1c8b6ae10afb4aa08593f6f810
7,277
ipynb
Jupyter Notebook
notebooks/1.0-tb-initial-data-exploration.ipynb
JaninePow/master-thesis
61e3877dcf99f0ba0413c8ae6fcb8e6e34e06030
[ "BSD-3-Clause" ]
null
null
null
notebooks/1.0-tb-initial-data-exploration.ipynb
JaninePow/master-thesis
61e3877dcf99f0ba0413c8ae6fcb8e6e34e06030
[ "BSD-3-Clause" ]
null
null
null
notebooks/1.0-tb-initial-data-exploration.ipynb
JaninePow/master-thesis
61e3877dcf99f0ba0413c8ae6fcb8e6e34e06030
[ "BSD-3-Clause" ]
null
null
null
31.23176
114
0.502817
[ [ [ "from pathlib import Path\nimport cv2\nimport matplotlib.pyplot as plt\nimport random\nfrom skimage.filters import (threshold_otsu, threshold_niblack,threshold_sauvola)\n\nrandom.seed(10)\n\n\n#for debug \nimport numpy as np", "_____no_output_____" ], [ "#TODO introduce a variabl n to increase speed while debugging\ndef get_raw_data(path):\n p = Path(path).glob('**/*.jpg')\n files = [x for x in p if x.is_file()]\n #return files\n imgs = {}\n for file in files:\n imgs[str(file)] = cv2.imread(str(file))\n return imgs", "_____no_output_____" ], [ "path = '/Users/beantown/PycharmProjects/master-thesis/data/raw'\nfiles = get_raw_data(path)", "_____no_output_____" ], [ "def show_example(files, keep_size=True, n=1, hide_spines=False, gray=False, add_to_title=None):\n for i in range(n):\n dpi = 80\n\n key, value = random.choice(list(files.items()))\n if not gray:\n im_data = cv2.cvtColor(value, cv2.COLOR_BGR2RGB)\n else:\n im_data = value.copy()\n \n title = str(key).split('/')[-1].split('.')[0]\n if add_to_title != None:\n title = add_to_title + ': ' + title\n\n\n if keep_size:\n if gray:\n height, width = im_data.shape\n else:\n height, width, _ = im_data.shape\n\n # What size does the figure need to be in inches to fit the image?\n figsize = width / float(dpi), height / float(dpi)\n\n # Create a figure of the right size with one axes that takes up the full figure\n fig = plt.figure(figsize=figsize)\n ax = fig.add_axes([0, 0, 1, 1])\n\n # Hide spines, ticks, etc.\n #ax.axis('off')\n if hide_spines:\n ax.axis('off')\n else:\n ax.tick_params(axis='both', which='major', labelsize=40)\n ax.tick_params(axis='both', which='minor', labelsize=30)\n \n ax.set_title(title, pad=30, fontsize=50)\n\n # Display the image.\n if gray:\n ax.imshow(im_data, cmap='gray')\n else:\n ax.imshow(im_data)\n else:\n plt.title(title, pad=20, fontsize=20)\n if hide_spines:\n plt.axis('off')\n if gray:\n plt.imshow(im_data, cmap='gray')\n else:\n plt.imshow(im_data)\n\n plt.show() ", 
"_____no_output_____" ], [ "show_example(files, keep_size=False, n=2, hide_spines=False)", "_____no_output_____" ], [ "key, value = random.choice(list(files.items()))\ntest_file = {key:value}\nshow_example(test_file, keep_size=True, n=1, hide_spines=False)", "_____no_output_____" ], [ "def get_forground(files, method='otsu'):\n # use a specific method if nothing else is given or it uses global theshold with otsu as default\n forground_files = {}\n window_size = 25\n if method == 'niblack':\n for key, value in files.items():\n # Prprocessing\n image = cv2.cvtColor(value, cv2.COLOR_BGR2GRAY)\n #blurred = cv2.GaussianBlur(im_gray, (7, 7), 0)\n thresh_niblack = threshold_niblack(image, window_size=window_size, k=0.8)\n binary_niblack = image > thresh_niblack\n forground_files[key] = binary_niblack\n \n elif method == 'sauvola':\n for key, value in files.items():\n # Prprocessing\n image = cv2.cvtColor(value, cv2.COLOR_BGR2GRAY)\n #blurred = cv2.GaussianBlur(im_gray, (7, 7), 0)\n thresh_sauvola = threshold_sauvola(image, window_size=window_size)\n binary_sauvola = image > thresh_sauvola\n forground_files[key] = binary_sauvola\n \n \n else:\n for key, value in files.items():\n # Prprocessing\n image = cv2.cvtColor(value, cv2.COLOR_BGR2GRAY)\n #blurred = cv2.GaussianBlur(im_gray, (7, 7), 0)\n binary_global = image > threshold_otsu(image)\n forground_files[key] = binary_global\n \n return forground_files", "_____no_output_____" ], [ "otsu_files = get_forground(files,)\nsauvola_files = get_forground(files, method = 'sauvola')\nniblack_files = get_forground(files, method = 'niblack')", "_____no_output_____" ], [ "otsu_img = {key: otsu_files[key]}\nsauvola_img = {key: sauvola_files[key]}\nniblack_img = {key: niblack_files[key]}", "_____no_output_____" ], [ "show_example(otsu_img, keep_size=True, n=1, hide_spines=False, gray=True, add_to_title='otsu_img')\nshow_example(sauvola_img, keep_size=True, n=1, hide_spines=False, gray=True, 
add_to_title='sauvola_img')\nshow_example(niblack_img, keep_size=True, n=1, hide_spines=False, gray=True, add_to_title='niblack_img')", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a2ff96169958d30900d391334bd4f7cbe28927c
5,040
ipynb
Jupyter Notebook
fauxfactory_tests.ipynb
rg3915/orcamentos
5cd8c4f6cbeabeb5e069c58f583c38f44c3af99a
[ "MIT" ]
94
2016-01-27T14:35:05.000Z
2022-02-22T00:01:02.000Z
fauxfactory_tests.ipynb
pliniocefet/orcamentos
b84eaad7dbc49cb3ad13e037b12074b466b70a70
[ "MIT" ]
96
2015-07-31T21:50:28.000Z
2021-06-24T15:31:10.000Z
fauxfactory_tests.ipynb
pliniocefet/orcamentos
b84eaad7dbc49cb3ad13e037b12074b466b70a70
[ "MIT" ]
43
2015-07-28T12:09:48.000Z
2021-08-06T18:20:55.000Z
20.079681
457
0.510714
[ [ [ "import fauxfactory\nimport datetime", "_____no_output_____" ], [ "strings = fauxfactory.gen_string('alphanumeric', 15)\nstrings", "_____no_output_____" ], [ "for _ in range(10):\n string = fauxfactory.gen_string('alphanumeric', 15)\n print(string)", "R9p1fBk1qXouqry\nYYzCD2WRu2vup5H\najAqvtbhZ635AEN\n96NR2MLXvYGXr46\nQS7gRenYPUJmp7k\nIj8XhYZWXPvCKcx\nDcWthm6RewHeZhd\nDMEIZ7kMWE5w3Ng\ndbKFOoSvEJ62vZp\nzGZBH0C1Vaw1Lnl\n" ], [ "string_numerics = fauxfactory.gen_string('numeric', 5)\nstring_numerics", "_____no_output_____" ], [ "for _ in range(10):\n string_numeric = fauxfactory.gen_string('numeric', 5)\n print(string_numeric)", "93933\n36713\n43282\n55309\n85214\n69323\n47436\n43686\n24648\n46973\n" ], [ "mydt = fauxfactory.gen_datetime()\nmydt", "_____no_output_____" ], [ "week_days = ('seg', 'ter', 'qua', 'qui', 'sex', 'sab', 'dom')\nfauxfactory.factories.choices.gen_choice(week_days)", "_____no_output_____" ], [ "for _ in range(10):\n week_day = fauxfactory.factories.choices.gen_choice(week_days)\n print(week_day)", "dom\nter\nqui\nseg\nter\nsex\nsab\nqui\nqua\nseg\n" ], [ "fauxfactory.factories.strings.gen_iplum()", "_____no_output_____" ], [ "fauxfactory.factories.strings.gen_iplum(words=10)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
4a30061217bea85f82fe66b32d1a7a582c8bceab
2,170
ipynb
Jupyter Notebook
Lesson02/ComparisonOperations_exercises_answers.ipynb
KeyBlades/AI_course
6e01c47500da129d786dde987fd69ddc86fb84af
[ "MIT" ]
15
2019-05-15T14:25:51.000Z
2019-11-15T07:42:04.000Z
Lesson02/ComparisonOperations_exercises_answers.ipynb
KeyBlades/AI_course
6e01c47500da129d786dde987fd69ddc86fb84af
[ "MIT" ]
null
null
null
Lesson02/ComparisonOperations_exercises_answers.ipynb
KeyBlades/AI_course
6e01c47500da129d786dde987fd69ddc86fb84af
[ "MIT" ]
10
2019-06-12T06:34:56.000Z
2020-01-10T07:52:14.000Z
15.839416
51
0.44424
[ [ [ "# Answers for Comparison Operations Exercises", "_____no_output_____" ], [ "Question 1\n\nWhich one is the odd number?\nA) 13\nB) 72\nC) 255", "_____no_output_____" ] ], [ [ "13 % 2 == 1", "_____no_output_____" ], [ "72 % 2 == 1", "_____no_output_____" ], [ "255 % 2 == 1", "_____no_output_____" ] ], [ [ "Question 3\n\nCheck if 63 is between 50 and 100.", "_____no_output_____" ] ], [ [ "a = 63\n50 < a < 100", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
4a300b191b1e1cae24d1be20b09b7558f3ce3aba
132,555
ipynb
Jupyter Notebook
_static/rotationDithering.ipynb
lsst-sims/smtn-004
173677a7fca90c8aafd10218611a67fbe2a7b8c5
[ "CC-BY-4.0" ]
null
null
null
_static/rotationDithering.ipynb
lsst-sims/smtn-004
173677a7fca90c8aafd10218611a67fbe2a7b8c5
[ "CC-BY-4.0" ]
null
null
null
_static/rotationDithering.ipynb
lsst-sims/smtn-004
173677a7fca90c8aafd10218611a67fbe2a7b8c5
[ "CC-BY-4.0" ]
null
null
null
384.217391
53,988
0.942839
[ [ [ "import numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline\nimport lsst.sims.maf.slicers as slicers\nimport lsst.sims.maf.metrics as metrics\nimport lsst.sims.maf.metricBundles as metricBundles\nimport lsst.sims.maf.db as db\nimport healpy as hp\nfrom lsst.sims.utils import haversine", "_____no_output_____" ], [ "names = ['fieldRA', 'fieldDec', 'rotSkyPos', 'observationStartMJD']\ntypes = [float]*4\nnpts = 180\ndata = np.zeros(npts, dtype=list(zip(names,types)))\ndata['rotSkyPos'] = np.arange(0,npts*2,2)\ndeltaMJD = 34.*npts/3600./24.\nexpMJD_0 = 57000.\ndata['observationStartMJD'] = expMJD_0+np.arange(0,deltaMJD, 34./3600./24.)", "_____no_output_____" ], [ "outDir = 'temp'\nresultsDb = db.ResultsDb(outDir=outDir)", "_____no_output_____" ], [ "nside = 2048\nmetric = metrics.CountMetric(col='fieldRA')\nslicer = slicers.HealpixSlicer(nside=nside, useCamera=True, useCache=False)\nbundle = metricBundles.MetricBundle(metric,slicer,'')", "Healpix slicer using NSIDE=2048, approximate resolution 1.717743 arcminutes\n" ], [ "bg = metricBundles.MetricBundleGroup({1:bundle},None, outDir=outDir, resultsDb=resultsDb)\nbg.setCurrent('')\nbg.runCurrent('',simData=data)", "/Users/yoachim/git_repos/sims_maf/python/lsst/sims/maf/metricBundles/metricBundleGroup.py:110: UserWarning: Warning: dbObj should be an instantiated Database (or child) object.\n warnings.warn('Warning: dbObj should be an instantiated Database (or child) object.')\n" ], [ "bundle.metricValues.data[np.where(bundle.metricValues.mask == True)] = hp.UNSEEN\nhp.gnomview(bundle.metricValues.data)", "_____no_output_____" ], [ "print('fill factor = %f' % (bundle.metricValues.mean()/bundle.metricValues.max()))", "fill factor = 0.893305\n" ], [ "good = np.where(bundle.metricValues.mask != True)\n# compute the ra and dec of each pixel, find distance to 0,0\nlat, ra = hp.pix2ang(nside, np.arange(hp.nside2npix(nside))[good])\ndec = np.pi/2. 
- lat\nradius = haversine(ra,dec,0.,0.)", "_____no_output_____" ], [ "plt.plot(np.degrees(radius), bundle.metricValues.data[good], 'ko', alpha=.5)\nplt.xlabel('Radius (degrees)')\nplt.ylabel('Number of Observations (max %i)' % npts)", "_____no_output_____" ], [ "plt.plot(np.degrees(radius), bundle.metricValues.data[good]/npts, 'ko', alpha=.5)\nplt.xlabel('Radius (degrees)')\nplt.ylabel('Fill Factor')", "_____no_output_____" ], [ "# Let's look at just one pointing\nbg.runCurrent('',simData=data[0:1])", "Running: [1]\nCompleted metric generation.\nRunning reduce methods.\nRunning summary statistics.\nCompleted.\n" ], [ "bundle.metricValues.data[np.where(bundle.metricValues.mask == True)] = hp.UNSEEN\nhp.gnomview(bundle.metricValues.data)", "_____no_output_____" ], [ "print('fill factor = %f' % (bundle.metricValues.mean()/bundle.metricValues.max()))", "fill factor = 1.000000\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]