**Column schema** (name, dtype, value stats):

| Column | dtype | Stats |
| --- | --- | --- |
| hexsha | stringlengths | 40 – 40 |
| size | int64 | 6 – 14.9M |
| ext | stringclasses | 1 value |
| lang | stringclasses | 1 value |
| max_stars_repo_path | stringlengths | 6 – 260 |
| max_stars_repo_name | stringlengths | 6 – 119 |
| max_stars_repo_head_hexsha | stringlengths | 40 – 41 |
| max_stars_repo_licenses | list | |
| max_stars_count | int64 | 1 – 191k |
| max_stars_repo_stars_event_min_datetime | stringlengths | 24 – 24 |
| max_stars_repo_stars_event_max_datetime | stringlengths | 24 – 24 |
| max_issues_repo_path | stringlengths | 6 – 260 |
| max_issues_repo_name | stringlengths | 6 – 119 |
| max_issues_repo_head_hexsha | stringlengths | 40 – 41 |
| max_issues_repo_licenses | list | |
| max_issues_count | int64 | 1 – 67k |
| max_issues_repo_issues_event_min_datetime | stringlengths | 24 – 24 |
| max_issues_repo_issues_event_max_datetime | stringlengths | 24 – 24 |
| max_forks_repo_path | stringlengths | 6 – 260 |
| max_forks_repo_name | stringlengths | 6 – 119 |
| max_forks_repo_head_hexsha | stringlengths | 40 – 41 |
| max_forks_repo_licenses | list | |
| max_forks_count | int64 | 1 – 105k |
| max_forks_repo_forks_event_min_datetime | stringlengths | 24 – 24 |
| max_forks_repo_forks_event_max_datetime | stringlengths | 24 – 24 |
| avg_line_length | float64 | 2 – 1.04M |
| max_line_length | int64 | 2 – 11.2M |
| alphanum_fraction | float64 | 0 – 1 |
| cells | list | |
| cell_types | list | |
| cell_type_groups | list | |
cb9989aabfca5df3e29c92954cdb8cae04c0015a
182,538
ipynb
Jupyter Notebook
multifidelity/codes_for_plots/Figure3cd/plot_figure3cd.ipynb
dgaines2/megnet
b2fd0903c743237646a1f5a9cfafc9614da182ed
[ "BSD-3-Clause" ]
367
2018-12-13T14:49:00.000Z
2022-03-31T10:17:04.000Z
multifidelity/codes_for_plots/Figure3cd/plot_figure3cd.ipynb
kdmsit/MEGNet
4f3c76c6b99edcb41d52ae5e8ae9dc89956d33d1
[ "MIT" ]
162
2019-02-08T20:38:12.000Z
2022-03-31T21:13:06.000Z
multifidelity/codes_for_plots/Figure3cd/plot_figure3cd.ipynb
kdmsit/MEGNet
4f3c76c6b99edcb41d52ae5e8ae9dc89956d33d1
[ "MIT" ]
119
2018-12-17T10:16:12.000Z
2022-03-31T17:26:57.000Z
903.653465
107,460
0.953686
[ [ [ "import matplotlib.pyplot as plt\n%matplotlib inline\nimport pickle\nimport numpy as np\nfrom scipy.spatial.distance import pdist, squareform\n\nwith open('exp_features.p', 'rb') as f:\n data = pickle.load(f)", "_____no_output_____" ] ], [ [ "## visualize", "_____no_output_____" ] ], [ [ "def get_continuous_quantile(x, y, n_interval=100, q=1):\n \"\"\"\n Take continuous x and y, bin the data according to the intervals of x\n and then calculate the quantiles of y within this bin\n \n Args:\n x (list): array of x values\n y (list): array of y values\n n_interval (int): number of intervals on x\n q (float): quantile value [0, 1]\n \"\"\"\n ind = np.argsort(x)\n x = x[ind]\n y = y[ind]\n boundaries = np.linspace(x[0], x[-1], n_interval+1)\n \n dx = boundaries[1] - boundaries[0]\n x_center = np.linspace(x[0]+dx/2, x[-1]-dx/2, n_interval)\n y_q = []\n for x_min, x_max in zip(boundaries[:-1], boundaries[1:]):\n ind = (x>=x_min) & (x<x_max)\n ys = y[ind]\n if len(ys) > 0:\n y_q.append(np.quantile(ys, q))\n else:\n y_q.append(y_q[-1])\n y_q = np.array(y_q)\n return x_center, y_q\n\n\ndef visualize(key, n_interval=100, interval=5, alpha=0.5, data_file=\"100_0.xlsx\"):\n \"\"\"\n Visualize the data specified by key. \n \n Args:\n key (str): key in data \n n_interval (int): number of intervals for drawing the quantile bounds\n interval (int): subsamping of the data. Sometimes the input data is too large for visualization\n we just subsample the data\n \"\"\"\n keys = list(data['band_gap'].keys())\n f = np.concatenate([data[key][i] for i in keys], axis=0)\n values = np.array([data['band_gap'][i] for i in keys])\n sort_index = np.argsort(values)\n fscale = (f-np.min(f, axis=0)) / (np.max(f, axis=0) - np.min(f, axis=0))\n d = pdist(fscale)\n v_dist = pdist(values.reshape((-1, 1)))\n ind = (d>0) & (d<1) \n d_ = d[ind]\n v_ = v_dist[ind]\n \n #print(d_.shape, v_.shape)\n x_center, y_q = get_continuous_quantile(d_, v_, n_interval=n_interval, q=1)\n plt.rcParams['font.size'] = 22\n plt.rcParams['font.family'] = 'Arial'\n plt.figure(figsize=(5.7, 5.0 ))\n d_ = d_[::interval]\n v_ = v_[::interval]\n print(v_.shape)\n plt.plot(d_, v_, 'o', alpha=alpha, c='#21c277')\n plt.plot(x_center, y_q, '--', c='#21c277', lw=2, alpha=0.5)\n \n \n import pandas as pd\n x = np.round(np.concatenate([d_, x_center]), 3)\n y = np.round(np.concatenate([v_, y_q]), 3)\n df = pd.DataFrame({\"dF\": x, \"dEg\": y})\n with pd.ExcelWriter(data_file) as writer:\n df.to_excel(writer)\n \n plt.xlim([0, 1])\n plt.ylim([0, 13])\n plt.xticks(np.linspace(0, 1, 5))\n plt.yticks(np.linspace(0, 12.5, 6))\n plt.xlabel('$d_{F}$ (a.u.)')\n plt.ylabel(\"$\\Delta E_{g}$ (eV)\")\n plt.tight_layout()\n \n", "_____no_output_____" ], [ "visualize('100_0', n_interval=100, interval=15, alpha=0.08, data_file='100_0.xlsx')\nplt.savefig(\"100_0.pdf\")", "(51864,)\n" ], [ "visualize('100_41000', n_interval=100, interval=15, alpha=0.08, data_file='100_41000.xlsx')\nplt.savefig(\"100_41000.pdf\")", "(12016,)\n" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
cb998ace9d2a66fee5f7ff8f028325b9b71e2736
1,169
ipynb
Jupyter Notebook
examples/basic-example-ibis-pandas.ipynb
isabella232/ibis-vega-transform
6019825a05cefec153363ded01d5ded4a6befa75
[ "Apache-2.0" ]
null
null
null
examples/basic-example-ibis-pandas.ipynb
isabella232/ibis-vega-transform
6019825a05cefec153363ded01d5ded4a6befa75
[ "Apache-2.0" ]
1
2021-04-08T11:03:25.000Z
2021-04-08T11:03:25.000Z
examples/basic-example-ibis-pandas.ipynb
isabella232/ibis-vega-transform
6019825a05cefec153363ded01d5ded4a6befa75
[ "Apache-2.0" ]
null
null
null
21.648148
71
0.485885
[ [ [ "import altair as alt\nimport ibis_vega_transform\nimport ibis\nimport pandas as pd\n \n \nsource = pd.DataFrame({ \n 'a': ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I'], \n 'b': [28, 55, 43, 91, 81, 53, 19, 87, 52]\n})\n \n# or ibis.pandas if ibis version < 1.4 \nconnection = ibis.backends.pandas.connect({'source': source })\ntable = connection.table('source')\n \nalt.Chart(table).mark_bar().encode(\n x='a',\n y='b' \n)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code" ] ]
cb999b45590e6d438b655ff81a8386c5d8b8c461
214,509
ipynb
Jupyter Notebook
Lab 8 Notebook.ipynb
ahlove3/480
cbf1a3d122510f988c5cd51f0c4c6d48a042809a
[ "MIT" ]
null
null
null
Lab 8 Notebook.ipynb
ahlove3/480
cbf1a3d122510f988c5cd51f0c4c6d48a042809a
[ "MIT" ]
null
null
null
Lab 8 Notebook.ipynb
ahlove3/480
cbf1a3d122510f988c5cd51f0c4c6d48a042809a
[ "MIT" ]
null
null
null
957.629464
85,192
0.953564
[ [ [ "import sklearn\nfrom sklearn.model_selection import train_test_split\nfrom matplotlib import pyplot as plt\n%matplotlib inline\nimport pandas\nimport numpy as np\nimport mglearn\nfrom collections import Counter\nfrom sklearn.metrics import cohen_kappa_score\nfrom sklearn import preprocessing\n\n\ndf = pandas.read_excel('house_price_label.xlsx')\n# combine multipl columns into a 2D array\n# also convert the integer data to float data\nX = np.column_stack((df.built_in.astype(float),df.price.astype(float))) \nX = preprocessing.scale(X) # scale the data before training the model\ny = df.house_type\nX_train, X_test, y_train, y_test = train_test_split(X, y,test_size =0.3,stratify = y, random_state=0) \n\n# for classification, make sure a stratify splitting method is selected\nmglearn.discrete_scatter(X[:,0],X[:,1],y) # use mglearn to visualize data\n\nplt.legend(y,loc='best')\nplt.xlabel('build_in')\nplt.ylabel('house price')\nplt.show()", "_____no_output_____" ], [ "from sklearn.neural_network import MLPClassifier\n\nmlp = MLPClassifier(solver='lbfgs',hidden_layer_sizes=(10,), random_state=0).fit(X_train, y_train)\nmglearn.discrete_scatter(X_train[:, 0], X_train[:, 1],mlp.predict(X_train))\nplt.legend(y,loc='best')\nplt.xlabel('build_in')\nplt.ylabel('house price')\nplt.show()\n\nprint(\"Training set accuracy: {:.2f}\".format(mlp.score(X_train, y_train)))\nprint (\"Training Kappa: {:.3f}\".format(cohen_kappa_score(y_train,mlp.predict(X_train))))\nprint(\"Test set accuracy: {:.2f}\".format(mlp.score(X_test, y_test)))\nprint (\"Test Kappa: {:.3f}\".format(cohen_kappa_score(y_test,mlp.predict(X_test))))", "_____no_output_____" ], [ "from sklearn.neural_network import MLPClassifier\n\nmlp = MLPClassifier(solver='lbfgs', hidden_layer_sizes=(20,20,20), random_state=0).fit(X_train, y_train)\nmglearn.discrete_scatter(X_train[:, 0], X_train[:, 1],mlp.predict(X_train))\nplt.legend(y,loc='best')\nplt.xlabel('build_in')\nplt.ylabel('house price')\nplt.show()\n\nprint(\"Training set accuracy: {:.2f}\".format(mlp.score(X_train, y_train)))\nprint (\"Training Kappa: {:.3f}\".format(cohen_kappa_score(y_train,mlp.predict(X_train))))\nprint(\"Test set accuracy: {:.2f}\".format(mlp.score(X_test, y_test)))\nprint (\"Test Kappa: {:.3f}\".format(cohen_kappa_score(y_test,mlp.predict(X_test))))", "_____no_output_____" ], [ "fig, axes = plt.subplots(2, 4, figsize=(20, 8))\nfor axx, n_hidden_nodes in zip(axes, [10, 20]):\n for ax, alpha in zip(axx, [0.0001, 0.01, 0.1, 1]):\n mlp = MLPClassifier(solver='lbfgs', random_state=0,\n hidden_layer_sizes=[n_hidden_nodes, n_hidden_nodes],\n alpha=alpha)\n\n mlp.fit(X_train, y_train)\n mglearn.discrete_scatter(X_train[:, 0], X_train[:, 1], mlp.predict(X_train), ax=ax)\n ax.set_title(\"n_hidden=[{}, {}]\\nalpha={:.4f}\\nkapa={:.4f}\".format(\n n_hidden_nodes, n_hidden_nodes, alpha,cohen_kappa_score(y_train,mlp.predict(X_train))))\n \nplt.subplots_adjust(hspace=0.5)", "_____no_output_____" ], [ "mlp = MLPClassifier(solver='lbfgs', hidden_layer_sizes=(20,20), random_state=0).fit(X_train, y_train)\nfig, axes = plt.subplots(1, 3, figsize=(20, 8))\nfor i , ax in zip(range(3),axes):\n\n img = ax.imshow(mlp.coefs_[i], interpolation='none', cmap='viridis')\n\n ax.set_title(\" No.{} layer\".format(i))\n ax.set_xlabel(\"Columns in weight matrix\")\n ax.set_ylabel(\"Input feature\")\n fig.colorbar(img, ax = ax)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code" ] ]
cb99a30355e5899526a35d4c47c766830bab4b39
96,298
ipynb
Jupyter Notebook
Nombres Complexes.ipynb
Pango01/conda
c4afdfe13f25a31d728c4774d277b2b60e6ca207
[ "BSD-3-Clause" ]
null
null
null
Nombres Complexes.ipynb
Pango01/conda
c4afdfe13f25a31d728c4774d277b2b60e6ca207
[ "BSD-3-Clause" ]
null
null
null
Nombres Complexes.ipynb
Pango01/conda
c4afdfe13f25a31d728c4774d277b2b60e6ca207
[ "BSD-3-Clause" ]
null
null
null
224.995327
18,204
0.917371
[ [ [ "# **Lab III : Nombres et signaux complexes**\n-----------------\n+ **Cours \"Physique du Numérique\"** - Portail René Descartes - AMU\n\nPréparé par :\n\n- Jean-Marc Themlin (v. 2021-09), Aix-Marseille Université © Contenus à diffusion restreinte, dans le cadre de ce cours.\n\n------------------", "_____no_output_____" ], [ "#### La cellule ci-dessous, à exécuter (`Shift-Enter`) en tout premier lieu, contient les appels aux librairies nécessaires pour exécuter la suite du TP, ainsi que la fonction `cmplxdraw` qui vous sera utile pour visualiser des nombres complexes dans le diagramme d'Argand. ", "_____no_output_____" ] ], [ [ "import math as m\nimport cmath as c\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef cmplxdraw(z1):\n \"\"\" Draws a list of complex numbers as vectors in the complex plane \n \n :z1: array. Array of complex numbers [za,zb,zc,...]\n :lim: float/int. Limits of the (square) graph\n \"\"\"\n\n tt=np.arange(0,1,1/2**6)\n z = np.exp(1j*2*np.pi*tt)\n\n fig = plt.figure() # initialise la figure\n ax=fig.add_subplot(1,1,1)\n plt.plot(np.real(z),np.imag(z))\n plt.plot(np.real(z1),np.imag(z1),'or')\n plt.axis('square')\n plt.plot(0,0,'xb')\n m1 = max(abs(np.real(z1)))\n m2 = max(abs(np.imag(z1)))\n m = max(m1,m2)\n zoom=1.35\n plt.xlim(-zoom*m,zoom*m)\n plt.ylim(-zoom*m,zoom*m)\n plt.grid('on')\n \n plt.quiver([0, 0], [0, 0], [0, 1], [1, 0], angles='xy', scale_units='xy', scale=1)\n plt.quiver([0, 0], [0, 0], [0, m], [m, 0], angles='xy', scale_units='xy', scale=1)\n i=-1\n \n for ind in z1:\n i=i+1\n ax.annotate('z'+str(i+1), (np.real(z1[i])+0.3,np.imag(z1[i])),fontsize=10)\n ax.quiver(0, 0, np.real(z1[i]), np.imag(z1[i]), angles='xy', scale_units='xy', scale=1)\n \n plt.title('Plan complexe')\n plt.xlabel('Axe des réels x')\n plt.ylabel('Axe des imaginaires y')\n ax.set_facecolor(\"pink\")", "_____no_output_____" ] ], [ [ "## **III.1 Nombres complexes avec Python**\n\n### III.1.A Définition d'une variable contenant un nombre complexe\n\nIl y a deux manières possibles pour définir le nombre complexe sous sa forme cartésienne. On peut également utiliser la forme polaire $z=|z| \\ e^{Arg(z)}$, le résultat est affiché sous forme cartésienne. 
", "_____no_output_____" ] ], [ [ "y=complex(1,1)\nprint(y)\nprint(y.real,y.imag) # real et imag sont des attributs d'une variable complexe\nprint(np.real(y),np.imag(y),abs(y))\nf\"Le module de y vaut {abs(y):.4f}\" # This is a formatted string literal", "(1+1j)\n1.0 1.0\n1.0 1.0 1.4142135623730951\n" ], [ "x=1+1j\nprint(x)\nprint(x.real,x.imag,abs(y))\n## ou bien\nprint(np.real(x),np.imag(x),abs(y))\nprint(\"Conversion de x sous forme polaire :\")\nprint(\" module et argument : \",c.polar(x))", "(1+1j)\n1.0 1.0 1.4142135623730951\n1.0 1.0 1.4142135623730951\nConversion de x sous forme polaire :\n module et argument : (1.4142135623730951, 0.7853981633974483)\n" ], [ "z=2*np.exp(1j*np.pi/4)\nprint(z)\nprint(\"Conversion de z sous forme cartésienne :\")\nprint(\" Partie réelle : \",z.real,\" Partie imaginaire\",z.imag)\nprint(\"Autre manière possible, à partir des coordonnées polaires :\")\nprint(\" Partie réelle : \",c.rect(2,np.pi/4))", "(1.4142135623730951+1.4142135623730951j)\nConversion de z sous forme cartésienne :\n Partie réelle : 1.4142135623730951 Partie imaginaire 1.4142135623730951\nAutre manière possible, à partir des coordonnées polaires :\n Partie réelle : (1.4142135623730951+1.4142135623730951j)\n" ] ], [ [ "### III.1.B Tracé d'un nombre dans le plan complexe\n\nLa fonction `cmplxdraw` définie ci-dessus affiche dans le plan complexe les nombres complexes contenus dans un *array* de *numpy* de type [z1,z2,...]. ", "_____no_output_____" ] ], [ [ "z=[1+1j]\ncmplxdraw(z)\nz", "_____no_output_____" ], [ "z = [1+1j,1j,2*np.exp(1j*3*np.pi/4),np.exp(1j*3*np.pi/2)]\ncmplxdraw(z)", "_____no_output_____" ], [ "y=complex(1,1)\ny", "_____no_output_____" ] ], [ [ "### III.1.C Opérations sur les nombres complexes\n\n", "_____no_output_____" ] ], [ [ "x=np.sqrt(2)*(1+1j)/2\ny=np.sqrt(2)*(1-1j)/2\nza=x+y\nzb=x-y\nzc=y-x\ncmplxdraw([x,y,za,zb,zc])", "_____no_output_____" ], [ "x=3*np.exp(2j*np.pi/3)\ny=4*np.exp(-1j*np.pi/6)\nz=x-y\ncmplxdraw([x,y,z])", "_____no_output_____" ], [ "print(-1.5-2*np.sqrt(3))\nnp.sqrt(3)/2+2\nargz=m.atan(-2/np.sqrt(2))\nprint(argz)\nprint(3*argz)\nprint(180*argz/np.pi)\n8*np.sqrt(6)\n4*np.sqrt(2)\n\nz=np.sqrt(2)-2j\n# z=2*np.exp(1j*np.pi)\nz8=z**3\nz9=1/z\nz10=np.sqrt(z)\nz11=(1-1j)**2\nz12=-4+3j\nz13=1/z12\nprint(abs(z),abs(z8),abs(z9),abs(z10))\ncmplxdraw([z,z9,z10,z11,z12,z13])\n\n6**(3/2)\n6**0.25\nnp.sin(np.pi/6)\nm.atan(-3/4)\nnp.exp(4j)\n5*m.atan(-4/3)\n5**5\n13**2", "-4.964101615137754\n-0.9553166181245092\n-2.8659498543735276\n-54.735610317245346\n2.449489742783178 14.69693845669907 0.408248290463863 1.5650845800732873\n" ] ], [ [ "## **Exponentielles complexes avec Python**", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ] ]
cb99a3686a5a73234827d682bab6bd3d68361196
26,754
ipynb
Jupyter Notebook
Pydata/Untitled1.ipynb
NoobSolver/Python-Practice-files
2e2dcf34835f50d0f1a738e0fd2069f20f0d8b52
[ "Apache-2.0" ]
1
2020-03-08T10:52:31.000Z
2020-03-08T10:52:31.000Z
Pydata/Untitled1.ipynb
NoobSolver/Python-Practice-files
2e2dcf34835f50d0f1a738e0fd2069f20f0d8b52
[ "Apache-2.0" ]
null
null
null
Pydata/Untitled1.ipynb
NoobSolver/Python-Practice-files
2e2dcf34835f50d0f1a738e0fd2069f20f0d8b52
[ "Apache-2.0" ]
null
null
null
23.84492
1,524
0.487628
[ [ [ "print(\"Hello world!\")", "Hello world!\n" ], [ "a=10", "_____no_output_____" ], [ "a", "_____no_output_____" ], [ "b=5", "_____no_output_____" ], [ "b", "_____no_output_____" ], [ "\n#addition demo\nsum=a+b\nprint(\"the sum of a and b is:\",sum)", "the sum of a and b is: 15\n" ], [ "x=2**3\nx", "_____no_output_____" ], [ "y=5/2\ny", "_____no_output_____" ], [ "y=5//2\ny", "_____no_output_____" ], [ "input(\"Enter some variable\")\n", "Enter some variable25\n" ], [ "a=int(input(\"enter the first number\"))\nb=int(input(\"enter the second number\"))\nint(\"The sum of first number and second number is:\",a+b)\nint(\"The difference of the first and second number is:\",a-b)\nint(\"The product of the first and second numberis:\",a*b)\nint(\"The quotient of the first and second number is:\",a//b)\n\n", "enter the first number5\nenter the second number6\n" ], [ "new=(\"String demo\")\nnew", "_____no_output_____" ], [ "#Slice operator\n#slice operator works like this\n# string variable[x::y::z]\n#[x::y::z]\n#[start_index=:end index 1:skip index:2]", "_____no_output_____" ], [ "new[0:5:2]", "_____no_output_____" ], [ "new[0:]", "_____no_output_____" ], [ "new[5:]", "_____no_output_____" ], [ "new*2", "_____no_output_____" ], [ "new*5\n", "_____no_output_____" ], [ "#repetition operator\nnew*3", "_____no_output_____" ], [ "#concatenation operator\nnew +\" Test\"", "_____no_output_____" ], [ "new", "_____no_output_____" ], [ "#short way a=10", "_____no_output_____" ], [ "new += \" Extra\"", "_____no_output_____" ], [ "listt= [ \"Kar\",'']\n", "_____no_output_____" ], [ "listt", "_____no_output_____" ], [ "#length method to find the value of n(the number of elements in list)\n#len()\nlen(listt)", "_____no_output_____" ], [ "students=[\"Akhil\",\"Akila\",\"John\",\"sonum\",\"khushi\"]\nstudents", "_____no_output_____" ], [ "#append function- add a new element to the existing list\n#append9()\nstudents.append(\"jyoti\")", "_____no_output_____" ], [ "students", "_____no_output_____" ], [ "#Insert function- add a new eleent at the end of index position given\n#insert (index,value)\nstudents.insert(3,\"papa\")\nstudents\n", "_____no_output_____" ], [ "students\n", "_____no_output_____" ], [ "#if else in python\nage=int(input(\"enter your age:\"))\nif age>=18:\n print(\"You are eligible\")\n else:\n print(\"You are not eligible\")\n ", "_____no_output_____" ], [ "#No switch case in python\n#if-elif-lse block\n\na=200\nb=33\n\nif b>a:\n print(\"B is greater than A\")\nelif a==b :\n print(\"A is equal to B\")\nelse:\n print(\"A is greater than B\")\n ", "A is greater than B\n" ] ], [ [ "### Nested if example\n\nage=18\nif age>=18:\n print(\"Allow inside club\")\n if age>=21:\n print(\"Drinking allowed\")\n else:\n print(\"Drinking not allowed\")\nelse : \n print(\"Not allowed inside club\")", "_____no_output_____" ] ], [ [ "# For loop\n#range function = for var in range(int,int,int):\n#range(value)= start from 0 upto value\n#range(value 1,value2)= start from value 1 and go upto value2\n#range(v1,v2,v3)= start from v1 and go upto v2 and skip every v3\n#for x in range(2,9,2):\n#x=[2,4,6,8]\n#print all value of variable one by one\n\n for x in range(2,9,2):\n print(\"The value of x is:\",x)", "_____no_output_____" ], [ "# i=[1,2,3,4,5]\n# i=5\nfor i in range(1,6):\n print(i*\"*\")", "*\n**\n***\n****\n*****\n" ], [ "# while loop\n# while condition:\n# statement\n# incrementer\n\nx=0\nwhile x<4:\n print(x)\n x+=1", "0\n1\n2\n3\n" ], [ "for x in range(1,10):\n if x==6:\n print(\"Existing loop\")\n break\n 
else:\n print(\"The value of x is :\",x)\n ", "The value of x is : 1\nThe value of x is : 2\nThe value of x is : 3\nThe value of x is : 4\nThe value of x is : 5\nExisting loop\n" ], [ "def add(a,b):\n print(\"a=\",a)\n print(\"b=\",b)\n return a+b\n\nc= add(5,2)\nprint(c)", "a= 5\nb= 2\n7\n" ] ], [ [ "###### for i=[1,2,3,4,5,6]\n#fori=5\n for i in range(1,6):\n print(\"i=1,i<=6,i++\")", "_____no_output_____" ] ], [ [ "i=1\n \nfor i in range(1,6):\n j=1\n for j in range(i,i+1):\n print(j,end=\" \")\n print()", "1 \n2 \n3 \n4 \n5 \n" ], [ "class student:\n def_init_(self,sname,sage,spercent):\n ", "_____no_output_____" ], [ "listt=['kar','abcd',706,2.33,'johny',70.2,36.3,755]\nlistt", "_____no_output_____" ], [ "type(listt[0])", "_____no_output_____" ], [ "type(listt[3])", "_____no_output_____" ], [ "int i2;\ndouble d2;\nchar s2[100]; // this is not scalable for input of unknown size\n\n// Read inputs from stdin\nscan(\"%d\", &i2);\nscan(\"%lf\", &d2);\nscan(\"%*[\\n] %[^\\n]\", s2); \n\n// Print outputs to stdout\nprint(\"%d\\n\", i + i2);\nprint(\"%.01lf\\n\", d + d2);\nprint(\"%s%s\", s, s2);", "_____no_output_____" ], [ "i=int(input(4))\nd=int(input(4.0))\ns=int(input(Hackerank))\ni2 = int(input(12)) # read int\nd2 = float(input()) # read double \ns2 = input() # read string\n\n# print summed and concatenated values\nprint(i + i2)\nprint(d + d2)\nprint(s + s2)", "9\n5\n5\n" ], [ ">>>str=\"hello\"\n>>>str[:2]\n>>>", "_____no_output_____" ], [ "import maths\ndef add(a,b):\n return a+b\ndef sub(a,b):\n return a-b\ndef mul(a,b):\n return a*b\ndef div(a,b):\n return a/b\ndef sqrt(a)\n return (math,sqrt.(a))\ndef powa\n return (a**a)\nwhile true:\n enter = input(\"enter function name:\")\n if function== 'addition'\n try: \n a =int(input(\"enter first number\"))\n b=int(input(\"enter second number\"))\n print(add(a,b))\n except Value error:\n print(\"please provide valid numbers\")\n if function == 'subtraction'\n try:\n a= int(input(\"enter first number\"))\n b= int(input(\"enter second number\"))\n print(sub(a,b))\n except Value error:\n print(\"please provide valid numbers\")\n if function == 'multiplication'\n try:\n a=int(input(\"enter first number\"))\n b= int(input(\"enter second number\"))\n print(mul(a,b))\n except Value error:\n print(\"please provide valid numbers\") \n if function ==;'division'\n try :\n a=int(input(\"enter first number\"))\n b= int(input(\"enter second number\"))\n print(div(a,b))\n except Value error:\n print(\"please\")\n \n \n \n \n\n ", "_____no_output_____" ], [ "import numpy as np\na= np.arange(30).reshape(2,15)\na\n", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb99b66cd6e4156aa50df170487ed58284656625
62,557
ipynb
Jupyter Notebook
HeroesOfPymoli/final_copy.ipynb
danieljahnsen/Pandas-Challenge
9e2d3f7dbc350a744fa79cc13a48dcbae294306c
[ "ADSL" ]
null
null
null
HeroesOfPymoli/final_copy.ipynb
danieljahnsen/Pandas-Challenge
9e2d3f7dbc350a744fa79cc13a48dcbae294306c
[ "ADSL" ]
null
null
null
HeroesOfPymoli/final_copy.ipynb
danieljahnsen/Pandas-Challenge
9e2d3f7dbc350a744fa79cc13a48dcbae294306c
[ "ADSL" ]
null
null
null
32.48027
156
0.378455
[ [ [ "### Note\n* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.", "_____no_output_____" ] ], [ [ "# Dependencies and Setup\nimport pandas as pd\n\n# File to Load (Remember to Change These)\nfile_to_load = \"Resources/purchase_data.csv\"\n\n# Read Purchasing File and store into Pandas data frame\npurchase_data = pd.read_csv(file_to_load)", "_____no_output_____" ], [ "#Display the purchase DataFrame\npurchase_data.head()", "_____no_output_____" ] ], [ [ "## Player Count", "_____no_output_____" ], [ "* Display the total number of players\n", "_____no_output_____" ] ], [ [ "#Create a dataframe using the lenght of the unique values in the \"SN\" column\nplayers = pd.DataFrame([{\"Total Players\" : len(purchase_data[\"SN\"].value_counts())}])\nplayers", "_____no_output_____" ] ], [ [ "## Purchasing Analysis (Total)", "_____no_output_____" ], [ "* Run basic calculations to obtain number of unique items, average price, etc.\n\n\n* Create a summary data frame to hold the results\n\n\n* Optional: give the displayed data cleaner formatting\n\n\n* Display the summary data frame\n", "_____no_output_____" ] ], [ [ "#Counting the number of unique items\nunique_items = len(purchase_data[\"Item Name\"].value_counts())\n\n#Calculate the average price\navg_price = purchase_data[\"Price\"].mean()\n\n#Total the number of purchases\npurchases = purchase_data[\"Purchase ID\"].count()\n\n#Calculate the total revenue\ntotal_revenue = purchase_data[\"Price\"].sum()\n\n\n#Create a DataFrame using the created values\nsummary_table = pd.DataFrame([{\"Number of Unique Items\": unique_items,\n \"Average Price\": avg_price,\n \"Number of Purchases\" : purchases,\n \"Total Revenue\": total_revenue}])\n\n#Changing the format so Average price and total revenue are shown as a currency\nsummary_table[\"Average Price\"] = summary_table[\"Average Price\"].map('${:,.2f}'.format)\nsummary_table[\"Total Revenue\"] = summary_table[\"Total Revenue\"].map('${:,.2f}'.format)\n\n\n#Display the summary table\nsummary_table", "_____no_output_____" ] ], [ [ "## Gender Demographics", "_____no_output_____" ], [ "* Percentage and Count of Male Players\n\n\n* Percentage and Count of Female Players\n\n\n* Percentage and Count of Other / Non-Disclosed\n\n\n", "_____no_output_____" ] ], [ [ "#First remove the duplicates from the 'SN' column so the counts are accurate\nunique_df = purchase_data.drop_duplicates(subset = ['SN'])\n\n#Get the total count of players\ntotal_players = unique_df['SN'].count()\n\n#Get the value counts for the genders\ngender_counts = unique_df['Gender'].value_counts()\n\n#Calculate the percentages of each gender\nmale_percent = (gender_counts[0] / total_players)\nfemale_percent = (gender_counts[1] / total_players)\nother_percent = (gender_counts[2] / total_players)\n\n#Create a percentage list for a pd dataframe\npercentages = [male_percent, female_percent, other_percent]\n\n\n#Create a pandas DataFrame and add the percentage column\ngender_df = pd.DataFrame(gender_counts)\ngender_df['Percentage of Players'] = percentages\n\n\n#Format the percentage column and rename the gender column\ngender_df['Percentage of Players'] = gender_df['Percentage of Players'].map('{:.2%}'.format)\ngender_df = gender_df.rename(columns = {\"Gender\" : \"Total Count\"})\n\n#Display the gender dataframe\ngender_df", "_____no_output_____" ] ], [ [ "\n## Purchasing Analysis (Gender)", "_____no_output_____" ], [ "* Run basic calculations to 
obtain purchase count, avg. purchase price, avg. purchase total per person etc. by gender\n\n\n\n\n* Create a summary data frame to hold the results\n\n\n* Optional: give the displayed data cleaner formatting\n\n\n* Display the summary data frame", "_____no_output_____" ] ], [ [ "#Create a df for the gender counts then sort so the index is alphabetized \ngender_df = pd.DataFrame(purchase_data['Gender'].value_counts())\ngender_df = gender_df.sort_index()\n\n#Getting a df for each gender\nfemale_df = purchase_data.loc[purchase_data['Gender'] == \"Female\", :]\nmale_df = purchase_data.loc[purchase_data['Gender'] == \"Male\", :]\nother_df = purchase_data.loc[purchase_data['Gender'] == \"Other / Non-Disclosed\", :]\n\n#Calculating the total purchase for each gender\nfemale_total = female_df['Price'].sum()\nmale_total = male_df['Price'].sum()\nother_total = other_df['Price'].sum()\n\n#Calculate the average price for each gender\nfemale_mean = female_df['Price'].mean()\nmale_mean = male_df['Price'].mean()\nother_mean = other_df['Price'].mean()\n\n#Getting the unique gender counts from the df created earlier\nun_gender =unique_df['Gender'].value_counts()\n\n#Calculating the average total per Person\nfemale_avg = female_total / un_gender[1]\nmale_avg = male_total / un_gender[0]\nother_avg = other_total / un_gender[2]\n\n#Creating the lists to append to the summary df\navg_price = [female_mean, male_mean, other_mean]\ntotal_value = [female_total, male_total, other_total]\navg_per_person = [female_avg, male_avg, other_avg]\n\n#Appending the new columns to the dataframe\ngender_df['Average Purchase Price'] = avg_price\ngender_df['Total Purchase Value'] = total_value\ngender_df['Avg Total Purchase Per Person'] = avg_per_person\n\n#Renaming the gender column and formatting the values\ngender_df['Average Purchase Price'] = gender_df['Average Purchase Price'].map('${:,.2f}'.format)\ngender_df['Total Purchase Value'] = gender_df['Total Purchase Value'].map('${:,.2f}'.format)\ngender_df['Avg Total Purchase Per Person'] = gender_df['Avg Total Purchase Per Person'].map('${:,.2f}'.format)\ngender_df = gender_df.rename(columns = {\"Gender\" : \"Purchase Count\"})\n\ngender_df\n", "_____no_output_____" ] ], [ [ "## Age Demographics", "_____no_output_____" ], [ "* Establish bins for ages\n\n\n* Categorize the existing players using the age bins. 
Hint: use pd.cut()\n\n\n* Calculate the numbers and percentages by age group\n\n\n* Create a summary data frame to hold the results\n\n\n* Optional: round the percentage column to two decimal points\n\n\n* Display Age Demographics Table\n", "_____no_output_____" ] ], [ [ "#First we will create the bins for the ages\nbins = [0,9.9,14.9,19.9,24.9,29.9,34.9,39.9,1000]\n\n#Create a name for each of the bins\ngroup_names = [\"<10\", \"10 to 14\", \"15 to 19\",\n \"20 to 24\", \"25 to 29\", \"30 to 34\",\n \"35 to 39\", \"40+\"]\n\n#Us pd.cut to create a column for the demographics using the unique df from earlier\ndemographics = pd.DataFrame(pd.cut(unique_df[\"Age\"], bins, labels=group_names, include_lowest=True))\n\n#Use value_counts to total the brackets and create a dataframe\nsummary_demo = pd.DataFrame(demographics[\"Age\"].value_counts())\n\n#Sorting and renaming the column\nsummary_demo = summary_demo.sort_index()\nsummary_demo = summary_demo.rename(columns = {\"Age\" : \"Total Count\"})\n\n#Calculating percentage by age group\n#First get the total count for all groups\ntotal_count = summary_demo[\"Total Count\"].sum()\n\n#Calculate the average for each age band\naverages = [(value / total_count) for value in summary_demo[\"Total Count\"]]\n\n#Add the averages to the DataFrame and format the column to percent\nsummary_demo[\"Percentage of Players\"] = averages\nsummary_demo[\"Percentage of Players\"] = summary_demo[\"Percentage of Players\"].map('{:.2%}'.format)\n\n#Display the data\nsummary_demo\n", "_____no_output_____" ] ], [ [ "## Purchasing Analysis (Age)", "_____no_output_____" ], [ "* Bin the purchase_data data frame by age\n\n\n* Run basic calculations to obtain purchase count, avg. purchase price, avg. purchase total per person etc. in the table below\n\n\n* Create a summary data frame to hold the results\n\n\n* Optional: give the displayed data cleaner formatting\n\n\n* Display the summary data frame", "_____no_output_____" ] ], [ [ "#Note: at this point, I realized I had not been using groupby and I did not feel like going back and re-doing \n#the first parts again, groupby made this a lot easier\n\n#Using the bins from before, we will append the age bands to the entire df and the unique df\npurchase_data['Age Band'] = pd.cut(purchase_data[\"Age\"], bins, labels=group_names, include_lowest=True)\n\n#Next we will make a groupby for the age bands\ngrouped = purchase_data.groupby(\"Age Band\")\n\n#We will count the totals for the age bands\nband_counts = grouped['Purchase ID'].count()\n\n#Using groupby we will get the total and the average per band\ntotal_bands = grouped['Price'].sum()\navg_bands = grouped['Price'].mean()\n\n#Get the individual counts from the unique df earlier \nu_grouped = unique_df.groupby(\"Age Band\")\nUband_counts = u_grouped['Purchase ID'].count()\n\n#Calcuate the average per person using the total purchase and the unique counts\nper_person = total_bands / Uband_counts\n\n#Add all of the calculations to a summary df\nanalysis = pd.DataFrame(band_counts)\nanalysis['Average Purchase Price'] = avg_bands\nanalysis['Total Purchase Value'] = total_bands\nanalysis['Avg Total Purchase Per Person'] = per_person\n\n#Formatting the columns\nanalysis['Average Purchase Price'] = analysis['Average Purchase Price'].map('${:,.2f}'.format)\nanalysis['Total Purchase Value'] = analysis['Total Purchase Value'].map('${:,.2f}'.format)\nanalysis['Avg Total Purchase Per Person'] = analysis['Avg Total Purchase Per Person'].map('${:,.2f}'.format)\nanalysis = 
analysis.rename(columns = {\"Purchase ID\" : \"Purchase Count\"})\n\n#Display the results\nanalysis\n", "_____no_output_____" ] ], [ [ "## Top Spenders", "_____no_output_____" ], [ "* Run basic calculations to obtain the results in the table below\n\n\n* Create a summary data frame to hold the results\n\n\n* Sort the total purchase value column in descending order\n\n\n* Optional: give the displayed data cleaner formatting\n\n\n* Display a preview of the summary data frame\n\n", "_____no_output_____" ] ], [ [ "#Using groupby we will group by the sn\nsn_group = purchase_data.groupby('SN')\n\n#First we will total the SN \nsn_total = sn_group['Item ID'].count()\n\n#Next we get the purchase sum for each SN\nsn_price = sn_group['Price'].sum()\n\n#Calculate the average for each SN\nsn_mean = sn_group['Price'].mean()\n\n#Create a summary dataframe\nsn_data = pd.DataFrame(sn_total)\nsn_data['Average Purchase Price'] = sn_mean\nsn_data['Total Purchase Value'] = sn_price\n\n#Sort and format the columns\nsn_data['Average Purchase Price'] = sn_data['Average Purchase Price'].map('${:,.2f}'.format)\nsn_data['Total Purchase Value'] = sn_data['Total Purchase Value'].map('${:,.2f}'.format)\nsn_data = sn_data.sort_values(by='Item ID', ascending = False)\nsn_data = sn_data.rename(columns = {\"Item ID\" : \"Purchase Count\"})\n\n\n#Display the data\nsn_data.head()", "_____no_output_____" ] ], [ [ "## Most Popular Items", "_____no_output_____" ], [ "* Retrieve the Item ID, Item Name, and Item Price columns\n\n\n* Group by Item ID and Item Name. Perform calculations to obtain purchase count, average item price, and total purchase value\n\n\n* Create a summary data frame to hold the results\n\n\n* Sort the purchase count column in descending order\n\n\n* Optional: give the displayed data cleaner formatting\n\n\n* Display a preview of the summary data frame\n\n", "_____no_output_____" ] ], [ [ "#I used the same code as before but I changed the grouping from SN to Items and Item ID\n\n#Using groupby we will group by the Item ID and Item Name\nitem_group = purchase_data.groupby(['Item ID', 'Item Name'])\n\n#First we will total the SN\nitem_total = item_group['SN'].count()\n\n#Next we get the purchase sum for each Item\nitem_price = item_group['Price'].sum()\n\n#Calculate the average for each Item\nitem_mean = item_group['Price'].mean()\n\n#Create a summary dataframe\nitem_data = pd.DataFrame(item_total)\nitem_data['Item Price'] = item_mean\nitem_data['Total Purchase Value'] = item_price\n\n# #Sort and format the columns\nitem_data['Item Price'] = item_data['Item Price'].map('${:,.2f}'.format)\nitem_data['Total Purchase Value'] = item_data['Total Purchase Value'].map('${:,.2f}'.format)\nitem_data = item_data.sort_values(by='SN', ascending = False)\nitem_data = item_data.rename(columns = {\"SN\" : \"Purchase Count\"})\n\n\n# #Display the data\nitem_data.head()", "_____no_output_____" ] ], [ [ "## Most Profitable Items", "_____no_output_____" ], [ "* Sort the above table by total purchase value in descending order\n\n\n* Optional: give the displayed data cleaner formatting\n\n\n* Display a preview of the data frame\n\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
cb99c0108f8825caa0aa065e6b58507d96c82e9c
35,175
ipynb
Jupyter Notebook
nbs/02a_data_anime_heads.ipynb
cwza/deep_t2i
22877fdd28ad407984ddc3bc4d57109c54c22fc0
[ "Apache-2.0" ]
null
null
null
nbs/02a_data_anime_heads.ipynb
cwza/deep_t2i
22877fdd28ad407984ddc3bc4d57109c54c22fc0
[ "Apache-2.0" ]
null
null
null
nbs/02a_data_anime_heads.ipynb
cwza/deep_t2i
22877fdd28ad407984ddc3bc4d57109c54c22fc0
[ "Apache-2.0" ]
1
2020-11-30T06:11:02.000Z
2020-11-30T06:11:02.000Z
88.602015
24,692
0.823056
[ [ [ "# export\nimport pandas as pd\nimport numpy as np\nfrom PIL import Image\nfrom sklearn.model_selection import train_test_split\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\nfrom fastcore.all import *", "_____no_output_____" ], [ "# default_exp data_anime_heads", "_____no_output_____" ], [ "data_dir = Path('../data/tiny_data/anime_heads')", "_____no_output_____" ] ], [ [ "# Data AnimeHeads\n> ", "_____no_output_____" ], [ "## Items", "_____no_output_____" ] ], [ [ "# exporti\ndef get_items(data_dir, pct=1, valid_pct=0.2):\n df = pd.read_csv(data_dir/'tags.csv', header=None, names=['id', 'cap'])\n df = df[:int(len(df)*pct)]\n if valid_pct==0:\n return df, pd.DataFrame(data=None, columns=['id', 'cap'])\n train_items, valid_items = train_test_split(df, test_size=valid_pct, random_state=42, shuffle=True, stratify=df.cap)\n return train_items, valid_items", "_____no_output_____" ], [ "train_items, valid_items = get_items(data_dir)\ntest_eq(len(train_items), 240)\ntest_eq(len(valid_items), 60)\ntrain_items[:5]", "_____no_output_____" ] ], [ [ "## Datasets", "_____no_output_____" ] ], [ [ "# export\nclass Tokenizer():\n def __init__(self):\n self.vocab = [\n '<pad>', 'orange hair', 'white hair', 'aqua hair', 'gray hair','green hair', 'red hair',\n 'purple hair', 'pink hair','blue hair', 'black hair', 'brown hair', 'blonde hair', 'black eyes', 'orange eyes',\n 'purple eyes', 'pink eyes', 'yellow eyes', 'aqua eyes', 'green eyes', 'brown eyes', 'red eyes', 'blue eyes',\n ]\n self.o2i = {v:k for k,v in enumerate(self.vocab)}\n self.max_seq_len = 2\n self.vocab_sz = len(self.vocab)\n self.pad_id = 0\n def encode(self, cap):\n \"cap: 'aqua hair aqua eyes', returns: tag: [2, 17], tag_len: 2\"\n cap = cap.split()\n tags = [' '.join(cap[:2]), ' '.join(cap[2:])]\n return [self.o2i[tags[0]], self.o2i[tags[1]]], self.max_seq_len\n def decode(self, o):\n \"o: [2, 17], returns: 'aqua hair aqua eyes'\"\n tags = [self.vocab[idx] for idx in o]\n# tags = [self.vocab[o[0]], self.vocab[o[1]]]\n return ' '.join(tags)", "_____no_output_____" ], [ "tokenizer = Tokenizer()\nori_cap = 'aqua hair aqua eyes'\ntags, tag_len = tokenizer.encode(ori_cap)\ntest_eq(tags, [3, 18])\ntest_eq(tag_len, 2)\nout_cap = tokenizer.decode(tags)\ntest_eq(out_cap, ori_cap)", "_____no_output_____" ], [ "# exporti\nclass AnimeHeadsDataset(Dataset):\n def __init__(self, items, data_dir):\n \"items: df of id and cap\"\n self.data_dir = data_dir\n self.items = list(items.itertuples(index=False, name=None))\n self.tokenizer = Tokenizer()\n def __len__(self):\n return len(self.items)\n def __getitem__(self, idx):\n return self.tfm(self.items[idx])\n def tfm(self, item):\n ''' item: (0, aqua hair aqua eyes), \n returns: tag: (2,), tag_len: (), img64: (64, 64, 3) '''\n img_id, cap = item\n tag, tag_len = self.tokenizer.encode(cap)\n \n img_path = self.data_dir/f'imgs/{img_id}.jpg'\n img64 = np.array(Image.open(img_path))\n if len(img64.shape)==2:\n img64 = np.repeat(img64[...,None], 3, axis=2)\n return torch.tensor(tag), torch.tensor(tag_len), torch.tensor(img64)", "_____no_output_____" ], [ "ds = AnimeHeadsDataset(train_items, data_dir)\ntag, tag_len, img64 = ds[0]\ntest_eq(tag.shape, (2,))\ntest_eq(tag_len.shape, ())\ntest_eq(img64.shape, (64, 64, 3))\n\nprint(tag, tag_len)\nplt.imshow(img64)", "tensor([ 3, 13]) tensor(2)\n" ], [ "# export\nclass Datasets():\n def __init__(self, data_dir, pct=1, valid_pct=0.2):\n train_items, valid_items = get_items(data_dir, pct=pct, valid_pct=valid_pct)\n self.train = 
AnimeHeadsDataset(train_items, data_dir)\n self.valid = AnimeHeadsDataset(valid_items, data_dir)", "_____no_output_____" ], [ "dsets = Datasets(data_dir)\ntest_eq(len(dsets.train), 240)\ntest_eq(len(dsets.valid), 60)", "_____no_output_____" ] ], [ [ "## DataLoaders", "_____no_output_____" ] ], [ [ "# export\nclass DataLoaders():\n def __init__(self, dsets, bs=64):\n self.dsets = dsets\n self.train = DataLoader(dsets.train, batch_size=bs, shuffle=True, num_workers=2, drop_last=True)\n self.valid = DataLoader(dsets.valid, batch_size=bs, shuffle=False, num_workers=2)", "_____no_output_____" ], [ "dls = DataLoaders(dsets, bs=16)", "_____no_output_____" ], [ "for tag, tag_len, img in dls.train:\n test_eq(tag.shape, (16, 2))\n test_eq(tag_len.shape, (16,))\n test_eq(img.shape, (16, 64, 64, 3))\n break", "_____no_output_____" ] ], [ [ "## Export -", "_____no_output_____" ] ], [ [ "# hide\nfrom nbdev.export import notebook2script\nnotebook2script()", "Converted 00_torch_utils.ipynb.\nConverted 02a_data_anime_heads.ipynb.\nConverted 02b_data_birds.ipynb.\nConverted 03a_model.ipynb.\nConverted 04a_trainer_DAMSM.ipynb.\nConverted 04b_trainer.ipynb.\nConverted 05a_inference_anime_heads.ipynb.\nConverted 05b_inference_birds.ipynb.\nConverted index.ipynb.\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
cb99f180cd82394aa045ad8283605062cb63683e
418,650
ipynb
Jupyter Notebook
tutorials/Certification_Trainings/Public/2.Text_Preprocessing_with_SparkNLP_Annotators_Transformers.ipynb
yogeshmj/spark-nlp-workshop
89c6668edaed432517c6fcbfe1f7745c54b19f40
[ "Apache-2.0" ]
1
2020-12-14T06:07:12.000Z
2020-12-14T06:07:12.000Z
tutorials/Certification_Trainings/Public/2.Text_Preprocessing_with_SparkNLP_Annotators_Transformers.ipynb
lkafle/spark-nlp-workshop
5e83fd3f1d98fd2421747e5dcdd72cbbb1d546d5
[ "Apache-2.0" ]
null
null
null
tutorials/Certification_Trainings/Public/2.Text_Preprocessing_with_SparkNLP_Annotators_Transformers.ipynb
lkafle/spark-nlp-workshop
5e83fd3f1d98fd2421747e5dcdd72cbbb1d546d5
[ "Apache-2.0" ]
null
null
null
88.322785
173,290
0.704627
[ [ [ "![JohnSnowLabs](https://nlp.johnsnowlabs.com/assets/images/logo.png)", "_____no_output_____" ], [ "\n\n[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/Certification_Trainings/Public/2.Text_Preprocessing_with_SparkNLP_Annotators_Transformers.ipynb)", "_____no_output_____" ], [ "# 2. Text Preprocessing with Spark NLP", "_____no_output_____" ], [ "**Note** Read this article if you want to understand the basic concepts in Spark NLP.\n\nhttps://towardsdatascience.com/introduction-to-spark-nlp-foundations-and-basic-components-part-i-c83b7629ed59", "_____no_output_____" ], [ "## Colab Setup", "_____no_output_____" ] ], [ [ "import os\n\n# Install java\n! apt-get update -qq\n! apt-get install -y openjdk-8-jdk-headless -qq > /dev/null\n\nos.environ[\"JAVA_HOME\"] = \"/usr/lib/jvm/java-8-openjdk-amd64\"\nos.environ[\"PATH\"] = os.environ[\"JAVA_HOME\"] + \"/bin:\" + os.environ[\"PATH\"]\n! java -version\n\n# Install pyspark\n! pip install --ignore-installed -q pyspark==2.4.4\n! pip install --ignore-installed -q spark-nlp==2.6.3", "_____no_output_____" ] ], [ [ "\n<b> if you want to work with Spark 2.3 </b>\n```\nimport os\n\n# Install java\n! apt-get update -qq\n! apt-get install -y openjdk-8-jdk-headless -qq > /dev/null\n\n!wget -q https://archive.apache.org/dist/spark/spark-2.3.0/spark-2.3.0-bin-hadoop2.7.tgz\n\n!tar xf spark-2.3.0-bin-hadoop2.7.tgz\n!pip install -q findspark\n\nos.environ[\"JAVA_HOME\"] = \"/usr/lib/jvm/java-8-openjdk-amd64\"\nos.environ[\"PATH\"] = os.environ[\"JAVA_HOME\"] + \"/bin:\" + os.environ[\"PATH\"]\nos.environ[\"SPARK_HOME\"] = \"/content/spark-2.3.0-bin-hadoop2.7\"\n! java -version\n\nimport findspark\nfindspark.init()\nfrom pyspark.sql import SparkSession\n\n! pip install --ignore-installed -q spark-nlp==2.5.5\n\nimport sparknlp\n\nspark = sparknlp.start(spark23=True)\n```", "_____no_output_____" ], [ "## 1. Annotators and Transformer Concepts", "_____no_output_____" ], [ "In Spark NLP, all Annotators are either Estimators or Transformers as we see in Spark ML. An Estimator in Spark ML is an algorithm which can be fit on a DataFrame to produce a Transformer. E.g., a learning algorithm is an Estimator which trains on a DataFrame and produces a model. A Transformer is an algorithm which can transform one DataFrame into another DataFrame. E.g., an ML model is a Transformer that transforms a DataFrame with features into a DataFrame with predictions.\nIn Spark NLP, there are two types of annotators: AnnotatorApproach and AnnotatorModel\nAnnotatorApproach extends Estimators from Spark ML, which are meant to be trained through fit(), and AnnotatorModel extends Transformers which are meant to transform data frames through transform().\nSome of Spark NLP annotators have a Model suffix and some do not. The model suffix is explicitly stated when the annotator is the result of a training process. Some annotators, such as Tokenizer are transformers but do not contain the suffix Model since they are not trained, annotators. Model annotators have a pre-trained() on its static object, to retrieve the public pre-trained version of a model.\nLong story short, if it trains on a DataFrame and produces a model, it’s an AnnotatorApproach; and if it transforms one DataFrame into another DataFrame through some models, it’s an AnnotatorModel (e.g. 
WordEmbeddingsModel) and it doesn’t take Model suffix if it doesn’t rely on a pre-trained annotator while transforming a DataFrame (e.g. Tokenizer).", "_____no_output_____" ], [ "By convention, there are three possible names:\n\nApproach — Trainable annotator\n\nModel — Trained annotator\n\nnothing — Either a non-trainable annotator with pre-processing\nstep or shorthand for a model\n\nSo for example, Stemmer doesn’t say Approach nor Model, however, it is a Model. On the other hand, Tokenizer doesn’t say Approach nor Model, but it has a TokenizerModel(). Because it is not “training” anything, but it is doing some preprocessing before converting into a Model.\nWhen in doubt, please refer to official documentation and API reference.\nEven though we will do many hands-on practices in the following articles, let us give you a glimpse to let you understand the difference between AnnotatorApproach and AnnotatorModel.\nAs stated above, Tokenizer is an AnnotatorModel. So we need to call fit() and then transform().", "_____no_output_____" ], [ "Now let’s see how this can be done in Spark NLP using Annotators and Transformers. Assume that we have the following steps that need to be applied one by one on a data frame.\n\n- Split text into sentences\n- Tokenize\n- Normalize\n- Get word embeddings", "_____no_output_____" ], [ "![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAABYwAAAGqCAYAAACoFQDjAAAgAElEQVR4AezdB7BlXVoW4DZiaamlhWUotUyllqlELWMZQMsAWIqKGJhSwAyCYgaVoMIgGAgCghJNgAQFZRBkBiMDIjkMScLMMANM/z3MP3//Yfo/1rPOec9dd999bt/uG/p233dX7bv2XnvtFd71rW9937vXWffWnTt3Nk899dTmjW984+bpp58e55vf/OaN89lnn93cvXt389xzz43Qvevnn39+88ILL2ze8pa37M/NZjOu792753KTUJrcu078iOyfIlAEikARKAJFoAgUgSJQBIpAESgCRaAIFIEiUASKQBG4Ngjcun379iCMf+iHfmiTM4TxM888M4hipLFrJDGy2Ik4DgHsPuTxiy++uL9GDjvFOYS5vjYItCJFoAgUgSJQBIpAESgCRaAIFIEiUASKQBEoAkWgCBSBIjAQuGVlsRXGb3rTm8bqYiHCOKFVxU6kcELEMSI4JHFWGycOKRyiOHFKm+OL/6NHQP/pu0PH8vncl4feOU+88q76g8Kyjeepf959FO1I2Q2LQBEoAkWgCBSBIlAEikARKAJFoAgUgSJQBIrAeRC4ZUuKbEsRktjWFFYUI4iRx1ld7B7BGOI4ZBsi0RHycSaL8yxE4GWTjucB46a9m/471G7PfSjIsbxP/MOEkYu8Sz6yaj1xVxHeD4MHrcOjaseD1rPpi0ARKAJFoAgUgSJQBIpAESgCRaAIFIEiUASKwBoCt7J/se0oEMZOBLHTVhTZwzgE8rz9RAjEkG5IwBDC4pBniQthvFaJxj0aBNJvh0pP/x56/rDxIVUf9v2LfO9+GFxkWc2rCBSBIlAEikARKAJFoAgUgSJQBIpAESgCRaAIXHcE9oSxVcbI4vzjO6HVxSGMsyUFgm1eYex+PjXYvSNkceLEiyt5POA59x84BssZ62Q8P0+cdDn0R1aFSzs/k8bzeYXxWn7SHerT5CmcD+mtJl7mt5bPoTzmd5NmLmPtWv7O+bgfBkk7lzfHJb/5+dyOuW65Fi6P+ZnrnMt0vS8CRaAIFIEiUASKQBEoAkWgCBSBIlAEikARKAKXicD4p3fIYnsZW2Wcf3gXshhhjCBOGBIx2weEcENwuQ4p53q+TyNCjOW+4cMjkNXeQmf6JDmmb3IP+xC14vIcaSzeORPEnp92r6/z3tq7iRPKK2WmrkKnejmkCwHrPvVK+uSRfDwX5z1nyO+R2eJPykqeKdP7p+UTzFIHYY60P3VI/eZ2LNN4Jo+UL1zWLWlmLFJmwyJQBIpAESgCRaAIFIEiUASKQBEoAkWgCBSBInCZCNxCEtuWQphVxSGLhbaiQGghjRFZCZFj84n4co/kcs73rhOvMe57nB+BEI0hFuGvj3K4n0lUuHse/JNe6MjzOT9l5JBuvnedd6VR1vzuXM78njRzPZO/uPn9ua55Z/k85ed5ykyewjxLnHfOmo90KcP7c5uTr3bP5c7tSBp5SOP0PHkK5z5y73mPIlAEikARKAJFoAgUgSJQBIpAESgCRaAIFIEi8CgQGP/0LoSxLSmyl3FWGmdlMeI4RFdILiECLSSaewRZyDPXTodnjvn5iOifh0YA7jPZKKOZrEw/pYCQlekfz5fkZPrSO567zzHf68e5rKRZhspK2jxb3id+zk+7lm1b1m1Z9/n95ClMu9U/bc/zNQxOy0edvONIO4TzMb+fNHO5cx7ymtu5Vp85714XgSJQBIpAESgCRaAIFIEiUASKQBEoAkWgCBSBy0RgTxjbkiJ7GGeFMbLYiahDGCO2kGFWGYf0CuGFGHPtzPUcakRIM/E9zo/ATKAmN/2jDxzpmzwLcZp+WD6XLv2a9+9HGCevlJFQ3t7NqV459P98n3hxkY21tqmb07FW9/n95JlQvvKURh6p9/3y8Z703sv73nGkHckrZc31SJo8E8oveXg3dRK31u753V4XgSJQBIpAESgCRaAIFIEiUASKQBEoAkWgCBSBy0Rg/0/vsocxYjirixNmD2PEFlIroWsn0ivXKoskyyneIRSX5+Oif86FwJJcDDkZnGEeglVB7vVdCM7lc2nSt0mvjBzS535ZVtII8yzl5D5plveJV7ZnDvWe6y5uWbe153k/eS5DddKGvHsIg+QjnTQ5ZszP0o61NMs8pVnGpbyG
RaAIFIEiUASKQBEoAkWgCBSBIlAEikARKAJF4CoRGITxvCUF4jhEsRBJhySz0jjX7kO6hXBDeCHjnK4diLCceZb4q2zkk1pWyEvYhnQUl0PfhITNc/fSOzyXPn2T9Mvnc35z/q7nfnef/k458pLGfQ5xqVfK9ixxrlMX4Xw/1y1yNhIs3k+cUJ3mentvzve0fLw3p1XH3Mt3blfKnNuxlmYu3zszjsmjYREoAkWgCBSBIlAEikARKAJFoAgUgSJQBIpAEXgUCIx/epfVxdm/2Crj+ZxXGIcwQ3qF6EKgzSeSbCbV3DsSH9LvUTT4SSoT/vojpz6ZsXWdNJ6FvEwa9+LzvrTpKzjpQ3E5lvdz/pGLpE2+3vee5/ORuLlMaZblp25rz5QxH8s087PURxrXwUA9TstHfdTRe0krdHgmfnnM9VhLo7zk4V3X3snp+YzDMv/eF4EiUASKQBEoAkWgCBSBIlAEikARKAJFoAgUgctCYKwwvnPnzsZpD2NnVhhbVYw4Rhg7EVrIM2dIr5BoIbkQXTk9cz2TY4i6kHWX1aibkq9+mLE9T7uvc59c57qdB3PvZvykjUJjSd/2KAJFoAgUgSJQBIpAESgCRaAIFIEiUASKQBEoAleNwCCMb9++vbG6GFEsfPrppwdRjCDOthRZ/RjiOIQxYiskV8jhEMYaEyIs5HHur7qhT2J5F0kYP4n4PA5tyjia62qslDCeEel1ESgCRaAIFIEiUASKQBEoAkWgCBSBIlAEisBVIXDLdhRWFwsRxVldnDDbUcxEcYjKhCGKkcGuEyY+BLL4EsYX17XB/+JybE5XjYCxkZX7WVmsX8X3KAJFoAgUgSJQBIpAESgCRaAIFIEiUASKQBEoAleNwC2ri5HFIY5DFNuKwvWSMM7qx5CV7tfOEF7CkMQhknN/1Y190sqbsX3S2nbT2pOxcdPa3fYWgSJQBIpAESgCRaAIFIEiUASKQBEoAkWgCFwvBMaWFMhiW1EIQxgLkcZWPyKN51Mckjg/p5/JY83zLGGuEWK5Dpl8vaBobYpAESgCRaAIFIEiUASKQBEoAkWgCBSBIlAEikARKAI3G4H9P73L3sX5p3fZxzgrjJHE80/m87P5hFkhKQw5jBhGEmclrHhHCeObLXRtfREoAkWgCBSBIlAEikARKAJFoAgUgSJQBIpAESgC1xOBsYfxU089NVYXL8nieVsK1whjxHHIY8TvvLoYOewMYTwTxCGNwVDC+HoKQ2tVBIpAESgCRaAIFIEiUASKQBEoAkWgCBSBIlAEisDNRmCsMEYYZ0uKrCzO1hR3794dpHDCrDJGDGd1MQI4ZLHQkbiQwzOJnDQ3G/q2vggUgSJQBIpAESgCRaAIFIEiUASKQBEoAkWgCBSBInC9ELh1586djRNhjCRGGOfMHsYJbU+RFcZZWZx9jEMYz8TwGmms+Vl5fL2gaG2KQBEoAkWgCBSBIlAEikARKAJFoAgUgSJQBIpAESgCNxuBscIYWYw09k/vbEuRrSiEIYuFCOOZIA4hjEQOURziGKzissJYmPQ3G/K2/qIQIGunfXyIrF5UeY8yn3lcPcp6tOwiUASKQBEoAkWgCBSBIlAEikARKAJFoAgUgScbgbHC2JYUiOL847tsSxHi2GpiW1Ig4JyukcRIrMQtCeEQXEJHiOPl9ZMNb1u3hsAsC2vPzxKHKI4MHkqfVfCHnj9O8Rlrj1OdW9ciUASKQBEoAkWgCBSBIlAEikARKAJFoAgUgccPgWN7GGdLihDFiOHsXZyVxki4EHUhhbNyOCs6Qx4j9ZzSCUMUJnz84GqNz4tAiN7z5nOW90sYnwWlpikCRaAIFIEiUASKQBEoAkWgCBSBIlAEikARKAJHCNyyDUX+6d28whh5jCR2It5sR5EtKWbC2DNEcchhZHDI4TlU5PL+qBq9Og8CwXwtD8/WCPo57n7v67flcagvxSd90szvKov8zOny/LR6JM0crqVPmcI1wnh+PueV67U8Pbvfe3l/GXrPOR/JS1yul2mSPvXpCuMg0rAIFIEiUASKQBEoAkWgCBSBIlAEikARKAJF4DIR2G9JYQ9jJLGtKWxJgRy2uhhhnFBciKuECK2sLBbn3uF6vg/xdZmNuWl5wxT5mhNBmiPPxHkuTN9II07/5F3h/Dzv5/mcd95L3vo/h2funct3Pcs7Qiei1Jn7hMnvtHBZ59Qr5QrF5chzZXg21zvtPe2Z9J7P7yXvZahNykh+whwpK/VJupk0Tpo8W9Y3eTUsAkWgCBSBIlAEikARKAJFoAgUgSJQBIpAESgCF4nAnjDOP7xDGCOOcyKJQ1a5RnwJQx4jthBoCZFerpFhQmfiEorrcX4E9MVMiKYf5Lz2bCYt06chKb0757X2fvrSu3MfzvchQZOX/Ofn3nM/H9LOJKz71GtOt7xey3eu19yGZT3kNb+vzLybtKnDnI/30rZlfeZ7ec3p5jyCQdqZ8pJ+eZ8yZ4zmsnpdBIpAESgCRaAIFIEiUASKQBEoAkWgCBSBIlAELgqBQRjbisKJJLa62GlVsfusMHaPYEN8hfxCcIkLURmSTOh0SOM65Nt8fVGNuIn5wHEmPGcM1p6lr9IP3g1BmX4KIbn2fvJPPsKckYfkI+/5mJ8n7/n5nGfqNz8/dD23Xx7KmY+53LmMtXrP77me84aLvNT9YQ5tkodyHcFgbuv8PHWdn4tL/zxMHfpOESgCRaAIFIEiUASKQBEoAkWgCBSBIlAEikAROAsC45/e2Y7CPsYhi7O6GEnsRJYh0HK6DxmH/ArZNRNi4p0hvebrxJ2lgk2zjgA89ccalnk2v5m4pJ8JUelmQnKZds5HuvS965zeWeaT9yIf7pN3niVMvurl+izH3Ia5jLyberpP/qlvwrnekWvhnHfeF3fW+slXnaRPfmnXGgZz/VPXtCPlS9OjCBSBIlAEikARKAJFoAgUgSJQBIpAESgCRaAIXCYCgzBGFtuSIv/0LoSxbSesMBYijvMP8BBaIdRCdIlDhDldzyeSUnzCy2zQTck7pKNwecA5/ZNn6bPcex7yWJznISRPy3uZT/JLOOeTOGWJdyTvPFuGeZ70y+fz/dxG6RGzOYJB8hFKf+iY88q7a9gmnxm7tTwzLvJM3VKXtDHPhHP6PJ/L8H76Z36v10WgCBSBIlAEikARKAJFoAgUgSJQBIpAESgCReAiEdhvSZE9jK0yDmGcEGGMrArxlxABFiLMtRPZ5UR2OcWF+Eoorsf5EQiJGKzdw96x9mwmHPVh+kN6fTI/X3s//Zr+XytXPt7NM/dzWeLdJy/30jgd7ud2iJ/rNRLt/iQft/Kb6zXLa/Kdn8/lLOuUOs9Ypn4pJ/VQ1zxLnHCOT35Jt8xDevXNc/fqKi74yO8QDnO5vS4CRaAIFIEiUASKQBEoAkWgCBSBIlAEikARKALnQeDYP71bbkmRlcXIKyuM3bsOeYXgyqkSrkOyJQwJ5n5+fp5K990tAiE99YkzWHu6fBbyMdhJL00O786E5PL9Oe8QnpGF+Znr1CfPIwtzWfOzuayZaJVeujn
/5JFnc94pO3kkzDtnqbd3ki55C8WnzqmPeotLupQjnN+RHrZ5zzPvzcf8XHzakvYnj/mdXt9cBO69uNn0LAaVgcpAZaAyUBmoDFQGKgOXLQM31+Juy4tAESgCNxuBW7dv3x77F+cf3+Wf31ldPP/DO9dILcRZyDMkVogx104kWq49c4pzCHN9s2G/utZfJt5reev7mXg+T0vljzBdK+e8+Z7n/byrrcZCjyJw1QhctmPQ/Ot8VgYqA5WBykBloDJQGagMkIEeRaAIFIEicDMRuGUrCnsYv+lNbxr/9E6ILE5oVbETMZYQIYgIDjGc1ZGJQ/CFKE4ceOf4mwn3k9/qiySMZ9m5jshF/q9j3VqnJxuBOnB14CoDlYHKQGWgMlAZqAxUBq5CBp5sq7qtKwJFoAgUgUMIjC0p7ty5syeNkcW2psg/u8tKY6QwwjghAjnkIGLP4ZljJovzLKtErzsJOBrQPw+NQGTioTPoi0WgCNwXgatwDlpGndDKQGWgMlAZqAxUBioDlYH7GqZNUASKQBEoAk8kAresLrbK2FYUVhU7kcXzdhS2BQiBHKI4qytzjyxGBocQ9jwrisWFMH4iUWyj9gjo5/b1Ho5eFIFLQeAynLe3vOXe5vmhx1/s/sgre0S/MPZn988n31J8VvAZMnnvxQ2cbjxG917cPP+CbbruVVZuiExciv7cYUeWLkPnX1Se2v7c2K6OH9D5I7pQv9GHh3Aubo8RCWss3qc/D/XzkxJ/KcZsMy0CRaAIFIFrj8CeMLbKGFmcf3wntLrYP7tDGGdLCkRwtqYIaTyHWuzeEQI5ceJLHg9o+qcIFIEi8NAIDKecYz6fSxIvzw7F5/m9FzfP3L079D2df5qD+6Q4Pg/UDvg888zm2d2HUxhx9OF09+6zY757oPx2/RGy4Jln7o5+fO655zd3bf90zcmhQ219/vkXBk5bu+HZgyTJofcvO1799NdlE1rKgIFzfICZxt95ZeayMTpL/s8++9wYC2dJe+Eycc//yNiOPVi6Pks9LjXNJelPskPvkCP6+VLbMMnog5ZDZ5GJzCFLmX/Q/K5z+qXOPlRXc0V0AL2+lu4EbkN/8rf8evNi5PpJ0Ddr2F11nI8hb37zdiyS9asu/1LKu/fimA8fZLw+tMHaF4tAESgCReCxRmD80ztkcVYZx8gRIoudCOKESF/kMRI5RLHVxVaVhhCeVxnPpDGkpBPXowgUgSJQBB4OgRAJg0x45pnhZC6dim2aZ044NzOhxQkKQfnc8y8MJ5eTuczrJt9z7PdO/70XN3CCR0iB/bMHIF0QArAf7yLud+/CHvHyODulW9vh+hHGGTNXId/KgMPSGT+PzERGHml478U9EfYgxPtFyMRWP21JG/aoDy3Rf8POfIDxdxkYXpb+pCeuK2G8/QhzRGa7fxC5uIx+uKw8D+nsQ+Vt07/5aO6Y5PMgbmN+2ZKTF0EaP/b6ZsLsEM5XFr/TfY/z3Dxjxe4b+vMBPkY9nLXat4pAESgCReBxR+CWrShsSyHMquKtcX9kjGd1MZIYeTyTxSGNl4TxfB+SOESx+x5FoAgUgSLwcAgw/JGNdPUaARZn9dBzP61ERMwOxGURHnMZj9t1iL81jBEjBwmSiQRetvkY8bDmEE8rmpfvPg73W/vh+hHG42O3LVfWML/guMjNkjA+VWYuuA6X1c6H+Vn2eWUiumn5gSWkx0WQa+fFK3Vc1RXn6NsLIYxP0UcP3e4dgfYwH8weusxz4HieMu+rs1fqlTn4BD5nwC0rWs8t16fNURbvrNT7oeOGjF1wnmv1u6pylmU/YYTxmIteeOGBVrM/nLXat4pAESgCReBxR2D807sQxrakyF7GDPytkb9dYWx1zkwUW1U8HLCxb9n2H+BlhXEI4Xl1sWcOcXn+uIPX+heBIlAEHgUCnLqQUiccUmTytD3A6vPd9gezc3hZhMdcxuN2vXXct1tQnLXunPyxzcTS4dzdj59vP3OcrF/mfZ2IsGXd3Ft1fWif4vOSg2vlPW5xGZsnCOMDMvG4tW+tvpcpE1t79JmTq1d3JM65ibUL6JfL0p/nJYzvp4/W+vIscTdJxs+is5eYwZ3cLuffs+LG57qs1aw+dFp9vKzzQ9/vxuGlry6/qnLW9MGTRhivtfE+cY/C1m2ZRaAIFIEi8OgR2BPGtqTIHsYhirMlhRXGjBckcVYZhzBGBLtGBLsOabwMNTVEcVYaP/rmtwZFoAgUgccPgeHY7RyYNXLST5i3BMb2H5guHUHPl2TWCcJj9w+X7k/GbPcVPS3dsWe7fM/kXFpNNJ+zQ5P4tbi1Zy/aX387T52p7GnbiWP1n8ob8cqa4rbkwtHPtOdnyjW/3tdZ36Vbkg3H8prK3MePPV63/ytgH7eS7lh7HqQ/dhhqw1kJ4zNj/oD10L4Z//uWs9v/donLnIf+2doux/t0+c69sQUXWVrfa/Q0UuhYeembRd2kkcd95XRX30P1OFbvjImEKVt4Wtzy2aKu6YdLkYlJ3tijx9qzqz+9dQKnuY5nxCiYr/ZPsJrz3X20C/Yn9OeM6xLD5HcgnOtyKmG8qw9ZST2WGJ2qj3blz+WdwPJAHbe/UrHtysoWRqfgNNfvWLkHyhntgp/nu748Ucdd/CEMjsq8/1x1lHb3D9nOqrNH/bf564/ogKUOPxW3CYNnd78gOtHWKc2Juq48O4bf7vkgo+9DGHvvbLrQ9kxbH/G+9Uk/Lut52viY3jlzOYv8t21Z19X7Oh+SreQVOXj2JNEOpzXZ22OYPHbhSJt2HSr3DDJ9rMxzzp97HFLXXX5zGY+fpdwaF4EiUASKwEUgsP+nd9nDmCHB8J/P7GGcFcYJt5PkW/b7F7t3ZGVxSGRxW8Nju3dxCeOL6LqLzSN9ebG5NrciUAQuA4EY99mPeOjcydCnvxn6yIJc5x0O6Im43apk8WP10e49985VgvPei/t9fNUDcT0+LGYPZM9HPts5BbGQ+ibfQfak3otQ/ef0rmfnJfszzg415zxzmBWPaTNyPMQL4kn5p5XtPfknr9E2+6YirXbtzrMxt6n74p9fSZvzqB7bfQMHGbhob9Ik3O7TevpK5KQNVuqk3Sf6Yle/8/SHskKKwU9Zad/YmmPXHs/UXVwwSvq5/1J3WIy8dnWXVj3nfk3ahHPf6Fv18F7OuW8HNs8e/SO65LGt304Wnnt+P1aSx2od7ifzOwxCFs0fZeY6R2bmusHAGNF3qQOZXZWV3T8s8nzb39u+WMNXe0c50z/je5ixpB4ZO/PYugyZSB+l7sEjuM3P5+tjdbz77Ca/EMj75GSJEQIP5jDRLmn1xYz7sp/IZnSTtOoQHPLeFvNtX0qzJA7nem+vX9xtM/TM7n+GHP0jOfU+ln6nW9WbnEZm5n45iz46S9uPlbuT7zHmpj2k93qAjp8wXMNJfvoFJtJGprRhHvNJoz9gG32fvhQH47X4E3U+47g98d6LmzEmlZl+XUsz2jn07q5Nu+tR9+mf3h3CbS1vcd5fJeR3/bBal92zNX0jT7hvMTzS30PXee+YXG3nEWmXYy
blbmX8SLeOMXT32dEnSbPPc+jo46um5/fHOAqJ6mPMbtss5at3xpv71XKWmIw+38qZ9jnpS3K1r9vuo4+yhw5IH67oCe1Q9h4rsjHNcXO+W5y3Y19dU96FyPSuj7RFfR7UrlKXY3Pf7n8ypI7b/I7rw+iVy7Blm2cRKAJFoAhcfwQGYTxvSYE4NgnlRA5bYcyoy7V7Z1YZC51WEDtdO2biOM8Sf/2huVk15Iil325Wy9vaIvD4IRDjPiTFTEzFKZUmq5mkyztxGnOfMHlxDqThzIVQMB9wmJJWyBEaRMa9o5U7IQuSdjiEOwKBo5V8t8TY+urnuQzXylgrP47v3HbpOcozOcNJ8/5MNqWt6rMsL/dz3VNvcZ4Ltw7olO9YEWQ7im27pMmZPI/qcoRZni3D5L+MP3E/HNktyRTcpYGBduvDvKM+6aOH6o/9P2XafljYt2+SDWVuHfMtoSNNVsvN/aJOcJV+7kP9JG6ZNm0Qjjx3HzX0t3ttl98Judj3y06OdsSCdzjOymLfDFnYpQ05AaO53LPI/OF2nS4zo83P295rKzeRlWUdtFMbtXvf37u4E8TigkQ511jarXgbeE0fY9QhdYVn6r+v24ubgfFZZWLGO9eR2WA05500I5zqCKNBtu36NONBvBXi0qvrtv+PSB15izuG5S4P7zrJgTYbW+mfNZ0ijbQH6zv1j/KQVuo0twlux+pCx+1kf9Zp5EG9hxzLd1Hn9EvyP3Pbpzru67X755/b/jje5/I9DSdjPe8lv/08NZGr8olcjfH5wnZOytwhD9hqr7SJ3/bvbnXwru5nHbepzxymDsqY43MtXh+R0bmft/ELPXYKbskv4f79BcGZ5/cLvX9CTnY6EnaeSeNMvbfj7PjWL/Cc5exYuXN+Oz085ydf72cO0lfKXsrofn7Y2SnbdFsdB1vl593o+2PlrMgomdnW/ajflO1Me+c89+3afwxS7tG73gluc9rkQU7m+KQf439XP/lFns4j0/KJTtSP6iDutDGwr9tO72Xum21DeWjjHGe8Dtnuvx56/ByF1rgIFIEicEEIjH96l9XF2b/YJDuf8wrjrC5GLs6ksQk957yyOKSx+iY+W1NcUBuazQUgMAyyHdF/Adk1iyJQBC4RgRj/MfBj0It3vSfbdk4O5yTvcLg4GblPuEZ4eJb4Qb7sHB/XHIs5Ttoj5+mIoA6RESct5SVeGxK3FqZ8eee56ziEc9s93zqJ27TBR1l5d4TBZSa+VpzO1HFZd3mEgKI757y3c+cR3vOz0/Kb07k+Sns6PidIgakdmcfn+h/le/wDQOLv1x8hfEICLOtNLmYnOc/Fj1WEx+q3deqTJuGe2JzS5lnCI/yP45M+1/akFZIXdZjjIq+c5znetfelDx4PIvP7fBf/ZO+ozmsyc7y+R3U4Hp9+GrbVhM+hvOd2nWcsjXwOjJuLlIm5vkfXR79m0CdOOKRvjtJtiVLPl3pBmowV9XXv/aEvZ7Jn9P1JWZH+SCaO959nS2yVMYje6WOKdGtn3l3qEmnXCGOkk3rP7d+TrgtZ3uqAk/roQdu+rPeh8SDdaTipp7of0x8H5Wp9nskYOJbHpDNnHA/VM2MU9su2zfcpa9ah8/NBRtM1i36GLzk0Luf0h+ozpxnXO0yUf+LZATlaplvTCanXWr5bWTmub4ZuXLRtLkQeRAgAACAASURBVCc4LvtCGtjM5ezTLuZMaZU95o1BMk8fTnZl79+dPoDO9Zivo48yzvPM+J/rs5XTw313TIekP5493h/B8xhhvOufoYemFcbqcaj/1Uv6JY6Jn2VaPolfyl3i1SvtXguD5yz/R7gd6Tf5qLM8ehSBIlAEisDNRGCsML5z587GaQ9jp0lrO9FtnTmEsXNeYYwwNoE5XTtDDiccE9xub+PAO680TlzD8yEA70MkfPriUAl5N/24TJfny3jlHXq2TNv7IlAELhaB2QHYOz2clJ1TM3Tvzmnh9NDncSyQdnEA5nxCWnAk5vg1xyJ5ciY5GTnjoHJakkccmJSf+Dgny/LyPOHeIZscb46f95d5azc88m7aJF3qmBAmSwIz7yVc5p94Ydo6Yy1e+YfyDW5LLOZ8c30o/zxPuG3HUZsTL0z9Z4wTt6zDWftjn+6A4761HaYVm5HDBWELt6RNnySEn2fLOs5tO8LnpGMcnGen+UEJ48iOOik3eZ5F5jNm8m7qfVTn42NsKzMn+zAE54xD+jtYJdzX70C/qMN5xtJow0Fib7tqdEl2pN3p59wnXOuTPFsL1T/yK0/nCaLmQB3lF5mD61r+2zT3jn7VsJPdpD3UT55HXvS9c+iAU4i25Ck8TWesEcbzu+P63ov71fJLPE7Le5kPfM/yscZ7h4gvz07DaVmm+/TL8kPToTLIvL5fylvSz/H7cXGGuWqtbnl/HoNJl/FEJhOXMM+M+cQJU0fhHH/iOnK8IBxPpFvI6Px8Td+kXmt1hj9cx/g4o+xG7uU7l50+SryQXMBzTpfr1FW49gHvUDl5fw5P67Oki8wd0gUn5qBdfyzTB8/luFMOLM8u07vV1wu5iLzMMi3v6MGlXAb3ec5Pm+cwcxRcE584/bT2/sVasc2tCBSBIlAEHhcEBmF8+/btjdXFJjfh008/PSZ2BLE4Yc4QxyGMrTJGHoYwNgnPJGWIzMTn/nEB6DrXE87pF6G+yJFn4vJMXI48zzOhPsyR597Pmb7Tl0kvdN+jCBSBq0MgBr6QA0NPM/A5FYz9+XkcCCHnQto4cXO6OGRLRyFOxOxYhBAQt3bOeZzXsVHHbXm7VXK7n7lqS+qW8jibzrQr2HDm1uo5MDnF4T5Ud/nHwR1z25THsbpO8d45iyObuh/KP8+FcVaXP1lPGm0ec/jkFB5qU+QkWCaPZbhPt3Bsk055SyfZsyU5GEdYH631jbilM5wyhEf4HCcqPEsb57Ysy5cu8rNGUBhLW+y28vQgMr/Pd0c2p95HdT5OFiXvpEsY+Q0O6W9tOYTZ2thOfsJj8vkAY2nkERJrQfpclEzM9Tz12k/7d1uu6KNjZMqBOh6r//SLC/H6i8wYRyPMB4vF+D3UT/LIWIvsSZt+u19bxphZYJp3DhLGu+0NyILxlnKXxNWx/l6050HanvokzPhdIz5Pw2l+nz7cyvJ2+5yl3jhURuKP9ftExs7xqcuh8TLriNRtDk/T2dERa/ojY5Wczvml7mu4zenIzmlycSztSr96njEyz1GpF3lZ5uFZPhiQuyFL9yGOt/gctznku9Vd8thuaaCfT5tv1VF7T//YerKcZRvc63N5rT1LXPph2T95Thblsdenu/7QrqQRBs/luPNs9N+C8E+5y/5P/Cy78jgUn/G+1DF7Xbz48D/X2XXmKP0zP4seS93n8XF1Fm5LKgJFoAgUgeuEwC3bUVhdLEQUmyTmM9tRzEQxAtHknhC56B6h6Dph4oWJD+l4nUB4XOsS/FP/kPbu156Jc+iDJdGr/2bCeO19aRze1Z+O9PG46Z8iUASuBIHZwI9DwWHhzJxwBCfHU1rO1Px+ruMozA6CZ2uORZzwvHtae
F7HRt7Jg3PGoeKkpkyObZw4ziY9lWdbp3Vy+g441km/DFPu0imTbs0ZF7/F5uRPwD07Lb8HKXuf9t72Z8/3I4xnQuNQHc7qaO7TXRBhTCb37XmA/jnC/zBhvHf2VwhrZUa2Z3xSl4yrkBwPIvP7fC+JMI68p64PEqb/H3QsjTImXTKXeVEyMee5v0ZYHSCt6Dz26jE8DtRxW//teNmnt1/p7h9dzURNyKJ9HXZyeZoMRH/CInjMemqZV+5DOB1Ku0YYb4nKnd7bYUPvwWJJXB3URw/Y9tQ34dH4ODl+T8NJe+kr5zw+1f0yCePU+0HDjJc1GUw/r+mP9OuSkDwNt2N1C0G5QuweS3eKzjzSkUd9lHpp16F81DErbPXloXTiPV/221H8ro8PjN853+CCWJ7jc32onDyfQ2nJ01qfJV3KW/ZPnkcH7GU0/fHscdyC53LcyeeyZFreh+QyMrm049KuhJmjloSx59qU/LUBVuJ7FIEiUASKwM1E4JbVxcjiEMcmB2cm3CVhzChFJoZQdL92zoRiSOIQybm/mZBfTKvhOxO3c65rz/SR9LCfr/OeuBDGeX/uV8/yXN+7Th8nj4ZFoAhcDQIx+ke4c2S2K4PWf0rICaPXESWHHMUQHktHY82xiDNFBxyry4rzGsdj6byd1bGRf+rgHcTKXEdtMl9xcoRzfdKmODzzs7NcH6q7d9eccfHqcGiVVAiuvRO6glfqpZ36LPeHwu2cfbzdSZv6H8Nr98uhh+2Pfb8dIHrVZ41AiAymbuaXIZOnEBdJuxYe4X9SBrdj4Th2y/LluZerxSorz5J/sHsQmd/ne8GEsXrB7JB8reG0jJvr9iBjaeRzgIy9KJlY1tW9Maw/156lLXsC2Hg6UEfvJ33InfQxWZzz3/f1Ynxux/b6WIuuUYa81ElfwWbO+8R19Pcz6208QRinfYuVixlPaVvKOaSPHrTtyS9hCLc13XoaTpmnlvpnTW8cKiPxM8mvXmvx+748w1yVts3haTo78rQ2p+6JxIdcYbx/f7G/9ly3+12v9XHyXavzMr/IcGR6+TxjTTnHnkVGpw+7x54vxpV8yEXmq6VsHCxnmc/uPn2+HNdzHYLDoQ81ZJhM7t/ZtemYrplXGC8xeIwJ47Q5GEX/Xo1121KKQBEoAkXguiEwtqRAFtuKQmiCzGnCRDIijedTnIkYaSicyWMNFJcw1yEqxZdoHPCc609I3TXyPc/mAhKXftBn85H+FJe04pan5/LQ9+RAPtL3KAJF4OoQiEGfcOuEb3X3CWdrR7pEr685+PJZEh7JO06x54kLQXTI2ZrrcMgJTB4HndGFM4g44Qial+75Ncvu+Xa13ZakXDrBe4fHO2urnNbipnIP1X3gtftJPB2ZughPI0v2hMbinfn9XOvTbVvX/1FW0qWOJ3AcDu72H9jObU/6OU5eZ+2PpBOmDnNIzvTTHOf6JGH74mif9EsMl++u3R+RIccJ4728Lhz4k+UfEYgnVgjusPNOyk67zyLzqcMSo6M6n01mQtjMfZW4eTymjiO8j0xL8zBjKXmP/l0QQcFm2d7U6+wycVLWtfPQOFgt9xSyaisDR7pgLxMLzEI4pf4JTxvbJ/TnbruPrXwfl9HklzD1ODGGd30Vwkb6yNZJXXfaCuOTJHfKnGVL/ofanromjC5bm08O4rTrm7UPHmsycqiMxJ+FMI6MnGXcpm1zuC9rTWfv9IS6L3HM3LMkU5OfcC5neZ1+Xk23kNflu7lf0zep15L4VH96Ie8K6WVtO6EfM/ee8tx7y/yS5xIr9TR+jtp8fG7Zx082yFzP5XUwPtjnu/pnFfWyPmn3MYx2snssTj67eONpWY/LkmnlnHceD6bzPCLPZV/DMP14dRZuSyoCRaAIFIHrhMD+n95l7+L807vsY5wVxshBJCGCMCeiMIQhEtG9MKSk++EITvEaX4Lx/CIAQ32yhiX8l8/0g7jg71q6HPpR/x56nnRz6P0Qx3N8r4tAEbhcBJaOSRzDQw5SnMQ1xzZ5ZSUVRyJxwjXHQnwIB07GeOeeXy/QS88fW8UcgsuzOd848kunf04zXyefJVESh03blnX3/h6bu89u9zrlbO/+SdRMxMxl5TplLus+5zvmuIlkDuGybdfi5/Q7p/xEG6b3R9k7J3S5WjD1OhbuiKlBwkxEQuq+xCTxyzadtT8iD8NBHuUdb+PWST7pPEde5ronLwST8ofjbm9qP4le/GR9fs91+nXfvt17nNvRrxMW0osb8j9hnfJnnPWntOq0xChtuJ/Mq7+yls536ryUmUMEG/tr1Hluy06GxKv3qONuP2/jf+A4tXGJm/vIwAk53MmdvPe4znntni8/CATHi5CJZX1PELHqY/yOf3y28nP4uY7B7d69fZtnAi55P+vjwqS/9MfAfW77fT4GrenP4DLkcZHX3E7yoDwnmdGn3tU/4o69v+v/rXz6nyHbvdyjd2ZZVkbil/roQds+19d19MWMZ9IckudRn51Mj/cG5m/Z982QnwmnQ2U8aPxZx23qfyzc4X1irOzqGRy1Gcbpu+A+ZOsMbTpW5p4QPPq4keeRiUP1STrhur7Z7o28lZ/dnGyc7MbNrPOO3j8+d6eMyK22ei9t9/xYPTO2nn9++zHx3oujz6VRxr7fJ9lWn7OUkzTLMPgLjaXUje5LG/f1nz+A7VY7w2euQ/BZs7FS1lYfb//pZbDzbK5byOzluHlQmY4OT1tSRvLZjveTH+CSLrqJ/CZOf8g399s2H235dblWbXMvAkWgCBSB64rA2MP4qaeeGquLl2SxCZOxymlxHXIQ2egaWYloNOnOZwjjEJLSOef76wrI41SvkLxwdYa814a1ZyGEPU8fek/fJX3an/vk7V3p3M9ks7jkq49dS9OjCBSBy0Ngb9DvHNEY/5yF5bPcIxTXVnZ5Hod6S1ps/1GN+C0Jsp0HPNs7dsrl9I1VtkfPzRPDAdk5oCHpBukxSMEtGR1nMuUdc1Im5zp1F8bR0tY53jUnTtnL+NzDJfPZtsw3D4d26WwlvbYt667Oeb58xjnMM45aVi4hL5cOJsxOq6t8kEbqqR7J99Rw54CHLJW/vjrWvpU2xWl90P7QpuCordosrxnjQXLdRw60SX/OeMo3Tv5pbY5Drs3bc0usLQkzc9RRf2y320q7M27yvrKlHX29hv39ZH4ni8fys/JsBXv1H23f/YO1lJ0+mzGB6zHyebf/bPpgvDv+qdTJsbGG4cOMpeUYUr/UVRkXKRNznbcYHdcxW6wmXTPrjB3xdYTNdpW9+s31HWXsx82WrCV30mz12pGsrPXTPOYP6c8ZF304kzNzG12faOfduyMuskQuhy2904WR6X2+U7tnPX1QH52x7ct6HrUpfbL7QLNvw/bDzLaP7g5CcM7DeAzBpm0Za5H3yNVWno7KCPG61VVH8emHU3XYGcbtXMfl9f109pY0Tp3evN82KTIY0nDZJm1dlpV7/TqwmWV7ELHb+UPep80PwXPbD34tejRH0ZHyP/aMjtrFkR/XZIxcpk5r4VzOlozdpaejdv84bl/O
RE5mzAxZnXRtCE/vwCttPFjOAp+5jtqcNm3rcPIf7xnv6jnaOz4qHt8bXH4zXiOfhe6DUcZp6i1f10mvHcv+fyiZXplLMp+dOgYmnI6PoaNfBG3tva3eS/8MGdz1z+VZtM25CBSBIlAErjMCY4UxwjhbUmRlcSY6kwbyMCFC0D2jTxgy2H1ODRbvXugIMZnwOoPyuNQNlvoAgeuEd47lsyWRK+38nvsQv/JYez95S5tyIwOeJU/v9igCReDyEJidomtxPT4KnpHcnByXK6/7jsC8mnIP4HFvu9fyGhmgXltH87iD/yD19X6c7Ad575Gn3a1CO2s9kAHslNHeh5SpraM/rwQ+0Gdr+V8TmX9s+3sN09PidvJx3/buiNNBNslvIqMOytZZ0pxWt4t89iA66oHqfUC2HyiPwysWD2J7CJtHVu5D6Mf76Oy0fauLDuB8CIeVeHMDAjMfCJJ/wnxcyP2FhtHDD9I/p6X17NDz0+LXnq3FreB3Ag/vneHd88wlyryvbjpLXR91mlnPLupyeRZtcy4CRaAIFIHrjMCtO3fubJwIY84XwjgngwWpmNBK45CMiMKQjMKcyELXwjXSGBglFK9OJIr11WHdkorAVSFwwiFaGPZ9foHExiVgy7E0ryIGZic1q3Tn1WDty/W+vBzCeL2s9sFjhMuSML6E8Vt5eIzk4YL6/5DOvkhZUEbI4nleWJZB9x364LhM2/ubJ6uX1edXZd+2nCJQBIpAEbheCIwVxshipLF/emdbCo7s+BnN+EnSEWmMMLYKNeRwCGEkcojiPNPMkMaux5fy3arj6wVBa1MEikAReLwQuCyHoPleoXO520c5P8n1c1AkwP1+Atw+2vZRCeMrlNULIt2uRHZLGJ+6hcCV9MHjJC8PUteFzr5QLMcWDn7RudvLfa1eY2uN58YWCmdZMXuh9VurT+Nu1Fh7vKzk1rYIFIEiUAQuCoGxwtiWFIji/OO7bEsR4thqYk4tstjpGkmMHE7ckhAOcSx0eJ5jvk5cwyJQBIpAETgbAnUES5bdZBlgV2R/TPuHnrYa7xBO3gnpfGzvzZIgjy8JMv4Z3vYfyO33Xz3DT9EPyUjjq2evkwzQWf2gWJl8VDJ5Nuu0qYpAESgCReBJQ+DYHsbZkiJEMWLYiTAWhyR2HbI4pDAC2HVWH4c8tsI4K4+Xq42fNCDbniJQBIrAVSHwqByGlltn9TrIgH8WNJ9WZz9ovazkm/MY/4Co5OID4/iguF9memTask/ZppdZZvOuTqwMVAZuggxclX3bcopAESgCReB6IXDLNhT5p3fzCuN5Swokse0osiXFTBh7higOOYwsDjk8h5q9vL9eULQ2RaAIFIEiUASKQBEoAkWgCBSBIlAEikARKAJFoAgUgZuNwH5LCnsYI4ltTWFLCuSw1cVWFicUN68kRhQjiLOyeKzk2G094Xq+D5F8s+Fu64tAESgCRaAIFIEiUASKQBEoAkWgCBSBIlAEikARKALXF4E9YZx/eIcwRhznRBJbUYwUdj2vNg4hPBPHWUWcZyGK59B1jyJQBIpAESgCRaAIFIEiUASKQBEoAkWgCBSBIlAEikARuF4IDMLYVhROJLHVxU6rit1nhbH77GGMNM4K4pDJSGDnTBRrau4RyY4Qx9cLhtamCBSBIlAEikARKAJFoAgUgSJQBIpAESgCRaAIFIEiUATGP72zHYV9jEMWZ3UxktiJIEYM53Qf0hgBPG9JAdKQxzM5PF+HPC78RaAIFIEiUASKQBEoAkWgCBSBIlAEikARKAJFoAgUgSJwfRAYhDGy2JYU+ad3IYxtQWGFsRBx7DpEMfJ4SRaHKM7q44TZpiLh9Wl+a1IEikARKAJFoAgUgSJQBIpAESgCRaAIFIEiUASKQBEoAkFgvyVF9jC2yjiEcUKEsVXESGIkcELXIZBDDoc0Rg47xQsdCcX1KAJFoAgUgSJQBIpAESgCRaAIFIEiUASKQBEoAkWgCBSB64XAsX96t9ySIiuLEcRWGLt3jSSet6EIASxEGDsS5pn7+fn1gqG1KQJFoAgUgSJQBIpAESgCRaAIFIEiUASKQBEoAkWgCBSBW7dv3x77F+cf3+Wf31ldPP/DO9dIYmSxM6uMEcHzKuOsKg457HlWFgtzXeiLQBEoAkWgCBSBIlAEikARKAJFoAgUgSJQBIpAESgCReB6IXDLVhT2MH7Tm940/umdEFmc0KpiJ1I4IeI4K4YRw1ltnDikcIjixGn2HH+9YGhtikARKAJFoAgUgSJQBIpAESgCRaAIFIEiUASKQBEoAkVgbElx586dPWmMLLY1Rf7ZXVYaI4URxgmzqjjkMShdO2ay2LUjK4tnAnk86J8iUASKQBEoAkWgCBSBIlAEikARKAJFoAgUgSJQBIpAEbgWCNyyutgqY1tRWFXsRBbP21HYfiIEcohiK4uduQ9xHELYs6woDoF8LVrcShSBIlAEikARKAJFoAgUgSJQBIpAESgCRaAIFIEiUASKwCoCe8LYKmNkcf7xndDqYv/sLv/ozgpjRHC2pghpPIdKce9AFDsTJ77k8YCjf4pAESgCRaAIFIEiUASKQBEoAkWgCBSBIlAEikARKALXDoHxT++QxVlljCTOiSx2IogTIn2tKs4/vXNvdbHVxCGEkcKu5/u0PKuOc9+wCBSBIlAEikARKAJFoAgUgSJQBIpAESgCRaAIFIEiUASuBwK3bEVhWwphVhVnZbHQVhQIYqQxkjhhCOGES8J4vg9JnNXG7nsUgSJQBIpAESgCRaAIFIEiUASKQBEoAkWgCBSBIlAEisD1QmD807sQxrakyF7G8ypjJDHieLmqGFmMTM4+xllRHEIYQRyS2DOH+zy/XlC0NkWgCBSBIlAEikARKAJFoAgUgSJQBIpAESgCRaAIFIGbjcCeMLYlRfYwzgrjbEmBEEYY23oiq4xdZ3Vx/uFd7pHCIY8TgjlEcUjkmw19W18EikARKAJFoAgUgSJQBIpAESgCRaAIFIEiUASKQBG4Xgjs/+ld9jBGDGd1ccLsYZwVxglDECOCc615COGc4h0zcVzC+HoJQWtTBIpAESgCRaAIFIEiUASKQBEoAkWgCBSBIlAEikARgMAgjOctKRDHIYqFyGErjK02znW2ocgqY6ETcex07QhpLMyzxI8E/VMEikARKAJFoAgUgSJQBIpAESgCRaAIFIEiUASKQBEoAtcGgfFP77K6OPsXW2U8n/MK46wuRgojjoVZXZwQQZyVxSGNtTjxyOMeRaAIFIEiUASKQBEoAkWgCBSBIlAEikARKAJFoAgUgSJwvRAYK4zv3LmzcdrD2JkVxlYVI44Rxs55hfFMFLt2hhxOmG0oQh5r+rzS+HpB0doUgSJQBIpAESgCRaAIFIEiUASKQBEoAkWgCBSBIlAEbjYCgzC+ffv2xupiRLHw6aefHkQxgjjbUrh2hjgOYWyVcbahyAriEMagzWrikMe5v9mwt/VFoAgUgSJQBIpAESgCRaAIFIEiUASKQBEoAkWgCBSB64fALdtRWF0sRBRndXHCbEcxE8VIYgR
wwhDFyGDXCRMfAll8CePrJwStUREoAkWgCBSBIlAEikARKAJFoAgUgSJQBIpAESgCRQACt6wuRhaHOA5RbCsK10vCOERxyGL3ayeS2BEC2XWI5JLGA5r+KQJFoAgUgSJQBIpAESgCRaAIFIEiUASKQBEoAkWgCFwrBMaWFMhiW1EIQxgLkcbZhgJxnFMcknjeliKksda5TphrJHGuQyZfKyRamSJQBIpAESgCRaAIFIEiUASKQBEoAkWgCBSBIlAEisANR2D/T++yd3H+6V32Mc4KYyQxgtjK4pyIX9dZRZww5LB7JPEcD2/3PYpAESgCRaAIFIEiUASKQBEoAkWgCBSBIlAEikARKAJF4HohMPYwfuqpp8bq4iVZPG9L4RphjDgOeYz4XduaIoRxtp6QLqSx5pcwvl5C0NoUgSJQBIpAESgCRaAIFIEiUASKQBEoAkWgCBSBIlAEIDBWGCOMsyVFVhZna4q7d+8OUjhhVhlbOZzVxQhg9zllnLiQwzOJLF2PIlAEikARKAJFoAgUgSJQBIpAESgCRaAIFIEiUASKQBG4XgjcunPnzsaJMEYSI4xzZg/jhLanyArjrCzOPsYhi2dieI001vysPL5eULQ2RaAIFIEiUASKQBEoAkWgCBSBIlAEikARKAJFoAgUgZuNwFhhjCxGGvund7alyFYUwpDFQoTxTBCHEEYihygOcQxWcVlhLEz6mw15W18EikARKAJFoAgUgSJQBIpAESgCRaAIFIEiUASKQBG4ngiMFca2pEAU5x/fZVuKEMdWE9uSAlnsdI0kRg4nbkkIhzgWOkIcL6+vJyytVREoAkWgCBSBIlAEikARKAJFoAgUgSJQBIpAESgCReDmIXBsD+NsSRGiGDGcvYuz0hh5HLI4pHBWDmf1cchjK4yz8ni52vjmQd0WF4EiUASKQBEoAkWgCBSBIlAEikARKAJFoAgUgSJQBK43ArdsQ5F/ejevMEYeI4mdSGLbUWRLipkw9gxRHHIYWRxyeA7BsLy/3tC0dkWgCBSBIlAEikARKAJFoAgUgSJQBIpAESgCRaAIFIGbhcB+Swp7GCOJbU1hSwrksNXFCOOE4rINRUIEcVYWi3PvmFcfuw+RfLPgfXxaq398CMhHgoT63EeBHg+GQD6grL3lmfFhXGVMraV7XOPoA+3Kh6RD7YAD2YIDPC7jINPRZ+pEx11WWZdR/+ZZBIpAESgCRaAIFIEiUASKQBEoAkWgCBSBq0ZgTxjnH94hjJEqOZEsSJeQQPNqY8RLCOOEWUWcZyGK59B1j+uDgD67ffv25r//9/+++dzP/dzN533e523+w3/4D5vP//zP33zxF3/x5iu/8ivHKvQSbWfrM2MEnk+/6en9B5T5TWPqVa961eYLvuALNv/1v/7Xzatf/er58WN9TUa+9Vu/dfOKV7xitFFbDx10zVd91VdtvuRLvmTz2te+9lCyh47XD//rf/2vzb/+1/9688pXvnLzP/7H/xjy/X3f930PnWdfLAJFoAgUgSJQBIpAESgCRaAIFIEiUASKwJOOwCCMbUXhRBJbjee06s99Vhi7RxwjYZyIIWfIZCSwU1yugZd7pKTDs1w/6eA+Lu3TRwi13/27f/fmZ//sn735Rb/oF21+8S/+xeP8Zb/sl23e5m3eZvOn/tSf2nzjN37j6M/HpV2Pop7Gw8tf/vLNP/kn/2Tzv//3/x7jY64H2f+Wb/mWzV/7a39t8wt+wS/YvO3bvu3mMz/zM+ckj/U1nfGP//E/3rzjO77j5tM//dM3frlw6IDDu7/7u29+z+/5PYM4PpTuYeLpmf/5P//n5iUvecnmfd/3fcdHj3/+z//55o/8kT+yednLXlY5fhhQ+04RKAJFoAgUgSJQBIpAESgCRaAIFIEicCMQGP/0DqljH+OQxVldjCR2IogRYTln0hgxM29JAbUQxjM5PF+XML5eYSuC8AAAIABJREFUsqX//uN//I+bH/fjftzmV//qX735O3/n72w+4iM+YvP3//7f37zXe73X5mf8jJ+x+eE//Idvfufv/J2b7/iO7xj9e71acH1q8/Vf//WbP/SH/tDmD/7BPzhWtMJ2Pu4+c3fzGZ/xGZtf+At/4ebWrVubn/yTf/Lm7/7dvzsneayv3/CGN2ze9V3fdfMrfsWv2HzO53zO5tm7h1cYW/37S3/pL9381t/6WzcXveqXnP65P/fnNr/rd/2usUreSmf18VHkpR/60qHrHmugL7HydLWPSGQ3ul3cVR+P4zyhzrBbO+c58CKwvAp8LrsMmOQDtGvydtllXgT2T1oep8ntUpbPqgvkKa33H0WfPsqyIx/BNXo0YZ43LAJFoAgUgSJQBIpAESgC1xmBQRgji21JkX96F8IYyWKFsRBx7DrOHfI4Dl4cCvcx0hMXZ0F8HIjrDMhNrJutAT7pkz5p8xN+wk/Y/JW/8lfGyvL0FQfHStBf82t+zeaH/bAftvkbf+NvDBm4iTidpc228Ph1v+7Xbf7sn/2zA7elo/y1X/u1mz/+x//4xsrt3/t7f+/mR//oH735k3/yT54l62ufRlsRtX/gD/yBQQJ/2Zd92cE6P//c82Ol78/5OT9nrDA+mPAhHpDZD/7gD978xJ/4Ezf/9J/+00FYyMaWFL/9t//2zbu8y7tcOEH9ENW8lq/A7qu/+qs3H/uxH7v503/6T2/e4z3eY2Nl9v/9v/9342PHZvtDkUuvu/ni+77vdY+EaDpP477/+79/yPXHfMzHbD7yIz9ynP/sn/2zzad8yqeMbVp+4Ad+4DzZ79/VTz70mocv82AbsAf0x0Uf7Aq/Avhjf+yPja1i/DLDLw76UfKikT49Pzbat33bt20+/MM/fC+zkd1l6EOyPrPd0v0O8qlPP/mTP3lsU3S/9Bf5nD1r/vm0T/u0MQ/fe8vFy+9Z6usD6r/6V/9q8xf/4l8cH4rf7/3er79wOQtwTVMEikARKAJFoAgUgSJwLRDYb0mRPYytMg5hnJBjx0HlnHIuEroOgezaybEMOYxAEhfSLKG4HtcHge/93u/dcGR+yk/5KZu/9/f+3rGK6TN9b3sFK2KtMv5//+//7QkEssDBt++xrQiQI1aOIhpmksH1G++8cWx98fEf//Gbl770pWNvWWS0/B3kDVn1RV/0RZvXvvZon1nvKlMZHFv30n7hF37hqNdrXvOaQcYgB//9v//3G+1RL/vi2odZnewXbBVr6qRd0sj3sz7rszYf9mEftkHy/Lf/9t9G3pFVoTGhbHs5I9e//Mu/fPPRH/3RYyuJ7/7u7x71D0Z//a//9c3P/Jk/c/P7f//vH3V53etet8eTA/0v/+W/3Pz8n//zB0mMpP+xP/bHbv7oH/2jo8x9wt0qfRjaPoEjr19sXfFd3/Vde7zSBluFyOtDPuRDRhv0lbLmthqn2mqVrfzUX1uVkbYal+qrrfqQs2sva+n1iQ9Kyce+wPC277Wy5OG0d/Fv+S2/ZfP7ft/vG7j+m3/zb0Y6+xTfvn1Ulj2eP/ETPnFse/K3//bfHk33Pl2jj72H9CUr/+f//J
/xwSr1lPjrvu7rNv/pP/2n0aff/M3fPNpv/+3Xv/71Y0WxVcuI+G/4hm/Y1+1rvuZrxpYUb//2b7+PnzG/yddkxXjxQehH/sgfOX5RgHD/ST/pJw0ZFecjyPd8z/fs5eoy8FIP41lZVqobo4/TQf6tYoeXsf3jf/yPH7/ceKu3equN8zf9pt80ZJv+etiDHP/hP/yHh8wbR5dx6AeEm61l/vN//s/jY/FFlkPX+Ahh+6N3fud3HuOePvJLFgRj5oSLLLN5rSNAFj/u4z5u8yN+xI8Yskpm9UNOvzwS5yTX0po3TjvIDx39J/7En9j8+l//6wdxfFr6i35mTvCxy3gjV+pz1Qebmiz/vJ/38zYf9VEfNewk86IPcWyhHkXgUSBA9/qQw170McNioLXjv/yX/zK2VjOnsf0ep4OtyBb8F//iX4yPYOYTtqs5jS1tDn2Ufih73bzn4xub6x/9o3+08bG5x/VFgPzwd/iK/JnlnGIeNdf4ZS6fcilfZNJ7/+Af/IPhT53HBrwfSvxUH3v/4T/8h/sFXmRe3cj/ofMDPuADhl91XrtSWy0wtICKH+f/2izxul8bHvS5+RbuFrvwk087+O18cb/2tYiDDsQd0Bc4iWXfnZbXZT5Tt0/91E/dfOAHfuDgMHAZ7//+798FT5cJevM+iMCxf3q33JIiK4s57QaUe9cUpwnYoMqpBNdRCgkz8NzPzw/WqA+uHAGOnW0Ufvkv/+X7/XRnck7fUcCcLw4+R19fmvCsnLPfsW0rfskv+SVjD+Sf9tN+2iCgo7SlRWr+pb/0lzaeIaatLH3rt37rzW/+zb95rC7TaJOKrRx+1a/6VWNiDhBk74M+6IMGAUN5kj9EoHJ/42/8jRsEoK0N7L+M5LJPLfIXEWufYHUTj8zNP1dDhsrr1/7aX7v56T/9p492eV9aq4GyRYK6I0+RP8iTv/yX//JwAOXLgbaFh7r80Bt/aPNu7/buIx1iSNuQOt598d6L40Q0p30mqi/90i8d9VZ/bc8Be6QZg5KzDnNbWPyYH/NjxlYPVsrmI4426Lef9bN+1kijLdrKCEUQOzj3+uk3/IbfsPmpP/WnjnTS6wvbNiSd8a9e2mVLCQ43PBDg2s/QUZ7V0Uhv71uVbuIN8cwQVl9EI4ykg4U+h3/+wR/ZsI+zcpDD2qxP5KVf1ZM8qaf6KHt2bN7t3d5t5Ks/yIutPaxsZrAxUNQbKT4TjgyBP//n//zm7d7u7cZq41nGg/1NDfW9PaeteCdrDBTOzNd97deNPidjnv3Nv/k3hyN2WTjRKQyiH/WjftQgRB8n4pCu8LHFLwzsTc5w9YGGIUof0QfGxa/8lb9yjLOHbRu9Zjx+0cu+aHNZKyfpv3d4h3cY+tGHMjr3og7ziY9Cxiv9mY8QdCAdbL98Dk+Pq0GAHPpQS07Jqw8EPtiYe/yyyNxBrj3zAZbuJuunHZ4jpczv5uNv//ZvPy35hT6j133E/tAP/dChr3xAvurj7t1nB1bmSgSxuYvt4RqBDI/OP1fdKy0PAmwiv7Iyn7NVyWL8tRkh84z5ykePx00fs4+Rw+wW25K5t7CB/WeO8c+m76fDZiwu6tqYp0vZ6H6tYREFO1gdL3qevag6N58tAnS4RTJ8Hjbq0ib6pm/6pvFx1LzJtmFTzwcSNr6aeRShehkHubY1Il+QLU/mzPHmQ3Xnt/G/f+7P/bknTs+08bwfL+gTi77oED4c3/cyx1vmfP48m4N9edqBWOZf/tW/+lfHAjNzM53oF6j8nkd9aI+PdH4hhXegK3ACfu3Jf3iS/u/Ro8a65Z8dgVuUGLKHcObktDspNErS6ZrSoSSdjA4KgGJw79pJ0HPtmVOcQ5jrs1exKS8bAY4dks/KOGTw8tCHnK7f8Tt+x5hsEJb6mPIy6SBtGZUmKat+rUJGEnI0n376zcPBRBZKa+Ut0tHqUPsjm9QQBEhLTiuSEQE4T1hkEdlihbPVpsrm3CIJESdI2H/7b//t+AJnwrBSClH5Z/7Mn9l83ud+3uYTP/ETB4mj/tpKzqVHVlj9ZBWrsq1QRdIiSBE8DrKPeLQdhxVXnF/7PX/VV33VSKt8jjSDmhH6237bbxtGgVW58vTVk8z7UihOvZC03/Ht3zHyQMogcpHHOeTlS6e81ceXYXn9hb/wFwaRBi+rJ+RpNS+jAKb6TqhNCFqTIuwQshx/p7bA0MpheTMc/IM+41mef+tv/a2BM0eBc/slX/wlg7jTT+qjf+y5bNUJA4SB4UOA/qMfENXS6WvkIqy0m6HkXYSyw9Yc+k3bGcr6BFkNe/XUDqvEPvuzP3uQyowwsqWeZOOd3umdxmo3zrfy4aVvGePkj5xZaT3rG3Ukh1YfM9jmZ8H+JoZwsLJc3+s7X7HJYPQ63c/5YvgZQ1/91V8z+hpWCEtzyHd+53cOx1PfeG8+jFfjQDoygkx5zWteO97R7w51MNYQlcawjwSIK/KbfhKSecag8wd/4Af39Uh55MN8pjz1EFpNwNjiNC4PeWrf93z394yVBcgwZS4P6eSlna961asGXspK3aTXFrqEnPvopFxlapc81dmHD2PJdjT5KJWy4PT613//qAcn3gcUujeHeho3yGh5WL0h/6SBbfChX97wg284gY+81Nne4j7eaIs2zThzdBjNPrr4h5Rf8RVfsccueDGwtUcbyUrqMPK/t8VUP8hXvZUlLSzIiNVt9JaPFN6VL/1DzyPcr9Mqj+D/JIfbcbldFECm3ud93md8oOQYZ+zoO8/IqX4VTwYQ/rao8GE0h/zMP/S5ba7IiMO7ZFRfG085xBsP+p08IlLEzQc5+sEf/MHhiMufPjFelzqHPJFh8cakOktLx62d6jIf3ie72uYjhudrOg0BJd1zzz43VgzTIcpUb3MxR48Dasw6pPUrLJiYp+YxM5ff6yJwmQgYR0gH9jS7lp02j0VlG4tITHYgu+txk1Xt4X+wD9k19AWd4NcR7Fbz0VW3iV5g6/IB2K/0ygsvvGXzaZ/6acP/YhvTKT2uJwLkhV9oMQOSfyaE6X1+jvHE/6T7zVU52Dd0v3f5XcaXuMs4zM3xj/nADnOPRTY+EFlQw7ZmP86nXxT4JWl8ufPUjaybOy3+4RObpy9zvGXO1T5jy5x82kEH4Cg+4RM+YWCjP/wKiH5Q18vqm9PqND+jr/QFfoAfbsGdNvJb+ex83h5F4KoRuGVipUwY9BSgkMJJyNh2MpgTEmaDnwA73QsTZ7C5TuiZI/ePejBeNcjXuTx9Y3JBFL3kJS8ZztyyvtJwnnyRMwFYkewesYNU+MIvfNnmqafuDBngVCIDEBqcTumQhkhlys8ERc6kYzwhGD/4gz54883f/C3j5zqIRumUGXlBVpt0kIa+WnrmH/NZcSutfCK/iBiT8nu/93sPR41x/MpXvnKsoEYkInV9CaaErW5lpHmXDHMuGcdWtyJljQ1xVl9nOw6TiTyl93VSnZCb0nJ2EWpWDFtZbMxog/oqlwGOIEUwewYbJDADMkQq7I1HE5rJz37HjBHlqTe8PEMmMXoZ/vrERKKus
LVKV76wRjTbF1Q6pKq85aXN6oGcZVTIi6MOZytP4GMStW+tyQq5AwMfCTjdyDL9yiCHtXdhoL6IfD99EhcnH8msPVauajtCikzoE+TTq77lVYMI0xZfT7UDbkKkErIeCaeNyHpEOZlFdusT7WFsM8p8/CCDcJt1jVVnyIusap+fLWX+Jt3DQV8zgo0pP4/UR45ghHBhLDOKkZkcHZj7IANTX+cZwj6GIIqQQjkQSwwzZCmj1M/krF738cVP4MgCOSGvPiLpazJodULGCrm1DQpZU47TNbk3FlJPsmqFhDHgA5BfJtjawhhA5iK3cpjPfIxhQNN9DE1pjS9Gc/JEiqu3jw3qRB+ov6//s+Hv2tY+fu1ADmfnT17kGflqhbvxSEc4zJVklYNrLFqdgjgl78pVT4f2qCf9xJE31ny4MV8bQz6CwcS7+uN93/d9x4ch2OUw9hny+tF2AT7a2EPYuDQ+1JEugoV+oJu01Tue0f3GFjz1gTrACykmb4f6+ADGAPeTZ6E8yJf208dIb2Q0veOAD/1BlswpZCL5jQT9c6UI+His/9kG+jMHGbBayJZS+pQMkElj2lwTvUH26Quy6uevZJwuN8bJNZLKxwrxxg1HzZxL7ugFchDZTtneRf742PnZn/05Y/4lu+YqHxvpKAe9ZB4n035xYn4w/1g1vXZadahdDm01zjhk0TPekf9MAESnmQ/pO/aI+hs72uXDrHmI/oEJ+dZ++figRD+kzFFw/xSBK0CAzLEj2dlsLXYdOV1+oPPTe7bh27zN2wxdnLmQTjYO5OGjCBuPXM8HeX/d614/xjU7kb1ofkCQSms8mrPlZZ6lN6TJeDAG2cZ5J/Epw72xrg5sf7/wY2+kjtLJz8cuc4l50TvyNTbpG/fmXHVZO81L0idPeso8ahED+1P96Jk8V2bqZS7WbnVkX8hfOm2mK9XJHCi9/wkBa/Ms20HdelxfBPxqjG3ql42xq/QjW44/yUfi55g/88sWfW++MM6sLGcbkQ8Hm4cskWOyZd4ka/MhzWte/ZrxUZLMSc8Gj3waU8aCcUR+1Ms2f3xTC7mUTwb5637xoq7eYVfOJ5/cOeRy54e++ntfPcr1QViZGWvq6FQ3ukO8umc8GNvsBOQ5O4HdrX7Gw+033F79ZZz81F86uiULDGYsXEsHB2NZWuNNe/gKsH/P93zPY/9rRVp2q3FLd8ENNwGfz//8LxhtFk8XCbXfO/oMltpCv8CYjcxXWR7qpP0WzcEjfUMGtMnzHNEjdADcUmaeC+lVtjxfIT6QeJj6CM1Oiu08v9frInCZCIwtKQg6ITZICLrBZ1BQJu5dGzTuEzIKDCz3GQyuHe4pDmGeRZG4j0K6zIY177MhQFkhiKJo1/pGnAmN8kfwMcY4Sn7ewvmLQkwfIxIZm8gX78nfquGXfuhLN69/3XY1j7RkK8aaPKwE5kzNe9qSKUrSZItwJafqox6c0Xwh1FrKHBHBGLZncZxXX1Ntp8G5NBFZ3WPFq3RIFXkgkxgDHFkrKZEu6qQ8dfLlWD4zPiZCxoN9ho0RzqnVvb7kmvxzyEe+6usdE7Y4k53yGRGMxRzGmRWdDEvP4Agbk65JzAQJF/hxZhG0yFcGMqykM1GZ5KzQVSfOAYJbO9NW94gr5BIjxIRoxTASGGGjj7SXoy4ehsrOmEZwWS2FMCIT3mcoSTf/5E8+yAMELxzUyyRohTeCjOFOZsigjxKIRfKVunL24Y9g0h//7t/9u9FHVhErM32ibgw55XPiGT/zwWhgsFthjIyIvM5pbuo13Y8AzXYkCCEkYTCCMUOP08PYdSIdGaFWhDLSEC/61MlJk8aqQ4aaseenoIwdhCiDDfFqrPkIol/JmZUZjG4OFHIIYeojARnj3PpQgHhETNMpDCoEkvo7fGyIvNMRSFH18pGArBtHDrJhrKiXcSatNkhndT6ZYwxrPxKIzCgfoU3G6DfEsI9F6u5gKJIvmCCGloalvBi5xrP2SWMsGTtIWPqI7CrbuICNcv2iA5ZIMPWjh+FAdyBg6T376gUP9UOicfZ9wKLvMk8zaJHeyqczkFxpszKVQx/KSznwpVcYp1bw5124qae+NGbhIm+H9nz4h3/EwJuehQe98F7v+V5DryD0tXW5ZQx95kMTolJbo79Hpv1zZQggdoxpOpeDN297YqzZh9dY8rHAvIXgNRbIAH1Mzjlf5NO87YOfsUTnm5/Np8ahcWMeNM7NjeZ3MmXvefKhHGPUfOgw7s1D5JWeILvmWk6wuQDx4jAPIKnpAfMAR1dd5JtTWeZNNgwdQb+RfWNd2+gw72hTZJhOy1g3P5J97aYLlIVg9j49Rt6NHXO8Ayb0Abk2FyPEM2+NBP1TBK4AATrVOGRXsh2NE3O++ZUfmMMcYAyyV81rDnO/MSDOoghzpjmcHZA00hn75gzjh/0pjfHql25+WeiefcfeNoaMRePXXEbfsAcs/BBv/Jn/MpfSBWwCczp7kD0hNHf50JM5A9lkLtYGPoixzebLrxGNPXOmMasO80nvsAXUx3tOY/p93vt9RrvNqdrjY5AP1DnoHfvGsk1g7KMqnNjZ6sXG9aHX/Azr2NGIcXO19xBFiU++Da8PAhbdmDPIBzvHQdb0sfmEvWY+Ms/xqRz63nhg15BJ44j8safIAvIvMuVDJf9w9l3YtC9515eMhRBsJ3OL98wtTu9kPJI5H+ndG+PGk7lHOgSpcfWd3/Gd9517vGMuU65FOfSDcWGsaav5j12P1LRoS51wAdqcuY6fxpY3juEllM58q14ZqzCCiUUe8JEOHuwKePNl5elgxyJZfQzWRum0WTrjke3Ob3TAGClLz6m3tK7ZI3Bg48JaPegjuuYVr/iy8QtIvr586UF2OptdefSUj+PzxwAfhtgZbGLl0A3GMj8cZvBKf9JF9KI07APppaVf+Dk51JEN4l3y5YABfcOG8S78exSBq0TgFgOYMU9YOd1Ok7OTkDtNmO5N1gaXgZgz93FITXZ5RsDdOzPgr7JxLev+CCCFKCykiX1yHMu+IhOeUf6cNLLAUfMPcDiQ7ud3GEucLpNaJhXKz8QX5096Z2SFUqRIOZMmmhzkzwSImDXpIhXIKzLFxIwElIeDM+YrL4NUfA4TH8OR8v2u7/ruzUd/9Mfs/ykV4kQ+HD+ne5OO9iE2ETycWWcIJPkaNyZ6P2vRRu1CuDH8rDYyWeRgNMMDASN/Rq5JVugeUcoADoZCBqXVXdJ5jqxSnn+aABNpjCvt5LQjbZC3HH2GMCJev9leQNuRtcu2IobhqE9N9PJmDDF69KnD+Nb3jB0kYuLVAbGr/fBVX6s9Oc7kaXYg9I8VVdphRSlDy6pDWMkbaeA5+eK86Ie5T5AOVr6atPUBco0xxOHQD8FNXzE4TMa+4tNJ84F8gKeJH7mV9+Y0N/VaH5FZssTwJUvk05iDleczXsY4gyvkKCfRGGZcIkTIgP4wd9hrF8FpXDHKGJvkjQNFXhhv9BA5UD5Zowt8HDEvcVLJLgeVIacsJBDjmawgkhjB6kc2yApymjOs7tKqF9lGVDu0
FWlD/hm6DDl1QK4aL8aROjK0Eav0Iz0kL8YiohhRRNfEKGd8xnj2wSd6aZYp44IhrAz50WccXfWAN6cWOascH9rUmYFIF5F9JB1dyMBVX+Qe41J9Ob30MacTcWdsqaM20wfqjZDTDzD38Uwf+bgGX2187WteO9LSNe6tUPGuD4vGH8KOYQ4XDjJnntOt/tIac+pFn+lH2PmwREfSVdLLG+nHIZ9lig7VT4hKRjrZ6XH1CBifWX1orKWPyC5ZNOY4TeSeTNG1ZIA+MC7ItPHEueL8cYaMK/MKfWE8k2erlsgMeUTkGD/y87GVQ0gH0Qf0Atmg28mUecOcQ87IMGfU/MfhNCd6n2NoTChbfaQ1JjhcX/qlLx8fhOVFp8hDG8motsmfreJDjParo/mIc0fuHdoJIzYQx1I58nb6ECK9XyQoO4c2GN/qyrlc0w9J27AIXAYC7DZji46la40V871fln3bt37bXib9Qg3BZA7xQd/pVwDGlI+axiU707j1QcVca752sLOMXb8yNKf5VZ107EsLBsxfxo582OTmUPZcbGKhePOZeZKdaa5x0E10DZvQXMjW9hFTOnZfFirQTcgp8x+7xDj0MVJbjV1jD7FkbmSTsAGMY/U2puVprEvHDqcnjGl2jbYqS5lwYM/SO8a+97QFTuY+dfQLJnO3crQdDvPYNwfDn06Qx/xsNLp/rg0CbC12kQ8U3/s93zvmJbqf3+lk07F19T17h1zoX/LKv2Hz6F82F/k3F5i/fGiQp3QhVM1JfDgLqPhOfGNyYh4iU+YohCQ/zlhhB7OryKkPE2SZ/aw8Nib/07i2lZs4H4KXZ7aVYu+RV+WqE5vNu8qzJYJ6iFMXtrrxoR4+BLMB+YNsTW02HtibfAk2Ah/PIqWskOXDfdhLP2zMveqvvT4isxf4B+b6+N7IYvVQljFITyCCtZlOEbIxtQ9ZLA3flz0qT/2in9TBojD6j0/LtvCuj1HsTnM4H0LbQ+oqy/vqlcVj+od+kI5to83aSUa8F11C/9Ch6sHW1gY2lHLpEXqD3uRv802kY4uQt9mPZdvTWfgUC9V6FIGrRGBPGBvghN/q4pyEnIFhABFkBq+BKAxR7H4+Vd69g7J0Jk68+zgg40H/PFIEOGmUP2VnEnPM/aPPECcmCUqQchTHEORwffmXv/KYQvM+w4wTiBRkdCEaTASUcfKWh4nCpEHOOHMmFcQqBy71IJcUvbKsUPCePE2IFHKMVOkRLOqIlEy8ycAKCMYrBa1M94gPhBED1CSe0yTzFa/8iuFgqhcS1IRHoSs7B8POhA+7b33Vt45JR7mcTZNmfh7L4TQ5mKAYzozS+TTZ2UIDRt4xPpTr2mRvQkOe+KIonQnbpGEM5iMPDK2uQtCHBObkwuBDPuRDh0FvsuPYc+Tntn7lV3zl6F9OOYcAMc6oz6Ec9TXxM8YzeXH4OeUmNatG1JlzgdhG2skvh2vGDAfEM5jAHlbIdM4Aw1uf6uu5jnNdySHdxLjf7uH6JQOHlOM9TgJjxPXcX9JYAa6/OByIiR7HEYAXXHywYQAyFvUJA9iYYbQ5zAlkwnMyY3VA9Lz+4hwi5ZEj5hQfUhjGxis5zfwhrbzJNHLG2JQvIwvBRFcwXK1oEodoog+U5fS+ehqbyB71s9qCgWjlg7GjTcqzT6B4Mqv+VncYv/QeWSHX0iqPgUqWlSUfBDQnm2MYmTK2OHjkncNgPmSUG+MI4RBLxxHeDCeCjBo33pMPo5FxCid7C2u39ukLuofRbIzcufPG0Va6NYQ0XeDjFsf4Uz75U/Y6RH3iXOsLWLnXDxx8xqa2OOkazjO9rY1whAvjni6GDUyQwhwTbYsegCUDn36j/+CnLxjjHBykPkNZenjSZQxk+kB/zwcdghyga6zYKGE8o3N111brGPfmpOhx8iSeU8ZhMbb0KVnVv8gjDpdxQp44VGTN2PbrEvYC3Ws+JV/kzocIDh3bwsde45Tce0ZexRvb0pkz5EUPmOd9KJFW/cwdnEBznnfzUQJhTVbVUbxTXX1ENf9zusxp2uYgkwglq6nkL70yzHX0DKcwq5jJtTyMLfOqcSC98pBEdIs2eV/5DraIODrDmM0YGg/7pwhcAQJ0rHFNx9oj0735h25HTpivyTL9zd70Ydi4NFf5+GMcsA+QGsaeD5TmM/Zp5kH2IELa+GB3+miLNDPXIVKV7Rl/AJkrD+M3iwLMbeY+5Jt49l4+1Mo2fOJDAAAgAElEQVTP/IGY8RHIvOh/VSBYxJu3jTN5m8PM0ewTcw1dwz8wDxqX5jq6xSl/tqfxb97TbvMV0i8feOVND2m3cUy/IJDM++YqugGZZC60CIMOY7PKX5n0GR9HnaMTdLk8keSemTurF65gIDxkEfxfcyD9zWZlf7E5yZWV8fzYfKjQ/2TR/MR/ZccaX/rfvMbm+6iP/KixuMa8hAz18cS8yd4is8rwnvf5inxgH27Ncfw9eZB944uckUVyjxxmb5vbjGeLfeShnsbxO77DOx471ZntbS6jE6x6RwjLh81n3pa/hQrGCLuUjJvv1Mccz94zPo0F7TE+6AFj0Dgy/v3/HL6behsT6qdNfEH1oguk49PCgL7wazllsCsQucYRUl1e9AR9w55WV/OucQ07H3f4KD7wGN/y5G+af+kg411faJey1YGPqO5IX4s1YKkexqj++cAP+MBhN/jYxv+Wnj2tnuqe/Nix9KeTPUGHai/dAld+t3oa7/SLPNj/dI6t9/AgZIzunA96V73JoHQ9isBVIjD+6R0hZcQbAJRFTorGSdgTMooNckrBtTOOg2sTsTPPcp9GmSjF9bgeCJgIrBJlTFHKjvSffjdhUc4UvwmA8aUPXZuAKDkkXg4GksnQhGJypagZqBSqssiFQ1mUMufKJGdFAFIZ4UKROsgZQptjZzKIoccxlD/FaRLJYWJhdFKkZNhhkjCJmQwSj+Tylc/7KUu91N2qIF9MTQ4mdySKVQFWs85ya3WkSc9XWBOcsYOIVI4JzHiR3gRrIjUhM0KVMZ8IFBNY8JeXL84mC2MS1vBlFPuZE9LVJG0SQ4yabI1fWDG+EbH6Uxs49dqqTtpqcnVoq2f2jmZ0IMzyVZPjYFJ1KNs/zlJ3EyxZSP9pA+fBRItkVz4DgrGMUGDM5zCRqgNykJNiEtTv+tu7ZMrHBYQe5yBlK0s+DBZGEYzh415ecEh9vMP5ZzAgBbVP/XPAUP+TJfi+4Q1HcpM0NzWEExydrsk9Aw9Bq3/JnNUKVoGTNUaOvosxaJUPB85pPCCOkJzGAbKF8eRDhpWp7h3mDKStMUTWyQBDjeHHyGPEORjdjCdlWd1IZ6QsMsF4NHZ8nadTGKeMYuMjhCMSE+Et3soMbQiJ7eOIceYIDnQKp9B79JGyjW1jKGWrB5IWNvQafcOYZfjRN6n/yHj3R5s5lXQhQ/Qbv+Ebh95Dzhmz9GTyF3I0GJzGDrn/pm/65jHmOKX6R3+ReWPBuPV+8NEnPlTBDlFnVROd5YMQvaGPMz7ko1+0GWaMXsaycU8/SseoVRcyMet7TaMb9SO
HiY5BHNBVHOB8uJGHPkbWkaWlPvWckY+gp9vVt47zLD1Xd21c6euMdyWTD3MKWUQYkZ8cZN+qPeOBDJAZZI950/gk6+SObkfEOsiZ+dz48YzOnvWItOYSzhVnjTwYi05jKHOxMW9lFhkn32wW8ywih57PnEe+jHMfIhC/HK6s8Fcfjj0dxX5g28xj3QcRZJTndBY5VVekl4/QMxbahBzPh91gJDR/IeVg4n3jrkcRuCoEjBnzCOKJPceGI4PmabJsfqX72Xl0vzjybl5gF5tLfKxxbzw5EWZsSPM22TZn0vF8A/ahseuQ1nvmYPOp+deYcyB3zZvIYR+ZYzvzC+gbH3LMp8av8eVjsgUb5iEnHUA3GaNWahqffAT3bA76if4yr6gnm1Z9csCAr0J/0QtsB7YIvNgN5la2LR9FneXHXvaB1DNkHB0QMpGOUk/5Kse4hwmd6kNT7JKUr/70F7tWGZ33gsz1C9l65IStxf/w4Z1PYY5DYJJTCybIMjJR35Jh403f8pPISWSKf5x5gNwbXwhj9p+5y3gwjoybzMfSs/uUyxZkYznIGh+QrWYeM29JS/7YfMYdH8s4WJ7i2WxsNzJujGmTuviFGPkml3wohLE68QPkb7xZJMWPRc6Sb/Vg1xpPma/VT3rzK91inpaOz8D+5QPSP8YXLPi87G5zKYLcPZuSb8FHff757QdrOg1pT+fwV7xPb6kPW1ufKNeBD4CFscg3Vndt1qfi2dX8Uz4JYtsCJ3arg/4y38PFu/Klm7STrQ6jtJHfqj7sGz4v7MgHu8T8T8doq77hu8CCrPA92FDyREpHR44KbDaDT4mPFT89zxoWgctG4BaBNWiEJl+DLQNWSGApOYY4RZDQAJzPDBSTrHO+d514DXLf49EjoG8pLgQNpWw1GaJN+PIvffl4ZvJDHiAlkHvpO4aPicNKBMQHpcoANUlxyJCG7hleSACkqS9tvoSaJBliHDCTm4lHmZSpL3AmVordRIOs5ngioeSnfM6lsjmuZNRBhjmX6kpBkzcHJW3SyYpHMsyxZNSZXKQ1EUhH6SMzrLjg9FLecFE+4iptl6/VBsrKtgjy4CiqP4LUWDKRq5PJMCsWR6WmP9qK8FIfRoBJyGSsfQxw9WCEw4dRaRJjLDOIOdreNZHCWVqGvcmGwQFDK728p/2uxcnLSjD99B7v/h7jH87pdxOxOBOXw/j+hq//hmH8MHqUEQwYxIwJhjQHwzOTOay0hWPO4JHO5KuunGTprCDhkMCGLNApnAZ4WqnCEEMsmLwRgFaVITH0NUxh7OMAZyX9rL4mfmWTPXWaD+VwKhgQyI27d7f7Ys5pbuK1PtZPxoFVBTn0M7z1BaOJjiDfxq+fwnHwyCjjidGaE5FMVjifPvaQZ/2hz7ynPIe8Gc5klYFEBsgdQ5IxHaeKIcW4tapW39FDKcvHAQ4gcsnKjhCVxgS5yyFfOoPD6wONccLJo3+Mo9RJm/1ET10cxhkZZ0wvy1aHGOyMWfrPxxoGMqc1RmbqIFQuval9dBKdoc6cCVjCLm1LCGc60njgrKiPsQBX87IPRtIYX2t9od6IM2OO06rNnOgc2qz98BYaTxwdH+isbOGkM96RDOIYt/OYkw+9rh8Z0xxzDgZdjozXRoefOtI99JI60N0jfmcLqIe0nGYY0lHpl5Gwf64EATYBvczZsZWMvnfQp+TQqiWyRPZy+ABjHiS/nFqySSY4qOTauDP/e5f+iG7h8JB9c5p357Ht3timszmE+YkoXcHJzMExNI8bT+ZwThtnjrOoDuZhh9D4MWbJn3rmY7P6qLO5K2M94y8hnWYVIDuAPrEaSx39Ksv7Tk4hAsE8at4y182H8owRYzXbZ8zPe10ELhMB8sl+YksbW3Q1uaV32YchudjHxgkbjX1q8QSdEBt61svmNCSRPM2lbDLzDYLLPDiPDXYqfWCOZ6975jBejYl8bBLnny3TKepkzJj/+KjykJ4tab5FnliVaUxLy6ZIncyzbGhzGzuGPjJm5/q7NmbpHnkg7NhD4uFjnCOX2NBIKXV0+ihM77BNfKSV1tyODDLHzR+hPcuvF9i5wSR9TW/pD2XxcZbza9I1fPQIsNfIAnKRn8fGMbcZA+YYssP2ZcN4Tp6Qtz7esyvZvXw0cuKDZUhGLTM+2UXGHhkzJ7GlLG4y98Y+N48YA3w1cmWOzlgylvlk7DE+GFkyDxkL6sle9PGVrzmf4sxtfE3vqDdfmJ/Ftnawac2b8tKG2OiIT2PA+LGgQLy8jDdjU5vnQ570A9teuWxL/gXbT3vkDVcfgo0L/jDcjWW6w8phYzyH8eUXTuZuH6/0A50Bd/VlqwQf7eMLaIMPUuqGqGYv8InZqMYo8pz+Q9jnoNvYI+xsdddG9WNj83UcKYdNxIaiH+Qpnk7Aj+gHuspHB21iT9CH6qnP+cvyVcbyoI9hyvfBj/QoAleJwPindyGMKS+TstBAclJGFB1HIk6lyZRyExJyp2snZZNB49rp8MwxPx8R/fPIEKDokQeIAIqYQ+SkoDlKnH7OHgU1fzFXYSsHTJomRsQCZcvgkZ6iZ5iSF/JD8TGuKFZpTaiULgOU00i2fJEzkZhIKVKKn9NFcSNcEdMmBrJF2TIwKc/IlcnURM7wY4Dl4CQiEU2iFDP5Qx4yOBEtCGH/jOkd3v4dRt21iUMsX6sIEF2UvjGSQ7uQYCYkJKz6m6h9OeX8WmGNHDPpm3TdI0DWDpMQo5dRgLxXjo32OahOE5SVDNovDSOZ8Qx/ZL44/WNFlHxgrL3abdxKp4+1Q98im7TZF1X1QhSaYBniJnzvao/DuLb6hJHPCTDhwR82jBFtg6O262/GuD4gO8gBZL9+h7P39ZeyPvMzPnPIFgy1RZ4mSsSfyZTzr//Jhzppu6++yiVLMEYGRB6CK6JL2doYYsIz/eUnhOSaTHGAopfy7k0N6XgOE9wYM2sHJ8a41I/62eogBhZ5Z+AyqnIygDiKDDwGprFoTCGbZwNHvyNt6R0/ZyNzyBfEsPQ5OIDGmY84nqccIeIfCYqwNW8xotXLuGO45zCe6QX9z3H0jEFq7JCnzFfknR7RJuOSvMqPTEo3l22lBWPU+GCEkl96y5g1NkI6pw5wZoiqA50JJ/OsNjEs4aOecxlw4YTSvfQoIx4+4pRJrjkU+iU6J+9zBpSBaDMuGP0+iHEyOOI51FMfwVk/G2PyZ3xb4aDedBI9LI4eC17yMPaNNw6A/jAmreKm3+mz6BLjzdY90nKIOArz4bnyGco+GsG/Y3RG6GquzRfGitW9PgSTBwf58SGGXjeOjZUc+spHYfrffE5Xm+fNm+Z0c2HmdjIpT3LFuST7fm2yJvvk38dksm8+sFqH7vFujny8MndwzMhjPoaSPzJkzqH/Oabk1HyKhHaQZaf5AmFsbCLMMo6E8jEW6EnOup/tGq/aqK0ZD+pFd5iDzW2cxvlQN/Vku9CPeW9O0+sicFkIGEc+mBpHZDdklbFsTmDPkXN2QEghH2fMr/S/OdhH/PnwHMFD3yNgzA/8BvZlCC7pjQ2rEM0hbM
HodqG5WLyxbKw62A7II/ki1tybKxFnbEw2LL1hnLF/kbpv97bbxQZ0GFLJ/J6f2Jub6Brpcyg7hJW20ws+jPFtHXQeohz5xj5ihyrLyUZl/4hTv+Dggy4cojeVwd4wP5vb2B/Lca8O7Cu2PR3W4/oiYO7jR/FrfBgw57Hp2MX6mk/ND7O6nX0rHX1vHtPv5gQyxQZi17HhcrDF2LM+OBqHSFHEJ/+XvxNbiqzxu/h0tomKPAnJGvuZXR3/mL8TGxN5nPQpdxmqkw8i5m9kauZbc6T5S93JvENexgzdwLa0oEB6dilewVycdAlhYh6mVxDYxgw/D0Gb8SXk34pjz8sXJnwB5DSsc2gzzPmF7HJ+Kr+ZTjC25rSwM24RrsGfTQovbVJ3obHK5sABpN7wNI7pQgQx24DNQB7y8VlaY5/ugzkdpN8sRHnh+e3/9/CxWB18ZIMlTMmP/icndJt2z/5S2iqOfcEWYy/3KAJXicCeMDbIGBAmbIOGcZGTQUHoTaSUCcEOYWxwuDYoXTtzPYcaFUU1D+CrbGzLOo4AB8aqAMQessPJCKP8GVyIRkYcI1Cfpf+E7u3DIw0y1lcxRLB7X97IUNIhhxiqFDOF6zQhIVsRDA6ED+Uu3nMTAIIEUUP5m4Ds70m+5OP5TBhy3EwkDN84g/I1KcjT+yF9yS+FLn+Gs/K0HdFhYhoK/t69YbyZrLyv3LRf22w14asxEtozcZxZ+fmCaVIwySPP/bwneBzvge3KaBjDm6MqLxMgAxee+ga28pUfI9Z4lM7kYULWX9IKYQWzrG4yXmFjskXoI0ydjGPYGPfGLzxMkFZCO7RVPLJJHdQNbg5lI6RMdPqQ3mAIw99kq69hxpiGK6PHZKvecNDPnBL1dO9QT3XQB9pBDhk++ijyJ50VZdrB4c67I4PNZtTBKmd4KS96hpzbWoQTg0BTX0f6M+/fxBDu+oNxx4hisOjfHHnO6EUk+bCjDxg60scB8o7+Q0qSZXLz/HPbDw7IFX32qm95VbIdH23IrA9MiFSGtFUCiCqymgORxJnSryE0UxZZR1TF2CJ/jFx9HFmVj/HHIST3dAD9wOAWZ+9ebZRnvu5rJ/miXxDAHEbtDC4MZOOO8/qKl79ilOWDCzKNMWlsMBDJO70mX7KufI6EccMQNb68x1lmRL/m1UckFh1qDNBN9IE6GlPwQcqRffVBWPvII606knn5woQjryzjXJuNLW32ccnhfZjSpxxi/arOxp3+ftkXvmw/32sb4zhle18dEGQIAk5QVpTTzRwpuiFjUIhs0B7EnTE9H9qnLcpAKJCHjs8Zoau5Jrs+kOg/Mpr+I8PmAU7OZ33mZ+2JHfJNBqTXt/oVSUMmEP+cKs4Ux1C+5gNj0Pg0xjhVxgVCSH87OZLk2fyqDt73HifTO6kT+eNgI3npd3rd/IicVj75Y896hxPGKTSfI7gds3wpw4dReRlHxpBDfsadMWEcieeUwiEfS0fCzWbILAfeeMyKszzzHv3IEfTBm93QowhcJQLGCxvSmEMEZb9844DMGzPGHULWij8fSuhhOsH4ZpfNhLH5w5ig+8k1/W/eN8cYb8rLYcyzhel3tmTG3u3bT418QyCZBxz0DcLIfGpeMl7YFT7gmr/oGHMce4V9aV5E4hrH4tgL3vUrIWWbk6Shq3Kou71cEXLIab+yM/+lbog2xI4528dhc7U2On3sQhjCSF2NbfqRraN8h3xgxGaCH1ylTf7SeM43oZts3RW7PXVseL0QYJeRSzYpPY9YnP+/B5LXHMOmI3/6lV+VsUAWyQi7kv8226n8FHNXPr74eGDuUYb5M4cPIvlIIz7yJC/2Mtvcibw271jA4Jc85tD4PslrLTTnkmVjjb2Xw3hhB1rIAweH/C04YL+ScfKtjexyvww1NueDX2Y8WeRgTPMV2QV8RTZyxhc9Ymz5daMPtbCAM1zNuzleeGH7/w74dohn+onOUj5do87Bx1jj6yC22e7GJf2DfKV/+CHwMdbZAXzP4EUveFc71dW7sEZgw4PeyfGmH3rTqKMV1nCEkV/q4gS8R0Zgpn3y83HOxwX95cM2PUzX8tvng91jIRhinM3E9upRBK4Sgf0/vUMcEUBkGeU2nxQR8jiOdUID0JmJ0bWDYOdMnDCGfsKrbGjLOomAvjY5ULIMnpzuKXPP8sU/Sje5uPczY1/sGJsUMyXonUyCI82OXKZQOV+MLltbICrmvMmEewoVaWtiMYGSQ3VxrTxyxJALcZoyTBLSmViUn3jlMgQRJt4V7zAByFPd1Umo7kkjXcqev+SJNwGYGJUnTcpi7MFO3Tml2sJgtRowaYJfQnmZQEwOo407I9NEJg4WHGh9Y0L5/+ydCZgU1bm/idHEm/xv7r1JjNFErwGjxn1DBAQJEAQUkU1QMOK+xTVxAc3VxBh3E8ENRRBQFFFwV4xGjbuAcV8BEQSFrupZunuGgZn5/s972m8o2p6enpnunu7qr56n+lTXeup3zqlzzltffQed9Fwci/500tGfSpy3wEFd2VfvlXhxPvYn/pRjtnPPVPLcD+djHRPXIu4cR4der00ITENXtuvxXJv1PEu4d+KDrtwXcdVroRPbCVVvrsc+wDHiR8NBj2UbxzIH4xM8ln24JzoldBxocHA+9qHDQ8OfSpZz631wTLlPaIHFPQ1BYCsNFSx1gax07LAW5lMxYApQhnzCCyBgL9auNI4BfTT8eFFCQ4s0IM/wfMGqCCjMYBGMzqwT6YCFBNfl+qQrx9PIBATTSST9KM9YG9Dw5vrAH6wUuQZv7+kI8iygDLGOhl+wcU35JA6sx4KH++Ue6KRxPPmCRjENNSydcItCh0AtXDk/jUIaoDRcsR7iPDRy2Y88zvOGlz50Ami00vkERNMpB4KzP50ErCh48cPzRssDeZYGJ/G75uprXEeYl3jANyA+8JRyUlFR6fShwQyMonxQziiDpBkaA4SBZLy00QY/0I190IGOA41TLBTo8Oo9awefdOCZAAwjvYkPz1riivZ0rAFn5A2uj4akHx1hrkka0lmmMU6e4TmvE7rzPANIABd4nulEueb5DRDgumip+ug+FhZGAfItYIXPryNrk1/0cGXtSFFmKPt0XKhz+BoGOIOVFeWSsoW1Op0aOr+0KXkG8ywn3SkfdLRYR31Bp5s8yTOHMg9wpbzzLKI8Uq7JKxwLgCZ/kV+YqFd4SUQHm84X5RBAzb506hYvftM978lzdB55XvFCi3xLGWPmCxttz3JfXJfOOGWdMqLWR8SJZxrliBdadHiB1dqhJD7kYeJBxxkwx746UVZ5CYx+PA/4b5MpUCgFKDO0zagPqFd49gfbT5RT6i3gFHUgdR4AhecwbUbKNzOgVp/NtBWpF6j3gLjUmbQD8GcP6ApOXJuXkoBo6jydaHOynrJNW5k4MdE+oBxRx1LugGtcn7LOs4f6hHYt0JhyS73IvdHmoOwSJwxZeH7wnKCtwTOJ6zFxTtqDvAymbqdu0vawPl+od6lHqfeAPVqeAXaUbywLaVtwTXQB/vBinH6ETtwPdSiQCrCOD
sGJ5yXxZDv1Hs8TvX5wP1suDgWoi6hPeOlCW4X8SR4kP2oZox0GoMWSn/LkoGVDss6inGHQQnuW+of/TOQh6k/yKfmS+pKZthRAk/yvE31Zyh1tbdqC1HvkM/Il7XHyK/mWvMU2yjXtal5IcJ2W8hd9J17qk7+JOxPHUO7UlQJliokyQfufckk5pA3INsofGtEGoFwx0aelrNG+oI6lfFFfY3hC2aZdzH3wfOHeiTfH03/jOtS31OOcg349caK/yIsbtAa0Un6oW9GB5xx1Pc8J9qUvQX3OM4g6mHjyHKANQhxoy7MP21hHPwL9mDgvL4q5Btt5rsA7aAfQXubrB65BG+CBB5ID63H/PDe4J46ljc49aNzpUwPUaX9j2IEhCm0OzsmXEMHnJHGg30V/gr4C90yes8kUKKQCDhjzYOHBpYWNwqkzhZ0Cp8CYZZ0p2FrAtRFBoWGZiQytM+uZdb1bsJ/QKKAVJjek6aw3F0z74LJuD4YtbQ/um5Plxm/Gty3nTb3ntpwj3TGcVyuG1GsE/2ejP/sHjwkup7t2W9fpeTXkPMHlbM7Ly4HWToByGvI0doBjPLNovGNNTeOHyphnHFNr49PauJTK/uiAJjT6gCK8/afDRwMKS1v+03jCohbwyLOdvAa4BQrS0QEY0tCl4USDjAYn9QeNJxqpWCoAhLThSJ1BIxA4g9WD7ot1O41wzgn051rMjz76mAwYcIhrcAFXabTSEKdRRSOT42lgYtVE55Nzc1/MNABpXNEIU1cMnBPITMOP++NeyTOAK2c1/PzzTfkDQEUHgXsARAFbWSbedExpnHI/WD9glUvDnJBzMbMvnxiiLXkTiKqNUNUeywqAMnHk/jiG+wPa0ZElvolEjesk0FhEZzoK5G3yPMdz7zTaOQ8NU2YsWwC72hEGCNORB1xzLTq4ADw6PYAz1ZvOBg1wtgEQuQYdbRrwpDPHcQyNZ3Tjaws6MRwPsAYi0wBWS07KAnkGQEinnjSica4T+tER4944Hy/GNP10HwvzrwDpAKQlDwNCtDPLlUk/8iJ5CmseygJ5gGU6e4Be0o2ySJ6h06qW7BxP54yONNCGckv6cn46UpQp8hUdY8ojy4BdABJtTV5QUOZ5eUKnSie287KE/Ej+ZV+sn8m3lG3yO51yOmR04Lkv8j3lixc+7Mf96AtWOsfck5YNQsohEAggxH502uiM8szjRZOWZeLEvWPBSP7GCgpLIya+Nli6ZKl7NnK+oJW03ouFpkA+FaBsY/hAOwhYxfOWMhicAEIKeqhjACJMQBnqIsoLIbCZ8sbXhNQBvESknmE/QBZ5nHKnE9em/mA9ZY8yo9emLGCtSbuB+oznDPtrXCjzxBuwQx1K3cVLIupeyhovialvAb+0UXimAN6oe3neAMiod4DgvKyhjAJ7KPeAK2auzYtx2gcAHcAtsIZ4ql9i6lfgE+1Kni3cC88d4s9zgbYl62hrahuT++deaGugJ88WIJFOaIDGHEdbCrDM/jYVrwLkYyAm9Ql1CPlD60nSk/SlHct2XCJg/BB8OUgdRT6ircMLBOoQ2st8FYdFMu038hhMBh/ItB1pMwXrPfIwL2YoS7TNqXsBluR/1pHXaWvS/2Ff6lfgJS9RaadiQZs6s54yB/DlpSfnoZ7WNhznoUzzDKAdSLliAn7y0hgtaOtTFihvgF2eM7RJKVfEl0HWWUcbj3Yw7VLaDJQ/+hznnH2Oa8tyfeAzZYbyz7OKcsE9so5zUl5pF1DeiCvXw8iCibjxDOAFMdeibPK8AFJzfYAxx6Mx8aK/gbGCPmfoe9AW0ZdjnBNd0Ih2CIYg3CdtXp5FnI/9eZHMM5G2AdbFaM5zi2cacef5w0sw2su8NCf+tF9YT56hjUTa8wwm3wRfPBEHnrHkD84RfIHgbtp+TIECKOAGveOBxkOPmcqOAhecqTgVGFPIKbx0Dnn4EfI/OGulT/xZZtZl9tPGQgHuzy5hCpgCZaQA8AzQSQOMChirLizgtEJGCnv+bJoheD7z3Afi3XH7Ha7hSaOQBiiNIDSlYRd8dlMf0GhBZ4AsDTfeptO4o3HJRPjC8y+4Bi3rdeI8WJjT2AZUM1GP0NCcctsUdz4aZZpO8XjCQWCuhSsTGmU0KumIKbChg4dFAA1wGnI60aClU4e7GACTThxHB43GLOcE8BAfOnDUazoRB3Shkcd12Zdj2A/N2M79YClBI/Taa65tmq+/7nq3Lx1bYBQdVfZl0ntDexquxJFP4GiAch0avNyT7s8xaAiMQ2sa9xzHxD0CydGMbXSUsfilc6OQnv1YZh0dDO6Xe8FPLA1RTTPiQ2OeuLCd85LWxJc0A/Dx+RzH04AGEGveIK6kCQ109Ah2nDkeizRAIA1izqsTjW/9rBhttb2g2y0sjAKkH1aFdCZ5ORQsB8SA7ZQh8iZgiHJPHqAM86wljWk38hKHF0R8GaMTeYj8C5AhDzGRzuRdOlOch7xx+Z8vd504Okua9zmOTil5SiSxHCUAACAASURBVDvnHE++w5qZWfeng8a16ZBRPsjbkydNTpbJa691+VrLKPdJ3LWM8Exgf+LIvf3f/10qU6fe6UA58WfiJRiDAQKVANIaR7ahF/dCh5HOtpZPNFmw4GkHwvnCgOeATaZAIRUgb1KPAVzIn8BdrYM0HpRhQA3WxcAXnvdMlAvKHtt4mYM1MG0sXqhgDUkdQB7Xr12AXDwDdKLsAJgBw5yXcq/XBp4BuCj/wDbWUx6pl4AzWNQRL8oddR/xBy4BTYAnwB39Qoe6D6tFzsVLIups4k5dgwUy+3Ft6mLiAtQD6gCWdOZFF18dYVWNZrRJeInGdYkPM9AaeE27iL4z5Zz4ck6eHcFnAstLPl3iYCAv2Xhm6cQ2IBvH0bbR54Vut7D4FMCKl2c7MJgXJep+hJiSd0lD2jAAQ8oH7brUiXYQ7TxeFJCPyRe8KCU/U7eqFTp1HnkD44BgW4rzATeBirwsATjy0paZ87EOEEr+okwBUxVgUw7Szbw4wZiA++N6GHPccP0NEo8lfYpTZwGhcXVBXqe9zkTZpL1J/KnXeQ5Q3tAHsA7sBuhSfijnvLCiztX2JnHkyyK0BNwCg9kfAM3XfZRV3RfdcFHBNq7Hc4x7JgQE0y5l4pwAdowWuC/SgX3QmWcT5ZeyS7uTdixGKvRzgMJAYl748pKY/oE+p2hf0Cei7QrU5Ro8HzBUoU4nPsSB83MvaI7OXEN1UuMZ0h2jCvbFSpz8RD7iWUV+4dkKRCeNdaLdw0t80pp00vPqdgtNgUIo4CyMqYyZeSgxU0CZqeh5AJCRmanIKSTMQVDMMjOVcXCmUPGfUCcKoBZCXWehKWAKmAK5UAAgRgOGDj3LNMTpdGjnJxfXCOM59LnMs5rGCB0b6gSs43Rb6n2znuc+0Id9WU59trtjG775zE+3nnVcn/NR3wTPxTLnp0NI44n9UrdjlZ7OMp393LYUiyp3zvUbXKOaxmJq/PX8hNRjdAaYGbxCt6kmnD9Y9+myXjd1fz2OUM/PPXN/6M/x6Y6hDub+CVO3U1+j
HXV4qj56PY7hPkkvrsV5Uif2YX26NCVexA+9aCOkXodj9Z5Tz0tHCChPgx2ozERcaHQDIujQ0zBPva/U89j//Cjg0u7rvK7LqVfS9AWUaB4gTwQn3YcwOPHf5evA1yNu38ZG19akU0qeSs37zZ2Pcwe36bn0GsH/rEs36z5N5/rahRMd7aqqasFHIvsEJ83f31jf2OheomF5yBcIdKa5pgICOpvBAbGC57RlUyCfCvCcBgbxUpO2EeUsXf7lhREv14HAQYBJPxDgDMQCskycMNG1rfQLH+JOm4EXlVgi8mzQiboEV208+wHHwevychLAywtY6j8m6gQANYCEaxJXJsoRUIoXmbxcAlzxYggLTV6WEneeSwAr/lOv0FflywiuwUtWyiPtQuASL73SzUA7vkxCMyb2x/CAF+iAPs4NyKIe5F6IH/+JTxAgcizXo07jyyWsQXnJpPfPi20stFmPCw7u26biVoAyQR6mjBBq3tRY0wYjLwNXyau0x3TSdOc/ZRGjB15MXnjhRQ6EYrkf3J+Xl5QZ8hb5KDiRr3l5S3m+eOLFctWVV7lyTbnlGPo7XI+2Yro8nrqO8gCs5P4IeYHPfWicdT1xVqtZ4kMbkf15UUI5IV6AVsobZZtyj1a8dAFE8xJZy7neD8do+aFcq/GJuonR/SiPlB8MU3gGAI95WUw6cH3Ku07sy/GAYAArsBZ9eQbwXOE8lDfc7tE/5FieU7wg4h7RMZgWtAcA3WxjH45FE1688UIbIwzuk+cEL7YA7sDx4MS+vCDDIARrZ/QgLkHDF9ybYK2sbkn0eK7Bl3+Ab55zNpkCHaGAA8YUDB4sPPwIqQhpIFCAWEeoM4WdZQoMhZLCzkOF/zzUWEeoDzh94Oh6/d8RN2vXNAVMgXArQIONxhiNcTrsWL0Q2tQ2BVr7vG7t/sFYpR4b/B9cDh6T6+VCXSc13h113dR48D9fcaEtwWeWWKnQ2KcdAWTAUgarEhrp+bx+unu1dR2vQGp+S/1fyBi299p0mvmMlk4fnVA61FgkYflFR1itx9p7nUJqYtcKpwJtzYP0DXmWax8PdVp7LvZPd0y6dannp42Xev1sU6i58zd3fHB/+r7cO33ZTFPwGJaB54Bm3GQAAVlHfxnYx8tTQBPwyaZwKpCaH4L/yU/6gp+7Z5vOmdTQc5AXKQupEFbPlekc2W7Ta6Xu39z61P34zz1msz/siXtJt6+u4555BjR3z+yn+3Jtylbwf7r4tXYd5wQ2A94B5vpM4qtArKqxpsbKmyk1PjAzjucZkDrRb8XNDmCY8zKxL5CcNgQvrfhvkynQEQp04o0sjVpCCisPn+BModTCqaCYDE+h1VBBMQWDZQ11PaGuz3XB7QjR7JqmgClgCpgCpoApkL0CtBmw+gCoYWHBixz8uPEZItZZOlkbQZWwsNQUoB2N+xssCnk58u6777n8jsUxlkRMlr9LLVUtvsWqQCmUJaAgVt2MC4AFNvUg7rz4qoa68IuVXzT1j4tVZ4tXbhUg35ZC3m3vXXfkPaZeO/V/e+6Nl0C4HsEXNS+Hacfy9RCW5Rg/8JVRqqFSNtcHImPdjdsKvtSAveGyA+tiQHLQiro98bdjTYG2KNAJ62IauQqOFRbrW+RUYKygWGEx/9PNAGImBcUsU2CC/9sSYTvGFDAFTAFTwBQwBUpLAep/2ht82gdU4xNjQj7xpd3BlE2jurTu2mJbTgrQvuVTYT5R/ec/n3Of9WJB+NJLydHmLY+XU26wey13BajP6B/zohR/q9R1XsRzPkrx5wwAoi9t9V655xS7/1JSALCLuw18DeNSBl/qvCTGvzuDZDPIHlNbyjUuJ/CRzlgMfC2LW0V8KjM+As8Km0yBjlLAuaQAFuOKglCBMSHQGKtioHFwZh2VIIWGMAiPuRHWaajLWnGyXmGy28l+TAFTwBQwBUwBUyDUCmgbgC+Z+DSfNgZWGHyuyNSWxnWoBbObKzkFyMO0lfF5yMsRPh/FGknbvJbHSy5JLcKmQLsUoMzzFS8D8gKD8J+KFSHuKehL2zOhXfLawaZAwRXQeh4XVAxqDdzlKzncT/ESSMu0hq2J4Jo1a5y1Mi4p+CoJ62X8NPMMsckU6EgFmga9U9/FdN7oyKkfY7UwDrqjABAz0wjWkILBf0JmQDH/NdT13Kw2njvyxu3apoApYAqYAqaAKVAYBYJtgNQrss0mU6DUFciUjzNtK/X7tvibAqZA8wrQ51VjLPrWvEjSfrA9F5rXzbaYAsWoAGVWyy3lGNcRGFjquvbEWZ8V6iKWEP5mkynQ0Qo4H8ZUXlgXp8LioFsKlrEoJuMqPCZjB62LgcPMFBoNuUH2Y9bCxLJNpoApYAqYAqaAKWAKmAKmgClgCpgCpkDYFNB+b7r7yrQt3f62zhQwBYpHgebKb3PriyfmFhNToPUKOAtjgLG6pFDLYn0bypsToLCGQGOFxGpdDABWWEzIpOsUDlOAFCLrPq2Prh1hCpgCpoApYAqYAqaAKWAKmAKmgClgCpgCpoApYAqYAqaAKZAvBTrhF4UZYKyuKIDGCo6xJg76MlYLY4XG6sdYgXEQDKeDxtyIvX3JV3LaeU0BU8AUMAVMAVPAFDAFTAFTwBQwBUwBU8AUMAVMAVPAFGi7As7CGFgMNFa3FOqKglBhMSH+jIOAWIEwEFlBsYJjosQ6tTAm1P3bHl070hQwBUwBU8AUMAVMAVPAFDAFTAFTwBQwBUwBU8AUMAVMAVMgXwo4C2NcUuC/WAe+U+tiBcfqkgJYzIx7CiAxcFjXpQJhBceETAqOU5fzdWN2XlPAFDAFTAFTwBQwBUwBU8AUMAVMAVPAFDAFTAFTwBQwBUyB1imwiQ9jdUmhoBgwrL6L1dIYeKywWKGwWg6r9bHCYyyM1fI41dq4ddG0vU0BU8AUMAVMAVPAFDAFTAFTwBQwBUwBU8AUMAVMAVPAFDAF8q1AJ9xQ6KB3QQtj4DGQmBlIjDsKdUkRBMZsAxQrHAYWKxwOhtxI6v9835yd3xQwBUwBU8AUMAVMAVPAFDAFTAFTwBQwBUwBU8AUMAVMAVMgewWaXFLgwxhIjGsKXFIAh7EuBhhryDp1Q6EhgFgti1nHf6ag9TH/FSRnHzXb0xQwBUwBU8AUMAVMAVPAFDAFTAFTwBQwBUwBU8AUMAVMAVOgkAo0AWMd8A5gDDjWGUiMRTFQmOWgtbECYrYpOFYrYt2moDgYsmyTKWAKmAKmgClgCpgCpoApYAqYAqaAKWAKmAKmgClgCpgCpkBxKeCAMa4omIHEWBczY1XMf7Uw5j/gGGDMrBbECpOBwMxBUMyt6n9AMpOC4+KSwWJjCpgCpoApYAqYAqaAKWAKmAKmgClgCpgCpoApYAqYAqaAKeAGvcMdBX6MFRardTGQmBlADBjWOQiNAcBBlxRIqvA4CIeDywqPTX5TwBQwBUwBUyBMCqyra5RYvEEIra4LU8ravYRdgQ31jVJRWS/r1lF2w363dn+mQDgVqK8XiScaJB7HkMkKcjhT2e6qHBSorW2U6uoGqauzclwO6W33WLw
KOGAMLMYlhQ56p8AYFxRYGBMCjnUAPKyGgcepsFhBsVofa6huKjQsXjksZqaAKWAKmAKmQNsV+Pd7dXLjtJg8+ERC/Ki5X2q7knakKVBYBb78ql6uvqlK5jwcF89nTI7CXt+uZgqYAu1XoKKyQe57OCHX3BqTJZ+ttxe37ZfUzmAKdIgCry6uc+X4kadrpLLKKuQOSQS7qCkgIk0uKdSHMVbGCow1BBhjRQwkVlisMDjonoJ1Co2Bw8ysI2TSkHU2mQKmgClgCpgCYVPgoadrpeeoqOw3xJdbZsRlrWf1XdjS2O4nnAp8vGS9HDjck50HenLzXXFZE2kwS+NwJrXdVYgVWPllvZzxxyrZqltETryoUj5Zuj7Ed2u3ZgqEV4G759fITof40n2EL9Puiwsvg2wyBUyBwiuwyaB3qS4p1LIYUIyFMf9ZBhIH3VAoAFZgzG0Ajpl0G/+D2wt/q3ZFU6C4FOCz14qKevfpenHFzGJjCpgCbVVg3lO1stcRvnTac63sfpgvf58ak4hB47bKaceZAgVT4KNP62TfwyPSaa+10mWAJ5OnWdktmPh2IVMgRwoAjE+aWCmdfrVWvt8tIuPOrZRPDRrnSF07jSlQOAVmPFgjW/fxpNM+a2W/4b7cNsugceHUtyuZAhsV6BSNRp3/Yh34Tge/w7o4OOAdy0Bi9V+s1saA4KCVsVoVKxxmu1oWE+ryxijYkilQngq8836dXHhFlUy9Jy6JGvPPVJ65wO46bAoAjPce6svugz05aLQvux7myw13xMT3zdI4bGlt9xMuBT7+tE72Oiwih4yPym5DfNlxsCeT7ozJ2oiV3XCltN1NmBUAGJ88sVI22ycih58Ylf/o4cnosypl2WdmaRzmdLd7C58CM+fVyDZ9PNl1sCe/PtqXfYf5MmVWXKrMPUX4EtvuqKgV6IQrCnwYx2IxN+gdIbBYQ6yKmYHCGgKO1WIYMKzWxroOKKygWNehQnB9UatikTMFCqDAk8+vk5/08aTryOSnNrXrDBoXQHa7hCmQVwWchfFhvpw+sUpmz09I37G+7H6oL5OmmWVEXoW3k5sC7VQAYLz74Igcf2GlTJkVk72H+bIj7immA43tU9h2ymuHmwIFUcAB4wmV8r8HRWTBCzVy4gWVsnnXiIw9p1K++GKDuZkpSCrYRUyB9isAMP5pH09Ou5ixBRJy0Ehfug7zZfr9uE+1Orn9CtsZTIHsFHAuKSorK5ugMbAY1xQ62J1aGgOFAcYaqlWxwmMuxzJTEBazzKSWxUGA7DbYjylQpgo88kytdNovIp26RuSgMVG5+8GEWRqXaV6w2w6PAgqMJ1xRLV+uqZcHHquRvlhGDE1aRlRUWCM3PKltdxImBQDGuzlgXCGfLFkv855IyD7DfNnlUF9unRmXNWut7IYpve1ewqmAc0kxoVJ27O3Jp59tkPc+XC/jz6tw0PikCZWyfMV6G9AynElvdxUyBRww7u3JxKurxa9okFkPJKTHCF8OGuXLvfMTEjdoHLIUt9spVgU6YV2MlTGuKLAqZgYWB91R4H5CAbKCYiyLmfW/gmMFwmxTi2IFyMUqgsXLFOgIBRww3j8iP+vryaDjonLQUckKsCq20Y1LR8TLrmkKmAJtV0CB8UVXVLsXQAzScf8jSUvjbiN8mXpvXPyolfG2K2xHmgL5UUCB8XEXVMiKVRukqrpB5j6akAOPjDpwfNvMuKxeQ9s2P9e3s5oCpkD7FQgC42Ur6KuKvPNBnRx7boV8p1tEzr6sUj5dtl7q660gt19tO4MpkD8FNgLjmKt310TqZdp9Cekx0pc+R0Wd1XF1zF7k5i8F7MymQFKBJmCMlTGwWAe+I8S6mMHudKA7LIwBweqaQqFxMOS0/GcCFDPrOtYbPHZy2I8pIA4Yd43IkFMq5KEFNdJ3nC99x0bdW9OKyuQLF5PJFDAFSkuBVGDMi1MsI+57KCH9fxuV7kf6csfsuER8a+SWVspabMOuQBAYL1+B6zVxvhJ54fPrcVE5cJQvt8yIy8rVtGUNNoU9P9j9laYCqcCYu6Bb+vZ7dXLMORXyo94ROffPVfLhpwzgbuW4NFPZYl0OCgSBMfdLvcuXe4z903OkL78+Oir3zE+YT+NyyAx2jx2qgBv0DlisVsZAYp2BxcwAYg2BvlgVA5GxKl65cqW8/PLL8uKLL8oLL7zgwn/961/CzH9Ctr300ktuZjn4X9dbmNQnbDq8++67Qv6y6ZsKAIy/1TUi4y+oFKwQHVA6xpe+45LuKSqrOmagHQAXL4wWLVrkym/Y8qTdT/ueNTy///3vf7s88s1cbWtSgTGK0MiN+PWujP/m2KgcMCr5iTvrCjlRt/NMtjq4fWUgjM8Q8sRrr70mX331VdNL/0LmzWK4VjpgTLyon+c8kpDfjI8691E33RWXFV8AjTs+1pTpTz75pKkdHsa8affU9ufVq6++KsuXL+/4jFrAGKQDxrRrsSj+97t1csx5FbJNX0/OvqxK3v/YoHEBk8YuZQq0SoFUYMzBlOPVXyahcY9RvvQa48td98elsrIIKuRW3Z3tbAqUjgKdcEWBWwpCtSqmAQogJsQVBYAYaAwk1hBwzLZp06bJmDFj5IgjjpChQ4c2zcH/ukxoc/loMGzYMPnd734nb7zxRumUiALGNAiM+cQVK0Sc+jNIVu+jfLlrTrxD/DNRtumAHnnkka48W5ktnzKbTVrznD/66KPlo48+KmBpKZ1LpQPGxB5o7EWT7il+89vkJ+6Tpsck4hUOGi9btkwuuugiGTJkiNXF1h75Rh4YMWKEzJs3z7X9ACzlNjUHjNEhSv38SEIOOS4qPUdHZfL0mKxclRy3oyN1+vjjj+XKK68U2lvZPL9tn/Kqz0ePHi3XX399R2bRgl87HTAmEkloLPLmO3VyzO8rnTu43/2xSt79cL2sX5/f5x3X9jxP3nnnHffCnZfuNpsGmgfeeust92IH9mLTRgXSAWO2Ao2xNMY9RfdRvnQb6cvU2YWBxrAv2tKadhZaOW4pD1C+V6xY4TjqxtxdWktu0DsFxrikUF/GwGIFx0BiCgjAGJjEjHUxVsmnnXaa/PznP3dw6eSTT5ZTTz3VzaxPXdZ1hDaHW4MzzjhD9thjD9lhhx1k/vz5pVUqChTbIDDmkgAlBsTCZyJvTbuO5K0pL20K+9aUsv36a6/LFltsIfvvv7+ceeaZVl7tmeXywNlnn+3K9ZZbbilYLtn0TQWaA8bsSacR8PTA4zXSb1xUdhviy43TYhKNFqaM02jp06ePdOnSRc466ywr11aum/LAqFGj5Ac/+IH86U9/ckYEBoyTLimCJdxB44cTwlcCXUf5wgufL1YX7oVPMC66zBd+vADadddd5dhjj21KT2tjh7uN3VL6nn766e7F7ve+9z334l/zSzmEzQFj7l2hMZbG486tlG36eHL6JVXy7gd1eYXGtKtnzJgh48ePl2OOOcaVVcqrzaaB5gHqXox1bNqoQHPAmD3oM+PTeMbcuBw43Jd9jvBl2n1xqa7Ob3saY5nzzz/fyq49v1qVBy699F
JZtWrVxsxdYktNwBj4qz6MFRTzposZC2OAMRWeWhmzDGjmQde5c2dnlQJh53PX999/34Xvvfee6PzBBx+49YSsI7Q5vBrwQMVC9cc//rHMnTu3xIpFYaKbCoxpyDLH4g3Oj/GeQ3zZe6jnltfV5df6IXjHlPdXXnlFvv3tb7uXPjRgrKyGt6y2Jm0//fRTV66/853vuE+gg/nGlpMKtASMKeMMpvXgYzXSe4wvOw9KuqeoqspvI5fYUUf37NlTevfuLW+++aaVa2uHNOWBe++919XXEyZMEN/3XV1UbmU6k4Ux5ZYJaMzo7L2OTn4lcNNdMVn9VcdBY1y/DRgwQIYPHy7PP/98U3q25rlu+4avfv/www/lsccek80220z69etXVkW5JWDsynJjoyx6q07GnFkpP+4VkTP/r0re/yh/7ikwvDr++ONl8803l0GDBrkvAvgqwGbTgDyw7bbbyr777utcd5ZVYW3hZjMBY8oxM1/nzrw/IfsM8WWfw325e17CDTjdwqnbvJk6F0NJ0mzkyJFWhu05ljEP8OXeD3/4Q9l9990dB21zxuvgA5sGvVMfxoBhtS7WkIoOcKwWxhpyDMAYa6XFixe7fSi8aoWsYXCdLhPaHF4NyNennHKKAeMMBTwVGAd3TSQaZcachOw62JN9hnry0FM1rgIsxFfCCoxp2F5yySUuWlZWw1tWW5O2ZIaTTjpJvvvd7xowDhbYwHImYBzYTRjZee4jya8JdhnkyYy5CQeSg/vkelmBcf/+/WX16i/d6VuT/rZvOJ8DDEaMn9itttpKDBhH5LgLKkQHvUtXBvkiYObcuHQf7cu+w5MD4fF5bEdMjBMCMD7hhBOc9YqV0XCW0damK3kRAx4Dxt8sl2ip06sL18mRZ1TI1r09Of+KKvng4/y4p6AfPXbsWOnUqZMzsAI62WwaaB7o0aOH7LLLLi5PaN60UCQTMFZ9KM6MMzD17rjsPsiXrsN8mfdEss+s++QyfO655xwA7Natm2s3aRpaaOU5XR5gfBA4Kca11MmlOjlgHHRJAQRWUEwIHAYgKTBmWeeghfHChQvdeipirI+Z6ITorI0dXV+qglm8s1fAgHFmrTIBY8pLTW2jTLs3CY0PGOHLI0/XSmV1gzQEGruZr9C2remAcdvOZEeFUQFcD+GSgk+hbfqmAtkCY46MxRtlzkMJOXCEL3sM8WT2/IQbuCNfRTwIjL/8MgmMv3kHtqbcFDBgnEzxTBbGqXnC9+vdQDtA4wNGRWXKrLjzqZivspt6ff0fBMarV6/W1RaaAgaMe3uybMU3gbFmDQXHL7+ehMbb9/Nk4jVV8t5HDOy+ESrr/u0Jg8CYcqrXbs857djwKICVsQHjb6ZnNsCYoyhPuHS8eXpcdj3El56jfHn06Rr3xW6u62QFxoMHD/5mhG2NKZBGgX322af0gTE+i9W6WP0XY2UcnIMWxmpdrC4p8MWkFsbsh1UxnQ9CJgXGusx6qyjT5KYQrjJgnDlRMwFjjqSc1K1vlKn3xGXXQz3pfXRU5j+VkGgFZSy3jdlgTA0YB9Ww5VQFDBinKrLp/9YAY2kUiccbZPa8uHTFB9sw30FjPnvPdSOXWBow3jSt7F9SAQPGSR1aA4w5AmiM/0TcU/Qck4TGq77Mb/2cmmcNGKcqYv9VAbMwzgyM0Un7oy++tk5Gn1khvxzoycXXJgfCy6UruCAwXrlypesbazpZaAoYME6fB1oDjOkX+9EGuXFqXHYe4EnvMVF5aEHyy71ctqcNGKdPK1vbvAKhAMZYCVdWVroZH8bMamGMVTHgmIqOWa2NAUoAY45TH8ZYGLNdAbFCYw1VRipnraB1nYXhVMCAceZ0bQkYczSVHJYOQOPdD/ek/zFRefCJGvEdNM58/rZuNWDcVuXK47hMwLimplEAJks+21CW87LlG+S2u5ON1YuuqG7Rj1qyPhSJxRrknnkJ2XeYL3sf4cusBxPOL1suG7nkTgPG5VFGW3uX2QDj2tpGWf1lvSxdHt6y/eyLNfKrgS27pAjq6/n1MvPBuPx6XFQOGhOVW2fGZeVqoHFwr/wtGzDOn7alfuZ0wJg6Zc3aelkS4nL88sJ1MubMCtnx4JaBsaYxdfG/Xq2V0WdVyK8O9WTC1VXy9vt1kitobMBYlbYwnQLpgDFjXXy+sozb059vkOvviMuPenoy8epYOtk2WUcZpt6NeElo3KW/Jz1H+/Lg4wnnsiJX7WkDxpvIbn+yUCA0wDgajQrWxYBiwng87kAxAFjdUrDMrOBYgbFaGCswVgtjOiBMCod1vf7PQl/bpcQVMGCcOQGzA8a8YBGhsz51dlz2OtyTvuOi8sBjCff5TT4MjQ0YZ063ct+aCRh/+PF6uWlaTM77c5Wc+6fym/9weZUMPSUq3+8WkQlXVQsAvaWJOpEy7iyN5ydkr6G+7HaY5z5396K5BU8GjFtKjfLcng0wXvrZBrnlrpiQx8+5LJxl++QLK2Wb3hE54aJK+Xzlhqyhb8Svl1nzEtLvmKh0PzIqN8+Iy6ovk67Z8p2jDBjnW+HSPX86YMzHn1Nnh7eOpu1xysRK2WWwJ7/s58mylc27pNCU1X5pfX2jvPA1NN5psCcXXJmExrlwT2HAWNW2MJ0C6YDxv15bJ5fdUF2WbWn6D+f/pUoOPSEq3+4akUuuaxkYoytlGfzk+w0y6c6YdO7vSdcRvtz/yNd95hy8yDVgnC4H27pMCoQCGOOOAkthQkCxWhdrSCWHpXEQFAOUAMCAZh30btGiRc6HMR2P3kE70QAAIABJREFUZIFN+i9WUKzrtWLOJKxtC4cCBowzp2M2wJgzaJkBGt85Oy67H+ZJzyN9eeCxmpy+NdXYGjBWJSxMp0AmYPzkszXSf5wvDOLWZ6xflvNBR/nS8yjfWRpna51EGQca19Q0yJyHE7LH4Z50PuRraOznDhobME6Xo21dNsAY67sBx0ade6ReR4WzbPMytscYX665LSZrI7hPyz5v8HKH0dlxHbXfcF9unhFzlpzZn6Ftexowbptu5XBUOmBMnfSb8VHptN9a6R3Scnzw0b4cOMaXcedVyldrWwbG5AVtZwONX3wtaWm8wwBPfn95lbzzfp1zD9eePGPAuD3qhf/YdMD4xulx2bp3RHYaiKFQOOvclvoJtKcPHufLnfclss4E2p7GQvum6THp8hvPGWLQtsbPcXu//jFgnHVS2I5fKxAKYAz0BRYrOFZQjCsKllOBMQAYoMQcHPQOYAxYZrtCYnRSUMwyhTj433JSuBXIBIwrqxrk4yXr3eASDDBRbvP7WGLOiEun/SMy/oLKFjOCNmYbGxrl1hlx+eVgT/Yb5sv8J2sELVvTsW3pYgaMW1KovLdnAsaPLqiRA0f6cuJFlbLguRpnrYPFTjnNz79SK68tqpUvVmOhmD1x0jJOh5WB8HY71BMG4bn7gbjg07i9jVxyrQHj8i67zd19NsD4uRdr5YCRvpw6sVIeWhDesv3iG+tk5eoNsqG+9WUX/4kzH4hL11G+7DrEl1tnxJxPxeZ0z8V6A8a5UDGc52gOG
Hc7Miqddl0rTz0fzrqZOhjo+/7HdbJ+Q/YmhVoHE/KCbOQZFfLTPp77Yuq9D3HFmP0zITVHGTBOVcT+BxVIB4yvmRKXLbpF5MxLq+Sl19eVVTta+wyU5TfeXOcGlA3q1dKylmW+Dph8Z0x26OfJHkN859KxvX1mA8YtqW/bUxUIBTAG+gKLdfA7BcaEQGN1Q0FlpzPrgMLpgDEisU1DXabw6jKdE5vCr0AmYPzMv2pl8G996THCk4NGld/c+0hPdhvsyWZdI3LyxKqsMoNWgOx8w23VzgLxgOG+PP6PGuFNaq6gsQHjrJKjbHdqCRh3H+HLOZclP+lGJPJtuc6tzSTBMn7vvLj8aqAnXQZ4MvfRmpxAYwPGrU2R8ti/NcD43D9XOf/kvAwJc7lOPrtan/4MhHfXnLjsNsSXXQ715PZZMfclUOvPlN0RBoyz06kc92oOGB84OiqdfrVWqmPhLsP6fGpN2gfrYF6SHXFy1Fl5nv/XKvno07ZDYwPGrUmF8ts3HTC+dkpcvtMt4vz4oojm53IM25IjtCxjhHHdLTHZ7mBP6DM/8Uz7+swGjNuSGuV9TGiAMS4p1HexDnqnfoyp5LAcBhLjt1itiwkBzerD+M0333T76IMMOEwnRENdT5YxYFweBScTML5rbo102nOtbN3HcyOM9xjDp6DlNTOq+qDjozJ5erxVGUIrwetuqZYuhzASrC+PP1PrLI1z8S7GgHGrkqPsdm4tMC47gXJww1rG756bhMZ7DPHkgcfbD40NGOcgcUJ4itYCYwa+y9ULyrDJSdnF0nj6fXH55UBPdj7Ud66k2mvV1JxOBoybU8bWZwOMTaX0Cmgd/M8Xa2XoSVH5WV9PLrqqyn0Z2RZLYwPG6XW2tUkFsgHGplXbFaira5ArJ8Vku96eHDzGlyf/WSuVbTS0MmDc9nQo1yNDAYyBvmplnAqLg24pWAYYA44VHisw7ty5s+CSggoRQExFqyGZg84Is1bABozLo8hkBMYP1Djr2j9cUS2+Vy9ffFGe89q1uHhp/WduWpb+NqXadUr7HxOVR54GKPGipvXnC+ZIA8ZBNWw5VQEDxqmK5O8/5XzG/XHnE3r/4Um/5e0ZCM+Acf7SqpTPbMA4t6lHucWNzMw5cTf41q8OS0LjisrcfQmkMTZgrEpYmKqAAeNURVr3X9vZz72UtDT+3/6eTLiqbZbGBoxbp3257W3AOH8pruWYgaWvmFTtoDFjrTzxbNtcOhowzl9ahfXMoQDGwGJmdUmhlsXqmgLrYgCShmplDBBOHfROgTEJTgdErYv5T4FViExoU/gVaAkY86nNJddnN/Jp2NXSCq2198mnNowEu9MgT35zbFQeeqr90NiAcWtTobz2N2BcmPTWZwLWTDPuT8ivDk2O9jznkRpnwdiWrwkMGBcm7UrtKgaMc59ilF8G2MEHedI9hS9T74lLRUXrBtNrKWYGjFtSqHy3GzDOTdpTll94pVaOOCUqQOMLr6ySD1vpnsKAcW7SIqxnMWCc35TV9jRGVVdMrpbt+3jS52jfGVq19kWuAeP8plUYzx4KYIw7CnVJASQGGOusPow1pMJTC2OgEqAZlxRYGC9evNhZGFMoFQyng8ZkBC24YcwUdk8bFWgJGG/RNSIXX2fAeKNirVvScpSoaXQjwe482JM+Y6My74ka5zOxrZ8MGzBuXTqU294GjAuX4lrG69Y1yCzcUxzmyZ6HezJ7ftxB49aWcQPGhUu7UrqSAeP8pBblF1cUs+clHDTecaAnd9wdl+rq3BlNGDDOT9qF4awGjNufiloH8+IWaDzstKhs09eT31+edE+Rrf2TAeP2p0WYz2DAOP+pq2WZcQau/Boadxvpt7rPbMA4/2kVtiuEAhirdTHQGBcTuKVQVxSECosJqfCwMAYIM7P/scce64AxPozZrttIbAonHREmQgXIYcsIdj/pFTBgnF6XXK7VCrCmplFunh6TXw72pNtITx58IiHV8bYNLmnAOJcpFL5zGTAubJpqGV+/vkHuYSC8wzw34OWsB+Lus/fWQGMDxoVNu1K5mgHj/KYUg9LeOz8hOw3ynS/UKbNiwujtuZgMGOdCxXCew4BxbtJV62C+6PvXa7VyxKlR+e9eEfndH6vkk6UbJJuvfQwY5yYtwnoWA8aFSVkty1VVDXL1TdWyXV+MMHw3sDTrsmlPGzAuTFqF6SqhAMaAYqAxoFgHvlO3FAqOAUi4pAAWM+sgeByrFsY66B3AmEnBsf5XcMy24HKYMoTdy6YKGDDeVI98/dMKsAH3FFPjskN/T/Y63JMHH08IILm1kwHj1ipWXvsbMC58ejeV8YZGB552Gew58DTzgbizYMw2RgaMs1WqvPYzYJy/9NayG4s3yH3z47JtH0++18OTO2bFsuqcthQzA8YtKVS+2w0Y5y7ttRxzxpffqJXDTvQFt3onTaiUzz7f0OKFDBi3KFFZ72DAuHDJr2V53boGueamKtm2T0R2O8yXuY8lhHq6pcmAcUsK2fZUBUIBjIHFamWsLikUFAOG1XexWhoDk3BLAQjmOCyMu3Tp4ga9Yx/W0/lgplAys45QQbGGqYLa/3ApYMC4cOmpFSDva669JeY6pfse4cv8J2tabclkwLhw6VaKVzJg3DGppmUca6bZD8Zl10Ge/KK/J/fMT0gs1nIjl1gbMO6YtCv2q9Ime+mll2SrrbaSCRMmiO/7rs0WjPdzL9bKASN9OffPVbJ0+YacwM7g+cO8rGW3OtYod8+Ny/e6R+S7PSJy1+z2u+QyYBzmnNO+ezNg3D79Uo+mHDMzrvS/Xq2VAcf4snnXiJw8oVK+WJUZGhswTlXT/gcVMGAcVCP/y1on161vlKsnV8s2vSKy3xG+PPQUhlaZ29MGjPOfPmG7QiiAMW4lFBgHLYyBxwBgZgASlR0zFsZBYBy0MGY/BcWpIYnvKtqvQXLYMoPdzzcVMGD8TU3yvYYytq6uUa67JSY/6xORnqN9mfdkjdTUZm9pbMA436lU2uc3YNyx6UcZX78+CZ72ONST3Q/z5N6HElKdBTQ2YNyxaVesVzdgXJiUaWhsFNxT3DU7Llt0i8h/9PBk1v1xydYParpYGjBOp4qtQwEDxvnLB7inwKdx/3G+/GePpHuKlRmgsQHj/KVFGM5swLjjUjGeaJC/Tq6Wn/WOSO/Rvjz6NOyr+T6zAeOOS6tSvXIogLG6pCAEEuOaApcUVG5YFwOMNWQd1sLqx1gtjIOD3tHxYGI/tTbmvwLkUk1si3frFTBg3HrNcnEEQImB8G68MyZdBkSk77ioPPhEjQNKbGtpMmDckkLlvd2AcXGkf+26Rpn1YEL2GepL1xG+zJ6fHOwyU+wMGGdSp3y3GTAuXNo3NDQKvhKxLv5/PSLy3708mXl/29xHEWsDxoVLu1K7kgHj/KYYL27xaTxovC8/7hWRc/5UJctX8PXFN9vZBozzmxalfnYDxh2TgpRViutar0H+OqladugTkX5jo/LwghqJN+OewoBxx6RVKV81VMBYB7wDGAOOdaaSw6IYSMxy0NoYyKyD3i1e
vNjBZQofnQ+FxQqKgyHLNoVfAQPGHZvGjM5+84y47HaYJ/2PicoDjyWksjrpKiZTzAwYZ1LHthkw7vg8oB3S6uoGmTk3IQeM8KX7kb7MepCB8JLjCKSLpQHjdKrYOtpk5pKicPlAofGM+2Lyk96ebN/Pk+lzEg4kp2FNGSNmwDijPGW90YBx/pOfL/pefL1WDj8xKlv3iTiXPUucy55NobEB4/ynRSlfwYBxx6VekluJ8IUA0HinAZ70HYuhVfov9wwYd1xaleqVQwOMcUXBDCTGupgZq2L+q4Ux/wHHwCRmgDAWxuqSAmCsrirofCgUVnCsHVzW63KpJrzFOzsFDBhnp1M+9qKM6VtToPE+w3zpd0xU5jyccJ/EZrqmAeNM6tg2A8bFkQe0jFdUNMjMBxLSY4wv3Ub5Mn1OXPxoemhswLg40q7YYmHAuPAp4qAx7inuj0vn/p78cqAnU2fHpaKCMT+yj48B4+y1Krc9DRjnP8Wph/l8/YVXa2XYqVH5aZ+InHVplSxZtqlPYwPG+U+LUr6CAeOOTT3KMW5m+EIA9xSMEdL7KN+5e8P4KjgZMA6qYcvZKBAKYAz0VbcUCovVuhhIzAxAAgbrrNA4nUsKhFNgHITDwWUKpk3hV8CAccemMeWMTumatQ1yy4y47Dfcl4PH+nLPvHjGkWANGHdsuhX71Q0YF08KUcaZsSq+e15Ceh3ty77DfZlyd1w8/5vQ2IBx8aRdMcXEgHHHpYbzaXx/XHYa5MlOg3yZMismFZUNIlk2kw0Yd1zaFfuVDRgXJoWogxM1DU3Q+Me9I3LyxCpZGoDGBowLkxalehUDxh2fcpRjoPGKL+rlmltisttgT3oc6ctdcxMShMYGjDs+rUotBqEBxoBfXFLooHcKjKngsDAmBBzrAHhYDQOPOQYL4y5dusiiRYvcOjoebA/OFEIFxoQ2lYcCBow7Pp2TZa9RIn6D3DYrLnse7knPMb6zSKSBm24yYJxOFVunChgwViWKI6SMMwOZ7p2fcFYRfFFwz/waaUhhxgaMiyPNii0WBow7JkUot0yU3Wn3xZ2VcZeBnrz4Gq7gsiPGBow7Ju1K4aoGjAuXSpRlLI1xT3HYCVH5Ya+InDqxSurWJdvZBowLlxaleCUDxsWRapRjoPGqL+vl+ttisssgT/qPj8qTz9U2RdCAcZMUtpClAqEAxlgXA4rVhzFWxgqMNaSiw4exupzQUF1SKDBmPzoeCoeTBY/P65INXw2ByTaFXwEDxsWRxpQ7ZqDxzXfFZcdDPDnoqKjMnp+Qdeu+2Sk1YFwc6VassTBgXJwpwyB4+DPGZ/leh3vO/UzqxzwGjIsz7To6VgaMOzYFNmxolFtnxB1k+l63iLy6qE7Wb/hm3ZwulgaM06li61DAgHFh8wHt6edfrpU9D/flO90icuYlldJQnyzHBowLmxaldjUDxsWVYl9+VS8XX1vlyvHBR/vyxDM1TRE0YNwkhS1kqUBogLFaGKe6pKCCU9/FhPwHFgOUAMjqkgJgTEeUfeh4MGmocJj/LOv6LDW23UpYAQPGxZd4a9bWy6RpcdlxgCd9xkZl7qM1UlO7acfUgHHxpVsxxciAcTGlRjIuWDY98nSNDDo+Kvse4cnkaTGhrKdOBoxTFbH/KEC7zAa965i8ULe+UR59ukb2G+HL//SIuE9h10RoK2cXHwPG2elUjnsZMC5cqvPC9pWF62TkGRXyo54ROe73lfLJkg1NBlMGjAuXFqV4JQPGxZNqX62pl+vviMkvB/uy9xG+3HF3XCLexva0AePiSatSiUkogHE0GnXgVwe+08HvsC4ODnjHMpAYmMQMOFZg3LlzZ+eSggoRS0Z1R0EnhFkti9XSsVQS2OLZPgUMGLdPv3wdTWV47W1x+UmviAw7tULe+HfdJpcyYLyJHPYnRQEDximCdPDfWKxBHl5QI0NOisoBI325aXpcvlyz8cueYPQMGAfVsGVVwICxKlG4EOt/3EI9/myNHDjKl616ReQvk6qd/0Q+ic12MmCcrVLlt58B48KkeU1Ng7z0Rq2MO69Stu0TkZMuqJQPPl7f1PclFgaMC5MWpXoVA8ZFkHKN4lxR3DA1Jnsc7ktXxgOZFXfGF8qxiKUB4yJIqxKLQiiAMa4oAL+xWEywMCYEFmtIJccMRNIQcEwHA3cWxx57rACMFy9e3OSygoKloJhQrYyD60ssrS26bVDAgHEbRCvAIZFog0yeHpf/7RuRsedUyL/fNWBcANlDcwkDxsWTlJWVDTL/yRo54pSo9DrKd4NbAoubmwwYN6dMea83YFzY9AcWx+K86Em4MQV+2tuTy/5W7UZox2NbsHPaUswMGLekUPluN2Cc/7RPJJKD3R37+wrp3N+TUydWyjvvb9qmJhYGjPOfFqV8BQPGHZt6fNGz4osNcsMdMWEMkIPG+HLnvXHBwCq1PjZg3LFpVYpXDwUwBvoyKzQGFgOOdbA7tTQGElPhaQhA5hgd9I6OKMcwBWExy0xa4IIA2W2wn9AqYMC4+JIWmHTH7LgMPC4qY8+ukAXP10p19abfvpqFcfGlWzHFyIBxcaQGA2U98HiNDD0lKn3H+nLbzLis/qp5WEysDRgXR9oVWywMGBcuRYDF1Ln3P5IQfCNu/TUsXvb5BmktLCbWBowLl3aldiUDxvlNMWDxcy/Xym9/XyE7DfTk9Isr5c1369K6kzFgnN+0KPWzGzDuuBRsbBD3sva6KTHZb7gvfcf5MmNuXNZENu0bawwNGKsSFmarQCiAMdAXK2NcUWBVzAz4ZcYnsfowVoAMTFKXE4BmBcYLFy50FsYKhNlHLYpZp8A4W3Ftv9JXwIBxcaXhylX1zrK437iojDu7Qp59sVbiieSAeMGYGjAOqmHLqQoYME5VpPD/q6obZM7DNXLYiV/D4lkx9yldQwPlufn4GDBuXpty3mLAuDCpT9msrG5wA872Pioq2/Tx5IpJMVm2gq/2NhpWtCY2Boxbo1Z57WvAOH/pTduZNvTR51TIToM8OeOSSnnznTphAMt0/V0DxvlLizCc2YBxx6UilsXX3haTfY7wpd8xUZn1YFzWBnwWp8bMgHGqIva/JQVCBYyBv8BiHfiOEOtiBcZAJCo8QDAh/wHN6pJi0aJFbl9EUxcUdEKYdR3rDR63lK3Cs92AcfGk5YpV9fK3qXGhk/rbcyvk+VdqhYGy0jVsDRgXT7oVY0wMGHdsqsTjDXLfQwn5zbFROWi0L7ffnbQsTsLiDLTYLIw7NuGK+OoGjPOfOA4WVyVhcc8xvmzbx5Mrb4oJdXOmlzwtxcyAcUsKle92A8b5SftEolGeebFWjjyzQjofkoTFb73XPCwmFgaM85MWYTmrAeOOSclVXzKmT0z2PNyXPkf7MuvBhKz1kkaOzdXLBow7Jq1K+aqhAMYMegcsVitjILHOamFMRceyAmOAUuqgd2+++abbRyExcFgBsUJjElutjks54S3u2SlgwDg7nfK91+o19TJpelx6HOnLMedUyIuvrRNGZm9uMmDcnDK2HgUMGHdcPmBwnbmPJqTPWF/
2HebJLTNiTT7W0r38SY2pWRinKmL/UcCAcX7zgcJiXMh0H52ExQxwR92sUzblV/cNhgaMg2rYclABA8ZBNXKzjKHFcy/VyugzK2W7fp6cMrFSgMWNDc23qbmyAePc6B/WsxgwLnzKrllbL9fdHpOdD/XlwJG+3DUHy+JN3aimi5UB43Sq2LpMCoQCGOOKArcUhGpVrJbFhLiiACBR2QGJNQQGA5qxMO7SpYsAjNmmwJjGrwJjhcQKjtvaMM6UGLat+BQwYNzxaULld/s9cefAf/TvKuT5l2vdJ3OZYmbAOJM6ts2AccfkAayaHlpQI72P9mX3IZ7cODW2CXDKJlYGjLNRqfz2MWCcvzRXWPzo0zXy63FJNxT/d221BAenbE+b2IBx/tKu1M9swDi3KQgs/tdr62TcuZWyXV9PTrqwUha99c0B7tJd1YBxOlVsnSpgwFiVKEy4NoIhVUy6DPJlryOSX+oBkLOZDBhno5LtE1QgFMAY6KvAGJcU6ss4aGVMRQc4Bhir5TCD33EsPow7d+4sixcvbrIw1savwmNE4zgm1un2oJi2HD4FDBh3XJrSSQUWT78/IX2PicqI0yrkHy/Uyrq6zFYQxNiAccelWylc2YBx4VOpOtYgCpz2OtyTv90ec8CptXWpAePCp10pXNGAcX5SCY9sDE75+DM1Mvj4qLNInHBVtaxcnWwPt7b8poulAeN0qtg6FDBgnLt8wNc9L72xTo47v1I690/C4oX/zg4WEwsDxrlLizCeyYBx4VIVMMzXeTsN9hwsvmVGvOlLvWxiYcA4G5Vsn6ACoQLGuKRQH8ZqYawuKQBIAGMgsVoZswxoVh/G6pJCrYpTQ4TTxjGdE5vCr4AB445JY2DxV2vqZfqchPT7bRIWP/VcrSRqsit3Bow7Jt1K5aoGjAuXUknrxHp5aEFC+v7Wl32GeXLjnTGhwav1aWtiY8C4NWqVz74GjHOf1jRz/YoGeeTpGhlycoX8b39PJlxVJZ+t2JDTixkwzqmcoTqZAePcJGci0SAvvl4rx19YKTsP9OSUCdlbFmsMDBirEhamU8CAcTpVcruOOpkve26eEZdfHepJ1xG+TJkZlzWR1rWnDRjnNl3K4WyhAMZAX2Cx+jAGDKt1sYZUdDr4HSBYLY05Rl1SqIUxCU/nQ2e1LFaArNvLIYOU+z0aMO6YHLD6q3q54x78nEZl5BkV8uQ/ayUeb9myWGNrwFiVsDCdAgaM06mSn3WVVfXy4BMKi333CV3Ez+7FT7oYGTBOp4qtM2Cc2zxAx9SLNsi8J2tkyElR6TzAk4lXV8snSzfC4ra88EkXSwPG6VSxdShgwLj9+QBDixderXWWxbsM9uTUiZWy8C3cL2bfpiYWBozbnxZhPoMB4/ymLnUyA9xNnh6XXQ/zpNsoX+6cHRfPb/1X7waM85tWYTx7aIBx0CUFEFhBMSFwGICkwJhlnYMWxgsXLnTraQRjfcyk0JiQ9dpA5r9N4VfAgHHh05hBdG66Ky4HjfYFn8VPP5+ExVr2somRAeNsVCrffQwYFybtcUPxwOMJ6XW0L3sM9eTmu+IScaM3t/36Bozbrl2YjzRgnLvU5asAOqEPPFYjA4+Lyo6HeHLJtfmBxcTagHHu0i5sZzJg3L4UTdQ0yvOv1Mq48ypkp4GenHZxEhZv2NA6WEwsDBi3Ly3CfrQB4/ymMF/d3jgtLrsM9qXbSF9mzI07d1Gt6RtrDPMBjJWT6TXyFXK/xuDypW7z5w0FMMZnsVoXq/9irIyDc9DCWK2L1SUFPowZ9A4LY/ZTS2K1LCZjauYkZH1bCmjzyWBbilUBA8aFTRkqxOtvj8kBI3w56qwKN5Izg3S0trwZMC5supXa1QwY5z/F6KhiWbzvCF92OdSTW2diCZGsO1tbnoOxNWAcVMOWVQHaZi+99JJstdVWMmHCBPF9/xv1xnMv1soBI305989VsnT5BgGM2rSpAmjiRxvk/kcT0m+cL788xJc/XlstSz/PvWWxXtmAsSphYaoCBoxTFcn+f01tEhYfeWaFbN8vCYsXv1PX4qDRzV3BgHFzyth6FDBgnL984EcZ4C4uv+jvyX7DfLn7wYRUVmHI2LZr5hoY82z4+OOPZdWqVU0Gl22LWeajYHwffPCBcyerjC7zEbY1VwqEAhhjJczgdcz4MGZWC2OsigHHZGZmtTYGKAGMOUZ9GGNhzHYFxAqHNVTR6ey2p8Or57Gw+BUwYFy4NOLzV2Dx3kf4MvqMCnn5jXWyfn3bypoB48KlWyleqUVgPNKXY3+ffGHxwSfrpezmj9fLJ0s2uE/S29Ig5SUPfk93HOzJ9v0jMunOmFRVJ7/KaW/dacC4FEtc/uOcLTA+cJQvx51fIU+/UCsffBzOsv3xkvWC25f6+tb1JinrDHDHVwF9xvrSZYDnYPGKVRtHXm9v+U2XEwwYp1PF1qFARmC861pZ+FZdaOvnDz9ZL5+vrJcNG4tf1pmirq5RXnp9nQw7rUK27u3JyRMq5a336qSxHR/HGjDOWv6y3DETML7gymohP5ddW/rre8aVU7SibYUPMIzBxQ97ebLboZ7MuD8ufL3H1Nb6ONfAGIh7xhlnyN133y1A3XxNDz/8sIwbN04Yc0w9AeTrWnbeTRUIDTCORqMukwKKyazxeNyBYgCwuqVgmVnBsQJjtTBWYKwWxnRAmLRA6nr9v6mU9i+MChgwzn+q0kmlQmSU1z0O9+Twk6Ly6iIs/VvX2Q3G1IBxUA1bTlUgEzB+6tkaB0t26BeRg0b50uvI8pt7jvBl4DFRueeBhNStz74cUpaxLMaNzM/6efKTPp5cc1O1xOKb1qWp6dGa/waMW6NW+eybDTB+8dVa6f9brGYj0n2EF8qyfdBIX35ztCd33huTaAUW/dnlAfarijXIwwtq3ECzP+vLAHfVwngCOuWr7WvAWBW2MFWB5oBx//FR6bTPWuk5MqTleJQvvY/05eyLK93Ln1RdMv2nzl7473Uy/PQK+XGviIw/r8LB4kzHZLPNgHE2KpXvPumA8Y3TE/KjXp7Qnu4zuvwvEw5iAAAgAElEQVTa0vQfeo705bDjojLnoUSrMoerk6sbHCDerFtEdujvuQHuctGezjUwnjp1quy4447yt7/9zVn/bnKjjUmORhuNNkRqO0LXpa7nHLpNz3fIIYfI9ttvL6+88ooBYxWlQGEogDHuKLAUJgQUq3WxhlRyWBoHQTFACQAMaNZB7xYtWuR8GGumJmRWUKzr02XqAqWXXabACrQEjDfvGpGJ18WEdwsb6hvLcgbsttWlNxUiFk3T58Rl9yGeDBwfbbIsbk9SGzBuj3rhPzYTMH7vg/Vy4V+rpedRUek6qvzmbkdGpfNAXzrtGZGL/lLtAHA2OYKyHE80yj/+VSs7DPTkh70icsXfqqQtLmUyXc+AcSZ1yncb7bOWXFJ8unS9g6C9jo7K/iPDWbZ3P9yXTnutlfF/qJDlKzZkVTcrLH7i2Ro59MSobNvHc89AtSzOd5vXgHH5ltuW7jwdMGaImatvrpZeIa6j9x7my5bdI9Kljy
i3QsYPpskyHTd5t7znTnGIGbtzZd/jE0XG7sGaTA6C29g2TInASJSDwmVTW1tAFHSioEuoj7B4XJDT0CVSsXY29PTYiR4PijGhfajI1RUmCkbvySJHnCYfQOigm0hm/qFaSgNfzF+YdPK3C7eEOmGuC7sVeQ0mHgp+8rcZontstfDDA+W9HQ6B3vClcxtBNPTYyRm58L1jhv7BmomsxfRh5WOg7kAWi8eEOW8hH/pWegGtkj7X6v7NG08QPNRklZpaz7Ik/++WKU4A/eZ4wG3j8kTI36od1pnBk7Md5iEg84wJbfr776qk7L1vgxYZp7przdQEnBe2J0kX7pSig4miuf9uzfl4Ax8k3PPSWdR0TIxOUpwqJQc1zZqWp577Nc1bRnocZoyTcnDuvXdxKwgLHvZHmlMfkaMM7Pz1dDdIzT0QqmTaSPAMwC9IWPnoXF1157Te0WcP/rX/9a6SNR4ELD+Prrr1ebCQDG9AHECS0VWsozZsxQzWG0iNnZAl/xPffcI2PGjFH6qj/96U9KZwHIDGZgNYyvrIZYwPjK5NeaoS1g3DRps4D15z//WdsUDH13ZOcXgDFAsdEuNqAxWsbOg47OAMZGu9hQUrBNEqN3aBjjz2gSG81iOiEOHGeeMyi1zv8lYAFjd5axBYzdWW6tlWsLGLeWpH2fDoAx21UZxMGNiyE9tqOyvZTvnj7a9M/m7C0X5j+2nJ8+Wy2HS9iGflyik0slp/C0lJVXKxUE/jgAgwFioaGAsgIwGT5hgFMn13BV1XkpPl4hJ8qxl1C/sEx44sR/RWXtGIJnaDRmF5RLRu5JpYogDM/hVC46dlaSs8okIrFE4FkGYDZpMQSBCoL8wKPsdAC6xaUVcrQMI761+WfEQt7gUs7MOyVRSSVq0O9YWaVUVtePaUzabMsn7eikEjWUR9o1Dk5l0gAU5n3wh0ZxdsEpfQeoNcifkTF5M3Jk/MRYjAm6GW85/TnfoznXlLubAOMpU6b4FWDM1u+EhAQt1+aWp68B46TME/K3fiEyYVmKFB6taE41EgDjNZ+3PmCMzPguALz27NmjmpBoPJp5SLNewo88W8C4/RSmrwFj+gAWD+EhRnP4wQcf1F1BPN+9e7fce++9ymeMkTvA3scmIot/AAAgAElEQVQee0ztFhiFLjSMoaTAaK4BjGtqzsm2bdtUaxlN5O3bt+vYAPqfCRMmyM9//nP57ne/q1zHv/zlLxWk5puj/7gYYIztBUtJcfG6aAHji8unPf1rAeOmlQZ0atDgsBOB/rgjO78AjFmRxHgdBxzGHHQ4HAzAAI7p6DiMtjETSzodwhgOYzSM+Z+OwxwM1rh2DtrMxKcjV5yO8u4WMHZnSVvA2J3l1lq5toBxa0na9+kAGGO8pmvXrnLw4MG6Pru5IJUzZ4QFKK05x1EL2NYCrU5ftaAnQCkavtXnGBd8A8Z6LCDXgqUNw3Ln7TnPSEuNbH0DTpsxhnneaFoKytbG65mat7TwY/LPe2qa3wDKzvCAy57+TJ6c/jS+b/IOkE1+OdrCMU5zG2D80UcfXbaRv7aQcWNpUjcYIwPKYFjy008/VeDTjKcbC2ee+xIwRkMeI45/fTZIer+8R7aFHhJ2B5hvtfxstcSllsr723Nl7baDyk/Oogf/47wBxvzDYpFZGMnKL69bDKG+5x0+I9vCiuSNj7Pls9BDes/3YL6Zk+XVuhiF5nPR0bMSmXhM3t92UD4OKNDFm9NnKtQ418KFC4UxJ8a4OIYNGyavv/66gmFGVh3tbAHj9lPivgaM+T6Is7i4WPtys0DCc8bw0PUABNPnYzyVuT7tjPmujB8Uxsxz+Vo0ToynchC/mcODDRCPMbwLbzELzWZ+z5n7kpISxQ1IB8cZvIB8kgfiM/+1n9Jp+5y4CTCmrNFQx1AyCwwdzfkSMOZb4Jsw3wX3yJeDZ94cfr7WseM3/s7V95fGv4nXnE185t6kx9lbeo09N/FzJi7C8n074zd+AIxZrGLxygLGf1EuePpkt7qraMDpGNAuBiTmTKMPUMyAlWeczWGAYwMYGw1jAxibSmMqOhUKZ56be7cKzOa76RKwgHHTZdWefFrAuD2VRvvLiwWM21+ZNDVHRsMYbSQ0gqyzEkACjNcsYNx2dYFxMWNqtn+//PLLCnyyAyAgIEC3hQPW8L+38bMvAeNNO/OUTuK6pwOUdqXT4DD5MrJIzlTUSGJGmYycnyj3DgiV+weFS6ch4XJ3/1A1fImBSUBeT8C4/Ey1lJ2sknXbc+X2PsHK9x2856gKGpqX1Z/nyH0DQ6XT4HCBz/vegWHyyMgI+eCrPDUOyaQ45UCZDJgZL90nxigFzEPDI+S+QWFyV/9QeXh4iIyb/bFMmTxJt80//fTTDQxDotCCIa60tLS2K9w2TNkCxm0ofI+kfQ0YEz3tAQftt7NtcD5n7m3uPbLU7FviIT4Tp8lDsyOyAS6QgNsAY8YLtK/YEuhozpeAMYqZjMuxJ0I/xRlj09DEbd26VRd96PuNq6muUQPHcIgvX75cFixYoLutsHNBG4PjO2Xhhh0ILPAEBgaqPxaj2c1E2UE7SRjGGMuWLZN3331XucjBBAF3TfxvvPGG+gULNI78MH/AIDbhZs2apdQ3pMdCksH/LGBsJCbiFxrGrC6iKcwZoJhK4TyogFRoJ1AMoESHAdBsjN6xDYznpuPizIE/c+2rTqu+COxVe5aABYzbc+k0njcLGDcuG/uPKOfd9773PR3YWHm4RwL0vwxM4S9+4IEHLGDsnqJr8ZwyRrOAcYuL+aIJ8H3S98JTPW3aNOndu7f06dNH4BlftWqVhIWFqSYhEzrnBNKXgHFu0WlZufmA3NwrSHpNjZPNuwqU2iXnULmMXpAot/cNUQOSH36VL1uCDsmEpcly36BQGTArXuloGgLGWUK4d7cclHsGhCjgi6FJ6GIAlzfsyFfqi78PDZd3tmRLSHyxrPrkgDwyMlIB4cDYI6qZnJRZJt0nxCjHN4YnZ72bplrQYxcnSae+m6XLv8dLj2eeFk+wGN5VnjFHWbFihU6gL1oAfvinBYzbT6G2BGDcft7O5uRKJeBWwBijux3N+RIwBtjFQGWXLl3koYceUooZwEU4wn//+99Lv379ZE/cHsXRGB+gnMmz//mf/5Ebb7xRbr31VuUwv/vuu2XdunV1CzqxsbHKUc5Yn3h+97vfKYc5fjCM2alTJz0ID5c5/5PupEmTZNSoURov1HWEveOOO2Tt2rUKRKM8Ct6HAWbygNFN4rj22mvlt7/9rfa52dnZWiUsYFz/ZfgFYAzoC1hsgGMDFqNhzLUnYAwATKXlYODKYIwKRQUCWOZ/AxIjKiYiDIRxnJ339aK0V/4oAQsYu7NU+bat0Tt3ll1r5NpqGLeGlFsmDQsYt4xc3R6rmwBjuH7RagkOCpaTJ066XfQN8s8YmQnZvn37VGuHiR2gJzv5Bg0apNrH7733nmr2HDhwQJU8AIzZMcAkki3jl+tIGw7wPfuPy519g2XckmTJOcSW86/lnS05qgXcf2aChO89psbsAH7hIZ+
6MkWB5GUbM5WTG0oLtIAnrkiW19akywPDwuXZaXFqNJIwTAegsegxJVb+3xO7ZNPOfOUXh9MbwPn1zQdUG3n6qlSloEjKKpOnxsfIP4aFy+bdBcqXDnd5dGKRTJzxrnTt1lP+/XT3BprFgMXmQH7QUzCm6WjOAsbtp8QtYNx+yqI95sQCxu2xVLznyZeA8dGjR2X8+PECJzgg8ciRI2Xjxo2qZdy5c2f54x//qNrG4G1oIIOrAO6ymPz555+r9vCrr76q4C2c5Ggog8HR3wHgYhST8QFawC+88IJidd27d1duYfjPMYr54Ycfary/+MUv1OAlIPPs2bNl06ZNSjuCEUwzvkCDePr06Qo+08eiYY4GM9rOKKPAmQ64jLOAcX398QvAmEoIWGyM3xnAmLOhpaCjcx4MaKmQ3gBjxMN/5myuawejtc+ZnFjn/xKwgLE7y9gCxu4st9bKtQWMW0vSvk/HAsa+l6k/xOgmwJhxJ4aWsLptFBeysrJ0MsUExXkwwYLH04xDKSuu0erZv39/A78mHM+J2zMM8RCf8WfOPCN9lC+cDhoJ48fbme2gng7AF78AfUzExo4dK88884wwweMAQGaiyMSPSeL69etl6dKlOlEzEzrPOJtzzzg9OeuE3Olh9G7aG/uVogJtYUBd41RT+Ms89T9peYpk5p2UDV/myT0DQuUfQ2spK+56PkRWfnRAzihYDCj9tXISAwBf23W3LFiXIWu25cnabXmy7st8GbskWW7tEyzdJkRLRu4pUcB4XIw8MylWwexv9E8kIytfXp23RC2wG3DY2xnAuGfPnjJ37lyVKXI1BzzYISEhqhzDuxtH2aOhxRZe49ec0aiDOoR5kNPB0YqxMeI0fjlzz1Zdb1biqQP85/TPNRN4jJIxN3Pmi3kZbTj58kyHeFhocGqfU48ADNBcY3uzZzq8C/nynJNRpzF45umffMFdynzR6bhnIcdTXuSRZ8jL+T0RljBfffVVgzTwT/1nvulvzgLG/laivn0fCxj7Vp4tGZuvAWNjUJIzbS9tI/0JICxaxIwDoJLASDXAMu05/QD9AYqajE1mzJgh3//+93VnEm0tgPFvfvMb9U87C4UEYC9jDPrJn/3sZwoKow3MWAV6ib///e8CaLx48WL1Rz7oV2677TbVgCZN6CpYsCdP9AWEJQ/EDSj97W9/W/ta+i3acsthXFsT/QYwhpKCigElBYVPJTQ8xnRyVAYGRwxEGKCbg8GM4TBmsIAfKgkHFZZBiDmb54jOc3DSkh+2jbvtJGAB47aT/ZWkbAHjK5Ge/4e1gLF7y9gCxu4tu5bMuVsAY2TAmNIcjCsLCgpUE4bJCpMmc8Bdi/YMIB59mnGMYwF44e3Dj/HPmXsAWCY6TuCNayaJ8Ap7hiFdNILy8vJMEnpmwoeGDpzEzjS4JsyOHTsa+OcGYI188f+UKVNk8ODBChgbIBTwk4N7xt6Msfr37y8PP/ywUgXxrmgsXa5DnkmZJ+TOviEyYWmKFB6tUPoIqCduejZYtocV1RlmxC/us9BC6TQ0XF6Ynyhx+0tl4458ua1PsNzQI1CB378+FyzQR5w6U1sGVTXnZWfkYQWV/9Q9QB4cHi4Pjag9Hn4hQu4eGCo39AyUeweFqqE8KCnQMO7zSrzsyyyre7WMzByZPWf+JQFjIzsoPui7nAfbauGAxECYeR8SYC7DpHjo0KEqX2eYIUOGKPjKHMnpqDMzZ868wD/lM3r0aMnMyHR612sm8mz/pRydabAo8M477yhI4JwvlZSUyoYNG1RjmnhNGK7RTNu8ebMq+piEAIy/+93vyiOPPKJ1yfg3Z7TW+Rb4npwOsBhAwPgzZ4wJYpjRk/+ee7Y6Iy/jlzP54tmWLVsuANgBHqBe8fQPiM2ikL85Cxj7W4n69n0sYOxbebZkbL4GjMeNG6faxeweMv0K7QVtIQbjMC4IkAw11Y9+9CNtU82CJf0W4xOoKqAKvO6667Q9j4qKUrCYsQGAsulHGJew+AwNBfGD+eHww+L0H/7wB31u+kPigS6DeACIGUsBOkOdhTYzbTvGZaHOuueeexQwph8kPAuiFjCurYl+ARgD+hotY0+w2ElLwTWVkkrKwbUBjA0lBRWcgQcVxZwRFRWVw1RAU3FrxWh//VUCFjB2Z8lawNid5dZauWaCZzmMW0vavk3HAsa+lae/xMaYzA0cxt7kzUQHTciVK1fqxIXJizmYYGG8xUyuCM/YFXAPwzLGn/PMc7SMnSAz1xiHefPNNy8IQ7poRubk5DTIHiAagLEzbnNNGDR3PB2KF6tXr9Z3gXd34sSJqh1rQGIDfnLmGZRwAKFwDKJhnLQvSRU+PONt6j1jdE/A+Pz5r+WlZSkKGG8NPqQAsjO+TwIPyT0Dw2TUgn0Sn3ZcNuzIk7v6h0j/mXtk9daD8uioSNU2hqoCh1ZyUFyxAsb3DQ6TnZFFsjv6yAVH0J5iKTtVJQYw7jczXpKyTtQlnZdfJIuWvH5JwNjICeM9THiZAJsDLSyMAjF3MfMTEmD+ApCKli9+jH/OPEPDnXrkdCjeoOHr6Z97KPu80aegNcZE3xk/10zEMzMzL9B8Jp/UM898kQbx8C04wV8AYzS+2JJM2++ZDveeWsy8E4sw5NnTvzNfzndnfmjy5QxDvsgriynOfBGWMIAO+Cfv77//voLHnAHw/c1RduwQuOqqq1S+dh7sbyV8Ze9jAeMrk19rhvY1YMziHFq8LPihoIljzALlBFq/UCrRfrMA/vOf/1wXk/Hj7LNo+9Ewvuaaa7TfoN1FW5hxAWMR094AGD/11FNy1113ya5du+vSAwRmERreYnajGEc/8OSTT6pWM2017Ri7b9jlBHWF4VAGaOaa/oZFcdKzGsZGin5i9A6wmAMNYwYORrMYLWOj7s5gmUrMmUESZzp/T6N3VCQzKKCycG0qKRWbe3OuF6O98lcJWMDYnSXL981An62MkydPdudL2Fy3mAQsYNxiom3xiC1g3OIidmUCjNPcChgzZmVCBMjl7WCCZMahFA7XjHcB/bz55zljYTOWNWHQ3PXmn2ekbzR1TAUAEAMQbCwMEzTnhI9rxtRs7SQMgDKTMgMOc0YDCNAJrc2FCxfqln4ANrSQ0P4krDNOk5emnglrAOPx32gYE/aVt/bLLb2DZcmGTDlcUjuh5XlF5Tl577ODqkk89fVUyS4ol/e35ylv8cJ1GZJ/+Ize39grSHpOiZXEjDKlpEg5UKY8xzf2CtD4AJFrztUeqTkn5O0tOfLR7gLlOm4MMC4tOy1vvbtRunaDr7hW69oJqJtrAGM0taGeoEy9Hd5kRj3x5pdnzvpkZEscjfnneXPTuJwwnvkCMP7Wt76loAP59Ja/5ubr/Ll65R/nu1+JvAgLGDFnzhwFTSxgbCRrzx1FAm4CjGkzoLIBTLzSPseN5dsSgDH8v2j8OgHjbdu2KShLf09bziLyT37yE9394ZQb7SfavADGGKIDbGZxFHCZvo+xi2nnDWB83333SXBwSB39D+MRFqA9AWMW85yA8cGDB3VXzK9//WvVOp
43b5622eQV3mMAY3Zh0ddYwLi+lPxCw5hVcUNJYagoGIAb4JiKx8CXM4AwZw5AJYBmViTQMI6Pj69bpTcDHTOA4IwzFdac60Vpr/xRAhYwdmepWsDYneXWWrm2gHFrSdr36VjA2Pcy9YcYGaO5FTD2B/k734HxM98pky64dwGJmcgx8WOrJ1rLgGtoFDFm95XRO/LA2Hx/9gmBd3jI3ETV7gXM/Ty0UB4fEy1dx0XL9rBCNVKH4bnQvUdl4OwEuWdgqKz6JFuOlVUKmsQPj4hQ43XQUOQWnVbtY3iJX35rvxw/WaXhB8xOkD92C5AFa9MVNMZv0dGzsmhDpgLOGL8jvsYA4/Iz1bJqXZA8/GQtqG4AYucZsBgNbGgnAOHNOzrl7c/XBjDu1KlTu35N6h0AFAAD2sjMNf3N8U5Ww7hppQptzZmKGqlmccIjCO0jmAQLdCz8cQZk47mbnZsAYzfL2Rd5b23AGBokuIbhkIeSgnYEOwzGMQ7gP3Z+stuIb4ExwqUAYxZRwfNwTQGMGSOGhYWpzQS0iXfu3KmcxsTBYjfgMYAxRvHIgwWMTQn5kYYxjS6gMVoVNL40xlRAzgYs5kyHh4YxFYED/wxkAYzZSsf/5j/ExCDAgMWcOfjfuo4hAQsYu7OcLWDsznJrrVxbwLi1JO37dCxg7HuZ+kOMFjBuH6XIxAutLcBi+HXZisrkCz5djNIwAUP70kmHAGDM1lBfGL1DCnmHz8i/RkfJfYPCZNCseIncVyL5R87IvDXp8sCwCHl2SqzMfjdN75+fuUc6vxApM99Ok6z8U2oQb83n9YAxoG5F1TkJ33tMNYo7DQmXjwMKpLr6nOyOOSJPjI1WruKJy5JVe3ni0iQFm595KVai9pVo2MYAY4zoLV2fLHd2Xyk9+42V/s/3VW1sAxgDtMOhO3fuPImPT1All46mrOIWwLh9fH0tmwsLGDddvgnpZfLBznxJzWGnRz1kTPsI9QlAFTsroBxau3at7rSAwoW5i1udBYzdU3KtDRizWMwCMVrD999/v/IdQ4+Fti8LoYGBgcozjNYvO4/A2dgl7GvAGLCYtACLb7nlFqXpYhcVYDba0bfffrvuaIFKi2/RAsb1ddpvNIzRFDardU7tYgMcU/Cs4DFI5eCahhuQ2WgYG6N3BhA2wLG5N8Ax4nNe14vTXvmbBCxg7M4StYCxO8uttXJtAePWkrTv07GAse9l6g8xWsC47Uux4myFahbDb4yhPIyLYRQNnle0fxhze3O+BozR9H370xzpNyNenpoQI9vDiwTgNyP3lLz1SbYMm5eo//WfFS/DXkuU1zdnS3ruScEGHv6+ijoik1akymchh6Sispbn92R5tWoeE+cbH2fLqfJqOVtRI7uij8joRfukzyt75PmZCdJrSpwaz9sWWignyqtV6STnULlSYsxflynZhfWG5iqrziltxYi5UbLyva9k7XvvytSpU3W7LkaEAN0Bk+CjZi7T0cBi6ooFjL19MW3zzALGTZf721sOyj9HRwn86Ggb48AoWEyDBx6qHgxDsjiE0hr3GCtlfONW0NgCxk2vH23ts60AYxaLWTy+6aabVMsXY3iAs1BGYOxuwoQJSrUF7tZcDWNotbxRUjg5jAGh6U9ZzP7Vr36l6WKcF4N3TzzxhILIP/7xjzUeFEoNYAz9BUZOO7LzC8DYyWFsKCkMUAwwzEEDzDMGrFxzpkISlgoGyTaVCj88Z/LBwQCNg2ecDVBszh258nSEd7eAsTtL2QLG7iy31sq1BYxbS9K+T8cCxr6XqT/EyJjMUlK0XUky9s7JzlEt4h07duiOPbiUeW7G0o3lzteAMUbuio5VSFzqcdUMLjhyts7QXXFphexNL5OQhGMSuveYoAkIpzFhGOPDQ8x9ctYJKTzKfKAW7OH/0hNVEp18XLUG0Q6unRucl5QDJyQquVQCYoolbO8xScoqE+guzoNAAxSdqZaU7JOSnntKr40ciBvNZ/KQV3RSigoP6TwkOChY+Ypp65Qn+pu8mXAd6WwB4/ZT2hYwbnpZzF+bKfCer9ueJ5XV5xRzQClt7ty5ijl0795dOKCc4YwRL7AI/gek+vp8vVZy01NtW58WMG5b+TcndV8CxgCr69at051EaBCbBQ9wM+o8vMAY4jU8xPRpaNez+4hdNNguQAN5yZIlymPMezBmQOMePwDMpEF/i+N68eLFSm8F+Gt2K6EAunz5cmGxFd5i49Aexj/88hiCJTyGhAGru3XrppRPcCyzyA0tBqA1i7Xgg2gfs8tn1qxZqmBq4uyIZ78AjCl8ChZaCg6jYeykpKAC09lxULmcgLFTwxh/ZnDreaaCGNDYVNyOWGk60jtbwNidpW0BY3eWW2vl2gLGrSVp36djAWPfy9QfYmS8ZgHjtitJxt5sOWUyyMSN8nC6i42ZfQ0YO9Nt7BoA2AC6+GEuerE8eovH0z9gs+czb+Eu9oxJNoorKLpw3dGdmwBj5p7MRan//lh2vgSMgX6gemERBk18NPmPlFSopj8a+Sy4eHNo5UM5k5J9Qg4U1PrjWzaOy7JT1VJ6skrYaYDRyoOFp/X69Nka5RRnAYfdASwkZeadkuLSSl0oog3ACCbxpx08KYXFLBjVx23SIA8sSOGHhaWDheXKV+z89heuy5S/Phcs67+oBYwx1PXee+8pOAU4bGhnnGeAY0BjwDXaALc5NwHGlBUaqfQ90CJ0NOdLwBhMjX7/wIED2v6Zvp8z+BwULNBOmB1GyB58joURAGa0ftPS0hRQNuVg/PAco4SkYb4v2lbGGsRLm2vSww/PiZc22DjSMv65NnFjZA8tZmgq2AUFOEy/a+Igv7R5qampmhbxd2TnF4AxFcN00lQGtn1QiShoCt8MvjjzjMpGwXMmHA200+idqXz8z2HuOZsK25ErTUd6dwsYu7O0LWDsznJrrVxbwLi1JO37dCxg7HuZ+kOMjM8sYNx2JclYmQnW5YyRWwswJm/fKCk1EJS3Zw082Js2k4CbAGO017Zs2aIACPNQtzvaVAVYvgFkfQkYA/IC+H64q0A2fJmv1A0L12UIXOAvv7lf1n6eq+CxkSH+Dx46Leu25yrf+JhF+2TaylR546NsScwoU6AXaLeq+rxAB/POlhxZ90WezF2TLi+v2q/Pdkcf0ecf7S6QTwIOyax30mTSihRZtD5TAmOL1WAl/OTwmY9bkqwc58HxR8WsfbHAhAHMTTvzZf7aDJmwNFlGL9yn+V2/I0//Y9EI5wSMK6pqJD09TbUU0Wh0gsSe12gcjxgxQqkpAMScB5qSAM+0tU5nFuucfs01Bhg9FzAoV7APAD7jz3kG3KPcPR3AqtOfuSZfaI927txZ/vCHP+juCMKSLukbf84zz8kDeTGO92osX6RBvsBxnH0M4XnO/874zTXyMkClSYdvE/+ff/65jBw5UrZv327+6jBnXwLG3oTmLCPn/7V9cMNFGE+/3vyYODz9Xuq5+d95dsbBtbMOOv3Z64YS8CvAmJUMGgIOGjtz0MnRYAASc+3UNqZBcwLGpjGiAhmwmGtTqczZV
rCGFclf7yxg7M6StYCxO8uttXJtAePWkrTv07GAse9l6g8xMiazgHHblKRzAkYOPO8vlavWAowvlQ/7f/uTgJsA49jYWBk9erSsX79eDTu2P2k2L0fMgZkjlxwrUXARRawePXrIVVddpSDdlcyDAVYxJPn8zHj5x7BweXRUpHQZFSnPTo2VB4dHyL0DwxTsPXGKXb+1FDMz3kqTW/sEyz9HRypPeI/JsXJHvxAZMmevYFgSrmCoYia/nqIGL+8fHCb3DQ6TW3oHy6ur02Tx+gzp/EKEPDoyUp6aGCtdx0XLYy9GaZzdJ8TI/LXp8tiYKH325Lhofd5rSqxqPrOoVFZerXm6q3+o8hOTbq+pccL93QNDZeVH2XK0rFKFXA8Y58vpM1USHR2lW92bAhiDSbz66quyaNGiBse8efN0McITAAX8XLNmjW67d4bBeBhazWhIMicyDiwEjcply5Y1iJ+w0AJQf48dO2a865k2HRoBtvY70+B6/vz5mq9OnTo1AIzRDvUWZsGCBbJ69WrlknWC3+AzjO/gcvZMg3fZuHGjflfOesd7YayMfHuGIR4MrQKoOx3yeuedd+Sll15SLtuPP/7Y+XeHuG5pwPhyhNjcccPlpHGxMG2d/sXy1h7+8xvAmAaBA5CYTo0D7WLujYYx9zS0NDAcZjXLUFLEx8fr/06gmEIy96Yy0ViZ6/ZQiDYPLScBCxi3nGxbMma+b7a5fOc735HJkye3ZFI2bhdKwALGLiy0b7JsAWP3ll1L5twCxi0p3ZaN2wLGLStfN8fuJsA4JiZGjZcBkmHcye2OeS7zX7jIt23bJoGBgcr5efXVV6uGJvNpJ3jXnPetPnde+b6ffilWbn42SEHfDV/mSVBcsSz7IFPuGRAqT46PkdjU40pdsWZbrtzYM1CffbAzX1KzT6jByeGv7ZXrnw6Q2e+myZHSCoF2YuLyZLmhR6ACwzPf2S9oLkfsPaYax8R7R98QGb80WT4LPSTvb8+VHlNi5bqnAzRNwGYM1W3eVSBPjI2WW54NkoCYIwpaQz9xV/8QeXxstFJNwIVOftFUvrV3sPScGqc0FYDLDQHjSomIiFBu1aYAxj179tR5y4wZM5RLFT5VDuYyLIoyv3E6NNtXrVolnv6nT58uGCBNSkpqEIbw4B3wJcPbauLnPHPmTHn77XcuqL/UBbhknX7NNYY6eT9PwBigGsOn3vKF4b+UlBStX+ZdwGiwJQXnrWe+eBfS57ty4i+8C7yzcMya/Jgzz+Cq9dT2R14rVqzQxR34aQGcO5prj4BxRysDt72vXwDGbGFgFZSzAYuNdjENEAeNCp2bObjnIIxTw5gVLhydoDlM48S9uTZntxW4zW/zJGAB4+bJq7345tu2gHF7KY32lw8LGLe/Mmlqjixg3FRJdVF8caQAACAASURBVCx/jM+shrE7y9wCxu4st9bItQWMW0PKF08D0Bi+T7Q8ATyvv/56BdnQUi0qLFJAjjF3c+bFdYDxxFh5/MUoBV5NLvIOn5YBs/bI/YPC5NOgQ3L8ZJV0mxgjf3xqt2wLK1Tw1viNSS5REPnO50MkOatMys/UKK3FLb2DZM576WpU0vj98Kt8uXdQqDw7NU5CE2o1aOFQfuvTbPlLz0B5fka80krgn+evvLVfbnkuWN7ZclDT3J99UsYtThIAa/6HK/lYWZXEpZbKA0PCpeu4GDWoCe9xPWCcp/zMaNsCYDbGX2yoKaCkwBgX2uqEcR4AsNAvUB5OB45B+WAAzOmfewyHoS3s5F8lPBQSALZO/+YaqgpPrVzK1jN+45/n8L8+/PDDDTSMS0pKNH3PcM580W8bR76gkOCbN3E7z+QLjMdZzwjDc880TDj4b1EadDrkhSw3b94sffr0sYCxUzj22kqgEQn4DWBMAwAlBY0cDYoBjAGAaSw4AxxzTcdGIwN4TBg0jK+55hpd2TIrpvzvPGigDGDsbOAakat97CcSsICxOwvSAsbuLLfWyrUFjFtL0r5PxwLGvpepP8RoAWP3lqIFjN1bdi2dcwsYt7SELx6/AeeYD2NMatiwYfLkk0+qotWUKVN0az9anJQTRsTQ5sTvpRyAcejeYwKtxPMz9kjygVojVaRXXFqhGsN39w+VzbsLJP/waXl0VJT8pWeA1NTUAowmX5VVNTJmcZL8qXuARCQeU2N5cAs/MCxc1n+Zp2Czycumr/L1+UvLkiUzt5amAMN7nwUfkpt6BcmYhUnGqxriW74pS27vGyKccfAjEw4qjS8ji2TDznx54+NseW1thmotPzEuRvbsP64G9JyAcWX1OTly5IjSMPTu3VsAhQ1A7DzzHADzjTfeaKARXJcpLxdGDl7+uuJHzrid141FfLlG70zc5txY/OZ5U/0Z/97OjBdQKkJh0GoYe5OQfWYl0FACfgEYo10MUGw4jJ2AsRM4ZoUNQJjOzJwBmp2AMcAyDQkHjRIH/k0DZc5N6RAbitreuVECFjB2Y6mJDrashrE7y641cm0B49aQcsukYQHjlpGr22O1gLF7S9ACxu4tu5bOuQWMW1rCl47fzHuZNwMY33bbbQp4om0MeMw8yVAG7N69WzLSM1SDlfl3Y3NlAxg/Oy1Ohs3d28DA3bGySlmwNkP+1i9EPviqQNJyTshDwyPl9n7BdfNyZ65fWpEq13UPkF1RhxUghm4CnuOtwYek/Ew9fQOA8cMjImTm2/ul4EitUbfqmvPyRXhRLWC8qBYw5n1PllcrJzEcySs+zFJjmcXHK+StT3NkyNy98tz0OHlmci0Pcu/pcXJzryDpOr5xwBjZQbeAljGgsRMoNtc853+AefJg5O581/Z8fbmAcVu8E/WSHUkWMO7cFuK3abpQAn4DGBsNY09KCqNZTGONhjH3XKOBCIBMOBoMNIyZiOKHiQfOnE2Hxz3X5rkLy9tmuZkSsIBxMwXWTrxbDeN2UhDtNBsWMG6nBdOEbFnAuAlC6oBeGJdZSgp3FrwFjN1Zbq2Ra7cBxuPGjbvA6B3AKdq3ubm5FxxFRUW6I1a+rpcmbRmaunAHewtDXGZeakIBLkIzkJeX5zUMilWec1fyRfre0oD6gPmwccTP/Llfv37yq1/9Svr27augJ1qx3bt3V+D4mWeekaFDhyr/LMbWwsLCJCcnR+fZzLudAGgtYHxUnpsWJyNeS5TMvHrDZAYwvrNfiGzaVaDgbpfRUXJTr0A5U1FjsqTnisoaeXFRsnIQh+wpVsN0AMYYr9seVqicxiYAgDFG7zCAV3S0lqYAQ3nbwzwBY1EN49c3Z6tRPQBjaCa2Bhcq3/IjIyNlwbpM2RJSKGF7SyRyX4k8MDRcHh/bOGBMHsAn4LnGOBtyQpsYhTXO3GPULn5P/AVla/Lf3s9uA4wZS7LQERAQ0N5F6/P8WQ5jn4vU7yP0C8D4+PHj2iGhZWwOo1nsNHjHNSAxYBIHHZgBjH//+9/r6h8dIp0anbEBh+lkTUfH2Vz7fe2wL6gr5z/96U+V68iKwz0SsICxe8qqLXJqAeO2kLpv0rSAsW/k6G+xWMDYvSVqAWP3ll1L59xN
gDF5Xbt2rRraAqA1DqNjixcvlhdffFENbY0ePVrPI0aMkAULFiif6vlz9VyuzFWjo6Nl0qRJMmrUqLowXHMsX75cd9Q656LMbQFpx44d2yAMaY0ZM0bQ/GVe7HTwvJK+Mw38jxw5UkFfuGGdjjkzBs7uv/9+6dGjxwVasgY8BkBGEWvIkCFqwOyLL75QjlsnyH0pwHj+2gwBMP5wV4HSTPSaGqe0E0HxxXLuG1nx/vFpx+Xfk2Ll1j7Bsi+jlsP4SgFj3vlEebUYwHj5h1lqeO+l5Sny/57YJYs3ZknZKQz+QVX5tQLGfx8cJo+NiZbYlBJBa9mTksLIkXICRN+6datg+G3OnDlqgI17+IZNPXCWrQnb3s9uAoyRr1nIATfqKI73ZqzkVsAYg5Jwmh8/Va3ULy1dbnzfJWWVUnqy9ntvLD2tTxU1mjcWtc5//bXmj50KcJ2zMOV25xeAMVQUAL+syLKCx5mGwJwBgTkAkcyZRpuPhk6djg3AGKuhdIh0auajMmfT0Zl7ztb5vwSshrE7y9gCxu4st9bKtQWMW0vSvk/HAsa+l6k/xGgBY/eWogWM3Vt2LZ1zNwHGzBOZQzL+dM4ReQdA3okTJ8r48ePrDoDcJUuWKGBs5pjIE8AYTVS0H9FYdobhHo5bQC5nGsxp161bJy+99FID/4Ql3cDAwAsAY6gPSN8ZP9ekMXv2bAU2neXLu8Fb3KlTJ+nZs+cFgLGhVkDTGE3kF0a8oFqzO3fuVMCYPBrXVMD4g10FcrbinCzbmCU39goSNI1Ts09I0bEKySkolylvpMqtvYNkzOJ9UlB8RjWKWwIwhod4yspU+f1jX8mMt9Mk59BpzUPawZMCrcZ1TwfIv16Mkoi9R6Wy6lyjgDHvT1+FLMAjzM5n7k15mrORlVvObgKMPWXqVpl7vsel7qlntC/BwcHyy1/+Urp06XKpIO3qfzjMV32cLau3HpQjpRXOjRktks+Tp6tl/tp0eeOjbDnloLfxTIz2ISrpmLyzJVu51M9WnpPC4rOyeVeBrN2WKwcKyj2DuO7eLwBjQF8OAxoDFgMc81HQIHPPtWmgzZlOnTCGw5iJKP5wNOg0IJw5cKZB4d7Zueuf9scvJWABY3cWqwWM3VlurZVrCxi3lqR9n44FjH0vU3+IkXGZpaRwZ0lawNid5dYauXYTYOxNHmbe6O0/z2fGrzl7/u9531R/nuGac+9Mg/k0msVXX321DBo0SACGAYk5c8DBy3MAZ7SdKTvm394cgDHG4/q+skdGLth3ASXF4vUZcs+AUNUwRj+r7GSVPDs1Tm7tEyK39g2W52fukUdHRipFRM/JsZKac0I1+s6crZGXlidL1/HRsj28SLg3DvCmy6hImbcmXYqO1c710Qb+MqJIbu0dLOOXJBuvqtUMMHXXgFBZ+dEBpaT4Kuqw+vtLz0Dp/lKs9JuxR/7WP0QeGBYhGOgjP9BgwJu8eEOtwbyNO/KlqvrSRgDrEnbxhZsBYxeLvVlZ53tmrBQRESF33XWXfrelpaWqYMm8mf+c33yzIm8FzxipfHBouHQbHyPpuSyatWyih0vO6jf/j2HhcqS0stHEAJPf3pIj/xwdpeAyGtD7MsvkhfmJ2m6FJhxtNKxb/vALwBjQFy1jVlzRKuYA+OVg9Y6DlVHujYYxgC8HQLMBjOPi4tSfAYT533xc7f0jckuFc1s+LWDsthKrza8FjN1Zbq2VawsYt5akfZ+OBYx9L1N/iJExmgWM3VmSFjB2Z7m1Rq7dDhi3hoxaKw3A3/79+8t9990nTz31lILHzJ8HDx6stBdLly7VNpg5uHGNgU/QSiRmlMmsd9Jk0fpMyT9cDyxDBwHf8KBXE2R3TLGJSioqz8mMd9Ok55Q98uT4GOkxJU6mvpEqB/JP1dFU4AeAd+LyFIlILNEwJoLdMUdkzOIk1fhjmzmOfEQllUi/GQmybFM9BcfpszVqNG/wqwmyJeiQ+kVzeM22XOmhxu5i5KmJsTJ03l6JTyuVzbsLZPhrifLBV/lSeqJKNu7Ml+dnxqshvhoH3YjJiz+eLWDsjlLlm4SS4tprr5XOnTvrDoMNGzZIYmKicq2DpaFYyZiquQ4AFxoHOL85ewK64LvQNXDgOHvz21gcAMYPD4vQbw/AmLDewnvm26RzKb+aP5P/r78WAOPb+oTIAyMipNgDMOYVTHxoIr+zJUceezFKVn2UrZQZuUVn1HDma2szZH/OSc1SLaZYKxdPWXnm2dw70+HahOPsdE19R2eY5lz7FWAM+EtHZQzfcaaDM4AxIBKAMUCwAY4Bmg0lBRZM8YvDD44Pxnw0POPgvrFOUAPZH7+RgAWM3VmUFjB2Z7m1Vq4tYNxakvZ9OhYw9r1M/SFGxmUWMHZnSVrA2J3l1hq5toBxa0j50mkwpoZjF+3hrl27CmMoqC4WLlwoO3bsUON5Zt5MbJeaI3v739szE5fnfwA0BnRqzI957jxzbRxxesZr7s3Z6ddcc4ZH1WADzudc18Zb/9Qzrvp//OvKbYAx5Ue9dtZb/yqRxt8GwBj7TNdff72w0AP/OLsEpk2bJh999JGCx4WFhVJxtkLl05Q6DHgKZ29e0RlJzz0pOYWnFThlN4HBNll0OXq8UnmBMVh5uKRCMnNPSW7RaaV8ACgGFC46WiH7c04olQPauzzHGcC424QY2ZteJgVHzkpG7knJLSrXXQHkwekAVQlz9HiFHCg4pfkqPHpWdx442w/CkAYLRSxeAUYThmtPwJgUWASiDcjKPyU5h8rV3xsfZzcAjMnL2YoaTYuFKROmuLRCnxE+u7Bc84QRTha7nCAw6bA74VhZlcoIGhzaPcKxQ+L02Vq5EKay+rwC2hgPTT94UvNDWfhyscovAGOM3gEWGy1jQGJzGA1jAGKuDWBMI4HWMdrJBjBOSEhQPzQiHE6A2Nkx8OE4752V0177lwQsYOzO8rSAsTvLrbVybQHj1pK079OxgLHvZeoPMTIms4CxO0vSAsbuLLfWyLUFjFtDyhdPg/F0RkaGzJ8/X5544gkFmQCVAJDRRHS6pgBLTv/NvfaM3/O+ufE1x39tWg0BqeaE93e/bgKMGS/k5ubKli1bBOOPHc05jd4VFxerUU54yZ988kmhHJkjYZBx8+bNwpj72LFjF9U6rjn3tVIwrPwoWylmer+8R4bM3SuLN2ZKbGqpnK2sbSey8st1RwFGIbcEH5IJy1KUlmb4/ERZ+XG2AsifBhXK2CXJ0mtqrPSfFa9UDyUnqnSBSAHj4RHy0IgImbsmXcYvS5Y+r+xR7f7lHx6Q/QdP1oGkAKmlJyplR+Rhmf7mfhk4O17IF2l+sDNfAGlN+wGwmlNYLos3HtD8Eye7FOAfvrlXcAMNY0DgXTFHlNqG/PGe8JoPey1ROr8QoRzLUFIAgqN1vHRjllLmAJRv2lWg4T4PPSRz3kuXIXMT5LnpcZrWZ6GFUu6g0CGdj3YXyMtv7Vf6m8FzEoR3XLopS6at2i/RSaUKMgOoB8QWyytvp6k/4oPmB6Od7OKoqvENJY5fAMaozwP8cjZaxUa
zmDNUFHR4gMWAxOYMIAzQDGB8zTXXCIAx/9GQcFCR8GOuzXMaFlPJOloj09He1wLG7ixxCxi7s9xaK9cWMG4tSfs+HQsY+16m/hAj4zMLGLuzJC1g7M5ya41cW8C4NaTceBoAwgcPHpRPPvlEt7F369ZNrrrqKgFkMvNgc248FvtPR5CAmwBjsJ2QkBDVrP3www87QvE0eEcnYIws+MYxgAlHOdrG3bt3V/CY3QTDhw9XI5jIKSUlRRUyPb/52P3HZeDsBLmpV5D8e1KsTHo9Vfq8vEfu7Bci/WclKO0LWrbRSSXKIw4PONzfAKwAnH99LkgNRw58NUH5wP8xNFwpaW7pHSR/6hagoC9gcUVVjXQeEaF+b3kuWJ6ZHCsTlqYof/hfegUqcAx3MA5NXDjESeOu/iH6H37hMb+jb4gCtvgBWMaIJnzD13bdJY+Pi5aX30yVB4dHaP4xaOmkpNgWXqRx3tonWIbNS1Rw9t5BYXL9M4Hyt+dD5K1Pc1SzOiH9uAycFS9PTYiWwLhiyT5ULi+tSJGbnwuWTkPD5ebnAqXn1Ng6o5n3Dw5T+SBb8gS4fHOvIM3DuKVJKg/ix/jnX3oGKZgMWBy056h0nxirnO+UAbQ7GOCkLMYuSlJt6QaFf5k3fgEYA/oawBhKCsNl7NQyBggGOAYw5uPgoCMkLBxMv//97yU+Pr5Ow9h8DE6QmDA4AyBfpsxtMBdJwALGLiosR1YtYOwQhr28QAIWML5AJK55YAFj1xRVq2bUAsatKm6fJmYBY5+K068is4Bx2xUnc96SkhKlekTDkDk029YBjAsKCnQu3Ha5sym3Nwm4DTDG8BsatUOHDpXp06fL5MmTVYsenMjToVE/derUC44JEybI7l27pfxUPW83YY8cOSJr166Vl156qUGYKVOmyLx584Q+z9MdPnxY+YS9pQNVBJiWp9u4caO8/PLLDdIgPHQxu3btUiVIZxi+2zVr1siIESOUv5j3J27yCTAM5gEmBmiMQUvO5kC50oDHn332mWpoM9c+Vlap2r6Au9Pe2C+p2Sfl+KkqpZNYtC5T/j4kXKa9kSqHis9ITHKpPD4mWm56NkiBVmgi0MRdvumA/PW5YLmxZ5C8+l665B0+o1QMizZkyf89uVu1cuE2P/sNYPyn7gGqhZycdULKTlUrfcXAWQlye98QWfP5QaVuCNt7VJ6ZFCsPj4iQ97fnKRUG/OIY2wTMfmBouMBrDui68ct8uaFHoGo0kzbUD/lHzqjxTIBgAxhn5p2UAbMTFMRFS7mw+KwcLasQtIMfezFa/vpsUB1gjHbvsHkJmoegPcVysPC0TF+Vquk8OS5a4lJK5VDxWaW1QAOad1r6QZZUVp9TTes7nw+VTkPCJTD2iPKik/d3t+ZIp8HhKqstgYfk2PFKefOTHLlvUKgsXJchB4tOK91H5L4SBfAfHxut2seG0sNZF5p77VeAMZQUhsPYaBgbSgoqNZ0dILHRMubaGyUFnSSTD88zwnUCyc0VtvXvPglYwNh9ZUaOLWDsznJrrVxbwLi1JO37dCxg7HuZ+kOMFjB2bylawNi9ZdfSObeAcUtLuPH4me8yD67VePtawSc0EC1g3LjMOvI/bgSMyTPAKAshAKMAqSgY4hhTAOICvAIm48fzQAN306ZNiiURhm8lKSlJPvjgAxk7dqxq6jrDkBYANYCrceBQAQEBGgYNfqd/c8135zQmacIuWrRId8kbf+YMEEy+wL2cLisrS7WFMVppgGAThjSGDBkivXv31v/IqzmMX86DBg2Sd999V9LT01VWSVll8ty0OHloeLhsCyuUsxW1XLzQPGB4steUOOkzPU6Ss8okJqVUnhgbLZ1fiFQNWoBMANLYlBK5d2CYahcXFp9RPmH++zLysNzR7/9v70yA7ajqxP3GUaFmapzSMqVMCSVharQUrRH5C0Kxysg2LuAoICgggoDse1AEFRVkJyI7A4NsgkIIUSBK9oWs7HtYskKWRyBhDeH86zvhd+lc7wuP5Paz+76vq05O9+m+p09/597cvO/98juj0rHn3pcWPv9aTm2xY05JMT6NnDI/5wgmGpdrrx72dO73Rxc8kO577Pn0++Gzskg99ux7E3mLuY5C3uTzrns83+/M/3s050s+4bz70tY/GJ2jdukr9/nmm+mO8fOynA1hjPgl+vn48+9PM2YtyYvecS05hU+97OH0pYPHNFJSrBDG01cSxojzrX4wJp1z7aM5dQZ5lJe89Hq6cfisRMTyTy56ID/jneOfTRvu9td05Nn3ZnkdY3/1tWVp359OTkRXI4yJkEaQM3YY3zlhXkKsv/b68izceVZSW4S7LL4X3u1+RwhjPmzI4shhzAckoouj5i8A5HFEGEfNayIlRUQYA5G/KKLwhckWAjnO50b/6GgCCuN6Tq/CuJ7z1lejVhj3Fen230dh3H6mndCjwri+s6gwru/clT1yhXHZhHvfPz9HK4x7z6u/XVlHYYzwRYrys/7QoUPz/zoPuYbzQYoeccQRify+iFXe/xRewwKQ/HuUVKhFT3TzzTdnKcx1xddwfO6556aZM2dmkRv3QUqfccYZad999839EuF79dVXZwmN34rCv3GKG6/n3nG+ucaFxT3idQRKIp5vu+22tM4666Sdd945v54xcU98GFI4RDF1PAPSfNiwYVmi49boi/4n3L8ofePYiemz3/5r2v7Qsel/jp+Y0zB884SJOf3Dxt+9K+0+6O40ZvqCLIy/cdzEtNugu9PoafPzsBC0RO4STbvtgWPeWuBuhbQdOXV+2vqAMemos+99WxgfMi7tduKkxAJviNTYRk+dn7594qT0/VOn5hzDl/xxRtri+6NzNPA3jpuQx/StQRPTLsdOSFvsPzpttNdduV8Wrfv6sRPyWO95pDu6y/Wz3a/mqOUQxjePnJO+8N0R6YLfP5EjeeNiopSvGPLUSovetRLGP7nowRzx/PvhMxtzw2J3iGlSUpCT+aVXluWIaiKeL7ppRs5TzH1iLpHnpKb4419n5ehoGO31k8lp4++OTJvtNzrteNj4dNx59+U+Fz7/amPBwRjr6tYdI4z5oEQqCiRwiGJq5DACKYQx+1F4HR8QUlJMnjw5tzMpfBDYQhpT0x4T1vzBXd0J8HXVJqAwrvb89DQ6hXFPZGyHgMK4vu8DhXF9567MkfNvMnMYl0m4vL4VxuWxrXvPCuPqzKDCuDpzUcWR1EkYv7n8zSw+b7/99jRkyJD05z//OT300EMN9wNf/k0xd+7cnL+biOBi4TVEHs+ZMyctKywqxmtYD4vzxevZR0iPHz8+dXevLCXxUHfeeWfjemQuorrM7a677kof/OAHc1oK3Bn3JHI4IomR1vvvv386/PDD0zXXXJPZLFq0KItunjFcGGMcPX1B+tpRE3KqhW0PHpt2PHxc2vGIt8t/HTI2sWDb2OkLG8KYiOR7H12Ra5go28dnvph4LcKYLSJqR0+fn7Y+cPRKwniHQ8alvU6anGbMXroSokn3L0rfPWlSzok8dPScdP61j2VZTIRy85h2YIyHj0sn/u
b+9MTMJTlCl1QZDz/1wkp9Eqm72fdG53zGzy16NV1/56wsmi8nT/ELb6cuIaqafMnfPP7udPFNM3IO45bC+MIH01eOnJBuGz2nwRBhPHzCvLTdwWNy5PJLr7yRjj733kSeZ1JQkLu5uJ104QNpywNGZ2FMdDKvn/TAonTyxQ+mrxw1IZ8jNQeifdD592fOxfkq9vVu9jtCGPNmj+jikMb8ZqVYihHGEV0cKSn4YLDoHRHGXBeRxPEbIz4cFDZq2tsB/91MlNf+fQgojP8+3Nf0rgrjNSXY2a9XGNd3fhXG9Z27MkfOv80UxmUSLq9vhXF5bOves8K4OjOoMK7OXFRxJHUSxvDD5RBUSJQuNe9v3E74HeqIyOWa5sJrIsq2OB8EJzZfG8d4qWaHVBwH10WgY4yj2He79ln07sMf/nDaYostsvgmFQVR1AcddFAiL/Nll12WyPFM9DN+LZ6zOKbYn/Rgd9r1mAlpv59NSSMmP5fzD8+c91KiPDVnSUKckrbi+SWvN4TxvqdMySkqeJ5Wwph2pDFRyKRbKEYYI4y/ecLd6eEnX3grGnkFlWFj5uRI5yPPujeNm74gXXbLk2m7H45Nv/rfh1caE+N6fOaSNPnB7iyLySO8z08nZyk8cspzKyGeOW9p2nTfUY0cxsPGzUub7zcy52wmDUUwIOcxuYS/etT4Vaak+Mlbwpixxmv/Rhi/vCwNvv7x9Jk97ko/v+yhHHH89qDeTIecPj1tuu/ILIxfWPJ6TrFBtPUz85bmPMmjpj6XI5QR0+SPvuzmJ9PiF19/u4vV3OsIYcxvZ1i8jkKoPYUPHIUPLh9Q/iKgRLQxQokPAK8pRhhzPgRxyOGogzGTHBMdbdadSUBhXM95VRjXc976atQK474i3f77KIzbz7QTelQY13cWFcb1nbuyR64wLptw7/tXGPeeVX+8sm7CuD/OUTwzwnjddddNm2++eV7wj4XvLr300pxLecaMGTkKGn/G9k6+C1n5w9On5/zB5OIlhy4bkbGjps1Pgwbfn8753aN5cTtyGJOSInIac92qhDGvbxbG5DDe8oAxOUdx3Gvxi6+lX13xcNrmwDE5snj2sy+lIaNm5/QPB582LT3wxOK3opbfTPMWvJz+d8hT6ciz7km3j5+bF5QbfN1jidQZv77qkfTa629F9L6Z0m9vfCJ9/jsjGsJ42iPd6VuD7k67nXh3mnDvwvTq6yuirR97+sV0zDn3pa0OGJ1IhdH94utZlB/y65VzGL+TMD7u/PvT0peXpakPdycW2/v6MROyWF/2xptp+Rtv5rQeRHPnHMYjZudF864a+nQ67IzpadjYuWnJS8sydxYSPO3KR3K08Zn/90he8C9Pyhr80THCmBB/oouRxNT8lgZRjACO39awTwlxHMI4IoxJScF5ftsT0hi28WGJ9jheA+6+tCYEFMY1maimYSqMm4B4uBIBhfFKOGp1oDCu1XT12WAVxn2Guu03Uhi3HWnHdKgwrs5UKoyrMxdVHInCuIqz0npMRA9vtNFGicXxRo4cmVNOLFiwILszXBdbb10XghNpueNhpIqYlC76w4y8+N1Vtz2d8wmzwB1ti154rRFhvM9bi+Bxn3crjHc6bFxeEO7bP56ULr/lyTR0zJx03nWPpa8eOSHtfuKkHOX8+rLl6YEZi/NieeQGRube+JdZIYN2bQAAIABJREFU6dZRc7IUJv0EaTGmP9KdF4ib8lB3zr28w2Hj0yU3P5mGT3g2P9MOh47LuZkjhzGC+txrH8uL2x1+5r154b5bR81Op17+cNr24DE58vfSPz65BsL4viyMyWNMruKNvzMiHXrGPeniPz6Zy96nTMmL8P2/vUemISNm5zzK/3vrU+m/Dh6biNq+9Oan0i2j5qQrbn0qfecnk9JOh49L190xsylKufV74p1aO0IYEy5PpDB1hPNHhDE1X3L8pqQoihFKfCgQzbHo3ZQpU3IOY37w4IMS0jhEcbT39kP0TvA9X30CCuPqz1GrESqMW1GxLQgojINE/WqFcf3mrC9GrDDuC8rl3ENhXA7XTuhVYVydWVQYV2cuqjgShXEVZ2XlMeGzCIwcM2ZMTsXKon94M35mZnu3fovrKaR5ICXD/xx/d0Ky7nrcxCyQSQnxs0sfSg8+uTi98cabPQrjJ2YtaSx6FyMmJcWY6fPTNj9A+L696N1Oh47LkcTfOXlyjlbe5ZgJ+Xjfn05JQ0bOzhHD9IF0nXj/wnTC4Ptz3/99xPiEKN72oLFZLN/0lxWLxnEfUkrcNmZu2v1Hk9KXDxmXvnXC3Wmbg0anH13wYPrcXm9HGCO3WSTvl1c8nP77yPHpa8dMSLsey0J6E9Oux6945ogwJkfzob+envb88aQ0aur8nC7inSOMVwhjxv/Qky/k+3z1qBWpJXY+Ynz68W8fSDscOjZtsu+onAcZWU/e5TOueqTBnKhk5oDXnXPt4+nJOUtXSt0RfN9t3RHCGOmLLA5xHLKYCONWwpgPDB8OSnHRO4QxYpnzIYkBGqKYfT4YxeN3C9zr60VAYVyv+YrRKoyDhHUrAgrjVlTq0aYwrsc89fUoFcZ9Tbx991MYt49lp/WkMK7OjCqMqzMXVRyJwriKs7LymPh3En6LlBQDBgxIO++888oXvMujEMZvvLE8zV3wSpp436J0y12zc3TuNX+amYZPfDY9MevFxsJtC59/LecX5rrn38qri7B98aXX04jJ89OISfNXGgELzd016bl0zyPPp1dfw80tT6OmzE+jpy1IRAWzeNzv/vRMuvmu2Wnqw8/nheiWL18hsRnby68sS48+/WK6c8Kz6YY7Z6Urb30qIYrvvm9hYizcmw0RTPTw5AcXZelMn8hnch0TbTxyyvy8uBzXI76fmrM051e+/o6ZiTQcjHHi/Ytyyghe89qy5en5F17LYyR1Bc/BAnX3PfZ8GjNtQZrz3Nv5j+kPduR/nv7o82nZG8vzuFkI8NGnX0hjpy9Id4yfl/56NyyX5CjuTfYdmY9fff2NzIX8xePvXZif7cohK55xzPQFOXfz62+sWINtJbCrcdARwhjpiyyOxe9CGFNHWgq+6IolUk+0EsZw5AMVdezz5ot9PnRunU9AYVzPOVYY13Pe+mrUCuO+It3++yiM28+0E3pUGNd3FhXG9Z27skeuMC6bcO/7Vxj3nlV/vFJhXJ9ZRxh/6EMfSjvttFPbBo0WQ3a+9PKyLG5ZkA2huULgrrgNYpZrck7esLVvBWOuaF/ZrfFa2pGqcXlchzwmihjR+/IryOS3ryk+FH0wDqQ0aTGIyqWPN5e/ZYvfuniF41ueXn51WV6gjxzM9El6i3x94fIYF5HJ9EveY8bDdfl533omXh/PGrI5rmkeI+1cz+uJYj7g1KnpjP97NEc/I6ARv+Q2Jkp68++PTpMeWNSIHI7xxDMueesZaW/X1jHCmJQUkbs4Fr2LPMZ8yRE5jCQmb3FEF1MjmiOH8bRp0/I1b//GZEUuYyQxP4xEO/AVxu16C1a7H4Vxteenp9EpjHsiYzsEFMb1fR8ojOs7d2WOXGFcJ
t1y+1YYl8u3zr0rjKszewrj6sxFFUeiMK7irLQeUxnCuPWdbH23BBDLC59/NW2674i0+fdH5QUDbxszJ113+8x04C+npS32H51O+u0DOQ3Iu+17Ta7vCGGM9I0o42ZZXExLwT7CGHEc8jiE8cCBAxMpKfhCRBBHNDE1Gz+MhDSO4zUB72vrQUBhXI95ah6lwriZiMdFAgrjIo167SuM6zVffTVahXFfkW7/fRTG7WfaKT0qjKszkwrj6sxFFUeiMK7irLQek8K4NZcqtK7wj2+mq4Y+lfMSf/XoCWmfn05Oe540KX3lyAlZFpOigwjovtw6QhgjiymRkiIiiyM1BdHFCKSoI8oYMdy86F0IYyaBH0AiupjjokSm3a3zCSiM6znHCuN6zltfjVph3Fek238fhXH7mXZCjwrj+s6iwri+c1f2yBXGZRPuff8K496z6o9XKozrM+sK42rPFb6RlB4jpjyXrh72TPrtjU+k3/3p6fSncfPSY8+8nRO6L5+iI4Qx6SgiJQWSGGEcJXIYR80XXkQYI5UQzaSkIMJ46tSpOcK4KIZbSWMmiGvcOp+Awriec6wwrue89dWoFcZ9Rbr991EYt59pJ/SoMK7vLCqM6zt3ZY9cYVw24d73rzDuPav+eKXCuD6zrjCu/lyFZyRP8vznX8m5ktuZk/jdEugIYRzRxUhjUkyQliJSUVCHLKbmC48IYyKEKVy/9957Z2FMDmPOxzlgMmH8IMJGHQL53YL2+noSUBjXc94UxvWct74atcK4r0i3/z4K4/Yz7YQeFcb1nUWFcX3nruyRK4zLJtz7/hXGvWfVH69UGNdn1hXG1Z+rEMZVGWlHCGNEMdIYURwL30VaihDHCCRSUiCLKbEIHq+NCONY9C7STYQ4juMQx0xecb8qk+k42k9AYdx+pn3Ro8K4LyjX9x4K4/rOncK4vnNX5sgVxmXSLbdvhXG5fOvcu8K4OrOnMK7OXFRxJArjKs5K6zEpjFtzsbVnAh0hjJHFEWUcKSlCFCOGI3dxRBojk0hLgQjmdUQYb7DBBnnRO66hnR8+KBh+Cm3N0cY9Y/VMpxBQGNdzJhXG9Zy3vhq1wrivSLf/Pgrj9jPthB4VxvWdRYVxfeeu7JErjMsm3Pv+Fca9Z9Ufr1QY12fWFcb1mauqjLQjhDFpJUIYFyOMkccIYAoCiS87ChHGRWFcjDDmuhDFzTWTFtKY2q3zCSiM6znHCuN6zltfjVph3Fek238fhXH7mXZCjwrj+s6iwri+c1f2yBXGZRPuff8K496z6o9XKozrM+sK4/rMVVVG2hHCOFJSUCOJSU1BSgq+3IguRhhHTRvRwpHHOCKMi4ve8YMHG9dFtDHHIZCrMnmOo3wCCuPyGZdxB4VxGVQ7p0+FcX3nUmFc37krc+QK4zLpltu3wrhcvnXuXWFcndlTGFdnLqo4EoVxFWel9ZgUxq252NozgY4SxrHgHcIYcRyFLzkiipHE7BejjZHMsejd1KlTs1yOKOKQxSGKizX7bp1PQGFczzlWGNdz3vpq1ArjviLd/vsojNvPtBN6VBjXdxYVxvWdu7JHrjAum3Dv+1cY955Vf7xSYVyfWVcY12euqjLSjhHGpKKgIImJLqYQVcxxRBhzjDhGJlEQwkQYR0oKhHGkquCHj5DCIY4jDQXtsV+ViXQc5RBQGJfDtexeFcZlE653/wrj+s6fwri+c1fmyBXGZdItt2+Fcbl869y7wrg6s6cwrs5cVHEkCuMqzkrrMSmMW3OxtWcCHSGMkb6RliJkcUQXI4kpCCRkcJSQxq1SUoArhHFRDhf3FcY9v6k66YzCuJ6zqTCu57z11agVxn1Fuv33URi3n2kn9Kgwru8sKozrO3dlj1xhXDbh3vevMO49q/54pcK4PrOuMK7PXFVlpB0jjBG/pKSIRe9CGPMFR4QxNeI4FsAjahh5zGuIMN5ggw3SlClTchs/eHC+WCJNRdRVmUDHUS4BhXG5fMvqXWFcFtnO6FdhXN95VBjXd+7KHLnCuEy65fatMC6Xb517VxhXZ/YUxtWZiyqORGFcxVlpPSaFcWsutvZMoCOEMdHFiOLIYUyUcQjjqPmiI4dxpJyIOlJShDDmOn7woCCHKYhjaraoaXPrfAIK43rOscK4nvPWV6NWGPcV6fbfJ4Txdtttl+bNm9f+G9hjLQkojGs5bXnQCOPtt98+7bfffmnu3Ln1fRBH3nYCCuO2I13tDhXGq42uX7xQYVyfaVYY12euqjLSjhHGEWHcnJKCL7jIXUzNMbIYoYRAjpQUCGN+EOUafvBgizrkMMfsR3tVJtFxlEdAYVwe2zJ7VhiXSbf+fSuM6zuHCuP6zl2ZI+ffZWPHjk0DBgxIgwYNSosWLWr8gr/M+9r3mhNQGK85w07tQWFcnZlVGFdnLqo4EoVxFWel9ZgUxq252NozgY4Qxt3d3Vn8xsJ3sfgd0cXFBe/YRxIjkyiI4xDGAwcOzCkp+EKMqOKQw/wgEpHF1LHfM1bPdAoBhXE9Z1JhXM9566tRK4z7inT776Mwbj/TTuhRYVzfWVQY13fuyh65wrhswr3vX2Hce1b98UqFcX1mXWFcn7mqykg7QhiTigLxu2TJkkSEMTWyOGq+5ChIpKgRx/yAQTqLvffeOyGMp06d2khZgRQOUUwdUcbF9qpMouMoj4DCuDy2ZfasMC6Tbv37VhjXdw4VxvWduzJHrjAuk265fSuMy+Vb594VxtWZPYVxdeaiiiNRGFdxVlqPSWHcmoutPRPoCGGM9KWENEYWI45jsbuINEYS84UXNVKJ18Sid/wgymvYirKYfbaILC4K5HzCPzqWgMK4nlOrMK7nvPXVqBXGfUW6/fdRGLefaSf0qDCu7ywqjOs7d2WPXGFcNuHe968w7j2r/nilwrg+s64wrs9cVWWkHSGMkb5EGZOKgqhiCuKXQk7iyGEcAhmZRMQwBdEcwnjy5Mk5wjiEMOcjopi2EMZVmTzHUT4BhXH5jMu4g8K4DKqd06fCuL5zqTCu79yVOXKFcZl0y+1bYVwu3zr3rjCuzuwpjKszF1UcicK4irPSekwK49ZcbO2ZQEcJY+QvsjgWvqMmujiEMRKJLzxEMDXHiOZISTFlypR8Lbi4ho0fQijRRrvyOOPoF38ojOs5zQrjes5bX41aYdxXpNt/H4Vx+5l2Qo8K4/rOosK4vnNX9sgVxmUT7n3/CuPes+qPVyqM6zPrCuP6zFVVRtoRwphF75DFEWWMJI4SEcZ80bEfwhih1Lzo3bRp0/I1IYmRwyGIQxozcRF1XJVJdBzlEVAYl8e2zJ4VxmXSrX/fCuP6zqHCuL5zV+bIFcZl0i23b4VxuXzr3LvCuDqzpzCuzlxUcSQK4yrOSusxKYxbc7G1ZwIdIYxJRUFaCuqIKo7IYmpSUSCQ+LJDEkeNDEY0E2G8wQYbJIQx50IYI4ZDGIckDnFseoqe31SddEZhXM/ZVBjXc976atQK474i3f77KIzbz7QTelQY13cWFcb1nbuyR64wLptw7/tXGPeeVX+8UmFcn1lXGNdnrqoy
0o4QxkjfEMakpIhcxsUoY77oEMcI44gcZvE7XksO44EDB6apU6c2IoxDCIc8ZsJ4HRttcb4qE+k4yiGgMC6Ha9m9KozLJlzv/hXG9Z0/hXF9567MkSuMy6Rbbt8K43L51rl3hXF1Zk9hXJ25qOJIFMZVnJXWY1IYt+Zia88EOkoYk5IichhHhHGkpEAgIYyRxBFlzD6iOXIYR0qKiCpursEYopgfTtw6n4DCuJ5zrDCu57z11agVxn1Fuv33URi3n2kn9Kgwru8sKozrO3dlj1xhXDbh3vevMO49q/54pcK4PrOuMK7PXFVlpB0hjJG+yOLIYYwYjujiqPmii8XvEMERacxrIiVFRBgzOfzwESUii0Mgx/mqTKLjKI+Awrg8tmX2rDAuk279+1YY13cOFcb1nbsyR64wLpNuuX0rjMvlW+feFcbVmT2FcXXmooojURhXcVZaj0lh3JqLrT0T6BhhXExJgQQOUUyNHEYghTBmP0oxwnjy5Mm5nShioo/ZQhpT026Ecc9vpk48ozCu56wqjOs5b301aoVxX5Fu/30Uxu1n2gk9KozrO4sK4/rOXdkjVxiXTbj3/SuMe8+qP16pMK7PrCuM6zNXVRlpRwhjchZHdHHkLybKuFiKEcYRXRwpKchhzKJ3RBhzXUQSR2RxSGMmjX3aQxxXZSIdRzkEFMblcC27V4Vx2YTr3b/CuL7zpzCu79yVOXKFcZl0y+1bYVwu3zr3rjCuzuwpjKszF1UcicK4irPSekwK49ZcbO2ZQEcIY6KEWbyOQg5jSkQYE1WMOOaLjhLRxgglhDGviRzGRBhzPgRxyOGoA2Mx0jjarDuTgMK4nvOqMK7nvPXVqBXGfUW6/fdRGLefaSf0qDCu7ywqjOs7d2WPXGFcNuHe968w7j2r/nilwrg+s64wrs9cVWWkHSOMu7u7E9HFiGLqpUuXZlGMAI60FOxTQhyHMI4I4xDGEWHMDyBsEU0c7XFclUl0HOURUBiXx7bMnhXGZdKtf98K4/rOocK4vnNX5sgVxmXSLbdvhXG5fOvcu8K4OrOnMK7OXFRxJArjKs5K6zEpjFtzsbVnAh0hjElHQaQwNaI4oouj5kuOSOOiKEYoIYARzbHo3ZQpU3IOY37wQApTU0IUR7vCuOc3VKedURjXc0YVxvWct74atcK4r0i3/z4K4/Yz7YQe+ffZ2LFj04ABA9KgQYPSokWLGr/s74Tn6+RnUBh38uyu2bMpjNeMXztfrTBuJ83O60thXJ85VRjXZ66qMtKOEMZIX2RxiOMQxaSiYL9ZGCOAEUqU4qJ3CGPEMudDEjNRIYrZD5GsNK7KW7jccSiMy+VbVu8K47LIdka/CuP6zqPCuL5zV+bIFcZl0i23b4VxuXzr3LvCuDqzpzCuzlxUcSQK4yrOSusxKYxbc7G1ZwIdIYyRvsjiWPwuhDE10jjSUPBlF4U2pHAIYxa9GzN6TJo/f37uKyQ056N/ophjP+oQ1dYrhH0ncSBafb/99ksf/vCH04033tjzp8gzlSNQFMbHHnts/kVQJ703fZbV//uG74Dvfe97aa211krjxo2r3HvXAa2aQAjjL33pS+mZZ55pfO/7mVj9z0SnsBs+fHj6yEc+YoTxqj9ClTsbwpj0cI8++mgjAKRT3pc+x+r93cTPdATyvOc970n8fe/29yWgMP778q/63RXGVZ+ht8enMH6bhXu9I9AxwhiZG7mLY9G7yGPMlxyRw0hi8hZHdDE1/5AjJcXHPvaxdNJJJ6ULL7wwXXzxxemSSy5JF110Ud7nOArtlLgmjq1XcOkkDldccUXacsstFca9+7ukUleFMOYHjZ133jldddVV+XPbSe9Pn2X1/s65+uqr01ZbbZX+8R//UWFcqU9t7waDMN5ss83ShhtumL+HL730Uj/bb/27pD//nXDppZdlUbz22munE0880ZQUvfs4VeKqEMZf/OIX0xlnnJH8TK/ed1unff4vu+yydOqppyqMK/EpTTngas8990xdXV1p1qxZ+X/fVmRoDqMCBBTGFZiEXg5BYdxLUF7WINARwhjpGxG/zbK4mJaCfYQx4jjkMVHIJ5xwQvrEJz6RNtlkkywIkYQIhajZby6rOtd8rcd/y68OTLbeeuvG++LPf/5z40PjTvUJ8DmfNm1aWnfddbNY2mabbf7mM1yH96BjbP/fHbwXPvnJT6b1118/TZ06tfpvZke4EoEHH3ww7brrron/FbTtttv6uW7x75P++vfGpptumj760Y+mc889NwcDmDpspY9OZQ9YcHqfffbJ/97afPPN/Uz7mW68B/glwsc//vG07777Vvb9218GVowwvuCCC9J1111nkUHjPbDRRhvlf1uPGjWqv3wkavucIYw/85nPpBtuuKExh36m/Tut1Xvg+uuvzz5l4MCBiTRRdd26kMWUSEkRkcWRmoLoYiIOo44oY1JS8AU4cuTIdN555+XIBqIbzjzzzHTWWWfluvk4ztHONZbOZnD22WenK6+8Ms2YMaOun49+OW7yWc6bNy+df/75fkb9e6rle4AfeObMmdMvPx91fmjSRpEiyO/ezv7uXZP55RdB/JvPrR4E+K4eMmRIOuecc/xc+339N++BwYMHp2HDhtXjzdzBoyTQ6ogjjsj/O2udddbJ/zOXoAyLDHgPvP/9709bbLFFmjhxYgd/Cjrj0UjHR+AM/9NyvfXW8zPs32OrfA/wHuG9wi9wH3nkkdp+CLpIRxEpKZDECOMokcM4agRxRBjzAwXSmHNxPTV9RKRycZ/rOC7W7Fs6mwHvGd4nbvUigDTml0R+Pjv787m688t7w891vT7TjJbPNd/hqzvvvq7z/z7wc12vz7Wf6c7/TK7J37sR7FOvd3XnjZa/V8kTT8qfI4880iKDld4DxxxzTCKV4+zZszvvzd9hTzRz5szEL+L8HPv3WG/fA/H5XrBgQW0/DTnCmOhipDHpKZC9/OMk5C778QMm8o8IY774KPxDlZrz/PfFaKdmo41r2Kjj+tzgHxKQgAQkIAEJSEACEpCABCQggQ4lYIqfDp1YH0sCEpBAPyCQI4xJSYEojoXviBSO32gjjiMlBbKYwm+skcSI4WhrFsLN8jjEMUyL+/2AsY8oAQlIQAISkIAEJCABCUhAAhKQgAQkIAEJSKAWBFbKYYwcDlnMPmI4/jtTRBojj0MWhxSOyOGIPg55zG9UI/K4Odq4FnQcpAQkIAEJSEACEpCABCQgAQlIQAISkIAEJCCBfkSgizQUsehdMcK4mJICSUw6ikhJURTGnEMUhxxGFoccLtYwbT7uR5x9VAlIQAISkIAEJCABCUhAAhKQgAQkIAEJSEAClSfQSElBDmMkMakpiDJGDhNdTGRx1LRFGoqoEcQRWUwbx2zF6GOOQyRXnogDlIAEJCABCUhAAhKQgAQkIAEJSEACEpCABCTQTwk0hHEseIcwRhxHQRITUYwUZr8YbRyCmHMhjiOKOM6FKC7W7LtJQAISkIAEJCABCUhAAhKQgAQkIAEJSEACEpBAtQhkYUwqCgqSmOhiClHFHEeEMce
IY4QxJSKIQyYjgSlFUcyjxjEimS3EcbUwOBoJSEACEpCABCQgAQlIQAISkIAEJCABCUhAAhLIi96RjoI8xiGLI7oYSUxBECOGoxSlMQK4mJICpCGPi3K4uB/yWPwSkIAEJCABCUhAAhKQgAQkIAEJSEACEpCABCRQHQJZGCOLSUkRi96FMCYFBRHG1Ihj9iO6GHncLItDFEf0cdSRpiLq6jy+I5GABCQgAQlIQAISkIAEJCABCUhAAhKQgAQkIIEg0EhJETmMiTIOYRw1wpgoYiQxEjhq9kMghxwOaYwcptBOzRY1bW4SkIAEJCABCUhAAhKQgAQkIAEJSEACEpCABCRQLQIrLXrXnJIiIosRxEQYc8w+kriYhiIEMDXCmC3qOMdx8Xy1MDgaCUhAAhKQgAQkIAEJSEACEpCABCQgAQlIQAIS6Oru7s75i2Phu1j8juji4oJ37COJI39xRBkjgotRxhFVHHKY8xFZTB37opeABCQgAQlIQAISkIAEJCABCUhAAhKQgAQkIIFqEegiFQU5jJcsWZIXvaNGFkdNVDEFKRw14jgihhHDEW0cbUjhEMXRxmMX26uFwdFIQAISkIAEJCABCUhAAhKQgAQkIAEJSEACEpBATkmxePHihjRGFpOaIha7i0hjpDDCOOqIKg55DEr22YqymH22iCwuCuR8wj8kIAEJSEACEpCABCQgAQlIQAISkIAEJCABCUigEgS6iC4myphUFEQVU5DFxXQUpJ8IgRyimMhiShyHOA4hzLmIKA6BXIkndhASkIAEJCABCUhAAhKQgAQkIAEJSEACEpCABCTQkkBDGBNljCyOhe+oiS5msbtY6I4IY0RwpKYIaVysuQvHbIhiSrTRrjzOOPxDAhKQgAQkIAEJSEACEpCABCQgAQlIQAISkEDlCORF75DFEWWMJI6CLKYgiKNG+hJVHIvecUx0MdHEIYSRwuwXj+PJI+o4jq0lIAEJSEACEpCABCQgAQlIQAISkIAEJCABCUigGgS6SEVBWgrqiCqOyGJqUlEgiJHGSOKoQwhH3SyMi8chiSPamGM3CUhAAhKQgAQkIAEJSEACEpCABCQgAQlIQAISqBaBvOhdCGNSUkQu42KUMZIYcdwcVYwsRiZHHuOIKA4hjCAOScw5No7jfLVQOBoJSEACEpCABCQgAQlIQAISkIAEJCABCUhAAv2bQEMYk5IichhHhHGkpEAII4xJPRFRxuxHdHEseBfHSOGQx1GDOURxSOT+jd6nl4AEJCABCUhAAhKQgAQkIAEJSEACEpCABCRQLQKNRe8ihzFiOKKLo44cxhFhHHUIYkRw7PN4COEotLMVxbHCuFpvAkcjAQlIQAISkIAEJCABCUhAAhKQgAQkIAEJSAACWRgXU1IgjkMUUyOHiTAm2jj2Iw1FRBlTUxDHFPbZQhpTx7lozxf08Edcu6q6h5f2WfOqxtZ8rs8G5Y0kIAEJSEACEpCABCQgAQlIQAISkIAEJCABCawBgbzoXUQXR/5iooyLpRhhHNHFSGHEMXVEF0eNII7I4pDGjDHaEao9bbzu2WefTSNHjky33HJLGjJkSC633npruv3223P7Y489lqU2/a3uxn1IwcGzr842Z86cNHTo0Mb4YpzFevjw4WnevHmNVByrcx9fIwEJSEACEpCABCQgAQlIQAISkIAEJCABCUigrwjkCOPFixcnCgKVEhHGRBUjjhHGlGKEcVEUs08JORw1UjYkcTxQRN/GcXPNfSZOnJi+853vpE033TRtt912afvtt09f/vKX04477ph22mmntOeee6bB5w9OTz/9dENMN/fzTsdPPvlkuvDCC9Pvf//7d7q05fkRI0akrbbaKm2yySZ5bIyvuey9995p0qRJmUHLTmyUgAQkIAEJSEACEpCABCQgAQlIQAISkIAEJFAhAlkYd3d3J6KLEcXUS5cuzaIYQRxpKdgxaQOsAAAan0lEQVSnhDgOYUyUMRI4hHFI4oj+jWjiaI/jnhggqYnM3XjjjdO//du/pUMPPTT96Ec/SieeeGI6+uij01577ZU23HDDtN5666WTTjopRyPHvXrqs7mdMdx0003pU5/6VDruuOOaT/fqmOjntdZaK6277rrphBNOSD/+8Y/zGBlnlDPPPDM9+uijRhj3iqgXSUACEpCABCQgAQlIQAISkIAEJCABCUhAAn9vAl2kZCC6mBpRHNHFUSOIkbhFUYwkRgBHHVHEiFj2o4526mjvjTD+y1/+kr74xS+mLbfcMk2ZMiXNmjUrPfPMM4mo4OnTp6ff/e536XOf+1wWytdee22W20WQjHfhgoU5Avmpp57KUpnxszFuoqjPOOOM9P73vz8deeSR+fiVV1acZ3yI8YULFzbuSVoJ+ixupJ5Ye+21c/TzQw89lMc4c+bMVCzxurhnRG/Pnz8/zZ07N7PmfhTGt6p7cg0yH7nP/tKlL+V7khqDucp8l6+4Bl6zZ8/Oz0V7ceO1PMtzzz2X+TBGXu8mAQlIQAISkIAEJCABCUhAAhKQgAQkIAEJSKALAYksDnEcophUFOw3C+MQxSGLOW5VQlSGKAY1srJ43Ao/MjOEMSkoEJrFLYTn+eefn/71X/81bbPNNmnBggX5EsZBfmPSTJx11lk56nfQoEHp9NNPT+RApi+ec8RdI9Iee+yR3vve96YvfelLWUDfe++9WYAjpW+88cZ09tln5wjm448/Pv3yl7/MEcnkVo4thPEuu+zSuH+ca65ZVPDOO+9Mf/jDH3LfyOpf/epXOSczfc6YMaMxZqKmiVg+7bTT0s0339x4fnjzDIMHD0733HNPuuSSS9LJJ5+cfv7zn6crrrgiX3ffvfelyy+/PJ1yyim5XHPNNSul7YAPAv2Pf/xj7p+oaJ4N6f7444/n+Wkeu8cSkIAEJCABCUhAAhKQgAQkIAEJSEACEpBA/yGQU1IgUYlepQ5hTI00jjQUiOMotCEfi2kpQhqDjv2oYx/RG/shk1thLgrjHXbYIUfKxnX0QWEjfzGpKd7znvdkCUo7UbVIUKKTyX2822675Qjg//iP/8hi+YYbbsjXXnvNtTl6mdd++tOfTkhlchLzeuQy6TC23Xbb/Pqdd945p66gjcjm1159Ld8/hPHXv/71RMTwqjaijrkHcpt+/v3f/z19/vOfz6krRo8eneUtuZCR19/61rdynmbGtfnmm2eZC3fm48ADD8xR1fvvv39+xq985St5bOuvv376xS9+kQ444IC09dZbJ7j953/+Z77XVVddlRDWMEcWI6vJvcxYyAWNlCdXNKL6kUceWdVjeE4CEpCABCQgAQlIQAISkIAEJCABCUhAAhLocAKNRe8id3GkTYg8xhFhjCRGEBPpGgUJyX5EDUcdcphjJHGxHZ4c97StShjzmhDG1Ijhf/iHf8iL5DG+oUOHpk984hNZ9hKBi4wlmpbF58g1/IMf/CBH5xLRe+yxx+YcxCyuR5oLBDRRwIhbZC5Ru6NGjcp9kkf5Ax/4QNp9991zKgfGEcKY1BgXXXRRIpq3WIhSJl0EG30fdthhuY/PfO
Yz6aijjkrnnHNOjjZGQiNsN9tss8Y9iSw++OCD04ABA9I+++yTo4cRxty/q6srffzjH8+vHzZsWM7rzHXkUyaFBxHDPPMxxxyTPvaxj6Uf/vCHOeoa+X/llVemjTbaKIvpyy67LE2YMCFHTiOpBw4cmGU593GTgAQkIAEJSEACEpCABCQgAQlIQAISkIAE+ieBnMOYCFSii5tlcTEtBfsIY8RsyGPEL8IYKVwsIYxD7nJdSGMwr4kwLk4TEbgIYxagQ3AjjJHCV199dRa7jBMBikBFyhJNO27cuHz/iy68KP3TP/1TIuUEG8L8jjvuyGL3N7/5TWbB6+Fy1113pU9+8pNZJhOlyxbCmPu/733v+5vy0Y9+NL+OaxHGhx9+ePrQhz6UI6AjIpm+b7vttnzu0ksvzfdC0DMft99+e0IuE+lMugiegzQa3I+F/xDrbEQFhzhHkvNaNmQ3UcTI4KlTp6aZz8xMB/5gRYQyUcbkMKYP+kVQs8Dgrrvumhfpyx34hwQkIAEJSEACEpCABCQgAQlIQAISkIAEJNDvCOQIYyRjpKSIyGJEIgWpiBSOOqKMEcQRXYwALgpjKEZbyOGiRObanjbuEzmMm1NSFF9DH0hgBCqRtrwO4U0O42nTpmUxjHS97rrrcrqFT33qUzmyFpHKmJqFMeNbumRpeuKJJxqvRyCTxuJnP/tZTiNBdC7n2UIYE+1LKggiiIvluOOOSyyGx4YwJkqZ1BhE+cbGOOBevGfzmLfYYosshUMYs1AfQpx5YKOdVBak12CBwJD05Dn+xje+kSXwxIkTcxQ2KSwQw0RXM35EOuXXv/51Ts2BgOeZ3SQgAQlIQAISkIAEJCABCUhAAhKQgAQkIIH+SaBr8eLFiYK4RD4ijKNEDuOoiX6NCOOILEZcFmVxUQy3ksZgDqnZCnlvhTGLxZGnF2HMgnWMgxQQyNQjjjgiR9YSnfvZz3425zpmgTzEak/CmGfg9QjieD15kEk5QZqLf/mXf8npHIj2ZQthvKocxvGcCONDDjkkRwIjaKOdez7zzDM5TzERyN/85jdzRDH3RC4TkUy+YaKIQxivvfbaafjw4Zk54+C5v/zlL2dhTGqN6PuBBx7IDIgaRhjzGvIbE1VNtPQXvvCFnOOYGkbkVf7a176Wo7RbzYttEpCABCQgAQlIQAISkIAEJCABCUhAAhKQQOcTyBHGyGKkcaSliFQU1CGLqRHGRUEcQhiJjKgsimPQ0cY1bNRx/aqw9kYY08+tt96ac/SSp3funLk5wvjyyy/PMpRIYKJ+f/7znyfazjvvvByNvCphTJQ1+YSRpwhVUlvw+t/+9rfp4osvzrKYc83CeJdddkkLFixo+UghbxHG5BJG2JKCItq7u7tz3uKQ2tzz1FNPTRdeeGEiLQYyl6jfojBG+JJWA9ZsvRXGpNXg+ZHfRx99dM67zLNR4EPu4wsuuCDL95YPY6MEJCABCUhAAhKQgAQkIAEJSEACEpCABCTQ8QRyhDGylHQOiONidHGI40hJgZykIHWRxEjLaGsWws3ymPOxFfejLeqiMN5+++3TrFmzGqKZcTBOUjiQm5c0DCzuhuxeuHBh2m233bJEPvvss3MaCMbGa4jqRb6GMH5z+ZtZmBZzGCOCDzrooETu4VNOOSXNnj278awjR47MEpmcwqS8YOtNhHE8U0/CGBGM2F5vvfXSL37xixzhzHiJJiYi+NOf/nSOSn744YcbEcbvJIzjnvfff38jwpjF7Yg43nPPPTOHm266Kct/rmWeeCYWwbv++usTkdtuEpCABCQgAQlIQAISkIAEJCABCUhAAhKQQP8ksFIO40hJEaIYeUtBYkakMfshi0MKI4BDHlOHPCaSlkIbdYjiqFshD2FMfuLNNtssLxyH/LznnnvT2LHjcsqIb3/722mttdbKaRvuu+++3D9RvuTsRb4OHjw4RwLPnTs3p2PYfffd03vf+968CNyIESPybVkgDvm6//77JxayI/8vwpicxKeffnqaOXNmmjdvXm4/8MADU1dXV75f5CVuhzBGBHN/on7POuusnJ6CMY8fPz63k27j85//fI76jZQUqyWMx09I3d3P5+hlno97EqVMCg5EMrJ6/fXXT0cddVQW763mxTYJSEACEpCABCQgAQlIQAISkIAEJCABCUig8wl0kYYiFr0rRhgjKJHEFCQx6SgiJUVRGHOOSN6Qw8jgkMPFGpTNx63wIoz/+te/5khYhOkHP/jBnMuXfL6UAQMGpI985COJReyGDh2aXnn5ldwN4z3zzDOzMB44cGDaa6+90j777JOFKzL0Ax/4QI7W/dOf/pTFNekniCZ+3/vel7bccsucmuGKK67I4pR2opURxUhr+mMcSGpkLs9BSgzyCa8qh3E8X+Qwbk5JgZA+99xzs6QmtQZiG5nLPRkz91xnnXXS6NGjGxHG//zP/7zKlBRxz4gwRqJPGD8hLVv2Rrr77rvT3nvvnfvkHnvssUe+F1yJ5r7zzjvj5dYSkIAEJCABCUhAAhKQgAQkIAEJSEACEpBAPyTQSElBWgekKykfSEuBHEbeIoyjpq0YSYwoRhBT0x7RxXBsPg6R/E6MuQdSFtGJNN1ggw1yYVE2UkKQM/i0007LaSm4R3FDwB533HF5kTsELNHGSNE77rgjR9GyqNu1116bU28QQUzqCa4hXQV5g0lDQd/ch9dz/x122CEhmc8///wsqW+++eYsjMkJvOGGG+Zcx4sWLSoO42/2ieQ9+eSTE5HRyPDiRnQzuZLjnshpJPQtt9yS77nxxhvnlBrMzWGHHZYX+ps8eXLmSz9wRYyzSB7RwrGRZuLQQw/Ni+3lxfCWr4j0RiQff/zxOd0Fz7juuuumnXbaKQ0bNiw/V7zeWgISkIAEJCABCUhAAhKQgAQkIAEJSEACEuh/BBrCOBa8QxgjJ6MgcIkoRgqzX4w2DkFcFMcRRRznQhQXa/Z72nj9u9m4Pkq8jrGTooLx9rTFfRgnz1fcOH7uuecyg2J77MdrezqOdurma5vPxXmkPGNm7K22uC7OcdyqjfPN7a3aYMOie0SVF7dWry2ed18CEpCABCQgAQlIQAISkIAEJCABCUhAAhLoXAJZGCMNKcjKWPQOgclxRBhzjEhFGFMQrSFbQxgjgouiGGxxHCIyxHEZSOMexb5btcX55nMcN7dxbau26KMddav+W7W14149PU+Z92vXuO1HAhKQgAQkIAEJSEACEpCABCQgAQlIQAISKJdAXvSOdBTkMQ5ZjCimIIkpCGJkcZSiNEYAF1NSMFzaooSILIriaCv30exdAhKQgAQkIAEJSEACEpCABCQgAQlIQAISkIAE3g2BLIyRxaSkiEXvQhiTtoAIY2rEcSyAF5HFzbI4JDHniwVBHMKY2k0CEpCABCQgAQlIQAISkIAEJCABCUhAAhKQgASqR6CRkiJyGBNlHMI4aoQxUcREGIcsDiFcTE9BW0hjJDGFtogojpo2NwlIQAISkIAEJCABCUhAAhKQgAQkIAEJSEACEqgWgZUWvWtOSRGRxYhiIow5Zh9JX
ExDEQI4hDGPGJHEcY7j4vlqYXA0EpCABCQgAQlIQAISkIAEJCABCUhAAhKQgAQk0NXd3Z3zF8fCd7H4HdHFxQXv2EcSR/7iiDZGBBejjCOqOOQw5yOymDr2RS8BCUhAAhKQgAQkIAEJSEACEpCABCQgAQlIQALVItBFKgpyGC9ZsiQvekeNLI6aqGIKUjhqxHFEDCOGI9o42pDCIYqjjccutlcLg6ORgAQkIAEJSEACEpCABCQgAQlIQAISkIAEJCCBnJJi8eLFDWmMLCY1RSx2F5HGSGGEcdQRVRzyGJTssxVlMftsEVlcFMj5hH9IQAISkIAEJCABCUhAAhKQgAQkIAEJSEACEpBAJQh0EV1MlDGpKIgqpiCLi+koSD8RAjlEMZHFlDgOcRxCmHMRURwCuRJP7CAkIAEJSEACEpCABCQgAQlIQAISkIAEJCABCUigJYGGMCbKGFkcC99RE13MYnex0B0RxojgSE0R0rhYcxeO2RDFlGijXXmccfiHBCQgAQlIQAISkIAEJCABCUhAAhKQgAQkIIHKEciL3iGLI8oYSRwFWUxBEEeN9CWqOBa945joYqKJQwgjhdkvHseTR9RxHFtLQAISkIAEJCABCUhAAhKQgAQkIAEJSEACEpBANQh0kYqCtBTUEVUckcXUpKJAECONkcRRhxCOulkYF49DEke0McduEpCABCQgAQlIQAISkIAEJCABCUhAAhKQgAQkUC0CedG7EMakpIhcxsUoYyQx4rg5qhhZjEyOPMYRURxCGEEckphzbBzH+WqhcDQSkIAEJCABCUhAAhKQgAQkIAEJSEACEpCABPo3gYYwJiVF5DCOCONISYEQRhiTeiKijNmP6OJY8C6OkcIhj6MGc4jikMj9G71PLwEJSEACEpCABCQgAQlIQAISkIAEJCABCUigWgQai95FDmPEcEQXRx05jCPCOOoQxIjg2OfxEMJRaGcrimOFcbXeBI5GAhKQgAQkIAEJSEACEpCABCQgAQlIQAISkAAEsjAupqRAHIcopkYOE2FMtHHsRxqKiDKmpiCOKeyzhTSmjnPRni/wDwlIQAISkIAEJCABCUhAAhKQgAQkIAEJSEACEqgMgbzoXUQXR/5iooyLpRhhHNHFSGHEMXVEF0eNII7I4pDGPHG0I4/dJCABCUhAAhKQgAQkIAEJSEACEpCABCQgAQlIoFoEcoTx4sWLE4UcxpSIMCaqGHGMMKYUI4yLoph9SsjhqCMNRchjHr0YaVwtFI5GAhKQgAQkIAEJSEACEpCABCQgAQlIQAISkED/JpCFcXd3dyK6GFFMvXTp0iyKEcSRloJ9SojjEMZEGUcaioggDmEM2ogmDnkcx/0bu08vAQlIQAISkIAEJCABCUhAAhKQgAQkIAEJSKB6BBrCOPIYI4yjFKONI0VFMT0FEciRlgKZjBQOkRxRx7THPuctMvA94HvA94DvAd8Dvgd8D/ge8D3ge8D3gO8B3wO+B3wP+B7wPeB7wPdANd8DXUQXk8OYQlqKSEeBIGY/BHFEFjORsejdqiaVKGM26ogqpi4eV8+fOyIJSEACEpCABCQgAQlIQAISkIAEJCABCUhAAv2XQI4wjkXvqEMYUyONIw0FwjhKczQxAjnkMSjZjzr2kcWxHzK5/2L3ySUgAQlIQAISkIAEJCABCUhAAhKQgAQkIAEJVI9AY9G7yF0caSgij3FEGEdqiYgupkb8Rl2MHg45zHkkcUQV086mMK7eG8ERSUACEpCABCQgAQlIQAISkIAEJCABCUhAAhLoIqqY/MXUzbK4mJaCfXIRI45DHocwRgoXSwjjoiAOaQxyhbFvPAlIQAISkIAEJCABCUhAAhKQgAQkIAEJSEAC1SOQI4xjwTukcUQWR2qKWNguaqRxpKCI6GIEcFEY85jRFnK4KJG51k0CEpCABCQgAQlIQAISkIAEJCABCUhAAhKQgASqRaCLhe4opKRAEiOMo0QO46hj4TsijEMaI5CLsrgohltJYx4/Io+rhcLRSEACEpCABCQgAQlIQAISkIAEJCABCUhAAhLo3wRyhDGyGGkcaSkiFQV1yGJqhHFREIcQRiCHKA55DFbaIsKYOq7v38h9eglIQAISkIAEJCABCUhAAhKQgAQkIAEJSEAC1SSQI4xJSUH+4lj4LtJShDgmmpiUFMhiCvtIYuRwtDUL4RDH1Gwhjpv3q4nFUUlAAhKQgAQkIAEJSEACEpCABCQgAQlIQAIS6H8EVsphHCkpQhQjhiN3cUQaI49DFocUjsjhiD4OeUyEcUQeN0cb9z/UPrEEJCABCUhAAhKQgAQkIAEJSEACEpCABCQggWoT6CINRSx6V4wwRh4jiSlIYtJRREqKojDmHKI45DCyOORwsQZD83G10Tg6CUhAAhKQgAQkIAEJSEACEpCABCQgAQlIQAL9i0AjJQU5jJHEpKYgJQVymOhihHHUtEUaiqgRxBFZTBvHbMXoY45DJPcvvD6tBCQgAQlIQAISkIAEJCABCUhAAhKQgAQkIIH6EGgI41jwDmGMOI6CJCaiGCnMfjHaOAQx50IcRxRxnAtRXKzZd5OABCQgAQlIQAISkIAEJCABCUhAAhKQgAQkIIFqEcjCmFQUFCQx0cUUooo5jghjjhHHCGNKRBCHTEYCU4qimEeNY0QyW4jjamFwNBKQgAQkIAEJSEACEpCABCQgAQlIQAISkIAEJJAXvSMdBXmMQxZHdDGSmIIgRgxHKUpjBHAxJQVIQx4X5XBxP+Sx+CUgAQlIQAISkIAEJCABCUhAAhKQgAQkIAEJSKA6BLIwRhaTkiIWvQthTAoKIoypEcexAB5Rw8jjZlkcojiij6OONBVRV+fxHYkEJCABCUhAAhKQgAQkIAEJSEACEpCABCQgAQkEgUZKishhTJRxCOOoEcZEESOJQxaHDC6mp6AtpDFymEIbNVvUtLlJQAISkIAEJCABCUhAAhKQgAQkIAEJSEACEpBAtQistOhdc0qKiCxGFBNhzDH7SOJiGooQwCGMeUTEMVuc47h4vloYHI0EJCABCUhAAhKQgAQkIAEJSEACEpCABCQgAQl0dXd35/zFsfBdLH5HdHFxwTv2kcSRvziijRHBxSjjiCoOOcz5iCymjn3RS0ACEpCABCQgAQlIQAISkIAEJCABCUhAAhKQQLUIdJGKghzGS5YsyYveUSOLoyaqmIIUjhpxHBHDiOGINo42pHCI4mjjsYvt1cLgaCQgAQlIQAISkIAEJCABCUhAAhKQgAQkIAEJSCCnpFi8eHFDGiOLSU0Ri91FpDFSGGEcdUQVhzwGJftsRVnMPltEFhcFcj7hHxKQgAQkIAEJSEACEpCABCQgAQlIQAISkIAEJFAJAl1EFxNlTCoKooopyOJiOgrST4RADlFMZDEljkMchxDmXEQUh0CuxBM7CAlIQAISkIAEJCABCUhAAhKQgAQkIAEJSEACEmhJoCGMiTJGFsfCd9REF7PYXSx0R4QxIjhSU4Q0LtbchWM2RDEl2mhXHmcc/iEBCUhAAhKQgAQkIAEJSEACEpCABCQg
AQlIoHIE8qJ3yOKIMkYSR0EWUxDEUSN9iSqORe84JrqYaOIQwkhh9ovH8eQRdRzH1hKQgAQkIAEJSEACEpCABCQgAQlIQAISkIAEJFANAl2koiAtBXVEFUdkMTWpKBDESGMkcdQhhKNuFsbF45DEEW3MsZsEJCABCUhAAhKQgAQkIAEJSEACEpCABCQgAQlUi0Be9C6EMSkpIpdxMcoYSYw4bo4qRhYjkyOPcUQUhxBGEIck5hwbx3G+WigcjQQkIAEJSEACEpCABCQgAQlIQAISkIAEJCCB/k2gIYxJSRE5jCPCOFJSIIQRxqSeiChj9iO6OBa8i2OkcMjjqMEcojgkcv9G79NLQAISkIAEJCABCUhAAhKQgAQkIAEJSEACEqgWgf8P8XhrzPhz7LEAAAAASUVORK5CYII=)", "_____no_output_____" ], [ "What’s actually happening under the hood?\n\nWhen we fit() on the pipeline with Spark data frame (df), its text column is fed into DocumentAssembler() transformer at first and then a new column “document” is created in Document type (AnnotatorType). As we mentioned before, this transformer is basically the initial entry point to Spark NLP for any Spark data frame. Then its document column is fed into SentenceDetector() (AnnotatorApproach) and the text is split into an array of sentences and a new column “sentences” in Document type is created. Then “sentences” column is fed into Tokenizer() (AnnotatorModel) and each sentence is tokenized and a new column “token” in Token type is created. And so on. ", "_____no_output_____" ] ], [ [ "import sparknlp\n\nspark = sparknlp.start()\n\nprint(\"Spark NLP version\", sparknlp.version())\n\nprint(\"Apache Spark version:\", spark.version)", "Spark NLP version 2.6.3\nApache Spark version: 2.4.4\n" ] ], [ [ "### Create Spark Dataframe", "_____no_output_____" ] ], [ [ "text = 'Peter Parker is a nice guy and lives in New York'\n\nspark_df = spark.createDataFrame([[text]]).toDF(\"text\")\n\nspark_df.show(truncate=False)", "+------------------------------------------------+\n|text |\n+------------------------------------------------+\n|Peter Parker is a nice guy and lives in New York|\n+------------------------------------------------+\n\n" ], [ "from pyspark.sql.types import StringType, IntegerType\n\n# if you want to create a spark datafarme from a list of strings\n\ntext_list = ['Peter Parker is a nice guy and lives in New York.', 'Bruce Wayne is also a nice guy and lives in Gotham City.']\n\nspark.createDataFrame(text_list, StringType()).toDF(\"text\").show(truncate=80)\n", "+--------------------------------------------------------+\n| text|\n+--------------------------------------------------------+\n| Peter Parker is a nice guy and lives in New York.|\n|Bruce Wayne is also a nice guy and lives in Gotham City.|\n+--------------------------------------------------------+\n\n" ], [ "\nfrom pyspark.sql import Row\n\nspark.createDataFrame(list(map(lambda x: Row(text=x), text_list))).show(truncate=80)\n", "+--------------------------------------------------------+\n| text|\n+--------------------------------------------------------+\n| Peter Parker is a nice guy and lives in New York.|\n|Bruce Wayne is also a nice guy and lives in Gotham City.|\n+--------------------------------------------------------+\n\n" ], [ "!wget -q https://raw.githubusercontent.com/JohnSnowLabs/spark-nlp-workshop/master/jupyter/annotation/english/spark-nlp-basics/sample-sentences-en.txt", "_____no_output_____" ], [ "with open('./sample-sentences-en.txt') as f:\n print (f.read())", "Peter is a very good person.\nMy life in Russia is very interesting.\nJohn and Peter are brothers. However they don't support each other that much.\nLucas Nogal Dunbercker is no longer happy. He has a good car though.\nEurope is very culture rich. There are huge churches! 
and big houses!\n" ], [ "spark_df = spark.read.text('./sample-sentences-en.txt').toDF('text')\n\nspark_df.show(truncate=False)", "+-----------------------------------------------------------------------------+\n|text |\n+-----------------------------------------------------------------------------+\n|Peter is a very good person. |\n|My life in Russia is very interesting. |\n|John and Peter are brothers. However they don't support each other that much.|\n|Lucas Nogal Dunbercker is no longer happy. He has a good car though. |\n|Europe is very culture rich. There are huge churches! and big houses! |\n+-----------------------------------------------------------------------------+\n\n" ], [ "spark_df.select('text').show(truncate=False)", "+-----------------------------------------------------------------------------+\n|text |\n+-----------------------------------------------------------------------------+\n|Peter is a very good person. |\n|My life in Russia is very interesting. |\n|John and Peter are brothers. However they don't support each other that much.|\n|Lucas Nogal Dunbercker is no longer happy. He has a good car though. |\n|Europe is very culture rich. There are huge churches! and big houses! |\n+-----------------------------------------------------------------------------+\n\n" ], [ "textFiles = spark.sparkContext.wholeTextFiles(\"./*.txt\",4)\n \nspark_df_folder = textFiles.toDF(schema=['path','text'])\n\nspark_df_folder.show(truncate=30)", "+------------------------------+------------------------------+\n| path| text|\n+------------------------------+------------------------------+\n|file:/content/sample-senten...|Peter is a very good person...|\n+------------------------------+------------------------------+\n\n" ], [ "spark_df_folder.select('text').take(1)", "_____no_output_____" ] ], [ [ "### Transformers", "_____no_output_____" ], [ "what are we going to do if our DataFrame doesn’t have columns in those type? Here comes transformers. In Spark NLP, we have five different transformers that are mainly used for getting the data in or transform the data from one AnnotatorType to another. Here is the list of transformers:\n\n`DocumentAssembler`: To get through the NLP process, we need to get raw data annotated. This is a special transformer that does this for us; it creates the first annotation of type Document which may be used by annotators down the road.\n\n`TokenAssembler`: This transformer reconstructs a Document type annotation from tokens, usually after these have been normalized, lemmatized, normalized, spell checked, etc, to use this document annotation in further annotators.\n\n`Doc2Chunk`: Converts DOCUMENT type annotations into CHUNK type with the contents of a chunkCol.\n\n`Chunk2Doc` : Converts a CHUNK type column back into DOCUMENT. Useful when trying to re-tokenize or do further analysis on a CHUNK result.\n\n`Finisher`: Once we have our NLP pipeline ready to go, we might want to use our annotation results somewhere else where it is easy to use. The Finisher outputs annotation(s) values into a string.", "_____no_output_____" ], [ "each annotator accepts certain types of columns and outputs new columns in another type (we call this AnnotatorType).\n\nIn Spark NLP, we have the following types: \n\n`Document`, `token`, `chunk`, `pos`, `word_embeddings`, `date`, `entity`, `sentiment`, `named_entity`, `dependency`, `labeled_dependency`. 
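Every annotation carries its AnnotatorType in the `annotatorType` field of the struct, so you can always verify what a column actually holds before feeding it into the next annotator. Below is a minimal sketch; it assumes a DataFrame `doc_df` that already has a `document` column, exactly like the one we will build with DocumentAssembler() in the next section:

```python
import pyspark.sql.functions as F

# each element of the 'document' array column is an annotation struct;
# exploding it lets us read the AnnotatorType directly -> 'document'
doc_df.select(F.explode("document").alias("ann"))\
      .select("ann.annotatorType")\
      .show(truncate=False)
```

The same check works for any annotated column (`token`, `chunk`, and so on).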
\n\nThat is, the DataFrame you have needs to have a column from one of these types if that column will be fed into an annotator; otherwise, you’d need to use one of the Spark NLP transformers.", "_____no_output_____" ], [ "## 2. Document Assembler", "_____no_output_____" ], [ "In Spark NLP, we have five different transformers that are mainly used for getting the data in or transform the data from one AnnotatorType to another.", "_____no_output_____" ], [ "That is, the DataFrame you have needs to have a column from one of these types if that column will be fed into an annotator; otherwise, you’d need to use one of the Spark NLP transformers. Here is the list of transformers: DocumentAssembler, TokenAssembler, Doc2Chunk, Chunk2Doc, and the Finisher.\n\nSo, let’s start with DocumentAssembler(), an entry point to Spark NLP annotators.", "_____no_output_____" ], [ "To get through the process in Spark NLP, we need to get raw data transformed into Document type at first. \n\nDocumentAssembler() is a special transformer that does this for us; it creates the first annotation of type Document which may be used by annotators down the road.\n\nDocumentAssembler() comes from sparknlp.base class and has the following settable parameters. See the full list here and the source code here.\n\n`setInputCol()` -> the name of the column that will be converted. We can specify only one column here. It can read either a String column or an Array[String]\n\n`setOutputCol()` -> optional : the name of the column in Document type that is generated. We can specify only one column here. Default is ‘document’\n\n`setIdCol()` -> optional: String type column with id information\n\n`setMetadataCol()` -> optional: Map type column with metadata information\n\n`setCleanupMode()` -> optional: Cleaning up options, \n\npossible values:\n```\ndisabled: Source kept as original. This is a default.\ninplace: removes new lines and tabs.\ninplace_full: removes new lines and tabs but also those which were converted to strings (i.e. \\n)\nshrink: removes new lines and tabs, plus merging multiple spaces and blank lines to a single space.\nshrink_full: remove new lines and tabs, including stringified values, plus shrinking spaces and blank lines.\n```", "_____no_output_____" ] ], [ [ "from sparknlp.base import *\n\ndocumentAssembler = DocumentAssembler()\\\n.setInputCol(\"text\")\\\n.setOutputCol(\"document\")\\\n.setCleanupMode(\"shrink\")\n\ndoc_df = documentAssembler.transform(spark_df)\n\ndoc_df.show(truncate=30)", "+------------------------------+------------------------------+\n| text| document|\n+------------------------------+------------------------------+\n|Peter Parker is a nice guy ...|[[document, 0, 47, Peter Pa...|\n+------------------------------+------------------------------+\n\n" ] ], [ [ "At first, we define DocumentAssembler with desired parameters and then transform the data frame with it. The most important point to pay attention to here is that you need to use a String or String[Array] type column in .setInputCol(). So it doesn’t have to be named as text. 
You just use the column name as it is.", "_____no_output_____" ] ], [ [ "doc_df.printSchema()", "root\n |-- text: string (nullable = true)\n |-- document: array (nullable = true)\n | |-- element: struct (containsNull = true)\n | | |-- annotatorType: string (nullable = true)\n | | |-- begin: integer (nullable = false)\n | | |-- end: integer (nullable = false)\n | | |-- result: string (nullable = true)\n | | |-- metadata: map (nullable = true)\n | | | |-- key: string\n | | | |-- value: string (valueContainsNull = true)\n | | |-- embeddings: array (nullable = true)\n | | | |-- element: float (containsNull = false)\n\n" ], [ "doc_df.select('document.result','document.begin','document.end').show(truncate=False)", "+-------------------------------------------------------------------------------+-----+----+\n|result |begin|end |\n+-------------------------------------------------------------------------------+-----+----+\n|[Peter is a very good person.] |[0] |[27]|\n|[My life in Russia is very interesting.] |[0] |[37]|\n|[John and Peter are brothers. However they don't support each other that much.]|[0] |[76]|\n|[Lucas Nogal Dunbercker is no longer happy. He has a good car though.] |[0] |[67]|\n|[Europe is very culture rich. There are huge churches! and big houses!] |[0] |[68]|\n+-------------------------------------------------------------------------------+-----+----+\n\n" ] ], [ [ "The new column is in an array of struct type and has the parameters shown above. The annotators and transformers all come with universal metadata that would be filled down the road depending on the annotators being used. Unless you want to append other Spark NLP annotators to DocumentAssembler(), you don’t need to know what all these parameters mean for now. So we will talk about them in the following articles. You can access all these parameters with {column name}.{parameter name}.\n\nLet’s print out the first item’s result.", "_____no_output_____" ] ], [ [ "doc_df.select(\"document.result\").take(1)", "_____no_output_____" ] ], [ [ "If we would like to flatten the document column, we can do as follows.\n", "_____no_output_____" ] ], [ [ "import pyspark.sql.functions as F\n\ndoc_df.withColumn(\n \"tmp\", \n F.explode(\"document\"))\\\n .select(\"tmp.*\")\\\n .show(truncate=False)", "+-------------+-----+---+-----------------------------------------------------------------------------+---------------+----------+\n|annotatorType|begin|end|result |metadata |embeddings|\n+-------------+-----+---+-----------------------------------------------------------------------------+---------------+----------+\n|document |0 |27 |Peter is a very good person. |[sentence -> 0]|[] |\n|document |0 |37 |My life in Russia is very interesting. |[sentence -> 0]|[] |\n|document |0 |76 |John and Peter are brothers. However they don't support each other that much.|[sentence -> 0]|[] |\n|document |0 |67 |Lucas Nogal Dunbercker is no longer happy. He has a good car though. |[sentence -> 0]|[] |\n|document |0 |68 |Europe is very culture rich. There are huge churches! and big houses! |[sentence -> 0]|[] |\n+-------------+-----+---+-----------------------------------------------------------------------------+---------------+----------+\n\n" ] ], [ [ "## 3. Sentence Detector", "_____no_output_____" ], [ "Finds sentence bounds in raw text. ", "_____no_output_____" ], [ "`setCustomBounds(string)`: Custom sentence separator text e.g. `[\"\\n\"]`\n\n`setUseCustomOnly(bool)`: Use only custom bounds without considering those of Pragmatic Segmenter. 
Defaults to false. Needs customBounds.\n\n`setUseAbbreviations(bool)`: Whether to consider abbreviation strategies for better accuracy but slower performance. Defaults to true.\n\n`setExplodeSentences(bool)`: Whether to split sentences into different Dataset rows. Useful for higher parallelism in fat rows. Defaults to false.", "_____no_output_____" ] ], [ [ "from sparknlp.annotator import *\n\n# we feed the document column coming from Document Assembler\n\nsentenceDetector = SentenceDetector().\\\nsetInputCols(['document']).\\\nsetOutputCol('sentences')\n", "_____no_output_____" ], [ "sent_df = sentenceDetector.transform(doc_df)\n\nsent_df.show(truncate=False)", "+-----------------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n|text |document |sentences |\n+-----------------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n|Peter is a very good person. |[[document, 0, 27, Peter is a very good person., [sentence -> 0], []]] |[[document, 0, 27, Peter is a very good person., [sentence -> 0], []]] |\n|My life in Russia is very interesting. |[[document, 0, 37, My life in Russia is very interesting., [sentence -> 0], []]] |[[document, 0, 37, My life in Russia is very interesting., [sentence -> 0], []]] |\n|John and Peter are brothers. However they don't support each other that much.|[[document, 0, 76, John and Peter are brothers. However they don't support each other that much., [sentence -> 0], []]]|[[document, 0, 27, John and Peter are brothers., [sentence -> 0], []], [document, 29, 76, However they don't support each other that much., [sentence -> 1], []]] |\n|Lucas Nogal Dunbercker is no longer happy. He has a good car though. |[[document, 0, 67, Lucas Nogal Dunbercker is no longer happy. He has a good car though., [sentence -> 0], []]] |[[document, 0, 41, Lucas Nogal Dunbercker is no longer happy., [sentence -> 0], []], [document, 43, 67, He has a good car though., [sentence -> 1], []]] |\n|Europe is very culture rich. There are huge churches! and big houses! |[[document, 0, 68, Europe is very culture rich. There are huge churches! and big houses!, [sentence -> 0], []]] |[[document, 0, 27, Europe is very culture rich., [sentence -> 0], []], [document, 29, 52, There are huge churches!, [sentence -> 1], []], [document, 54, 68, and big houses!, [sentence -> 2], []]]|\n+-----------------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n\n" ], [ "sent_df.select('sentences').take(3)", "_____no_output_____" ], [ "text ='The patient was prescribed 1 capsule of Advil for 5 days. 
He was seen by the endocrinology service and she was discharged on 40 units of insulin glargine at night, 12 units of insulin lispro with meals, and metformin 1000 mg two times a day. It was determined that all SGLT2 inhibitors should be discontinued indefinitely fro 3 months.'\ntext\n", "_____no_output_____" ], [ "spark_df = spark.createDataFrame([[text]]).toDF(\"text\")\n\nspark_df.show(truncate=False)", "+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n|text |\n+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n|The patient was prescribed 1 capsule of Advil for 5 days. He was seen by the endocrinology service and she was discharged on 40 units of insulin glargine at night, 12 units of insulin lispro with meals, and metformin 1000 mg two times a day. It was determined that all SGLT2 inhibitors should be discontinued indefinitely fro 3 months.|\n+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n\n" ], [ "spark_df.show(truncate=50)", "+--------------------------------------------------+\n| text|\n+--------------------------------------------------+\n|The patient was prescribed 1 capsule of Advil f...|\n+--------------------------------------------------+\n\n" ], [ "doc_df = documentAssembler.transform(spark_df)\n\nsent_df = sentenceDetector.transform(doc_df)\n\nsent_df.show(truncate=True)", "+--------------------+--------------------+--------------------+\n| text| document| sentences|\n+--------------------+--------------------+--------------------+\n|The patient was p...|[[document, 0, 33...|[[document, 0, 56...|\n+--------------------+--------------------+--------------------+\n\n" ], [ "sent_df.select('sentences.result').take(1)", "_____no_output_____" ], [ "sentenceDetector.setExplodeSentences(True)\n", "_____no_output_____" ], [ "sent_df = sentenceDetector.transform(doc_df)\n\nsent_df.show(truncate=50)", "+--------------------------------------------------+--------------------------------------------------+--------------------------------------------------+\n| text| document| sentences|\n+--------------------------------------------------+--------------------------------------------------+--------------------------------------------------+\n|The patient was prescribed 1 capsule of Advil f...|[[document, 0, 334, The patient was prescribed ...|[[document, 0, 56, The patient was prescribed 1...|\n+--------------------------------------------------+--------------------------------------------------+--------------------------------------------------+\n\n" ], [ "sent_df.select('sentences.result').show(truncate=False)", 
"+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n|result |\n+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n|[The patient was prescribed 1 capsule of Advil for 5 days., He was seen by the endocrinology service and she was discharged on 40 units of insulin glargine at night, 12 units of insulin lispro with meals, and metformin 1000 mg two times a day., It was determined that all SGLT2 inhibitors should be discontinued indefinitely fro 3 months.]|\n+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n\n" ], [ "from pyspark.sql import functions as F\n\nsent_df.select(F.explode('sentences.result')).show(truncate=False)", "+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n|col |\n+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n|The patient was prescribed 1 capsule of Advil for 5 days. |\n|He was seen by the endocrinology service and she was discharged on 40 units of insulin glargine at night, 12 units of insulin lispro with meals, and metformin 1000 mg two times a day.|\n|It was determined that all SGLT2 inhibitors should be discontinued indefinitely fro 3 months. |\n+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n\n" ] ], [ [ "### Sentence Detector DL", "_____no_output_____" ] ], [ [ "sentencerDL = SentenceDetectorDLModel\\\n .pretrained(\"sentence_detector_dl\", \"en\") \\\n .setInputCols([\"document\"]) \\\n .setOutputCol(\"sentences\")\n\nsent_dl_df = sentencerDL.transform(doc_df)\n\nsent_dl_df.select(F.explode('sentences.result')).show(truncate=False)", "sentence_detector_dl download started this may take some time.\nApproximate size to download 307.2 KB\n[OK!]\n+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n|col |\n+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n|The patient was prescribed 1 capsule of Advil for 5 days. 
|\n|He was seen by the endocrinology service and she was discharged on 40 units of insulin glargine at night, 12 units of insulin lispro with meals, and metformin 1000 mg two times a day.|\n|It was determined that all SGLT2 inhibitors should be discontinued indefinitely fro 3 months. |\n+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n\n" ], [ "documenter = DocumentAssembler()\\\n .setInputCol(\"text\")\\\n .setOutputCol(\"document\")\n\nsentenceDetector = SentenceDetector()\\\n .setInputCols(['document'])\\\n .setOutputCol('sentences')\n \nsentencerDL = SentenceDetectorDLModel\\\n .pretrained(\"sentence_detector_dl\", \"en\") \\\n .setInputCols([\"document\"]) \\\n .setOutputCol(\"sentences\")\n\nsd_pipeline = PipelineModel(stages=[documenter, sentenceDetector])\n\nsd_model = LightPipeline(sd_pipeline)\n\n# DL version\nsd_dl_pipeline = PipelineModel(stages=[documenter, sentencerDL])\n\nsd_dl_model = LightPipeline(sd_dl_pipeline)\n", "sentence_detector_dl download started this may take some time.\nApproximate size to download 307.2 KB\n[OK!]\n" ], [ "text = \"\"\"John loves Mary.Mary loves Peter\nPeter loves Helen .Helen loves John; \nTotal: four people involved.\"\"\"\n\nfor anno in sd_model.fullAnnotate(text)[0][\"sentences\"]:\n print(\"{}\\t{}\\t{}\\t{}\".format(\n anno.metadata[\"sentence\"], anno.begin, anno.end, anno.result))", "0\t0\t51\tJohn loves Mary.Mary loves Peter\nPeter loves Helen .\n1\t52\t68\tHelen loves John;\n2\t71\t98\tTotal: four people involved.\n" ], [ "\nfor anno in sd_dl_model.fullAnnotate(text)[0][\"sentences\"]:\n print(\"{}\\t{}\\t{}\\t{}\".format(\n anno.metadata[\"sentence\"], anno.begin, anno.end, anno.result))", "0\t0\t15\tJohn loves Mary.\n1\t16\t32\tMary loves Peter\n2\t33\t51\tPeter loves Helen .\n3\t52\t68\tHelen loves John;\n4\t71\t98\tTotal: four people involved.\n" ] ], [ [ "## Tokenizer", "_____no_output_____" ], [ "Identifies tokens with tokenization open standards. It is an **Annotator Approach, so it requires .fit()**.\n\nA few rules will help customizing it if defaults do not fit user needs.\n\nsetExceptions(StringArray): List of tokens to not alter at all. Allows composite tokens like two worded tokens that the user may not want to split.\n\n`addException(String)`: Add a single exception\n\n`setExceptionsPath(String)`: Path to txt file with list of token exceptions\n\n`caseSensitiveExceptions(bool)`: Whether to follow case sensitiveness for matching exceptions in text\n\n`contextChars(StringArray)`: List of 1 character string to rip off from tokens, such as parenthesis or question marks. Ignored if using prefix, infix or suffix patterns.\n\n`splitChars(StringArray)`: List of 1 character string to split tokens inside, such as hyphens. Ignored if using infix, prefix or suffix patterns.\n\n`splitPattern (String)`: pattern to separate from the inside of tokens. takes priority over splitChars.\nsetTargetPattern: Basic regex rule to identify a candidate for tokenization. Defaults to \\\\S+ which means anything not a space\n\n`setSuffixPattern`: Regex to identify subtokens that are in the end of the token. Regex has to end with \\\\z and must contain groups (). Each group will become a separate token within the prefix. Defaults to non-letter characters. e.g. quotes or parenthesis\n\n`setPrefixPattern`: Regex to identify subtokens that come in the beginning of the token. 
Regex has to start with \\\\A and must contain groups (). Each group will become a separate token within the prefix. Defaults to non-letter characters. e.g. quotes or parenthesis\n\n`addInfixPattern`: Add an extension pattern regex with groups to the top of the rules (will target first, from more specific to the more general).\n\n`minLength`: Set the minimum allowed legth for each token\n\n`maxLength`: Set the maximum allowed legth for each token", "_____no_output_____" ] ], [ [ "tokenizer = Tokenizer() \\\n .setInputCols([\"document\"]) \\\n .setOutputCol(\"token\")", "_____no_output_____" ], [ "text = 'Peter Parker (Spiderman) is a nice guy and lives in New York but has no e-mail!'\n\nspark_df = spark.createDataFrame([[text]]).toDF(\"text\")\n", "_____no_output_____" ], [ "doc_df = documentAssembler.transform(spark_df)\n\ntoken_df = tokenizer.fit(doc_df).transform(doc_df)\n\ntoken_df.show(truncate=100)", "+-------------------------------------------------------------------------------+----------------------------------------------------------------------------------------------------+----------------------------------------------------------------------------------------------------+\n| text| document| token|\n+-------------------------------------------------------------------------------+----------------------------------------------------------------------------------------------------+----------------------------------------------------------------------------------------------------+\n|Peter Parker (Spiderman) is a nice guy and lives in New York but has no e-mail!|[[document, 0, 78, Peter Parker (Spiderman) is a nice guy and lives in New York but has no e-mail...|[[token, 0, 4, Peter, [sentence -> 0], []], [token, 6, 11, Parker, [sentence -> 0], []], [token, ...|\n+-------------------------------------------------------------------------------+----------------------------------------------------------------------------------------------------+----------------------------------------------------------------------------------------------------+\n\n" ], [ "token_df.select('token.result').take(1)", "_____no_output_____" ], [ "tokenizer = Tokenizer() \\\n .setInputCols([\"document\"]) \\\n .setOutputCol(\"token\") \\\n .setSplitChars(['-']) \\\n .setContextChars(['?', '!']) \\\n .addException(\"New York\") \\\n", "_____no_output_____" ], [ "token_df = tokenizer.fit(doc_df).transform(doc_df)\n\ntoken_df.select('token.result').take(1)", "_____no_output_____" ] ], [ [ "## Stacking Spark NLP Annotators in Spark ML Pipeline", "_____no_output_____" ], [ "Spark NLP provides an easy API to integrate with Spark ML Pipelines and all the Spark NLP annotators and transformers can be used within Spark ML Pipelines. So, it’s better to explain Pipeline concept through Spark ML official documentation.\n\nWhat is a Pipeline anyway? In machine learning, it is common to run a sequence of algorithms to process and learn from data. \n\nApache Spark ML represents such a workflow as a Pipeline, which consists of a sequence of PipelineStages (Transformers and Estimators) to be run in a specific order.\n\nIn simple terms, a pipeline chains multiple Transformers and Estimators together to specify an ML workflow. 
\n\nThe figure below is for the training-time usage of a Pipeline.", "_____no_output_____" ], [ "![Spark ML Pipeline at training time](image.png)", "_____no_output_____" ]
S++dsqvW+C3kHmAATYAJMgAl8GQTuPchEPes3n7Wz4eHhYlZYni3OuzKacaa4aAqHkGOqKdSDQiYouwfFFJN5JaNL5pnCNPr16ycudqR1Z2ZmipAKKpBC5ptmh+k5FVehmV+KT5ZjnCkzCIWN0Ow0hZ9Qxo6xY8eK9cltoG3lvS1evBgUZkIz2JSJZMiQIeJiRGoTbZMMPj3Sd+mR1kVtpMcHDx6I92kZxVFTfDXt24YNG0SIB5lyWi/FX1Meb8oy4ubmJkJUqJIihcJQ+Mvn5PF++CQLFq6xePr8/X3Lu6/69JpNtT71FreVCTABJsAEmMAXTCAp+R1+NH+DV9GFL/lNZpIMbN6QChkrvS8vp9leyvYhh2SoK89NuaQpXIOWyQaYzC2tg16rrk/etjzzTK/JhMttode0PjLK9Ci/L7dNfpTXLb9W3TZ9R/W7tC3VddI25Jv8WdoerZPaIi+X94GW0V31fdX1y+v61Eeq1n7hShosB8QhManw/fip29Pk59hUa5I2b4sJMAEmwASYABMoNIF32e8wKyAB/ca/BXJHRBR6nfl9kUI3KBOInK0jv8/ysk8jEBOXjV6j47BmWxLyRLV82gp0+FNsqnW4c7hpTIAJMAEmwASYQG4CiUnv0KFPLBYsTUBSSvHOdFJBFTmMIncr+FVBCZCBfvU6C3ODEjFo8lukpGjgrKigjfzMz7Op/kyA/HUmwASYABNgAkxAcwQoLCE+IRvDZ8SjU99YBKxJxLGzqQiPTCv6e0QawuleHOv+QtYZFpGGwydTsGBpIlo6xGDSvHgkJBqeoab/ADbVmhsHeEtMgAkwASbABJhAERGg2Nzfr6VjzpIEOA6LhWmfGL7rIAOzvjHoMzYOfisTcfc+5QLHe9lMikgSWl8Nm2qtdwE3gAkwASbABJgAE2ACTEDfCbCp1vce5PYzASbABJgAE2ACTIAJaJ0Am2qtdwE3gAkwASbABJgAE2ACTEDfCbCp1vce5PYzASbABJgAE2ACTIAJaJ0Am2qtdwE3gAkwASbABJgAE2ACTEDfCbCp1vce5PYzASbABJgAE2ACTIAJaJ0Am2qtdwE3gAkwASbABJgAE2ACTEDfCbCp1vce5PYzASbABJgAE2ACTIAJaJ0Am2qtdwE3gAkwASbABJgAE2ACTEDfCbCp1vce5PYzASbABJgAE2ACTIAJaJ0Am2qtdwE3gAkwASbABJgAE2ACTEDfCbCp1vce5PYzASbABJgAE2ACTIAJaJ0Am2qtdwE3gAkwASbABJgAE2ACTEDfCbCp1vce5PYzASbABJgAE2ACTIAJaJ0Am2qtdwE3gAkwASbABJgAE2ACTEDfCbCp1vce5PYzASbABJgAE2ACTIAJaJ0Am2qtdwE3gAkwASbABJgAE2ACTEDfCbCp1vce5PYzASbABJgAE2ACTIAJaJ0Am2qtdwE3gAkwASbABJgAE2ACTEDfCbCp1vce5PYzASbABJgAE2ACTIAJaJ0Am2qtdwE3gAkwASbABJgAE2ACTEDfCbCp1vce5PYzASbABJgAE2ACTIAJaJ0Am2qtdwE3gAkwASbABJgAE2ACTEDfCbCp1vce5PYzASbABJgAE2ACTIAJaJ0Am2qtdwE3gAkwASbABJgAE2ACTEDfCbCp1vce5PYzASbABJgAE2ACTIAJaJ0Am2qtdwE3gAkwASbABJgAE2ACTEDfCbCp1vce5PYzASbABJgAE2ACTIAJaJ0Am2qtdwE3gAkwASbABJgAE2ACTEDfCbCp1vce5PYzASbABJgAE2ACTIAJaJ0Am2qtdwE3gAkwASbABJgAE2ACTEDfCbCp1vce5PYzASbABJgAE2ACTIAJaJ0Am2qtdwE3gAkwASbABJgAE2ACTEDfCbCp1vce5PYzASbABJgAE2ACTIAJaJ3A/wHKoC4zF0g+9AAAAABJRU5ErkJggg==)", "_____no_output_____" ], [ "A Pipeline is specified as a sequence of stages, and each stage is either a Transformer or an Estimator. These stages are run in order, and the input DataFrame is transformed as it passes through each stage. That is, the data are passed through the fitted pipeline in order. Each stage’s transform() method updates the dataset and passes it to the next stage. With the help of Pipelines, we can ensure that training and test data go through identical feature processing steps.\n\nNow let’s see how this can be done in Spark NLP using Annotators and Transformers. 
Assume that we have the following steps that need to be applied one by one on a data frame.\n\n- Split text into sentences\n- Tokenize\n\nAnd here is how we code this pipeline up in Spark NLP.", "_____no_output_____" ] ], [ [ "from pyspark.ml import Pipeline\n\ndocumentAssembler = DocumentAssembler()\\\n.setInputCol(\"text\")\\\n.setOutputCol(\"document\")\n\nsentenceDetector = SentenceDetector().\\\nsetInputCols(['document']).\\\nsetOutputCol('sentences')\n\ntokenizer = Tokenizer() \\\n .setInputCols([\"sentences\"]) \\\n .setOutputCol(\"token\")\n\nnlpPipeline = Pipeline(stages=[\n documentAssembler, \n sentenceDetector,\n tokenizer\n ])\n\nempty_df = spark.createDataFrame([['']]).toDF(\"text\")\n\npipelineModel = nlpPipeline.fit(empty_df)", "_____no_output_____" ], [ "spark_df = spark.read.text('./sample-sentences-en.txt').toDF('text')\n\nspark_df.show(truncate=False)", "+-----------------------------------------------------------------------------+\n|text |\n+-----------------------------------------------------------------------------+\n|Peter is a very good person. |\n|My life in Russia is very interesting. |\n|John and Peter are brothers. However they don't support each other that much.|\n|Lucas Nogal Dunbercker is no longer happy. He has a good car though. |\n|Europe is very culture rich. There are huge churches! and big houses! |\n+-----------------------------------------------------------------------------+\n\n" ], [ "result = pipelineModel.transform(spark_df)", "_____no_output_____" ], [ "result.show(truncate=20)", "+--------------------+--------------------+--------------------+--------------------+\n| text| document| sentences| token|\n+--------------------+--------------------+--------------------+--------------------+\n|Peter is a very g...|[[document, 0, 27...|[[document, 0, 27...|[[token, 0, 4, Pe...|\n|My life in Russia...|[[document, 0, 37...|[[document, 0, 37...|[[token, 0, 1, My...|\n|John and Peter ar...|[[document, 0, 76...|[[document, 0, 27...|[[token, 0, 3, Jo...|\n|Lucas Nogal Dunbe...|[[document, 0, 67...|[[document, 0, 41...|[[token, 0, 4, Lu...|\n|Europe is very cu...|[[document, 0, 68...|[[document, 0, 27...|[[token, 0, 5, Eu...|\n+--------------------+--------------------+--------------------+--------------------+\n\n" ], [ "result.printSchema()", "root\n |-- text: string (nullable = true)\n |-- document: array (nullable = true)\n | |-- element: struct (containsNull = true)\n | | |-- annotatorType: string (nullable = true)\n | | |-- begin: integer (nullable = false)\n | | |-- end: integer (nullable = false)\n | | |-- result: string (nullable = true)\n | | |-- metadata: map (nullable = true)\n | | | |-- key: string\n | | | |-- value: string (valueContainsNull = true)\n | | |-- embeddings: array (nullable = true)\n | | | |-- element: float (containsNull = false)\n |-- sentences: array (nullable = true)\n | |-- element: struct (containsNull = true)\n | | |-- annotatorType: string (nullable = true)\n | | |-- begin: integer (nullable = false)\n | | |-- end: integer (nullable = false)\n | | |-- result: string (nullable = true)\n | | |-- metadata: map (nullable = true)\n | | | |-- key: string\n | | | |-- value: string (valueContainsNull = true)\n | | |-- embeddings: array (nullable = true)\n | | | |-- element: float (containsNull = false)\n |-- token: array (nullable = true)\n | |-- element: struct (containsNull = true)\n | | |-- annotatorType: string (nullable = true)\n | | |-- begin: integer (nullable = false)\n | | |-- end: integer (nullable = false)\n | | |-- 
result: string (nullable = true)\n | | |-- metadata: map (nullable = true)\n | | | |-- key: string\n | | | |-- value: string (valueContainsNull = true)\n | | |-- embeddings: array (nullable = true)\n | | | |-- element: float (containsNull = false)\n\n" ], [ "result.select('sentences.result').take(3)", "_____no_output_____" ], [ "result.select('token').take(3)[2]", "_____no_output_____" ] ], [ [ "## Normalizer", "_____no_output_____" ], [ "Removes all dirty characters from text following a regex pattern and transforms words based on a provided dictionary\n\n`setCleanupPatterns(patterns)`: Regular expressions list for normalization, defaults [^A-Za-z]\n\n`setLowercase(value)`: lowercase tokens, default false\n\n`setSlangDictionary(path)`: txt file with delimited words to be transformed into something else\n", "_____no_output_____" ] ], [ [ "import string\nstring.punctuation", "_____no_output_____" ], [ "from sparknlp.base import *\nfrom sparknlp.annotator import *\n\ndocumentAssembler = DocumentAssembler()\\\n.setInputCol(\"text\")\\\n.setOutputCol(\"document\")\n\ntokenizer = Tokenizer() \\\n .setInputCols([\"document\"]) \\\n .setOutputCol(\"token\")\n \nnormalizer = Normalizer() \\\n .setInputCols([\"token\"]) \\\n .setOutputCol(\"normalized\")\\\n .setLowercase(True)\\\n .setCleanupPatterns([\"[^\\w\\d\\s]\"]) # remove punctuations (keep alphanumeric chars)\n # if we don't set CleanupPatterns, it will only keep alphabet letters ([^A-Za-z])\n\n", "_____no_output_____" ], [ "\nnlpPipeline = Pipeline(stages=[\n documentAssembler, \n tokenizer,\n normalizer\n ])\n\nempty_df = spark.createDataFrame([['']]).toDF(\"text\")\n\npipelineModel = nlpPipeline.fit(empty_df)", "_____no_output_____" ], [ "result = pipelineModel.transform(spark_df)", "_____no_output_____" ], [ "result.show(truncate=20)", "+--------------------+--------------------+--------------------+--------------------+\n| text| document| token| normalized|\n+--------------------+--------------------+--------------------+--------------------+\n|Peter is a very g...|[[document, 0, 27...|[[token, 0, 4, Pe...|[[token, 0, 4, pe...|\n|My life in Russia...|[[document, 0, 37...|[[token, 0, 1, My...|[[token, 0, 1, my...|\n|John and Peter ar...|[[document, 0, 76...|[[token, 0, 3, Jo...|[[token, 0, 3, jo...|\n|Lucas Nogal Dunbe...|[[document, 0, 67...|[[token, 0, 4, Lu...|[[token, 0, 4, lu...|\n|Europe is very cu...|[[document, 0, 68...|[[token, 0, 5, Eu...|[[token, 0, 5, eu...|\n+--------------------+--------------------+--------------------+--------------------+\n\n" ], [ "result.select('token').take(3)", "_____no_output_____" ], [ "result.select('normalized.result').take(3)", "_____no_output_____" ], [ "result.select('normalized').take(3)", "_____no_output_____" ] ], [ [ "## Stopwords Cleaner", "_____no_output_____" ], [ "This annotator excludes from a sequence of strings (e.g. the output of a Tokenizer, Normalizer, Lemmatizer, and Stemmer) and drops all the stop words from the input sequences.", "_____no_output_____" ], [ "Functions:\n\n`setStopWords`: The words to be filtered out. Array[String]\n\n`setCaseSensitive`: Whether to do a case sensitive comparison over the stop words.", "_____no_output_____" ] ], [ [ "stopwords_cleaner = StopWordsCleaner()\\\n .setInputCols(\"token\")\\\n .setOutputCol(\"cleanTokens\")\\\n .setCaseSensitive(False)\\\n #.setStopWords([\"no\", \"without\"]) (e.g. 
read a list of words from a txt)\n \n", "_____no_output_____" ], [ "stopwords_cleaner.getStopWords()", "_____no_output_____" ], [ "documentAssembler = DocumentAssembler()\\\n.setInputCol(\"text\")\\\n.setOutputCol(\"document\")\n\ntokenizer = Tokenizer() \\\n .setInputCols([\"document\"]) \\\n .setOutputCol(\"token\")\n\nnlpPipeline = Pipeline(stages=[\n documentAssembler, \n tokenizer,\n stopwords_cleaner\n ])\n\nempty_df = spark.createDataFrame([['']]).toDF(\"text\")\n\npipelineModel = nlpPipeline.fit(empty_df)", "_____no_output_____" ], [ "spark_df = spark.read.text('./sample-sentences-en.txt').toDF('text')\n\nresult = pipelineModel.transform(spark_df)\n\nresult.show()", "+--------------------+--------------------+--------------------+--------------------+\n| text| document| token| cleanTokens|\n+--------------------+--------------------+--------------------+--------------------+\n|Peter is a very g...|[[document, 0, 27...|[[token, 0, 4, Pe...|[[token, 0, 4, Pe...|\n|My life in Russia...|[[document, 0, 37...|[[token, 0, 1, My...|[[token, 3, 6, li...|\n|John and Peter ar...|[[document, 0, 76...|[[token, 0, 3, Jo...|[[token, 0, 3, Jo...|\n|Lucas Nogal Dunbe...|[[document, 0, 67...|[[token, 0, 4, Lu...|[[token, 0, 4, Lu...|\n|Europe is very cu...|[[document, 0, 68...|[[token, 0, 5, Eu...|[[token, 0, 5, Eu...|\n+--------------------+--------------------+--------------------+--------------------+\n\n" ], [ "result.select('cleanTokens.result').take(1)", "_____no_output_____" ] ], [ [ "## Token Assembler", "_____no_output_____" ] ], [ [ "documentAssembler = DocumentAssembler()\\\n.setInputCol(\"text\")\\\n.setOutputCol(\"document\")\n\nsentenceDetector = SentenceDetector().\\\n setInputCols(['document']).\\\n setOutputCol('sentences')\n\ntokenizer = Tokenizer() \\\n .setInputCols([\"sentences\"]) \\\n .setOutputCol(\"token\")\n\nnormalizer = Normalizer() \\\n .setInputCols([\"token\"]) \\\n .setOutputCol(\"normalized\")\\\n .setLowercase(False)\\\n\nstopwords_cleaner = StopWordsCleaner()\\\n .setInputCols(\"normalized\")\\\n .setOutputCol(\"cleanTokens\")\\\n .setCaseSensitive(False)\\\n\ntokenassembler = TokenAssembler()\\\n .setInputCols([\"sentences\", \"cleanTokens\"]) \\\n .setOutputCol(\"clean_text\")\n\n\nnlpPipeline = Pipeline(stages=[\n documentAssembler,\n sentenceDetector,\n tokenizer,\n normalizer,\n stopwords_cleaner,\n tokenassembler\n ])\n\n\nempty_df = spark.createDataFrame([['']]).toDF(\"text\")\n\npipelineModel = nlpPipeline.fit(empty_df)\n\nresult = pipelineModel.transform(spark_df)\n\nresult.show()", "+--------------------+--------------------+--------------------+--------------------+--------------------+--------------------+--------------------+\n| text| document| sentences| token| normalized| cleanTokens| clean_text|\n+--------------------+--------------------+--------------------+--------------------+--------------------+--------------------+--------------------+\n|Peter is a very g...|[[document, 0, 27...|[[document, 0, 27...|[[token, 0, 4, Pe...|[[token, 0, 4, Pe...|[[token, 0, 4, Pe...|[[document, 0, 16...|\n|My life in Russia...|[[document, 0, 37...|[[document, 0, 37...|[[token, 0, 1, My...|[[token, 0, 1, My...|[[token, 3, 6, li...|[[document, 0, 22...|\n|John and Peter ar...|[[document, 0, 76...|[[document, 0, 27...|[[token, 0, 3, Jo...|[[token, 0, 3, Jo...|[[token, 0, 3, Jo...|[[document, 0, 18...|\n|Lucas Nogal Dunbe...|[[document, 0, 67...|[[document, 0, 41...|[[token, 0, 4, Lu...|[[token, 0, 4, Lu...|[[token, 0, 4, Lu...|[[document, 0, 34...|\n|Europe is very 
cu...|[[document, 0, 68...|[[document, 0, 27...|[[token, 0, 5, Eu...|[[token, 0, 5, Eu...|[[token, 0, 5, Eu...|[[document, 0, 18...|\n+--------------------+--------------------+--------------------+--------------------+--------------------+--------------------+--------------------+\n\n" ], [ "result.select('clean_text').take(1)", "_____no_output_____" ], [ "# if we use TokenAssembler().setPreservePosition(True), the original borders will be preserved (dropped & unwanted chars will be replaced by spaces)\n\nresult.select('clean_text').take(1)", "_____no_output_____" ], [ "result.select('text', F.explode('clean_text.result').alias('clean_text')).show(truncate=False)", "+-----------------------------------------------------------------------------+-----------------------------------+\n|text |clean_text |\n+-----------------------------------------------------------------------------+-----------------------------------+\n|Peter is a very good person. |Peter good person |\n|My life in Russia is very interesting. |life Russia interesting |\n|John and Peter are brothers. However they don't support each other that much.|John Peter brothers |\n|John and Peter are brothers. However they don't support each other that much.|However dont support much |\n|Lucas Nogal Dunbercker is no longer happy. He has a good car though. |Lucas Nogal Dunbercker longer happy|\n|Lucas Nogal Dunbercker is no longer happy. He has a good car though. |good car though |\n|Europe is very culture rich. There are huge churches! and big houses! |Europe culture rich |\n|Europe is very culture rich. There are huge churches! and big houses! |huge churches |\n|Europe is very culture rich. There are huge churches! and big houses! |big houses |\n+-----------------------------------------------------------------------------+-----------------------------------+\n\n" ], [ "import pyspark.sql.functions as F\n\nresult.withColumn(\n \"tmp\", \n F.explode(\"clean_text\")) \\\n .select(\"tmp.*\").select(\"begin\",\"end\",\"result\",\"metadata.sentence\").show(truncate = False)", "+-----+---+-----------------------------------+--------+\n|begin|end|result |sentence|\n+-----+---+-----------------------------------+--------+\n|0 |16 |Peter good person |0 |\n|0 |22 |life Russia interesting |0 |\n|0 |18 |John Peter brothers |0 |\n|29 |53 |However dont support much |1 |\n|0 |34 |Lucas Nogal Dunbercker longer happy|0 |\n|43 |57 |good car though |1 |\n|0 |18 |Europe culture rich |0 |\n|29 |41 |huge churches |1 |\n|54 |63 |big houses |2 |\n+-----+---+-----------------------------------+--------+\n\n" ], [ "# if we hadn't used Sentence Detector, this would be what we got. 
(tokenizer gets document instead of sentences column)\n\ntokenizer = Tokenizer() \\\n .setInputCols([\"document\"]) \\\n .setOutputCol(\"token\")\n\ntokenassembler = TokenAssembler()\\\n .setInputCols([\"document\", \"cleanTokens\"]) \\\n .setOutputCol(\"clean_text\")\n\nnlpPipeline = Pipeline(stages=[\n documentAssembler,\n tokenizer,\n normalizer,\n stopwords_cleaner,\n tokenassembler\n ])\n\nempty_df = spark.createDataFrame([['']]).toDF(\"text\")\n\npipelineModel = nlpPipeline.fit(empty_df)\n\nresult = pipelineModel.transform(spark_df)\n\nresult.select('text', 'clean_text.result').show(truncate=False)", "+-----------------------------------------------------------------------------+-----------------------------------------------------+\n|text |result |\n+-----------------------------------------------------------------------------+-----------------------------------------------------+\n|Peter is a very good person. |[Peter good person] |\n|My life in Russia is very interesting. |[life Russia interesting] |\n|John and Peter are brothers. However they don't support each other that much.|[John Peter brothers However dont support much] |\n|Lucas Nogal Dunbercker is no longer happy. He has a good car though. |[Lucas Nogal Dunbercker longer happy good car though]|\n|Europe is very culture rich. There are huge churches! and big houses! |[Europe culture rich huge churches big houses] |\n+-----------------------------------------------------------------------------+-----------------------------------------------------+\n\n" ], [ "\nresult.withColumn(\n \"tmp\", \n F.explode(\"clean_text\")) \\\n .select(\"tmp.*\").select(\"begin\",\"end\",\"result\",\"metadata.sentence\").show(truncate = False)", "+-----+---+---------------------------------------------------+--------+\n|begin|end|result |sentence|\n+-----+---+---------------------------------------------------+--------+\n|0 |16 |Peter good person |0 |\n|0 |22 |life Russia interesting |0 |\n|0 |44 |John Peter brothers However dont support much |0 |\n|0 |50 |Lucas Nogal Dunbercker longer happy good car though|0 |\n|0 |43 |Europe culture rich huge churches big houses |0 |\n+-----+---+---------------------------------------------------+--------+\n\n" ] ], [ [ "**important note:**\n\nIf you have some other steps & annotators in your pipeline that will need to use the tokens from cleaned text (assembled tokens), you will need to tokenize the processed text again as the original text is probably changed completely.", "_____no_output_____" ], [ "## Stemmer", "_____no_output_____" ], [ "Returns hard-stems out of words with the objective of retrieving the meaningful part of the word\n", "_____no_output_____" ] ], [ [ "stemmer = Stemmer() \\\n .setInputCols([\"token\"]) \\\n .setOutputCol(\"stem\")", "_____no_output_____" ], [ "documentAssembler = DocumentAssembler()\\\n.setInputCol(\"text\")\\\n.setOutputCol(\"document\")\n\ntokenizer = Tokenizer() \\\n .setInputCols([\"document\"]) \\\n .setOutputCol(\"token\")\n\nnlpPipeline = Pipeline(stages=[\n documentAssembler, \n tokenizer,\n stemmer\n ])\n\nempty_df = spark.createDataFrame([['']]).toDF(\"text\")\n\npipelineModel = nlpPipeline.fit(empty_df)", "_____no_output_____" ], [ "result = pipelineModel.transform(spark_df)\n\nresult.show()", "+--------------------+--------------------+--------------------+--------------------+\n| text| document| token| stem|\n+--------------------+--------------------+--------------------+--------------------+\n|Peter is a very g...|[[document, 0, 27...|[[token, 0, 4, 
Pe...|[[token, 0, 4, pe...|\n|My life in Russia...|[[document, 0, 37...|[[token, 0, 1, My...|[[token, 0, 1, my...|\n|John and Peter ar...|[[document, 0, 76...|[[token, 0, 3, Jo...|[[token, 0, 3, jo...|\n|Lucas Nogal Dunbe...|[[document, 0, 67...|[[token, 0, 4, Lu...|[[token, 0, 4, lu...|\n|Europe is very cu...|[[document, 0, 68...|[[token, 0, 5, Eu...|[[token, 0, 5, eu...|\n+--------------------+--------------------+--------------------+--------------------+\n\n" ], [ "result.select('stem.result').show(truncate=False)", "+-------------------------------------------------------------------------------------------+\n|result |\n+-------------------------------------------------------------------------------------------+\n|[peter, i, a, veri, good, person, .] |\n|[my, life, in, russia, i, veri, interest, .] |\n|[john, and, peter, ar, brother, ., howev, thei, don't, support, each, other, that, much, .]|\n|[luca, nogal, dunberck, i, no, longer, happi, ., he, ha, a, good, car, though, .] |\n|[europ, i, veri, cultur, rich, ., there, ar, huge, church, !, and, big, hous, !] |\n+-------------------------------------------------------------------------------------------+\n\n" ], [ "import pyspark.sql.functions as F\n\nresult_df = result.select(F.explode(F.arrays_zip('token.result', 'stem.result')).alias(\"cols\")) \\\n.select(F.expr(\"cols['0']\").alias(\"token\"),\n F.expr(\"cols['1']\").alias(\"stem\")).toPandas()\n\nresult_df.head(10)", "_____no_output_____" ] ], [ [ "## Lemmatizer", "_____no_output_____" ], [ "Retrieves lemmas out of words with the objective of returning a base dictionary word", "_____no_output_____" ] ], [ [ "!wget -q https://raw.githubusercontent.com/mahavivo/vocabulary/master/lemmas/AntBNC_lemmas_ver_001.txt", "_____no_output_____" ], [ "lemmatizer = Lemmatizer() \\\n .setInputCols([\"token\"]) \\\n .setOutputCol(\"lemma\") \\\n .setDictionary(\"./AntBNC_lemmas_ver_001.txt\", value_delimiter =\"\\t\", key_delimiter = \"->\")", "_____no_output_____" ], [ "documentAssembler = DocumentAssembler()\\\n.setInputCol(\"text\")\\\n.setOutputCol(\"document\")\n\ntokenizer = Tokenizer() \\\n .setInputCols([\"document\"]) \\\n .setOutputCol(\"token\")\n\nstemmer = Stemmer() \\\n .setInputCols([\"token\"]) \\\n .setOutputCol(\"stem\")\n\nnlpPipeline = Pipeline(stages=[\n documentAssembler, \n tokenizer,\n stemmer,\n lemmatizer\n ])\n\nempty_df = spark.createDataFrame([['']]).toDF(\"text\")\n\npipelineModel = nlpPipeline.fit(empty_df)", "_____no_output_____" ], [ "result = pipelineModel.transform(spark_df)\n\nresult.show()", "+--------------------+--------------------+--------------------+--------------------+--------------------+\n| text| document| token| stem| lemma|\n+--------------------+--------------------+--------------------+--------------------+--------------------+\n|Peter is a very g...|[[document, 0, 27...|[[token, 0, 4, Pe...|[[token, 0, 4, pe...|[[token, 0, 4, Pe...|\n|My life in Russia...|[[document, 0, 37...|[[token, 0, 1, My...|[[token, 0, 1, my...|[[token, 0, 1, My...|\n|John and Peter ar...|[[document, 0, 76...|[[token, 0, 3, Jo...|[[token, 0, 3, jo...|[[token, 0, 3, Jo...|\n|Lucas Nogal Dunbe...|[[document, 0, 67...|[[token, 0, 4, Lu...|[[token, 0, 4, lu...|[[token, 0, 4, Lu...|\n|Europe is very cu...|[[document, 0, 68...|[[token, 0, 5, Eu...|[[token, 0, 5, eu...|[[token, 0, 5, Eu...|\n+--------------------+--------------------+--------------------+--------------------+--------------------+\n\n" ], [ "result.select('lemma.result').show(truncate=False)", 
"+---------------------------------------------------------------------------------------------+\n|result |\n+---------------------------------------------------------------------------------------------+\n|[Peter, be, a, very, good, person, .] |\n|[My, life, in, Russia, be, very, interest, .] |\n|[John, and, Peter, be, brother, ., However, they, don't, support, each, other, that, much, .]|\n|[Lucas, Nogal, Dunbercker, be, no, long, happy, ., He, have, a, good, car, though, .] |\n|[Europe, be, very, culture, rich, ., There, be, huge, church, !, and, big, house, !] |\n+---------------------------------------------------------------------------------------------+\n\n" ], [ "result_df = result.select(F.explode(F.arrays_zip('token.result', 'stem.result', 'lemma.result')).alias(\"cols\")) \\\n.select(F.expr(\"cols['0']\").alias(\"token\"),\n F.expr(\"cols['1']\").alias(\"stem\"),\n F.expr(\"cols['2']\").alias(\"lemma\")).toPandas()\n\nresult_df.head(10)", "_____no_output_____" ] ], [ [ "## NGram Generator", "_____no_output_____" ], [ "NGramGenerator annotator takes as input a sequence of strings (e.g. the output of a `Tokenizer`, `Normalizer`, `Stemmer`, `Lemmatizer`, and `StopWordsCleaner`). \n\nThe parameter n is used to determine the number of terms in each n-gram. The output will consist of a sequence of n-grams where each n-gram is represented by a space-delimited string of n consecutive words with annotatorType `CHUNK` same as the Chunker annotator.\n\nFunctions:\n\n`setN:` number elements per n-gram (>=1)\n\n`setEnableCumulative:` whether to calculate just the actual n-grams or all n-grams from 1 through n\n\n`setDelimiter:` Glue character used to join the tokens", "_____no_output_____" ] ], [ [ "\nngrams_cum = NGramGenerator() \\\n .setInputCols([\"token\"]) \\\n .setOutputCol(\"ngrams\") \\\n .setN(3) \\\n .setEnableCumulative(True)\\\n .setDelimiter(\"_\") # Default is space\n \n# .setN(3) means, take bigrams and trigrams.\n\nnlpPipeline = Pipeline(stages=[\n documentAssembler, \n tokenizer,\n ngrams_cum\n ])\n\nempty_df = spark.createDataFrame([['']]).toDF(\"text\")\n\npipelineModel = nlpPipeline.fit(empty_df)\n\nresult = pipelineModel.transform(spark_df)\n\nresult.select('ngrams.result').show(truncate=200)", "+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| result|\n+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| [Peter, is, a, very, good, person, ., Peter_is, is_a, a_very, very_good, good_person, person_., Peter_is_a, is_a_very, a_very_good, very_good_person, good_person_.]|\n|[My, life, in, Russia, is, very, interesting, ., My_life, life_in, in_Russia, Russia_is, is_very, very_interesting, interesting_., My_life_in, life_in_Russia, in_Russia_is, Russia_is_very, is_very_...|\n|[John, and, Peter, are, brothers, ., However, they, don't, support, each, other, that, much, ., John_and, and_Peter, Peter_are, are_brothers, brothers_., ._However, However_they, they_don't, don't_...|\n|[Lucas, Nogal, Dunbercker, is, no, longer, happy, ., He, has, a, good, car, though, ., Lucas_Nogal, Nogal_Dunbercker, Dunbercker_is, is_no, no_longer, longer_happy, happy_., ._He, He_has, has_a, a_...|\n|[Europe, is, very, culture, rich, ., There, are, huge, churches, !, and, big, houses, !, 
Europe_is, is_very, very_culture, culture_rich, rich_., ._There, There_are, are_huge, huge_churches, churche...|\n+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n\n" ], [ "\nngrams_nonCum = NGramGenerator() \\\n .setInputCols([\"token\"]) \\\n .setOutputCol(\"ngrams_v2\") \\\n .setN(3) \\\n .setEnableCumulative(False)\\\n .setDelimiter(\"_\") # Default is space\n \nngrams_nonCum.transform(result).select('ngrams_v2.result').show(truncate=200)", "+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| result|\n+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| [Peter_is_a, is_a_very, a_very_good, very_good_person, good_person_.]|\n| [My_life_in, life_in_Russia, in_Russia_is, Russia_is_very, is_very_interesting, very_interesting_.]|\n|[John_and_Peter, and_Peter_are, Peter_are_brothers, are_brothers_., brothers_._However, ._However_they, However_they_don't, they_don't_support, don't_support_each, support_each_other, each_other_th...|\n| [Lucas_Nogal_Dunbercker, Nogal_Dunbercker_is, Dunbercker_is_no, is_no_longer, no_longer_happy, longer_happy_., happy_._He, ._He_has, He_has_a, has_a_good, a_good_car, good_car_though, car_though_.]|\n|[Europe_is_very, is_very_culture, very_culture_rich, culture_rich_., rich_._There, ._There_are, There_are_huge, are_huge_churches, huge_churches_!, churches_!_and, !_and_big, and_big_houses, big_ho...|\n+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n\n" ] ], [ [ "## TextMatcher", "_____no_output_____" ], [ "Annotator to match entire phrases (by token) provided in a file against a Document\n\nFunctions:\n\n`setEntities(path, format, options)`: Provides a file with phrases to match. Default: Looks up path in configuration.\n\n`path`: a path to a file that contains the entities in the specified format.\n\n`readAs`: the format of the file, can be one of {ReadAs.LINE_BY_LINE, ReadAs.SPARK_DATASET}. Defaults to LINE_BY_LINE.\n\n`options`: a map of additional parameters. Defaults to {“format”: “text”}.\n\n`entityValue` : Value for the entity metadata field to indicate which chunk comes from which textMatcher when there are multiple textMatchers. \n\n`mergeOverlapping` : whether to merge overlapping matched chunks. Defaults false\n\n`caseSensitive` : whether to match regardless of case. Defaults true\n", "_____no_output_____" ] ], [ [ "! 
wget -q https://raw.githubusercontent.com/JohnSnowLabs/spark-nlp-workshop/master/tutorials/Certification_Trainings/Public/data/news_category_train.csv\n\nnews_df = spark.read \\\n .option(\"header\", True) \\\n .csv(\"news_category_train.csv\")\n", "_____no_output_____" ], [ "news_df.show(5, truncate=50)", "+--------+--------------------------------------------------+\n|category| description|\n+--------+--------------------------------------------------+\n|Business| Short sellers, Wall Street's dwindling band of...|\n|Business| Private investment firm Carlyle Group, which h...|\n|Business| Soaring crude prices plus worries about the ec...|\n|Business| Authorities have halted oil export flows from ...|\n|Business| Tearaway world oil prices, toppling records an...|\n+--------+--------------------------------------------------+\nonly showing top 5 rows\n\n" ], [ "# write the target entities to txt file \n\nentities = ['Wall Street', 'USD', 'stock', 'NYSE']\nwith open ('financial_entities.txt', 'w') as f:\n for i in entities:\n f.write(i+'\\n')\n\n\nentities = ['soccer', 'world cup', 'Messi', 'FC Barcelona']\nwith open ('sport_entities.txt', 'w') as f:\n for i in entities:\n f.write(i+'\\n')\n", "_____no_output_____" ], [ "\ndocumentAssembler = DocumentAssembler()\\\n.setInputCol(\"description\")\\\n.setOutputCol(\"document\")\n\ntokenizer = Tokenizer() \\\n .setInputCols([\"document\"]) \\\n .setOutputCol(\"token\")\n\nfinancial_entity_extractor = TextMatcher() \\\n .setInputCols([\"document\",'token'])\\\n .setOutputCol(\"financial_entities\")\\\n .setEntities(\"financial_entities.txt\")\\\n .setCaseSensitive(False)\\\n .setEntityValue('financial_entity')\n\nsport_entity_extractor = TextMatcher() \\\n .setInputCols([\"document\",'token'])\\\n .setOutputCol(\"sport_entities\")\\\n .setEntities(\"sport_entities.txt\")\\\n .setCaseSensitive(False)\\\n .setEntityValue('sport_entity')\n\n\nnlpPipeline = Pipeline(stages=[\n documentAssembler, \n tokenizer,\n financial_entity_extractor,\n sport_entity_extractor\n ])\n\nempty_df = spark.createDataFrame([['']]).toDF(\"description\")\n\npipelineModel = nlpPipeline.fit(empty_df)", "_____no_output_____" ], [ "result = pipelineModel.transform(news_df)", "_____no_output_____" ], [ "result.select('financial_entities.result','sport_entities.result').take(2)", "_____no_output_____" ], [ "\nresult.select('description','financial_entities.result','sport_entities.result')\\\n.toDF('text','financial_matches','sport_matches').filter((F.size('financial_matches')>1) | (F.size('sport_matches')>1))\\\n.show(truncate=70)\n", "+----------------------------------------------------------------------+----------------------------------+-------------------+\n| text| financial_matches| sport_matches|\n+----------------------------------------------------------------------+----------------------------------+-------------------+\n|\"Company launched the biggest electronic auction of stock in Wall S...| [stock, Wall Street]| []|\n|Google, Inc. significantly cut the expected share price for its ini...| [stock, stock]| []|\n|Google, Inc. significantly cut the expected share price this mornin...| [stock, stock]| []|\n| Shares of Air Canada (AC.TO) fell by more than half on Wednesday,...| [Stock, stock]| []|\n|Stock prices are lower in moderate trading. 
The Dow Jones Industria...| [Stock, Stock]| []|\n|The bad news just keeps pouring in for mutual fund manager Janus Ca...| [NYSE, NYSE]| []|\n| Shaun Wright Phillips scored in his international debut as Englan...| []|[soccer, World Cup]|\n|NEWCASTLE, ENGLAND - England deservedly beat Ukraine 3-0 today in t...| []|[soccer, World Cup]|\n|MONTREAL (Reuters) - Shares of Air Canada (AC.TO: Quote, Profile, R...| [Stock, stock]| []|\n|\"SAN JOSE, California - On the cusp of its voyage into public tradi...|[stock, Wall Street, stock, Stock]| []|\n|\"Shortly before noon today, Google Inc. stock began trading under t...| [stock, stock]| []|\n|roundup Plus: EA to take World Cup soccer to Xbox...IBM chalks up t...| []|[World Cup, soccer]|\n|The U.S. Securities and Exchange Commission yesterday approved Goog...| [stock, stock]| []|\n|After a bumpy ride toward becoming a publicly traded company, Googl...| [stock, stock]| []|\n|In the most highly anticipated Wall Street debut since the heady da...| [Wall Street, stock]| []|\n|NEW YORK Despite voluble skepticism among investors, Google #39;s s...| [stock, stock]| []|\n|If only the rest of my investments worked out this way. One week ag...| [stock, stock]| []|\n| U.S. stocks to watch: GOOGLE INC. (GOOG.O) Google shares jumped 18...| [stock, stock]| []|\n|\" U.S. stocks to watch: GOOGLE INC. &lt;A HREF=\"\"http://www.invest...| [stock, stock]| []|\n|roundup Plus: KDE updates Linux desktop...EA to take World Cup socc...| []|[World Cup, soccer]|\n+----------------------------------------------------------------------+----------------------------------+-------------------+\nonly showing top 20 rows\n\n" ], [ "result_df = result.select(F.explode(F.arrays_zip('financial_entities.result', 'financial_entities.begin', 'financial_entities.end')).alias(\"cols\")) \\\n.select(F.expr(\"cols['0']\").alias(\"clinical_entities\"),\n F.expr(\"cols['1']\").alias(\"begin\"),\n F.expr(\"cols['2']\").alias(\"end\")).toPandas()\n\nresult_df.head(10)", "_____no_output_____" ] ], [ [ "## RegexMatcher", "_____no_output_____" ] ], [ [ "! 
wget -q\thttps://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/resources/en/pubmed/pubmed-sample.csv\n\npubMedDF = spark.read\\\n .option(\"header\", \"true\")\\\n .csv(\"./pubmed-sample.csv\")\\\n .filter(\"AB IS NOT null\")\\\n .withColumnRenamed(\"AB\", \"text\")\\\n .drop(\"TI\")\n\npubMedDF.show(truncate=50)", "+--------------------------------------------------+\n| text|\n+--------------------------------------------------+\n|The human KCNJ9 (Kir 3.3, GIRK3) is a member of...|\n|BACKGROUND: At present, it is one of the most i...|\n|OBJECTIVE: To investigate the relationship betw...|\n|Combined EEG/fMRI recording has been used to lo...|\n|Kohlschutter syndrome is a rare neurodegenerati...|\n|Statistical analysis of neuroimages is commonly...|\n|The synthetic DOX-LNA conjugate was characteriz...|\n|Our objective was to compare three different me...|\n|We conducted a phase II study to assess the eff...|\n|\"Monomeric sarcosine oxidase (MSOX) is a flavoe...|\n|We presented the tachinid fly Exorista japonica...|\n|The literature dealing with the water conductin...|\n|A novel approach to synthesize chitosan-O-isopr...|\n|An HPLC-ESI-MS-MS method has been developed for...|\n|The localizing and lateralizing values of eye a...|\n|OBJECTIVE: To evaluate the effectiveness and ac...|\n|For the construction of new combinatorial libra...|\n|We report the results of a screen for genetic a...|\n|Intraparenchymal pericatheter cyst is rarely re...|\n|It is known that patients with Klinefelter's sy...|\n+--------------------------------------------------+\nonly showing top 20 rows\n\n" ], [ "rules = '''\nrenal\\s\\w+, started with 'renal'\ncardiac\\s\\w+, started with 'cardiac'\n\\w*ly\\b, ending with 'ly'\n\\S*\\d+\\S*, match any word that contains numbers\n(\\d+).?(\\d*)\\s*(mg|ml|g), match medication metrics\n'''\n\nwith open('regex_rules.txt', 'w') as f:\n \n f.write(rules)\n ", "_____no_output_____" ], [ "import os\n\ndocumentAssembler = DocumentAssembler()\\\n.setInputCol(\"text\")\\\n.setOutputCol(\"document\")\n\nregex_matcher = RegexMatcher()\\\n .setInputCols('document')\\\n .setStrategy(\"MATCH_ALL\")\\\n .setOutputCol(\"regex_matches\")\\\n .setExternalRules(path='./regex_rules.txt', delimiter=',')\n \n\nnlpPipeline = Pipeline(stages=[\n documentAssembler, \n regex_matcher\n ])\n\nempty_df = spark.createDataFrame([['']]).toDF(\"text\")\n\npipelineModel = nlpPipeline.fit(empty_df)\n\nmatch_df = pipelineModel.transform(pubMedDF)\n\nmatch_df.select('regex_matches.result').take(3)", "_____no_output_____" ], [ "match_df.select('text','regex_matches.result')\\\n.toDF('text','matches').filter(F.size('matches')>1)\\\n.show(truncate=70)\n", "+----------------------------------------------------------------------+----------------------------------------------------------------------+\n| text| matches|\n+----------------------------------------------------------------------+----------------------------------------------------------------------+\n|The human KCNJ9 (Kir 3.3, GIRK3) is a member of the G-protein-activ...|[inwardly, family, spansapproximately, byapproximately, approximate...|\n|BACKGROUND: At present, it is one of the most important issues for ...|[previously, previously, intravenously, previously, 25, mg/m(2), 1,...|\n|OBJECTIVE: To investigate the relationship between preoperative atr...|[renal failure, cardiac surgery, cardiac surgery, cardiac surgical,...|\n|Combined EEG/fMRI recording has been used to localize the generator...|[normally, significantly, effectively, analy, only, 
considerably, 2...|\n|Statistical analysis of neuroimages is commonly approached with int...|[analy, commonly, overly, normally, thatsuccessfully, recently, ana...|\n|The synthetic DOX-LNA conjugate was characterized by proton nuclear...| [wasanaly, substantially]|\n|Our objective was to compare three different methods of blood press...|[daily, only, Conversely, Hourly, hourly, Hourly, hourly, hourly, h...|\n|We conducted a phase II study to assess the efficacy and tolerabili...|[analy, respectively, generally, 5-fluorouracil, (5-FU)-, 5-FU-base...|\n|\"Monomeric sarcosine oxidase (MSOX) is a flavoenzyme that catalyzes...|[cataly, methylgly, gly, ethylgly, dimethylgly, spectrally, practic...|\n|We presented the tachinid fly Exorista japonica with moving host mo...| [fly, fly, fly, fly, fly]|\n|The literature dealing with the water conducting properties of sapw...| [generally, mathematically, especially]|\n|A novel approach to synthesize chitosan-O-isopropyl-5'-O-d4T monoph...|[efficiently, poly, chitosan-O-isopropyl-5'-O-d4T, Chitosan-d4T, 1....|\n|An HPLC-ESI-MS-MS method has been developed for the quantitative de...|[chromatographically, respectively, successfully, C18, (n=5), 95.0%...|\n|The localizing and lateralizing values of eye and head ictal deviat...| [early, early]|\n|OBJECTIVE: To evaluate the effectiveness and acceptability of expec...|[weekly, respectively, theanaly, 2006, 2007,, 2, 66, 1), 30patients...|\n|We report the results of a screen for genetic association with urin...|[poly, threepoly, significantly, analy, actually, anextremely, only...|\n|Intraparenchymal pericatheter cyst is rarely reported. Obstruction ...| [rarely, possibly, unusually, Early]|\n|PURPOSE: To compare the effectiveness, potential advantages and com...|[analy, comparatively, wassignificantly, respectively, a7-year, 155...|\n|We have demonstrated a new type of all-optical 2 x 2 switch by usin...|[approximately, fully, approximately, approximately, approximately,...|\n|Physalis peruviana (PP) is a widely used medicinal herb for treatin...|[widely, (20,, 40,, 60,, 80, 95%, 100, 95%, (82.3%), onFeCl2-ascorb...|\n+----------------------------------------------------------------------+----------------------------------------------------------------------+\nonly showing top 20 rows\n\n" ] ], [ [ "## Date Matcher", "_____no_output_____" ], [ "Extract exact & normalize dates from relative date-time phrases. 
The default anchor date will be the date the code is run.", "_____no_output_____" ] ], [ [ "documentAssembler = DocumentAssembler()\\\n.setInputCol(\"text\")\\\n.setOutputCol(\"document\")\n\ndate_matcher = MultiDateMatcher() \\\n .setInputCols('document') \\\n .setOutputCol(\"date\") \\\n .setDateFormat(\"yyyy/MM/dd\")\n \ndate_pipeline = PipelineModel(stages=[\n documentAssembler, \n date_matcher\n ])\n\nsample_df = spark.createDataFrame([['I saw him yesterday and he told me that he will visit us next week']]).toDF(\"text\")\n\nresult = date_pipeline.transform(sample_df)\n\nresult.select('date.result').show(truncate=False)", "+------------------------+\n|result |\n+------------------------+\n|[2020/10/21, 2020/10/13]|\n+------------------------+\n\n" ] ], [ [ "## Text Cleaning with UDF", "_____no_output_____" ] ], [ [ "text = '<h1 style=\"color: #5e9ca0;\">Have a great <span style=\"color: #2b2301;\">birth</span> day!</h1>'\n\ntext_df = spark.createDataFrame([[text]]).toDF(\"text\")\n\nimport re\nfrom pyspark.sql.functions import udf\nfrom pyspark.sql.types import StringType, IntegerType\n\nclean_text = lambda s: re.sub(r'<[^>]*>', '', s)\n\ntext_df.withColumn('cleaned', udf(clean_text, StringType())('text')).select('text','cleaned').show(truncate= False)", "+----------------------------------------------------------------------------------------------+-----------------------+\n|text |cleaned |\n+----------------------------------------------------------------------------------------------+-----------------------+\n|<h1 style=\"color: #5e9ca0;\">Have a great <span style=\"color: #2b2301;\">birth</span> day!</h1>|Have a great birth day!|\n+----------------------------------------------------------------------------------------------+-----------------------+\n\n" ], [ "find_not_alnum_count = lambda s: len([i for i in s if not i.isalnum() and i!=' '])\n\nfind_not_alnum_count(\"it's your birth day!\")", "_____no_output_____" ], [ "text = '<h1 style=\"color: #5e9ca0;\">Have a great <span style=\"color: #2b2301;\">birth</span> day!</h1>'\n\nfind_not_alnum_count(text)", "_____no_output_____" ], [ "text_df.withColumn('cleaned', udf(find_not_alnum_count, IntegerType())('text')).select('text','cleaned').show(truncate= False)", "+----------------------------------------------------------------------------------------------+-------+\n|text |cleaned|\n+----------------------------------------------------------------------------------------------+-------+\n|<h1 style=\"color: #5e9ca0;\">Have a great <span style=\"color: #2b2301;\">birth</span> day!</h1>|23 |\n+----------------------------------------------------------------------------------------------+-------+\n\n" ] ], [ [ "## Finisher", "_____no_output_____" ], [ "***Finisher:*** Once we have our NLP pipeline ready to go, we might want to use our annotation results somewhere else where it is easy to use. 
The Finisher outputs annotation(s) values into a string.\n\nIf we just want the desired output column in the final dataframe, we can use Finisher to drop previous stages in the final output and get the `result` from the process.\n\nThis is very handy when you want to use the output from Spark NLP annotator as an input to another Spark ML transformer.\n\nSettable parameters are:\n\n`setInputCols()`\n\n`setOutputCols()`\n\n`setCleanAnnotations(True)` -> Whether to remove intermediate annotations\n\n`setValueSplitSymbol(“#”)` -> split values within an annotation character\n\n`setAnnotationSplitSymbol(“@”)` -> split values between annotations character\n\n`setIncludeMetadata(False)` -> Whether to include metadata keys. Sometimes useful in some annotations.\n\n`setOutputAsArray(False)` -> Whether to output as Array. Useful as input for other Spark transformers.", "_____no_output_____" ] ], [ [ "finisher = Finisher() \\\n .setInputCols([\"regex_matches\"]) \\\n .setIncludeMetadata(False) # set to False to remove metadata\n\nnlpPipeline = Pipeline(stages=[\n documentAssembler, \n regex_matcher,\n finisher\n ])\n\nempty_df = spark.createDataFrame([['']]).toDF(\"text\")\n\npipelineModel = nlpPipeline.fit(empty_df)\n\nmatch_df = pipelineModel.transform(pubMedDF)\n\nmatch_df.show(truncate = 50)", "+--------------------------------------------------+--------------------------------------------------+\n| text| finished_regex_matches|\n+--------------------------------------------------+--------------------------------------------------+\n|The human KCNJ9 (Kir 3.3, GIRK3) is a member of...|[inwardly, family, spansapproximately, byapprox...|\n|BACKGROUND: At present, it is one of the most i...|[previously, previously, intravenously, previou...|\n|OBJECTIVE: To investigate the relationship betw...|[renal failure, cardiac surgery, cardiac surger...|\n|Combined EEG/fMRI recording has been used to lo...|[normally, significantly, effectively, analy, o...|\n|Kohlschutter syndrome is a rare neurodegenerati...| [family]|\n|Statistical analysis of neuroimages is commonly...|[analy, commonly, overly, normally, thatsuccess...|\n|The synthetic DOX-LNA conjugate was characteriz...| [wasanaly, substantially]|\n|Our objective was to compare three different me...|[daily, only, Conversely, Hourly, hourly, Hourl...|\n|We conducted a phase II study to assess the eff...|[analy, respectively, generally, 5-fluorouracil...|\n|\"Monomeric sarcosine oxidase (MSOX) is a flavoe...|[cataly, methylgly, gly, ethylgly, dimethylgly,...|\n|We presented the tachinid fly Exorista japonica...| [fly, fly, fly, fly, fly]|\n|The literature dealing with the water conductin...| [generally, mathematically, especially]|\n|A novel approach to synthesize chitosan-O-isopr...|[efficiently, poly, chitosan-O-isopropyl-5'-O-d...|\n|An HPLC-ESI-MS-MS method has been developed for...|[chromatographically, respectively, successfull...|\n|The localizing and lateralizing values of eye a...| [early, early]|\n|OBJECTIVE: To evaluate the effectiveness and ac...|[weekly, respectively, theanaly, 2006, 2007,, 2...|\n|For the construction of new combinatorial libra...| [newly]|\n|We report the results of a screen for genetic a...|[poly, threepoly, significantly, analy, actuall...|\n|Intraparenchymal pericatheter cyst is rarely re...| [rarely, possibly, unusually, Early]|\n|It is known that patients with Klinefelter's sy...| []|\n+--------------------------------------------------+--------------------------------------------------+\nonly showing top 20 rows\n\n" ], 
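[ "# added sketch, not part of the original run: setOutputAsArray(True) makes the finished\n# column an array<string> instead of a joined string, which is the input shape Spark ML\n# stages such as CountVectorizer expect; finisher_array, cv and vector_pipeline are\n# illustrative names\nfrom pyspark.ml.feature import CountVectorizer\n\nfinisher_array = Finisher() \\\n    .setInputCols([\"regex_matches\"]) \\\n    .setOutputAsArray(True)\n\n# Finisher names its output column 'finished_<inputCol>' by default\ncv = CountVectorizer(inputCol=\"finished_regex_matches\", outputCol=\"features\")\n\nvector_pipeline = Pipeline(stages=[documentAssembler, regex_matcher, finisher_array, cv])", "_____no_output_____" ],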
[ "match_df.printSchema()", "root\n |-- text: string (nullable = true)\n |-- finished_regex_matches: array (nullable = true)\n | |-- element: string (containsNull = true)\n\n" ], [ "match_df.filter(F.size('finished_regex_matches')>2).show(truncate = 50)", "+--------------------------------------------------+--------------------------------------------------+\n| text| finished_regex_matches|\n+--------------------------------------------------+--------------------------------------------------+\n|The human KCNJ9 (Kir 3.3, GIRK3) is a member of...|[inwardly, family, spansapproximately, byapprox...|\n|BACKGROUND: At present, it is one of the most i...|[previously, previously, intravenously, previou...|\n|OBJECTIVE: To investigate the relationship betw...|[renal failure, cardiac surgery, cardiac surger...|\n|Combined EEG/fMRI recording has been used to lo...|[normally, significantly, effectively, analy, o...|\n|Statistical analysis of neuroimages is commonly...|[analy, commonly, overly, normally, thatsuccess...|\n|Our objective was to compare three different me...|[daily, only, Conversely, Hourly, hourly, Hourl...|\n|We conducted a phase II study to assess the eff...|[analy, respectively, generally, 5-fluorouracil...|\n|\"Monomeric sarcosine oxidase (MSOX) is a flavoe...|[cataly, methylgly, gly, ethylgly, dimethylgly,...|\n|We presented the tachinid fly Exorista japonica...| [fly, fly, fly, fly, fly]|\n|The literature dealing with the water conductin...| [generally, mathematically, especially]|\n|A novel approach to synthesize chitosan-O-isopr...|[efficiently, poly, chitosan-O-isopropyl-5'-O-d...|\n|An HPLC-ESI-MS-MS method has been developed for...|[chromatographically, respectively, successfull...|\n|OBJECTIVE: To evaluate the effectiveness and ac...|[weekly, respectively, theanaly, 2006, 2007,, 2...|\n|We report the results of a screen for genetic a...|[poly, threepoly, significantly, analy, actuall...|\n|Intraparenchymal pericatheter cyst is rarely re...| [rarely, possibly, unusually, Early]|\n|PURPOSE: To compare the effectiveness, potentia...|[analy, comparatively, wassignificantly, respec...|\n|We have demonstrated a new type of all-optical ...|[approximately, fully, approximately, approxima...|\n|Physalis peruviana (PP) is a widely used medici...|[widely, (20,, 40,, 60,, 80, 95%, 100, 95%, (82...|\n|We report the discovery of a series of substitu...|[highly, potentially, highly, respectively, tub...|\n|The purpose of this study was to identify and c...|[family, Nearly, only, 43, 10, 44%, 32%, 64%, 4...|\n+--------------------------------------------------+--------------------------------------------------+\nonly showing top 20 rows\n\n" ] ], [ [ "## LightPipeline\n\nhttps://medium.com/spark-nlp/spark-nlp-101-lightpipeline-a544e93f20f1", "_____no_output_____" ], [ "LightPipelines are Spark NLP specific Pipelines, equivalent to Spark ML Pipeline, but meant to deal with smaller amounts of data. They’re useful working with small datasets, debugging results, or when running either training or prediction from an API that serves one-off requests.\n\nSpark NLP LightPipelines are Spark ML pipelines converted into a single machine but the multi-threaded task, becoming more than 10x times faster for smaller amounts of data (small is relative, but 50k sentences are roughly a good maximum). To use them, we simply plug in a trained (fitted) pipeline and then annotate a plain text. 
We don't even need to convert the input text to DataFrame in order to feed it into a pipeline that's accepting DataFrame as an input in the first place. This feature would be quite useful when it comes to getting a prediction for a few lines of text from a trained ML model.\n\n **It is nearly 20x faster than using Spark ML Pipeline**\n\n`LightPipeline(someTrainedPipeline).annotate(someStringOrArray)`", "_____no_output_____" ] ], [ [ "documentAssembler = DocumentAssembler()\\\n.setInputCol(\"text\")\\\n.setOutputCol(\"document\")\n\ntokenizer = Tokenizer() \\\n .setInputCols([\"document\"]) \\\n .setOutputCol(\"token\")\n\nstemmer = Stemmer() \\\n .setInputCols([\"token\"]) \\\n .setOutputCol(\"stem\")\n\nnlpPipeline = Pipeline(stages=[\n documentAssembler, \n tokenizer,\n stemmer,\n lemmatizer\n ])\n\nempty_df = spark.createDataFrame([['']]).toDF(\"text\")\n\npipelineModel = nlpPipeline.fit(empty_df)\n\npipelineModel.transform(spark_df).show()", "+--------------------+--------------------+--------------------+--------------------+--------------------+\n| text| document| token| stem| lemma|\n+--------------------+--------------------+--------------------+--------------------+--------------------+\n|Peter is a very g...|[[document, 0, 27...|[[token, 0, 4, Pe...|[[token, 0, 4, pe...|[[token, 0, 4, Pe...|\n|My life in Russia...|[[document, 0, 37...|[[token, 0, 1, My...|[[token, 0, 1, my...|[[token, 0, 1, My...|\n|John and Peter ar...|[[document, 0, 76...|[[token, 0, 3, Jo...|[[token, 0, 3, jo...|[[token, 0, 3, Jo...|\n|Lucas Nogal Dunbe...|[[document, 0, 67...|[[token, 0, 4, Lu...|[[token, 0, 4, lu...|[[token, 0, 4, Lu...|\n|Europe is very cu...|[[document, 0, 68...|[[token, 0, 5, Eu...|[[token, 0, 5, eu...|[[token, 0, 5, Eu...|\n+--------------------+--------------------+--------------------+--------------------+--------------------+\n\n" ], [ "from sparknlp.base import LightPipeline\n\nlight_model = LightPipeline(pipelineModel)\n\nlight_result = light_model.annotate(\"John and Peter are brothers. However they don't support each other that much.\")", "_____no_output_____" ], [ "light_result.keys()", "_____no_output_____" ], [ "list(zip(light_result['token'], light_result['stem'], light_result['lemma']))", "_____no_output_____" ], [ "light_result = light_model.fullAnnotate(\"John and Peter are brothers. However they don't support each other that much.\")", "_____no_output_____" ], [ "light_result", "_____no_output_____" ], [ "text_list= [\"How did serfdom develop in and then leave Russia ?\",\n\"There will be some exciting breakthroughs in NLP this year.\"]\n\nlight_model.annotate(text_list)", "_____no_output_____" ] ], [ [ "**important note:** When you use Finisher in your pipeline, regardless of setting `cleanAnnotations` to False or True, LigtPipeline will only return the finished columns.", "_____no_output_____" ] ], [ [ "", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
cb99f98fa648d5ef781a6d803684697bf8be6636
21,956
ipynb
Jupyter Notebook
Model/Testing Embedding.ipynb
EthanH43/text_generator
363361b3320ec731401518d0bb428d00a9194d2b
[ "MIT" ]
null
null
null
Model/Testing Embedding.ipynb
EthanH43/text_generator
363361b3320ec731401518d0bb428d00a9194d2b
[ "MIT" ]
null
null
null
Model/Testing Embedding.ipynb
EthanH43/text_generator
363361b3320ec731401518d0bb428d00a9194d2b
[ "MIT" ]
null
null
null
28.076726
249
0.530652
[ [ [ "# check out list words and the apostrophe situation in the step for loop\n# check out allowing more than maxseqlen in generate text\n# look into batch size\n#text generation apostrpphe breaking", "_____no_output_____" ], [ "import pandas as pd\nimport numpy as np\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.optimizers import RMSprop\nfrom keras import optimizers\nimport sys\nfrom keras.callbacks import LambdaCallback\nimport random\nimport matplotlib.pyplot as plt\n\n", "/anaconda3/lib/python3.6/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n from ._conv import register_converters as _register_converters\nUsing TensorFlow backend.\n" ], [ "def sample(preds, temperature=1.0):\n # helper function to sample an index from a probability array\n preds = np.asarray(preds).astype('float64')\n preds = np.log(preds) / temperature\n exp_preds = np.exp(preds)\n preds = exp_preds / np.sum(exp_preds)\n probas = np.random.multinomial(1, preds, 1)\n return np.argmax(probas)", "_____no_output_____" ], [ "def generate_text(seed_text, numb_next_words):\n output=seed_text\n for i in range (numb_next_words):\n \n words_gen = set(seed_text.split())\n words_gen=list(words_gen) #create list of unique words in seed text\n \n \n# for i in range (len(words_gen)): #replace all ' in seed text\n# words_gen[i]=words_gen[i].replace(\"‘\", '').replace(\"’\", '').replace(\"'\", '')\n \n #create a dictionary with index and word\n word_indices_gen = dict((c, i) for i, c in enumerate(words_gen, 1)) \n \n #turn sentence into a sequence of numbers\n sequence=[] \n for word in seed_text.split():\n sequence.append(word_indices_gen[word])\n sequence_padded = pad_sequences([sequence], maxlen=10, padding='pre')\n# sequence_padded=sequence\n \n #create an embedding matrix with same indices as word_index \n EMBEDDING_DIM=25\n total_words=len(word_indices_gen)+1\n embedding_matrix = np.zeros((total_words, EMBEDDING_DIM))\n for word, i in word_indices_gen.items():\n embedding_vector = embeddings_index.get(word)\n if embedding_vector is not None:\n # words not found in embedding index will be all-zeros.\n embedding_matrix[i] = embedding_vector\n #create X input with embedding matrix for specific words (by their index)\n gener=[]\n for number in sequence_padded:\n gener.append(embedding_matrix[number])\n\n predicted=model.predict([gener], verbose=0)\n\n predicted=sample(predicted[0])\n output_word=\"\"\n for word, index in word_indices.items():\n if index == predicted:\n output_word = word\n break\n output+=\" \" + output_word\n seed_text+=\" \" + output_word\n seed_text=seed_text.split(' ', 1)[1]\n return output", "_____no_output_____" ], [ "def on_epoch_end(epoch, _):\n # Function invoked at end of each epoch. 
Prints generated text.\n    print()\n    print('----- Generating text after Epoch: %d' % epoch)\n\n    start_index = random.randint(0, len(listofwords) - maxlen - 1)\n    for diversity in [0.5, 1.0]:\n        print('----- diversity:', diversity)\n        generated = ''\n        sentence = listofwords[start_index: start_index + maxlen].str.cat(sep=' ')\n        generated += sentence\n        print('----- Generating with seed: \"' + sentence + '\"')\n        sys.stdout.write(generate_text(generated, 20))\n", "_____no_output_____" ], [ "tweet_data=pd.read_csv('../Load_Tweets/data/tweet_data.csv')", "_____no_output_____" ], [ "tweet_text = tweet_data['TEXT']", "_____no_output_____" ], [ "tweet_text_all = tweet_data['TEXT'].str.cat(sep=' ')", "_____no_output_____" ], [ "listofwords=pd.Series(tweet_text_all.split())", "_____no_output_____" ], [ "tweet_text.head()", "_____no_output_____" ], [ "# top_words=listofwords.value_counts()\n# top_words_percent= top_words/len(listofwords)\n# top_words.head(50).plot.bar()\n# # top_words.head(50)", "_____no_output_____" ], [ "total_chars=len(tweet_text_all)\ntotal_chars\ntotal_wordz=len((tweet_text_all.split()))\ntotal_wordz", "_____no_output_____" ], [ "chars = set(tweet_text_all)\nwords = set(tweet_text_all.split())\nprint (\"total number of unique words\", len(words))\nprint (\"total number of unique chars\", len(chars))", "total number of unique words 32293\ntotal number of unique chars 369\n" ], [ "words=list(words)", "_____no_output_____" ], [ "#replace apostrophes in dictionary keys \nfor i in range (len(words)):\n    words[i]=words[i].replace(\"‘\", '').replace(\"’\", '').replace(\"'\", '')", "_____no_output_____" ], [ "words=set(words)\nlen(words)", "_____no_output_____" ], [ "#create forward and reverse word index\nword_indices = dict((c, i) for i, c in enumerate(words, 1))\nindices_word = dict((i, c) for i, c in enumerate(words,1 ))", "_____no_output_____" ], [ "len(word_indices)\nmax(word_indices.values())", "_____no_output_____" ], [ "#choose step \n\nmaxlen = 10\nstep = 2\n\nsentences = []\nnext_words = []\nlist_words = []\n\nsentences2 = []\nfor i in range (len(tweet_text)):\n    list_words = tweet_text.iloc[i].split()\n    for i in range(len( list_words)):\n        list_words[i]=list_words[i].replace(\"‘\", '').replace(\"’\", '').replace(\"'\", '')\n    for i in range(0, len(list_words) - maxlen, step):\n        sentences2 = ' '.join(list_words[i: i + maxlen])\n        sentences.append(sentences2)\n        next_words.append((list_words[i + maxlen]))\n\nprint ('length of sentence list:', len(sentences))\nprint (\"length of next_word list\", len(next_words))", "length of sentence list: 77465\nlength of next_word list 77465\n" ], [ "sequences=[]\ny=[]\nfor i, sentence in enumerate(sentences):\n    sequence=[]\n    for j, word in enumerate(sentence.split()):\n        sequence.append(word_indices[word])\n    sequences.append(sequence)\n    y.append(word_indices[next_words[i]])", "_____no_output_____" ], [ "sequences=np.asarray(sequences)\nsequences.shape", "_____no_output_____" ], [ "sequences", "_____no_output_____" ], [ "total_words= len(word_indices)+1\ntotal_words", "_____no_output_____" ], [ "embeddings_index = {}\nf = open('../word_embeding/glove.twitter.27B.25d.txt')\nfor line in f:\n    values = line.split()\n    word = values[0]\n    coefs = np.asarray(values[1:], dtype='float32')\n    embeddings_index[word] = coefs\nf.close()\n\nprint('Found %s word vectors.' 
% len(embeddings_index))", "Found 1193514 word vectors.\n" ], [ "max(word_indices.values())\nlen(word_indices)", "_____no_output_____" ], [ "EMBEDDING_DIM=25\nembedding_matrix = np.zeros((total_words, EMBEDDING_DIM)) \nfor word, i in word_indices.items():\n embedding_vector = embeddings_index.get(word)\n if embedding_vector is not None:\n # words not found in embedding index will be all-zeros.\n embedding_matrix[i] = embedding_vector", "_____no_output_____" ], [ "embedding_matrix.shape", "_____no_output_____" ], [ "X=sequences[0:100]\ny=y[0:100]", "_____no_output_____" ], [ "from keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import Flatten\nfrom keras.layers import Embedding\nfrom keras.layers import Dropout\nfrom keras.layers import LSTM\nfrom keras.layers import Input\nfrom keras.regularizers import L1L2\nfrom keras import regularizers\nfrom keras import metrics\n# embedding_layer= Embedding(total_words, EMBEDDING_DIM, weights=[embedding_matrix],input_length=max_seq,trainable=False)\n# sequence_input = Input(shape=(max_seq,), dtype='int32')\n# embedded_sequences= embedding_layer(sequence_input)\nmodel=Sequential()\n# e=Embedding(total_words, EMBEDDING_DIM, weights=[embedding_matrix],input_length=maxlen,trainable=False)\n# model.add(e)\nfrom keras.layers import Embedding\n\nmodel.add( Embedding(total_words,\n EMBEDDING_DIM,\n weights=[embedding_matrix],\n input_length=maxlen,\n input_shape= (maxlen,),\n trainable=False))\nmodel.add(LSTM(128, bias_regularizer=regularizers.l1(0.01)))\nmodel.add(Dropout(0.2))\n# model.add(LSTM(512, return_sequences=False))\n# model.add(Dropout(0.1))\n# model.add(Flatten())\nmodel.add(Dense(total_words, activation=\"softmax\"))\noptimizer = RMSprop(lr=0.01)\n# sgd = optimizers.SGD(lr=0.01, clipvalue=0.5)\nmodel.compile(loss='sparse_categorical_crossentropy', metrics=['accuracy'], optimizer=optimizer)\nmodel.summary()\n\n# model.add(LSTM(128, input_shape=(maxlen, len(chars))))\n# model.add(Dense(len(chars), activation='softmax'))\n\n# model.compile(loss='categorical_crossentropy', optimizer=optimizer)\n\n", "WARNING:tensorflow:From /anaconda3/lib/python3.6/site-packages/tensorflow/python/framework/op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\nInstructions for updating:\nColocations handled automatically by placer.\nWARNING:tensorflow:From /anaconda3/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py:3445: calling dropout (from tensorflow.python.ops.nn_ops) with keep_prob is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease use `rate` instead of `keep_prob`. 
Rate should be set to `rate = 1 - keep_prob`.\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nembedding_1 (Embedding) (None, 10, 25) 797700 \n_________________________________________________________________\nlstm_1 (LSTM) (None, 128) 78848 \n_________________________________________________________________\ndropout_1 (Dropout) (None, 128) 0 \n_________________________________________________________________\ndense_1 (Dense) (None, 31908) 4116132 \n=================================================================\nTotal params: 4,992,680\nTrainable params: 4,194,980\nNon-trainable params: 797,700\n_________________________________________________________________\n" ], [ "# from keras.models import load_model\n# model= load_model(\"../Saved_models/failed_on_99th_epoch_word_embedding\")", "_____no_output_____" ], [ "# print_callback = LambdaCallback(on_epoch_end=on_epoch_end)\nfrom keras.callbacks import EarlyStopping\nearly_stopping = EarlyStopping(monitor='val_acc', patience=3)\nmodel.fit(X, y, validation_split=0.2, epochs=10,callbacks=[early_stopping])", "WARNING:tensorflow:From /anaconda3/lib/python3.6/site-packages/tensorflow/python/ops/math_ops.py:3066: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse tf.cast instead.\nTrain on 80 samples, validate on 20 samples\nEpoch 1/10\n80/80 [==============================] - 6s 78ms/step - loss: 11.5542 - acc: 0.0125 - val_loss: 10.8645 - val_acc: 0.0500\nEpoch 2/10\n80/80 [==============================] - 1s 15ms/step - loss: 7.6992 - acc: 0.0250 - val_loss: 10.8853 - val_acc: 0.0000e+00\nEpoch 3/10\n80/80 [==============================] - 1s 16ms/step - loss: 6.3233 - acc: 0.0375 - val_loss: 10.4412 - val_acc: 0.0500\nEpoch 4/10\n80/80 [==============================] - 2s 19ms/step - loss: 5.9008 - acc: 0.0375 - val_loss: 10.6911 - val_acc: 0.0500\n" ], [ "loss, accuracy = model.evaluate(X_test_sample, y_test_sample, verbose=0)\nprint('Accuracy: %f' % (accuracy*100))\nprint('loss: %f' % (loss))\nperplexity = np.exp2(loss)\nprint ('perplexity: {}'.format(perplexity))\n", "Accuracy: 9.900000\nloss: 9.635607\nperplexity: 795.4386352619334\n" ], [ "print (generate_text(\"i will\", 20))", "i will be the U.S. and my great job. and the Dems They We including a big deal with Russia and people\n" ], [ "# model.save('../failed_on_99th_epoch_word_embedding')", "_____no_output_____" ], [ "predictions_test=model.predict(X_test)", "_____no_output_____" ], [ "len(predictions_test)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb99ffbe36a135e94cd7a50fc6497533c1745f70
12,008
ipynb
Jupyter Notebook
Untitled.ipynb
mannyfin/IRAS
a164856e80320c90e2decdfb3ded4b75a10d5061
[ "BSD-3-Clause" ]
null
null
null
Untitled.ipynb
mannyfin/IRAS
a164856e80320c90e2decdfb3ded4b75a10d5061
[ "BSD-3-Clause" ]
null
null
null
Untitled.ipynb
mannyfin/IRAS
a164856e80320c90e2decdfb3ded4b75a10d5061
[ "BSD-3-Clause" ]
null
null
null
81.687075
7,232
0.80538
[ [ [ "import numpy as np\nfrom sympy import *\ninit_printing()", "_____no_output_____" ], [ "x, y = symbols('x y', real=True)", "_____no_output_____" ], [ "a = 0.7190027e-2\nb = 0.3956443e-5\nc = -0.1842722e-8\nd = 0.3471851e-12\ne = -0.2616792e-16\nk = -0.234471", "_____no_output_____" ], [ "# Eq(a*y + b*(y**2) + c*(y**3) + d*(y**4) + e*(y**5) + k -x)\n# solve(Eq(a*y + b*(y**2) + c*(y**3) + d*(y**4) + e*(y**5) + k -x),y)\n# solveset(Eq(a*y + b*(y**2) + c*(y**3) + k -x),y, domain=S.Reals)\n# solve(Eq(a*y + b*(y**2) + c*(y**3) + d*(y**4)+ k -x),y)\nequation = sympify(a*y + b*(y**2) + c*(y**3) + d*(y**4)+e*(y**5)+ k -x)\nprint(equation)", "-x - 2.616792e-17*y**5 + 3.471851e-13*y**4 - 1.842722e-9*y**3 + 3.956443e-6*y**2 + 0.007190027*y - 0.234471\n" ], [ "# equat = Eq(equation)\nEq(equation)", "_____no_output_____" ], [ "T = np.arange(32,)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code" ] ]
cb9a127f1a185aec1c396aea05d27b21503d18b1
217,288
ipynb
Jupyter Notebook
notebooks/.ipynb_checkpoints/exploration-checkpoint.ipynb
Preeti240/Pokemon-Dataset-Exploration
64d0fbbf88e0fc35a874b6c8dad8eb59698e916f
[ "MIT" ]
1
2021-07-23T05:58:33.000Z
2021-07-23T05:58:33.000Z
notebooks/exploration.ipynb
Preeti240/Pokemon-Dataset-Exploration
64d0fbbf88e0fc35a874b6c8dad8eb59698e916f
[ "MIT" ]
null
null
null
notebooks/exploration.ipynb
Preeti240/Pokemon-Dataset-Exploration
64d0fbbf88e0fc35a874b6c8dad8eb59698e916f
[ "MIT" ]
null
null
null
292.053763
181,920
0.931018
[ [ [ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom IPython.core.display import display, HTML\n\nplt.style.use('fivethirtyeight')\nplt.rc('figure', figsize=(5.0, 2.0))", "_____no_output_____" ], [ "pokemon=pd.read_csv(\"../dataset/pokemon.csv\")", "_____no_output_____" ], [ "# Which pokémon is the most difficult to catch? ", "_____no_output_____" ], [ "pokemon['capture_rate']=pd.to_numeric(pokemon['capture_rate'])\npokemon['name'][pokemon['capture_rate']==min(pokemon['capture_rate'])]", "_____no_output_____" ], [ "#Which no-legendary pokémon is the most diffucult to catch? </b>", "_____no_output_____" ], [ "no_legendary=pokemon[pokemon['is_legendary']==False]\nno_legendary['name'][no_legendary['capture_rate']==min(no_legendary['capture_rate'])]", "_____no_output_____" ], [ "display(HTML(\"<img src='../img/beldum.png' width='200px' height='200px'>\"))\n\nrate=pokemon[pokemon['name']=='Beldum']['capture_rate'].values\nbeldum_rate=num = \"{0:.2f}\".format((rate[0]*100)/255)\nprint(\"Name: Beldum\\n\"+\"Percentage of catch: \" + beldum_rate + \" %\")", "_____no_output_____" ] ], [ [ "# BEST AND WORST", "_____no_output_____" ] ], [ [ "<div class=\"alert alert-info\">\nThe min value for attack/special-attack and defense/special-defense statistics was calculated from the subsets of the pokemon which have the highest physical/special statistic compared to the special/physical equivalent. In this way the results acquire greater relevance.\n</div>", "_____no_output_____" ] ], [ [ "#Speed", "_____no_output_____" ], [ "pokemon['name'][pokemon['speed']==max(pokemon['speed'])]", "_____no_output_____" ], [ "pokemon['name'][pokemon['speed']==min(pokemon['speed'])]", "_____no_output_____" ] ], [ [ "<b> Atk </b>", "_____no_output_____" ] ], [ [ "pokemon['name'][pokemon['attack']==max(pokemon['attack'])]", "_____no_output_____" ], [ "physical_atk=pokemon[pokemon['attack']>=pokemon['sp_attack']]\nphysical_atk['name'][physical_atk['attack']==min(physical_atk['attack'])]", "_____no_output_____" ] ], [ [ "<b>Def</b>", "_____no_output_____" ] ], [ [ "pokemon['name'][pokemon['defense']==max(pokemon['defense'])]", "_____no_output_____" ], [ "physical_def=pokemon[pokemon['defense']>=pokemon['sp_defense']]\nphysical_def['name'][physical_def['defense']==min(physical_def['defense'])]", "_____no_output_____" ] ], [ [ "<b> Sp.Atk</b>", "_____no_output_____" ] ], [ [ "pokemon['name'][pokemon['sp_attack']==max(pokemon['sp_attack'])]", "_____no_output_____" ], [ "special_atk=pokemon[pokemon['sp_attack']>=pokemon['attack']]\nspecial_atk['name'][special_atk['sp_attack']==min(special_atk['sp_attack'])]", "_____no_output_____" ] ], [ [ "<b>Sp.Def</b>", "_____no_output_____" ] ], [ [ "pokemon['name'][pokemon['sp_defense']==max(pokemon['sp_defense'])]", "_____no_output_____" ], [ "special_def=pokemon[pokemon['sp_defense']>=pokemon['defense']]\nspecial_def['name'][special_def['sp_defense']==min(special_def['sp_defense'])]", "_____no_output_____" ] ], [ [ "<b>Hp</b>", "_____no_output_____" ] ], [ [ "pokemon['name'][pokemon['hp']==max(pokemon['hp'])]", "_____no_output_____" ], [ "pokemon['name'][pokemon['hp']==min(pokemon['hp'])]", "_____no_output_____" ] ], [ [ "Combining all the information we can see how <code>Shuckle</code> is a pokémon with <b>very particular statistics</b>. 
Look at them:", "_____no_output_____" ] ], [ [ "display(HTML(\"<img src='../img/shuckle.png' width='200px' height='200px'>\"))\n\npokemon.iloc[212][['name','hp','attack','sp_attack','defense','sp_defense','speed']]\n", "_____no_output_____" ] ], [ [ "# Which type is the most common? ", "_____no_output_____" ], [ "To answer this question, I think it's more interesting to look at the <b>absolute frequencies</b> for each type of pokémon in a <b>bar chart</b>.", "_____no_output_____" ] ], [ [ "types_abs_freq=(pokemon['type1'].value_counts()+pokemon['type2'].value_counts()).sort_values(ascending=False)\nx=types_abs_freq.index\ny=types_abs_freq.values\ntypes_abs_freq.plot.bar()\nplt.show()", "_____no_output_____" ] ], [ [ "<div class=\"alert alert-info\">\nAbsolute frequencies were calculated from a set constructed as the union between the set of types 1 and 2 of each pokémon.\n</div>", "_____no_output_____" ], [ "The result obtained shows us a subdivision of the pokémon by type that rather conforms to reality: the pokémon closest to having an animal counterpart in the real world are the most widespread.<br>\n<b>The most common type is water</b> but the most interesting finding is that the psychic type is the fifth most common type, even exceeding the bug type.", "_____no_output_____" ], [ "# Which ability is the most common? \n\nWe answer this question by printing the top 10 most common abilities.", "_____no_output_____" ] ], [ [ "ser_abilities=pokemon['abilities']\nabilities=[]\nfor i in range(0,801):\n    arr_ab=ser_abilities[i].split(',')\n    for j in range(0,len(arr_ab)):\n        ability=arr_ab[j].replace(\"[\",\"\").replace(\"'\",\"\").replace(\"]\",\"\")\n        abilities.append(ability)\n    \nabilities_freq=pd.Series(abilities).value_counts().sort_values(ascending=False)\nabilities_freq.head(10)", "_____no_output_____" ] ], [ [ "<b> Be very careful when using Earthquake! </b>", "_____no_output_____" ], [ "# Correlation", "_____no_output_____" ] ], [ [ "import seaborn as sns\n\nplt.figure(figsize=(20,20))\nsns.heatmap(pokemon.corr(), linewidths=.5)\nplt.show()", "_____no_output_____" ] ], [ [ "There is a strong positive correlation between:\n- generation and pokédex number (good information for building generation clusters),\n- against_ghost and against_dark (thanks to ghost type),\n- base_egg_steps and is_legendary (good information for building a legendary classifier).\n\nThere is a good positive correlation between:\n- single stats and base_total,\n- height and weight. \n\nThere is a strong negative correlation between:\n- capture_rate and base_total,\n- single stats and capture_rate,\n- against_fight and against_ghost (thanks to normal type),\n- against_psychic and against_bug (thanks to dark type),\n- against_ground and against_ice (Why?),\n- base_happiness and base_egg_steps. \n\nThere is a good negative correlation between:\n- base_happiness and weight,\n- base_happiness and is_legendary. \n\nAnd so on.", "_____no_output_____" ] ] ]
[ "code", "raw", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code", "code", "code", "code", "code", "code", "code" ], [ "raw" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ] ]
cb9a159a6d904e9a5e6477a3760556e427775254
1,609
ipynb
Jupyter Notebook
TensorFlow-with-dynamic-scaling/tensorflow/contrib/eager/python/examples/generative_examples/text_generation.ipynb
BingyangWu/Antman
e9323cc8ccda637d3962b0de29ce154317f17e7a
[ "MIT" ]
null
null
null
TensorFlow-with-dynamic-scaling/tensorflow/contrib/eager/python/examples/generative_examples/text_generation.ipynb
BingyangWu/Antman
e9323cc8ccda637d3962b0de29ce154317f17e7a
[ "MIT" ]
null
null
null
TensorFlow-with-dynamic-scaling/tensorflow/contrib/eager/python/examples/generative_examples/text_generation.ipynb
BingyangWu/Antman
e9323cc8ccda637d3962b0de29ce154317f17e7a
[ "MIT" ]
null
null
null
35.755556
318
0.585457
[ [ [ "##### Copyright 2018 The TensorFlow Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\").\n\n# Text Generation using a RNN\n\nThis example has moved.\n\n<table class=\"tfo-notebook-buttons\" align=\"left\"><td>\n<a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/sequences/text_generation.ipynb\">\n <img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a> \n</td><td>\n<a target=\"_blank\" href=\"https://github.com.cnpmjs.org/tensorflow/docs/blob/master/site/en/tutorials/sequences/text_generation.ipynb\"><img width=32px src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a></td></table>", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown" ] ]
cb9a16d7c4ab53ef85e3e46e2c7f25c6e58d99b5
6,051
ipynb
Jupyter Notebook
test/test-hifigan-multihifigan.ipynb
ishine/malaya-speech
fd34afc7107af1656dff4b3201fa51dda54fde18
[ "MIT" ]
null
null
null
test/test-hifigan-multihifigan.ipynb
ishine/malaya-speech
fd34afc7107af1656dff4b3201fa51dda54fde18
[ "MIT" ]
null
null
null
test/test-hifigan-multihifigan.ipynb
ishine/malaya-speech
fd34afc7107af1656dff4b3201fa51dda54fde18
[ "MIT" ]
null
null
null
24.204
330
0.562717
[ [ [ "# !wget https://raw.githubusercontent.com/TensorSpeech/TensorFlowTTS/master/examples/hifigan/conf/hifigan.v2.yaml", "_____no_output_____" ], [ "import os\n\nos.environ['CUDA_VISIBLE_DEVICES'] = '1'", "_____no_output_____" ], [ "import sys\n\nSOURCE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__name__)))\nsys.path.insert(0, SOURCE_DIR)", "_____no_output_____" ], [ "import malaya_speech\nimport malaya_speech.train\nfrom malaya_speech.train.model import melgan, hifigan\nfrom malaya_speech.train.model import stft\nimport malaya_speech.config\nfrom malaya_speech.train.loss import calculate_2d_loss, calculate_3d_loss\nimport tensorflow as tf", "_____no_output_____" ], [ "hifigan_config = malaya_speech.config.hifigan_config_v2\nhifigan_config['hifigan_generator_params']['filters'] = 512", "_____no_output_____" ], [ "generator = hifigan.Generator(\n hifigan.GeneratorConfig(**hifigan_config[\"hifigan_generator_params\"]),\n name=\"hifigan_generator\",\n)\nmultigenerator = hifigan.MultiGenerator(\n hifigan.GeneratorConfig(**hifigan_config[\"hifigan_generator_params\"]),\n name=\"hifigan_generator\",\n)", "_____no_output_____" ], [ "multiperiod_discriminator = hifigan.MultiPeriodDiscriminator(\n hifigan.DiscriminatorConfig(**hifigan_config[\"hifigan_discriminator_params\"]),\n name=\"hifigan_multiperiod_discriminator\",\n)\nmultiscale_discriminator = melgan.MultiScaleDiscriminator(\n melgan.DiscriminatorConfig(\n **hifigan_config[\"melgan_discriminator_params\"],\n name=\"melgan_multiscale_discriminator\",\n )\n)", "_____no_output_____" ], [ "discriminator = hifigan.Discriminator(multiperiod_discriminator, multiscale_discriminator)", "_____no_output_____" ], [ "y = tf.placeholder(tf.float32, (None, None))\nx = tf.placeholder(tf.float32, (None, None, 80))\ny_hat = generator(x)\nmulti_y_hat = multigenerator(x)\n# p_hat = discriminator(y_hat)\n# p = discriminator(tf.expand_dims(y, -1))", "WARNING:tensorflow:From /Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/tensorflow_core/python/ops/resource_variable_ops.py:1630: calling BaseResourceVariable.__init__ (from tensorflow.python.ops.resource_variable_ops) with constraint is deprecated and will be removed in a future version.\nInstructions for updating:\nIf using Keras pass *_constraint arguments to layers.\n" ], [ "y_hat, multi_y_hat", "_____no_output_____" ], [ "sess = tf.InteractiveSession()\nsess.run(tf.global_variables_initializer())", "_____no_output_____" ], [ "import numpy as np", "_____no_output_____" ], [ "%%time\n\nsess.run(y_hat, feed_dict = {x: np.random.uniform(size=(1,200,80))}).shape", "CPU times: user 2.04 s, sys: 151 ms, total: 2.19 s\nWall time: 421 ms\n" ], [ "%%time\n\nsess.run(multi_y_hat, feed_dict = {x: np.random.uniform(size=(1,200,80))}).shape", "CPU times: user 1.99 s, sys: 246 ms, total: 2.24 s\nWall time: 341 ms\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb9a180099b3868dc623fc139c727c713fa53c90
11,714
ipynb
Jupyter Notebook
Chapter 11/DQN_Atari_v2.ipynb
arifmudi/Deep-Learning-with-TensorFlow-2-and-Keras
90018b42f486f55d559ec1d8807c180d55f6b28b
[ "MIT" ]
267
2020-01-01T23:38:26.000Z
2022-03-20T06:57:05.000Z
Chapter 11/DQN_Atari_v2.ipynb
arifmudi/Deep-Learning-with-TensorFlow-2-and-Keras
90018b42f486f55d559ec1d8807c180d55f6b28b
[ "MIT" ]
6
2020-03-20T05:14:50.000Z
2021-07-01T09:02:57.000Z
Chapter 11/DQN_Atari_v2.ipynb
arifmudi/Deep-Learning-with-TensorFlow-2-and-Keras
90018b42f486f55d559ec1d8807c180d55f6b28b
[ "MIT" ]
192
2019-12-24T18:49:44.000Z
2022-03-24T16:05:23.000Z
38.156352
126
0.517757
[ [ [ "import random\nimport gym\n#import math\nimport numpy as np\nfrom collections import deque\nimport tensorflow as tf\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Conv2D, MaxPooling2D, Flatten\nfrom tensorflow.keras.optimizers import Adam", "_____no_output_____" ], [ "EPOCHS = 1000\nTHRESHOLD = 10\nMONITOR = True", "_____no_output_____" ], [ "class DQN():\n def __init__(self, env_string,batch_size=64, IM_SIZE = 84, m = 4):\n self.memory = deque(maxlen=5000)\n self.env = gym.make(env_string)\n input_size = self.env.observation_space.shape[0]\n action_size = self.env.action_space.n\n self.batch_size = batch_size\n self.gamma = 1.0\n self.epsilon = 1.0\n self.epsilon_min = 0.01\n self.epsilon_decay = 0.995\n self.IM_SIZE = IM_SIZE\n self.m = m\n \n \n alpha=0.01\n alpha_decay=0.01\n if MONITOR: self.env = gym.wrappers.Monitor(self.env, '../data/'+env_string, force=True)\n \n # Init model\n self.model = Sequential()\n self.model.add( Conv2D(32, 8, (4,4), activation='relu',padding='valid', input_shape=(IM_SIZE, IM_SIZE, m)))\n #self.model.add(MaxPooling2D())\n self.model.add( Conv2D(64, 4, (2,2), activation='relu',padding='valid'))\n self.model.add(MaxPooling2D())\n self.model.add( Conv2D(64, 3, (1,1), activation='relu',padding='valid'))\n self.model.add(MaxPooling2D())\n self.model.add(Flatten())\n self.model.add(Dense(256, activation='elu'))\n self.model.add(Dense(action_size, activation='linear'))\n self.model.compile(loss='mse', optimizer=Adam(lr=alpha, decay=alpha_decay))\n self.model_target = tf.keras.models.clone_model(self.model)\n\n def remember(self, state, action, reward, next_state, done):\n self.memory.append((state, action, reward, next_state, done))\n\n def choose_action(self, state, epsilon):\n if np.random.random() <= epsilon:\n return self.env.action_space.sample()\n else:\n return np.argmax(self.model.predict(state))\n\n def preprocess_state(self, img):\n img_temp = img[31:195] # Choose the important area of the image\n img_temp = tf.image.rgb_to_grayscale(img_temp)\n img_temp = tf.image.resize(img_temp, [self.IM_SIZE, self.IM_SIZE],\n method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)\n img_temp = tf.cast(img_temp, tf.float32)\n return img_temp[:,:,0]\n\n def combine_images(self, img1, img2):\n if len(img1.shape) == 3 and img1.shape[0] == self.m:\n im = np.append(img1[1:,:, :],np.expand_dims(img2,0), axis=2)\n return tf.expand_dims(im, 0)\n else:\n im = np.stack([img1]*self.m, axis = 2)\n return tf.expand_dims(im, 0)\n #return np.reshape(state, [1, 4])\n\n def replay(self, batch_size):\n x_batch, y_batch = [], []\n minibatch = random.sample(self.memory, min(len(self.memory), batch_size))\n for state, action, reward, next_state, done in minibatch:\n y_target = self.model_target.predict(state)\n y_target[0][action] = reward if done else reward + self.gamma * np.max(self.model.predict(next_state)[0])\n x_batch.append(state[0])\n y_batch.append(y_target[0])\n \n self.model.fit(np.array(x_batch), np.array(y_batch), batch_size=len(x_batch), verbose=0)\n #epsilon = max(epsilon_min, epsilon_decay*epsilon) # decrease epsilon\n \n\n def train(self):\n scores = deque(maxlen=100)\n avg_scores = []\n \n\n for e in range(EPOCHS):\n state = self.env.reset()\n state = self.preprocess_state(state)\n state = self.combine_images(state, state)\n done = False\n i = 0\n while not done:\n action = self.choose_action(state,self.epsilon)\n next_state, reward, done, _ = self.env.step(action)\n next_state = self.preprocess_state(next_state)\n next_state 
= self.combine_images(next_state, state)\n #print(next_state.shape)\n self.remember(state, action, reward, next_state, done)\n state = next_state\n self.epsilon = max(self.epsilon_min, self.epsilon_decay*self.epsilon) # decrease epsilon\n i += reward\n\n scores.append(i)\n mean_score = np.mean(scores)\n avg_scores.append(mean_score)\n if mean_score >= THRESHOLD:\n print('Solved after {} trials ✔'.format(e))\n return avg_scores\n if e % 10 == 0:\n print('[Episode {}] - Average Score: {}.'.format(e, mean_score))\n self.model_target.set_weights(self.model.get_weights())\n\n self.replay(self.batch_size)\n \n print('Did not solve after {} episodes 😞'.format(e))\n return avg_scores\n\n", "_____no_output_____" ], [ "env_string = 'BreakoutDeterministic-v4'\nagent = DQN(env_string)\n", "_____no_output_____" ], [ "agent.model.summary()", "Model: \"sequential\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nconv2d (Conv2D) (None, 20, 20, 32) 8224 \n_________________________________________________________________\nconv2d_1 (Conv2D) (None, 9, 9, 64) 32832 \n_________________________________________________________________\nmax_pooling2d (MaxPooling2D) (None, 4, 4, 64) 0 \n_________________________________________________________________\nconv2d_2 (Conv2D) (None, 2, 2, 64) 36928 \n_________________________________________________________________\nmax_pooling2d_1 (MaxPooling2 (None, 1, 1, 64) 0 \n_________________________________________________________________\nflatten (Flatten) (None, 64) 0 \n_________________________________________________________________\ndense (Dense) (None, 256) 16640 \n_________________________________________________________________\ndense_1 (Dense) (None, 4) 1028 \n=================================================================\nTotal params: 95,652\nTrainable params: 95,652\nNon-trainable params: 0\n_________________________________________________________________\n" ], [ "agent.model_target.summary()", "Model: \"sequential\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nconv2d (Conv2D) (None, 20, 20, 32) 8224 \n_________________________________________________________________\nconv2d_1 (Conv2D) (None, 9, 9, 64) 32832 \n_________________________________________________________________\nmax_pooling2d (MaxPooling2D) (None, 4, 4, 64) 0 \n_________________________________________________________________\nconv2d_2 (Conv2D) (None, 2, 2, 64) 36928 \n_________________________________________________________________\nmax_pooling2d_1 (MaxPooling2 (None, 1, 1, 64) 0 \n_________________________________________________________________\nflatten (Flatten) (None, 64) 0 \n_________________________________________________________________\ndense (Dense) (None, 256) 16640 \n_________________________________________________________________\ndense_1 (Dense) (None, 4) 1028 \n=================================================================\nTotal params: 95,652\nTrainable params: 95,652\nNon-trainable params: 0\n_________________________________________________________________\n" ], [ "scores = agent.train()", "[Episode 0] - Average Score: 0.0.\n[Episode 10] - Average Score: 0.09090909090909091.\n[Episode 20] - Average Score: 0.14285714285714285.\n" ], [ "import matplotlib.pyplot as plt\nplt.plot(scores)\nplt.show()", "_____no_output_____" ], [ 
"agent.env.close()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb9a2640ef027a0c5fb5d95b0597c433da39d5ea
851
ipynb
Jupyter Notebook
results-analysis.ipynb
humblef00ls/FinQA-new
ddef84d4dc397f5e14cb1623bc86dac133a8821c
[ "MIT" ]
null
null
null
results-analysis.ipynb
humblef00ls/FinQA-new
ddef84d4dc397f5e14cb1623bc86dac133a8821c
[ "MIT" ]
null
null
null
results-analysis.ipynb
humblef00ls/FinQA-new
ddef84d4dc397f5e14cb1623bc86dac133a8821c
[ "MIT" ]
null
null
null
16.686275
56
0.502938
[ [ [ "\n", "_____no_output_____" ], [ "NAED = load_json(\"./datasets/Fin-QA/train.json\")", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code" ] ]
cb9a2b371fbc04e57ea5f6034be52e4959aee159
27,916
ipynb
Jupyter Notebook
.ipynb_checkpoints/L05-Tidy_Data-Lesson-checkpoint.ipynb
shresthasrijana099/Data-Analytics-With-Python
09ca484816b44dfe5857d36a0401746871ae7d9c
[ "Apache-2.0" ]
null
null
null
.ipynb_checkpoints/L05-Tidy_Data-Lesson-checkpoint.ipynb
shresthasrijana099/Data-Analytics-With-Python
09ca484816b44dfe5857d36a0401746871ae7d9c
[ "Apache-2.0" ]
null
null
null
.ipynb_checkpoints/L05-Tidy_Data-Lesson-checkpoint.ipynb
shresthasrijana099/Data-Analytics-With-Python
09ca484816b44dfe5857d36a0401746871ae7d9c
[ "Apache-2.0" ]
null
null
null
44.311111
594
0.614379
[ [ [ "# Lesson 5: Tidy Data\n\n*Learn to prepare data for visualization and analytics.*\n", "_____no_output_____" ], [ "## Instructions\nThis tutorial provides step-by-step training divided into numbered sections. The sections often contain embeded exectable code for demonstration. This tutorial is accompanied by a practice notebook: [L05-Tidy_Data-Practice.ipynb](./L05-Tidy_Data-Practice.ipynb). \n\nThroughout this tutorial sections labeled as \"Tasks\" are interspersed and indicated with the icon: ![Task](http://icons.iconarchive.com/icons/sbstnblnd/plateau/16/Apps-gnome-info-icon.png). You should follow the instructions provided in these sections by performing them in the practice notebook. When the tutorial is completed you can turn in the final practice notebook. ", "_____no_output_____" ], [ "## Introduction\nThe purpose of this assignment is to learn and practice with preparing tidy datasets. Often data we are asked to analyze is provided to us in formats that are not easy to visualize or analyze. Many visualization tools such as Seaborn or analytical tools such as supervised machine learning libraries expect data to be tidied. It is important to know what \"tidy\" data is, how to reformat a data into a tidy format, and to organize our own scientific data to help ourselves and others analyze it.\n\n**What are \"tidy\" datasets?**\n\n> Tidy datasets are easy to manipulate, model and visualize, and have a specific structure: each variable is a column, each observation is a row, and each type of observational unit is a table.\n\n\\- Wickham, Hadley. [Tidy Data](https://www.jstatsoft.org/article/view/v059i10). *Journal of Statistical Software*, 59.10 (2014): 1 - 23.\n\nBefore proceeding, fully read the [Tidy Data paper](https://www.jstatsoft.org/article/view/v059i10) (quoted above) by Hadley Wickham. Once finished, return here to reinforce the techniques introduced by that paper.", "_____no_output_____" ], [ "---\n## 1. Getting Started\nAs before, we import any needed packages at the top of our notebook. Let's import Numpy and Pandas:", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd", "_____no_output_____" ] ], [ [ "#### Task 1a: Setup\n<span style=\"float:right; margin-left:10px; clear:both;\">![Task](./media/task-icon.png)</span>\n\nImport the following packages:\n+ `pandas` as `pd`\n+ `numpy` as `np`\n", "_____no_output_____" ], [ "\n## 2. Tidy Rules\n### 2.1 Recognizing data components\nTo understand the rules for tidy data, we should define a few terms: 'variable', 'observation' and 'observational unit'. \n\n+ **variable**: \n > A variable is a characteristic of a unit being observed... to which a numerical measure or a category... can be assigned (e.g. income, age, weight, etc., and “occupation”, “industry”, “disease”, etc.\n \n \\- [OECD Glossary of Statistical terms -- Variable](https://stats.oecd.org/glossary/detail.asp?ID=2857)\n+ **observation**:\n > An observation is the value, at a particular period, of a particular variable\n \n \\- [OECD Glossary of Statistical terms -- Observation](https://stats.oecd.org/glossary/detail.asp?ID=6132)\n \n+ **observational unit**:\n > Observation units are those entities on which information is received and statistics are compiled.\n \n \\- [OECD Glossary of Statistical terms -- Observation Unit](https://stats.oecd.org/glossary/detail.asp?ID=1873)\n \nWith those definitions for reference, remember from the text that in order for a dataset to be considered \"tidy\" it must be organized into a table (i.e. 
Pandas DataFrame) and follow these rules:\n\n+ Each variable forms a unique column in the data frame.\n+ Each observation forms a row in the data frame.\n+ Each **type** of observational unit needs its own table.\n\nTo demonstrate the meaning of these rules, let's first examine a dataset described in the Tidy Data paper. Execute the following lines of code that manually create a Pandas data frame containing the example table:", "_____no_output_____" ] ], [ [ "# Create the data rows and columns.\ndata = [['John Smith', None, 2],\n ['Jane Doe', 16, 11],\n ['Mary Johnson', 3, 1]]\n# Create the list of labels for the data frame.\nheaders = ['', 'Treatment_A', 'Treatment_B']\n# Create the data frame.\npd.DataFrame(data, columns=headers)", "_____no_output_____" ] ], [ [ "This data is not in tidy format. Can you see why? ", "_____no_output_____" ], [ "#### Task 2a: Understand the data\n\n<span style=\"float:right; margin-left:10px; clear:both;\">![Task](http://icons.iconarchive.com/icons/sbstnblnd/plateau/96/Apps-gnome-info-icon.png)\n</span>\n\nUsing the table above, answer the following:\n\n- What are the variables?\n- What are the observations?\n- What is the observational unit?\n- Are the variables columns?\n- Are the observations rows? ", "_____no_output_____" ], [ "### 2.2 Spotting messy data\nThe author provides a few useful indicators that help us spot untidied data:\n1. Column headers are values, not variable names.\n2. Multiple variables are stored in one column.\n3. Variables are stored in both rows and columns.\n4. Multiple types of observational units are stored in the same table.\n5. A single observational unit is stored in multiple tables.", "_____no_output_____" ], [ "As an example, let's look at a data set that the author borrowed from the Pew Research Center that provides religious affiliation and yearly income ranges for individuals surveyed. Execute the following code which manually puts that data into a Pandas data frame: ", "_____no_output_____" ] ], [ [ "data = [['Agnostic',27,34,60,81,76,137],\n ['Atheist',12,27,37,52,35,70],\n ['Buddhist',27,21,30,34,33,58],\n ['Catholic',418,617,732,670,638,1116],\n ['Don\\'t know/refused',15,14,15,11,10,35],\n ['Evangelical Prot',575,869,1064,982,881,1486],\n ['Hindu',1,9,7,9,11,34],\n ['Historically Black Prot',228,244,236,238,197,223],\n ['Jehovah\\'s Witness',20,27,24,24,21,30],\n ['Jewish',19,19,25,25,30,95]]\nheaders = ['religion','<$10k','$10-20k','$20-30k','$30-40k','$40-50k','$50-75k']\nreligion = pd.DataFrame(data, columns=headers)\nreligion", "_____no_output_____" ] ], [ [ "#### Task 2b: Explain causes of untidiness\n\n<span style=\"float:right; margin-left:10px; clear:both;\">![Task](http://icons.iconarchive.com/icons/sbstnblnd/plateau/96/Apps-gnome-info-icon.png)\n</span>\n\nUsing the data set above:\n\n- Explain why the data above is untidy.\n- What are the variables? \n- What are the observations?", "_____no_output_____" ], [ "As another example, consider the data frame also provided by the author. For this data, the demographic groups are broken down by sex (m, f) and age (0–14, 15–25, 25–34, 35–44, 45–54, 55–64, 65+, or unknown). 
Execute the following:", "_____no_output_____" ] ], [ [ "data = [['AD', 2000, 0, 0, 1, 0, 0, 0, 0, None, None],\n ['AE', 2000, 2, 4, 4, 6, 5, 12, 10, None, 3],\n ['AF', 2000, 52, 228, 183, 149, 129, 94, 80, None, 93],\n ['AG', 2000, 0, 0, 0, 0, 0, 0, 1, None, 1],\n ['AL', 2000, 2, 19, 21, 14, 24, 19, 16, None, 3],\n ['AM', 2000, 2, 152, 130, 131, 63, 26, 21, None, 1],\n ['AN', 2000, 0, 0, 1, 2, 0, 0, 0, None, 0],\n ['AO', 2000, 186, 999, 1003, 912, 482, 312, 194, None, 247],\n ['AR', 2000, 97, 278, 594, 402, 419, 368, 330, None, 121],\n ['AS', 2000, None, None, None, None, 1, 1, None, None, None]]\nheaders = ['country', 'year', 'm014', 'm1524', 'm2534', 'm3544', 'm4554', 'm5564', \n 'm65', 'mu', 'f014']\ndemographics = pd.DataFrame(data, columns=headers)\ndemographics", "_____no_output_____" ] ], [ [ "#### Task 2c: Explain causes of untidiness\n\n<span style=\"float:right; margin-left:10px; clear:both;\">![Task](http://icons.iconarchive.com/icons/sbstnblnd/plateau/96/Apps-gnome-info-icon.png)\n</span>\n\nUsing the data set above:\n\n- Explain why the data above is untidy.\n- What are the variables? \n- What are the observations?", "_____no_output_____" ], [ "---\n## 3. Melting Data\nIn the Tidy paper, the author indicated that many times a data set can be corrected, or tidied, by first \"melting\" the data. Fortunately, Pandas provides the `pd.melt` function! See the [online documentation for pd.melt](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.melt.html) for full usage instructions. The author provides five different use cases where melting (and other transformations) can be performed:\n\n1. Column headers are values, not variable names.\n2. Multiple variables are stored in one column.\n3. Variables are stored in both rows and columns.\n4. Multiple types of observational units are stored in the same table.\n5. A single observational unit is stored in multiple tables.\n\nWe will explore only a few of these use cases. However, the techniques provided by these examples will help with melting for all of them.", "_____no_output_____" ], [ "### 3.1 Use Case #1: column headers are values\nTo demonstrate melting, let's create a sample dataframe that provides the progress level of different groups of individuals in a process that has two stages:", "_____no_output_____" ] ], [ [ "df = pd.DataFrame({'Group': {0: 'A', 1: 'B', 2: 'C'},\n 'Stage1': {0: 1, 1: 3, 2: 5},\n 'Stage2': {0: 2, 1: 4, 2: 6}})\ndf", "_____no_output_____" ] ], [ [ "It's clear that this dataset does not follow tidy rules. This is because information about the stage is housed in the header (i.e. two different stages: stage1 and stage2). To tidy this up, we should have a separate column that indicates the stage and a corresponding column that indicates the observation for each stage.\n\nThe first step to correct this is to melt the data. To melt a dataset using Pandas, you must indicate which columns in the current data frame should be kept as columns and which columns should be melted (also called **unpivoted**) to rows. This is indicated using two arguments provided to `pd.melt`:\n\n- `id_vars`: indicates the columns to use as identifier variables. These columns remain as columns in the dataframe after melting.\n- `value_vars`: indicates the columns to melt (unpivot). If not specified, then all columns that are not set as `id_vars` are used. 
\n - The column header becomes a value in a new column\n - The value within the original column is matched with the header value in an adjacent column.\n\nAs an example, let's melt the example dataframe:", "_____no_output_____" ] ], [ [ "df2 = pd.melt(df, id_vars=['Group'], value_vars=['Stage1', 'Stage2'])\ndf2", "_____no_output_____" ] ], [ [ "Observe that the new column labels named 'variable' and 'value' do not indicate what data the column contains. We can either set these manually using:\n\n```python\ndf2.columns = ['Group', 'Stage', 'Level']\n```\n\nOr, we can provide the new labels when we melt the data using the `var_name` and `value_name` arguments:", "_____no_output_____" ] ], [ [ "df2 = pd.melt(df, id_vars=['Group'], value_vars=['Stage1', 'Stage2'],\n var_name='Stage', value_name='Level')\ndf2", "_____no_output_____" ] ], [ [ "#### Task 3a: Melt data, use case #1\n\n<span style=\"float:right; margin-left:10px; clear:both;\">![Task](http://icons.iconarchive.com/icons/sbstnblnd/plateau/96/Apps-gnome-info-icon.png)\n</span>\n\nUsing the `pd.melt` function, melt the demographics data introduced in section 2. Be sure to:\n- Set the column headers correctly. \n- Order by country \n- Print the first 10 lines of the resulting melted dataset.", "_____no_output_____" ], [ "### 3.2 Use Case #2: multiple variables stored in one column\nSometimes, melting the data is not enough. Consider the demographics example where the sex and the age range are combined into a single column label. In Task 3a we melted that dataset:\n\n<table>\n <tr><th>country</th><th>year</th><th>age</th><th>freq</th></tr>\n <tr><td>AD</td><td>2000</td><td>m014</td><td>0</td></tr>\n <tr><td>AD</td><td>2000</td><td>m5564</td><td>0</td></tr>\n <tr><td>AD</td><td>2000</td><td>m3544</td><td>0</td></tr>\n <tr><td>AD</td><td>2000</td><td>m65</td><td>0</td></tr>\n <tr><td>AD</td><td>2000</td><td>m2534</td><td>1</td></tr>\n <tr><td>AD</td><td>2000</td><td>mu</td><td>None</td></tr>\n <tr><td>AD</td><td>2000</td><td>m1524</td><td>0</td></tr>\n <tr><td>AD</td><td>2000</td><td>f014</td><td>NaN</td></tr>\n <tr><td>AD</td><td>2000</td><td>m4554</td><td>0</td></tr>\n <tr><td>AE</td><td>2000</td><td>m5564</td><td>12</td></tr>\n</table>\n\nWe need to split that `age` column into three different columns corresponding to the sex, minimum age and maximum age. To do this, we can use the following line of code:\n\n```Python\ntemp_df = melted_df[\"age\"].str.extract(\"(\\D)(\\d+)(\\d{2})\") \n\n```\nRemember that Pandas provides a [pandas.Series.str.extract](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.str.extract.html) function for manipulating the string values of a Series, and each column in a Pandas dataframe is a series. We can use this function to break apart the value into three separate columns.\n\nObserve the argument provided to the `.str.extract` function: `(\\D)(\\d+)(\\d{2})`. This type of string is called a regular expression (RE). We will not cover regular expressions in detail, but they are a powerful method for parsing strings to either match elements of the string or to split them. An [introduction to REs](https://docs.python.org/3.4/howto/regex.html#regex-howto) for Python and [a full syntax description](https://docs.python.org/3.4/library/re.html#regular-expression-syntax) is available online. But here is a short explanation for the elements of the RE above:\n\n+ `(\\D)`: Matches any single character which is not a digit. 
This corresponds to the sex: 'f' or 'm'.\n+ `(\d+)`: Matches one or more digits. This corresponds to the minimum age, which may be one or more digits.\n+ `(\d{2})`: Matches exactly two digits. This requires that the last two digits are the max age.\n\nLet's try it and see how it works:", "_____no_output_____" ] ], [ [ "# Melt the demographics dataset and sort by country:\nmelted_df = pd.melt(demographics, id_vars=[\"country\", \"year\"],\n var_name=\"age\", value_name=\"freq\")\nmelted_df = melted_df.sort_values(by=[\"country\"])\n\n# Split 'age' column into a new dataframe containing the three components: sex, \n# minimum age and maximum age.\ntemp_df = melted_df[\"age\"].str.extract(\"(\\D)(\\d+)(\\d{2})\")\ntemp_df.columns = ['sex', 'min_age', 'max_age']\ntemp_df.head(10)", "_____no_output_____" ] ], [ [ "### 3.3 Use Case #3: variables are in both rows and columns\n\nConsider the following dataset which contains the daily weather records for five months in 2010 for the MX17004 weather station in Mexico. Each day of the month has its own column (e.g. d1, d2, d3, etc.). The example data only provides the first 8 days:", "_____no_output_____" ] ], [ [ "data = [['MX17004',2010,1,'tmax',None,None,None,None,None,None,None,None],\n ['MX17004',2010,1,'tmin',None,None,None,None,None,None,None,None],\n ['MX17004',2010,2,'tmax',None,27.3,24.1,None,None,None,None,None],\n ['MX17004',2010,2,'tmin',None,14.4,14.4,None,None,None,None,None],\n ['MX17004',2010,3,'tmax',None,None,None,None,32.1,None,None,None],\n ['MX17004',2010,3,'tmin',None,None,None,None,14.2,None,None,None],\n ['MX17004',2010,4,'tmax',None,None,None,None,None,None,None,None],\n ['MX17004',2010,4,'tmin',None,None,None,None,None,None,None,None],\n ['MX17004',2010,5,'tmax',None,None,None,None,None,None,None,None],\n ['MX17004',2010,5,'tmin',None,None,None,None,None,None,None,None]]\nheaders = ['id','year','month','element','d1','d2','d3','d4','d5','d6','d7','d8']\nweather = pd.DataFrame(data, columns=headers)\nweather", "_____no_output_____" ] ], [ [ "In this dataset there are two problems. First, we have a violation of use case #1 where observations are stored in the column labels for the days (e.g. d1, d2, d3, etc.). Second, we have a violation of use case #3. Observe that the 'element' column contains values that should be variables. We want the min and max temperatures for each day as columns. \n\nFirst, let's deal with the first problem by including `id`, `year`, `month` and `element` as `id_vars`. Observe that we will currently not try to tidy the `element` column. We want to remove the 'd' from the day so let's name the column `temp_day`:", "_____no_output_____" ] ], [ [ "melted_weather = pd.melt(weather, id_vars=['id', 'year', 'month', 'element'],\n var_name='temp_day', value_name='temperature')\nmelted_weather.head(10)", "_____no_output_____" ] ], [ [ "Now, let's create an actual date for the measurement rather than storing year, month and day separately. Let's add a new column to the dataframe named 'day' that uses a regular expression to remove the letter 'd' from the beginning of the day.", "_____no_output_____" ] ], [ [ "melted_weather[\"day\"] = melted_weather[\"temp_day\"].str.extract(\"d(\\d+)\", expand=False) \nmelted_weather.head(10)", "_____no_output_____" ] ], [ [ "We can now combine the year, month and day to form a proper date using the Pandas `apply` function. 
Execute the code below and observe the in-line comments for the meaning of each line of code:", "_____no_output_____" ] ], [ [ "# Import the datetime library.\nimport datetime\n\n# Our year, month, and day columns must be numeric. Currently they are \n# strings. We can use the Pandas \"apply\" function to convert these columns.\nmelted_weather[[\"year\", \"month\", \"day\"]] = melted_weather[[\"year\", \"month\", \"day\"]].apply(pd.to_numeric)\n\n# Convert temperature to numeric as well\nmelted_weather[[\"temperature\"]] = melted_weather[[\"temperature\"]].apply(pd.to_numeric)\n\n# We want to use the Python datetime function to combine the year, month and day\n# into a proper date. In Python this is a datetime object, not a string. So, we \n# need to use the apply function, just like above, to convert the dates. We'll\n# create a simple little function that we'll use to apply the datetime change.\ndef create_date(row):\n    return datetime.datetime(year=row[\"year\"], month=int(row[\"month\"]), day=row[\"day\"])\n\n# Apply the create_date function to each row of our data frame for the \"date\" column.\nmelted_weather[\"date\"] = melted_weather.apply(lambda row: create_date(row), axis=1)\n\n# Now take a look!\nmelted_weather.head(10)", "_____no_output_____" ] ], [ [ "Now that we have our date corrected, and properly melted, we can address the second problem: the `element` column containing variable names. To fix this we need to do the opposite of melting and we need to **pivot**. To do this we can use the [pd.pivot](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.pivot.html) function. This function takes the following arguments:\n\n- `index`: indicates the columns to use to make the new frame’s index. If None, uses the existing index.\n- `columns`: indicates the column to use whose values will become the new frame’s columns.\n- `values`: indicates the columns to use for populating new frame’s values.\n\nLet's use the `pivot_table` function, which is a generalization of the `pivot` function that handles duplicate values for one index/column pair. This will move the `element` column values to be new columns in our data frame. But first, we will also want to drop unwanted columns:", "_____no_output_____" ] ], [ [ "# Remove unwanted columns\nweather_min = melted_weather.drop(['year', 'month', 'day', 'temp_day'], axis=1)\nweather_min.head(10)", "_____no_output_____" ], [ "# Pivot and reset indexes. The pivot_table function automatically removes rows with null values.\nweather_tidy = weather_min.pivot_table(index=[\"id\",\"date\"], columns=\"element\", values=\"temperature\")\nweather_tidy.reset_index(drop=False, inplace=True)\nweather_tidy", "_____no_output_____" ] ], [ [ "The weather data is now tidy (although rather small). \n\nObserve that, in the code above, we called the function `reset_index` on the Tidy'ed weather data. If we do not do this, then the row indexes are not incremental within the data frame.", "_____no_output_____" ], [ "#### Task 3b: Practice with a new dataset\n\n<span style=\"float:right; margin-left:10px; clear:both;\">![Task](http://icons.iconarchive.com/icons/sbstnblnd/plateau/96/Apps-gnome-info-icon.png)\n</span>\n\nDownload the [PI_DataSet.txt](https://hivdb.stanford.edu/download/GenoPhenoDatasets/PI_DataSet.txt) file from [HIV Drug Resistance Database](https://hivdb.stanford.edu/pages/genopheno.dataset.html). 
Store the file in the same directory as the practice notebook for this assignment.\n\nHere is the meaning of the data columns:\n- SeqID: a numeric identifier for a unique HIV isolate protease sequence. Note: disruption of the protease inhibits HIV’s ability to reproduce.\n- The next 8 columns are identifiers for unique protease inhibitor class drugs. \n  - The values in these columns are the fold resistance over wild type (the HIV strain susceptible to all drugs).\n  - Fold change is the ratio of the drug concentration needed to inhibit the isolate.\n- The latter columns, with P as a prefix, are the positions of the amino acids in the protease. \n  - '-' indicates consensus.\n  - '.' indicates no sequence.\n  - '#' indicates an insertion. \n  - '~' indicates a deletion.\n  - '*' indicates a stop codon.\n  - a letter indicates a one-letter amino acid substitution. \n  - two or more amino acid codes indicate a mixture. \n\nImport this dataset into your notebook, view the top few rows of the data and respond to these questions:\n\n- What are the variables? \n- What are the observations? \n- What are the values? ", "_____no_output_____" ], [ "#### Task 3c: Practice with a new dataset Part 2\n\n<span style=\"float:right; margin-left:10px; clear:both;\">![Task](http://icons.iconarchive.com/icons/sbstnblnd/plateau/96/Apps-gnome-info-icon.png)\n</span>\n\nUsing the data retrieved from task 3b, generate a data frame containing a Tidy’ed set of values for drug concentration fold change. Be sure to:\n\n- Set the column names as ‘SeqID’, ‘Drug’ and ‘Fold_change’.\n- Order the data frame first by sequence ID and then by Drug name.\n- Reset the row indexes.\n- Display the first 10 elements.\n\nOne possible approach is sketched below.", "_____no_output_____" ] ] ]
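Not part of the original notebook: a minimal, hedged sketch of one way to approach Task 3c. It assumes the HIVDB file is tab-delimited, that the identifier column is named `SeqID`, and that the position columns all start with `P` (as described above); the exact column names in the downloaded file should be checked first.

```python
import pandas as pd

# Read the tab-delimited HIVDB file (assumed to sit next to the notebook).
pi = pd.read_csv("PI_DataSet.txt", sep="\t")

# The drug columns are everything that is not the sequence ID and not a
# position column (position columns are assumed to start with 'P').
drug_cols = [c for c in pi.columns if c != "SeqID" and not c.startswith("P")]

# Melt the wide drug columns into tidy (SeqID, Drug, Fold_change) rows.
tidy = pd.melt(pi, id_vars=["SeqID"], value_vars=drug_cols,
               var_name="Drug", value_name="Fold_change")

# Order by sequence ID, then by drug name, and reset the row index.
tidy = tidy.sort_values(by=["SeqID", "Drug"]).reset_index(drop=True)
tidy.head(10)
```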
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ] ]
cb9a2c35eb0cb734744f10b654cd2465f54a2095
115,257
ipynb
Jupyter Notebook
Version 1/Prediction.ipynb
Ray-Young/Industries-Distribution-Analyzer-and-predicter
0e9f3bbb43720a552fa2e015becf28662605623f
[ "MIT" ]
null
null
null
Version 1/Prediction.ipynb
Ray-Young/Industries-Distribution-Analyzer-and-predicter
0e9f3bbb43720a552fa2e015becf28662605623f
[ "MIT" ]
null
null
null
Version 1/Prediction.ipynb
Ray-Young/Industries-Distribution-Analyzer-and-predicter
0e9f3bbb43720a552fa2e015becf28662605623f
[ "MIT" ]
null
null
null
56.086131
39,384
0.519144
[ [ [ "import os\nimport pandas as pd\nimport time\nimport statsmodels.api as sm\nimport sklearn.utils as utils\nimport matplotlib.pyplot as plt\n%matplotlib inline", "_____no_output_____" ], [ "start_time = time.time()\nMA_location = {\"Greater_Boston_Area\" : 0,\n \"Salem\" : 0, \n \"Plymouth\" : 0, \n \"Waltham\" : 0, \n \"Framingham\" : 0,\n \"Worcester\" : 0,\n \"Lexington\" : 0,\n \"Danvers\" : 0,\n }\nGreater_Boston_Area = {\"Boston\" : 0, \n \"Providence\" : 0, \n \"Lowell\" : 0, \n \"Cambridge\" : 0, \n \"Quincy\" : 0,\n \"Newton\" : 0\n }\n#A_cities = [\"Boston\",\"Worcester\",\"Salem\",\"Plymouth\",\"Newton\",\"Waltham\",\"Framingham\"]\ndata = pd.DataFrame(columns = [\"Company\",\"Title\",\"Location\",\"Rating\",\"Work/Life Balance\",\"Benefit\",\"Security\",\"Culture\"])\nprint(MA_cities)\n#path = os.getcwd()+str(\"/Company\")\n#print(\"Path is :\",path)\n#index = 0\n#for file in os.listdir(path):\n# f = open(path+\"/\"+file,'r')\n# df = pd.read_csv(f)\n# for i in range(len(df.index)):\n# if df[\"Location\"][i] in MA_cities:\n# data.loc[index] = df.loc[i]\n# index = index+1\npath = os.getcwd()\nprint(\"Path is :\", path)\nf = open(\"Merged_Company_Information.csv\",'r')\n#f = open(path+\"/Company/Accion-International.csv\",'r')\ndf = pd.read_csv(f)\nindex = 0\nfor i in range(len(df.index)):\n if df[\"Location\"][i] in MA_location.keys():\n data.loc[index] = df.loc[i]\n index = index+1\n elif df[\"Location\"][i] in Greater_Boston_Area.keys():\n data.loc[index] = df.loc[i]\n data.set_value(index,\"Location\",\"Greater_Boston_Area\")\n #ata.loc[index][\"Location\",copy = False]=str(\"Greater_Boston_Area\")\n index = index+1\nprint(\"Done!!!-------- %s seconds--------\" % (time.time()-start_time))", "['Boston', 'Worcester', 'Salem', 'Plymouth', 'Newton', 'Waltham', 'Framingham']\nPath is : /Users/shuhanliu/Documents/CS505/project_new/Industries-Distribution-Analyzer-and-predicter\nDone!!!-------- 36.52635407447815 seconds--------\n" ], [ "data", "_____no_output_____" ], [ "data.to_csv(\"collected_data.csv\")", "_____no_output_____" ], [ "def findMean(lst):\n total = 0 \n num = 0\n for i in lst:\n if i!= \"none\":\n total = total + int(i)\n num = num + 1\n avg = float(total)/float(num)\n return avg", "_____no_output_____" ], [ "#normalize the data:\nstart_time = time.time()\ndata_list = []\navg_work = findMean(data[\"Work/Life Balance\"])\navg_benefit = findMean(data[\"Benefit\"])\navg_security = findMean(data[\"Security\"])\navg_culture = findMean(data[\"Culture\"])\nfor i in range(len(data.index)):\n temp = [0,0,0,0,0,0,0]\n if data[\"Title\"][i].find(\"Engineer\")!=-1:\n temp[0] = 1\n elif data[\"Title\"][i].find(\"Analyst\")!=-1:\n temp[0] = 2\n elif data[\"Title\"][i].find(\"Consultant\")!=-1:\n temp[0] = 3\n elif data[\"Title\"][i].find(\"Writer\")!=-1:\n temp[0] = 4\n elif data[\"Title\"][i].find(\"Account\")!=-1:\n temp[0] = 5\n elif data[\"Title\"][i].find(\"Marketing\")!=-1:\n temp[0] = 6\n elif data[\"Title\"][i].find(\"Research\")!=-1:\n temp[0] = 7\n elif data[\"Title\"][i].find(\"Clinic\")!=-1:\n temp[0] = 8\n elif data[\"Title\"][i].find(\"Develop\")!=-1:\n temp[0] = 9\n else:\n temp[0] = 0\n \n Location = data[\"Location\"][i]\n lst = list(MA_location.keys())\n for k in range(len(lst)):\n if lst[k] == Location:\n temp[1] = k+1\n temp[2] = data[\"Rating\"][i]\n if data[\"Work/Life Balance\"][i]!=\"none\":\n temp[3] = data[\"Work/Life Balance\"][i]\n else:\n temp[3] = avg_work\n if data[\"Benefit\"][i]!=\"none\":\n temp[4] = data[\"Benefit\"][i]\n else:\n temp[4] = 
avg_benefit\n if data[\"Security\"][i]!=\"none\":\n temp[5] = data[\"Security\"][i]\n else:\n temp[5] = avg_security\n if data[\"Culture\"][i]!=\"none\":\n temp[6] = data[\"Culture\"][i]\n else:\n temp[6] = avg_culture\n data_list.append(temp)\nprint(\"Done!!!-------- %s seconds--------\" % (time.time()-start_time))", "Done!!!-------- 2.0568978786468506 seconds--------\n" ], [ "df2 = pd.DataFrame(data_list,columns = [\"Category\",\"Location\",\"Rating\",\"Work/Life Balance\",\"Benefit\",\"Security\",\"Culture\"])", "_____no_output_____" ], [ "df2", "_____no_output_____" ], [ "X = df2[[\"Category\",\"Location\",\"Work/Life Balance\",\"Benefit\",\"Security\",\"Culture\"]]\nY = df2[\"Rating\"]\nplt.scatter(range(len(Y)), Y, c=\"slategray\", alpha=0.3, linewidths=0.2)", "_____no_output_____" ], [ "X, Y = utils.shuffle(X, Y, random_state=1)\nmodel = sm.OLS(Y.astype(float), X.astype(float))\nresult1 = model.fit()\nprint(result1.summary())", "                            OLS Regression Results                            \n==============================================================================\nDep. Variable:                 Rating   R-squared:                       0.928\nModel:                            OLS   Adj. R-squared:                  0.928\nMethod:                 Least Squares   F-statistic:                 2.110e+04\nDate:                Tue, 13 Dec 2016   Prob (F-statistic):               0.00\nTime:                        16:23:54   Log-Likelihood:                -14932.\nNo. Observations:                9771   AIC:                         2.988e+04\nDf Residuals:                    9765   BIC:                         2.992e+04\nDf Model:                           6                                         \nCovariance Type:            nonrobust                                         \n=====================================================================================\n                        coef    std err          t      P>|t|      [95.0% Conf. Int.]\n-------------------------------------------------------------------------------------\nCategory              0.0467      0.005      9.394      0.000         0.037     0.056\nLocation              0.0588      0.005     11.750      0.000         0.049     0.069\nWork/Life Balance    -0.0128      0.015     -0.837      0.403        -0.043     0.017\nBenefit               0.4001      0.014     28.893      0.000         0.373     0.427\nSecurity              0.2859      0.016     18.161      0.000         0.255     0.317\nCulture               0.2052      0.016     12.484      0.000         0.173     0.237\n==============================================================================\nOmnibus:                      417.950   Durbin-Watson:                   1.995\nProb(Omnibus):                  0.000   Jarque-Bera (JB):              490.096\nSkew:                          -0.491   Prob(JB):                    3.77e-107\nKurtosis:                       3.489   Cond. No.                         15.3\n==============================================================================\n\nWarnings:\n[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.\n" ], [ "good = []\nfor i in range(len(df2[\"Rating\"])):\n if df2[\"Rating\"][i] >3:\n good.append(1)\n else:\n good.append(0)\n\ncols = [\"Category\",\"Location\",\"Work/Life Balance\",\"Benefit\",\"Security\",\"Culture\"]\n#df2.columns[:2]\nlogit = sm.Logit(good,df2[cols].astype(float))\n\n#fit the model\nresult2 = logit.fit()\nresult2.summary()", "Optimization terminated successfully.\n         Current function value: 0.574315\n         Iterations 5\n" ], [ "#According to the results provided by both \n#Linear Regression and Logistic Regression,\n#Benefit always has a larger influence\n#on rating among all attributes.", "_____no_output_____" ] ] ]
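Not part of the original notebook: the findMean() helper and the per-row imputation loop above can be expressed with vectorized pandas. A minimal, hedged sketch on a tiny stand-in frame (the real column names come from the scraped data above):

```python
import numpy as np
import pandas as pd

# Tiny stand-in frame with the same shape of problem as the scraped data:
# numeric ratings stored as strings, with "none" marking missing values.
demo = pd.DataFrame({
    "Benefit": ["4", "none", "5"],
    "Security": ["3", "2", "none"],
})

# Treat "none" as NaN, coerce to float, then mean-impute per column:
# the same effect as findMean() plus the row-by-row loop above.
numeric = demo.replace("none", np.nan).astype(float)
demo_imputed = numeric.fillna(numeric.mean())
print(demo_imputed)
```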
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb9a32cb2b659b561149f867905c1af9a0b90bb8
375,008
ipynb
Jupyter Notebook
scripts/2018-summer/Notebook_scripts/MutDataVis.ipynb
amlalejini/Aagos
bb82da3a177fe467a912c9f44f516b445b4eba3b
[ "MIT" ]
1
2018-07-27T22:09:22.000Z
2018-07-27T22:09:22.000Z
scripts/2018-summer/Notebook_scripts/MutDataVis.ipynb
amlalejini/Aagos
bb82da3a177fe467a912c9f44f516b445b4eba3b
[ "MIT" ]
19
2018-06-06T19:46:15.000Z
2019-04-11T15:11:35.000Z
scripts/2018-summer/Notebook_scripts/MutDataVis.ipynb
amlalejini/Aagos
bb82da3a177fe467a912c9f44f516b445b4eba3b
[ "MIT" ]
1
2020-04-06T20:05:15.000Z
2020-04-06T20:05:15.000Z
854.232346
66,712
0.952721
[ [ [ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport glob\nimport sys\nimport argparse as argp", "_____no_output_____" ], [ "change_50_dat = pd.read_csv('/Users/leg2015/workspace/Aagos/Data/Mut_Treat_Change_50_CleanedDataStatFit.csv', index_col=\"update\", float_precision=\"high\")\nchange_0_dat = pd.read_csv('/Users/leg2015/workspace/Aagos/Data/Mut_Treat_Change_0_CleanedDataStatFit.csv', index_col=\"update\", float_precision=\"high\")\nmut_dat = pd.read_csv('/Users/leg2015/workspace/Aagos/Data/Change_Treat_f_.003_CleanedDataStatFit.csv', index_col=\"update\", float_precision=\"high\")", "_____no_output_____" ], [ "# max_gen_data = all_dat.loc[49000]\n# early_gen_data = all_dat.loc[10000]\nchange_50_max = change_50_dat.loc[50000]\nchange_0_max = change_0_dat.loc[50000]\nmut_max = mut_dat.loc[50000]", "_____no_output_____" ], [ "change_50_plot = sns.boxplot(y=max_gen_change_data[\"mean_coding_sites\"], x=\"change\", data=max_gen_change_data)\nplt.suptitle(\"gen 49000 mean coding sites \")\nplt.savefig(\"Change_m_.003_c_.01_f_.001_mean_coding_2.pdf\")", "_____no_output_____" ], [ "max_gen_fitness = max_gen_data.loc[:,[ 'max_fitness', 'c', 'm', 'f', 'replicate']]\nmax_gen_gene_len = max_gen_data.loc[:, ['max_gene_length', 'c', 'm', 'f', 'replicate']]\nmax_gen_overlap = max_gen_data.loc[:,[ 'max_overlap', 'c', 'm', 'f', 'replicate']]\nmax_gen_coding = max_gen_data.loc[:,[ 'max_coding_sites', 'c', 'm', 'f', 'replicate']]\nmax_gen_neutral = max_gen_data.loc[:,[ 'max_neutral_sites', 'c', 'm', 'f', 'replicate']]\nmax_gen_neighbor = max_gen_data.loc[:,[ 'max_neighbor_genes', 'c', 'm', 'f', 'replicate']]", "_____no_output_____" ], [ "facet = sns.FacetGrid(max_gen_gene_len, col=\"c\", row=\"m\",)\nfacet.map_dataframe(sns.boxplot, x=\"f\", y=\"max_gene_length\")\nfor axis in facet.axes.flat:\n axis.set_xlabel(\"f\")\nplt.subplots_adjust(top=0.95)\nfacet.fig.suptitle('Gen 49,000 max gene length') # can also get\nfacet.savefig(\"Mut_Rate_Low_max_genlen.pdf\")\n# TODO: figure early_gen_neighbor how to save pdfs to figure directory\n# also way so don't have to boilerplate would be nice", "_____no_output_____" ], [ "facet = sns.FacetGrid(early_gen_rep_gene_len, col=\"c\", row=\"m\",)\nfacet.map_dataframe(sns.boxplot, x=\"f\", y=\"genome_size\")\nfor axis in facet.axes.flat:\n axis.set_xlabel(\"f\")\nplt.subplots_adjust(top=0.95)\nfacet.fig.suptitle('Gen 10,000 genome size of representative org') # can also get\nfacet.savefig(\"early_gen_rep_gene_len.pdf\")", "_____no_output_____" ], [ "facet = sns.FacetGrid(max_gen_overlap, col=\"c\", row=\"m\",)\nfacet.map_dataframe(sns.boxplot, x=\"f\", y=\"mean_Overlap\")\nfor axis in facet.axes.flat:\n axis.set_xlabel(\"f\")\nplt.subplots_adjust(top=0.95)\nfacet.fig.suptitle('Gen 49,000 mean overlap') # can also get\nfacet.savefig(\"max_gen_mean_overlap.pdf\")", "_____no_output_____" ], [ "facet = sns.FacetGrid(early_gen_overlap, col=\"c\", row=\"m\",)\nfacet.map_dataframe(sns.boxplot, x=\"f\", y=\"mean_Overlap\")\nfor axis in facet.axes.flat:\n axis.set_xlabel(\"f\")\nplt.subplots_adjust(top=0.95)\nfacet.fig.suptitle('Gen 10,000 mean overlap') # can also get\nfacet.savefig(\"early_gen_mean_overlap.pdf\")", "_____no_output_____" ], [ "facet = sns.FacetGrid(early_gen_fitness, col=\"c\", row=\"m\",)\nfacet.map_dataframe(sns.boxplot, x=\"f\", y=\"mean_fitness\")\nfor axis in facet.axes.flat:\n axis.set_xlabel(\"f\")\nplt.subplots_adjust(top=0.95)\nfacet.fig.suptitle('Gen 10,000 mean fitness') # can also 
get\nfacet.savefig(\"early_gen_mean_fitness.pdf\")", "_____no_output_____" ], [ "facet = sns.FacetGrid(max_gen_fitness, col=\"c\", row=\"m\",)\nfacet.map_dataframe(sns.boxplot, x=\"f\", y=\"mean_fitness\")\n\n# for plot in plt.subplots():\n# print(plot.AxesSubplot)\n# print(facet.axes[4,0].get_yticklabels())\n# print(facet.axes[4,0].get_xticklabels())\n# print(\"\\n\")\n# labels = [\"meme1\", \"meme2\"]\nfor axis in facet.axes.flat:\n# locs, labels = axis.xticks()\n# axis.set_yticklabels(labels)\n axis.set_xlabel(\"f\")\n# _ = plt.setp(axis.get_yticklabels(), visible=True)\n# _ = plt.setp(axis.get_xticklabels(), visible=True)\n# axis.set_yticklabels(axis.get_yticklabels(), visible=True)\n# print(axis.get_xticklabels())\n# axis.set_xticklabels(axis.get_xticklabels(), visible=True)\n# axis.set_yticklabels(facet.axes[4,0].get_yticklabels(), visible=True)\n# axis.set_xticklabels(facet.axes[4,0].get_xticklabels(), visible=True)\nplt.subplots_adjust(hspace=0.3)\n\n# yticklabels = facet.axes[4,0].get_yticklabels()\n# xticklabels = facet.axes[4,0].get_xticklabels()\n# meme = []\n# mema = []\n# for ax in facet.axes[-1,:]:\n# xlabel = ax.get_xticklabels()\n# print(\"x lab \", xlabel)\n# meme.append(xlabel)\n \n# for ax in facet.axes[:,0]:\n# ylabel = ax.get_xticklabels()\n# print(\"y lab\", ylabel)\n# mema.append(ylabel)\n\n# for i in range(len(meme)):\n# for j in range(len(mema)):\n# facet.axes[j,i].set_xticklabels(\"meme\")\n# facet.axes[j,i].set_yticklabels(\"mema\")\n\n\n# for ax in facet.axes:\n# _ = plt.setp(ax.get_yticklabels(), visible=True)\n# _ = plt.setp(ax.get_xticklabels(), visible=True)\n\n \n# for ax in facet.axes:\n \n\n# plt.show()\nplt.subplots_adjust(top=0.95)\nfacet.fig.suptitle('Gen 49,000 mean fitness') # can also get\nfacet.savefig(\"max_gen_mean_fitness.pdf\")", "_____no_output_____" ], [ "facet = sns.FacetGrid(max_gen_fitness, col=\"c\", row=\"m\",)\nfacet.map_dataframe(sns.boxplot, x=\"f\", y=\"mean_fitness\")\n\n# for plot in plt.subplots():\n# print(plot.AxesSubplot)\n# print(facet.axes[4,0].get_yticklabels())\n# print(facet.axes[4,0].get_xticklabels())\n# print(\"\\n\")\n# labels = [\"meme1\", \"meme2\"]\nfor axis in facet.axes.flat:\n# locs, labels = axis.xticks()\n# axis.set_yticklabels(labels)\n axis.set_xlabel(\"f\")\n# _ = plt.setp(axis.get_yticklabels(), visible=True)\n# _ = plt.setp(axis.get_xticklabels(), visible=True)\n# axis.set_yticklabels(axis.get_yticklabels(), visible=True)\n# print(axis.get_xticklabels())\n# axis.set_xticklabels(axis.get_xticklabels(), visible=True)\n# axis.set_yticklabels(facet.axes[4,0].get_yticklabels(), visible=True)\n# axis.set_xticklabels(facet.axes[4,0].get_xticklabels(), visible=True)\nplt.subplots_adjust(hspace=0.3)\n\n# yticklabels = facet.axes[4,0].get_yticklabels()\n# xticklabels = facet.axes[4,0].get_xticklabels()\n# meme = []\n# mema = []\n# for ax in facet.axes[-1,:]:\n# xlabel = ax.get_xticklabels()\n# print(\"x lab \", xlabel)\n# meme.append(xlabel)\n \n# for ax in facet.axes[:,0]:\n# ylabel = ax.get_xticklabels()\n# print(\"y lab\", ylabel)\n# mema.append(ylabel)\n\n# for i in range(len(meme)):\n# for j in range(len(mema)):\n# facet.axes[j,i].set_xticklabels(\"meme\")\n# facet.axes[j,i].set_yticklabels(\"mema\")\n\n\n# for ax in facet.axes:\n# _ = plt.setp(ax.get_yticklabels(), visible=True)\n# _ = plt.setp(ax.get_xticklabels(), visible=True)\n\n \n# for ax in facet.axes:\n \n\nplt.show()\nfacet.savefig(\"max_gen__mean_fitness.pdf\")", "_____no_output_____" ], [ "for curr in group:\n plt.scatter((curr[1].m + curr[1].f + 
curr[1].c), curr[1].mean_fitness)\nplt.show()", "_____no_output_____" ], [ "playData = max_gen_fitness.iloc[0:5]\nplayData", "_____no_output_____" ], [ "memes = max_gen_fitness.iloc[20:25]\nmemes", "_____no_output_____" ], [ "plt.boxplot(playData.mean_fitness)\nplt.show()", "_____no_output_____" ], [ "# plt.boxplot(playData.mean_fitness)\n# plt.boxplot(memes.mean_fitness)\nsuperData = [playData.mean_fitness, memes.mean_fitness]\nplt.boxplot(superData)\nplt.show()", "_____no_output_____" ] ] ]
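Not part of the original notebook: the TODO above asks for a way to save PDFs into a figure directory and to avoid repeating the FacetGrid boilerplate. A minimal sketch of one possible helper, assuming a dataframe with the same 'c'/'m'/'f' treatment columns used throughout (the function name and `fig_dir` default are hypothetical):

```python
import os
import seaborn as sns
import matplotlib.pyplot as plt

def facet_boxplot(df, y, title, fname, fig_dir="figures", col="c", row="m", x="f"):
    """Wrap the FacetGrid/boxplot pattern repeated above and save a PDF."""
    facet = sns.FacetGrid(df, col=col, row=row)
    facet.map_dataframe(sns.boxplot, x=x, y=y)
    for axis in facet.axes.flat:
        axis.set_xlabel(x)
    plt.subplots_adjust(top=0.95)
    facet.fig.suptitle(title)
    # Save into a dedicated figure directory, creating it if needed.
    os.makedirs(fig_dir, exist_ok=True)
    facet.savefig(os.path.join(fig_dir, fname))
    return facet
```

With this helper, each plot above collapses to a single call, e.g. `facet_boxplot(max_gen_gene_len, "max_gene_length", "Gen 49,000 max gene length", "max_gen_genlen.pdf")` (arguments shown are illustrative).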
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb9a398167a2f0b09d6f9a08a09fb90c0d867202
44,824
ipynb
Jupyter Notebook
dlnd_tv_script_generation.ipynb
MaxRoecker/DLND_tv_script_generation
7ff0abe323914e46699794e7f0e9046055c369ae
[ "MIT" ]
null
null
null
dlnd_tv_script_generation.ipynb
MaxRoecker/DLND_tv_script_generation
7ff0abe323914e46699794e7f0e9046055c369ae
[ "MIT" ]
null
null
null
dlnd_tv_script_generation.ipynb
MaxRoecker/DLND_tv_script_generation
7ff0abe323914e46699794e7f0e9046055c369ae
[ "MIT" ]
null
null
null
34.400614
556
0.565054
[ [ [ "# TV Script Generation\nIn this project, you'll generate your own [Simpsons](https://en.wikipedia.org/wiki/The_Simpsons) TV scripts using RNNs. You'll be using part of the [Simpsons dataset](https://www.kaggle.com/wcukierski/the-simpsons-by-the-data) of scripts from 27 seasons. The Neural Network you'll build will generate a new TV script for a scene at [Moe's Tavern](https://simpsonswiki.com/wiki/Moe's_Tavern).\n## Get the Data\nThe data is already provided for you. You'll be using a subset of the original dataset. It consists of only the scenes in Moe's Tavern. This doesn't include other versions of the tavern, like \"Moe's Cavern\", \"Flaming Moe's\", \"Uncle Moe's Family Feed-Bag\", etc..", "_____no_output_____" ] ], [ [ "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nimport helper\n\ndata_dir = './data/simpsons/moes_tavern_lines.txt'\ntext = helper.load_data(data_dir)\n# Ignore notice, since we don't use it for analysing the data\ntext = text[81:]", "_____no_output_____" ] ], [ [ "## Explore the Data\nPlay around with `view_sentence_range` to view different parts of the data.", "_____no_output_____" ] ], [ [ "view_sentence_range = (0, 10)\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nimport numpy as np\n\nprint('Dataset Stats')\nprint('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()})))\nscenes = text.split('\\n\\n')\nprint('Number of scenes: {}'.format(len(scenes)))\nsentence_count_scene = [scene.count('\\n') for scene in scenes]\nprint('Average number of sentences in each scene: {}'.format(np.average(sentence_count_scene)))\n\nsentences = [sentence for scene in scenes for sentence in scene.split('\\n')]\nprint('Number of lines: {}'.format(len(sentences)))\nword_count_sentence = [len(sentence.split()) for sentence in sentences]\nprint('Average number of words in each line: {}'.format(np.average(word_count_sentence)))\n\nprint()\nprint('The sentences {} to {}:'.format(*view_sentence_range))\nprint('\\n'.join(text.split('\\n')[view_sentence_range[0]:view_sentence_range[1]]))", "Dataset Stats\nRoughly the number of unique words: 11492\nNumber of scenes: 262\nAverage number of sentences in each scene: 15.248091603053435\nNumber of lines: 4257\nAverage number of words in each line: 11.50434578341555\n\nThe sentences 0 to 10:\nMoe_Szyslak: (INTO PHONE) Moe's Tavern. Where the elite meet to drink.\nBart_Simpson: Eh, yeah, hello, is Mike there? Last name, Rotch.\nMoe_Szyslak: (INTO PHONE) Hold on, I'll check. (TO BARFLIES) Mike Rotch. Mike Rotch. Hey, has anybody seen Mike Rotch, lately?\nMoe_Szyslak: (INTO PHONE) Listen you little puke. One of these days I'm gonna catch you, and I'm gonna carve my name on your back with an ice pick.\nMoe_Szyslak: What's the matter Homer? You're not your normal effervescent self.\nHomer_Simpson: I got my problems, Moe. Give me another one.\nMoe_Szyslak: Homer, hey, you should not drink to forget your problems.\nBarney_Gumble: Yeah, you should only drink to enhance your social skills.\n\n\n" ] ], [ [ "## Implement Preprocessing Functions\nThe first thing to do to any dataset is preprocessing. Implement the following preprocessing functions below:\n- Lookup Table\n- Tokenize Punctuation\n\n### Lookup Table\nTo create a word embedding, you first need to transform the words to ids. 
In this function, create two dictionaries:\n- Dictionary to go from the words to an id, we'll call `vocab_to_int`\n- Dictionary to go from the id to word, we'll call `int_to_vocab`\n\nReturn these dictionaries in the following tuple `(vocab_to_int, int_to_vocab)`", "_____no_output_____" ] ], [ [ "import numpy as np\nimport problem_unittests as tests\nfrom collections import Counter\n\n\ndef create_lookup_tables(text):\n    \"\"\"\n    Create lookup tables for vocabulary\n    :param text: The text of tv scripts split into words\n    :return: A tuple of dicts (vocab_to_int, int_to_vocab)\n    \"\"\"\n    word_counts = Counter(text)\n    sorted_vocab = sorted(word_counts, key=word_counts.get, reverse=True)\n    int_to_vocab = {i: word for (i, word) in enumerate(sorted_vocab)}\n    vocab_to_int = {word: i for (i, word) in int_to_vocab.items()}\n    return (vocab_to_int, int_to_vocab)\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_create_lookup_tables(create_lookup_tables)", "Tests Passed\n" ] ], [ [ "### Tokenize Punctuation\nWe'll be splitting the script into a word array using spaces as delimiters. However, punctuation marks like periods and exclamation marks make it hard for the neural network to distinguish between the word \"bye\" and \"bye!\".\n\nImplement the function `token_lookup` to return a dict that will be used to tokenize symbols like \"!\" into \"||Exclamation_Mark||\". Create a dictionary for the following symbols where the symbol is the key and value is the token:\n- Period ( . )\n- Comma ( , )\n- Quotation Mark ( \" )\n- Semicolon ( ; )\n- Exclamation mark ( ! )\n- Question mark ( ? )\n- Left Parentheses ( ( )\n- Right Parentheses ( ) )\n- Dash ( -- )\n- Return ( \\n )\n\nThis dictionary will be used to tokenize the symbols and add the delimiter (space) around it. This separates each symbol into its own word, making it easier for the neural network to predict the next word. Make sure you don't use a token that could be confused as a word. Instead of using the token \"dash\", try using something like \"||dash||\".", "_____no_output_____" ] ], [ [ "def token_lookup():\n    \"\"\"\n    Generate a dict to turn punctuation into a token.\n    :return: Tokenize dictionary where the key is the punctuation and the value is the token\n    \"\"\"\n    return {\n        '.' : '||Period||',\n        ',' : '||Comma||',\n        '\"' : '||Quotation_Mark||',\n        ';' : '||Semicolon||',\n        '!' : '||Exclamation_mark||',\n        '?' : '||Question_mark||',\n        '(' : '||Left_Parentheses||',\n        ')' : '||Right_Parentheses||',\n        '--' : '||Dash||',\n        '\\n' : '||Return||'}\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_tokenize(token_lookup)", "Tests Passed\n" ] ], [ [ "## Preprocess all the data and save it\nRunning the code cell below will preprocess all the data and save it to file.", "_____no_output_____" ] ], [ [ "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\n# Preprocess Training, Validation, and Testing Data\nhelper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables)", "_____no_output_____" ] ], [ [ "# Check Point\nThis is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. 
The preprocessed data has been saved to disk.", "_____no_output_____" ] ], [ [ "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nimport helper\nimport numpy as np\nimport problem_unittests as tests\n\nint_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()", "_____no_output_____" ] ], [ [ "## Build the Neural Network\nYou'll build the components necessary to build an RNN by implementing the following functions below:\n- get_inputs\n- get_init_cell\n- get_embed\n- build_rnn\n- build_nn\n- get_batches\n\n### Check the Version of TensorFlow and Access to GPU", "_____no_output_____" ] ], [ [ "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nfrom distutils.version import LooseVersion\nimport warnings\nimport tensorflow as tf\n\n# Check TensorFlow Version\nassert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer'\nprint('TensorFlow Version: {}'.format(tf.__version__))\n\n# Check for a GPU\nif not tf.test.gpu_device_name():\n    warnings.warn('No GPU found. Please use a GPU to train your neural network.')\nelse:\n    print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))", "TensorFlow Version: 1.0.0\nDefault GPU Device: /gpu:0\n" ] ], [ [ "### Input\nImplement the `get_inputs()` function to create TF Placeholders for the Neural Network. It should create the following placeholders:\n- Input text placeholder named \"input\" using the [TF Placeholder](https://www.tensorflow.org/api_docs/python/tf/placeholder) `name` parameter.\n- Targets placeholder\n- Learning Rate placeholder\n\nReturn the placeholders in the following tuple `(Input, Targets, LearningRate)`", "_____no_output_____" ] ], [ [ "def get_inputs():\n    \"\"\"\n    Create TF Placeholders for input, targets, and learning rate.\n    :return: Tuple (input, targets, learning rate)\n    \"\"\"\n    # TODO: Implement Function\n    input = tf.placeholder(dtype=tf.int32, shape=[None, None], name='input')\n    targets = tf.placeholder(dtype=tf.int32, shape=[None, None], name='targets')\n    learning_rate = tf.placeholder(dtype=tf.float32, name='learning_rate')\n    return (input, targets, learning_rate)\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_get_inputs(get_inputs)", "Tests Passed\n" ] ], [ [ "### Build RNN Cell and Initialize\nStack one or more [`BasicLSTMCells`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/BasicLSTMCell) in a [`MultiRNNCell`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/MultiRNNCell).\n- The RNN size should be set using `rnn_size`\n- Initialize Cell State using the MultiRNNCell's [`zero_state()`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/MultiRNNCell#zero_state) function\n    - Apply the name \"initial_state\" to the initial state using [`tf.identity()`](https://www.tensorflow.org/api_docs/python/tf/identity)\n\nReturn the cell and initial state in the following tuple `(Cell, InitialState)`", "_____no_output_____" ] ], [ [ "def get_init_cell(batch_size, rnn_size):\n    \"\"\"\n    Create an RNN Cell and initialize it.\n    :param batch_size: Size of batches\n    :param rnn_size: Size of RNNs\n    :return: Tuple (cell, initialize state)\n    \"\"\"\n    layers_count = 2\n    cell = tf.contrib.rnn.BasicLSTMCell(rnn_size)\n    multi = tf.contrib.rnn.MultiRNNCell([cell] * layers_count)\n    initial_state = tf.identity(\n        input=multi.zero_state(batch_size, tf.float32), name='initial_state')\n    return (multi, initial_state)\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS 
LINE\n\"\"\"\ntests.test_get_init_cell(get_init_cell)", "Tests Passed\n" ] ], [ [ "### Word Embedding\nApply embedding to `input_data` using TensorFlow. Return the embedded sequence.", "_____no_output_____" ] ], [ [ "def get_embed(input_data, vocab_size, embed_dim):\n \"\"\"\n Create embedding for <input_data>.\n :param input_data: TF placeholder for text input.\n :param vocab_size: Number of words in vocabulary.\n :param embed_dim: Number of embedding dimensions\n :return: Embedded input.\n \"\"\"\n embedding = tf.Variable(\n initial_value=tf.random_uniform(\n dtype=tf.float32,\n shape=(vocab_size, embed_dim),\n minval=-1.0,\n maxval=1.0))\n embed = tf.nn.embedding_lookup(embedding, input_data)\n return embed\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_get_embed(get_embed)", "Tests Passed\n" ] ], [ [ "### Build RNN\nYou created a RNN Cell in the `get_init_cell()` function. Time to use the cell to create a RNN.\n- Build the RNN using the [`tf.nn.dynamic_rnn()`](https://www.tensorflow.org/api_docs/python/tf/nn/dynamic_rnn)\n - Apply the name \"final_state\" to the final state using [`tf.identity()`](https://www.tensorflow.org/api_docs/python/tf/identity)\n\nReturn the outputs and final_state state in the following tuple `(Outputs, FinalState)` ", "_____no_output_____" ] ], [ [ "def build_rnn(cell, inputs):\n \"\"\"\n Create a RNN using a RNN Cell\n :param cell: RNN Cell\n :param inputs: Input text data\n :return: Tuple (Outputs, Final State)\n \"\"\"\n outputs, final_state = tf.nn.dynamic_rnn(\n cell=cell, inputs=inputs, dtype=tf.float32)\n final_state = tf.identity(input=final_state, name='final_state')\n return (outputs, final_state)\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_build_rnn(build_rnn)", "Tests Passed\n" ] ], [ [ "### Build the Neural Network\nApply the functions you implemented above to:\n- Apply embedding to `input_data` using your `get_embed(input_data, vocab_size, embed_dim)` function.\n- Build RNN using `cell` and your `build_rnn(cell, inputs)` function.\n- Apply a fully connected layer with a linear activation and `vocab_size` as the number of outputs.\n\nReturn the logits and final state in the following tuple (Logits, FinalState) ", "_____no_output_____" ] ], [ [ "def build_nn(cell, rnn_size, input_data, vocab_size, embed_dim):\n \"\"\"\n Build part of the neural network\n :param cell: RNN cell\n :param rnn_size: Size of rnns\n :param input_data: Input data\n :param vocab_size: Vocabulary size\n :param embed_dim: Number of embedding dimensions\n :return: Tuple (Logits, FinalState)\n \"\"\"\n embed = get_embed(input_data, vocab_size, embed_dim)\n outputs, final_state = build_rnn(cell, embed)\n logits = tf.contrib.layers.fully_connected(\n inputs=outputs,\n num_outputs=vocab_size,\n activation_fn=None,\n weights_initializer=tf.truncated_normal_initializer(stddev= 0.1),\n biases_initializer=tf.zeros_initializer())\n return logits, final_state\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_build_nn(build_nn)", "Tests Passed\n" ] ], [ [ "### Batches\nImplement `get_batches` to create batches of input and targets using `int_text`. The batches should be a Numpy array with the shape `(number of batches, 2, batch size, sequence length)`. 
Each batch contains two elements:\n- The first element is a single batch of **input** with the shape `[batch size, sequence length]`\n- The second element is a single batch of **targets** with the shape `[batch size, sequence length]`\n\nIf you can't fill the last batch with enough data, drop the last batch.\n\nFor example, `get_batches([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20], 3, 2)` would return a Numpy array of the following:\n```\n[\n  # First Batch\n  [\n    # Batch of Input\n    [[ 1  2], [ 7  8], [13 14]]\n    # Batch of targets\n    [[ 2  3], [ 8  9], [14 15]]\n  ]\n\n  # Second Batch\n  [\n    # Batch of Input\n    [[ 3  4], [ 9 10], [15 16]]\n    # Batch of targets\n    [[ 4  5], [10 11], [16 17]]\n  ]\n\n  # Third Batch\n  [\n    # Batch of Input\n    [[ 5  6], [11 12], [17 18]]\n    # Batch of targets\n    [[ 6  7], [12 13], [18  1]]\n  ]\n]\n```\n\nNotice that the last target value in the last batch is the first input value of the first batch. In this case, `1`. This is a common technique used when creating sequence batches, although it is rather unintuitive.", "_____no_output_____" ] ], [ [ "def get_batches(int_text, batch_size, seq_length):\n    \"\"\"\n    Return batches of input and target\n    :param int_text: Text with the words replaced by their ids\n    :param batch_size: The size of batch\n    :param seq_length: The length of sequence\n    :return: Batches as a Numpy array\n    \"\"\"\n    number_of_batches = int(len(int_text) / (batch_size * seq_length))\n\n    input_batch = np.array(int_text[: number_of_batches * batch_size * seq_length])\n    target_batch = np.array(int_text[1: number_of_batches * batch_size * seq_length + 1])\n    target_batch[-1] = int_text[0]\n\n    input_batch = np.split(input_batch.reshape(batch_size, -1), number_of_batches, axis = 1)\n    target_batch = np.split(target_batch.reshape(batch_size, -1), number_of_batches, axis = 1)\n    \n    return np.array(list(zip(input_batch, target_batch)))\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_get_batches(get_batches)", "Tests Passed\n" ] ], [ [ "## Neural Network Training\n### Hyperparameters\nTune the following parameters:\n\n- Set `num_epochs` to the number of epochs.\n- Set `batch_size` to the batch size.\n- Set `rnn_size` to the size of the RNNs.\n- Set `embed_dim` to the size of the embedding.\n- Set `seq_length` to the length of sequence.\n- Set `learning_rate` to the learning rate.\n- Set `show_every_n_batches` to the number of batches after which the neural network should print progress.", "_____no_output_____" ] ], [ [ "# Number of Epochs\nnum_epochs = 256\n# Batch Size\nbatch_size = 64\n# RNN Size\nrnn_size = 512\n# Embedding Dimension Size\nembed_dim = 128\n# Sequence Length\nseq_length = 16\n# Learning Rate\nlearning_rate = 0.001\n# Show stats for every n number of batches\nshow_every_n_batches = 128\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\nsave_dir = './save'", "_____no_output_____" ] ], [ [ "### Build the Graph\nBuild the graph using the neural network you implemented.", "_____no_output_____" ] ], [ [ "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nfrom tensorflow.contrib import seq2seq\n\ntrain_graph = tf.Graph()\nwith train_graph.as_default():\n    vocab_size = len(int_to_vocab)\n    input_text, targets, lr = get_inputs()\n    input_data_shape = tf.shape(input_text)\n    cell, initial_state = get_init_cell(input_data_shape[0], rnn_size)\n    logits, final_state = build_nn(cell, rnn_size, input_text, vocab_size, embed_dim)\n\n    # Probabilities for generating words\n    probs = 
tf.nn.softmax(logits, name='probs')\n\n # Loss function\n cost = seq2seq.sequence_loss(\n logits,\n targets,\n tf.ones([input_data_shape[0], input_data_shape[1]]))\n\n # Optimizer\n optimizer = tf.train.AdamOptimizer(lr)\n\n # Gradient Clipping\n gradients = optimizer.compute_gradients(cost)\n capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]\n train_op = optimizer.apply_gradients(capped_gradients)", "_____no_output_____" ] ], [ [ "## Train\nTrain the neural network on the preprocessed data. If you have a hard time getting a good loss, check the [forums](https://discussions.udacity.com/) to see if anyone is having the same problem.", "_____no_output_____" ] ], [ [ "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nbatches = get_batches(int_text, batch_size, seq_length)\n\nwith tf.Session(graph=train_graph) as sess:\n sess.run(tf.global_variables_initializer())\n\n for epoch_i in range(num_epochs):\n state = sess.run(initial_state, {input_text: batches[0][0]})\n\n for batch_i, (x, y) in enumerate(batches):\n feed = {\n input_text: x,\n targets: y,\n initial_state: state,\n lr: learning_rate}\n train_loss, state, _ = sess.run([cost, final_state, train_op], feed)\n\n # Show every <show_every_n_batches> batches\n if (epoch_i * len(batches) + batch_i) % show_every_n_batches == 0:\n print('Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format(\n epoch_i,\n batch_i,\n len(batches),\n train_loss))\n\n # Save Model\n saver = tf.train.Saver()\n saver.save(sess, save_dir)\n print('Model Trained and Saved')", "Epoch 0 Batch 0/67 train_loss = 8.827\nEpoch 1 Batch 61/67 train_loss = 5.340\nEpoch 3 Batch 55/67 train_loss = 4.792\nEpoch 5 Batch 49/67 train_loss = 4.297\nEpoch 7 Batch 43/67 train_loss = 4.054\nEpoch 9 Batch 37/67 train_loss = 3.867\nEpoch 11 Batch 31/67 train_loss = 3.354\nEpoch 13 Batch 25/67 train_loss = 3.063\nEpoch 15 Batch 19/67 train_loss = 2.578\nEpoch 17 Batch 13/67 train_loss = 2.223\nEpoch 19 Batch 7/67 train_loss = 1.835\nEpoch 21 Batch 1/67 train_loss = 1.558\nEpoch 22 Batch 62/67 train_loss = 1.148\nEpoch 24 Batch 56/67 train_loss = 0.827\nEpoch 26 Batch 50/67 train_loss = 0.661\nEpoch 28 Batch 44/67 train_loss = 0.489\nEpoch 30 Batch 38/67 train_loss = 0.378\nEpoch 32 Batch 32/67 train_loss = 0.343\nEpoch 34 Batch 26/67 train_loss = 0.332\nEpoch 36 Batch 20/67 train_loss = 0.313\nEpoch 38 Batch 14/67 train_loss = 0.257\nEpoch 40 Batch 8/67 train_loss = 0.235\nEpoch 42 Batch 2/67 train_loss = 0.256\nEpoch 43 Batch 63/67 train_loss = 0.258\nEpoch 45 Batch 57/67 train_loss = 0.242\nEpoch 47 Batch 51/67 train_loss = 0.271\nEpoch 49 Batch 45/67 train_loss = 0.226\nEpoch 51 Batch 39/67 train_loss = 0.251\nEpoch 53 Batch 33/67 train_loss = 0.200\nEpoch 55 Batch 27/67 train_loss = 0.196\nEpoch 57 Batch 21/67 train_loss = 0.232\nEpoch 59 Batch 15/67 train_loss = 0.184\nEpoch 61 Batch 9/67 train_loss = 0.213\nEpoch 63 Batch 3/67 train_loss = 0.206\nEpoch 64 Batch 64/67 train_loss = 0.204\nEpoch 66 Batch 58/67 train_loss = 0.222\nEpoch 68 Batch 52/67 train_loss = 0.217\nEpoch 70 Batch 46/67 train_loss = 0.188\nEpoch 72 Batch 40/67 train_loss = 0.211\nEpoch 74 Batch 34/67 train_loss = 0.229\nEpoch 76 Batch 28/67 train_loss = 0.195\nEpoch 78 Batch 22/67 train_loss = 0.227\nEpoch 80 Batch 16/67 train_loss = 0.217\nEpoch 82 Batch 10/67 train_loss = 0.207\nEpoch 84 Batch 4/67 train_loss = 0.221\nEpoch 85 Batch 65/67 train_loss = 0.214\nEpoch 87 Batch 59/67 train_loss = 0.240\nEpoch 89 Batch 53/67 train_loss = 0.179\nEpoch 91 Batch 
47/67 train_loss = 0.220\nEpoch 93 Batch 41/67 train_loss = 0.519\nEpoch 95 Batch 35/67 train_loss = 0.254\nEpoch 97 Batch 29/67 train_loss = 0.198\nEpoch 99 Batch 23/67 train_loss = 0.238\nEpoch 101 Batch 17/67 train_loss = 0.247\nEpoch 103 Batch 11/67 train_loss = 0.204\nEpoch 105 Batch 5/67 train_loss = 0.203\nEpoch 106 Batch 66/67 train_loss = 0.194\nEpoch 108 Batch 60/67 train_loss = 0.203\nEpoch 110 Batch 54/67 train_loss = 0.195\nEpoch 112 Batch 48/67 train_loss = 0.188\nEpoch 114 Batch 42/67 train_loss = 0.188\nEpoch 116 Batch 36/67 train_loss = 0.212\nEpoch 118 Batch 30/67 train_loss = 0.194\nEpoch 120 Batch 24/67 train_loss = 0.199\nEpoch 122 Batch 18/67 train_loss = 0.208\nEpoch 124 Batch 12/67 train_loss = 0.234\nEpoch 126 Batch 6/67 train_loss = 0.201\nEpoch 128 Batch 0/67 train_loss = 0.214\nEpoch 129 Batch 61/67 train_loss = 0.209\nEpoch 131 Batch 55/67 train_loss = 0.213\nEpoch 133 Batch 49/67 train_loss = 0.218\nEpoch 135 Batch 43/67 train_loss = 0.213\nEpoch 137 Batch 37/67 train_loss = 0.222\nEpoch 139 Batch 31/67 train_loss = 0.241\nEpoch 141 Batch 25/67 train_loss = 0.220\nEpoch 143 Batch 19/67 train_loss = 0.193\nEpoch 145 Batch 13/67 train_loss = 0.193\nEpoch 147 Batch 7/67 train_loss = 0.229\nEpoch 149 Batch 1/67 train_loss = 0.179\nEpoch 150 Batch 62/67 train_loss = 0.224\nEpoch 152 Batch 56/67 train_loss = 0.229\nEpoch 154 Batch 50/67 train_loss = 0.176\nEpoch 156 Batch 44/67 train_loss = 0.176\nEpoch 158 Batch 38/67 train_loss = 0.199\nEpoch 160 Batch 32/67 train_loss = 0.218\nEpoch 162 Batch 26/67 train_loss = 0.230\nEpoch 164 Batch 20/67 train_loss = 0.240\nEpoch 166 Batch 14/67 train_loss = 0.197\nEpoch 168 Batch 8/67 train_loss = 0.194\nEpoch 170 Batch 2/67 train_loss = 0.283\nEpoch 171 Batch 63/67 train_loss = 0.400\nEpoch 173 Batch 57/67 train_loss = 0.225\nEpoch 175 Batch 51/67 train_loss = 0.234\nEpoch 177 Batch 45/67 train_loss = 0.201\nEpoch 179 Batch 39/67 train_loss = 0.225\nEpoch 181 Batch 33/67 train_loss = 0.180\nEpoch 183 Batch 27/67 train_loss = 0.179\nEpoch 185 Batch 21/67 train_loss = 0.213\nEpoch 187 Batch 15/67 train_loss = 0.167\nEpoch 189 Batch 9/67 train_loss = 0.195\nEpoch 191 Batch 3/67 train_loss = 0.194\nEpoch 192 Batch 64/67 train_loss = 0.189\nEpoch 194 Batch 58/67 train_loss = 0.204\nEpoch 196 Batch 52/67 train_loss = 0.202\nEpoch 198 Batch 46/67 train_loss = 0.176\nEpoch 200 Batch 40/67 train_loss = 0.197\nEpoch 202 Batch 34/67 train_loss = 0.214\nEpoch 204 Batch 28/67 train_loss = 0.183\nEpoch 206 Batch 22/67 train_loss = 0.212\nEpoch 208 Batch 16/67 train_loss = 0.202\nEpoch 210 Batch 10/67 train_loss = 0.193\nEpoch 212 Batch 4/67 train_loss = 0.205\nEpoch 213 Batch 65/67 train_loss = 0.203\nEpoch 215 Batch 59/67 train_loss = 0.227\nEpoch 217 Batch 53/67 train_loss = 0.167\nEpoch 219 Batch 47/67 train_loss = 0.198\nEpoch 221 Batch 41/67 train_loss = 0.209\nEpoch 223 Batch 35/67 train_loss = 0.224\nEpoch 225 Batch 29/67 train_loss = 0.187\nEpoch 227 Batch 23/67 train_loss = 0.228\nEpoch 229 Batch 17/67 train_loss = 0.236\nEpoch 231 Batch 11/67 train_loss = 0.195\nEpoch 233 Batch 5/67 train_loss = 0.195\nEpoch 234 Batch 66/67 train_loss = 0.186\nEpoch 236 Batch 60/67 train_loss = 0.196\nEpoch 238 Batch 54/67 train_loss = 0.188\nEpoch 240 Batch 48/67 train_loss = 0.182\nEpoch 242 Batch 42/67 train_loss = 0.181\nEpoch 244 Batch 36/67 train_loss = 0.204\nEpoch 246 Batch 30/67 train_loss = 0.185\nEpoch 248 Batch 24/67 train_loss = 0.191\nEpoch 250 Batch 18/67 train_loss = 0.200\nEpoch 252 Batch 12/67 train_loss = 0.226\nEpoch 254 Batch 
6/67 train_loss = 0.196\nModel Trained and Saved\n" ] ], [ [ "## Save Parameters\nSave `seq_length` and `save_dir` for generating a new TV script.", "_____no_output_____" ] ], [ [ "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\n# Save parameters for checkpoint\nhelper.save_params((seq_length, save_dir))", "_____no_output_____" ] ], [ [ "# Checkpoint", "_____no_output_____" ] ], [ [ "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nimport tensorflow as tf\nimport numpy as np\nimport helper\nimport problem_unittests as tests\n\n_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()\nseq_length, load_dir = helper.load_params()", "_____no_output_____" ] ], [ [ "## Implement Generate Functions\n### Get Tensors\nGet tensors from `loaded_graph` using the function [`get_tensor_by_name()`](https://www.tensorflow.org/api_docs/python/tf/Graph#get_tensor_by_name). Get the tensors using the following names:\n- \"input:0\"\n- \"initial_state:0\"\n- \"final_state:0\"\n- \"probs:0\"\n\nReturn the tensors in the following tuple `(InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)` ", "_____no_output_____" ] ], [ [ "def get_tensors(loaded_graph):\n \"\"\"\n Get input, initial state, final state, and probabilities tensor from <loaded_graph>\n :param loaded_graph: TensorFlow graph loaded from file\n :return: Tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)\n \"\"\"\n input_tensor = loaded_graph.get_tensor_by_name('input:0')\n initial_state_tensor = loaded_graph.get_tensor_by_name('initial_state:0')\n final_state_tensor = loaded_graph.get_tensor_by_name('final_state:0')\n probs_tensor = loaded_graph.get_tensor_by_name('probs:0')\n return (input_tensor, initial_state_tensor, final_state_tensor, probs_tensor)\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_get_tensors(get_tensors)", "Tests Passed\n" ] ], [ [ "### Choose Word\nImplement the `pick_word()` function to select the next word using `probabilities`.", "_____no_output_____" ] ], [ [ "def pick_word(probabilities, int_to_vocab):\n \"\"\"\n Pick the next word in the generated text\n :param probabilities: Probabilites of the next word\n :param int_to_vocab: Dictionary of word ids as the keys and words as the values\n :return: String of the predicted word\n \"\"\"\n return int_to_vocab[np.argmax(probabilities)]\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_pick_word(pick_word)", "Tests Passed\n" ] ], [ [ "## Generate TV Script\nThis will generate the TV script for you. 
Set `gen_length` to the length of TV script you want to generate.", "_____no_output_____" ] ], [ [ "gen_length = 200\n# homer_simpson, moe_szyslak, or Barney_Gumble\nprime_word = 'moe_szyslak'\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\nloaded_graph = tf.Graph()\nwith tf.Session(graph=loaded_graph) as sess:\n    # Load saved model\n    loader = tf.train.import_meta_graph(load_dir + '.meta')\n    loader.restore(sess, load_dir)\n\n    # Get Tensors from loaded model\n    input_text, initial_state, final_state, probs = get_tensors(loaded_graph)\n\n    # Sentences generation setup\n    gen_sentences = [prime_word + ':']\n    prev_state = sess.run(initial_state, {input_text: np.array([[1]])})\n\n    # Generate sentences\n    for n in range(gen_length):\n        # Dynamic Input\n        dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]]\n        dyn_seq_length = len(dyn_input[0])\n\n        # Get Prediction\n        probabilities, prev_state = sess.run(\n            [probs, final_state],\n            {input_text: dyn_input, initial_state: prev_state})\n        \n        pred_word = pick_word(probabilities[dyn_seq_length-1], int_to_vocab)\n\n        gen_sentences.append(pred_word)\n    \n    # Remove tokens\n    tv_script = ' '.join(gen_sentences)\n    for key, token in token_dict.items():\n        ending = ' ' if key in ['\\n', '(', '\"'] else ''\n        tv_script = tv_script.replace(' ' + token.lower(), key)\n    tv_script = tv_script.replace('\\n ', '\\n')\n    tv_script = tv_script.replace('( ', '(')\n        \n    print(tv_script)", "moe_szyslak:(explaining)\" go near moe.\" i'd say that's a pretty strong endorsement endorsement.(beat) your arm imported-sounding love.\nmoe_szyslak:(into phone) yeah, i know, i got a walk on my way.\nmoe_szyslak: all right.\nmoe_szyslak: hey, come on, mr. x be just gonna let me here, like ya.\nlenny_leonard: can do you think it is 'cause you don't get to look at the other day.\nhomer_simpson:(drunk) what's the matter?, moe?\nhomer_simpson:(sheepish) are you sure what? you think is the matter or that?\nmoe_szyslak:(gently) i know, i know. i lost my wife.\n\n\nmoe_szyslak: ya, i'm gonna a a big girl, homer.\nmoe_szyslak:(friendly) no, i'm just a guy that won't close him to see his name, the next thing you ever know.\n\n\nhomer_simpson: hey, guys.\nhomer_simpson: oh, cold shoulder, huh?\nmoe_szyslak: yeah, i see\n" ] ], [ [ "# The TV Script is Nonsensical\nIt's ok if the TV script doesn't make any sense. We trained on less than a megabyte of text. In order to get good results, you'll have to use a smaller vocabulary or get more data. Luckily there's more data! As we mentioned in the beginning of this project, this is a subset of [another dataset](https://www.kaggle.com/wcukierski/the-simpsons-by-the-data). We didn't have you train on all the data, because that would take too long. However, you are free to train your neural network on all the data. After you complete the project, of course.\n# Submitting This Project\nWhen submitting this project, make sure to run all the cells before saving the notebook. Save the notebook file as \"dlnd_tv_script_generation.ipynb\" and save it as an HTML file under \"File\" -> \"Download as\". Include the \"helper.py\" and \"problem_unittests.py\" files in your submission.", "_____no_output_____" ] ] ]
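Not part of the submitted notebook: the `pick_word` implementation above always takes the argmax, which is greedy and tends to make the generated script repetitive. A minimal sketch of a stochastic alternative that samples in proportion to the softmax probabilities (the function name here is hypothetical):

```python
import numpy as np

def pick_word_sampled(probabilities, int_to_vocab):
    """Sample the next word id in proportion to its predicted probability."""
    # int_to_vocab maps ids 0..V-1 to words, so the candidate ids are just
    # the index range of the probability vector.
    word_ids = np.arange(len(probabilities))
    chosen = np.random.choice(word_ids, p=probabilities)
    return int_to_vocab[chosen]
```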
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
cb9a41b7c472e333b982f0f83fa50b87d3c52e81
1,058
ipynb
Jupyter Notebook
Course 1. Introduction to Machine Learning in Production/3.3 Scoping.ipynb
The-AI-Book/MLOps
12f05b2e6cfd3a6683da70533ada5f455d66bbce
[ "MIT" ]
null
null
null
Course 1. Introduction to Machine Learning in Production/3.3 Scoping.ipynb
The-AI-Book/MLOps
12f05b2e6cfd3a6683da70533ada5f455d66bbce
[ "MIT" ]
null
null
null
Course 1. Introduction to Machine Learning in Production/3.3 Scoping.ipynb
The-AI-Book/MLOps
12f05b2e6cfd3a6683da70533ada5f455d66bbce
[ "MIT" ]
null
null
null
18.241379
78
0.52552
[ [ [ "# Optional. Scoping\n\n---", "_____no_output_____" ], [ "<img src = \"https://i.gyazo.com/25dbcad9caf92167d271daf271d15cbf.png\">", "_____no_output_____" ], [ "## Feasibility: Is this project technically feasiable?\n\n- Use external benchmark (literature, other company, competitor)\n", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown", "markdown", "markdown" ] ]
cb9a45a33ae026ad07da7c97f9250ff54ea8e048
13,244
ipynb
Jupyter Notebook
Road Signs Dataset v1/roadSignsWebScrapingScript.ipynb
prox-vision/ProxVision
fa8e4a8e81854cf99f2caab5e6deafb0004b8e02
[ "MIT" ]
null
null
null
Road Signs Dataset v1/roadSignsWebScrapingScript.ipynb
prox-vision/ProxVision
fa8e4a8e81854cf99f2caab5e6deafb0004b8e02
[ "MIT" ]
null
null
null
Road Signs Dataset v1/roadSignsWebScrapingScript.ipynb
prox-vision/ProxVision
fa8e4a8e81854cf99f2caab5e6deafb0004b8e02
[ "MIT" ]
null
null
null
41.517241
1,382
0.598837
[ [ [ "## Roadsigns Data Collection", "_____no_output_____" ], [ "# Installing Selenium", "_____no_output_____" ] ], [ [ "pip install selenium", "Requirement already satisfied: selenium in c:\\users\\vedanth\\anaconda3\\lib\\site-packages (3.141.0)\nRequirement already satisfied: urllib3 in c:\\users\\vedanth\\anaconda3\\lib\\site-packages (from selenium) (1.25.9)\nNote: you may need to restart the kernel to use updated packages.\n" ] ], [ [ "### Starting the web driver", "_____no_output_____" ] ], [ [ "import selenium\nfrom selenium import webdriver\n# Put the path for your ChromeDriver here\nDRIVER_PATH = 'D:\\Vedanth\\proxvision\\chromedriver'\nwd = webdriver.Chrome(executable_path=DRIVER_PATH)", "_____no_output_____" ], [ "wd.get('https://google.com')\n", "_____no_output_____" ] ], [ [ "When you run the above two cells a window with google.com should open", "_____no_output_____" ], [ "Search for Dogs", "_____no_output_____" ] ], [ [ "search_box = wd.find_element_by_css_selector('input.gLFyf')\nsearch_box.send_keys('Dogs')", "_____no_output_____" ], [ "wd.quit()\n#Close the driver", "_____no_output_____" ] ], [ [ "The function fetch_image_urls expects three input parameters:<br>\nquery : Search term, like Dog<br>\nmax_links_to_fetch : Number of links the scraper is supposed to collect<br>\nwebdriver : instantiated Webdriver", "_____no_output_____" ] ], [ [ "def fetch_image_urls(query:str, max_links_to_fetch:int, wd:webdriver, sleep_between_interactions:int=1):\n def scroll_to_end(wd):\n wd.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n time.sleep(sleep_between_interactions) \n \n # build the google query\n search_url = \"https://www.google.com/search?safe=off&site=&tbm=isch&source=hp&q={q}&oq={q}&gs_l=img\"\n\n # load the page\n wd.get(search_url.format(q=query))\n\n image_urls = set()\n image_count = 0\n results_start = 0\n while image_count < max_links_to_fetch:\n scroll_to_end(wd)\n\n # get all image thumbnail results\n thumbnail_results = wd.find_elements_by_css_selector(\"img.Q4LuWd\")\n number_results = len(thumbnail_results)\n \n print(f\"Found: {number_results} search results. 
Extracting links from {results_start}:{number_results}\")\n        \n        for img in thumbnail_results[results_start:number_results]:\n            # try to click every thumbnail such that we can get the real image behind it\n            try:\n                img.click()\n                time.sleep(sleep_between_interactions)\n            except Exception:\n                continue\n\n            # extract image urls    \n            actual_images = wd.find_elements_by_css_selector('img.n3VNCb')\n            for actual_image in actual_images:\n                if actual_image.get_attribute('src') and 'http' in actual_image.get_attribute('src'):\n                    image_urls.add(actual_image.get_attribute('src'))\n\n            image_count = len(image_urls)\n\n            if len(image_urls) >= max_links_to_fetch:\n                print(f\"Found: {len(image_urls)} image links, done!\")\n                break\n        else:\n            print(\"Found:\", len(image_urls), \"image links, looking for more ...\")\n            time.sleep(30)\n            load_more_button = wd.find_element_by_css_selector(\".mye4qd\")\n            if load_more_button:\n                wd.execute_script(\"document.querySelector('.mye4qd').click();\")\n\n        # move the result startpoint further down\n        results_start = len(thumbnail_results)\n\n    return image_urls\n", "_____no_output_____" ] ], [ [ "### Downloading images with Pillow", "_____no_output_____" ] ], [ [ "pip install Pillow", "Requirement already satisfied: Pillow in c:\\users\\vedanth\\anaconda3\\lib\\site-packages (7.2.0)Note: you may need to restart the kernel to use updated packages.\n\n" ] ], [ [ "The persist_image function grabs an image URL (`url`) and downloads it into the `folder_path`. The function will assign the image a random 10-digit id.\n", "_____no_output_____" ] ], [ [ "import io\nimport os\nimport hashlib\nimport requests\nfrom PIL import Image\n\ndef persist_image(folder_path:str,url:str):\n    try:\n        image_content = requests.get(url).content\n\n    except Exception as e:\n        print(f\"ERROR - Could not download {url} - {e}\")\n        return\n\n    try:\n        image_file = io.BytesIO(image_content)\n        image = Image.open(image_file).convert('RGB')\n        file_path = os.path.join(folder_path,hashlib.sha1(image_content).hexdigest()[:10] + '.jpg')\n        with open(file_path, 'wb') as f:\n            image.save(f, \"JPEG\", quality=85)\n        print(f\"SUCCESS - saved {url} - as {file_path}\")\n    except Exception as e:\n        print(f\"ERROR - Could not save {url} - {e}\")\n", "_____no_output_____" ] ], [ [ "Now let's combine the above two functions into a single short and concise function", "_____no_output_____" ] ], [ [ "import os\ndef search_and_download(search_term:str,driver_path:str,target_path='./images',number_images=5):\n    target_folder = os.path.join(target_path,'_'.join(search_term.lower().split(' ')))\n\n    if not os.path.exists(target_folder):\n        os.makedirs(target_folder)\n\n    with webdriver.Chrome(executable_path=driver_path) as wd:\n        res = fetch_image_urls(search_term, number_images, wd=wd, sleep_between_interactions=0.5)\n    \n    for elem in res:\n        persist_image(target_folder,elem)", "_____no_output_____" ] ], [ [ "## Entering our search terms and downloading the images", "_____no_output_____" ] ], [ [ "search_term = \"dogs\"\nsearch_and_download(\n    search_term = search_term,\n    driver_path = DRIVER_PATH\n    )", "_____no_output_____" ] ] ]
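Not part of the original notebook: since the notebook is titled road-sign collection but the example searches for dogs, here is a hedged sketch of how the same helper might be looped over several road-sign queries. The search terms and image count below are made up for illustration, and it assumes `search_and_download` and `DRIVER_PATH` from the cells above are already defined.

```python
# Hypothetical batch run over a few road-sign related queries.
search_terms = ["stop sign", "speed limit sign", "yield sign"]

for term in search_terms:
    # Reuses the search_and_download helper defined above; 50 images per
    # term is an arbitrary choice for illustration.
    search_and_download(
        search_term=term,
        driver_path=DRIVER_PATH,
        number_images=50,
    )
```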
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb9a647655a71cb6f31ea2a50bb6df826bbc3434
226,835
ipynb
Jupyter Notebook
chapter4/01-titanic-classification.ipynb
lienero/python_data_analysis
88c88eee3aae657e886c00ec19f01472fc616df9
[ "MIT" ]
null
null
null
chapter4/01-titanic-classification.ipynb
lienero/python_data_analysis
88c88eee3aae657e886c00ec19f01472fc616df9
[ "MIT" ]
null
null
null
chapter4/01-titanic-classification.ipynb
lienero/python_data_analysis
88c88eee3aae657e886c00ec19f01472fc616df9
[ "MIT" ]
null
null
null
129.472032
28,708
0.834479
[ [ [ "# -*- coding: utf-7 -*-\n%matplotlib inline\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns", "_____no_output_____" ], [ "df_train = pd.read_csv(\"../data/titanic_train.csv\")\ndf_test = pd.read_csv(\"../data/titanic_test.csv\")\ndf_train.head(5)", "_____no_output_____" ], [ "print(df_train.info())\nprint(\"-----------------\")\nprint(df_test.info())", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 916 entries, 0 to 915\nData columns (total 13 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 pclass 916 non-null int64 \n 1 survived 916 non-null int64 \n 2 name 916 non-null object \n 3 sex 916 non-null object \n 4 age 741 non-null float64\n 5 sibsp 916 non-null int64 \n 6 parch 916 non-null int64 \n 7 ticket 916 non-null object \n 8 fare 916 non-null float64\n 9 cabin 214 non-null object \n 10 embarked 914 non-null object \n 11 body 85 non-null float64\n 12 home.dest 527 non-null object \ndtypes: float64(3), int64(4), object(6)\nmemory usage: 93.2+ KB\nNone\n-----------------\n<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 393 entries, 0 to 392\nData columns (total 13 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 pclass 393 non-null int64 \n 1 survived 393 non-null int64 \n 2 name 393 non-null object \n 3 sex 393 non-null object \n 4 age 305 non-null float64\n 5 sibsp 393 non-null int64 \n 6 parch 393 non-null int64 \n 7 ticket 393 non-null object \n 8 fare 393 non-null float64\n 9 cabin 81 non-null object \n 10 embarked 393 non-null object \n 11 body 36 non-null float64\n 12 home.dest 218 non-null object \ndtypes: float64(3), int64(4), object(6)\nmemory usage: 40.0+ KB\nNone\n" ], [ "# 데이터셋에서 name, ticket, bodt, cabin, home.dest 피처를 제거합니다.\ndf_train = df_train.drop(['name', 'ticket', 'body', 'cabin', 'home.dest'], axis=1)\ndf_test = df_test.drop(['name', 'ticket', 'body', 'cabin', 'home.dest'], axis=1)", "_____no_output_____" ], [ "print(df_train['survived'].value_counts())\ndf_train['survived'].value_counts().plot.bar()", "0 563\n1 353\nName: survived, dtype: int64\n" ], [ "# survived 피처를 기준으로 그룹을 나누어 그룹별 pclass 피처의 분포를 살펴봅니다.\nprint(df_train['pclass'].value_counts())\n# countplot() : 카테고리별(x) 데이터의 양 확인\nax = sns.countplot(x ='pclass', hue = 'survived', data = df_train)", "3 498\n1 230\n2 188\nName: pclass, dtype: int64\n" ], [ "from scipy import stats", "_____no_output_____" ], [ "# 두 집단을 피처를 비교해주며 탐색작업을 자동화하는 함수를 정의합니다.\ndef valid_features(df, col_name, distribution_check=True):\n \n # 두 집단 (survived=1, survivied=0)의 분포 그래프를 출력합니다.\n # FacetGrid(data, row, col, hue) : 다중 플롯 그리드를 만들어서 여러가지 쌍 관계를 표현하기 위한 그리드 Class이다. 
도화지에 축을 나누는것과 같다.\n g = sns.FacetGrid(df, col='survived')\n g.map(plt.hist, col_name, bins=30)\n \n # 두 집단 (survived=1, survived=0)의 표준편차를 각각 출력합니다.\n titanic_survived = df[df['survived']==1]\n titanic_survived_static = np.array(titanic_survived[col_name])\n # Numpy.std() 함수는 지정된 축을 따라 주어진 배열의 표준 편차를 계산합니다.\n print(\"data std is\" '%.2f' % np.std(titanic_survived_static))\n titanic_n_survived = df[df['survived']==0]\n titanic_n_survived_static = np.array(titanic_n_survived[col_name])\n # Numpy.std() 함수는 지정된 축을 따라 주어진 배열의 표준 편차를 계산합니다.\n print(\"data std is\" '%.2f' % np.std(titanic_n_survived_static))\n \n # T-test로 두 집단의 평균 차이를 검정합니다\n tTestResult = stats.ttest_ind(titanic_survived[col_name], titanic_n_survived[col_name])\n # equal_var는 등분산 여부를 표시해서 넣어주는 것인데 True와 False중에서 선택해서 표시해주면된다.\n tTestResultDiffVar = stats.ttest_ind(titanic_survived[col_name], titanic_n_survived[col_name], equal_var=False)\n print(\"The t-statistic and p-value assuming equal variances is %.3f and %.3f\" % tTestResult)\n print(\"The t-statistic and p-value not assuming equal variances is %.3f and %.3f\" % tTestResultDiffVar)\n \n if distribution_check:\n # Shapiro-Wilk 검정 : 분포의 정규성 정도를 검증합니다.\n print(\"The w-statistic and p-value in Survived %.3f and %.3f\" %stats.shapiro(titanic_survived[col_name]))\n print(\"The w-statistic and p-value in Non-Survived %.3f and %.3f\" %stats.shapiro(titanic_n_survived[col_name]))", "_____no_output_____" ], [ "# 앞서 정의한 vaild_feautures 함수를 실행합니다. age 피처와 sibso 피처를 탐색합니다.\nvalid_features(df_train[df_train['age'] > 0], 'age', distribution_check=True)\nvalid_features(df_train, 'sibsp', distribution_check=False)", "data std is14.22\ndata std is13.71\nThe t-statistic and p-value assuming equal variances is -0.546 and 0.585\nThe t-statistic and p-value not assuming equal variances is -0.543 and 0.587\nThe w-statistic and p-value in Survived 0.982 and 0.001\nThe w-statistic and p-value in Non-Survived 0.968 and 0.000\ndata std is0.64\ndata std is1.34\nThe t-statistic and p-value assuming equal variances is -2.118 and 0.034\nThe t-statistic and p-value not assuming equal variances is -2.446 and 0.015\n" ], [ "# 로지스틱 회귀 모델을 사용하기 위해서는 회귀분석을 수행할 때와 동일한 방법으로 데이터를 가공해야한다.\n# age의 결측값을 평균값으로 대체합니다.\nreplace_mean = df_train[df_train['age'] > 0]['age'].mean()\n# fillna() : 결측값을 특정 값으로 채운다\ndf_train['age'] = df_train['age'].fillna(replace_mean)\ndf_test['age'] = df_test['age'].fillna(replace_mean)\n\n# embark: 2개의 결측값을 최빈값으로 대체합니다.\nembarked_mode = df_train['embarked'].value_counts().index[0]\ndf_train['embarked'] = df_train['embarked'].fillna(embarked_mode)\ndf_test['embarked'] = df_test['embarked'].fillna(embarked_mode)\n\n# 원-핫 인코딩을 위한 통합 데이터 프레임(whole_df)을 생성합니다.\nwhole_df = df_train.append(df_test)\ntrain_idx_num = len(df_train)\n\n# pandas 패키지를 이용한 원-핫 인코딩을 수행합니다.\nwhole_df_encoded = pd.get_dummies(whole_df)\ndf_train = whole_df_encoded[:train_idx_num]\ndf_test = whole_df_encoded[train_idx_num:]", "_____no_output_____" ], [ "df_train.head()", "_____no_output_____" ], [ "# 데이터를 학습 데이터셋, 테스트 데이터셋으로 분리합니다.\nx_train, y_train = df_train.loc[:, df_train.columns != 'survived'].values, df_train['survived'].values\nx_test, y_test = df_test.loc[:, df_test.columns != 'survived'].values, df_test['survived'].values", "_____no_output_____" ], [ "# 로지스틱 회귀 모델을 사용\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score", "_____no_output_____" ], [ "# 로지스틱 회귀 모델을 학습합니다.\n# max_iter=100 이 부족하여 오류 발생, 고칠 필요가 있음\nlr = 
LogisticRegression(random_state=0,max_iter=500)\nlr.fit(x_train, y_train)\n\n# 학습한 모델의 테스트 데이터셋에 대한 예측 결과를 반환합니다.\ny_pred = lr.predict(x_test)\n# predict_proba의 출력은 각 클래스에 대한 확률\ny_pred_probability = lr.predict_proba(x_test)[:,1]", "_____no_output_____" ], [ "# 테스트 데이터셋에 대한 정확도, 정밀도, 특이도, f1 평가 지표를 각자 출력합니다.\nprint(\"accuracy: %.2f\" % accuracy_score(y_test, y_pred))\nprint(\"Precision : %.3f\" % precision_score(y_test, y_pred))\nprint(\"Recall : %.3f\" % f1_score(y_test, y_pred))\nprint(\"F1 : %.3f\" % f1_score(y_test, y_pred))", "accuracy: 0.80\nPrecision : 0.756\nRecall : 0.712\nF1 : 0.712\n" ], [ "from sklearn.metrics import confusion_matrix", "_____no_output_____" ], [ "# Confusion Matrix를 출력합니다.\nconfmat = confusion_matrix(y_true=y_test, y_pred=y_pred)\nprint(confmat)", "[[214 32]\n [ 48 99]]\n" ], [ "from sklearn.metrics import roc_curve, roc_auc_score\n\n# AUC(Area Under the Curve)를 계산하여 출력합니다.\nfalse_positive_rate, true_positive_rate, thresholds = roc_curve(y_test, y_pred_probability)\nroc_auc = roc_auc_score(y_test, y_pred_probability)\nprint(\"AUC : %.3f\" % roc_auc)\n\n# ROC curve를 그래프로 출력합니다.\nplt.rcParams['figure.figsize'] = [5, 4]\nplt.plot(false_positive_rate, true_positive_rate, label='ROC curve (area = %0.3f)' % roc_auc, color='red', linewidth=4.0)\nplt.plot([0,1], [0, 1], 'k--')\nplt.xlim([0.0, 1.0])\nplt.ylim([0.0, 1.0])\nplt.xlabel('False Positive Rate')\nplt.ylabel('True Positive Rate')\nplt.title('ROC curve of Logistic regression')\nplt.legend(loc=\"lower right\")", "AUC : 0.838\n" ], [ "from sklearn.tree import DecisionTreeClassifier", "_____no_output_____" ], [ "# 의사결정나무를 학습하고, 학습한 모델로 테스트 데이터셋에 대한 예측값을 반환합니다.\ndtc = DecisionTreeClassifier()\ndtc.fit(x_train, y_train)\ny_pred = dtc.predict(x_test)\ny_pred_probability = dtc.predict_proba(x_test)[:,1]\n\n# 학습한 모델의 성능을 계산하여 출력합니다.\nprint(\"accuracy: %.2f\" % accuracy_score(y_test, y_pred))\nprint(\"Precision : %.3f\" % precision_score(y_test, y_pred))\nprint(\"Recall : %.3f\" % recall_score(y_test, y_pred))\nprint(\"F1 : %.3f\" % f1_score(y_test, y_pred))", "accuracy: 0.75\nPrecision : 0.677\nRecall : 0.612\nF1 : 0.643\n" ], [ "# 학습한 모델의 AUC를 계산하여 출력합니다.\nfalse_positive_rate, true_positive_rate, thresholds = roc_curve(y_test, y_pred_probability)\nroc_auc = roc_auc_score(y_test, y_pred_probability)\nprint(\"AUC : %.3f\" % roc_auc)\n\n# ROC curve를 그래프로 출력합니다.\nplt.rcParams['figure.figsize'] = [5, 4]\nplt.plot(false_positive_rate, true_positive_rate, label='ROC curve (area = %0.3f)' % roc_auc, \n color='red', linewidth=4.0)\nplt.plot([0, 1], [0, 1], 'k--')\nplt.xlim([0.0, 1.0])\nplt.ylim([0.0, 1.0])\nplt.xlabel('False Positive Rate')\nplt.ylabel('True Positive Rate')\nplt.title('ROC curve of Logistic regression')\nplt.legend(loc=\"lower right\")", "AUC : 0.732\n" ], [ "# 데이터를 다시 불러옵니다.\ndf_train = pd.read_csv(\"../data/titanic_train.csv\")\ndf_test = pd.read_csv(\"../data/titanic_test.csv\")\ndf_train = df_train.drop(['ticket', 'body', 'home.dest'], axis=1)\ndf_test = df_test.drop(['ticket', 'body', 'home.dest'], axis=1)", "_____no_output_____" ], [ "# age의 결측값을 평균값으로 대체합니다.\nreplace_mean = df_train[df_train['age'] > 0]['age'].mean()\ndf_train['age'] = df_train['age'].fillna(replace_mean)\ndf_test['age'] = df_test['age'].fillna(replace_mean)\n\n#embark : 2개의 결속값을 최반값으로 대체합니다.\nembarked_mode = df_train['embarked'].value_counts().index[0]\ndf_train['embarked'] = df_train['embarked'].fillna(embarked_mode)\ndf_test['embarked'] = df_test['embarked'].fillna(embarked_mode)\n\n# 원-핫 인코딩을 위한 통합 데이터 프레임(whole_df)을 생성합니다.\nwhole_df 
= df_train.append(df_test)\ntrain_idx_num = len(df_train)", "_____no_output_____" ], [ "print(whole_df['cabin'].value_counts()[:10])", "C23 C25 C27 6\nG6 5\nB57 B59 B63 B66 5\nF33 4\nB96 B98 4\nF2 4\nC22 C26 4\nD 4\nF4 4\nC78 4\nName: cabin, dtype: int64\n" ], [ "# 결측 데이터의 경우는 'X'로 대체합니다.\nwhole_df['cabin'] = whole_df['cabin'].fillna('X')\n\n# cabin 피처의 첫 번째 알파벳을 추출합니다.\nwhole_df['cabin'] = whole_df['cabin'].apply(lambda x: x[0])\n\n# 추출한 알파벳 중, G와 T는 수가 너무 작기 때문에 마찬가지로 'X'로 대체합니다.\nwhole_df['cabin'] = whole_df['cabin'].replace({\"G\":\"X\", \"T\":\"X\"})\n\nax = sns.countplot(x='cabin', hue = 'survived', data = whole_df)\nplt.show()", "_____no_output_____" ], [ "# 이름에서 호칭을 추출합니다.\nname_grade = whole_df['name'].apply(lambda x : x.split(\", \",1)[1].split(\".\")[0])\nname_grade = name_grade.unique().tolist()\nprint(name_grade)", "['Miss', 'Mr', 'Master', 'Mrs', 'Dr', 'Mlle', 'Col', 'Rev', 'Ms', 'Mme', 'Sir', 'the Countess', 'Dona', 'Jonkheer', 'Lady', 'Major', 'Don', 'Capt']\n" ], [ "# 호칭에 따라 사회적 지위(1910년대 기준)를 정의합니다.\ngrade_dict = {'A': ['Rev', 'Col', 'Major', 'Dr', 'Capt', 'Sir'], # 명예직을 나타냅니다.\n 'B': ['Ms', 'Mme', 'Mrs', 'Dona'], # 여성을 나타냅니다.\n 'C': ['Jonkheer', 'the Countess'], # 귀족이나 작위를 나타냅니다.\n 'D': ['Mr', 'Don'], # 남성을 나타냅니다.\n 'E': ['Master'], # 젊은남성을 나타냅니다.\n 'F': ['Miss', 'Mlle', 'Lady']} # 젊은 여성을 나타냅니다.\n\n# 정의한 호칭의 기준에 따라 A~F의 문자로 name 피처를 다시 정의하는 함수입니다.\ndef give_grade(x):\n # split 함수는 a.split()처럼 괄호 안에 아무 값도 넣어 주지 않으면 공백(스페이스, 탭, 엔터 등)을 기준으로 문자열을 나누어 준다. \n # 만약 b.split(':')처럼 괄호 안에 특정 값이 있을 경우에는 괄호 안의 값을 구분자로 해서 문자열을 나누어 준다. \n grade = x.split(\", \", 1)[1].split(\".\")[0]\n for key, value in grade_dict.items():\n for title in value:\n if grade == title:\n return key\n return 'G' \n\n# 위의 함수를 적용하여 name 피처를 새롭게 정의합니다.\nwhole_df['name'] = whole_df['name'].apply(lambda x: give_grade(x))\nprint(whole_df['name'].value_counts())", "D 758\nF 263\nB 201\nE 61\nA 24\nC 2\nName: name, dtype: int64\n" ], [ "# pandas 패키지를 이용한 원-핫 인코딩을 수행합니다.\nwhole_df_encoded = pd.get_dummies(whole_df)\ndf_train = whole_df_encoded[:train_idx_num]\ndf_test = whole_df_encoded[train_idx_num:]", "_____no_output_____" ], [ "df_train.head()", "_____no_output_____" ], [ "# 데이터를 학습 데이터셋, 테스트 데이터셋으로 분리합니다.\nx_train, y_train = df_train.loc[:, df_train.columns != 'survived'].values, df_train['survived'].values\nx_test, y_test = df_test.loc[:, df_test.columns != 'survived'].values, df_test['survived'].values\n\n# 로지스틱 회귀 모델을 학습합니다.\nlr = LogisticRegression(random_state=0)\nlr.fit(x_train, y_train)\n\n# 학습한 모델의 테스트 데이터셋에 대한 예측 결과를 반환합니다.\ny_pred = lr.predict(x_test)\ny_pred_probability = lr.predict_proba(x_test)[:,1]\n\n# 테스트 데이터셋에 대한 accuracy, precision, recall, f1 평가 지표를 각각 출력합니다.\nprint(\"accuracy: %.2f\" % accuracy_score(y_test, y_pred))\nprint(\"Precision : %.3f\" % precision_score(y_test, y_pred))\nprint(\"Recall : %.3f\" % recall_score(y_test, y_pred))\nprint(\"F1 : %.3f\" % f1_score(y_test, y_pred)) # AUC (Area Under the Curve) & ROC curve\n\n# AUC (Area Under the Curve)를 계산하여 출력합니다.\nfalse_positive_rate, true_positive_rate, thresholds = roc_curve(y_test, y_pred_probability)\nroc_auc = roc_auc_score(y_test, y_pred_probability)\nprint(\"AUC : %.3f\" % roc_auc)\n\n# ROC curve를 그래프로 출력합니다.\nplt.rcParams['figure.figsize'] = [5, 4]\nplt.plot(false_positive_rate, true_positive_rate, label='ROC curve (area = %0.3f)' % roc_auc, \n color='red', linewidth=4.0)\nplt.plot([0, 1], [0, 1], 'k--')\nplt.xlim([0.0, 1.0])\nplt.ylim([0.0, 1.0])\nplt.xlabel('False Positive Rate')\nplt.ylabel('True Positive 
Rate')\nplt.title('ROC curve of Logistic regression')\nplt.legend(loc=\"lower right\")", "accuracy: 0.79\nPrecision : 0.736\nRecall : 0.701\nF1 : 0.718\nAUC : 0.853\n" ], [ "# 예측 대상인 survived 피처를 제외한 모든 피처를 리스트로 반환합니다. (그래프의 y축)\ncols = df_train.columns.to_list()\ncols.remove('survived')\ny_pos = np.arange(len(cols))\n\n# 각 피처별 회귀 분석 계수를 그래프의 x축으로 하여 피처 영향력 그래프를 출력합니다.\nplt.rcParams['figure.figsize'] = [5, 4]\nfig, ax = plt.subplots()\nax.barh(y_pos, lr.coef_[0], align='center', color='green', ecolor='black')\nax.set_yticks(y_pos)\nax.set_yticklabels(cols)\nax.invert_yaxis()\nax.set_xlabel('Coef')\nax.set_title(\"Each Feature's Coef\")\n\nplt.show()", "_____no_output_____" ], [ "from sklearn.model_selection import KFold\n\n# K-fold 교차 검증의 k를 5로 설정합니다.\nk = 5\ncv = KFold(k, shuffle=True, random_state=0)\nauc_history = []\n\n# K-fold를 5번의 분할 학습으로 반복합니다.\n# 파이썬 enumerate 는 순서가 있는 자료형의 index번호 와 index값 을 반환하는 함수다.\nfor i, (train_data_row, test_data_row) in enumerate(cv.split(whole_df_encoded)):\n \n # 5개로 분할된 fold 중 4개를 학습 데이터셋, 1개를 테스트 데이터셋으로 지정합니다.\n # 매 반복시마다 테스트 데이터셋은 변경됩니다.\n # iloc\t행 번호를 기준으로 행 데이터 읽기\n df_train = whole_df_encoded.iloc[train_data_row]\n df_test = whole_df_encoded.iloc[test_data_row]\n \n # survived 피처를 y, 나머지 피처들을 x 데이터로 지정합니다.\n # loc\t인덱스 기준으로 행 데이터 읽기\n splited_x_train, splited_y_train = df_train.loc[:, df_train.columns != 'survived'].values, df_train['survived'].values\n splited_x_test, splited_y_test = df_test.loc[:, df_test.columns != 'survived'].values, df_test['survived'].values\n \n # 주어진 데이터로 로지스틱 회귀 모델을 학습합니다.\n lr = LogisticRegression(random_state=0)\n lr.fit(splited_x_train, splited_y_train)\n y_pred = lr.predict(splited_x_test)\n # predict_proba의 출력은 각 클래스에 대한 확률\n y_pred_probability = lr.predict_proba(splited_x_test)[:,1]\n \n # 테스트 데이터셋의 AUC를 계산하여 auc_history에 저장합니다.\n false_positive_rate, true_positive_rate, thresholds = roc_curve(splited_y_test, y_pred_probability)\n roc_auc = roc_auc_score(splited_y_test, y_pred_probability)\n auc_history.append(roc_auc)\n \n# auc_history에 저장된 다섯 번의 학습 결과(AUC)를 그래프로 출력합니다.\nplt.xlabel(\"Each K-fold\")\nplt.ylabel(\"AUC of splited test data\")\nplt.plot(range(1, k+1), auc_history) # baseline ", "c:\\users\\lsc11\\appdata\\local\\programs\\python\\python39\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py:763: ConvergenceWarning: lbfgs failed to converge (status=1):\nSTOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n\nIncrease the number of iterations (max_iter) or scale the data as shown in:\n https://scikit-learn.org/stable/modules/preprocessing.html\nPlease also refer to the documentation for alternative solver options:\n https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n n_iter_i = _check_optimize_result(\nc:\\users\\lsc11\\appdata\\local\\programs\\python\\python39\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py:763: ConvergenceWarning: lbfgs failed to converge (status=1):\nSTOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n\nIncrease the number of iterations (max_iter) or scale the data as shown in:\n https://scikit-learn.org/stable/modules/preprocessing.html\nPlease also refer to the documentation for alternative solver options:\n https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n n_iter_i = _check_optimize_result(\nc:\\users\\lsc11\\appdata\\local\\programs\\python\\python39\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py:763: ConvergenceWarning: lbfgs failed to converge (status=1):\nSTOP: TOTAL NO. 
of ITERATIONS REACHED LIMIT.\n\nIncrease the number of iterations (max_iter) or scale the data as shown in:\n https://scikit-learn.org/stable/modules/preprocessing.html\nPlease also refer to the documentation for alternative solver options:\n https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n n_iter_i = _check_optimize_result(\nc:\\users\\lsc11\\appdata\\local\\programs\\python\\python39\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py:763: ConvergenceWarning: lbfgs failed to converge (status=1):\nSTOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n\nIncrease the number of iterations (max_iter) or scale the data as shown in:\n https://scikit-learn.org/stable/modules/preprocessing.html\nPlease also refer to the documentation for alternative solver options:\n https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n n_iter_i = _check_optimize_result(\nc:\\users\\lsc11\\appdata\\local\\programs\\python\\python39\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py:763: ConvergenceWarning: lbfgs failed to converge (status=1):\nSTOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n\nIncrease the number of iterations (max_iter) or scale the data as shown in:\n https://scikit-learn.org/stable/modules/preprocessing.html\nPlease also refer to the documentation for alternative solver options:\n https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n n_iter_i = _check_optimize_result(\n" ], [ "!pip install scikit-plot", "Collecting scikit-plot\n Downloading scikit_plot-0.3.7-py3-none-any.whl (33 kB)\nRequirement already satisfied: matplotlib>=1.4.0 in c:\\users\\lsc11\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from scikit-plot) (3.3.3)\nRequirement already satisfied: scipy>=0.9 in c:\\users\\lsc11\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from scikit-plot) (1.6.0)\nRequirement already satisfied: joblib>=0.10 in c:\\users\\lsc11\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from scikit-plot) (1.0.0)\nRequirement already satisfied: scikit-learn>=0.18 in c:\\users\\lsc11\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from scikit-plot) (0.24.0)\nRequirement already satisfied: python-dateutil>=2.1 in c:\\users\\lsc11\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from matplotlib>=1.4.0->scikit-plot) (2.8.1)\nRequirement already satisfied: cycler>=0.10 in c:\\users\\lsc11\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from matplotlib>=1.4.0->scikit-plot) (0.10.0)\nRequirement already satisfied: pillow>=6.2.0 in c:\\users\\lsc11\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from matplotlib>=1.4.0->scikit-plot) (8.0.1)\nRequirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.3 in c:\\users\\lsc11\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from matplotlib>=1.4.0->scikit-plot) (2.4.7)\nRequirement already satisfied: kiwisolver>=1.0.1 in c:\\users\\lsc11\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from matplotlib>=1.4.0->scikit-plot) (1.3.1)\nRequirement already satisfied: numpy>=1.15 in c:\\users\\lsc11\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from matplotlib>=1.4.0->scikit-plot) (1.19.3)\nRequirement already satisfied: six in c:\\users\\lsc11\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from cycler>=0.10->matplotlib>=1.4.0->scikit-plot) (1.15.0)\nRequirement already satisfied: threadpoolctl>=2.0.0 in 
c:\\users\\lsc11\\appdata\\local\\programs\\python\\python39\\lib\\site-packages (from scikit-learn>=0.18->scikit-plot) (2.1.0)\nInstalling collected packages: scikit-plot\nSuccessfully installed scikit-plot-0.3.7\n" ], [ "import scikitplot as skplt\nskplt.estimators.plot_learning_curve(lr, x_train, y_train)\nplt.show()", "c:\\users\\lsc11\\appdata\\local\\programs\\python\\python39\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py:763: ConvergenceWarning: lbfgs failed to converge (status=1):\nSTOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n\nIncrease the number of iterations (max_iter) or scale the data as shown in:\n https://scikit-learn.org/stable/modules/preprocessing.html\nPlease also refer to the documentation for alternative solver options:\n https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n n_iter_i = _check_optimize_result(\nc:\\users\\lsc11\\appdata\\local\\programs\\python\\python39\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py:763: ConvergenceWarning: lbfgs failed to converge (status=1):\nSTOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n\nIncrease the number of iterations (max_iter) or scale the data as shown in:\n https://scikit-learn.org/stable/modules/preprocessing.html\nPlease also refer to the documentation for alternative solver options:\n https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n n_iter_i = _check_optimize_result(\nc:\\users\\lsc11\\appdata\\local\\programs\\python\\python39\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py:763: ConvergenceWarning: lbfgs failed to converge (status=1):\nSTOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n\nIncrease the number of iterations (max_iter) or scale the data as shown in:\n https://scikit-learn.org/stable/modules/preprocessing.html\nPlease also refer to the documentation for alternative solver options:\n https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n n_iter_i = _check_optimize_result(\nc:\\users\\lsc11\\appdata\\local\\programs\\python\\python39\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py:763: ConvergenceWarning: lbfgs failed to converge (status=1):\nSTOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n\nIncrease the number of iterations (max_iter) or scale the data as shown in:\n https://scikit-learn.org/stable/modules/preprocessing.html\nPlease also refer to the documentation for alternative solver options:\n https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n n_iter_i = _check_optimize_result(\nc:\\users\\lsc11\\appdata\\local\\programs\\python\\python39\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py:763: ConvergenceWarning: lbfgs failed to converge (status=1):\nSTOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n\nIncrease the number of iterations (max_iter) or scale the data as shown in:\n https://scikit-learn.org/stable/modules/preprocessing.html\nPlease also refer to the documentation for alternative solver options:\n https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n n_iter_i = _check_optimize_result(\nc:\\users\\lsc11\\appdata\\local\\programs\\python\\python39\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py:763: ConvergenceWarning: lbfgs failed to converge (status=1):\nSTOP: TOTAL NO. 
of ITERATIONS REACHED LIMIT.\n\nIncrease the number of iterations (max_iter) or scale the data as shown in:\n https://scikit-learn.org/stable/modules/preprocessing.html\nPlease also refer to the documentation for alternative solver options:\n https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n n_iter_i = _check_optimize_result(\nc:\\users\\lsc11\\appdata\\local\\programs\\python\\python39\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py:763: ConvergenceWarning: lbfgs failed to converge (status=1):\nSTOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n\nIncrease the number of iterations (max_iter) or scale the data as shown in:\n https://scikit-learn.org/stable/modules/preprocessing.html\nPlease also refer to the documentation for alternative solver options:\n https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n n_iter_i = _check_optimize_result(\nc:\\users\\lsc11\\appdata\\local\\programs\\python\\python39\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py:763: ConvergenceWarning: lbfgs failed to converge (status=1):\nSTOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n\nIncrease the number of iterations (max_iter) or scale the data as shown in:\n https://scikit-learn.org/stable/modules/preprocessing.html\nPlease also refer to the documentation for alternative solver options:\n https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n n_iter_i = _check_optimize_result(\nc:\\users\\lsc11\\appdata\\local\\programs\\python\\python39\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py:763: ConvergenceWarning: lbfgs failed to converge (status=1):\nSTOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n\nIncrease the number of iterations (max_iter) or scale the data as shown in:\n https://scikit-learn.org/stable/modules/preprocessing.html\nPlease also refer to the documentation for alternative solver options:\n https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n n_iter_i = _check_optimize_result(\nc:\\users\\lsc11\\appdata\\local\\programs\\python\\python39\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py:763: ConvergenceWarning: lbfgs failed to converge (status=1):\nSTOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n\nIncrease the number of iterations (max_iter) or scale the data as shown in:\n https://scikit-learn.org/stable/modules/preprocessing.html\nPlease also refer to the documentation for alternative solver options:\n https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n n_iter_i = _check_optimize_result(\nc:\\users\\lsc11\\appdata\\local\\programs\\python\\python39\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py:763: ConvergenceWarning: lbfgs failed to converge (status=1):\nSTOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n\nIncrease the number of iterations (max_iter) or scale the data as shown in:\n https://scikit-learn.org/stable/modules/preprocessing.html\nPlease also refer to the documentation for alternative solver options:\n https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n n_iter_i = _check_optimize_result(\nc:\\users\\lsc11\\appdata\\local\\programs\\python\\python39\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py:763: ConvergenceWarning: lbfgs failed to converge (status=1):\nSTOP: TOTAL NO. 
of ITERATIONS REACHED LIMIT.\n\nIncrease the number of iterations (max_iter) or scale the data as shown in:\n https://scikit-learn.org/stable/modules/preprocessing.html\nPlease also refer to the documentation for alternative solver options:\n https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n n_iter_i = _check_optimize_result(\nc:\\users\\lsc11\\appdata\\local\\programs\\python\\python39\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py:763: ConvergenceWarning: lbfgs failed to converge (status=1):\nSTOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n\nIncrease the number of iterations (max_iter) or scale the data as shown in:\n https://scikit-learn.org/stable/modules/preprocessing.html\nPlease also refer to the documentation for alternative solver options:\n https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n n_iter_i = _check_optimize_result(\nc:\\users\\lsc11\\appdata\\local\\programs\\python\\python39\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py:763: ConvergenceWarning: lbfgs failed to converge (status=1):\nSTOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n\nIncrease the number of iterations (max_iter) or scale the data as shown in:\n https://scikit-learn.org/stable/modules/preprocessing.html\nPlease also refer to the documentation for alternative solver options:\n https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n n_iter_i = _check_optimize_result(\nc:\\users\\lsc11\\appdata\\local\\programs\\python\\python39\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py:763: ConvergenceWarning: lbfgs failed to converge (status=1):\nSTOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n\nIncrease the number of iterations (max_iter) or scale the data as shown in:\n https://scikit-learn.org/stable/modules/preprocessing.html\nPlease also refer to the documentation for alternative solver options:\n https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n n_iter_i = _check_optimize_result(\nc:\\users\\lsc11\\appdata\\local\\programs\\python\\python39\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py:763: ConvergenceWarning: lbfgs failed to converge (status=1):\nSTOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n\nIncrease the number of iterations (max_iter) or scale the data as shown in:\n https://scikit-learn.org/stable/modules/preprocessing.html\nPlease also refer to the documentation for alternative solver options:\n https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n n_iter_i = _check_optimize_result(\nc:\\users\\lsc11\\appdata\\local\\programs\\python\\python39\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py:763: ConvergenceWarning: lbfgs failed to converge (status=1):\nSTOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n\nIncrease the number of iterations (max_iter) or scale the data as shown in:\n https://scikit-learn.org/stable/modules/preprocessing.html\nPlease also refer to the documentation for alternative solver options:\n https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n n_iter_i = _check_optimize_result(\nc:\\users\\lsc11\\appdata\\local\\programs\\python\\python39\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py:763: ConvergenceWarning: lbfgs failed to converge (status=1):\nSTOP: TOTAL NO. 
of ITERATIONS REACHED LIMIT.\n\nIncrease the number of iterations (max_iter) or scale the data as shown in:\n https://scikit-learn.org/stable/modules/preprocessing.html\nPlease also refer to the documentation for alternative solver options:\n https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n n_iter_i = _check_optimize_result(\nc:\\users\\lsc11\\appdata\\local\\programs\\python\\python39\\lib\\site-packages\\sklearn\\linear_model\\_logistic.py:763: ConvergenceWarning: lbfgs failed to converge (status=1):\nSTOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n\nIncrease the number of iterations (max_iter) or scale the data as shown in:\n https://scikit-learn.org/stable/modules/preprocessing.html\nPlease also refer to the documentation for alternative solver options:\n https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n n_iter_i = _check_optimize_result(\n" ] ] ]
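The K-fold loop in the notebook above is easy to exercise in isolation. A hedged, self-contained sketch of the same 5-fold AUC pattern (the dataset is generated with make_classification purely for illustration; it is not the Titanic data, and max_iter is raised so the ConvergenceWarning in the recorded output does not recur):

```python
# 5-fold AUC loop mirroring the notebook's pattern, on synthetic data.
import numpy as np
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import KFold

X, y = make_classification(n_samples=500, n_features=17, random_state=0)
cv = KFold(n_splits=5, shuffle=True, random_state=0)
auc_history = []
for train_idx, test_idx in cv.split(X):
    # lbfgs converges within 500 iterations on this toy problem
    lr = LogisticRegression(random_state=0, max_iter=500)
    lr.fit(X[train_idx], y[train_idx])
    proba = lr.predict_proba(X[test_idx])[:, 1]
    auc_history.append(roc_auc_score(y[test_idx], proba))
print(["%.3f" % a for a in auc_history])  # one AUC per fold
```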
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb9a6d7e36019ab3ae16857f8787f0f2a9fdb2b8
381,510
ipynb
Jupyter Notebook
TEM_training.ipynb
Ccx55/n2v
dbf1daab862c6a70edab567f307a1b713eac472f
[ "BSD-3-Clause" ]
null
null
null
TEM_training.ipynb
Ccx55/n2v
dbf1daab862c6a70edab567f307a1b713eac472f
[ "BSD-3-Clause" ]
null
null
null
TEM_training.ipynb
Ccx55/n2v
dbf1daab862c6a70edab567f307a1b713eac472f
[ "BSD-3-Clause" ]
null
null
null
606.534181
103,592
0.925887
[ [ [ "# Noise2Void - 2D Example for SEM data", "_____no_output_____" ] ], [ [ "# We import all our dependencies.\nfrom n2v.models import N2VConfig, N2V\nimport numpy as np\nfrom csbdeep.utils import plot_history\nfrom n2v.utils.n2v_utils import manipulate_val_data\nfrom n2v.internals.N2V_DataGenerator import N2V_DataGenerator\nfrom matplotlib import pyplot as plt\nimport urllib\nimport os\nimport zipfile", "Using TensorFlow backend.\n" ] ], [ [ "# Download Example Data\nData by Reza Shahidi and Gaspar Jekely, Living Systems Institute, Exeter<br>\nThanks!\n", "_____no_output_____" ], [ "# Training Data Preparation", "_____no_output_____" ], [ "For training we load __one__ set of low-SNR images and use the <code>N2V_DataGenerator</code> to extract training <code>X</code> and validation <code>X_val</code> patches.", "_____no_output_____" ] ], [ [ "# We create our DataGenerator-object.\n# It will help us load data and extract patches for training and validation.\ndatagen = N2V_DataGenerator()", "_____no_output_____" ], [ "# We load all the '.tif' files from the 'data' directory.\n# If you want to load other types of files see the RGB example.\n# The function will return a list of images (numpy arrays).\nimgs = datagen.load_imgs_from_directory(directory = \"C:/Users/ccx55/OneDrive/Documents/GitHub/Phd/Single-nanoparticle-catalysis/CO_OX_TEM/Data/200420/all_data/\")\n\n# Let's look at the shape of the images.\nprint(imgs[0].shape,imgs[1].shape)\n# The function automatically added two extra dimensions to the images:\n# One at the beginning, is used to hold a potential stack of images such as a movie.\n# One at the end, represents channels.", "(1, 2048, 2048, 1) (1, 2048, 2048, 1)\n" ], [ "# Lets' look at the images.\n# We have to remove the added extra dimensions to display them as 2D images.\nplt.imshow(imgs[0][0,...,0], cmap='magma')\nplt.show()\n\nplt.imshow(imgs[1][0,...,0], cmap='magma')\nplt.show()", "_____no_output_____" ], [ "# We will use the first image to extract training patches and store them in 'X'\npatch_shape = (96,96)\nX = datagen.generate_patches_from_list(imgs[:1], shape=patch_shape)\n\n# We will use the second image to extract validation patches.\nX_val = datagen.generate_patches_from_list(imgs[1:], shape=patch_shape)\n\n# Patches are created so they do not overlap.\n# (Note: this is not the case if you specify a number of patches. See the docstring for details!)\n# Non-overlapping patches would also allow us to split them into a training and validation set \n# per image. 
This might be an interesting alternative to the split we performed above.", "Generated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\nGenerated patches: (3528, 96, 96, 1)\n" ], [ "# Just in case you don't know how to access the docstring of a method:\ndatagen.generate_patches_from_list?", "_____no_output_____" ], [ "# Let's look at one of our training and validation patches.\nplt.figure(figsize=(14,7))\nplt.subplot(1,2,1)\nplt.imshow(X[0,...,0], cmap='magma')\nplt.title('Training Patch');\nplt.subplot(1,2,2)\nplt.imshow(X_val[0,...,0], cmap='magma')\nplt.title('Validation Patch');", "_____no_output_____" ] ], [ [ "# Configure", "_____no_output_____" ], [ "Noise2Void comes with a special config-object, where we store network-architecture and training specific parameters. 
See the docstring of the <code>N2VConfig</code> constructor for a description of all parameters.\n\nWhen creating the config-object, we provide the training data <code>X</code>. From <code>X</code> we extract <code>mean</code> and <code>std</code> that will be used to normalize all data before it is processed by the network. We also extract the dimensionality and number of channels from <code>X</code>.\n\nCompared to supervised training (i.e. traditional CARE), we recommend using N2V with an increased <code>train_batch_size</code> and <code>batch_norm</code>.\nTo keep the network from learning the identity we have to manipulate the input pixels during training. For this we have the parameter <code>n2v_manipulator</code> with default value <code>'uniform_withCP'</code>. Most pixel manipulators will compute the replacement value based on a neighborhood. With <code>n2v_neighborhood_radius</code> we can control its size. \n\nOther pixel manipulators:\n* normal_withoutCP: samples the neighborhood according to a normal gaussian distribution, but without the center pixel\n* normal_additive: adds a random number to the original pixel value. The random number is sampled from a gaussian distribution with zero-mean and sigma = <code>n2v_neighborhood_radius</code>\n* normal_fitted: uses a random value from a gaussian normal distribution with mean equal to the mean of the neighborhood and standard deviation equal to the standard deviation of the neighborhood.\n* identity: performs no pixel manipulation\n\nFor faster training multiple pixels per input patch can be manipulated. In our experiments we manipulated about 0.198% of the input pixels per patch. For a patch size of 64 by 64 pixels this corresponds to about 8 pixels. This fraction can be tuned via <code>n2v_perc_pix</code>.\n\nFor Noise2Void training it is possible to pass arbitrarily large patches to the training method. From these patches random subpatches of size <code>n2v_patch_shape</code> are extracted during training. Default patch shape is set to (64, 64). \n\nIn the past we experienced bleedthrough artifacts between channels if training was terminated too early. To counter bleedthrough we added the `single_net_per_channel` option, which is turned on by default. In the back a single U-Net for each channel is created and trained independently, thereby removing the possibility of bleedthrough. <br/>\n__Note:__ Essentially the network gets multiplied by the number of channels, which increases the memory requirements. If your GPU memory is too small, you can always split the channels manually and train a network for each channel one after another.", "_____no_output_____" ], [ "<font color='red'>Warning:</font> to make this example notebook execute faster, we have set <code>train_epochs</code> to only 10. <br>For better results we suggest 100 to 200 <code>train_epochs</code>.", "_____no_output_____" ] ], [ [ "# train_steps_per_epoch is set to (number of training patches)/(batch size), so that each training patch \n# is shown once per epoch. 
\nconfig = N2VConfig(X, unet_kern_size=3, \n train_steps_per_epoch=int(X.shape[0]/128), train_epochs=10, train_loss='mse', batch_norm=True, \n train_batch_size=128, n2v_perc_pix=0.198, n2v_patch_shape=(64, 64), \n n2v_manipulator='uniform_withCP', n2v_neighborhood_radius=5)\n\n# Let's look at the parameters stored in the config-object.\nvars(config)", "_____no_output_____" ], [ "# a name used to identify the model\n \nmodel_name = 'n2v_2D'\n# the base directory in which our model will live\nbasedir = 'models'\n# We are now creating our network model.\nmodel = N2V(config, model_name, basedir=basedir)", "C:\\Users\\ccx55\\OneDrive\\Documents\\GitHub\\n2v\\n2v\\models\\n2v_standard.py:430: UserWarning: output path for model already exists, files may be overwritten: C:\\Users\\ccx55\\OneDrive\\Documents\\GitHub\\n2v\\models\\n2v_2D\n warnings.warn('output path for model already exists, files may be overwritten: %s' % str(self.logdir.resolve()))\n" ] ], [ [ "# Training\n\nTraining the model will likely take some time. We recommend monitoring the progress with TensorBoard, which allows you to inspect the losses during training. Furthermore, you can look at the predictions for some of the validation images, which can be helpful to recognize problems early on.\n\nYou can start TensorBoard in a terminal from the current working directory with <code>tensorboard --logdir=.</code> and then connect to http://localhost:6006/ with your browser.", "_____no_output_____" ] ], [ [ "# We are ready to start training now.\nhistory = model.train(X, X_val)", "Preparing validation data: 28%|██▊ | 153/544 [00:00<00:00, 1523.55it/s]" ] ], [ [ "### After training, let's plot training and validation loss.", "_____no_output_____" ] ], [ [ "print(sorted(list(history.history.keys())))\nplt.figure(figsize=(16,5))\nplot_history(history,['loss','val_loss']);", "['loss', 'lr', 'n2v_abs', 'n2v_mse', 'val_loss', 'val_n2v_abs', 'val_n2v_mse']\n" ] ], [ [ "## Export Model in BioImage ModelZoo Format\nSee https://imagej.net/N2V#Prediction for details.", "_____no_output_____" ] ], [ [ "model.export_TF(name='Noise2Void - 2D SEM Example', \n description='This is the 2D Noise2Void example trained on SEM data in python.', \n authors=[\"Tim-Oliver Buchholz\", \"Alexander Krull\", \"Florian Jug\"],\n test_img=X_val[0,...,0], axes='YX',\n patch_shape=patch_shape)", "INFO:tensorflow:No assets to save.\nINFO:tensorflow:No assets to write.\nINFO:tensorflow:SavedModel written to: /tmp/tmp2p3nbvb3/model/saved_model.pb\n\nModel exported in BioImage ModelZoo format:\n/home/tbuchhol/Gitrepos/n2v/examples/2D/denoising2D_SEM/models/n2v_2D/export.bioimage.io.zip\n" ] ] ]
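Two derived quantities in the config cell above can be sanity-checked by hand. A small sketch (the numbers are taken from the recorded outputs above: 3528 training patches, batch size 128, 64x64 subpatches, n2v_perc_pix=0.198):

```python
# Back-of-the-envelope check of the N2V config arithmetic discussed above.
patch_pixels = 64 * 64               # n2v_patch_shape = (64, 64)
masked = patch_pixels * 0.198 / 100  # n2v_perc_pix is given as a percentage
print(round(masked, 1))              # ~8.1 manipulated pixels per patch
print(int(3528 / 128))               # train_steps_per_epoch -> 27
```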
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb9a7515a8de5ff6d6a172150303d6cfe4c47688
15,010
ipynb
Jupyter Notebook
10-deploy-model.ipynb
hhy37/MachineLearningSamples-EnergyDemandTimeSeriesForecasting
fb6144e420ba2f1be830b1ee65be8e7b758c6df7
[ "MIT" ]
29
2017-11-20T02:32:33.000Z
2019-06-22T04:32:11.000Z
10-deploy-model.ipynb
hhy37/MachineLearningSamples-EnergyDemandTimeSeriesForecasting
fb6144e420ba2f1be830b1ee65be8e7b758c6df7
[ "MIT" ]
1
2018-01-16T21:56:52.000Z
2018-02-08T15:10:06.000Z
10-deploy-model.ipynb
hhy37/MachineLearningSamples-EnergyDemandTimeSeriesForecasting
fb6144e420ba2f1be830b1ee65be8e7b758c6df7
[ "MIT" ]
23
2017-10-06T00:54:28.000Z
2019-06-20T22:57:11.000Z
30.140562
883
0.520986
[ [ [ "# Deploy model\n**Important**: Change the kernel to *PROJECT_NAME local*. You can do this from the *Kernel* menu under *Change kernel*. You cannot deploy the model using the *PROJECT_NAME docker* kernel.", "_____no_output_____" ] ], [ [ "from azureml.api.schema.dataTypes import DataTypes\nfrom azureml.api.schema.sampleDefinition import SampleDefinition\nfrom azureml.api.realtime.services import generate_schema\nimport pandas as pd\nimport numpy as np\nimport imp\nimport pickle\nimport os\nimport sys\nimport json", "_____no_output_____" ], [ "from azureml.logging import get_azureml_logger\nrun_logger = get_azureml_logger()\nrun_logger.log('amlrealworld.timeseries.deploy-model','true')", "_____no_output_____" ] ], [ [ "Enter the name of the model to deploy.", "_____no_output_____" ] ], [ [ "model_name = \"linear_regression\"", "_____no_output_____" ] ], [ [ "Load the test dataset and retain just one row. This record will be used to create and input schema for the web service. It will also allow us to simulate invoking the web service with features for one hour period and generating a demand forecast for this hour.", "_____no_output_____" ] ], [ [ "aml_dir = os.environ['AZUREML_NATIVE_SHARE_DIRECTORY']\ntest_df = pd.read_csv(os.path.join(aml_dir, 'nyc_demand_test.csv'), parse_dates=['timeStamp'])\ntest_df = test_df.drop(['demand', 'timeStamp'], axis=1).copy().iloc[[0]]\ntest_df", "_____no_output_____" ] ], [ [ "Load model from disk and transfer it to the working directory. ", "_____no_output_____" ] ], [ [ "with open(os.path.join(aml_dir, model_name + '.pkl'), 'rb') as f:\n mod = pickle.load(f)\n\nwith open('model_deploy.pkl', 'wb') as f:\n pickle.dump(mod, f)", "_____no_output_____" ] ], [ [ "Check model object has loaded as expected.", "_____no_output_____" ] ], [ [ "mod", "_____no_output_____" ] ], [ [ "Apply model to predict test record", "_____no_output_____" ] ], [ [ "np.asscalar(mod.predict(test_df))", "_____no_output_____" ] ], [ [ "### Author a realtime web service", "_____no_output_____" ], [ "Create a score.py script which implements the scoring function to run inside the web service. 
Change the model_name variable as required.", "_____no_output_____" ] ], [ [ "%%writefile score.py\n# The init and run functions will load and score the input using the saved model.\n# The score.py file will be included in the web service deployment package.\ndef init():\n import pickle\n import os\n global model\n \n with open('model_deploy.pkl', 'rb') as f:\n model = pickle.load(f)\n \ndef run(input_df):\n input_df = input_df[['precip', 'temp', 'hour', 'month', 'dayofweek',\n 'temp_lag1', 'temp_lag2', 'temp_lag3', 'temp_lag4', 'temp_lag5',\n 'temp_lag6', 'demand_lag1', 'demand_lag2', 'demand_lag3',\n 'demand_lag4', 'demand_lag5', 'demand_lag6']]\n try:\n if (input_df.shape != (1,17)):\n return 'Bad input: Expecting dataframe of shape (1,17)'\n else:\n pred = model.predict(input_df)\n return int(pred)\n except Exception as e:\n return(str(e))", "Writing score.py\n" ] ], [ [ "This script will be written to your current working directory:", "_____no_output_____" ] ], [ [ "os.getcwd()", "_____no_output_____" ] ], [ [ "#### Test the *init* and *run* functions", "_____no_output_____" ] ], [ [ "import score\nimp.reload(score)", "_____no_output_____" ], [ "score.init()\nscore.run(test_df)", "_____no_output_____" ] ], [ [ "#### Create web service schema\nThe web service schema provides details on the required structure of the input data as well as the data types of each column.", "_____no_output_____" ] ], [ [ "inputs = {\"input_df\": SampleDefinition(DataTypes.PANDAS, test_df)}\ngenerate_schema(run_func=score.run, inputs=inputs, filepath='service_schema.json')", "_____no_output_____" ] ], [ [ "#### Deploy the web service\nThe command below deploys a web service named \"demandforecast\", with input schema defined by \"service_schema.json\". The web service runs \"score.py\" which scores the input data using the model \"model_deploy.pkl\". This may take a few minutes.", "_____no_output_____" ] ], [ [ "!az ml service create realtime -f score.py -m model_deploy.pkl -s service_schema.json -n demandforecast -r python", "_____no_output_____" ] ], [ [ "Check that the web service is running.", "_____no_output_____" ] ], [ [ "!az ml service show realtime -i demandforecast", "_____no_output_____" ] ], [ [ "Test that the web service is working by invoking it with a test record.", "_____no_output_____" ] ], [ [ "!az ml service run realtime -i demandforecast -d \"{\\\"input_df\\\": [{\\\"hour\\\": 0, \\\"month\\\": 6, \\\"demand_lag3\\\": 7576.558, \\\"temp_lag5\\\": 77.36, \\\"temp\\\": 74.63, \\\"demand_lag1\\\": 6912.7, \\\"demand_lag5\\\": 7788.292, \\\"temp_lag6\\\": 80.92, \\\"temp_lag3\\\": 76.72, \\\"demand_lag6\\\": 8102.142, \\\"temp_lag4\\\": 75.85, \\\"precip\\\": 0.0, \\\"temp_lag2\\\": 75.72, \\\"demand_lag2\\\": 7332.625, \\\"temp_lag1\\\": 75.1, \\\"demand_lag4\\\": 7603.008, \\\"dayofweek\\\": 4}]}\"", "_____no_output_____" ] ], [ [ "#### Delete the web service", "_____no_output_____" ] ], [ [ "!az ml service delete realtime --id=demandforecast", "_____no_output_____" ] ] ]
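Before deploying, the run() contract in the notebook's score.py can be smoke-tested locally without Azure. A hedged sketch (the zero-filled frames are placeholders rather than real demand data; only the shape check that run() performs is exercised here):

```python
# Local check of score.run()'s input contract: a (1, 17) frame is accepted,
# anything else would trigger the 'Bad input' branch in run().
import pandas as pd

cols = ['precip', 'temp', 'hour', 'month', 'dayofweek',
        'temp_lag1', 'temp_lag2', 'temp_lag3', 'temp_lag4', 'temp_lag5',
        'temp_lag6', 'demand_lag1', 'demand_lag2', 'demand_lag3',
        'demand_lag4', 'demand_lag5', 'demand_lag6']

good = pd.DataFrame([[0.0] * 17], columns=cols)     # shape (1, 17) -> scored
bad = pd.DataFrame([[0.0] * 17] * 2, columns=cols)  # shape (2, 17) -> rejected
print(good.shape == (1, 17))  # True
print(bad.shape == (1, 17))   # False
```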
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb9a83c0a762fe40f62593399037b75a5654d3fb
3,358
ipynb
Jupyter Notebook
Index.ipynb
philge/WhirlwindTourOfPython
3cea3b1977dbbdc7f6658fbca24fca1b27606401
[ "CC0-1.0" ]
2
2020-07-07T09:13:12.000Z
2021-01-25T16:36:53.000Z
Index.ipynb
karthikraman/WhirlwindTourOfPython
3cea3b1977dbbdc7f6658fbca24fca1b27606401
[ "CC0-1.0" ]
null
null
null
Index.ipynb
karthikraman/WhirlwindTourOfPython
3cea3b1977dbbdc7f6658fbca24fca1b27606401
[ "CC0-1.0" ]
2
2018-03-26T12:44:30.000Z
2020-01-18T15:26:42.000Z
36.901099
97
0.645325
[ [ [ "# A Whirlwind Tour of Python\n\n*Jake VanderPlas, Summer 2016*\n\nThese are the Jupyter Notebooks behind my O'Reilly report,\n*A Whirlwind Tour of Python*.\n\n*A Whirlwind Tour of Python* is a fast-paced introduction to essential\ncomponents of the Python language for researchers and developers who are\nalready familiar with programming in another language.\n\nThe material is particularly aimed at those who wish to use Python for data \nscience and/or scientific programming, and in this capacity serves as an\nintroduction to my upcoming book, *The Python Data Science Handbook*.\nThese notebooks are adapted from lectures and workshops I've given on these\ntopics at University of Washington and at various conferences, meetings, and\nworkshops around the world.", "_____no_output_____" ], [ "## Index\n\n1. [Introduction](00-Introduction.ipynb)\n2. [How to Run Python Code](01-How-to-Run-Python-Code.ipynb)\n3. [Basic Python Syntax](02-Basic-Python-Syntax.ipynb)\n4. [Python Semantics: Variables](03-Semantics-Variables.ipynb)\n5. [Python Semantics: Operators](04-Semantics-Operators.ipynb)\n6. [Built-In Scalar Types](05-Built-in-Scalar-Types.ipynb)\n7. [Built-In Data Structures](06-Built-in-Data-Structures.ipynb)\n8. [Control Flow Statements](07-Control-Flow-Statements.ipynb)\n9. [Defining Functions](08-Defining-Functions.ipynb)\n10. [Errors and Exceptions](09-Errors-and-Exceptions.ipynb)\n11. [Iterators](10-Iterators.ipynb)\n12. [List Comprehensions](11-List-Comprehensions.ipynb)\n13. [Generators and Generator Expressions](12-Generators.ipynb)\n14. [Modules and Packages](13-Modules-and-Packages.ipynb)\n15. [Strings and Regular Expressions](14-Strings-and-Regular-Expressions.ipynb)\n16. [Preview of Data Science Tools](15-Preview-of-Data-Science-Tools.ipynb)\n17. [Resources for Further Learning](16-Further-Resources.ipynb)\n18. [Appendix: Code To Reproduce Figures](17-Figures.ipynb)", "_____no_output_____" ], [ "## License\n\nThis material is released under the \"No Rights Reserved\" [CC0](LICENSE)\nlicense, and thus you are free to re-use, modify, build-on, and enhance\nthis material for any purpose.\n\nThat said, I request that if you use or adapt this material, that you include\na proper attribution and/or citation (citation information coming soon).\n\nRead more about CC0 [here](https://creativecommons.org/share-your-work/public-domain/cc0/).", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown", "markdown", "markdown" ] ]
cb9a8b0b10740b0a7ea8f6b0a5eba69c3cb3391c
19,999
ipynb
Jupyter Notebook
santiagoangee/Ejercicio1.1-Copy1.ipynb
spulido99/NetworksAnalysis
3f6f585305f5825e25488bae8c6b427bc18436c6
[ "MIT" ]
null
null
null
santiagoangee/Ejercicio1.1-Copy1.ipynb
spulido99/NetworksAnalysis
3f6f585305f5825e25488bae8c6b427bc18436c6
[ "MIT" ]
null
null
null
santiagoangee/Ejercicio1.1-Copy1.ipynb
spulido99/NetworksAnalysis
3f6f585305f5825e25488bae8c6b427bc18436c6
[ "MIT" ]
null
null
null
25.838501
171
0.484974
[ [ [ "# Ejercicios Graphs, Paths & Components\n\nEjercicios básicos de Grafos.", "_____no_output_____" ], [ "## Ejercicio - Número de Nodos y Enlaces\n\n_ (resuelva en código propio y usando la librería NetworkX (python) o iGraph (R)) _\n\nCuente el número de nodos y enlaces con los siguientes links (asumiendo que el grafo puede ser dirigido Y no dirigido): ", "_____no_output_____" ] ], [ [ "edges = set([(1, 2), (3, 1), (3, 2), (2, 4)])", "_____no_output_____" ], [ "edges = set([(1, 2), (3, 1), (3, 2), (2, 4)])\nedges_list = [i[0] for i in edges] + [i[1] for i in edges]\nnodes = set(edges_list)\n\nedges_number = len(edges)\nnodes_number = len(nodes)\n\nprint \"Número de nodos: \" + str(nodes_number)\nprint \"Número de enlaces: \" + str(edges_number)\n\n\"\"\"Now using NetorkX\"\"\"\nimport networkx as nx\n\nG = nx.Graph()\nG.add_edges_from(edges)\n\nprint \"Número de nodos: \" + str(G.number_of_nodes())\nprint \"Número de aristas: \" + str(G.number_of_edges())", "Número de nodos: 4\nNúmero de enlaces: 4\nNúmero de nodos: 4\nNúmero de aristas4\n" ] ], [ [ "## Ejercicio - Matriz de Adyacencia\n\n_ (resuelva en código propio y usando la librería NetworkX (python) o iGraph (R)) _\n\n\nCree la matriz de adyacencia del grafo del ejercicio anterior (para dirigido y no-dirigido)", "_____no_output_____" ] ], [ [ "\"\"\"Código propio\"\"\"\nimport numpy as np\n\nedges = set([(1,2), (3, 1), (3, 2), (2, 4)])\n\ndef adj_matrix_dgraph(edges):\n edges_list = [i[0] for i in edges] + [i[1] for i in edges]\n nodes = set(edges_list)\n \n \"\"\"create matrix\"\"\"\n matrix = np.zeros((len(nodes),len(nodes)))\n \n for edge in edges:\n matrix[edge[0] - 1,edge[1] -1] = 1\n \n return matrix\n\ndef adj_matrix(edges):\n edges_list = [i[0] for i in edges] + [i[1] for i in edges]\n nodes = set(edges_list)\n \n \"\"\"create matrix\"\"\"\n matrix = np.zeros((len(nodes),len(nodes)))\n \n for edge in edges:\n i = edge[0]-1\n j = edge[1]-1\n matrix[i,j] = 1\n matrix[j,i] = 1\n \n return matrix\n\nprint \"matriz para grafo dirigido:\\n\" + str(adj_matrix_dgraph(edges))\nprint \"\\n\"\nprint \"matriz para grafo no dirigido:\\n\" + str(adj_matrix(edges))\n\n\"\"\"Solución con NetworkX\"\"\"\nimport networkx as nx\nG = nx.Graph()\nG.add_edges_from(edges)\n\nmatrix = nx.adjacency_matrix(G)\n\nprint matrix\n\nDG = nx.DiGraph()\nDG.add_edges_from(edges)\n\nprint \"\\n\"\nprint (nx.adjacency_matrix(DG))", "matriz para grafo dirigido:\n[[ 0. 1. 0. 0.]\n [ 0. 0. 0. 1.]\n [ 1. 1. 0. 0.]\n [ 0. 0. 0. 0.]]\n\n\nmatriz para grafo no dirigido:\n[[ 0. 1. 1. 0.]\n [ 1. 0. 1. 1.]\n [ 1. 1. 0. 0.]\n [ 0. 1. 0. 
0.]]\n (0, 1)\t1\n (0, 2)\t1\n (1, 0)\t1\n (1, 2)\t1\n (1, 3)\t1\n (2, 0)\t1\n (2, 1)\t1\n (3, 1)\t1\n\n\n (0, 1)\t1\n (1, 3)\t1\n (2, 0)\t1\n (2, 1)\t1\n" ] ], [ [ "D## Ejercicio - Sparseness\n\nCalcule la proporción entre número de links existentes en 3 redes reales (http://snap.stanford.edu/data/index.html) contra el número de links posibles.", "_____no_output_____" ] ], [ [ "import numpy as np\n\n\"\"\" The entered datasets correspond to non-directed graphs\"\"\"\n\"\"\" information about the dataset can be found in the following link:\n http://snap.stanford.edu/data/egonets-Facebook.html \"\"\"\n\nedges1 = np.genfromtxt('0.edges', dtype=\"int\", delimiter=\" \")\n\nedges2 = np.genfromtxt('348.edges', dtype=\"int\", delimiter=\" \")\n\nedges3 = np.genfromtxt('414.edges', dtype=\"int\", delimiter=\" \")\n\ndef edges_to_nodes(edges):\n edges_list = [i[0] for i in edges] + [i[1] for i in edges]\n nodes = set(edges_list)\n \n return nodes\n\n\ndef edge_rate(edges):\n nodes = edges_to_nodes(edges)\n n = len(nodes)\n \n print (\"len(n) = %d\" %(n))\n \n \"\"\" For a non-directed graph and excluding reflexive relations\"\"\"\n possible_edges = (n*(n-1))/2\n \n print (\"possible_edges=%d\" % (possible_edges))\n result = float(len(edges))/possible_edges\n return result\n\ndef edge_rate_dgraph(edges):\n nodes = edges_to_nodes(edges)\n n = len(nodes)\n \n \"\"\" For a directed graph including reflexive relations\"\"\"\n possible_edges = n**2\n result = float(len(edges))/possible_edges\n return result\n\nprint (edge_rate(edges1))\nprint (edge_rate(edges2))\nprint (edge_rate(edges3))\n\n\"\"\" With networkx \"\"\"\n\nimport networkx as nx\n\nG1 = nx.read_edgelist('0.edges', delimiter=\" \")\nG2 = nx.read_edgelist('348.edges', delimiter=\" \")\nG3 = nx.read_edgelist('414.edges', delimiter=\" \")\n\n\ndef possible_edges(graph):\n nodes = graph.number_of_nodes()\n return (nodes*(nodes-1))/2\n\nprint (\"possible_edges(G1)=%d\" % (possible_edges(G1)))\n\ndef edge_rate_nx(graph):\n return float(graph.number_of_edges())/float(possible_edges(graph))\n\nprint (\"\\n\")\nprint (edge_rate_nx(G1))\nprint (edge_rate_nx(G2))\nprint (edge_rate_nx(G3))", "len(n) = 333\npossible_edges=55278\n0.09113933210318753\nlen(n) = 224\npossible_edges=24976\n0.2556053811659193\nlen(n) = 150\npossible_edges=11175\n0.3029977628635347\npossible_edges(G1)=55278\n\n\n0.045569666051593766\n0.12780269058295965\n0.15149888143176735\n" ] ], [ [ "En la matriz de adyacencia de cada uno de las redes elegidas, cuantos ceros hay?", "_____no_output_____" ] ], [ [ "\"\"\" Without NetworkX \"\"\"\nimport numpy as np\n\ndef edges_to_nodes(edges):\n edges_list = [i[0] for i in edges] + [i[1] for i in edges]\n nodes = set(edges_list)\n print (\"len(nodes)=%d\" %(len(nodes)))\n return nodes\n\n\"\"\" The entered datasets correspond to non-directed graphs\"\"\"\n\"\"\" information about the dataset can be found in the following link:\n http://snap.stanford.edu/data/egonets-Facebook.html \"\"\"\n\nedges1 = np.genfromtxt('0.edges', dtype=\"int\", delimiter=\" \")\nprint (len(edges1))\n\nedges2 = np.genfromtxt('348.edges', dtype=\"int\", delimiter=\" \")\nprint (len(edges2))\n\nedges3 = np.genfromtxt('414.edges', dtype=\"int\", delimiter=\" \")\nprint (len(edges3))\n\n\"\"\" Asuming there aren't repeated elements in the dataset \"\"\"\n\ndef number_of_zeroes(edges):\n n = len(edges_to_nodes(edges))\n zeroes = n**2 - len(edges)\n return zeroes\n\ndef number_of_zeroes_dgraph(edges):\n n = len(edges_to_nodes(edges))\n zeroes = n**2 - len(edges)\n return 
zeroes\n\n\nprint (\"number_of_zeroes(edges1)=%d\" %(number_of_zeroes(edges1)))\nprint (\"number_of_zeroes(edges2)=%d\" %(number_of_zeroes(edges2)))\nprint (\"number_of_zeroes(edges3)=%d\" %(number_of_zeroes(edges3)))\n", "5038\n6384\n3386\nlen(nodes)=333\nnumber_of_zeroes(edges1)=105851\nlen(nodes)=224\nnumber_of_zeroes(edges2)=43792\nlen(nodes)=150\nnumber_of_zeroes(edges3)=19114\n" ], [ "\"\"\" With NetworkX \"\"\"\nimport networkx as nx\n\n\"\"\" The selected datasets are non-directed graphs. Therefore their adjacency matrix is simetrical \"\"\"\n\n\"\"\" For undirected graphs NetworkX stores only the edges of one of the matrix's triangles (upper or lower)\"\"\"\n\nG1 = nx.read_edgelist('0.edges', delimiter=\" \")\nprint (len(G1.edges()))\n\nG2 = nx.read_edgelist('348.edges', delimiter=\" \")\nprint (len(G2.edges()))\n\nG3 = nx.read_edgelist('414.edges', delimiter=\" \")\nprint (len(G3.edges()))\n\nN1 = len(G1.nodes())\nN2 = len(G2.nodes())\nN3 = len(G3.nodes())\n\ndef zeroes(graph):\n N = len(graph.nodes())\n result = N**2 - 2*len(graph.edges())\n print (\"zeroes=%d\" %(result))\n return result\n\nzeroes(G1)\nzeroes(G2)\nzeroes(G3)\n\n", "2519\n3192\n1693\nzeroes=105851\nzeroes=43792\nzeroes=19114\n" ] ], [ [ "## Ejercicio - Redes Bipartitas\n\nDefina una red bipartita y genere ambas proyecciones, explique qué son los nodos y links tanto de la red original como de las proyeccciones", "_____no_output_____" ] ], [ [ "import numpy as np\n\nnetwork1 = set([(1,'a'),(3,'b'), (4,'d'),(5,'b'),(1,'b'), (2,'d'), (1,'d'), (3,'c')])\n\ndef projection_u(edges):\n edges_list = list(edges)\n result = []\n for i in range(0,len(edges_list)):\n for j in range(i+1, len(edges_list)):\n if edges_list[i][1] == edges_list[j][1]:\n tup = (edges_list[i][0], edges_list[j][0])\n result.append(tup)\n \n return set(result)\n\nprint (projection_u(network1))\n\ndef projection_v(edges):\n edges_list = list(edges)\n result = []\n for i in range(0,len(edges_list)):\n for j in range(i+1, len(edges_list)):\n if edges_list[i][0] == edges_list[j][0]:\n tup = (edges_list[i][1], edges_list[j][1])\n result.append(tup)\n \n return set(result)\n\nprint (projection_v(network1)) ", "{(1, 3), (1, 4), (2, 1), (1, 5), (2, 4), (5, 3)}\n{('a', 'd'), ('b', 'd'), ('a', 'b'), ('c', 'b')}\n" ] ], [ [ "## Ejercicio - Paths\n\nCree un grafo de 5 nodos con 5 enlaces. 
Elija dos nodos cualquiera e imprima:\n+ 5 Paths diferentes entre los nodos\n+ El camino mas corto entre los nodos\n+ El diámetro de la red\n+ Un self-avoiding path\n", "_____no_output_____" ], [ "# Ejercicio - Componentes\n\nBaje una red real (http://snap.stanford.edu/data/index.html) y lea el archivo ", "_____no_output_____" ], [ "Utilizando NetworkX o iGraph descubra el número de componentes", "_____no_output_____" ], [ "Implemente el algorithmo Breadth First para encontrar el número de componentes (revise que el resultado es el mismo que utilizando la librería)", "_____no_output_____" ], [ "## Ejercicio - Degree distribution\n\n_ (resuelva en código propio y usando la librería NetworkX (python) o iGraph (R)) _\n\nHaga un plot con la distribución de grados de la red real", "_____no_output_____" ], [ "Calcule el grado promedio", "_____no_output_____" ], [ "## Ejercicio - Diámetro", "_____no_output_____" ] ], [ [ "N = 5", "_____no_output_____" ] ], [ [ "Cree un grafo de N nodos con el máximo diámetro posible", "_____no_output_____" ], [ "Cree un grafo de N nodos con el mínimo diámetro posible", "_____no_output_____" ], [ "Cree un grafo de N nodos que sea un ciclo simple", "_____no_output_____" ], [ "## Ejercicio - Pregunta \"real\"\n\nUna aerolínea tiene las siguientes rutas desde las ciudades a las que sirve (cada par tiene servicio en ambas direcciones).", "_____no_output_____" ] ], [ [ "routemap = [('St. Louis', 'Miami'), \n ('St. Louis', 'San Diego'), \n ('St. Louis', 'Chicago'), \n ('San Diego', 'Chicago'), \n ('San Diego', 'San Francisco'), \n ('San Diego', 'Minneapolis'), \n ('San Diego', 'Boston'), \n ('San Diego', 'Portland'), \n ('San Diego', 'Seattle'), \n ('Tulsa', 'New York'), \n ('Tulsa', 'Dallas'), \n ('Phoenix', 'Cleveland'), \n ('Phoenix', 'Denver'), \n ('Phoenix', 'Dallas'), \n ('Chicago', 'New York'), \n ('Chicago', 'Los Angeles'), \n ('Miami', 'New York'), \n ('Miami', 'Philadelphia'), \n ('Miami', 'Denver'), \n ('Boston', 'Atlanta'), \n ('Dallas', 'Cleveland'), \n ('Dallas', 'Albuquerque'), \n ('Philadelphia', 'Atlanta'), \n ('Denver', 'Minneapolis'), \n ('Denver', 'Cleveland'), \n ('Albuquerque', 'Atlanta'), \n ('Minneapolis', 'Portland'), \n ('Los Angeles', 'Seattle'), \n ('San Francisco', 'Portland'), \n ('San Francisco', 'Seattle'), \n ('San Francisco', 'Cleveland'), \n ('Seattle', 'Portland')]", "_____no_output_____" ] ], [ [ "Cuál es el máximo número de intercambios que tendría que hacer un pasajero en un solo viaje entre dos ciudades servidas? (suponiendo rutas óptimas)", "_____no_output_____" ], [ "Si usted necesitara viajar mucho en esta aerolínea, cual sería el lugar óptimo para vivir? (i.e. minimizar el número de intercambios para llegar a cualquier ciudad) ", "_____no_output_____" ], [ "Visualize la red", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ] ]
cb9a9892d0d3ec52f0789c3340d8415aa2ef5dea
21,543
ipynb
Jupyter Notebook
jwst_validation_notebooks/resample/jwst_resample_miri_test/jwst_resample_miri_testing.ipynb
jbhagan/jwst_validation_notebooks
01062937ba0d5797c5ea08dfca184b3864ff7f1d
[ "BSD-3-Clause" ]
null
null
null
jwst_validation_notebooks/resample/jwst_resample_miri_test/jwst_resample_miri_testing.ipynb
jbhagan/jwst_validation_notebooks
01062937ba0d5797c5ea08dfca184b3864ff7f1d
[ "BSD-3-Clause" ]
null
null
null
jwst_validation_notebooks/resample/jwst_resample_miri_test/jwst_resample_miri_testing.ipynb
jbhagan/jwst_validation_notebooks
01062937ba0d5797c5ea08dfca184b3864ff7f1d
[ "BSD-3-Clause" ]
2
2019-04-08T15:33:14.000Z
2019-04-10T16:23:32.000Z
42.241176
552
0.638583
[ [ [ "<a id=\"title_ID\"></a>\n# JWST Pipeline Validation Testing Notebook: Calwebb_Image3, Resample step\n\n<span style=\"color:red\"> **Instruments Affected**</span>: FGS, MIRI, NIRCam, NIRISS, NIRSpec \n\nTested on MIRI Simulated data\n\n### Table of Contents\n<div style=\"text-align: left\"> \n\n<br> [Introduction](#intro_ID) <br> [Run JWST Pipelines](#pipeline_ID) <br> [Imports](#imports_ID) <br> [Create an association table for your cal files and run them through calwebb_image3](#runpipeline_ID) <br> [Find Stars in Image and Determine their Coordinates](#runscript_ID) <br> [Compare RA and Dec to expected Values](#residual_ID) <br> [About This Notebook](#about_ID) <br>\n\n\n</div>", "_____no_output_____" ], [ "<a id=\"intro_ID\"></a>\n# Introduction\n\n\nThis test is designed to test the resample step in the calwebb_image3 pipeline. At the end of the calwebb_image3 pipeline, the set of files defined in an association table will be distortion corrected and combined. Resample is the step that applies the distortion correction using the drizzling algorithm (as defined in the DrizzlePac handbook) and combines the listed files. For more information on the pipeline step visit the links below. \n\nStep description: https://jwst-pipeline.readthedocs.io/en/latest/jwst/resample/main.html\n\nPipeline code: https://github.com/spacetelescope/jwst/tree/master/jwst/resample\n\nThe data for this test were created with the MIRI Data Simulator, and the documentation for that code can be found here: http://miri.ster.kuleuven.be/bin/view/Public/MIRISim_Public\n\n\n### Calibration WG Requested Algorithm: \n\nA short description and link to the page: https://outerspace.stsci.edu/display/JWSTCC/Vanilla+Image+Combination\n\n\n### Defining Terms\nDefinition of terms or acronymns.\n\nJWST: James Webb Space Telescope\n\nMIRI: Mid-Infrared Instrument\n\nMIRISim: MIRI Data Simulator\n\n### Description of test\n\nThis test is performed by creating a set of simulated data with multiple point sources located at specified coordinates. The simulator puts in the expected distortion, so the initial output data comes out of the simulator in distorted coordinates. When this data is then run through calwebb_detector1, calwebb_image2 and calwebbb_image3, the combined, undistorted image should have the point sources registered at the expected locations. In flight, this test can be repeated with known stars that should be found at their expected coordinates.\n\n### Create the data for testing\n\nThe set of data used in this particular test were created with the MIRI Data Simulator (MIRISim). Referring to the MIRISim link, you can see how to set up and run the simulator to re-create the input files if you wish. The data was run with a scene.ini file that specified what the scene should look like, with coordinates for the stars given in units of arcsecond offsets from the center of the field of view. The scene.ini file as well as the setup files simuation.ini and simulator.ini are needed to run the simulation.\n\nOnce in the mirisim conda environment, the simulation is run with the command line:\n> mirisim simulation.ini\n\nThe simulator created four files, two exposures each at two different dither positions, using the specified filter. Make sure the WCSAXES header keyword in the SCI extension is set to 2 and not 4. 
If it is set to 4, change it to 2.\n\n\n\n[Top of Page](#title_ID)", "_____no_output_____" ], [ "<a id=\"pipeline_ID\"></a>\n## Run JWST Pipelines\n\nThe four files were then run individually through the calwebb_detector1 and calwebb_image2 pipelines. When running the calwebb_detector1 pipeline, increase the threshold for a detection in the jump step from 4 sigma to 10 sigma to avoid a current issue where the jump detection step flags a large percentage of pixels as jumps. This can be done on the command line. (commands to be typed start with $)\n\nThe pipelines can be run on the command line with the following commands or put into a script while using the pipeline conda environment.\n\n$ strun calwebb_detector1.cfg filename --steps.jump.rejection_threshold 10.0\n\nThe output of the calwebb_detector1 pipeline is a set of four *rate.fits files which will then be run through the calwebb_image2 pipeline.\n\n$ strun calwebb_image2.cfg filename\n\nThe output of the calwebb_image2 pipeline was then a set of four *cal.fits files. An association table was created that included these four files as input, and then the files and the association table were run through the calwebb_image3 pipeline. \n\nThe cal files are stored in artifactory, and this notebook is meant to pull those files for the test of resample. Step through the cells of this notebook to run calwebb_image3 and then check the alignment.\n\n\n\n[Top of Page](#title_ID)", "_____no_output_____" ], [ "\n<a id=\"imports_ID\"></a>\n# Imports\nThe following packages will need to be imported for the scripts to work.\n\n\n* astropy.io for opening files\n* astropy.stats for sigma clipping routine\n* astropy.visualization for image plotting\n* ci_watson.artifactory_helpers to read in data from artifactory\n* jwst.datamodels for opening files as a JWST Datamodel\n* jwst.pipeline to run the pipeline step/module\n* jwst.associations to create association table\n* numpy for calculations\n* matplotlib.pyplot.plt to generate plot\n* os for path information \n* photutils for star finding and aperture photometry\n* regtest to retrieve data from artifactory needed to run notebook\n\n\n[Top of Page](#title_ID)", "_____no_output_____" ] ], [ [ "from astropy.io import ascii, fits\nfrom astropy.stats import sigma_clipped_stats\nfrom astropy.table import Column\nfrom astropy.visualization import SqrtStretch\nfrom astropy.visualization.mpl_normalize import ImageNormalize\nfrom ci_watson.artifactory_helpers import get_bigdata\nfrom jwst.datamodels import DrizProductModel, ImageModel\nfrom jwst.pipeline import Image3Pipeline\nfrom jwst import associations\nfrom jwst.associations.lib.rules_level3_base import DMS_Level3_Base\nfrom jwst.associations import asn_from_list\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nfrom photutils import CircularAperture, DAOStarFinder, CircularAnnulus, aperture_photometry\nfrom jwst.regtest.regtestdata import RegtestData", "_____no_output_____" ] ], [ [ "<a id=\"runpipeline_ID\"></a>\n# Open an association table for your cal files and run them through calwebb_image3\n\nLoad the association table to use the .cal files that were output from calwebb_image2. 
That will be the input for calwebb_image3 that uses the resample step to combine each of the individual images.\n\n[Top of Page](#title_ID)", "_____no_output_____" ] ], [ [ "# Use regtest infrastructure to access all input files associated with the association file\n\nrtdata = RegtestData(inputs_root=\"jwst_validation_notebooks\", env=\"validation_data\")\nrtdata.get_asn(\"resample/resample_miri_test/starfield_74_asnfile.json\")\nrtdata.input #this should be the list of files associated with the asn", "_____no_output_____" ], [ "# Run Calwebb_image3 on the association table\n \n# set any specific parameters\n# tweakreg parameters to allow data to run\nfwhm=2.5 # Gaussian kernel FWHM of objects expected, default=2.5\nminobj=5 # minimum number of objects needed to match positions for a good fit, default=15\nsnr= 250 # signal to noise threshold, default=5\nsigma= 3 # clipping limit, in sigma units, used when performing fit, default=3\nfit_geom='shift' # ftype of affine transformation to be considered when fitting catalogs, default='general'\nuse2dhist=False # boolean indicating whether to use 2D histogram to find initial offset, default=True\n \npipe3=Image3Pipeline() \npipe3.tweakreg.kernel_fwhm = fwhm\npipe3.tweakreg.snr_threshold = snr\npipe3.tweakreg.minobj = minobj\npipe3.tweakreg.sigma = sigma\npipe3.tweakreg.fitgeometry = fit_geom\npipe3.tweakreg.use2dhist = use2dhist\n#pipe3.skymatch.skip = True # test to see if this affects the final output\npipe3.source_catalog.save_results = True\npipe3.save_results = True\n\n \n# run Image3\n\nim = pipe3.run(rtdata.input)\n", "_____no_output_____" ] ], [ [ "<a id=\"runscript_ID\"></a>\n# Find stars in image and determine their coordinates\n\nThe output of the pipeline command in the previous step (given our association table) is an i2d.fits file. This file is in the format of a JWST Data model type of DrizProductModel and should be opened as such. It is this file that we will use for source finding and to determine whether the stars are found in the expected locations. The i2d file and the associated text file containing the input coordinates of the stars can be found in artifactory.\n\n[Top of Page](#title_ID)", "_____no_output_____" ], [ "#### Read in combined i2d data file and list of coordinates", "_____no_output_____" ] ], [ [ "# Read in the combined data file and list of coordinates\n\nwith ImageModel('starfield_74_combined_i2d.fits') as im:\n # raises exception if file is not the correct model \n pass\n\n\ncoords = get_bigdata('jwst_validation_notebooks',\n 'validation_data',\n 'resample',\n 'resample_miri_test', \n 'radec_coords.txt')\n\n# read in text file with RA and Dec input coordinates\nRA_in, Dec_in = np.loadtxt( coords, dtype=str, unpack=True)\n\n# put RA and Dec into floats\nRA_sim = RA_in.astype(float)\nDec_sim = Dec_in.astype(float)\n\n\n# pull out data portion of input file\ndata = im.data\n\n# print stats on input image\nmean, median, std = sigma_clipped_stats(data, sigma=200.0, maxiters=5) # default sigma=3\nprint(mean, median, std)\n", "_____no_output_____" ] ], [ [ "#### Run DAOStar finder to find sources in the image and examine the image and positions marked. \nThe block of code below will find the sources in the image, create apertures for each source found, and output the table of x, y coordinates along with the peak pixel value. 
It will also show a scaled version of the image and mark in blue the positions of sources found.\n", "_____no_output_____" ] ], [ [ "# Run DAOStarFinder to find sources in image\n\nap_radius = 4. # radius for aperture for centroiding and photometry\n\ndaofind = DAOStarFinder(fwhm=3.0, threshold=10.*std) # default threshold=5*std, fwhm=3\nsources = daofind(data) \nprint(sources['xcentroid','ycentroid','peak']) \n\n# create apertures for sources\n\npositions = (sources['xcentroid'], sources['ycentroid'])\napertures = CircularAperture(positions, r=ap_radius)\n\n# mark sources on image frame to see if the correct sources were found\nnorm = ImageNormalize(stretch=SqrtStretch())\n# keep image stretch in mind for plotting. sky subtracted range ~ (-15, 10), single sample ~ (0, 20)\nplt.imshow(data, cmap='Greys', origin='lower', vmin=-15,vmax=10, norm=norm)\napertures.plot(color='blue', lw=1.5, alpha=0.5)\nplt.show()\n", "_____no_output_____" ] ], [ [ "#### Run photometry on apertures (with a specified annulus for background subtraction)\n\nSet a specified annulus (inner and outer radii for the annulus).\n\nRun photometry on aperture and annuli.\n\nSubtract background values in annulus from aperture photometry.\n\nOutput should be a table of photometry values printed to the screen (full table has columns id, xcenter, ycenter, aperture_sum and the added columns annulus_median, aperture_bkg and aperture_sum_bkgsub). You can choose which columns you wish to see printed.", "_____no_output_____" ] ], [ [ "# set values for inner and outer annuli to collect background counts\n\ninner_annulus = 10.\nouter_annulus = 15.\n\n# set up annulus for background\nbackground_aper = CircularAnnulus(positions, r_in=inner_annulus, r_out=outer_annulus)\n\n# perform photometry on apertures for targets and background annuli\nphot_table = aperture_photometry(im.data, apertures)\n\n# perform background subtraction with outlier rejection\nbkg_median = []\nbkg_mask = background_aper.to_mask(method='center')\nbmask = bkg_mask[0]\nfor mask in bkg_mask:\n aper_data = bmask.multiply(data)\n aper_data = aper_data[mask.data > 0]\n \n # perform sigma-clipped median\n _, median_sigclip, _ = sigma_clipped_stats(aper_data)\n bkg_median.append(median_sigclip)\nbkg_median = np.array(bkg_median)\n\n\n# do calculations on background regions found in annuli\n# Get average background per pixel\nphot_table['annulus_median'] = bkg_median\n# Get total background in the science aperture (per pixel * area in aperture)\nphot_table['aperture_bkg'] = bkg_median * apertures.area\n# subtract background in aperture from flux in aperture\nphot_table['aperture_sum_bkgsub'] = phot_table['aperture_sum'] - phot_table['aperture_bkg']\n\nprint(phot_table['aperture_sum','annulus_median','aperture_bkg','aperture_sum_bkgsub'])\n", "_____no_output_____" ] ], [ [ "#### Put x, y coordinates into RA and Dec using the wcs information from the files.\nThe output of the next block of code should be a table showing the x and y centroid positions as well as the associated RA and Dec values.", "_____no_output_____" ] ], [ [ "# using wcs info from images, put coordinates into RA, Dec\nra, dec = im.meta.wcs(sources['xcentroid'], sources['ycentroid'])\n\n# add RA, Dec to sources table\n\nra_col = Column(name='RA', data=ra)\ndec_col = Column(name='Dec', data=dec)\nsources.add_column(ra_col)\nsources.add_column(dec_col)\n\n# print RA, Dec for each x, y position found\nprint(sources['xcentroid', 'ycentroid', 'RA', 'Dec']) \n\n# add option to print out list of sources with flux 
values\nouttable = 'sourcelist_phot_rate.txt'\nsources.add_column(phot_table['aperture_sum'])\nsources.add_column(phot_table['aperture_sum_bkgsub'])\n", "_____no_output_____" ] ], [ [ "#### Compare the RA and Dec positions used to create the simulated data to the values found in the output image.\nDifference each set of RA and Dec coordinates in both the input list and the found coordinates, taking into account any angles close to 360/0 degrees. If the difference for both the RA and Dec are below a set tolerance, then the positions match. Take the matched positions and convert the differences from degrees to milli arcseconds, and output the RA and Dec positions as well as the differences. ", "_____no_output_____" ] ], [ [ "# Compare input RA, Dec to found RA, Dec\nprint(' RA found Dec found RA_Diff (mas) Dec_diff (mas) Bkg sub flux pass/fail')\n\nfor i in np.arange(0,len(RA_sim)):\n for j in np.arange(0,len(ra)):\n ra_diff = 180 - abs(abs(RA_sim[i] - ra[j])-180)\n dec_diff = 180 - abs(abs(Dec_sim[i] - dec[j])-180)\n\n if ra_diff < 1e-5 and dec_diff < 1e-5:\n # put differences in milliarcseconds\n ra_diff = ra_diff * 3600000\n dec_diff = dec_diff * 3600000\n if ra_diff < 30 and dec_diff < 30: \n test = 'pass' \n else: \n test = 'fail'\n print('{:15.6f} {:15.6f} {:15.6f} {:15.6f} {:15.6f} {}'.format(ra[j], dec[j], ra_diff, dec_diff, \n phot_table['aperture_sum_bkgsub'][j], test))\n", "_____no_output_____" ] ], [ [ "<a id=\"residual_ID\"></a>\n# Compare output RA and Dec to expected values\n\nThe output RA and Dec coordinates should match the input RA and Dec coordinates to within 1/10 of a PSF FWHM (~0.03 arcsec for F770W).\n\nOutput RA_Diff and Dec_diff above should be on order of 30 or fewer milliarcseconds.\n\nCheck to see if your input flux is roughly what you expected based on the input data.\n\n[Top of Page](#title_ID)", "_____no_output_____" ], [ "<a id=\"about_ID\"></a>\n## About this Notebook\n**Author:** M. Cracraft, Research and Instrument Scientist II, INS/MIRI\n<br>**Updated On:** 08/09/2019 to add in aperture photometry", "_____no_output_____" ], [ "An extra optional test that can be done is to plot the flux values against x or y values. Previous testing has shown a spatial dependence of the flux with y values, so a quick plot can show whether this problem is fixed or not. Prior to the resample step, there is no pattern, after the step, a pattern is clear. Just do this as a last check. If the scatter is not random, there may be a problem that needs to be checked. (Of course, this only works if you give an equivalent if not equal input count level to each input star.)", "_____no_output_____" ] ], [ [ "plt.title('Surface brightness vs. y position on detector')\nplt.ylim(35500,37500) # help weed out sources that were erroneously 'hits' (bad pixels, cosmic rays, etc)\nplt.xlabel('y centroid position')\nplt.ylabel('Surface brightness')\nplt.plot(sources['ycentroid'], phot_table['aperture_sum_bkgsub'], marker='o',linestyle='') #ylim=(30000,40000))\nplt.show()", "_____no_output_____" ] ], [ [ "[Top of Page](#title_ID)\n<img style=\"float: right;\" src=\"./stsci_pri_combo_mark_horizonal_white_bkgd.png\" alt=\"stsci_pri_combo_mark_horizonal_white_bkgd\" width=\"200px\"/> ", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ] ]
cb9ab488a941153fe9c1f2c2b894b44d8d5ab8f7
85,541
ipynb
Jupyter Notebook
ch01data/068QuakesSolution.ipynb
alvinam/eng
15f8021a7b7f199cb8136b9ff613374818bc052d
[ "CC-BY-3.0" ]
null
null
null
ch01data/068QuakesSolution.ipynb
alvinam/eng
15f8021a7b7f199cb8136b9ff613374818bc052d
[ "CC-BY-3.0" ]
null
null
null
ch01data/068QuakesSolution.ipynb
alvinam/eng
15f8021a7b7f199cb8136b9ff613374818bc052d
[ "CC-BY-3.0" ]
1
2020-03-06T12:48:40.000Z
2020-03-06T12:48:40.000Z
235.002747
78,630
0.918916
[ [ [ "### Download the data", "_____no_output_____" ] ], [ [ "import requests\nquakes_response=requests.get(\"http://earthquake.usgs.gov/fdsnws/event/1/query.geojson\",\n params={\n 'starttime':\"2000-01-01\",\n \"maxlatitude\":\"58.723\",\n \"minlatitude\":\"50.008\",\n \"maxlongitude\":\"1.67\",\n \"minlongitude\":\"-9.756\",\n \"minmagnitude\":\"1\",\n \"endtime\":\"2015-07-13\",\n \"orderby\":\"time-asc\"}\n )", "_____no_output_____" ] ], [ [ "### Parse the data as JSON", "_____no_output_____" ] ], [ [ "import json", "_____no_output_____" ], [ "requests_json = json.loads(quakes_response.text)", "_____no_output_____" ] ], [ [ "### Investigate the data to discover how it is structured.", "_____no_output_____" ] ], [ [ "type(requests_json)", "_____no_output_____" ], [ "requests_json.keys()", "_____no_output_____" ], [ "len(requests_json['features'])", "_____no_output_____" ], [ "requests_json['features'][0].keys()", "_____no_output_____" ], [ "requests_json['features'][0]['properties']['mag']", "_____no_output_____" ], [ "requests_json['features'][0]['geometry']", "_____no_output_____" ] ], [ [ "### Find the largest quake", "_____no_output_____" ] ], [ [ "quakes = requests_json['features']", "_____no_output_____" ], [ "largest_so_far = quakes[0]\nfor quake in quakes:\n if quake['properties']['mag'] > largest_so_far['properties']['mag']:\n largest_so_far = quake\nlargest_so_far['properties']['mag']", "_____no_output_____" ], [ "lat=largest_so_far['geometry']['coordinates'][1]\nlong=largest_so_far['geometry']['coordinates'][0]\nprint(\"Latitude:\", lat, \"Longitude:\", long)", "Latitude: 52.52 Longitude: -2.15\n" ] ], [ [ "### Get a map at the point of the quake", "_____no_output_____" ] ], [ [ "import requests\ndef request_map_at(lat,long, satellite=False,zoom=12,size=(400,400),sensor=False):\n base=\"http://maps.googleapis.com/maps/api/staticmap?\"\n \n params=dict(\n sensor= str(sensor).lower(),\n zoom= zoom,\n size= \"x\".join(map(str,size)),\n center= \",\".join(map(str,(lat,long)))\n )\n if satellite:\n params[\"maptype\"]=\"satellite\"\n\n return requests.get(base,params=params)", "_____no_output_____" ], [ "import IPython\nmap_png=request_map_at(lat, long,zoom=10)", "_____no_output_____" ] ], [ [ "### Display the map", "_____no_output_____" ] ], [ [ "IPython.core.display.Image(map_png.content)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
cb9ac06da2b8282be3a4128bb35a9d462288e924
53,601
ipynb
Jupyter Notebook
ViolanceDetection-Jupyter/preprocess_experiment.ipynb
pmukunda3/deep_learning_project
b0264a7193c4f152e6e0e8092edc2ec32fb3abcd
[ "MIT" ]
null
null
null
ViolanceDetection-Jupyter/preprocess_experiment.ipynb
pmukunda3/deep_learning_project
b0264a7193c4f152e6e0e8092edc2ec32fb3abcd
[ "MIT" ]
null
null
null
ViolanceDetection-Jupyter/preprocess_experiment.ipynb
pmukunda3/deep_learning_project
b0264a7193c4f152e6e0e8092edc2ec32fb3abcd
[ "MIT" ]
null
null
null
112.135983
34,596
0.824574
[ [ [ "import random, time, ffmpeg\nimport numpy as np\nfrom math import ceil\nimport threading\nimport cv2\nfrom datetime import datetime\nimport PIL.Image as Image\nimport tensorflow as tf\nimport os\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "model_settings = {\n # Training settings\n 'current_epoch': 1,\n 'max_steps': 1000,\n 'moving_decay': 0.9999, 'weight_decay': 0.00005, 'dropout': 0.5,\n 'learning_rate': 1e-4, # 1e-4 from previous code\n 'checkpoints': 200, # Number of steps to create checkpoint\n 'batch_sizes': [30], # Batch per device\n 'read_pretrained_model': True,\n 'load_fc_layers': True,\n 'train_conv': False,\n 'train_fc': True,\n 'save_graph': True,\n 'is_testing': False,\n\n # Neural-Network settings\n 'frames_per_batch': 16, # Number of frames in a batch\n 'video_fps': 12, # FPS of frames extracted\n 'crop_size': 112, # Input frames dimension\n 'channels': 3,\n 'trans_max': 10, # Translation factor for pre-processing\n\n # System settings\n 'devices_to_run': ['/gpu:0'], # Multiple devices are not supported yet :(\n 'num_thread': 4, # Number of threads to read video files\n 'queue_size': 3000, # Queue size for reading input\n\n # Directory settings\n 'read_from_frames': True,\n 'model_name': 'UCF_finetune',\n 'checkpoint_dir': './checkpoints/',\n 'model_save_dir': './models/',\n # 'model_read_loc' : '../ViolanceDetection-Jupyter/models/s1m-ucf101.model',\n 'model_read_loc': './models/UCF_finetuneFC_last.model',\n 'data_home': '../datasets/UCF-101-Frames/',\n 'train_test_loc': '../datasets/UCF-ActionRecognitionSplits',\n 'train_file_name': '/trainlist01.txt',\n # 'train_file_name': '/train_small.txt',\n 'test_file_name': '/testlist01.txt',\n 'mean_clip_loc': '../datasets/PreprocessData/crop_mean.npy'\n}\n\n\ndef set_model_settings(model_settings):\n # Storage of variables RAM:'/cpu:0' GPU:'/gpu:0'\n model_settings['variable_storage'] = model_settings['devices_to_run'][0]\n # model_settings['variable_storage'] = '/cpu:0'\n # Total number of batch\n model_settings['total_batch'] = np.sum(model_settings['batch_sizes'])\n\n # Input shape for placeholders\n model_settings['input_shape'] = (model_settings['frames_per_batch'],\n model_settings['crop_size'],\n model_settings['crop_size'],\n model_settings['channels'])\n\n # Mean clip for input\n model_settings['np_mean'] = np.load(model_settings['mean_clip_loc']). 
\\\n reshape(model_settings['input_shape'])\n\n if model_settings['is_testing']:\n model_settings['input_from_placeholders'] = False\n model_settings['dequeue_immediately'] = True\n model_settings['dropout'] = 1.0\n model_settings['trans_max'] = 0\n else:\n model_settings['input_from_placeholders'] = False\n model_settings['dequeue_immediately'] = False\n\n model_settings['start_time'] = datetime.now()\n\n \nset_model_settings(model_settings)", "_____no_output_____" ], [ "# Reads train/test filenames from provided splits\n# Returns video directions and their labels in a list\ndef get_data_dir(filename, from_frames=False):\n dir_videos, label_videos = [], []\n with open(filename, 'r') as input_file:\n for line in input_file:\n file_name, label = line.split(' ')\n # if will read from frames\n if from_frames:\n file_name = '.'.join(file_name.split('.')[:-1])\n dir_videos.append(file_name)\n label_videos.append(int(label) - 1)\n return dir_videos, label_videos\n\n\n# Shuffles video directions along with labels\ndef shuffle_list(dir_videos, label_videos, seed=time.time()):\n print('Shuffling the dataset...')\n video_indices = list(range(len(dir_videos)))\n random.seed(seed)\n random.shuffle(video_indices)\n shuffled_video_dirs = [dir_videos[i] for i in video_indices]\n shuffled_labels = [label_videos[i] for i in video_indices]\n return shuffled_video_dirs, shuffled_labels\n\n\n# Given video directory it reads the video\n# extracts the frames, and do pre-processing operation\ndef read_clips_from_video(dirname, model_settings):\n # Input size for the network\n frames_per_batch = model_settings['frames_per_batch']\n video_fps = model_settings['video_fps']\n crop_size = model_settings['crop_size']\n np_mean = model_settings['np_mean']\n trans_max = model_settings['trans_max']\n\n # Data augmentation randoms\n horizontal_flip = random.random()\n trans_factor = random.randint(-trans_max, trans_max)\n\n # Video information\n probe = ffmpeg.probe(dirname)\n video_info = probe[\"streams\"][0]\n video_width = video_info[\"width\"]\n video_height = video_info[\"height\"]\n video_duration = float(video_info[\"duration\"])\n num_frame = int(video_info[\"nb_frames\"])\n\n # Select which portion of the video will be input\n rand_max = int(num_frame - ((num_frame / video_duration) * (frames_per_batch / video_fps)))\n\n start_frame = random.randint(0, max(rand_max - 1, 0))\n # end_frame = ceil(start_frame + (num_frame / video_duration) * frames_per_batch / video_fps + 1)\n video_start = (video_duration / num_frame) * start_frame\n video_end = min(video_duration, video_start + ((frames_per_batch + 1) / video_fps))\n\n # Cropping factor\n x_pos = max(video_width - video_height + 2 * trans_factor, 0) // 2\n y_pos = max(video_height - video_width + 2 * trans_factor, 0) // 2\n crop_size1 = min(video_height, video_width)\n # Read specified times of the video\n ff = ffmpeg.input(dirname, ss=video_start, t=video_end - video_start)\n # Trim video -> did not work :(\n # ff = ff.trim(end_frame='50')\n # Divide into frames\n ff = ffmpeg.filter(ff, 'fps', video_fps)\n # Crop\n ff = ffmpeg.crop(ff, x_pos, y_pos, crop_size1, crop_size1)\n # Subsample\n ff = ffmpeg.filter(ff, 'scale', crop_size, crop_size)\n # Horizontal flip with some probability\n if horizontal_flip > 0.5:\n ff = ffmpeg.hflip(ff)\n # Output the video\n ff = ffmpeg.output(ff, 'pipe:',\n format='rawvideo',\n pix_fmt='rgb24')\n # Run Process in quiet mode\n out, _ = ffmpeg.run(ff, capture_stdout=True, quiet=True)\n # Extract to numpy array\n video = 
np.frombuffer(out, np.uint8). \\\n reshape([-1, crop_size, crop_size, 3])\n\n # Copies last frame if # of frames < 16\n # Subtracts the mean and converts type to float32\n num_frames = video.shape[0]\n if num_frames < frames_per_batch:\n last_frame = video[-1]\n num_frame_repeat = frames_per_batch - num_frames\n # print('Frames repeated: ', num_frame_repeat)\n last_repeat = np.repeat(last_frame[np.newaxis],\n num_frame_repeat,\n axis=0)\n video = np.concatenate((video, last_repeat), axis=0) - np_mean\n else:\n video = video[:frames_per_batch] - np_mean\n\n return video\n\n\ndef get_frames_data(filename, frames_per_batch=16):\n ret_arr = []\n for parent, dirnames, filenames in os.walk(filename):\n num_frames = len(filenames)\n start_max = max(0, num_frames - frames_per_batch)\n start_index = random.randint(0, start_max)\n end_index = min(start_index+frames_per_batch, num_frames)\n filenames = sorted(filenames)\n for i in range(start_index, end_index):\n image_name = str(filename) + '/' + str(filenames[i])\n img = Image.open(image_name)\n img_data = np.array(img)\n ret_arr.append(img_data)\n return ret_arr\n\ndef set_placeholders(model_settings):\n if model_settings['read_from_frames']:\n shape = (None, None, None, 3)\n images_placeholder = tf.placeholder(tf.float32, shape=shape, name=\"input_clip\")\n else:\n images_placeholder = tf.placeholder(tf.float32, shape=model_settings['input_shape'], name=\"input_clip\")\n\n labels_placeholder = tf.placeholder(tf.int64, shape=(), name=\"labels\")\n dropout_placeholder = tf.placeholder_with_default(model_settings['dropout'], shape=())\n\n model_settings['images_placeholder'] = images_placeholder\n model_settings['labels_placeholder'] = labels_placeholder\n model_settings['dropout_placeholder'] = dropout_placeholder\n print('Finished setting placeholders..')\n\n\ndef process_frames(model_settings):\n with tf.name_scope('Frame_Process'), tf.device('/cpu:0'):\n images_placeholder = model_settings['images_placeholder']\n trans_max = model_settings['trans_max']\n crop_size = model_settings['crop_size']\n frames_per_batch = model_settings['frames_per_batch']\n np_mean = tf.convert_to_tensor(model_settings['np_mean'])\n\n clips_shape = tf.shape(images_placeholder)\n video_width = clips_shape[1]\n video_height = clips_shape[2]\n rem_frame = frames_per_batch - clips_shape[0]\n\n trans_factor = tf.random.uniform([1], -trans_max, trans_max, dtype=tf.int32)\n crop_size1 = tf.math.minimum(video_height, video_width)\n x_pos = tf.math.maximum(video_width - video_height + 2 * trans_factor, 0) // 2\n x_start, x_end = x_pos[0], x_pos[0]+crop_size1\n y_pos = tf.math.maximum(video_height - video_width + 2 * trans_factor, 0) // 2\n y_start, y_end = y_pos[0], y_pos[0]+crop_size1\n\n clips_cropped = images_placeholder[:,x_start:x_end, y_start:y_end]\n clips_interp = tf.image.resize_bicubic(clips_cropped, (crop_size, crop_size))\n clips_interp = tf.clip_by_value(clips_interp, 0, 255)\n last_frame = clips_interp[-1]\n rem_frames = tf.tile(tf.expand_dims(last_frame,0), [rem_frame, 1, 1, 1])\n final_clips = tf.concat([clips_interp, rem_frames], 0)\n final_clips = tf.image.random_flip_left_right(final_clips)\n final_clips -= np_mean\n return final_clips\n\n\ndef read_clips(dirnames, model_settings):\n for dirname in dirnames:\n read_clip(dirname, model_settings)", "_____no_output_____" ], [ "train_dir_locations = model_settings['train_test_loc'] + model_settings['train_file_name']\ndir_frames, labels = get_data_dir(train_dir_locations, True)", "_____no_output_____" ], [ 
"set_placeholders(model_settings)\nframes = process_frames(model_settings)\n\ni = random.randint(0, 5000)\nfile_loc = model_settings['data_home'] + dir_frames[i] \nimgs = get_frames_data(file_loc)\nimages_placeholder = model_settings['images_placeholder']\n\nwith tf.Session() as sess:\n frames = sess.run(frames, {images_placeholder: imgs})\nplt.imshow(frames[0].astype(np.uint8))\nplt.show()", "Finished setting placeholders..\n" ], [ "dirnames_threads = []\nthreads = []\n\nfor i in range(8):\n cur = []\n for j in range(5):\n read_index = 5 * i + j\n video_dir, label = dir_videos[read_index], label_clips[read_index]\n video_dir = model_settings['data_home'] + video_dir\n cur.append(video_dir)\n dirnames_threads.append(cur)\n\n\nfor i in range(8):\n dirnames = dirnames_threads[i]\n threads.append(threading.Thread(target=read_clips, args=(dirnames, model_settings)))\n\ntime0 = time.time()\nfor i in range(8):\n threads[i].start()\n\nfor i in range(8):\n threads[i].join()\nprint('Time diff:', time.time() - time0)", "_____no_output_____" ], [ "index = 150\ndirname = model_settings['data_home'] + dir_videos[index]\nframes_per_batch = model_settings['frames_per_batch']\nvideo_fps = model_settings['video_fps']\ncrop_size = model_settings['crop_size']\nnp_mean = model_settings['np_mean']\nhorizontal_flip = random.random()\n\nprobe = ffmpeg.probe(dirname)\nvideo_info = probe[\"streams\"][0]\nvideo_width = video_info[\"width\"]\nvideo_height = video_info[\"height\"]\nvideo_duration = float(video_info[\"duration\"])\nnum_frame = int(video_info[\"nb_frames\"])\n\nrand_max = int(num_frame - ((num_frame / video_duration) * (frames_per_batch / video_fps)))\nstart_frame = random.randint(0, rand_max - 1)\nend_frame = ceil(start_frame + (num_frame / video_duration) * frames_per_batch / video_fps + 1)\nvideo_start = (video_duration / num_frame) * start_frame\nvideo_end = video_start + ((frames_per_batch+1) / video_fps) \nprint(end_frame-start_frame, video_start, video_end)\n\nx_pos = max(video_width - video_height, 0) // 2\ny_pos = max(video_height - video_width, 0) // 2\ncrop_size1 = min(video_height, video_width)\n", "_____no_output_____" ], [ "# Input video\nff = ffmpeg.input(dirname, ss=video_start, t=video_end-video_start)\n# Trim video\n#ff = ff.trim(end_frame='50')\n# Divide into frames\nff = ffmpeg.filter(ff, 'fps', video_fps)\n# Crop\nff = ffmpeg.crop(ff, x_pos, y_pos, crop_size1, crop_size1)\n# Subsample\nff = ffmpeg.filter(ff, 'scale', crop_size, crop_size)\n# Horizontal flip with some probability\nif horizontal_flip > 0.5:\n ff = ffmpeg.hflip(ff)\n# Output the video\nff = ffmpeg.output(ff, 'pipe:',\n format='rawvideo',\n pix_fmt='rgb24')\n# Run Process in quiet mode\nout, _ = ffmpeg.run(ff, capture_stdout=True, quiet=True)\n# Extract to numpy array\nvideo = np.frombuffer(out, np.uint8). 
\\\n reshape([-1, crop_size, crop_size, 3])\n\n# Substracts the mean and converts type to float32\nvideo = video[:16] - np_mean\n#print(video.shape)\n", "_____no_output_____" ], [ "index = 150\ndirname = model_settings['data_home'] + dir_videos[index]\nframes_per_batch = model_settings['frames_per_batch']\nvideo_fps = model_settings['video_fps']\ncrop_size = model_settings['crop_size']\nnp_mean = model_settings['np_mean']\nhorizontal_flip = random.random()\n\nprobe = ffmpeg.probe(dirname)\nvideo_info = probe[\"streams\"][0]\nvideo_width = video_info[\"width\"]\nvideo_height = video_info[\"height\"]\nvideo_duration = float(video_info[\"duration\"])\nnum_frame = int(video_info[\"nb_frames\"])\n\nrand_max = int(num_frame - ((num_frame / video_duration) * (frames_per_batch / video_fps)))\nstart_frame = random.randint(0, rand_max - 1)\nend_frame = ceil(start_frame + (num_frame / video_duration) * frames_per_batch / video_fps + 1)\n#end_frame = min(end_frame, num_frame)\n\nx_pos = max(video_width - video_height, 0) // 2\ny_pos = max(video_height - video_width, 0) // 2\ncrop_size1 = min(video_height, video_width)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb9acc57b03919b91db4d8a57da12cd7f42a9872
146,196
ipynb
Jupyter Notebook
Notebooks/introduction_whatsnew/difmo.ipynb
nilearn/dev-days-2021
20fe5c56d9197e768bd147c43f1e6464936544d7
[ "MIT" ]
2
2021-03-28T09:43:15.000Z
2021-05-06T15:53:53.000Z
Notebooks/introduction_whatsnew/difmo.ipynb
tsalo/dev-days-2021
bfdffee54157eff7b43931c085d8947c249fe48e
[ "MIT" ]
10
2021-03-10T14:43:22.000Z
2021-07-05T16:01:21.000Z
Notebooks/introduction_whatsnew/difmo.ipynb
tsalo/dev-days-2021
bfdffee54157eff7b43931c085d8947c249fe48e
[ "MIT" ]
2
2021-03-10T21:30:59.000Z
2021-05-03T15:05:24.000Z
266.295082
129,080
0.919102
[ [ [ "# DiFuMo (Dictionaries of Functional Modes)\n\n<div class=\"alert alert-block alert-danger\">\n<b>NEW:</b> New in release 0.7.1\n</div>\n\n## Outline\n\n- <a href=\"#descr\">Description</a>\n- <a href=\"#howto\">Description</a>\n- <a href=\"#closer\">Coser look on the object</a>\n- <a href=\"#visualize\">Visualize</a>", "_____no_output_____" ], [ "<span id=\"descr\"></span>\n\n## Description\n\n- New atlas fetcher :func:`nilearn.datasets.fetch_atlas_difumo` \n- Download statistical maps which can serve as atlases to extract functional signals with different dimensionalities (64, 128, 256, 512, and 1024)\n- These modes are optimized to represent well raw BOLD timeseries, over a with range of experimental conditions.", "_____no_output_____" ], [ "<span id=\"howto\"></span>\n\n## How to use it?\n\nFirst of all, make sure you have nilearn >= 0.7.1 installed:", "_____no_output_____" ] ], [ [ "import nilearn\nprint(nilearn.__version__)", "0.7.2.dev\n" ] ], [ [ "If this is verified, we should be able to export the difumo fetcher from the `datasets` module:", "_____no_output_____" ] ], [ [ "from nilearn.datasets import fetch_atlas_difumo", "/home/nicolas/GitRepos/nilearn-fork/nilearn/datasets/__init__.py:87: FutureWarning: Fetchers from the nilearn.datasets module will be updated in version 0.9 to return python strings instead of bytes and Pandas dataframes instead of Numpy arrays.\n warn(\"Fetchers from the nilearn.datasets module will be \"\n" ] ], [ [ "The documentation for this function can be seen on the website [here](http://nilearn.github.io/modules/generated/nilearn.datasets.fetch_atlas_difumo.html#nilearn.datasets.fetch_atlas_difumo) or thanks to the Jupyter magic command:", "_____no_output_____" ] ], [ [ "?fetch_atlas_difumo", "_____no_output_____" ] ], [ [ "Looking at the docstring, it looks like there are mainly two parameters to control the data we wish to donwload:\n\n- dimension: this will be the number of functional maps of the atlas. It must be 64, 128, 256, 512, or 1024\n- resolution: this enables to download atlas sampled either at 2mm or 3mm resolution\n\nLet's try it:", "_____no_output_____" ] ], [ [ "difumo_64 = fetch_atlas_difumo(dimension=64, # Feel free to change these parameters!\n resolution_mm=2)", "/home/nicolas/anaconda3/envs/nilearn/lib/python3.8/site-packages/numpy/lib/npyio.py:2405: VisibleDeprecationWarning: Reading unicode strings without specifying the encoding argument is deprecated. Set the encoding, use None for the system default.\n output = genfromtxt(fname, **kwargs)\n" ] ], [ [ "This should have either downloaded the 64 component atlas sampled at 2mm from osf, or simply grabed the data in the nilearn cache if you have downloaded it already.", "_____no_output_____" ], [ "<span id=\"closer\"></span>\n\n## Closer look on the object\n\nLike for any dataset in nilearn, the resulting object is a scikit-learn Bunch object with the following keys:\n\n- description: string describing the dataset\n- maps: the actual data\n- labels: label information for the maps", "_____no_output_____" ] ], [ [ "type(difumo_64)", "_____no_output_____" ], [ "difumo_64.keys()", "_____no_output_____" ] ], [ [ "Reading the description before usage is always recommanded:", "_____no_output_____" ] ], [ [ "print(difumo_64.description.decode()) # Note that description strings will be soon shipped as Python strings, \n # avoiding the anoying call to decode...", "DiFuMo atlas\n\n\nNotes\n-----\n1. 
We provide Dictionaries of Functional Modes “DiFuMo” that can serve as atlases to extract functional signals, eg to serve as IDPs, with different dimensionalities (64, 128, 256, 512, and 1024). These modes are optimized to represent well raw :term:`BOLD` timeseries, over a with range of experimental conditions.\n \n - All atlases are available in .nii.gz format and sampled to :term:`MNI` space\n \n2. Additionally, we provide meaningful names for these modes, based on their anatomical location, to facilitate reporting of results.\n\n - Anatomical names are available for each resolution in .csv\n\nContent\n-------\n :'maps': Nifti images with the (probabilistic) region definitions\n :'labels': CSV file specifying the label information\n\n\nReferences\n----------\nFor more information about this dataset's structure:\nhttps://hal.inria.fr/hal-02904869\n\nDadi, K., Varoquaux, G., Machlouzarides-Shalit, A., Gorgolewski, KJ., Wassermann, D., Thirion, B., Mensch, A. Fine-grain atlases of functional modes for fMRI analysis. NeuroImage, Elsevier, 2020, pp.117126 [Link to this paper](https://hal.inria.fr/hal-02904869)\n\n\nMensch, A., Mairal, J., Thirion, B., Varoquaux, G., 2018. Stochastic Subsampling for Factorizing Huge Matrices. IEEE Transactions on Signal Processing 66, 113–128.\n\nPoldrack, R.A., Barch, D.M., Mitchell, J.P., et al., 2013. Toward open sharing of task-based fMRI data: the OpenfMRI project. Frontiers in neuroinformatics 7.\n\nLicence: usage is unrestricted for non-commercial research purposes.\n\n" ] ], [ [ "Label information is directly available:", "_____no_output_____" ] ], [ [ "assert len(difumo_64.labels) == 64 # We have one label information tuple per component\ndifumo_64.labels[:6] # Print the first 6 label information", "_____no_output_____" ] ], [ [ "We can see that each component has: \n\n- a label index going from 1 to 64\n- a name\n- a network (todo: explain)\n- a network (todo: explain)\n- coordinates (todo: explain)", "_____no_output_____" ], [ "Finally, the actual data is a simple path to a nifti image on disk, which is the usual way to represent niimg in Nilearn:", "_____no_output_____" ] ], [ [ "difumo_64.maps", "_____no_output_____" ] ], [ [ "If you wan to have a look at the actual data, you can open this image using usual nilearn loading utilities:", "_____no_output_____" ] ], [ [ "from nilearn.image import get_data\nraw_maps = get_data(difumo_64.maps) # raw_maps is a 4D numpy array holding the \nraw_maps.shape # coefficients of the functional modes", "_____no_output_____" ] ], [ [ "<span id=\"visualize\"></span>\n\n## Visualize it\n\n**Method 1**\n\nLooking at probabilitic atlases can be done with the function `plot_prob_atlas` of the `plotting` module:", "_____no_output_____" ] ], [ [ "from nilearn.plotting import plot_prob_atlas\n\nplot_prob_atlas(difumo_64.maps, title='DiFuMo 64')", "/home/nicolas/GitRepos/nilearn-fork/nilearn/plotting/displays.py:101: UserWarning: No contour levels were found within the data range.\n im = getattr(ax, type)(data_2d.copy(),\n" ] ], [ [ "**Method 2**\n\nAnother way to visualize the atlas is through the report of the `NiftiMapsMasker` object. \n\n<div class=\"alert alert-block alert-danger\">\n<b>Danger:</b> This feature is under development and still not available in 0.7.1. I might remove this section if I don't submit my PR in time.\n</div>", "_____no_output_____" ] ], [ [ "from nilearn.input_data import NiftiMapsMasker\nmasker = NiftiMapsMasker(difumo_64.maps)\nmasker", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb9accfd19fdf6fe5167da39e761884224be79f7
8,810
ipynb
Jupyter Notebook
components/gcp/dataproc/create_cluster/sample.ipynb
civitaspo/pipelines
7276711700ee93ec0d7490523313bdad840acb83
[ "Apache-2.0" ]
null
null
null
components/gcp/dataproc/create_cluster/sample.ipynb
civitaspo/pipelines
7276711700ee93ec0d7490523313bdad840acb83
[ "Apache-2.0" ]
484
2021-01-21T06:49:17.000Z
2022-03-23T01:21:24.000Z
components/gcp/dataproc/create_cluster/sample.ipynb
civitaspo/pipelines
7276711700ee93ec0d7490523313bdad840acb83
[ "Apache-2.0" ]
null
null
null
35.959184
307
0.609308
[ [ [ "# Name\nData processing by creating a cluster in Cloud Dataproc\n\n\n# Label\nCloud Dataproc, cluster, GCP, Cloud Storage, KubeFlow, Pipeline\n\n\n# Summary\nA Kubeflow Pipeline component to create a cluster in Cloud Dataproc.\n\n# Details\n## Intended use\n\nUse this component at the start of a Kubeflow Pipeline to create a temporary Cloud Dataproc cluster to run Cloud Dataproc jobs as steps in the pipeline.\n\n## Runtime arguments\n\n| Argument | Description | Optional | Data type | Accepted values | Default |\n|----------|-------------|----------|-----------|-----------------|---------|\n| project_id | The Google Cloud Platform (GCP) project ID that the cluster belongs to. | No | GCPProjectID | | |\n| region | The Cloud Dataproc region to create the cluster in. | No | GCPRegion | | |\n| name | The name of the cluster. Cluster names within a project must be unique. You can reuse the names of deleted clusters. | Yes | String | | None |\n| name_prefix | The prefix of the cluster name. | Yes | String | | None |\n| initialization_actions | A list of Cloud Storage URIs identifying executables to execute on each node after the configuration is completed. By default, executables are run on the master and all the worker nodes. | Yes | List | | None |\n| config_bucket | The Cloud Storage bucket to use to stage the job dependencies, the configuration files, and the job driver console’s output. | Yes | GCSPath | | None |\n| image_version | The version of the software inside the cluster. | Yes | String | | None |\n| cluster | The full [cluster configuration](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.clusters#Cluster). | Yes | Dict | | None |\n| wait_interval | The number of seconds to pause before polling the operation. | Yes | Integer | | 30 |\n\n## Output\nName | Description | Type\n:--- | :---------- | :---\ncluster_name | The name of the cluster. | String\n\nNote: You can recycle the cluster by using the [Dataproc delete cluster component](https://github.com/kubeflow/pipelines/tree/master/components/gcp/dataproc/delete_cluster).\n\n\n## Cautions & requirements\n\nTo use the component, you must:\n* Set up the GCP project by following these [steps](https://cloud.google.com/dataproc/docs/guides/setup-project).\n* The component can authenticate to GCP. Refer to [Authenticating Pipelines to GCP](https://www.kubeflow.org/docs/gke/authentication-pipelines/) for details.\n* Grant the following types of access to the Kubeflow user service account:\n * Read access to the Cloud Storage buckets which contains initialization action files.\n * The role, `roles/dataproc.editor` on the project.\n\n## Detailed description\n\nThis component creates a new Dataproc cluster by using the [Dataproc create cluster REST API](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.clusters/create). \n\nFollow these steps to use the component in a pipeline:\n\n1. Install the Kubeflow Pipeline SDK:\n", "_____no_output_____" ] ], [ [ "%%capture --no-stderr\n\n!pip3 install kfp --upgrade", "_____no_output_____" ] ], [ [ "2. Load the component using KFP SDK", "_____no_output_____" ] ], [ [ "import kfp.components as comp\n\ndataproc_create_cluster_op = comp.load_component_from_url(\n 'https://raw.githubusercontent.com/kubeflow/pipelines/1.4.0-rc.1/components/gcp/dataproc/create_cluster/component.yaml')\nhelp(dataproc_create_cluster_op)", "_____no_output_____" ] ], [ [ "### Sample\nNote: The following sample code works in an IPython notebook or directly in Python code. 
See the sample code below to learn how to execute the template.\n\n#### Set sample parameters", "_____no_output_____" ] ], [ [ "# Required Parameters\nPROJECT_ID = '<Please put your project ID here>'\n\n# Optional Parameters\nEXPERIMENT_NAME = 'Dataproc - Create Cluster'", "_____no_output_____" ] ], [ [ "#### Example pipeline that uses the component", "_____no_output_____" ] ], [ [ "import kfp.dsl as dsl\nimport json\[email protected](\n name='Dataproc create cluster pipeline',\n description='Dataproc create cluster pipeline'\n)\ndef dataproc_create_cluster_pipeline(\n project_id = PROJECT_ID, \n region = 'us-central1', \n name='', \n name_prefix='',\n initialization_actions='', \n config_bucket='', \n image_version='', \n cluster='', \n wait_interval='30'\n):\n dataproc_create_cluster_op(\n project_id=project_id, \n region=region, \n name=name, \n name_prefix=name_prefix, \n initialization_actions=initialization_actions, \n config_bucket=config_bucket, \n image_version=image_version, \n cluster=cluster, \n wait_interval=wait_interval)", "_____no_output_____" ] ], [ [ "#### Compile the pipeline", "_____no_output_____" ] ], [ [ "pipeline_func = dataproc_create_cluster_pipeline\npipeline_filename = pipeline_func.__name__ + '.zip'\nimport kfp.compiler as compiler\ncompiler.Compiler().compile(pipeline_func, pipeline_filename)", "_____no_output_____" ] ], [ [ "#### Submit the pipeline for execution", "_____no_output_____" ] ], [ [ "#Specify pipeline argument values\narguments = {}\n\n#Get or create an experiment and submit a pipeline run\nimport kfp\nclient = kfp.Client()\nexperiment = client.create_experiment(EXPERIMENT_NAME)\n\n#Submit a pipeline run\nrun_name = pipeline_func.__name__ + ' run'\nrun_result = client.run_pipeline(experiment.id, run_name, pipeline_filename, arguments)", "_____no_output_____" ] ], [ [ "## References\n* [Kubernetes Engine for Kubeflow](https://www.kubeflow.org/docs/started/getting-started-gke/#gcp-service-accounts)\n* [Component Python code](https://github.com/kubeflow/pipelines/blob/master/components/gcp/container/component_sdk/python/kfp_component/google/dataproc/_create_cluster.py)\n* [Component Docker file](https://github.com/kubeflow/pipelines/blob/master/components/gcp/container/Dockerfile)\n* [Sample notebook](https://github.com/kubeflow/pipelines/blob/master/components/gcp/dataproc/create_cluster/sample.ipynb)\n* [Dataproc create cluster REST API](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.clusters/create)\n\n## License\nBy deploying or using this software you agree to comply with the [AI Hub Terms of Service](https://aihub.cloud.google.com/u/0/aihub-tos) and the [Google APIs Terms of Service](https://developers.google.com/terms/). To the extent of a direct conflict of terms, the AI Hub Terms of Service will control.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
cb9adeb9687ff27d3553996f159fa1c08598042e
30,319
ipynb
Jupyter Notebook
Basic Functions.ipynb
teamindelible/Getting-Started-with-Python
e9c34cfc774c8210b909dbf9f887158e7789071a
[ "MIT" ]
null
null
null
Basic Functions.ipynb
teamindelible/Getting-Started-with-Python
e9c34cfc774c8210b909dbf9f887158e7789071a
[ "MIT" ]
null
null
null
Basic Functions.ipynb
teamindelible/Getting-Started-with-Python
e9c34cfc774c8210b909dbf9f887158e7789071a
[ "MIT" ]
null
null
null
24.709861
138
0.501171
[ [ [ "def string_adder(a = \"\", b = \"\"):\n return str(a + \" \" + b)\n\nstring_adder(a = \"Michael\", b = \"Akinola\")\n#string_adder()\n#string_adder(\"$\", \"1000\")", "_____no_output_____" ], [ "# Define a long_word function that accepts a string. \n# The function should return a Boolean that reflects whether the string has more than 7 characters.\n# \ndef long_word(a_string):\n if len(a_string) > 7:\n return True\n return False\n\n# EXAMPLES:\nlong_word(\"Python\") #=> False\n# long_word(\"magnificent\") => True", "_____no_output_____" ], [ "# Define a first_longer_than_second function that accepts two string arguments. \n# The function should return a True if the first string is longer than the second \n# and False otherwise (including if they are equal in length).\n#\ndef first_longer_than_second(first, second):\n if len(first) > len(second):\n return True\n return False\n\n# EXAMPLES:\n#first_longer_than_second(\"Python\", \"Ruby\") => True\nfirst_longer_than_second(\"cat\", \"mouse\") #=> False\n# first_longer_than_second(\"Steven\", \"Seagal\") => False", "_____no_output_____" ], [ "#Define a same_first_and_last_letter function that accepts a string as an argument. \n# The function should return a True if the first and last character are equal, and False otherwise\n# Assume the string will always have 1 or more characters.\n#\ndef same_first_and_last_letter(word):\n if word[0] == word[-1]:\n return True\n return False\n\n# EXAMPLES:\n# same_first_and_last_letter(\"runner\") => True\n# same_first_and_last_letter(\"clock\") => False\nsame_first_and_last_letter(\"q\") #=> True\n# same_first_and_last_letter(\"Runner\") #=> False", "_____no_output_____" ], [ "# Define a three_number_sum function that accepts a 3-character string as an argument. \n# The function should add up the sum of the digits of the string. \n# HINT: You’ll have to figure out a way to convert the string-ified numbers to integers.\n#\ndef three_number_sum(x):\n return int(x[0]) + int(x[1]) + int(x[2])\n\n# EXAMPLES:\n# three_number_sum(\"123\") => 6\nthree_number_sum(\"567\") #=> 18\n# three_number_sum(\"444\") => 12\n# three_number_sum(\"000\") => 0", "_____no_output_____" ], [ "# Define a first_three_characters function that accepts a string argument.\n# The function should return the first three characters of the string.\n#\ndef first_three_characters(string):\n return string[0:3]\n\n# EXAMPLES:\nfirst_three_characters(\"dynasty\") #=> \"dyn\"\n# first_three_characters(\"empire\") => \"emp\"", "_____no_output_____" ], [ "# Define a last_five_characters function that accepts a string argument. \n# The function should return the last 5 characters of the string.\n#\ndef last_five_characters(text):\n return text[-5:]\n\n# EXAMPLES:\nlast_five_characters(\"dynasty\") #=> \"nasty\"\n# last_fiee_characters(\"empire\") => \"mpire\"", "_____no_output_____" ], [ "# Define a is_palindrome function that accepts a string argument. \n# The function should return True if the string is spelled the same backwards as it is forwards. 
\n# Return False otherwise.\n#\ndef is_palindrome(word):\n if word == word[::-1]:\n return True\n return False\n\n# EXAMPLES:\n#is_palindrome(\"racecar\") #=> True\nis_palindrome(\"yummy\") #=> False", "_____no_output_____" ], [ "# Define a vowel_count function that accepts a string argument.\n# The function should return the count of vowels in the string.\n# The 5 vowels are \"a\", \"e\", \"i\", \"o\", and \"u\".\n# You can assume the string will be in all lowercase.\n\ndef vowel_count(a_string):\n vowels = set(\"aeiou\")\n count = 0\n for letters in a_string:\n if letters in vowels:\n count = count + 1\n return count\n\nvowel_count(\"love\")", "_____no_output_____" ], [ "def vowel_cou(str): \n count = 0 \n vowel = set(\"aeiouAEIOU\") \n for alphabet in str: \n if alphabet in vowel: \n count = count + 1\n \n print(\"No. of vowels:\", count)", "_____no_output_____" ], [ "vowel_cou(\"accelerate\")", "No. of vowels: 5\n" ], [ "# Define a find_my_letter function that accepts two arguments: a string and a character\n# The function should return the first index position of the character in the string\n# The function should return a -1 if the character does not exist in the string\n\ndef find_my_letter(a_string, a_char):\n return a_string.find(a_char)\n\nfind_my_letter(\"Michelle\", \"e\")\n# find_my_letter(a_string = \"television\", a_char = \"e\")", "_____no_output_____" ], [ "# Define a fancy_cleanup function that accepts a single string argument\n# The function should clean up the whitespace on both sides of the\n# argument. It should also replace every occurence of the letter \"g\" with the\n# letter \"z\" and every occurence of a space with an exclamation point (!).\n\ndef fancy_cleanup(a_string):\n return a_string.strip().replace(\" \", \"e\").replace(\"!\", \"g\")\n\nfancy_cleanup(\" int lli!ent \")", "_____no_output_____" ], [ "# Define a even_or_odd function that accepts a single integer.\n# If the integer is even, the function should return the string “even”.\n# If the integer is odd, the function should return the string “odd”.\n\ndef even_or_odd(an_integer):\n if an_integer % 2 == 0:\n return \"even\"\n return \"odd\"\n\neven_or_odd(23)", "_____no_output_____" ], [ "def truthy_or_falsy(an_argument):\n if bool(an_argument):\n return \"The value \" + str(an_argument) + \" is truthy\"\n return \"The value \" + str(an_argument) + \" is falsy\"\n\ntruthy_or_falsy(\"celebrate\")\n# truthy_or_falsy(an_argument = \"\")\n# truthy_or_falsy(an_argument = \"shaba\")", "_____no_output_____" ], [ "# Define an up_and_down function that accepts a string argument\n# If the string consists of all uppercase letters, return a new string\n# consisting of all lowercase letters. If the string consists of all\n# lowercase letters, return a new string consisting of all uppercase\n# characters. If the string has a mix of uppercase and lowercase\n# characters, return a new string where the casing of each letter is swapped.\n\ndef up_and_down(string):\n if string == string.upper():\n return string.lower()\n elif string == string.lower():\n return string.upper()\n else:\n return string.swapcase()\n \nup_and_down(\"chocolate\")\n#up_and_down(\"CHOCOLATE\")\n#up_and_down(\"Chocolate\")", "_____no_output_____" ], [ "# Declare a negative_energy function that accepts a numeric argument and returns its absolute value. 
\n# The absolute value is the number's distance from zero.\n\ndef negative_energy(number):\n return abs(number)\n\nnegative_energy(12)\n#negative_energy(-5)", "_____no_output_____" ], [ "# Define a divisible_by_three_and_four function that accepts a number as its argument. \n# It should return True if the number is evenly divisible by both 3 and 4 . It should return False otherwise.\n\ndef divisible_by_three_and_four(number):\n if number % 3 == 0 and number % 4 == 0:\n return True\n return False\n\n#divisible_by_three_and_four(12)\ndivisible_by_three_and_four(6)", "_____no_output_____" ], [ "# Declare a string_theory function that accepts a string as an argument. \n# It should return True if the string has more than 3 characters and starts with a capital “S”. It should return False otherwise.\n\ndef string_theory(string):\n if len(string) > 3 and string[0] == \"S\":\n return True\n return False\n\nstring_theory(string = \"Shade\")\n#string_theory(string = \"Max\")\n#string_theory(string = \"Sam\")", "_____no_output_____" ], [ "#count down traditional\n\ndef count_down(final_number):\n current_number = final_number\n while current_number > 0:\n print(current_number)\n current_number -= 1\n \ncount_down(3)", "3\n2\n1\n" ], [ "#count down recurssion\n\ndef count_down(final_number):\n if final_number <= 0:\n return\n \n print(final_number)\n count_down(final_number - 1)\n \ncount_down(3)", "3\n2\n1\n" ], [ "#traditional method\n\ndef reverse(str):\n start_index = 0\n last_index = len(str) - 1\n reversed_string = \"\" \n\n while last_index >= start_index:\n reversed_string += str[last_index]\n last_index -= 1\n\n return reversed_string\n\nreverse(\"Iron\")", "_____no_output_____" ], [ "#recursion method\n\ndef reverse_d(str):\n if len(str) <= 1:\n return str\n\n return str[-1] + reverse_d(str[:-1])\n\n#e.g\nreverse_d(\"Einstein\")", "_____no_output_____" ], [ "#factorial\n\ndef factorial(number_n):\n x = 1\n for i in range(1,number_n + 1):\n x = x * i\n print(\"The factorial of\", number_n,\"is\",x) \n \nfactorial(10)", "The factorial of 10 is 3628800\n" ], [ "# Factorial of a number using recursion\n\ndef factorial(n):\n if n == 1:\n return n\n elif n < 0:\n print(\"Sorry, factorial does not exist for negative numbers\")\n elif n == 0:\n print(\"The factorial of 0 is 1\")\n else:\n return n * factorial(n-1)\n \nfactorial(5)", "_____no_output_____" ], [ "# Define a function product_of_even_indices that accepts a list of numbers. \n# The list will always have 6 total elements. \n# The function should return the product (multiplied total) of all numbers at an even index (0, 2, 4).\n\ndef product_of_even_indices(num_list):\n num_list = list(num_list)\n return num_list[0] * num_list[2] * num_list[4]\n\nproduct_of_even_indices(num_list = (1,2,3,4,5,6,))", "_____no_output_____" ], [ "# Define a function first_letter_of_last_string that accepts a list of strings. \n# It should return one character — the first letter of the last string in the list. 
\n# Assume the list will always have at least one string.\n\ndef first_letter_of_last_string(string):\n string = list(string)\n return string[-1][0]\n\nfirst_letter_of_last_string(string = ('ade', 'bambo', 'taiwo'))", "_____no_output_____" ], [ "# Define a split_in_two function that accepts a list and a number.\n# If the number is even, return the list elements from the third element to the end of the list.\n# If the number is odd, return the list elements from index 0 (inclusive) to 2 (exclusive)\n\ndef split_into_two(a_list, num):\n a_list = list(a_list)\n if num % 2 == 0:\n return a_list[-4:]\n else:\n return a_list[:2]\n \nsplit_into_two((1,2,3,4,5,6,7,8), 8)\n# split_into_two(a_list = (1,2,3,4,5,6,7,8), num = 7)", "_____no_output_____" ], [ "# Declare a nested_extraction function that accepts a list of lists and an index position.\n\n# The function should use the index as the basis of finding both the nested list \n# and the element from that list with the given index position\n\n# You can assume the number of lists will always be equal to \n# the number of elements within each of them.\n\ndef nested_extraction(lists, index):\n lists = list(lists)\n return lists[index][index]\n\nnested_extraction(([1,2,3],[4,5,6],[7,8,9]), 2)\n# nested_extraction(([1,2,3],[4,5,6],[7,8,9]), 1)\n# nested_extraction(([1,2,3],[4,5,6],[7,8,9]), 0)", "_____no_output_____" ], [ "# Declare a beginning_and_end function that accepts a list of elements.\n\n# It should return True if the first and last elements in the list are equal and False if they are unequal.\n\n# Assume the list will always have at least 1 element.\n\ndef beginning_and_end(list_s):\n #list_s = list\n if list_s[0] == list_s[-1]:\n return True\n else:\n return False\n \nbeginning_and_end([1,2,3,4,5,1])\n# beginning_and_end([1,2,3,4,5])", "_____no_output_____" ], [ "# Declare a long_word_in_collection function that accepts a list and a string. 
\n# The function should return True if \n# - the word exists in the list AND\n# - the word has more than 4 characters.\n#\n# words = [\"cat\", \"dog\", \"rhino\"]\n# long_word_in_collection(words, \"rhino\") => True\n# long_word_in_collection(words, \"cat\") => False\n# long_word_in_collection(words, \"monkey\") => False\n\ndef long_word_in_collection(a_list, string):\n if string in a_list and len(string) >= 4:\n return True\n else:\n return False\n \nlong_word_in_collection(['Mike', 'Sam', 'Ade', 'Wale'], 'Mike')\n# long_word_in_collection(['Mike', 'Sam', 'Ade', 'Wale'], 'Sam')", "_____no_output_____" ], [ "# Declare a count_of_a function that accepts a list of strings.\n# It should return a list with counts of how many “a” characters appear per string.\n# Do NOT use list comprehension.\n#\n# count_of_a([\"alligator\", \"aardvark\", \"albatross\"] => [2, 3, 2]\n# count_of_a([\"plywood\"]) => [0]\n# count_of_a([]) => []\n\ndef count_of_a(letters):\n return letters.count(\"a\")\n\nletters = [\"ade\", \"mike\", \"sam\", \"alligator\"]\nprint(list(map(count_of_a, letters)))", "[1, 0, 1, 2]\n" ], [ "def only_odds(odds):\n return (odds % 2) != 0\n\nodds = [1,2,3,4,5]\nprint(list(map(only_odds, odds)))", "[True, False, True, False, True]\n" ], [ "def count_of_a(letters):\n return letters.count(\"a\")\n\nletters = [\"alligator\", \"aardvark\", \"albatross\"] \nprint(list(map(count_of_a, letters)))", "[2, 3, 2]\n" ], [ "#lambda function\n\nmetals = [\"gold\", \"silver\", \"platinum\", \"palladium\"]\n\nprint(list(filter(lambda metal: len(metal) > 5, metals)))\nprint(list(filter(lambda element: len(element) < 4, metals)))\nprint(list(filter(lambda word: \"p\" in word, metals)))\nprint(list(map(lambda word: word.count(\"l\"), metals)))\nprint(list(map(lambda val: val.replace(\"s\", \"$\"), metals)))", "['silver', 'platinum', 'palladium']\n[]\n['platinum', 'palladium']\n[1, 1, 1, 2]\n['gold', '$ilver', 'platinum', 'palladium']\n" ], [ "#list comprehension\n\nanimals = [\"elephant\", \"horse\", \"cat\", \"giraffe\", \"cheetah\", \"dog\"]\nlong_words = [animal for animal in animals if len(animal) > 5]\nprint(long_words)\n\n#filter function\ndef is_long_animal(animal):\n return len(animal) > 5\n\nprint(list(filter(is_long_animal, animals)))", "['elephant', 'giraffe', 'cheetah']\n['elephant', 'giraffe', 'cheetah']\n" ], [ "#list comprehension\n\nnumbers = [4, 8, 15, 16, 23, 42]\ncubes = [number ** 3 for number in numbers]\nprint(cubes)\n\n#map function\n\ndef cube(number):\n return number ** 3\n\nprint(list(map(cube, numbers)))\n\nanimals = [\"cat\", \"bear\", \"zebra\", \"donkey\", \"cheetah\"]\nprint(list(map(len, animals))) ", "[64, 512, 3375, 4096, 12167, 74088]\n[64, 512, 3375, 4096, 12167, 74088]\n[3, 4, 5, 6, 7]\n" ], [ "# Declare a greater_sum function that accepts two lists of numbers.\n# It should return the list with the greatest sum.\n# You can assume the lists will always have different sums.\n#\n# greater_sum([1, 2, 3], [1, 2, 4]) => [1, 2, 4]\n# greater_sum([4, 5], [2, 3, 6]) => [2, 3, 6]\n# greater_sum([1], []) => [1]\n\ndef greater_sum(list_1, list_2):\n if sum(list_1) > sum(list_2):\n return list_1\n else:\n return list_2 \n \ngreater_sum([1,2,3,4], [1,2,3,5])", "_____no_output_____" ], [ "# Declare a sum_difference function that accepts two lists of numbers.\n# It should return the difference between the sum of values in the first and the second one\n#\n# sum_difference([1, 2, 3], [1, 2, 4]) => 6 - 7 => -1\n# sum_difference([4, 5], [2, 3, 6]) => 9 - 11 => -2\n# sum_difference([1], []) 
=> 1\n\ndef sum_difference(list_a, list_b):\n return sum(list_a) - sum(list_b)\n\nsum_difference([1,2,3,4], [1,2,3,5])", "_____no_output_____" ], [ "def product(numbers):\n numbers = list(numbers)\n product = 1\n for num in numbers:\n product = product * num\n return product\n\nproduct([1, 2, 3]) #=> 6\n# product([4, 5, 6, 7]) #=> 840\n# product([10]) #=> 10", "_____no_output_____" ], [ "# Define a smallest_number function that accepts a list of numbers.\n\n# It should return the smallest value in the list.\n\ndef smallest_number(numbers):\n smallest = numbers[0]\n for number in numbers: \n if number < smallest:\n smallest = number\n return smallest\n\nsmallest_number([1, 2, 3]) #=> 1\n# smallest_number([3, 2, 1]) #=> 1\n# smallest_number([4, 5, 4]) #=> 4\n# smallest_number([-3, -2, -1]) #=> -3", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb9ae294cbb8083faa0cdbdc77d9df031da09752
363,660
ipynb
Jupyter Notebook
nlp/notebooks/Topic_Model.ipynb
pshn111/Machine-Learning-Package
fbbaa44daf5f0701ea77e5b62eb57ef822e40ab2
[ "MIT" ]
null
null
null
nlp/notebooks/Topic_Model.ipynb
pshn111/Machine-Learning-Package
fbbaa44daf5f0701ea77e5b62eb57ef822e40ab2
[ "MIT" ]
null
null
null
nlp/notebooks/Topic_Model.ipynb
pshn111/Machine-Learning-Package
fbbaa44daf5f0701ea77e5b62eb57ef822e40ab2
[ "MIT" ]
null
null
null
695.334608
339,875
0.677713
[ [ [ "# Code borrowed from sklean\n# https://scikit-learn.org/stable/auto_examples/applications/plot_topics_extraction_with_nmf_lda.html\n# Author: Luke Kumar", "_____no_output_____" ], [ "import pandas as pd\nimport numpy as np\n\nfrom sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\nfrom sklearn.decomposition import LatentDirichletAllocation\nfrom sklearn.datasets import fetch_20newsgroups\n\nimport pyLDAvis\nimport pyLDAvis.sklearn\npyLDAvis.enable_notebook()\n\nimport pickle", "_____no_output_____" ] ], [ [ "## Params", "_____no_output_____" ] ], [ [ "n_samples = None # 2000\nn_features = 10000\nn_components = 25\nn_top_words = 20", "_____no_output_____" ] ], [ [ "# Data Loading", "_____no_output_____" ] ], [ [ "dataset = fetch_20newsgroups(shuffle=True, random_state=1,\n remove=('headers', 'footers', 'quotes'))\nif n_samples is not None:\n data_samples = dataset.data[:n_samples]\nelse:\n data_samples = dataset.data", "_____no_output_____" ], [ "data_samples[0]", "_____no_output_____" ], [ "len(data_samples)", "_____no_output_____" ] ], [ [ "# Encode Text", "_____no_output_____" ] ], [ [ "# max_df : float in range [0.0, 1.0] or int, default=1.0\n# When building the vocabulary ignore terms that have a document frequency strictly higher than the given threshold\n# (corpus-specific stop words). If float, the parameter represents a proportion of documents, integer absolute counts. This parameter is ignored if vocabulary is not None.\n\n# min_df : float in range [0.0, 1.0] or int, default=1\n# When building the vocabulary ignore terms that have a document frequency strictly lower than the given threshold. \n# This value is also called cut-off in the literature. If float, the parameter represents a proportion of documents, \n# integer absolute counts. 
This parameter is ignored if vocabulary is not None.", "_____no_output_____" ], [ "tf_vectorizer = CountVectorizer(max_df=0.95, min_df=2,\n max_features=n_features,\n stop_words='english')", "_____no_output_____" ], [ "tf = tf_vectorizer.fit_transform(data_samples)", "_____no_output_____" ] ], [ [ "# LDA Model", "_____no_output_____" ] ], [ [ "lda = LatentDirichletAllocation(n_components=n_components, max_iter=100,\n learning_method = 'batch', #'online',\n random_state=0, verbose=0, n_jobs=-1,\n mean_change_tol=0.001)", "_____no_output_____" ], [ "lda.fit(tf)", "_____no_output_____" ], [ "# save model\npickle.dump(lda, open('lda.pkl', 'wb'))", "_____no_output_____" ], [ "def print_top_words(model, feature_names, n_top_words):\n for topic_idx, topic in enumerate(model.components_):\n message = \"Topic #%d: \" % topic_idx\n message += \" \".join([feature_names[i]\n for i in topic.argsort()[:-n_top_words - 1:-1]])\n print(message)\n print()", "_____no_output_____" ], [ "print_top_words(lda, tf_vectorizer.get_feature_names(), n_top_words)", "Topic #0: file israel entry israeli output program arab section rules entries printf int oname char build title info stream number return\nTopic #1: good year think just like don game better time did got hit really know players make ll didn years team\nTopic #2: team games game hockey season new league san nhl vs teams gm division year players city chicago st play york\nTopic #3: ax max g9v b8f a86 pl 145 1d9 1t 0t giz bhj 3t 2di 75u 34u 2tm wm 7ey bxn\nTopic #4: people think don does say god believe just know way like true question point life make things time good right\nTopic #5: edu com information mail available ftp pub list send email university software graphics address computer data cs contact ca internet\nTopic #6: like just don good car time ve know problem think work way does make use new used better really want\nTopic #7: said people know didn time just went don did like told came say saw going started left home took years\nTopic #8: db mov cs al bh si word pain com byte dave deleted judas pop di easter greek mydisplay bl hudson\nTopic #9: sale new offer price power used shipping ground condition wire use sell box cable circuit asking cd current cover supply\nTopic #10: key chip number bit keys keyboard chips serial algorithm phone 80 bits clipper know used machines use session block s1\nTopic #11: gun guns control use health firearms file medical states study crime weapons disease 1993 food rate drugs patients state msg\nTopic #12: 00 10 25 15 20 16 11 12 14 13 55 17 30 50 18 21 40 24 23 22\nTopic #13: know don like thanks just post want ve let think people anybody news did really tell koresh help heard good\nTopic #14: drive card scsi disk pc use mac does hard bit memory video know windows drives thanks ram bus apple controller\nTopic #15: windows file use window program files using server display set image version motif application widget does color problem code screen\nTopic #16: president mr people think stephanopoulos going money know don said work tax program jobs time support ll make new clinton\nTopic #17: key des public ripem rsa myers pgp faq edu message encryption sci pitt banks gordon soon crypt cryptography cipher surrender\nTopic #18: armenian turkish jews war people government armenians turkey world rights population turks jewish history killed genocide greek state armenia country\nTopic #19: ah 02 03 p2 04 pt mk air mp b8 p3 bh mu ma 34 mm 7u 05 01 mt\nTopic #20: cx w7 c_ uw t7 ck chz hz lk w1 17 mv k8 sp a7 s6 ww 34u c8 
d9\nTopic #21: god jesus church bible christ christian faith john lord law christians son father sin paul spirit day holy heaven book\nTopic #22: space nasa launch earth satellite shuttle orbit moon lunar data mission flight solar spacecraft station rocket mars dc mass satellites\nTopic #23: government law use encryption privacy security clipper public new legal information court technology key access rights chip enforcement federal police\nTopic #24: period play pts goal power pp flyers 10 puck 11 shots goals second 18 12 scorer 17 calgary 19 shot\n\n" ], [ "pyLDAvis.sklearn.prepare(lda, tf, tf_vectorizer)", "/Users/luke/AMII/.virtualenvs/workshop/lib/python3.7/site-packages/pyLDAvis/_prepare.py:257: FutureWarning: Sorting because non-concatenation axis is not aligned. A future version\nof pandas will change to not sort by default.\n\nTo accept the future behavior, pass 'sort=False'.\n\nTo retain the current behavior and silence the warning, pass 'sort=True'.\n\n return pd.concat([default_term_info] + list(topic_dfs))\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
cb9ae749d486e4b9c60df3c890137d324419c952
13,603
ipynb
Jupyter Notebook
tutorials/tutorial06_osm.ipynb
kristery/flow
2638f8137541424af8de23159260d73c571f2e04
[ "MIT" ]
2
2020-12-03T21:13:39.000Z
2022-03-13T09:12:43.000Z
tutorials/tutorial06_osm.ipynb
kristery/flow
2638f8137541424af8de23159260d73c571f2e04
[ "MIT" ]
1
2019-12-05T09:04:05.000Z
2019-12-05T21:23:49.000Z
tutorials/tutorial06_osm.ipynb
kristery/flow
2638f8137541424af8de23159260d73c571f2e04
[ "MIT" ]
3
2019-12-07T11:36:21.000Z
2020-01-04T16:29:57.000Z
40.485119
720
0.569139
[ [ [ "# Tutorial 06: Networks from OpenStreetMap\n\nIn this tutorial, we discuss how networks that have been imported from OpenStreetMap can be integrated and run in Flow. This will all be presented via the Bay Bridge network, seen in the figure below. Networks from OpenStreetMap are commonly used in many traffic simulators for the purposes of replicating traffic in realistic traffic geometries. This is true in both SUMO and Aimsun (which are both supported in Flow), with each supporting several techniques for importing such network files. This process is further simplified and abstracted in Flow, with users simply required to specify the path to the osm file in order to simulate traffic in the network.\n\n\n<img src=\"img/bay_bridge_osm.png\" width=750>\n<center> **Figure 1**: Snapshot of the Bay Bridge from OpenStreetMap </center>\n\nBefore we begin, let us import all relevant Flow parameters as we have done for previous tutorials. If you are unfamiliar with these parameters, you are encouraged to review tutorial 1.", "_____no_output_____" ] ], [ [ "# the TestEnv environment is used to simply simulate the network\nfrom flow.envs import TestEnv\n\n# the Experiment class is used for running simulations\nfrom flow.core.experiment import Experiment\n\n# all other imports are standard\nfrom flow.core.params import VehicleParams\nfrom flow.core.params import NetParams\nfrom flow.core.params import InitialConfig\nfrom flow.core.params import EnvParams\nfrom flow.core.params import SumoParams", "_____no_output_____" ] ], [ [ "## 1. Running a Default Simulation\n\nIn order to create a network object in Flow with network features depicted from OpenStreetMap, we will use the base `Network` class. This class can sufficiently support the generation of any .osm file.", "_____no_output_____" ] ], [ [ "from flow.networks import Network", "_____no_output_____" ] ], [ [ "In order to recreate the network features of a specific osm file, the path to the osm file must be specified in `NetParams`. For this example, we will use an osm file extracted from the section of the Bay Bridge as depicted in Figure 1.\n\nIn order to specify the path to the osm file, simply fill in the `osm_path` attribute with the path to the .osm file as follows:", "_____no_output_____" ] ], [ [ "net_params = NetParams(\n osm_path='networks/bay_bridge.osm'\n)", "_____no_output_____" ] ], [ [ "Next, we create all other parameters as we have in tutorials 1 and 2. For this example, we will assume a total of 1000 are uniformly spread across the Bay Bridge. Once again, if the choice of parameters is unclear, you are encouraged to review Tutorial 1.", "_____no_output_____" ] ], [ [ "# create the remainding parameters\nenv_params = EnvParams()\nsim_params = SumoParams(render=True)\ninitial_config = InitialConfig()\nvehicles = VehicleParams()\nvehicles.add('human', num_vehicles=100)\n\n# create the network\nnetwork = Network(\n name='bay_bridge',\n net_params=net_params,\n initial_config=initial_config,\n vehicles=vehicles\n)", "_____no_output_____" ] ], [ [ "We are finally ready to test our network in simulation. In order to do so, we create an `Experiment` object and run the simulation for a number of steps. This is done in the cell below.", "_____no_output_____" ] ], [ [ "# create the environment\nenv = TestEnv(\n env_params=env_params,\n sim_params=sim_params,\n network=network\n)\n\n# run the simulation for 1000 steps\nexp = Experiment(env=env)\nexp.run(1, 1000)", "_____no_output_____" ] ], [ [ "## 2. 
Customizing the Network\n\nWhile the above example does allow you to view the network within Flow, the simulation is limited for two reasons. For one, vehicles are placed on all edges within the network; if we wished to simulate traffic solely on the bridge and did not care about the arterials, for instance, this would result in unnecessary computational burdens. Next, as you may have noticed if you ran the above example to completion, routes in the base network class default to consist of the vehicles' current edges only, meaning that vehicles exit the network as soon as they reach the end of the edge they originated on. In the next subsections, we discuss how the network can be modified to resolve these issues.\n\n### 2.1 Specifying Traversable Edges\n\nIn order to limit the edges vehicles are placed on to the edges corresponding to the westbound Bay Bridge, we define an `EDGES_DISTRIBUTION` variable. This variable specifies the names of the edges within the network that vehicles are permitted to originate in, and is assigned to the network via the `edges_distribution` component of the `InitialConfig` input parameter, as seen in the code snippet below. Note that the names of the edges can be identified from the .osm file or by right-clicking on specific edges from the SUMO gui (see the figure below).\n\n<img src=\"img/osm_edge_name.png\" width=600>\n<center> **Figure 2**: Name of an edge from SUMO </center>", "_____no_output_____" ] ], [ [ "# we define an EDGES_DISTRIBUTION variable with the edges within \n# the westbound Bay Bridge \nEDGES_DISTRIBUTION = [\n    \"11197898\",\n    \"123741311\", \n    \"123741303\",\n    \"90077193#0\",\n    \"90077193#1\", \n    \"340686922\", \n    \"236348366\", \n    \"340686911#0\",\n    \"340686911#1\",\n    \"340686911#2\",\n    \"340686911#3\",\n    \"236348361\", \n    \"236348360#0\", \n    \"236348360#1\"\n]\n\n# the above variable is added to initial_config\nnew_initial_config = InitialConfig(\n    edges_distribution=EDGES_DISTRIBUTION\n)", "_____no_output_____" ] ], [ [ "### 2.2 Creating Custom Routes\n\nNext, we choose to specify the routes of vehicles so that they can traverse the entire Bay Bridge, instead of only the edge they are currently on. In order to do this, we create a new network class that inherits all its properties from `Network` and simply redefine the routes by modifying the `specify_routes` variable. This method was originally introduced in Tutorial 07: Creating Custom Network. 
The new network class looks as follows:", "_____no_output_____" ] ], [ [ "# we create a new network class to specify the expected routes\nclass BayBridgeOSMNetwork(Network):\n\n    def specify_routes(self, net_params):\n        return {\n            \"11197898\": [\n                \"11197898\", \"123741311\", \"123741303\", \"90077193#0\", \"90077193#1\", \n                \"340686922\", \"236348366\", \"340686911#0\", \"340686911#1\",\n                \"340686911#2\", \"340686911#3\", \"236348361\", \"236348360#0\", \"236348360#1\",\n            ],\n            \"123741311\": [\n                \"123741311\", \"123741303\", \"90077193#0\", \"90077193#1\", \"340686922\", \n                \"236348366\", \"340686911#0\", \"340686911#1\", \"340686911#2\",\n                \"340686911#3\", \"236348361\", \"236348360#0\", \"236348360#1\"\n            ],\n            \"123741303\": [\n                \"123741303\", \"90077193#0\", \"90077193#1\", \"340686922\", \"236348366\",\n                \"340686911#0\", \"340686911#1\", \"340686911#2\", \"340686911#3\", \"236348361\",\n                \"236348360#0\", \"236348360#1\"\n            ],\n            \"90077193#0\": [\n                \"90077193#0\", \"90077193#1\", \"340686922\", \"236348366\", \"340686911#0\",\n                \"340686911#1\", \"340686911#2\", \"340686911#3\", \"236348361\", \"236348360#0\",\n                \"236348360#1\"\n            ],\n            \"90077193#1\": [\n                \"90077193#1\", \"340686922\", \"236348366\", \"340686911#0\", \"340686911#1\",\n                \"340686911#2\", \"340686911#3\", \"236348361\", \"236348360#0\", \"236348360#1\"\n            ],\n            \"340686922\": [\n                \"340686922\", \"236348366\", \"340686911#0\", \"340686911#1\", \"340686911#2\",\n                \"340686911#3\", \"236348361\", \"236348360#0\", \"236348360#1\"\n            ],\n            \"236348366\": [\n                \"236348366\", \"340686911#0\", \"340686911#1\", \"340686911#2\", \"340686911#3\",\n                \"236348361\", \"236348360#0\", \"236348360#1\"\n            ],\n            \"340686911#0\": [\n                \"340686911#0\", \"340686911#1\", \"340686911#2\", \"340686911#3\", \"236348361\",\n                \"236348360#0\", \"236348360#1\"\n            ],\n            \"340686911#1\": [\n                \"340686911#1\", \"340686911#2\", \"340686911#3\", \"236348361\", \"236348360#0\",\n                \"236348360#1\"\n            ],\n            \"340686911#2\": [\n                \"340686911#2\", \"340686911#3\", \"236348361\", \"236348360#0\", \"236348360#1\"\n            ],\n            \"340686911#3\": [\n                \"340686911#3\", \"236348361\", \"236348360#0\", \"236348360#1\"\n            ],\n            \"236348361\": [\n                \"236348361\", \"236348360#0\", \"236348360#1\"\n            ],\n            \"236348360#0\": [\n                \"236348360#0\", \"236348360#1\"\n            ],\n            \"236348360#1\": [\n                \"236348360#1\"\n            ]\n        }", "_____no_output_____" ] ], [ [ "### 2.3 Rerunning the Simulation\n\nWe are now ready to rerun the simulation with fully defined vehicle routes and a limited number of traversable edges. If we run the cell below, we can see the new simulation in action.", "_____no_output_____" ] ], [ [ "# create the network\nnew_network = BayBridgeOSMNetwork(\n    name='bay_bridge',\n    net_params=net_params,\n    initial_config=new_initial_config,\n    vehicles=vehicles,\n)\n\n# create the environment\nenv = TestEnv(\n    env_params=env_params,\n    sim_params=sim_params,\n    network=new_network\n)\n\n# run the simulation for 10000 steps\nexp = Experiment(env=env)\nexp.run(1, 10000)", "_____no_output_____" ] ], [ [ "## 3. Other Tips\n\nThis tutorial introduces how to incorporate OpenStreetMap files in Flow. This feature, however, does not negate other features that are introduced in other tutorials and documentation. 
For example, if you would like to prevent vehicles from originating side-by-side within a network, this can still be done by specifying a \"random\" spacing for vehicles as follows:\n\n    initial_config = InitialConfig(\n        spacing=\"random\",\n        edges_distribution=EDGES_DISTRIBUTION\n    )\n\nIn addition, inflows of vehicles can be added to networks imported from OpenStreetMap as they are for any other network (see the tutorial on adding inflows for more on this).", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
cb9b1c87ce4437c961484980bcd74c73f8664a64
17,230
ipynb
Jupyter Notebook
bronze/B72_Rotation_Automata_Solutions.ipynb
ozlemsalehi/bronze-boun
37f853ead907b95e9ab20bb0a7f923126f00dc8f
[ "Apache-2.0", "CC-BY-4.0" ]
31
2019-10-06T19:13:26.000Z
2022-03-16T14:53:23.000Z
bronze/B72_Rotation_Automata_Solutions.ipynb
ozlemsalehi/bronze-boun
37f853ead907b95e9ab20bb0a7f923126f00dc8f
[ "Apache-2.0", "CC-BY-4.0" ]
null
null
null
bronze/B72_Rotation_Automata_Solutions.ipynb
ozlemsalehi/bronze-boun
37f853ead907b95e9ab20bb0a7f923126f00dc8f
[ "Apache-2.0", "CC-BY-4.0" ]
12
2020-03-07T09:15:40.000Z
2022-03-21T16:41:24.000Z
33.850688
309
0.515844
[ [ [ "<table width=\"100%\"> <tr>\n <td style=\"background-color:#ffffff;\">\n <a href=\"http://qworld.lu.lv\" target=\"_blank\"><img src=\"../images/qworld.jpg\" width=\"35%\" align=\"left\"> </a></td>\n <td style=\"background-color:#ffffff;vertical-align:bottom;text-align:right;\">\n prepared by Abuzer Yakaryilmaz (<a href=\"http://qworld.lu.lv/index.php/qlatvia/\" target=\"_blank\">QLatvia</a>)\n <br>\n Özlem Salehi | July 6, 2019 (updated)\n </td> \n</tr></table>", "_____no_output_____" ], [ "<table width=\"100%\"><tr><td style=\"color:#bbbbbb;background-color:#ffffff;font-size:11px;font-style:italic;text-align:right;\">This cell contains some macros. If there is a problem with displaying mathematical formulas, please run this cell to load these macros. </td></tr></table>\n$ \\newcommand{\\bra}[1]{\\langle #1|} $\n$ \\newcommand{\\ket}[1]{|#1\\rangle} $\n$ \\newcommand{\\braket}[2]{\\langle #1|#2\\rangle} $\n$ \\newcommand{\\dot}[2]{ #1 \\cdot #2} $\n$ \\newcommand{\\biginner}[2]{\\left\\langle #1,#2\\right\\rangle} $\n$ \\newcommand{\\mymatrix}[2]{\\left( \\begin{array}{#1} #2\\end{array} \\right)} $\n$ \\newcommand{\\myvector}[1]{\\mymatrix{c}{#1}} $\n$ \\newcommand{\\myrvector}[1]{\\mymatrix{r}{#1}} $\n$ \\newcommand{\\mypar}[1]{\\left( #1 \\right)} $\n$ \\newcommand{\\mybigpar}[1]{ \\Big( #1 \\Big)} $\n$ \\newcommand{\\sqrttwo}{\\frac{1}{\\sqrt{2}}} $\n$ \\newcommand{\\dsqrttwo}{\\dfrac{1}{\\sqrt{2}}} $\n$ \\newcommand{\\onehalf}{\\frac{1}{2}} $\n$ \\newcommand{\\donehalf}{\\dfrac{1}{2}} $\n$ \\newcommand{\\hadamard}{ \\mymatrix{rr}{ \\sqrttwo & \\sqrttwo \\\\ \\sqrttwo & -\\sqrttwo }} $\n$ \\newcommand{\\vzero}{\\myvector{1\\\\0}} $\n$ \\newcommand{\\vone}{\\myvector{0\\\\1}} $\n$ \\newcommand{\\vhadamardzero}{\\myvector{ \\sqrttwo \\\\ \\sqrttwo } } $\n$ \\newcommand{\\vhadamardone}{ \\myrvector{ \\sqrttwo \\\\ -\\sqrttwo } } $\n$ \\newcommand{\\myarray}[2]{ \\begin{array}{#1}#2\\end{array}} $\n$ \\newcommand{\\X}{ \\mymatrix{cc}{0 & 1 \\\\ 1 & 0} } $\n$ \\newcommand{\\Z}{ \\mymatrix{rr}{1 & 0 \\\\ 0 & -1} } $\n$ \\newcommand{\\Htwo}{ \\mymatrix{rrrr}{ \\frac{1}{2} & \\frac{1}{2} & \\frac{1}{2} & \\frac{1}{2} \\\\ \\frac{1}{2} & -\\frac{1}{2} & \\frac{1}{2} & -\\frac{1}{2} \\\\ \\frac{1}{2} & \\frac{1}{2} & -\\frac{1}{2} & -\\frac{1}{2} \\\\ \\frac{1}{2} & -\\frac{1}{2} & -\\frac{1}{2} & \\frac{1}{2} } } $\n$ \\newcommand{\\CNOT}{ \\mymatrix{cccc}{1 & 0 & 0 & 0 \\\\ 0 & 1 & 0 & 0 \\\\ 0 & 0 & 0 & 1 \\\\ 0 & 0 & 1 & 0} } $\n$ \\newcommand{\\norm}[1]{ \\left\\lVert #1 \\right\\rVert } $", "_____no_output_____" ], [ "<h2> <font color=\"blue\"> Solutions for </font>Rotation Automata</h2>", "_____no_output_____" ], [ "<a id=\"task1\"></a>\n<h3> Task 1 </h3>\n\nDo the same task given above by using different angles.\n\nTest at least three different angles. \n\nPlease modify the code above.", "_____no_output_____" ], [ "<h3>Solution</h3>", "_____no_output_____" ], [ "Any odd multiple of $ \\frac{\\pi}{16} $ works: $ i \\frac{\\pi}{16} $, where $ i \\in \\{1,3,5,7,\\ldots\\} $", "_____no_output_____" ], [ "<a id=\"task2\"></a>\n<h3> Task 2 </h3>\n\nLet $ \\mathsf{p} = 11 $.\n\nDetermine an angle of rotation such that when the length of stream is a multiple of $ \\sf p $, then we observe only state $ 0 $, and we can also observe state $ 1 $, otherwise.\n\nTest your rotation by using a quantum circuit. 
Execute the circuit for all streams of lengths from 1 to 11.", "_____no_output_____" ], [ "<h3>Solution</h3>", "_____no_output_____" ], [ "We can pick any angle $ k\\frac{2\\pi}{11} $ for $ k \\in \\{1,\\ldots,10\\} $.", "_____no_output_____" ] ], [ [ "from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer\nfrom math import pi\nfrom random import randrange\n\n# the angle of rotation\nr = randrange(1,11)\nprint(\"the picked angle is\",r,\"times of 2pi/11\")\nprint() \ntheta = r*pi/11\n\n# we read streams of length from 1 to 11\nfor i in range(1,12):\n # quantum circuit with one qubit and one bit\n qreg = QuantumRegister(1) \n creg = ClassicalRegister(1) \n mycircuit = QuantumCircuit(qreg,creg)\n # the stream of length i\n for j in range(i):\n mycircuit.ry(2*theta,qreg[0]) # apply one rotation for each symbol\n # we measure after reading the whole stream\n #mycircuit.measure(qreg[0],creg[0])\n # execute the circuit 1000 times\n job = execute(mycircuit,Aer.get_backend('unitary_simulator'))\n u=job.result().get_unitary(mycircuit,decimals=3)\n # we print the unitary matrix in nice format\n for i in range(len(u)):\n s=\"\"\n for j in range(len(u)):\n val = str(u[i][j].real)\n while(len(val)<8): val = \" \"+val\n s = s + val\n print(s)", "_____no_output_____" ] ], [ [ "<a id=\"task3\"></a>\n<h3> Task 3 </h3>\n\nList down 10 possible different angles for Task 2, where each angle should be between 0 and $2\\pi$.", "_____no_output_____" ], [ "<h3>Solution</h3>", "_____no_output_____" ], [ "Any angle $ k\\frac{2\\pi}{11} $ for $ k \\in \\{1,\\ldots,10\\} $.", "_____no_output_____" ], [ "<h3>Task 4</h3>\n\nFor each stream of length from 1 to 10, experimentially determine the best angle of rotation (we observe state $\\ket{1}$ the most) by using your circuit.", "_____no_output_____" ], [ "<h3>Solution</h3>", "_____no_output_____" ] ], [ [ "from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer\nfrom math import pi\nfrom random import randrange\n\n# for each stream of length from 1 to 10\nfor i in range(1,11):\n # we try each angle of the form k*2*pi/11 for k=1,...,10\n # we try to find the best k for which we observe 1 the most\n number_of_one_state = 0\n best_k = 1\n all_outcomes_for_i = \"length \"+str(i)+\"-> \"\n for k in range(1,11):\n theta = k*2*pi/11\n # quantum circuit with one qubit and one bit\n qreg = QuantumRegister(1) \n creg = ClassicalRegister(1) \n mycircuit = QuantumCircuit(qreg,creg)\n # the stream of length i\n for j in range(i):\n mycircuit.ry(2*theta,qreg[0]) # apply one rotation for each symbol\n # we measure after reading the whole stream\n mycircuit.measure(qreg[0],creg[0])\n # execute the circuit 10000 times\n job = execute(mycircuit,Aer.get_backend('qasm_simulator'),shots=10000)\n counts = job.result().get_counts(mycircuit)\n all_outcomes_for_i = all_outcomes_for_i + str(k)+ \":\" + str(counts['1']) + \" \"\n if int(counts['1']) > number_of_one_state:\n number_of_one_state = counts['1']\n best_k = k\n print(all_outcomes_for_i)\n print(\"for length\",i,\", the best k is\",best_k)\n print()", "_____no_output_____" ] ], [ [ "<a id=\"task5\"></a>\n<h3> Task 5 </h3>\n\nLet $ \\mathsf{p} = 31 $.\n\nCreate a circuit with three quantum bits and three classical bits.\n\nRotate the qubits with angles $ 3\\frac{2\\pi}{31} $, $ 7\\frac{2\\pi}{31} $, and $ 11\\frac{2\\pi}{31} $, respectively.\n\nExecute your circuit for all streams of lengths from 1 to 30. 
Check whether the number of state $ \\ket{000} $ is less than half or not.\n\n<i>Note that whether a key is in dictionary or not can be checked as follows:</i>\n\n```python\nif '000' in counts.keys():\n c = counts['000']\nelse:\n c = 0\n```", "_____no_output_____" ], [ "<h3>Solution</h3>", "_____no_output_____" ] ], [ [ "from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer\nfrom math import pi\nfrom random import randrange\n\n# the angles of rotations\ntheta1 = 3*2*pi/31\ntheta2 = 7*2*pi/31\ntheta3 = 11*2*pi/31\n\n# we read streams of length from 1 to 30\nfor i in range(1,32):\n # quantum circuit with three qubits and three bits\n qreg = QuantumRegister(3) \n creg = ClassicalRegister(3) \n mycircuit = QuantumCircuit(qreg,creg)\n # the stream of length i\n for j in range(i):\n # apply rotations for each symbol\n mycircuit.ry(2*theta1,qreg[0]) \n mycircuit.ry(2*theta2,qreg[1]) \n mycircuit.ry(2*theta3,qreg[2]) \n # we measure after reading the whole stream\n mycircuit.measure(qreg,creg)\n # execute the circuit N times\n N = 1000\n job = execute(mycircuit,Aer.get_backend('qasm_simulator'),shots=N)\n counts = job.result().get_counts(mycircuit)\n print(counts)\n if '000' in counts.keys():\n c = counts['000']\n else:\n c = 0\n print('000 is observed',c,'times out of',N)\n percentange = round(c/N*100,1)\n print(\"the ratio of 000 is \",percentange,\"%\")\n print()", "_____no_output_____" ] ], [ [ "<a id=\"task6\"></a>\n<h3> Task 6 </h3>\n\nLet $ \\mathsf{p} = 31 $.\n\nCreate a circuit with three quantum bits and three classical bits.\n\nRotate the qubits with random angles of the form $ k\\frac{2\\pi}{31}, $ where $ k \n\\in \\{1,\\ldots,30\\}.$\n\nExecute your circuit for all streams of lengths from 1 to 30.\n\nCalculate the maximum percentage of observing the state $ \\ket{000} $.\n\nRepeat this task for a few times.\n\n<i>Note that whether a key is in dictionary or not can be checked as follows:</i>\n\n```python\nif '000' in counts.keys():\n c = counts['000']\nelse:\n c = 0\n```", "_____no_output_____" ], [ "<h3>Solution</h3>", "_____no_output_____" ] ], [ [ "from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer\nfrom math import pi\nfrom random import randrange\n\n# randomly picked angles of rotations \nk1 = randrange(1,31)\ntheta1 = k1*2*pi/31\nk2 = randrange(1,31)\ntheta2 = k2*2*pi/31\nk3 = randrange(1,31)\ntheta3 = k3*2*pi/31\nprint(\"k1 =\",k1,\"k2 =\",k2,\"k3 =\",k3)\nprint()\n\nmax_percentange = 0\n# we read streams of length from 1 to 30\nfor i in range(1,31):\n k1 = randrange(1,31)\n theta1 = k1*2*pi/31\n k2 = randrange(1,31)\n theta2 = k2*2*pi/31\n k3 = randrange(1,31)\n theta3 = k3*2*pi/31\n # quantum circuit with three qubits and three bits\n qreg = QuantumRegister(3) \n creg = ClassicalRegister(3) \n mycircuit = QuantumCircuit(qreg,creg)\n # the stream of length i\n for j in range(i):\n # apply rotations for each symbol\n mycircuit.ry(2*theta1,qreg[0]) \n mycircuit.ry(2*theta2,qreg[1]) \n mycircuit.ry(2*theta3,qreg[2]) \n # we measure after reading the whole stream\n mycircuit.measure(qreg,creg)\n # execute the circuit N times\n N = 1000\n job = execute(mycircuit,Aer.get_backend('qasm_simulator'),shots=N)\n counts = job.result().get_counts(mycircuit)\n # print(counts)\n if '000' in counts.keys():\n c = counts['000']\n else:\n c = 0\n # print('000 is observed',c,'times out of',N)\n percentange = round(c/N*100,1)\n if max_percentange < percentange: max_percentange = percentange\n # print(\"the ration of 000 is 
\",percentange,\"%\")\n # print()\nprint(\"max percentage is\",max_percentange)", "_____no_output_____" ] ], [ [ "<a id=\"task7\"></a>\n<h3> Task 7 </h3>\n\nRepeat Task 6 by using four and five qubits.", "_____no_output_____" ], [ "<h3>Solution</h3>", "_____no_output_____" ] ], [ [ "from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer\nfrom math import pi\nfrom random import randrange\n\nnumber_of_qubits = 4\n#number_of_qubits = 5\n# randomly picked angles of rotations \ntheta = []\nfor i in range(number_of_qubits):\n k = randrange(1,31)\n print(\"k\",str(i),\"=\",k)\n theta += [k*2*pi/31]\n# print(theta)\n\n# we count the number of zeros\nzeros = ''\nfor i in range(number_of_qubits):\n zeros = zeros + '0'\nprint(\"zeros = \",zeros)\nprint()\n\nmax_percentange = 0\n# we read streams of length from 1 to 30\nfor i in range(1,31):\n # quantum circuit with qubits and bits\n qreg = QuantumRegister(number_of_qubits) \n creg = ClassicalRegister(number_of_qubits) \n mycircuit = QuantumCircuit(qreg,creg)\n # the stream of length i\n for j in range(i):\n # apply rotations for each symbol\n for k in range(number_of_qubits):\n mycircuit.ry(2*theta[k],qreg[k]) \n # we measure after reading the whole stream\n mycircuit.measure(qreg,creg)\n # execute the circuit N times\n N = 1000\n job = execute(mycircuit,Aer.get_backend('qasm_simulator'),shots=N)\n counts = job.result().get_counts(mycircuit)\n # print(counts)\n if zeros in counts.keys():\n c = counts[zeros]\n else:\n c = 0\n # print('000 is observed',c,'times out of',N)\n percentange = round(c/N*100,1)\n if max_percentange < percentange: max_percentange = percentange\n # print(\"the ration of 000 is \",percentange,\"%\")\n # print()\nprint(\"max percentage is\",max_percentange)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ] ]
cb9b27cc21e4725d7fe4f05d9fb88e0a2e091547
83,117
ipynb
Jupyter Notebook
nircam/NIRCAM-Imaging-Mode-WCS-Roundtrip.ipynb
sosey/jwst-investigate
b415f7e3fd44233ad87dcaf23fec5921cc858994
[ "BSD-3-Clause" ]
null
null
null
nircam/NIRCAM-Imaging-Mode-WCS-Roundtrip.ipynb
sosey/jwst-investigate
b415f7e3fd44233ad87dcaf23fec5921cc858994
[ "BSD-3-Clause" ]
null
null
null
nircam/NIRCAM-Imaging-Mode-WCS-Roundtrip.ipynb
sosey/jwst-investigate
b415f7e3fd44233ad87dcaf23fec5921cc858994
[ "BSD-3-Clause" ]
null
null
null
36.137826
591
0.605207
[ [ [ "# Walkthrough the NIRCAM imaging WCS pipeline rountrip of values through the coordinate frame transforms", "_____no_output_____" ] ], [ [ "import jwst\njwst.__version__", "_____no_output_____" ], [ "from astropy.io import fits\nfrom jwst import assign_wcs\nfrom jwst.datamodels import image", "_____no_output_____" ], [ "# add in the columns for ra and dec min/max points, translated from the wcs object for now\ndirect_data='test_disperse_f335m_rate_updated.fits' # original image provided for testing\n\n# We will open the direct image as an Image datamodel\ndirect_image = image.ImageModel(direct_data)", "_____no_output_____" ] ], [ [ "### Some basics about this image", "_____no_output_____" ] ], [ [ "direct_image.meta.instrument.pupil, direct_image.meta.instrument.filter, direct_image.meta.instrument.module, direct_image.meta.instrument.detector, direct_image.meta.instrument.channel", "_____no_output_____" ] ], [ [ "### This is the FITS WCS information", "_____no_output_____" ] ], [ [ "direct_image.get_fits_wcs()", "2018-04-26 17:39:15,170 - stpipe - WARNING - /Users/sosey/miniconda3/envs/jwstdev/lib/python3.5/site-packages/astropy/io/fits/card.py:979: VerifyWarning: Card is too long, comment will be truncated.\n VerifyWarning)\n\n" ], [ "# Load up the assign_wcs step that will assign all the transforms from world->detector\nassign_wcs_step=assign_wcs.AssignWcsStep()\nreference_file_types = ['distortion', 'filteroffset', 'specwcs', 'regions',\n 'wavelengthrange', 'camera', 'collimator',\n 'disperser', 'fore', 'fpa', 'msa', 'ote', 'ifupost',\n 'ifufore', 'ifuslicer']\nreference_file_names = {}\n\n# Ask CRDS for the reference files that apply to the image are working with\nfor name in reference_file_types:\n reffile = assign_wcs_step.get_reference_file(direct_image, name)\n reference_file_names[name] = reffile if reffile else \"\"\nreference_file_names", "2018-04-26 17:39:15,192 - stpipe.AssignWcsStep - INFO - AssignWcsStep instance created.\n" ], [ "direct_gwcs = assign_wcs_step(direct_image)", "2018-04-26 17:39:15,552 - stpipe.AssignWcsStep - INFO - Step AssignWcsStep running with args (<ImageModel(2048, 2048) from test_disperse_f335m_rate_updated.fits>,).\n2018-04-26 17:39:15,958 - stpipe.AssignWcsStep - INFO - assign_wcs updated S_REGION to POLYGON ICRS 0.009022622237533287 -0.008853991096776785 0.008719091565190015 0.008959908112853443 359.9911115957577 0.008837456490423869 359.9911689262167 -0.00902295821400355\n2018-04-26 17:39:15,959 - stpipe.AssignWcsStep - INFO - COMPLETED assign_wcs\n2018-04-26 17:39:15,968 - stpipe.AssignWcsStep - INFO - Step AssignWcsStep done\n" ] ], [ [ "### Some information about where the transforms are centered", "_____no_output_____" ] ], [ [ "direct_gwcs.meta.wcsinfo.crpix1, direct_gwcs.meta.wcsinfo.crpix2, direct_gwcs.meta.wcsinfo.crval1, direct_gwcs.meta.wcsinfo.crval2", "_____no_output_____" ] ], [ [ "### The GWCS object that contains all the transforms is now attached to the image model", "_____no_output_____" ] ], [ [ "direct_gwcs.meta.wcs", "_____no_output_____" ] ], [ [ "### Check the transform from detector pixels to sky coordinates in decimal degrees of RA and DEC", "_____no_output_____" ], [ "The default transform goes from detector pixels to sky coordinate (ra,dec)", "_____no_output_____" ] ], [ [ "direct_gwcs.meta.wcs(110,110)", "_____no_output_____" ], [ "detector_to_world = direct_gwcs.meta.wcs.get_transform('detector','world')\ndetector_to_world(110,110)", "_____no_output_____" ] ], [ [ "### Now get the inverse transform from RA,DEC to 
detector pixels, using the RA,DEC we just calculated\nThis should return the pixel (110,110)", "_____no_output_____" ] ], [ [ "world_to_detector = direct_gwcs.meta.wcs.get_transform('world','detector')\nworld_to_detector(0.00804448203007923, -0.007899731808577077)", "_____no_output_____" ] ], [ [ "### Let's check the other transforms to make sure it's just the distortion reference file that is off", "_____no_output_____" ] ], [ [ "direct_gwcs.meta.wcs.available_frames", "_____no_output_____" ], [ "world_to_v2v3 = direct_gwcs.meta.wcs.get_transform('world','v2v3')\nworld_to_v2v3(0.00804448203007923, -0.007899731808577077) # degrees", "_____no_output_____" ], [ "v2v3_to_world = direct_gwcs.meta.wcs.get_transform('v2v3','world')\nv2v3_to_world(149.63161618088085, -555.8266943126895) # arcseconds", "_____no_output_____" ] ], [ [ "### The following transforms only goes through the distortion reference file, it can't seem to return the original detector coordinates", "_____no_output_____" ] ], [ [ "detector_to_v2v3 = direct_gwcs.meta.wcs.get_transform('detector','v2v3')\ndetector_to_v2v3(110, 110)", "_____no_output_____" ], [ "v2v3_to_detector = direct_gwcs.meta.wcs.get_transform('v2v3','detector')\nv2v3_to_detector(149.63161618088085, -555.8266943126896)", "_____no_output_____" ] ], [ [ "### The transform across the distortion image is not able to reproduce values roundtripping\nLet's check if we can reproduce the anchor point of the distortion, the value at CRPIX1, CRPIX2", "_____no_output_____" ] ], [ [ "crpix1, crpix2, crval1, crval2=direct_gwcs.meta.wcsinfo.crpix1, direct_gwcs.meta.wcsinfo.crpix2, direct_gwcs.meta.wcsinfo.crval1, direct_gwcs.meta.wcsinfo.crval2", "_____no_output_____" ] ], [ [ "#### Check the roundtrip of crpix1,crpix2 from detector <-> v2v3\nThis also uses the distortion reference file", "_____no_output_____" ] ], [ [ "crpix1, crpix2", "_____no_output_____" ], [ "detector_to_v2v3(crpix1, crpix2)", "_____no_output_____" ], [ "v2v3_to_detector(120.63991845281103, -527.3565915096161)", "_____no_output_____" ] ], [ [ "#### Check the roundtrip of crval1,crval2 from world <-> v2v3", "_____no_output_____" ] ], [ [ "crval1, crval2", "_____no_output_____" ], [ "world_to_v2v3(crval1, crval2)", "_____no_output_____" ], [ "v2v3_to_world(120.67137599999998, -527.387665)", "_____no_output_____" ] ], [ [ "## The above examples convince me that the distortion reference image, specifically `jwst_nircam_distortion_0061.asdf` has 2D variations that make it impossible to compute the correct detector pixel coordinates given a position on the sky.\n\n### It's possible that the *incorrect* distortion reference image is being returned from CRDS for the image, let's have a look at the RMAP that is being used", "_____no_output_____" ], [ "## This is the current rmap in use\nhttps://jwst-crds.stsci.edu/browse/jwst_nircam_distortion_0018.rmap\n\nIt's checking these values:\n \n 'parkey' : (('META.EXPOSURE.TYPE', 'META.INSTRUMENT.DETECTOR', 'META.INSTRUMENT.CHANNEL', 'META.INSTRUMENT.PUPIL', 'META.INSTRUMENT.FILTER'),\n ('META.OBSERVATION.DATE', 'META.OBSERVATION.TIME')),\n 'reference_to_dataset' : {\n 'CHANNEL' : 'META.INSTRUMENT.CHANNEL',\n 'DETECTOR' : 'META.INSTRUMENT.DETECTOR',\n 'EXP_TYPE' : 'META.EXPOSURE.TYPE',", "_____no_output_____" ] ], [ [ "direct_image.meta.exposure.type, direct_image.meta.instrument.detector, direct_image.meta.instrument.channel, direct_image.meta.instrument.pupil, direct_image.meta.instrument.filter", "_____no_output_____" ] ], [ [ "### For the values specified above, 
the RMAP shows matching as:\n\n ('NRC_IMAGE|NRC_TSIMAGE|NRC_FLAT|NRC_LED|NRC_WFSC|NRC_GRISM|NRC_TSGRISM|NRC_FOCUS',\n 'NRCA1',\n 'SHORT',\n 'CLEAR|F162M|F164N|GDHS0|GDHS60|WLM8|WLP8|PINHOLES|MASKIPR',\n 'N/A') : UseAfter({'2014-10-01 00:00:00' : 'jwst_nircam_distortion_0061.asdf',\n }),\n \nSo the reference file matches regardness of FILTER (which has N/A) that is specified, it only cares about pupil and the detector specification", "_____no_output_____" ], [ "## Let's try the same thing with a different image, this one is taken from the latest NIRCAM simulations\nIt has a differently populated FITS WCS information, and specifies a different filter and detector", "_____no_output_____" ] ], [ [ "direct_data='V54321001002P000000000110d_A5_F444W_rate.fits' # most recent simulation for testing\n\n# We will open the direct image as a DrizProduct datamodel\ndirect_image = image.ImageModel(direct_data)", "_____no_output_____" ], [ "direct_image.meta.instrument.pupil, direct_image.meta.instrument.filter,direct_image.meta.instrument.module, direct_image.meta.instrument.detector, direct_image.meta.instrument.channel", "_____no_output_____" ], [ "reference_file_names = {}\n\n# Ask CRDS for the reference files that apply to the image are working with\nfor name in reference_file_types:\n reffile = assign_wcs_step.get_reference_file(direct_image, name)\n reference_file_names[name] = reffile if reffile else \"\"\nreference_file_names", "_____no_output_____" ], [ "direct_image.get_fits_wcs()", "2018-04-26 17:39:23,760 - stpipe - WARNING - /Users/sosey/miniconda3/envs/jwstdev/lib/python3.5/site-packages/astropy/io/fits/card.py:979: VerifyWarning: Card is too long, comment will be truncated.\n VerifyWarning)\n\n" ], [ "direct_gwcs = assign_wcs_step(direct_image)", "2018-04-26 17:39:23,998 - stpipe.AssignWcsStep - INFO - Step AssignWcsStep running with args (<ImageModel(2048, 2048) from V54321001002P000000000110d_A5_F444W_rate.fits>,).\n2018-04-26 17:39:24,358 - stpipe.AssignWcsStep - INFO - assign_wcs updated S_REGION to POLYGON ICRS 53.14918395976533 -27.84239002279679 53.17751007567551 -27.816685398839844 53.14925269475395 -27.791587338019358 53.120015753464514 -27.816868426589295\n2018-04-26 17:39:24,358 - stpipe.AssignWcsStep - INFO - COMPLETED assign_wcs\n2018-04-26 17:39:24,366 - stpipe.AssignWcsStep - INFO - Step AssignWcsStep done\n" ], [ "direct_gwcs.meta.wcs", "_____no_output_____" ] ], [ [ "### Check the transform from detector pixels to sky coordinates in decimal degrees of RA and DEC", "_____no_output_____" ], [ "The default transform goes from detector pixels to sky coordinate (ra,dec)", "_____no_output_____" ] ], [ [ "direct_gwcs.meta.wcs(110,110)", "_____no_output_____" ], [ "detector_to_world = direct_gwcs.meta.wcs.get_transform('detector','world')\ndetector_to_world(110,110)", "_____no_output_____" ] ], [ [ "### Now get the inverse transform from RA,DEC to detector pixels, using the RA,DEC we just calculated\nThis should return the pixel (110,110)", "_____no_output_____" ] ], [ [ "world_to_detector = direct_gwcs.meta.wcs.get_transform('world','detector')\nworld_to_detector(53.149149027123194, -27.839618613331695)", "_____no_output_____" ] ], [ [ "### Let's check the other transforms to make sure it's just the distortion reference file that is off", "_____no_output_____" ] ], [ [ "direct_gwcs.meta.wcs.available_frames", "_____no_output_____" ], [ "world_to_v2v3 = direct_gwcs.meta.wcs.get_transform('world','v2v3')\nworld_to_v2v3(53.149149027123194, -27.839618613331695) # degrees", 
"_____no_output_____" ], [ "v2v3_to_world = direct_gwcs.meta.wcs.get_transform('v2v3','world')\nv2v3_to_world(144.31111617155733, -550.8134158050235) # arcseconds", "_____no_output_____" ] ], [ [ "### The following transforms only goes through the distortion reference file, it can't seem to return the original detector coordinates", "_____no_output_____" ] ], [ [ "detector_to_v2v3 = direct_gwcs.meta.wcs.get_transform('detector','v2v3')\ndetector_to_v2v3(110, 110)", "_____no_output_____" ], [ "v2v3_to_detector = direct_gwcs.meta.wcs.get_transform('v2v3','detector')\nv2v3_to_detector(144.31111617152274, -550.8134158049928)", "_____no_output_____" ] ], [ [ "### The transform across the distortion image is not able to reproduce values roundtripping\nLet's check if we can reproduce the anchor point of the distortion, the value at CRPIX1, CRPIX2", "_____no_output_____" ] ], [ [ "crpix1, crpix2, crval1, crval2=direct_gwcs.meta.wcsinfo.crpix1, direct_gwcs.meta.wcsinfo.crpix2, direct_gwcs.meta.wcsinfo.crval1, direct_gwcs.meta.wcsinfo.crval2", "_____no_output_____" ], [ "crval1,crval2, direct_gwcs.meta.wcsinfo.roll_ref", "_____no_output_____" ] ], [ [ "#### Check the roundtrip of crpix1,crpix2 from detector <-> v2v3\nThis also uses the distortion reference file", "_____no_output_____" ] ], [ [ "crpix1, crpix2", "_____no_output_____" ], [ "detector_to_v2v3(crpix1, crpix2)", "_____no_output_____" ], [ "v2v3_to_detector(86.04055467237623, -493.16454761867965)", "_____no_output_____" ] ], [ [ "#### Check the roundtrip of crval1,crval2 from world <-> v2v3", "_____no_output_____" ] ], [ [ "crval1, crval2", "_____no_output_____" ], [ "world_to_v2v3(crval1, crval2)", "_____no_output_____" ], [ "v2v3_to_world(86.10345800001141, -493.2275120000079)", "_____no_output_____" ] ], [ [ "## Using a different distortion reference file we still are seeing the same offsets with the reverse transform.\nWe can do a little more detective work and chart the roundtrip offsets that are present in all the distortion reference files.\nFirst we need to get a local copy of all the distortion reference files in CRDS for the NRC_IMAGE mode.\n\nI'm going to do this by asking CRDS. 
[ [ "import glob", "_____no_output_____" ], [ "dist_files=glob.glob('/Users/sosey/crds_cache/references/jwst/nircam/*distortion*')", "_____no_output_____" ] ], [ [ "### Let's make a WCS object for each of the distortion files that will take us through the transform.\nWe use the most recent image as the starting point and direct it to the distortion file to use.", "_____no_output_____" ] ], [ [ "from jwst.assign_wcs import nircam\nfrom jwst.datamodels.wcs_ref_models import DistortionModel\nfrom gwcs.wcs import WCS", "_____no_output_____" ] ], [ [ "### Next we'll cut the list down to just the distortion files used with imaging mode", "_____no_output_____" ] ], [ [ "image_dist=[]\nfor dist in dist_files:\n    print(dist)\n    data=DistortionModel(dist)\n    try:\n        if (data['exp_type'] == 'NRC_IMAGE'):\n            image_dist.append(dist)\n    except KeyError:\n        try:\n            if (data['EXP_TYPE'] == 'NRC_IMAGE'):\n                image_dist.append(dist)\n        except KeyError:\n            if \"NRC_IMAGE\" in data.meta.exposure.p_exptype:\n                image_dist.append(dist)\n    data.close()", "/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0001.asdf\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0002.asdf\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0003.asdf\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0004.asdf\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0005.asdf\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0006.asdf\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0007.asdf\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0008.asdf\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0009.asdf\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0010.asdf\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0011.asdf\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0012.asdf\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0013.asdf\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0014.asdf\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0015.asdf\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0016.asdf\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0017.asdf\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0018.asdf\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0019.asdf\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0020.asdf\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0021.asdf\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0022.asdf\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0023.asdf\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0024.asdf\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0025.asdf\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0026.asdf\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion
_0027.asdf\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0028.asdf\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0029.asdf\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0030.asdf\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0037.asdf\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0038.asdf\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0039.asdf\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0043.asdf\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0057.asdf\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0058.asdf\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0059.asdf\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0060.asdf\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0061.asdf\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0062.asdf\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0063.asdf\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0064.asdf\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0065.asdf\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0066.asdf\n" ] ], [ [ "direct_data='V54321001002P000000000110d_A5_F444W_rate.fits' # latest simulated image for testing\n\n# We will open the direct image as a DrizProduct datamodel\ndirect_image = image.ImageModel(direct_data)\ndirect_image.meta.instrument.pupil, direct_image.meta.instrument.filter,direct_image.meta.instrument.module, direct_image.meta.instrument.detector, direct_image.meta.instrument.channel", "_____no_output_____" ], [ "# Load up the assign_wcs step to populate our structure, we should see only the distortion file being used\nassign_wcs_step=assign_wcs.AssignWcsStep()\nreference_file_types = ['distortion', 'filteroffset', 'specwcs', 'regions',\n                        'wavelengthrange', 'camera', 'collimator',\n                        'disperser', 'fore', 'fpa', 'msa', 'ote', 'ifupost',\n                        'ifufore', 'ifuslicer']\nreference_file_names = {}\n\n# Ask CRDS for the reference files that apply to the image we are working with\nfor name in reference_file_types:\n    reffile = assign_wcs_step.get_reference_file(direct_image, name)\n    reference_file_names[name] = reffile if reffile else \"\"\nreference_file_names", "2018-04-26 17:39:40,195 - stpipe.AssignWcsStep - INFO - AssignWcsStep instance created.\n" ] ], [ [ "### I'm going to call a part of the pipeline that already assumes the correct file has been matched by CRDS, so I should just need to give it the reference file to use and it will return the pipeline that includes that file", "_____no_output_____" ] ], [ [ "results=[]\nfor dist in image_dist:\n    reference_file_names['distortion'] = dist\n    pipeline = nircam.imaging(direct_image, reference_file_names)\n    test_wcs = WCS(pipeline)\n    ra,dec = test_wcs(110,110)\n    try:\n        w2d = test_wcs.get_transform('world','detector')\n        x,y = w2d(ra,dec)\n        results.append({'dfile':dist, 'ra':ra, 'dec':dec, 'x':x, 'y':y, 'start_x': 110, 'start_y': 110})\n    except NotImplementedError:\n        pass\n    \n    ", "_____no_output_____" ], [ "line = '{:>79} {:>8} {:>8} {:>19} {:>19} {:>19} {:>19}'.format(\"FILE\", \"START_X\", \"START_Y\",\"RETURNED X\", \"RETURNED Y\", \"DELTA X\", \"DELTA Y\")\nprint(line)\nfor res in results:\n    print(\"{:>79} {:>8} {:>8} {:>19} {:>19} {:>19} {:>19}\".format(res['dfile'], 
res['start_x'], res['start_y'],res['x'], res['y'], res['start_x'] - res['x'], res['start_y']-res['y']))\n ", " FILE START_X START_Y RETURNED X RETURNED Y DELTA X DELTA Y\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0004.asdf 110 110 -32211.563169272278 -4414611.484215153 32321.563169272278 4414721.484215153\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0011.asdf 110 110 110.50304890545871 109.23927236754712 -0.5030489054587122 0.760727632452884\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0012.asdf 110 110 110.34137150293236 109.62428757378888 -0.3413715029323612 0.37571242621112333\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0013.asdf 110 110 109.94816366972968 110.0303481756208 0.05183633027031931 -0.0303481756208015\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0014.asdf 110 110 109.38545945771133 110.39814309057316 0.6145405422886654 -0.3981430905731571\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0015.asdf 110 110 111.39314748078246 108.39352521887382 -1.39314748078246 1.606474781126181\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0016.asdf 110 110 109.04545366399353 110.95276267214285 0.954546336006473 -0.9527626721428533\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0017.asdf 110 110 109.78800711440576 110.17214872743332 0.2119928855942419 -0.1721487274333242\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0018.asdf 110 110 109.31110484408768 110.66720056352229 0.68889515591232 -0.667200563522286\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0019.asdf 110 110 108.39675567690172 110.89650896323514 1.6032443230982807 -0.8965089632351351\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0020.asdf 110 110 110.098379123054 109.87763607881155 -0.0983791230540021 0.12236392118845174\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0021.asdf 110 110 110.50304890499478 109.23927236807431 -0.5030489049947846 0.7607276319256897\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0022.asdf 110 110 110.34137150335498 109.62428757388814 -0.3413715033549778 0.3757124261118605\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0023.asdf 110 110 109.94816367007354 110.03034817505613 0.05183632992645926 -0.030348175056133186\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0024.asdf 110 110 109.38545945727077 110.39814309012611 0.6145405427292303 -0.39814309012611204\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0025.asdf 110 110 111.39314748156202 108.39352521900166 -1.3931474815620248 1.6064747809983402\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0026.asdf 110 110 109.04545366372139 110.95276267276301 0.9545463362786109 -0.952762672763015\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0027.asdf 110 110 109.78800711484327 110.17214872745653 0.2119928851567323 -0.17214872745653054\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0028.asdf 110 110 109.31110484440914 110.6672005624399 0.6888951555908562 -0.667200562439902\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0029.asdf 110 110 108.39675567555064 110.8965089631495 1.6032443244493635 
-0.8965089631495005\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0030.asdf 110 110 110.09837912296992 109.87763607870494 -0.09837912296991647 0.12236392129506157\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0057.asdf 110 110 115.7060195864882 104.59142474479745 -5.706019586488196 5.40857525520255\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0058.asdf 110 110 107.81672101399101 113.67340689113007 2.183278986008986 -3.673406891130071\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0059.asdf 110 110 115.04030042777447 110.4250127829049 -5.0403004277744685 -0.42501278290490063\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0060.asdf 110 110 117.14543043632614 107.25115815627896 -7.145430436326137 2.7488418437210385\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0061.asdf 110 110 127.67113370076697 104.93657235314652 -17.671133700766973 5.063427646853484\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0062.asdf 110 110 97.61343207141408 123.37917388253788 12.386567928585919 -13.379173882537884\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0063.asdf 110 110 110.2215771535989 109.87151638926684 -0.22157715359890062 0.12848361073315573\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0064.asdf 110 110 117.91190750112469 102.11505730485305 -7.911907501124688 7.884942695146947\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0065.asdf 110 110 104.89192400136733 116.05870816595791 5.108075998632671 -6.05870816595791\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0066.asdf 110 110 109.20281049225446 110.92871603920503 0.7971895077455429 -0.9287160392050282\n" ] ], [ [ "NOTE: according to the RMAP, distortion files numbered prior to `*0011.asdf` are old and replaced by the useafter date with updated files, so the results for `jwst_nircam_distortion_0004.asdf` above can be ignored", "_____no_output_____", "## We can do the same number crunching just using the model inside the distortion reference file itself, and we should get the same answer as with the GWCS model", "_____no_output_____" ] ], [ [ "results=[]\nfor dist in image_dist:\n    model = DistortionModel(dist).model\n    try:\n        v2v3x, v2v3y = model(110, 110)\n        x, y = model.inverse(v2v3x, v2v3y)\n        results.append({'dfile':dist, 'v2v3x':v2v3x, 'v2v3y':v2v3y, 'x':x, 'y':y, 'start_x': 110, 'start_y': 110})\n    except NotImplementedError:\n        pass", "_____no_output_____" ], [ "line = '{:>79} {:>8} {:>8} {:>19} {:>19} {:>19} {:>19}'.format(\"FILE\", \"START_X\", \"START_Y\",\"RETURNED X\", \"RETURNED Y\", \"DELTA X\", \"DELTA Y\")\nprint(line)\nfor res in results:\n    print(\"{:>79} {:>8} {:>8} {:>19} {:>19} {:>19} {:>19}\".format(res['dfile'], res['start_x'], res['start_y'],res['x'], res['y'], res['start_x'] - res['x'], res['start_y']-res['y']))\n", " FILE START_X START_Y RETURNED X RETURNED Y DELTA X DELTA Y\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0004.asdf 110 110 -32211.56316247882 -4414611.483280538 32321.56316247882 4414721.483280538\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0011.asdf 110 110 110.50304890570733 109.23927236783844 -0.5030489057073311 0.7607276321615615\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0012.asdf 110 110 110.34137150365541 109.62428757533306 -0.34137150365540947 
0.37571242466694343\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0013.asdf 110 110 109.9481636701729 110.03034817585473 0.05183632982709696 -0.030348175854726378\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0014.asdf 110 110 109.38545945780922 110.39814309137432 0.614540542190781 -0.39814309137432247\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0015.asdf 110 110 111.39314748246379 108.39352521951902 -1.3931474824637888 1.6064747804809798\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0016.asdf 110 110 109.04545366404597 110.95276267427047 0.9545463359540349 -0.952762674270474\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0017.asdf 110 110 109.78800711493207 110.17214872795837 0.21199288506792868 -0.17214872795837266\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0018.asdf 110 110 109.3111048445423 110.66720056420137 0.6888951554577005 -0.6672005642013659\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0019.asdf 110 110 108.39675567667469 110.89650896331993 1.6032443233253133 -0.8965089633199312\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0020.asdf 110 110 110.09837912350001 109.87763607948385 -0.09837912350000977 0.12236392051615042\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0021.asdf 110 110 110.50304890570732 109.23927236783844 -0.5030489057073169 0.7607276321615615\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0022.asdf 110 110 110.34137150365544 109.62428757533306 -0.3413715036554379 0.37571242466694343\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0023.asdf 110 110 109.94816367017292 110.03034817585473 0.05183632982708275 -0.030348175854726378\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0024.asdf 110 110 109.38545945780922 110.39814309137432 0.614540542190781 -0.39814309137432247\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0025.asdf 110 110 111.39314748246379 108.393525219519 -1.3931474824637888 1.606474780480994\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0026.asdf 110 110 109.04545366404592 110.95276267427045 0.9545463359540776 -0.9527626742704456\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0027.asdf 110 110 109.78800711493207 110.17214872795837 0.21199288506792868 -0.17214872795837266\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0028.asdf 110 110 109.31110484454233 110.66720056420138 0.6888951554576721 -0.6672005642013801\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0029.asdf 110 110 108.39675567667473 110.89650896331995 1.6032443233252707 -0.8965089633199455\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0030.asdf 110 110 110.09837912349998 109.87763607948384 -0.09837912349998135 0.12236392051616463\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0057.asdf 110 110 115.70601958722068 104.59142474523185 -5.7060195872206805 5.408575254768152\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0058.asdf 110 110 107.81672101478568 113.67340689151149 2.183278985214315 -3.6734068915114904\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0059.asdf 110 110 115.04030042830334 110.42501278338807 -5.04030042830334 
-0.4250127833880697\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0060.asdf 110 110 117.14543043635081 107.25115815741685 -7.145430436350807 2.748841842583147\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0061.asdf 110 110 127.67113370114123 104.93657235389549 -17.67113370114123 5.063427646104515\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0062.asdf 110 110 97.61343207196512 123.37917388278981 12.386567928034879 -13.379173882789814\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0063.asdf 110 110 110.2215771533123 109.87151638994749 -0.2215771533122961 0.12848361005251263\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0064.asdf 110 110 117.91190750169733 102.11505730538465 -7.911907501697328 7.884942694615347\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0065.asdf 110 110 104.89192400174534 116.05870816687195 5.108075998254662 -6.058708166871952\n/Users/sosey/crds_cache/references/jwst/nircam/jwst_nircam_distortion_0066.asdf 110 110 109.20281049288587 110.92871603909657 0.7971895071141262 -0.928716039096571\n" ] ], [ [ "## The NIRCAM team used scripts that translate through the SIAF file as part of their data simulator\nBelow, I'll go through the same process and see what happens (communiqué via Hilbert):\n\n    The SIAF file contains all of the relevant coordinate system definitions and parameters, as well as the polynomial coefficients which describe the translation between the V2,V3 system and pixel space for each NIRCam aperture on each detector. Colin Cox's report from 2009 which defines the terms in the SIAF file and shows how to use the information in the SIAF file to generate functions for translating between coordinate systems. \n\n    (Note that the translation functions and definitions are built around pixel coordinates that are indexed to 1. So if you are going to run translations in python, where things are indexed to 0, be sure to add 1 when creating inputs for the translation models, and subtract 1 from the outputs.)\n\n    If you are interested in quickly being able to translate between pixel space and RA/Dec or V2/V3, I have already used the data in the SIAF file to construct distortion reference files. These are the reference files that will be used in the DMS pipeline. There's a separate file for each aperture. I've attached the file for full-frame A1 observations as an example. Let me know if you would like others.\n\n    I've attached some python code in coord_translate.py that contains everything you need to go from x,y to RA,Dec and back. The script depends on the other attached python scripts.\n\n\nInside the scripts, there are instructions for translating between x,y <-> ra,dec through the SIAF file\n", "_____no_output_____" ] ], 
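[ [ "Added note (hedged): the 1-indexed convention called out above is easy to trip over. The wrapper below is hypothetical — it is not part of `coord_translate.py` — and just shows how a 0-indexed python caller would use the SIAF-based translations.", "_____no_output_____" ] ], [ [ "# Hypothetical sketch (added): keep callers in python's 0-indexed pixels.\ndef siaf_translate_0indexed(translate, x0, y0):\n    x1, y1 = translate(x0 + 1, y0 + 1)  # python -> SIAF (1-indexed)\n    return x1 - 1, y1 - 1               # SIAF -> python (0-indexed)", "_____no_output_____" ] ], 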
[ [ "from asdf import AsdfFile\nfrom astropy.io import ascii\nimport numpy as np\nimport sys  # used by getDistortionCoefficients below when a lookup fails\n\nfrom SIAFDistortions import rotations\nfrom SIAFDistortions import read_siaf_table\nfrom SIAFDistortions import polynomial\n", "_____no_output_____" ], [ "distortionTable = ascii.read('SIAFDistortions/NIRCam_SIAF_2016-09-29.csv', header_start=1)", "_____no_output_____" ] ], [ [ "## Method 1, (Ra, Dec) --> (x, y) just using the distortion reference file\n    There are two methods for translating from RA,Dec to x,y. The first makes use of only\n    the distortion reference file (the asdf file). This method is faster (and is\n    therefore used within the DMS pipeline), but loses accuracy the farther from the\n    reference location that you get. For full frame observations, errors can approach\n    ~20 pixels at the corners of the detector (the reference location is the center\n    of the detector).", "_____no_output_____" ] ], [ [ "def RADecToXY_approx(ra,dec,attitude_matrix,coord_transform,refpix_v2,refpix_v3):\n    #If the full set of distortion coefficients is not provided,\n    #(i.e. you don't have the SIAF file)\n    #then we fall back to the coordinate transform provided by the\n    #distortion reference file. These results are not exact, and\n    #become less accurate the farther the source is from the center\n    #of the detector. Results can be incorrect by ~20 pixels in the\n    #corners of the detector.\n\n\n    #RA,Dec to V2,V3\n    pixelv2,pixelv3 = rotations.getv2v3(attitude_matrix,ra,dec)\n    \n    #V2,V3 to distorted pixels\n    deltapixelx,deltapixely = coord_transform.inverse(pixelv2-refpix_v2,pixelv3-refpix_v3)\n\n    return deltapixelx,deltapixely", "_____no_output_____" ], [ "#distortion reference file to use\ndist_reffile = 'SIAFDistortions/NRCA1_FULL_distortion.asdf'\n\n#RA and Dec you wish to convert to x,y\nra = 53.1490299775 # decimal degrees. RA you wish to convert to x,y\ndec = -27.8168745624 # decimal degrees. Dec you wish to convert to x,y\n\n#telescope pointing information \ntel_ra = 53.1490299775 # decimal degrees. RA at the reference location on the detector\ntel_dec = -27.8168745624 # decimal degrees. Dec at the reference location on the detector\ntel_rot = 45.04234416817661 #telescope rotation, degrees.\n\n#in this case, if you don't have the SIAF file, you'll need to get the reference\n#location v2,v3 coordinates for the desired aperture from some other source.\nrefpix_v2 = 120.6714 # arcsec. reference location is usually center of aperture \nrefpix_v3 = -527.3877 # arcsec. \nrefpix_x = 1024.5 # pixels. reference location x for desired aperture\nrefpix_y = 1024.5 # pixels. reference location y for desired aperture\n\n#Read in the CRDS-format distortion reference file\nwith AsdfFile.open(dist_reffile) as dist_file:\n    coord_transform = dist_file.tree['model']\n\n#Create attitude_matrix\nattitude_matrix = rotations.attitude(refpix_v2,refpix_v3,tel_ra,tel_dec,tel_rot)\n\n#Translate\ndx,dy = RADecToXY_approx(ra,dec,attitude_matrix,coord_transform,refpix_v2,refpix_v3)\n\n#Final x,y value\npixelx = dx + refpix_x\npixely = dy + refpix_y\n\nprint(\"Approx {},{}\".format(pixelx,pixely))", "Approx 1024.4999999998824,1024.4999999999436\n" ] ], [ [ "#### I'm going to turn the above into a function that accepts ra, dec, and a distortion file, so I can compare the differences with the results above", "_____no_output_____" ] ], [ [ "def method_1_sky_to_pix(ra=0., dec=0., distortion_file=None):\n    if distortion_file is None:\n        distortion_file = 'SIAFDistortions/NRCA1_FULL_distortion.asdf'\n    with AsdfFile.open(distortion_file) as dist_file:\n        coord_transform = dist_file.tree['model']\n    \n    #telescope pointing information \n    tel_ra = 53.1490299775 # decimal degrees. RA at the reference location on the detector\n    tel_dec = -27.8168745624 # decimal degrees. Dec at the reference location on the detector\n    tel_rot = 45.04234416817661 #telescope rotation, degrees.\n\n    #in this case, if you don't have the SIAF file, you'll need to get the reference\n    #location v2,v3 coordinates for the desired aperture from some other source.\n    refpix_v2 = 120.6714 # arcsec. 
reference location is usually center of aperture \n refpix_v3 = -527.3877 # arcsec. \n refpix_x = 1024.5 # pixels. reference location x for desired aperture\n refpix_y = 1024.5 # pixels. reference location y for desired aperture\n\n #Create attitude_matrix\n attitude_matrix = rotations.attitude(refpix_v2,refpix_v3,tel_ra,tel_dec,tel_rot)\n\n #Translate\n dx,dy = RADecToXY_approx(ra,dec,attitude_matrix,coord_transform,refpix_v2,refpix_v3)\n\n #Final x,y value\n pixelx = dx + refpix_x\n pixely = dy + refpix_y\n\n return (pixelx,pixely)", "_____no_output_____" ] ], [ [ "## Method 2, (Ra, Dec) --> (x, y) using the distortion reference file and the SIAF file\n The second method of translating from RA,Dec to pixel x,y uses extra information in\n the SIAF file that is not present in the distortion coefficient file. It is\n computationally slower than the other method, but has minimal errors.", "_____no_output_____" ] ], [ [ "def getDistortionCoefficients(table,from_sys,to_sys,aperture):\n '''from the table of distortion coefficients, get the coeffs that \n correspond to the requested transformation and return as a list \n for x and another for y\n '''\n match = table['AperName'] == aperture\n if np.any(match) == False:\n print(\"Aperture name {} not found in input CSV file.\".format(aperture))\n sys.exit()\n\n row = table[match]\n\n if ((from_sys == 'science') & (to_sys == 'ideal')):\n label = 'Sci2Idl'\n elif ((from_sys == 'ideal') & (to_sys == 'science')):\n label = 'Idl2Sci'\n else:\n print(\"WARNING: from_sys of {} and to_sys of {} not a valid transformation.\".format(from_sys,to_sys))\n sys.exit()\n \n #get the coefficients, return as list\n X_cols = [c for c in row.colnames if label+'X' in c]\n Y_cols = [c for c in row.colnames if label+'Y' in c]\n x_coeffs = [row[c].data[0] for c in X_cols]\n y_coeffs = [row[c].data[0] for c in Y_cols]\n\n #Also get the V2,V3 and x,y values of the reference pixel\n v2ref = row['V2Ref'].data[0]\n v3ref = row['V3Ref'].data[0]\n xref = row['XSciRef'].data[0]\n yref = row['YSciRef'].data[0]\n \n #Get parity and V3 Y angle info as well\n parity = row['VIdlParity'].data[0]\n yang = row['V3IdlYAngle'].data[0]\n \n return x_coeffs,y_coeffs,v2ref,v3ref,xref,yref,parity,yang\n", "_____no_output_____" ], [ "def RADecToXY_exact(ra,dec,attitude_matrix,v2v32idlx,v2v32idly,v2_ref,v3_ref,x_sci2idl,y_sci2idl): \n #RA,Dec to V2,V3\n pixelv2,pixelv3 = rotations.getv2v3(attitude_matrix,ra,dec)\n\n #Now V2,V3 to undistorted angular distance from the reference pixel\n xidl = v2v32idlx(pixelv2-v2_ref,pixelv3-v3_ref)\n yidl = v2v32idly(pixelv2-v2_ref,pixelv3-v3_ref)\n \n #Finally, undistorted distances to distorted pixel values\n deltapixelx, deltapixely, err, iter = polynomial.invert(x_sci2idl,y_sci2idl,xidl,yidl,5)\n\n return deltapixelx,deltapixely\n", "_____no_output_____" ], [ "#distortion reference file to use\ndist_reffile = 'SIAFDistortions/NRCA1_FULL_distortion.asdf'\n\n#aperture_name\nap_name = 'NRCA1_FULL'\n\n#RA and Dec you wish to convert to x,y\nra = 53.1490299775 # decimal degrees. RA you wish to convert to x,y\ndec = -27.8168745624 # decimal degrees. Dec you wish to convert to x,y\n\n#telescope pointing information \ntel_ra = 53.1490299775 # decimal degrees. RA at the reference location on the detector\ntel_dec = -27.8168745624 # decimal degrees. 
Dec at the reference location on the detector\ntel_rot = 45.04234416817661 #telescope rotation, degrees.\n\n\n#read in the SIAF file\ndistortionTable = ascii.read('SIAFDistortions/NIRCam_SIAF_2016-09-29.csv',header_start=1)\n\n#get the extra parameters needed from the SIAF file\nx_sci2idl,y_sci2idl,refpix_v2,refpix_v3,refpix_x,refpix_y,parity,v3yang = getDistortionCoefficients(distortionTable,'science','ideal',ap_name)\n\n#generate the function which will translate from V2,V3 to undistorted coordinates\nv2v32idlx, v2v32idly = read_siaf_table.get_siaf_v2v3_transform('SIAFDistortions/NIRCam_SIAF_2016-09-29.csv',ap_name,to_system='ideal')\n\n#Create attitude_matrix\nattitude_matrix = rotations.attitude(refpix_v2,refpix_v3,tel_ra,tel_dec,tel_rot)\n\n#Translate\ndx,dy = RADecToXY_exact(ra,dec,attitude_matrix,v2v32idlx,v2v32idly,refpix_v2,refpix_v3,x_sci2idl,y_sci2idl)\n\n#Final x,y value\npixelx = dx + refpix_x\npixely = dy + refpix_y\n\nprint(\"Exact {},{}\".format(pixelx,pixely))", "Exact 1024.4999999998815,1024.4999999999445\n" ] ], [ [ "### The function below does the example shown above, accepting x, y, distortion_file", "_____no_output_____" ] ], [ [ "def method_2_sky_to_pix(ra=0., dec=0., distortion_file=None, ap_name=None):\n    \n    if distortion_file is None:\n        distortion_file = 'SIAFDistortions/NRCA1_FULL_distortion.asdf'\n        ap_name = 'NRCA1_FULL'\n    with AsdfFile.open(distortion_file) as dist_file:\n        coord_transform = dist_file.tree['model']\n    \n    #aperture_name\n    if ap_name is None:\n        raise ValueError(\"Expected ap_name for distortion file\")\n\n    #telescope pointing information \n    tel_ra = 53.1490299775 # decimal degrees. RA at the reference location on the detector\n    tel_dec = -27.8168745624 # decimal degrees. Dec at the reference location on the detector\n    tel_rot = 45.04234416817661 #telescope rotation, degrees.\n\n\n    #read in the SIAF file\n    distortionTable = ascii.read('SIAFDistortions/NIRCam_SIAF_2016-09-29.csv',header_start=1)\n\n    #get the extra parameters needed from the SIAF file\n    x_sci2idl,y_sci2idl,refpix_v2,refpix_v3,refpix_x,refpix_y,parity,v3yang = getDistortionCoefficients(distortionTable,'science','ideal',ap_name)\n\n    #generate the function which will translate from V2,V3 to undistorted coordinates\n    v2v32idlx, v2v32idly = read_siaf_table.get_siaf_v2v3_transform('SIAFDistortions/NIRCam_SIAF_2016-09-29.csv',ap_name,to_system='ideal')\n\n    #Create attitude_matrix\n    attitude_matrix = rotations.attitude(refpix_v2,refpix_v3,tel_ra,tel_dec,tel_rot)\n\n    #Translate\n    dx,dy = RADecToXY_exact(ra,dec,attitude_matrix,v2v32idlx,v2v32idly,refpix_v2,refpix_v3,x_sci2idl,y_sci2idl)\n\n    #Final x,y value\n    pixelx = dx + refpix_x\n    pixely = dy + refpix_y\n\n    return (pixelx,pixely)", "_____no_output_____" ] ], [ [ "## Method, (x, y) --> (Ra, Dec)\n    Translating from x,y to RA,Dec is simpler, with only one method, which\n    gives exact answers\n", "_____no_output_____" ] ], [ [ "def XYToRADec(pixelx,pixely,attitude_matrix,coord_transform,refpix_x,refpix_y,refpix_v2,refpix_v3):\n    #Translate a given x,y location on the detector\n    #to RA,Dec\n\n    #Transform distorted pixels to V2,V3\n    deltav2,deltav3 = coord_transform(pixelx-refpix_x,pixely-refpix_y)\n    pixelv2 = deltav2 + refpix_v2\n    pixelv3 = deltav3 + refpix_v3\n\n    #Now translate V2,V3 to RA,Dec\n    ra,dec = rotations.pointing(attitude_matrix,pixelv2,pixelv3)\n\n    return ra,dec", "_____no_output_____" ], [ "#pixel coords to translate\npixelx = 1024.5 \npixely = 1024.5\n\n#telescope pointing information \ntel_ra = 53.1490299775 # decimal degrees. 
RA at the reference location on the detector\ntel_dec = -27.8168745624 # decimal degrees. Dec at the reference location on the detector\ntel_rot = 45.04234416817661 #telescope rotation, degrees.\n\n#distortion reference file to use\ndist_reffile = 'SIAFDistortions/NRCA1_FULL_distortion.asdf'\nap_name = 'NRCA1_FULL'\n\n#Read in the CRDS-format distortion reference file\nwith AsdfFile.open(dist_reffile) as dist_file:\n coord_transform = dist_file.tree['model']\n\n#read in the SIAF file\ndistortionTable = ascii.read('SIAFDistortions/NIRCam_SIAF_2016-09-29.csv',header_start=1)\n\n#get parameters needed from the SIAF file\nx_sci2idl,y_sci2idl,refpix_v2,refpix_v3,refpix_x,refpix_y,parity,v3yang = getDistortionCoefficients(distortionTable,'science','ideal',ap_name)\n\n#Create attitude_matrix\nattitude_matrix = rotations.attitude(refpix_v2,refpix_v3,tel_ra,tel_dec,tel_rot)\n\n#Translate\nra,dec = XYToRADec(pixelx,pixely,attitude_matrix,coord_transform,refpix_x,refpix_y,refpix_v2,refpix_v3)\n\nprint('RA,Dec is {},{}'.format(ra,dec))", "RA,Dec is 53.14902997749999,-27.8168745624\n" ], [ "def pix_to_sky(x=0, y=0, distortion_file=None, ap_name=None):\n #telescope pointing information \n tel_ra = 53.1490299775 # decimal degrees. RA at the reference location on the detector\n tel_dec = -27.8168745624 # decimal degrees. Dec at the reference location on the detector\n tel_rot = 45.04234416817661 #telescope rotation, degrees.\n\n #distortion reference file to use\n if distortion_file is None:\n distortion_file = 'SIAFDistortions/NRCA1_FULL_distortion.asdf'\n ap_name = 'NRCA1_FULL'\n if ap_name is None:\n raise ValueError(\"Need to specify ap_name appropriate for the distortion file\")\n\n #Read in the CRDS-format distortion reference file\n with AsdfFile.open(distortion_file) as dist_file:\n coord_transform = dist_file.tree['model']\n\n #read in the SIAF file\n distortionTable = ascii.read('SIAFDistortions/NIRCam_SIAF_2016-09-29.csv',header_start=1)\n\n #get parameters needed from the SIAF file\n x_sci2idl,y_sci2idl,refpix_v2,refpix_v3,refpix_x,refpix_y,parity,v3yang = getDistortionCoefficients(distortionTable,'science','ideal',ap_name)\n\n #Create attitude_matrix\n attitude_matrix = rotations.attitude(refpix_v2,refpix_v3,tel_ra,tel_dec,tel_rot)\n\n #Translate\n ra,dec = XYToRADec(x, y, attitude_matrix,coord_transform,refpix_x,refpix_y,refpix_v2,refpix_v3)\n\n return (ra,dec)", "_____no_output_____" ] ], [ [ "### The examples above were pulled from `coord_translate.py` and the telescope pointing information was taken from `V54321001002P000000000110d_A5_F444W_rate.fits` (used in the examples above)\n\nNow let's look at the (110,110) pixel that is far away from where the distortion is defined at crpix1, crpix2", "_____no_output_____" ] ], [ [ "ra, dec = pix_to_sky(110, 110)\nra, dec", "_____no_output_____" ] ], [ [ "#### We'll take that calculated ra,dec and feed it back to find the original pixel using the two methods outlined above", "_____no_output_____" ] ], [ [ "method_1_sky_to_pix(ra, dec)", "_____no_output_____" ], [ "method_2_sky_to_pix(ra, dec)", "_____no_output_____" ] ], [ [ "*Note that the reference files mention an updated SIAF file I don't have access to: NIRCam_SIAF_2017-03-28.csv*", "_____no_output_____" ], [ "## method_2 returns much closer values than method_1 and the distortion reference file alone, as expected by the team. 
We should figure out how to add the extra calculations to the distortion reference file so that we can get proper translations for use in the WFSS and Resample pipelines. A rough comparison sketch follows.", "_____no_output_____" ] ], 
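[ [ "Added comparison (hedged): the loop below reuses `pix_to_sky`, `method_1_sky_to_pix` and `method_2_sky_to_pix` exactly as defined above; the pixel choices are arbitrary and this cell was not part of the original notebook.", "_____no_output_____" ] ], [ [ "# Added sketch: round-trip a few pixels through both methods and report\n# the error; uses only functions defined earlier in this notebook.\nfor x, y in [(110.0, 110.0), (1024.5, 1024.5), (1900.0, 1900.0)]:\n    ra, dec = pix_to_sky(x, y)\n    x1, y1 = method_1_sky_to_pix(ra, dec)\n    x2, y2 = method_2_sky_to_pix(ra, dec)\n    print(x, y, 'approx err:', (x1 - x, y1 - y), 'exact err:', (x2 - x, y2 - y))", "_____no_output_____" ] ] ]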
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ] ]
cb9b2c3008866cd939476d95ce27d2e7544ee0db
28,253
ipynb
Jupyter Notebook
notebooks/00_preface.ipynb
fehiepsi/rethinking-numpyro
9f79903651a0ba838d98f40777bb2f0e78113624
[ "MIT" ]
335
2019-10-27T12:30:27.000Z
2022-03-27T20:50:53.000Z
notebooks/00_preface.ipynb
tallamjr/rethinking-numpyro
ab56ce135a835a68bb8246de7c0435190acfd1fe
[ "MIT" ]
12
2019-11-01T22:26:51.000Z
2021-08-16T03:40:04.000Z
notebooks/00_preface.ipynb
tallamjr/rethinking-numpyro
ab56ce135a835a68bb8246de7c0435190acfd1fe
[ "MIT" ]
53
2019-11-10T23:16:08.000Z
2022-03-30T19:35:52.000Z
97.761246
22,144
0.865076
[ [ [ "# Preface", "_____no_output_____" ] ], [ [ "!pip install -q numpyro arviz causalgraphicalmodels daft", "_____no_output_____" ], [ "import os\n\nimport arviz as az\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nimport jax.numpy as jnp\nfrom jax import random\n\nimport numpyro\nimport numpyro.distributions as dist\nimport numpyro.optim as optim\nfrom numpyro.infer import SVI, Trace_ELBO\n\nif \"SVG\" in os.environ:\n %config InlineBackend.figure_formats = [\"svg\"]\naz.style.use(\"arviz-darkgrid\")\nnumpyro.set_platform(\"cpu\")", "_____no_output_____" ] ], [ [ "### Code 0.1", "_____no_output_____" ] ], [ [ "print(\"All models are wrong, but some are useful.\")", "All models are wrong, but some are useful.\n" ] ], [ [ "### Code 0.2", "_____no_output_____" ] ], [ [ "x = jnp.arange(1, 3)\nx = x * 10\nx = jnp.log(x)\nx = jnp.sum(x)\nx = jnp.exp(x)\nx", "_____no_output_____" ] ], [ [ "### Code 0.3", "_____no_output_____" ] ], [ [ "print(jnp.log(0.01 ** 200))\nprint(200 * jnp.log(0.01))", "-inf\n-921.03406\n" ] ], [ [ "### Code 0.4", "_____no_output_____" ] ], [ [ "# Load the data:\n# car braking distances in feet paired with speeds in km/h\n# see cars.info() for details\ncars = pd.read_csv(\"../data/cars.csv\", index_col=0)\n\n# fit a linear regression of distance on speed\ndef model(speed, dist_):\n mu = numpyro.param(\"a\", 0.0) + numpyro.param(\"b\", 1.0) * speed\n numpyro.sample(\"dist\", dist.Normal(mu, 1), obs=dist_)\n\n\nsvi = SVI(\n model,\n lambda speed, dist_: None,\n optim=optim.Adam(1),\n loss=Trace_ELBO(),\n speed=cars.speed.values,\n dist_=cars.dist.values,\n)\nsvi_result = svi.run(random.PRNGKey(0), 1000)\nparams = svi_result.params\n\n# estimated coefficients from the model\nprint(params)\n\n# plot residuals against speed\nresid = cars.dist - (params[\"a\"] + params[\"b\"] * cars.speed.values)\naz.plot_pair({\"speed\": cars.speed, \"resid\": resid})\nplt.show()", "100%|██████████| 1000/1000 [00:00<00:00, 1165.66it/s, init loss: 30629.4453, avg. loss [951-1000]: 5608.2529]\n" ] ], [ [ "### Code 0.5", "_____no_output_____" ], [ "```sh\npip install numpyro arviz causalgraphicalmodels daft\n```", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
cb9b31ea19cee9baacfd5756c8326b08f9fb2a59
9,406
ipynb
Jupyter Notebook
13_bases_de_datos.ipynb
daviqui/py121
526eabdf07fa860ee94c48108a7dd5d6f4b38a3a
[ "MIT" ]
null
null
null
13_bases_de_datos.ipynb
daviqui/py121
526eabdf07fa860ee94c48108a7dd5d6f4b38a3a
[ "MIT" ]
null
null
null
13_bases_de_datos.ipynb
daviqui/py121
526eabdf07fa860ee94c48108a7dd5d6f4b38a3a
[ "MIT" ]
null
null
null
25.840659
409
0.578886
[ [ [ "[![imagenes](imagenes/pythonista.png)](https://pythonista.mx)", "_____no_output_____" ], [ "## La DB API de Python para bases de datos relacionales.\n\nDebido a que existen muy diversos gestores de bases de datos, tanto SQL como no-SQL, la comunidad de Python publicó la [PEP-249](https://www.python.org/dev/peps/pep-0249/), la cual define modelo genérico de API para la gestión de bases de datos, de tal modo que independienetemente de las paerticularidades del gestor,existan interfaces (clases, funciones y métodos) unificadas para acceder a los datos.", "_____no_output_____" ], [ "En la siguiente liga se puede consultar las diversas bases de datos soportadas por Python: \n\nhttps://wiki.python.org/moin/DatabaseInterfaces", "_____no_output_____" ], [ "## Conexión a MySQL.\n\nPara ilustrar una conexión y operación simple de una base de datos relacional se utilizará la base de datos MariaDB conectada mediante el driver *pymysql*.\n\nPara conocer más sobre *pymysql*, consultar la siguiente liga.\n\nhttps://pymysql.readthedocs.io/en/latest/", "_____no_output_____" ] ], [ [ "!pip install pymysql", "_____no_output_____" ], [ "import pymysql", "_____no_output_____" ] ], [ [ "### El objeto *pymysql.connect*.\n\nEl objeto *pymysql.connect* es un objeto instanciado de la clase *pymysql.connections.Connection*, el cual permite abrir una conexión a la base de datos con la siguiente sintaxis:\n\n``` python\npymysql.connect(user=<objeto tipo str>, password=<objeto tipo str>,\n host='<URL>', port='<puerto>', \n database=<objeto tipo str>)\n```\nExisten algunos otros parámetros, pero los que se indican son los más comunes.\n\nPor defecto la URL del host es *localhost*, el puerto es el *3306* y la base datos es la principal.", "_____no_output_____" ], [ "**Ejemplo:**", "_____no_output_____" ] ], [ [ "conexion = pymysql.connect(user='root', password='0p3n5t4ck')", "_____no_output_____" ] ], [ [ "### El método *pymysql.connect.query()*.\nEste método permite ingresar consultas SQL a la base de datos ingresándola como parámetro.", "_____no_output_____" ], [ "**Ejemplo:**", "_____no_output_____" ] ], [ [ "conexion.query(\"CREATE DATABASE pythonista;\")", "_____no_output_____" ] ], [ [ "### El método *pymysql.connect.commit()*\n\nEste método permite realizar un commit a a la base de datos.\n", "_____no_output_____" ] ], [ [ "conexion.commit()", "_____no_output_____" ] ], [ [ "### El método *pymysql.connect.close()*\nEste método permite cerrar la conexión con la base de datos.", "_____no_output_____" ] ], [ [ "conexion.close()", "_____no_output_____" ] ], [ [ "### El objeto *pymysql.cursor*.\n\nAún cuando es posible realizar operaciones de consulta con los objetos *pymysql.connect*, estos objetos se utilizan primordialmente para operaciones de conexión las bases de datos.\n\nEl objeto cursor es una instancia de la clase *pymysql.cursors.Cursor*, el cual contiene los métodos:\n* *execute()*, con el que se pueden enviar instrucciones SQL a la base de datos.\n* *fetchone()*, con el que se obtiene el primer resultado de una búsqueda.\n* *fetchall()* con el que se obtienen todos los resultado de una búsqueda dentro de un bojeto de tipo *tuple*.", "_____no_output_____" ], [ "### La declaración *with* para conexiones de bases de datos.\n\nLas conexiones de bases de datos también pueden ser utilizadas dentro de una declaración *with*.\n\nDe esta forma se abre una conexión y se crea un objeto de tipo *pymysql.cursor* que puede ser utilizado dentro del bloque de código inscrito a *with*. 
[ [ "### The *with* statement for database connections.\n\nDatabase connections can also be used inside a *with* statement.\n\nThis opens a connection and creates a *pymysql.cursor*-type object that can be used inside the code block attached to *with*. As soon as the block has executed, the actions performed are committed and the connection is closed.\n", "_____no_output_____", "**Example:**", "_____no_output_____" ] ], [ [ "sql = 'SHOW DATABASES;'\nwith pymysql.connect(user='root', password='0p3n5t4ck') as cursor:\n    print(type(cursor))\n    cursor.execute(sql)\n    print(cursor.fetchall())", "_____no_output_____" ], [ "with pymysql.connect(user='root', password='0p3n5t4ck') as conexion:\n    conexion.execute(\"DROP DATABASE pythonista;\")\n    print(conexion.fetchall())", "_____no_output_____" ], [ "sql = 'SHOW DATABASES;'\nwith pymysql.connect(user='root', password='0p3n5t4ck') as cursor:\n    cursor.execute(sql)\n    print(cursor.fetchall())", "_____no_output_____" ] ], [ [ "## Connecting to MongoDB.\n\n[MongoDB](https://www.mongodb.com/) is a very popular database that is based on \"collections\" and JSON-format \"documents\" instead of records. \n\nThe MongoDB API for Python was developed by the MongoDB team itself.\n\nTo learn more about using the MongoDB API, consult:\n\nhttp://api.mongodb.com/python/current/tutorial.html", "_____no_output_____", "**Example:**\n\nA connection will be made to the MongoDB server running on the local system (*localhost*) and the state of the manager will be queried.", "_____no_output_____" ] ], [ [ "!pip install pymongo", "_____no_output_____" ], [ "from pymongo import MongoClient\nclient = MongoClient(\"localhost\")\ndb=client.admin\nserverStatusResult=db.command(\"serverStatus\")\nclaves = [keys for keys in serverStatusResult]", "_____no_output_____" ], [ "claves", "_____no_output_____" ], [ "serverStatusResult['uptime']", "_____no_output_____" ], [ "serverStatusResult['host']", "_____no_output_____" ], [ "serverStatusResult['version']", "_____no_output_____" ] ], [ [ "<p style=\"text-align: center\"><a rel=\"license\" href=\"http://creativecommons.org/licenses/by/4.0/\"><img alt=\"Licencia Creative Commons\" style=\"border-width:0\" src=\"https://i.creativecommons.org/l/by/4.0/80x15.png\" /></a><br />This work is licensed under a <a rel=\"license\" href=\"http://creativecommons.org/licenses/by/4.0/\">Creative Commons Attribution 4.0 International License</a>.</p>\n<p style=\"text-align: center\">&copy; José Luis Chiquete Valdivieso. 2017.</p>", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ] ]
cb9b48479fc0d62d4ddb70baea96c5a9c0f9c749
98,087
ipynb
Jupyter Notebook
Fast_Text_Model.ipynb
COMP6248-Reproducability-Challenge/Differentiables_FastAccurateTextClassification
4d30c93f553e9918ec170c3a2dd35e0940a484f8
[ "MIT" ]
1
2020-11-23T05:30:20.000Z
2020-11-23T05:30:20.000Z
Fast_Text_Model.ipynb
COMP6248-Reproducability-Challenge/Differentiables_FastAccurateTextClassification
4d30c93f553e9918ec170c3a2dd35e0940a484f8
[ "MIT" ]
1
2020-03-26T02:45:33.000Z
2020-03-26T11:35:07.000Z
Fast_Text_Model.ipynb
COMP6248-Reproducability-Challenge/Differentiables_FastAccurateTextClassification
4d30c93f553e9918ec170c3a2dd35e0940a484f8
[ "MIT" ]
null
null
null
58.946514
14,130
0.590802
[ [ [ "# Execute this code block to install dependencies when running on colab\ntry:\n import torch\nexcept:\n from os.path import exists\n from wheel.pep425tags import get_abbr_impl, get_impl_ver, get_abi_tag\n platform = '{}{}-{}'.format(get_abbr_impl(), get_impl_ver(), get_abi_tag())\n cuda_output = !ldconfig -p|grep cudart.so|sed -e 's/.*\\.\\([0-9]*\\)\\.\\([0-9]*\\)$/cu\\1\\2/'\n accelerator = cuda_output[0] if exists('/dev/nvidia0') else 'cpu'\n\n !pip install -q http://download.pytorch.org/whl/{accelerator}/torch-1.0.0-{platform}-linux_x86_64.whl torchvision\n ! pip install --upgrade git+https://github.com/sovrasov/flops-counter.pytorch.git\n\ntry:\n import torchtext\nexcept:\n !pip install torchtext\n \ntry:\n import spacy\nexcept:\n !pip install spacy\n \ntry:\n spacy.load('en')\nexcept:\n !python -m spacy download en", "_____no_output_____" ] ], [ [ "# Data loading and preprocessing", "_____no_output_____" ] ], [ [ "import torch\nfrom torchtext import data\nimport torch.nn.functional as F\nfrom torch import nn\nfrom torch import optim\nfrom torch.distributions import Categorical\nfrom torch.distributions import Binomial\nfrom torchtext import datasets\nimport os.path\n\nimport random\nimport numpy as np", "_____no_output_____" ], [ "TEXT = data.Field(tokenize='spacy', lower=True, include_lengths=True)\nLABEL = data.LabelField(dtype=torch.float)\n_train_data, test_data = datasets.IMDB.splits(TEXT, LABEL)", "aclImdb_v1.tar.gz: 0%| | 164k/84.1M [00:00<00:57, 1.47MB/s]" ], [ "train_data, valid_data = _train_data.split(0.8)\n\nprint(f'Number of training examples: {len(train_data)}')\nprint(f'Number of validation examples: {len(valid_data)}')\nprint(f'Number of testing examples: {len(test_data)}')", "Number of training examples: 20000\nNumber of validation examples: 5000\nNumber of testing examples: 25000\n" ], [ "TEXT.build_vocab(train_data, max_size=100000, vectors=\"glove.6B.100d\")\nLABEL.build_vocab(train_data)\n\nprint(f\"Unique tokens in TEXT vocabulary: {len(TEXT.vocab)}\")\nprint(f\"Unique tokens in LABEL vocabulary: {len(LABEL.vocab)}\")", ".vector_cache/glove.6B.zip: 862MB [03:29, 4.12MB/s] \n100%|█████████▉| 398204/400000 [00:19<00:00, 20314.70it/s]" ], [ "device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\ndevice = 'cpu'\n# Assume that we are on a CUDA machine, then this should print a CUDA device:\n\nprint(device)", "cpu\n" ] ], [ [ "# Model and training", "_____no_output_____" ] ], [ [ "BATCH_SIZE = 50\n\ntrain_iterator, valid_iterator, test_iterator = data.BucketIterator.splits(\n (train_data, valid_data, test_data), \n batch_size=BATCH_SIZE,\n device=device,\n shuffle = False,\n sort_key=lambda x: len(x.text),\n sort_within_batch=True)", "_____no_output_____" ], [ "eps = torch.tensor(1e-9)\ntemp = []\nR = 20 # chunk of words read or skipped\n\ndef reward_function(prob, true_label):\n \"\"\"\n Returns 1 if correct prediction, -1 otherwise\n \"\"\"\n # print(\"true_label\", \"prob\", true_label, prob)\n if prob>0.5 and true_label>0.5:\n return torch.tensor(1.0, requires_grad=True)\n if prob<0.5 and true_label<0.5:\n return torch.tensor(1.0, requires_grad=True)\n return torch.tensor(-1.0, requires_grad=True)\n\ndef sample_binary(prob):\n if prob>random.random:\n return torch.tensor(1)\n return torch.tensor(0)\n\nclass SkipReadingModel(nn.Module):\n def __init__(self, input_dim, embedding_dim, hidden_dim, output_dim, gamma=0.99, train_mode=True, K=4):\n super().__init__()\n \n # store dimensions and constants\n self.input_dim = input_dim\n 
class SkipReadingModel(nn.Module):\n    def __init__(self, input_dim, embedding_dim, hidden_dim, output_dim, gamma=0.99, train_mode=True, K=4):\n        super().__init__()\n        \n        # store dimensions and constants\n        self.input_dim = input_dim\n        self.embedding_dim = embedding_dim\n        self.hidden_dim = hidden_dim\n        self.output_dim = output_dim\n        self.gamma = torch.tensor(gamma)\n        self.train_mode = train_mode\n        self.K = K\n        \n        # create layers\n        self.embedding = nn.Embedding(input_dim, embedding_dim)\n        self.lstm_cell = nn.LSTMCell(input_size = embedding_dim, hidden_size = hidden_dim, bias = True)\n        self.stop_linear_1 = nn.Linear(hidden_dim, hidden_dim)\n        self.stop_linear_2 = nn.Linear(hidden_dim, hidden_dim)\n        self.stop_linear_3 = nn.Linear(hidden_dim, 1)\n        \n        self.jumping_linear_1 = nn.Linear(hidden_dim, hidden_dim)\n        self.jumping_linear_2 = nn.Linear(hidden_dim, hidden_dim)\n        self.jumping_linear_3 = nn.Linear(hidden_dim, K)\n        \n        self.output_linear_1 = nn.Linear(hidden_dim, hidden_dim)\n        self.output_linear_2 = nn.Linear(hidden_dim, output_dim)\n        \n        self.value_head = nn.Linear(hidden_dim, 1)\n        \n        # Baseline weight \n        self.wb = nn.Parameter(data=torch.zeros(self.hidden_dim), requires_grad=True)\n        self.cb = nn.Parameter(data=torch.tensor((0.0)), requires_grad=True)\n        \n        # Initialize lstm_cell states\n        self.initialize_lstm_cell_states()\n        \n        # Initialize episode number and time number\n        self.initialize_for_new_batch()\n        self.initialize_time_number()\n        \n        # Overall reward and loss history\n        self.reward_history = []\n        self.loss_history = []\n        self.training_accuracies = []\n        self.validation_accuracies = []\n        # torch.tensor((0.0), requires_grad=True) \n    \n    def initialize_lstm_cell_states(self):\n        self.c = torch.zeros(1, self.hidden_dim, requires_grad=True)\n        self.h = torch.zeros(1, self.hidden_dim, requires_grad=True)\n    \n    def initialize_episode_number(self):\n        self.ep = 0\n    \n    def initialize_time_number(self):\n        self.t = 0\n    \n    def clear_batch_lists(self):\n        del self.saved_log_probs_s[:]\n        del self.saved_log_probs_n[:]\n        del self.saved_log_probs_o[:]\n        del self.reward_baselines[:]\n        del self.rewards[:]\n        del self.label_targets[:]\n        del self.label_predictions[:]\n        del self.state_values[:]\n        self.initialize_episode_number()\n        self.training_accuracy = 0.0\n    \n    def initialize_for_new_batch(self):\n        \"\"\"\n        Cleans history of log probabilities, rewards, targets etc for the last\n        batch\n        \"\"\"\n        self.initialize_episode_number()\n        \n        # Episode policy and reward history \n        self.saved_log_probs_s = [] # log probabilities for each time step t in each episode in batch\n        self.saved_log_probs_n = [] # log probs for jump\n        self.saved_log_probs_o = [] # log_prob for class\n        self.rewards = [] # reward at final time step of each episode in batch\n        self.reward_baselines = [] # reward baselines for each time step t in each episode in batch\n        self.state_values = []\n\n        # Predictions and targets history (for cross entropy loss calculation)\n        self.label_predictions = [] # 1 probability for each episode\n        self.label_targets = []# 1 label for each episode\n        self.training_accuracy = 0.0\n\n    \n    def classify(self):\n        # global temp\n        # temp.append(self.c[0])\n        out = self.output_linear_1(self.c[0])\n        out = self.output_linear_2(out)\n        self.label_predictions.append(out)\n        prob_o = torch.sigmoid(out)\n        class_categ = Binomial(probs=prob_o)\n        _class = class_categ.sample()\n        if self.train_mode:\n            self.rewards.append(reward_function(_class, self.label_targets[-1]))\n            self.saved_log_probs_o.append((class_categ.log_prob(_class), ))\n        # return torch.sigmoid(out)\n    \n    def get_baseline(self):\n        return torch.dot(self.wb, self.c[0].detach()) + self.cb\n    \n    def save_training_accuracy(self):\n        correct = 0\n        for _r in self.rewards:\n            if _r > 0:\n                correct += 1\n        self.training_accuracy = 
correct/len(self.rewards)\n        self.training_accuracies.append(self.training_accuracy)\n    \n    \n    def forward(self, pack):\n        texts, lengths, labels = pack\n        embeddeds = self.embedding(texts)\n        # embeddeds = nn.utils.rnn.pack_padded_sequence(embeddeds, lengths)\n        self.initialize_for_new_batch()\n\n        for episode_number in range(embeddeds.shape[1]):\n            \n            # load episode data\n            self.ep = episode_number\n            embedded = embeddeds[:, self.ep, :]\n            \n            #print(texts.shape, embeddeds.shape, embedded.shape)\n            #print(label)\n            \n            # initialize counters and index\n            tokens_read = 0\n            jumps_made = 0\n            word_index = 0\n            words_len = embedded.shape[0]\n            self.initialize_lstm_cell_states()\n            self.initialize_time_number()\n            self.saved_log_probs_s.append([])\n            self.saved_log_probs_n.append([])\n            self.state_values.append([])\n            self.reward_baselines.append([]) \n            if self.train_mode:\n                label = labels[self.ep].reshape(1)\n                self.label_targets.append(label)\n\n            # start iterating through sequence, while skipping some words\n            while word_index<words_len and word_index<400:\n                self.t += 1 \n                #print(\"embedded_word\", embedded_word.shape)\n\n                # generate next lstm cell state\n                for _r in range(min(R, words_len-word_index)):\n                    embedded_word = embedded[word_index]\n                    self.h, self.c = self.lstm_cell(torch.reshape(embedded_word, (1, -1)), (self.h, self.c))\n                    word_index += 1\n                # print('word_index', word_index, 'tokens_read', tokens_read, 'jumps_made', jumps_made)\n                \n                # print(self.c)\n                _state_value = self.value_head(self.c[0])\n                self.state_values[-1].append(_state_value)  # append to this episode's list\n                \n                _s = self.stop_linear_1(self.c[0])\n                _s = F.relu(_s)\n                _s = self.stop_linear_2(_s)\n                _s = F.relu(_s)\n                _s = self.stop_linear_3(_s)\n\n                probs_s = torch.sigmoid(_s)\n                try:\n                    stop_categ = Binomial(probs=probs_s)\n                    stop = stop_categ.sample()\n                except:\n                    print(\"_c\", self.c)\n                    #temp = (self.c, _s, probs_s, self.stop_linear_1, self.stop_linear_2, self.stop_linear_3, stop_categ)\n                    raise ValueError('got the expected error')\n                # Add log probability of our chosen action to our history\n                self.saved_log_probs_s[-1].append(stop_categ.log_prob(stop))\n                self.reward_baselines[-1].append(self.get_baseline())\n\n                if stop > 0.5:\n                    self.classify()\n                    break\n                else:\n                    _n = self.jumping_linear_1(self.c[0])\n                    _n = F.relu(_n)\n                    _n = self.jumping_linear_2(_n)\n                    _n = F.relu(_n)\n                    _n = self.jumping_linear_3(_n)\n                    _n = F.softmax(_n, dim=-1)\n                    n_categ = Categorical(_n)\n                    n = n_categ.sample()\n                    self.saved_log_probs_n[-1].append(n_categ.log_prob(n))\n                    word_index += n * R\n            else:\n                # print(\"Finished while loop\")\n                # raise ValueError('Finished')\n                self.classify()\n        if self.train_mode:\n            self.save_training_accuracy()\n        return self.label_predictions \n    ", "_____no_output_____" ], [ "INPUT_DIM = len(TEXT.vocab)\nEMBEDDING_DIM = 100\nHIDDEN_DIM = 128\nOUTPUT_DIM = 1\nFLOP_COST = 0.0001\n\nseed = 7\ntorch.manual_seed(seed)\ntorch.backends.cudnn.deterministic = True\ntorch.backends.cudnn.benchmark = False\nnp.random.seed(seed)\n\nbce = nn.BCEWithLogitsLoss(reduction='mean')\npolicy_model = SkipReadingModel(INPUT_DIM, EMBEDDING_DIM, HIDDEN_DIM, OUTPUT_DIM).to(device)\npretrained_embeddings = TEXT.vocab.vectors\npolicy_model.embedding.weight.data.copy_(pretrained_embeddings)\n\nif os.path.exists('fast_text_model.weights'):\n    policy_model.load_state_dict(torch.load('fast_text_model.weights'))\n\n# define the optimiser\noptimizer = optim.Adam(policy_model.parameters(), lr=0.01)", "_____no_output_____" ], [ "def update_policy():\n    \n    #print(len(policy_model.rewards), len(policy_model.label_predictions), 
len(policy_model.label_targets))\n #print(len(policy_model.saved_log_probs_o), len(policy_model.saved_log_probs_n), len(policy_model.saved_log_probs_s))\n #print(len(policy_model.reward_baselines))\n \n policy_loss_sum = torch.tensor(0.0, requires_grad=True)\n reward_sum = torch.tensor(0.0, requires_grad=True)\n baseline_loss_sum = torch.tensor(0.0, requires_grad=True)\n value_loss_sum = torch.tensor(0.0, requires_grad=True)\n \n for reward, prediction, target, log_probs_o, log_probs_n, log_probs_s, baselines, svs in zip(\n policy_model.rewards, policy_model.label_predictions,\n policy_model.label_targets, policy_model.saved_log_probs_o,\n policy_model.saved_log_probs_n, policy_model.saved_log_probs_s,\n policy_model.reward_baselines, policy_model.state_values):\n \n for lpn in log_probs_n:\n policy_loss_sum = policy_loss_sum + lpn\n \n for i, (lps, b, sv) in enumerate(zip(log_probs_s, baselines, svs)):\n policy_loss_sum = policy_loss_sum + lps\n r = torch.pow(policy_model.gamma, i) * (-FLOP_COST)\n if i == len(svs)-1:\n r = r + torch.pow(policy_model.gamma, i) * reward\n adv = r - sv.item()\n reward_sum = reward_sum + adv\n value_loss_sum = value_loss_sum + F.smooth_l1_loss(sv, torch.tensor([r]))\n # baseline_loss_sum = baseline_loss_sum + torch.pow(rew, 2)\n\n # baseline_loss_sum = baseline_loss_sum - torch.pow(rew, 2) + torch.pow(torch.pow(policy_model.gamma, i) * reward + rew, 2)\n policy_loss_sum = policy_loss_sum + log_probs_o[0]\n \n # print(\"reward sum\", reward_sum)\n # print(\"policy_loss_sum\", policy_loss_sum)\n \n loss = policy_loss_sum * reward_sum + value_loss_sum\n \n optimizer.zero_grad()\n # print('policy_loss', policy_loss)\n loss.backward(retain_graph=True)\n optimizer.step()\n \n policy_model.clear_batch_lists()", "_____no_output_____" ], [ "def test_model():\n policy_model.train_mode = True\n correct = 0\n total=0\n for _data in test_iterator:\n # get the inputs\n texts, text_lengths, labels = _data.text[0], _data.text[1], _data.label\n # print(\"Input review texts, text_lengths, labels\", texts.shape, text_lengths.shape, labels.shape)\n predictions = policy_model((texts.to(device), text_lengths.to(device), labels.to(device)))\n for (prediction, label) in zip(predictions, labels):\n if reward_function(label, prediction) > 0:\n correct += 1\n total += 1\n if total%1000 == 0:\n print(total)\n if total%5000 == 0:\n break\n print(\"Test accuracy :\", correct/total)\n policy_model.train_mode = True\n return correct/total\n\ndef validate_model():\n policy_model.train_mode = True\n correct = 0\n total=0\n for _data in valid_iterator:\n # get the inputs\n texts, text_lengths, labels = _data.text[0], _data.text[1], _data.label\n # print(\"Input review texts, text_lengths, labels\", texts.shape, text_lengths.shape, labels.shape)\n predictions = policy_model((texts.to(device), text_lengths.to(device), labels.to(device)))\n for (prediction, label) in zip(predictions, labels):\n if reward_function(label, prediction) > 0:\n correct += 1\n total += 1\n if total%1000 == 0:\n break\n print(\"Validation accuracy :\", correct/total)\n policy_model.train_mode = True\n policy_model.validation_accuracies.append(correct/total)\n return correct/total\n\n# test_model()", "_____no_output_____" ], [ "# the epoch loop\n\nwith torch.enable_grad():\n validate_model()\n for epoch in range(10):\n running_reward = 10\n t = 0\n for _data in train_iterator:\n # get the inputs\n texts, text_lengths, labels = _data.text[0], _data.text[1], _data.label\n # print(\"Input review texts, text_lengths, 
labels\", texts.shape, text_lengths.shape, labels.shape)\n prediction = policy_model((texts.to(device), text_lengths.to(device), labels.to(device)))\n # print(\"Prediction\", prediction.item()) \n # raise ValueError('Not done')\n t += 1\n if t%2 == 0:\n print(\"batch no. %d, training accuracy %4.2f\" % (t, policy_model.training_accuracy))\n if t%10 == 0:\n validate_model()\n #print(\"wb\", policy_model.wb)\n # print(\"lstm hh hi\", policy_model.lstm_cell.weight_hh[0][::10], policy_model.lstm_cell.weight_ih[0][::10])\n #print(\"lstm hh hi\", policy_model.lstm_cell.weight_hh, policy_model.lstm_cell.weight_ih)\n #print(\"emb\", policy_model.embedding.weight)\n #print(\"jmp\", policy_model.jumping_linear.weight)\n #print(\"out\", policy_model.output_linear.weight)\n if t%1000 == 0:\n break\n update_policy()\n #running_reward = 0.05 * policy_model.reward_episode + (1 - 0.05) * running_reward\n #print(\"Epoch %d, reward %4.2f\" % (epoch, running_reward))\n print(\"Epoch %d\" % (epoch))\n print('**** Finished Training ****')\n# test_model()", "/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:210: UserWarning: Implicit dimension choice for softmax has been deprecated. Change the call to include dim=X as an argument.\n" ], [ "torch.save(policy_model.state_dict(), 'fast_text_model.weights')", "_____no_output_____" ] ], [ [ "# Evaluation", "_____no_output_____" ] ], [ [ "import timeit\nimport spacy\nimport matplotlib.pyplot as plt\nnlp = spacy.load('en')\n\ndef predict_sentiment(model, sentence):\n model.train_mode = False\n tokenized = [tok.text for tok in nlp.tokenizer(sentence)]\n indexed = [TEXT.vocab.stoi[t] for t in tokenized]\n # tensor = torch.LongTensor(indexed).to(device)\n tensor = torch.LongTensor(indexed).to('cpu')\n tensor = tensor.unsqueeze(1)\n model((tensor, torch.tensor([tensor.shape[0]]), None))\n res = torch.sigmoid(model.label_predictions[0])\n model.train_mode = False\n return res\n\ntimes = []\nlengths=[]\ntrain_iterator, valid_iterator, test_iterator = data.BucketIterator.splits(\n (train_data, valid_data, test_data), \n batch_size=1,\n device=device,\n shuffle = True,\n # sort_key=lambda x: len(x.text),\n sort_within_batch=False)\n\ni = 0\nfor _data in test_iterator:\n if i%100 == 0:\n # get the inputs\n texts, text_lengths, labels = _data.text[0], _data.text[1], _data.label\n #print(texts.shape, text_lengths.shape, labels.shape)\n start_time = timeit.default_timer()\n predictions = policy_model((texts.to(device), text_lengths.to(device), labels.to(device)))\n elapsed = timeit.default_timer() - start_time\n lengths.append(texts.shape[0])\n times.append(elapsed)\n # print(\"Input review texts, text_lengths, labels\", texts.shape, text_lengths.shape, labels.shape)\n if i>20000:\n break\n i += 1\n\nimport pickle\n\npickle_out = open(\"test_times_1.pickle\",\"wb\")\npickle.dump((lengths, times), pickle_out)\npickle_out.close()\n\nplt.scatter(lengths, times, label='skip-model')\nplt.xlabel('Lengths of sentences')\nplt.ylabel('Time taken for prediction')\nplt.show()", "/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:210: UserWarning: Implicit dimension choice for softmax has been deprecated. 
Change the call to include dim=X as an argument.\n" ], [ "predict_sentiment(policy_model, \"This film is terrible what can I say\")", "_____no_output_____" ], [ "import pickle\n\npickle_out = open(\"training_epochs_1.pickle\",\"wb\")\npickle.dump((policy_model.training_accuracies, policy_model.validation_accuracies), pickle_out)\npickle_out.close()", "_____no_output_____" ], [ "", "_____no_output_____" ] ] ]
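Editor's note: the return/advantage bookkeeping inside `update_policy` above is easy to lose in the loop nesting, so here is a hypothetical standalone restatement. It is not part of the original notebook; `gamma` and `flop_cost` stand in for the model's `gamma` and the notebook's `FLOP_COST`.

```python
import torch

def episode_returns(num_steps, terminal_reward, gamma, flop_cost=0.0001):
    """Discounted per-step return: every stop-decision step pays a small
    compute penalty, and the final step additionally receives the
    classification reward -- mirroring `r = gamma**i * (-FLOP_COST)` and
    the terminal `r += gamma**i * reward` in update_policy above."""
    returns = []
    for i in range(num_steps):
        r = (gamma ** i) * (-flop_cost)
        if i == num_steps - 1:
            r += (gamma ** i) * terminal_reward
        returns.append(r)
    return torch.tensor(returns)

# Advantage = return minus the value head's baseline,
# as in `adv = r - sv.item()` above.
values = torch.zeros(5)  # stand-in for the critic outputs
advantages = episode_returns(5, terminal_reward=1.0, gamma=0.99) - values
```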
[ "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
cb9b850a584dc3b81c1980251c4a16c0814fd9bb
4,491
ipynb
Jupyter Notebook
notebooks/cloud_products_analysis/spacy/SpacyTest.colab.ipynb
dylanhogg/jupyter-experiments
c1b3a6b50eaeeb5be73bb30fb69cbc813c61ed26
[ "CC-BY-4.0" ]
null
null
null
notebooks/cloud_products_analysis/spacy/SpacyTest.colab.ipynb
dylanhogg/jupyter-experiments
c1b3a6b50eaeeb5be73bb30fb69cbc813c61ed26
[ "CC-BY-4.0" ]
null
null
null
notebooks/cloud_products_analysis/spacy/SpacyTest.colab.ipynb
dylanhogg/jupyter-experiments
c1b3a6b50eaeeb5be73bb30fb69cbc813c61ed26
[ "CC-BY-4.0" ]
null
null
null
25.372881
125
0.534847
[ [ [ "# Spacy NLP analysis over AWS products (colab ready)\n\nInspiration: \nhttps://github.com/explosion/spacy-notebooks \nhttps://spacy.io/models/en \nhttps://spacy.io/usage/visualizers#jupyter \nhttps://spacy.io/usage/models \nhttps://spacy.io/usage/facts-figures#spacy-models \nhttps://spacy.io/usage/spacy-101 ", "_____no_output_____" ] ], [ [ "!which python", "_____no_output_____" ], [ "!pip install spacy pandas cloud-products>=1.1.2", "_____no_output_____" ], [ "# NOTE: You may need to restart the kernel to be able to use these downloaded models if installed here\n# !python -m spacy download en_core_web_lg # v2.3.1 782.7 MB\n!python -m spacy download en_core_web_md # v2.3.1 50.8 MB", "_____no_output_____" ], [ "import pandas as pd\nimport spacy\nfrom spacy import displacy\nfrom cloud_products.aws import AwsCrawler\nfrom IPython.core.display import HTML", "_____no_output_____" ], [ "df_cloud_products = AwsCrawler().get_products_as_df()\ndf_cloud_products.head(2)", "_____no_output_____" ], [ "codes = df_cloud_products[\"code\"].unique()\nprint(f\"len(codes) = {len(codes)}\")\ncodes[:20]", "_____no_output_____" ], [ "code = \"sagemaker\"\ntext = df_cloud_products.set_index(\"code\").loc[code][\"product_text\"]\nf\"{text[:120]}... ({len(text)} chars)\"", "_____no_output_____" ], [ "nlps = {}\nnlps[\"en_core_web_md\"] = spacy.load(\"en_core_web_md\")", "_____no_output_____" ], [ "def apply_nlp(text, nlps, nlp_type, options_ent={}, render=True) -> pd.DataFrame:\n nlp = nlps[nlp_type]\n doc = nlp(text)\n \n if render:\n doc.user_data[\"title\"] = f\"{nlp_type}\"\n #options_dep = {\"compact\": False, \"bg\": \"#ffffff\", \"color\": \"grey\", \"font\": \"Source Sans Pro\"}\n #displacy.render(doc, style=\"dep\", options=options_dep)\n #displacy.render(list(doc.sents), style=\"dep\", options=options_dep)\n displacy.render(doc, style=\"ent\", jupyter=True, options=options_ent)\n \n ents = [(e.label_, e.text, e.root, e.conjuncts, e.start_char, e.end_char, e.vector_norm) for e in doc.ents] \n \n df = pd.DataFrame.from_records(ents)\n columns = [\"label\", \"text\", \"root\", \"conjuncts\", \"start_char\", \"end_char\", \"vector_norm\"]\n df.columns = columns\n df.sort_values(by=\"label\", inplace=True)\n df = df.groupby(columns).size().reset_index(name='counts')\n return df", "_____no_output_____" ], [ "df = apply_nlp(text, nlps, \"en_core_web_md\")", "_____no_output_____" ], [ "HTML(df.to_html())", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb9b8bca14166b9c32fb2c4f63512f293dcccef5
14,242
ipynb
Jupyter Notebook
6.009-Fundamentals of Programming/week0/rec00.ipynb
CutIce/awesome-courses
f35402e4ce313d45945b06c96fede71c590a73e8
[ "MIT" ]
null
null
null
6.009-Fundamentals of Programming/week0/rec00.ipynb
CutIce/awesome-courses
f35402e4ce313d45945b06c96fede71c590a73e8
[ "MIT" ]
null
null
null
6.009-Fundamentals of Programming/week0/rec00.ipynb
CutIce/awesome-courses
f35402e4ce313d45945b06c96fede71c590a73e8
[ "MIT" ]
null
null
null
23.347541
402
0.518467
[ [ [ "# Python Notional Machine\nOur goal is to refresh ourselves on basics (and some subtleties) associated with Python's data and computational model. Along the way, we'll also use or refresh ourselves on the <b>environment model</b> as a way to think about and keep track of the effect of executing Python code. Specifically, we'll demonstrate use of *environment diagrams* to explain the outcomes of different code sequences.", "_____no_output_____" ], [ "## Variables and data types", "_____no_output_____" ], [ "### Integers", "_____no_output_____" ] ], [ [ "a = 307\nb = a\nprint('a:', a, '\\nb:', b)", "_____no_output_____" ], [ "a = a + 310\na += 400\nprint('a:', a, '\\nb:', b)", "_____no_output_____" ] ], [ [ "So far so good -- integers, and variables pointing to integers, are straightforward.", "_____no_output_____" ], [ "### Lists", "_____no_output_____" ] ], [ [ "x = ['baz', 302, 303, 304]\nprint('x:', x)", "_____no_output_____" ], [ "y = x\nprint('y:', y)", "_____no_output_____" ], [ "x = 377\nprint('x:', x, '\\ny:', y)", "_____no_output_____" ] ], [ [ "\nUnlike integers, lists are mutable:", "_____no_output_____" ] ], [ [ "x = y\nx[0] = 388\nprint('x:', x)", "_____no_output_____" ], [ "print('y:', y)", "_____no_output_____" ] ], [ [ "As seen above, we have to be careful about sharing (also known as \"aliasing\") mutable data!", "_____no_output_____" ] ], [ [ "a = [301, 302, 303]\nb = [a, a, a]\nprint(b)", "_____no_output_____" ], [ "b[0][0] = 304\nprint(b)\nprint(a)", "_____no_output_____" ] ], [ [ "### Tuples", "_____no_output_____" ], [ "Tuples are a lot like lists, except that they are immutable.", "_____no_output_____" ] ], [ [ "x = ('baz', [301, 302], 303, 304)\ny = x\nprint('x:', x, '\\ny:', y)", "_____no_output_____" ] ], [ [ "Unlike a list, we can't change the top most structure of a tuple. What happens if we try the following?", "_____no_output_____" ] ], [ [ "x[0] = 388", "_____no_output_____" ] ], [ [ "What will happen in the following (operating on `x`)?", "_____no_output_____" ] ], [ [ "x[1][0] = 311\nprint('x:', x, '\\ny:', y)", "_____no_output_____" ] ], [ [ "So we still need to be careful! The tuple didn't change at the top level -- but it might have members that are themselves mutable.", "_____no_output_____" ], [ "### Strings", "_____no_output_____" ], [ "Strings are also immutable. We can't change them once created. ", "_____no_output_____" ] ], [ [ "a = 'ya'\nb = a + 'rn'\nprint('a:', a, '\\nb:', b)", "_____no_output_____" ], [ "a[0] = 'Y'", "_____no_output_____" ], [ "c = 'twine'\nd = c\nc += ' thread'\nprint('c:', c, '\\nd:', d)", "_____no_output_____" ] ], [ [ "That's a little bit tricky. Here the `+=` operator makes a copy of `c` first to use as part of the new string with `' there'` included at the end.", "_____no_output_____" ], [ "### Back to lists: append, extend, and the '+' and '+=' operators", "_____no_output_____" ] ], [ [ "x = [1, 2, 3]\ny = [4, 5]\nx.append(y)\ny[0] = 99\nprint('x:', x, '\\ny:', y)", "_____no_output_____" ] ], [ [ "So again, we have to watch out for aliasing/sharing, whenever we mutate an object.", "_____no_output_____" ] ], [ [ "x = [1, 2, 3]\ny = [4, 5]\nx.extend(y)\ny[0] = 88\nprint('x:', x, '\\ny:', y)", "_____no_output_____" ] ], [ [ "<pre>\n\n\n\n\n\n\n\n\n</pre>\nWhat happens when using the `+` operator used on lists?", "_____no_output_____" ] ], [ [ "x = [1, 2, 3]\ny = x\nx = x + [4, 5]\nprint('x:', x)", "_____no_output_____" ] ], [ [ "So the `+` operator on a list looks sort of like extend. 
But has it changed `x` in place, or made a copy of `x` first for use in the longer list?\n\nAnd what happens to `y` in the above?", "_____no_output_____" ] ], [ [ "print('y:', y)", "_____no_output_____" ] ], [ [ "So that clarifies things -- the `+` operator on a list makes a (shallow) copy of the left argument first, then uses that copy in the new larger list.", "_____no_output_____" ], [ "Another case, this time using the `+=` operator with a list. Note: in the case of integers, `a = a + <val>` and `a += <val>` gave exactly the same result. How about in the case of lists?", "_____no_output_____" ] ], [ [ "x = [1, 2, 3]\ny = x\nx += [4, 5]\ny[0] = 77\nprint('x:', x, '\\ny:', y)", "_____no_output_____" ] ], [ [ "So `x += <something>` is NOT the same thing as `x = x + <something>` if `x` is a list! Here it actually DOES mutate or change `x` in place, if that is allowed (i.e., if `x` is a mutable object).\n\nContrast this with the same thing, but for `x` in the case where `x` was a string. Since strings are immutable, python does not change `x` in place. Rather, the `+=` operator is overloaded to do a top-level copy of the target, make that copy part of the new larger object, and assign that new object to the variable.", "_____no_output_____" ], [ "Let's check your understanding. What will happen in the following, that looks just like the code above for lists, but instead using tuples. What will x and y be after executing this?", "_____no_output_____" ] ], [ [ "x = (301, 302, 303)\ny = x\nx += (304, 305)\nprint('x:', x, '\\ny:', y)", "_____no_output_____" ] ], [ [ "## Functions and scoping", "_____no_output_____" ] ], [ [ "x = 500\ndef foo(y):\n return x + y\nz = foo(307)\nprint('x:', x, '\\nfoo:', foo, '\\nz:', z)", "_____no_output_____" ], [ "def bar(x):\n x = 1000\n return foo(307)\nw = bar('hi')\nprint('x:', x, '\\nw:', w)", "_____no_output_____" ] ], [ [ "Importantly, `foo` \"remembers\" that it was created in the global environment, so looks in the global environment to find a value for `x`. It does **not** look back in its \"call chain\"; rather, it looks back in its parent environment.", "_____no_output_____" ], [ "### Optional arguments and default values", "_____no_output_____" ] ], [ [ "def foo(x, y = []):\n y = y + [x]\n return y\n\na = foo(7)\nb = foo(8, [1, 2, 3])\nprint('a:', a, '\\nb:', b)", "_____no_output_____" ], [ "c = foo(7)\nprint('a:', a, '\\nb:', b, '\\nc:', c)", "_____no_output_____" ] ], [ [ "Let's try something that looks close to the same thing... but with an important difference!", "_____no_output_____" ] ], [ [ "def foo(x, y = []):\n y.append(x) # different here\n return y\n\na = foo(7)\nb = foo(8, [1, 2, 3])\nprint('a:', a, '\\nb:', b)", "_____no_output_____" ] ], [ [ "Okay, so far it looks the same as with the earlier `foo`.", "_____no_output_____" ] ], [ [ "c = foo(7)\nprint('a:', a, '\\nb:', b, '\\nc:', c)", "_____no_output_____" ] ], [ [ "So quite different... all kinds of aliasing going on. Perhaps surprisingly, the default value to an optional argument is only evaluated once, at function *definition* time. The moral here is to be **very** careful (and indeed it may be best to simply avoid) having optional/default arguments that are mutable structures like lists... it's hard to remember or debug such aliasing!", "_____no_output_____" ], [ "## Reference Counting", "_____no_output_____" ], [ "This is an advanced feature you don't need to know about, but you might be curious about. 
Python knows to throw away an object when its \"reference counter\" reaches zero. You can inspect the current value of an object's reference counter with `sys.getrefcount`.", "_____no_output_____" ] ], [ [ "import sys\nL1 = [301, 302, 303]\nprint(sys.getrefcount(L1))\nL2 = L1\nprint(sys.getrefcount(L1))\nL3 = [L1, L1, L1]\nprint(sys.getrefcount(L1))\nL3.pop()\nprint(sys.getrefcount(L1))\nL3 = 307\nprint(sys.getrefcount(L1))", "_____no_output_____" ] ], [ [ "## Readings -- if you want/need more refreshers", "_____no_output_____" ], [ "Check out readings and exercises from <a href=https://hz.mit.edu/catsoop/6.145><b>6.145</b></a>:\n<ul>\n <li> <a href=https://hz.mit.edu/catsoop/6.145/assignment0.0/readings#_variables_and_assignment>Assignment and aliasing</a>\n <li> What is an <a href=https://hz.mit.edu/catsoop/6.145/assignment0.0/readings#_environment_diagrams>environment</a>? What is a frame? How should we draw environment diagrams?\n <li> What is a <a href=https://hz.mit.edu/catsoop/6.145/assignment1.0/readings>function</a>? \n What happens when one is defined? What happens when one is called? \n <li> What happens when a <a href=https://hz.mit.edu/catsoop/6.145/assignment1.1/readings#_function_ception_and_returning_functions>function is defined inside another function</a> (also known as a closure)?\n <li> What is a <a href=https://hz.mit.edu/catsoop/6.145/assignment2.0/readings>class</a>? What is an instance? What is self? What is __init__?\n <li> How does <a href=https://hz.mit.edu/catsoop/6.145/assignment2.1/readings>inheritance</a> in classes work?\n </ul>\n \n Another resource is the <a href=https://greenteapress.com/wp/think-python-2e/>Think Python</a> textbook.", "_____no_output_____" ] ] ]
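Picking up the earlier moral about mutable default arguments: the usual defensive idiom is a `None` sentinel. A small sketch (not part of the original recitation):

```python
def foo(x, y=None):
    # A fresh list is created on every call, so no state leaks
    # between calls the way it did with the `y=[]` default above.
    if y is None:
        y = []
    y.append(x)
    return y

print(foo(7))             # [7]
print(foo(7))             # [7] again -- no surprise accumulation
print(foo(8, [1, 2, 3]))  # [1, 2, 3, 8]
```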
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
cb9b90e3d8096ebc5cb05f27443b4683386a921b
394,408
ipynb
Jupyter Notebook
MachineLearning/HouseholdPowerConsumption/KMeansML.ipynb
Renanrbsc/DataScience
1118d2fdc2326c64228a44841054ccbe0f554075
[ "MIT" ]
null
null
null
MachineLearning/HouseholdPowerConsumption/KMeansML.ipynb
Renanrbsc/DataScience
1118d2fdc2326c64228a44841054ccbe0f554075
[ "MIT" ]
null
null
null
MachineLearning/HouseholdPowerConsumption/KMeansML.ipynb
Renanrbsc/DataScience
1118d2fdc2326c64228a44841054ccbe0f554075
[ "MIT" ]
null
null
null
393.228315
344,812
0.923947
[ [ [ "![Captura%20de%20tela%202022-02-16%20154604.png](attachment:Captura%20de%20tela%202022-02-16%20154604.png)", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.cluster import KMeans\nfrom sklearn.decomposition import PCA\nfrom scipy.spatial.distance import cdist, pdist\nfrom sklearn.metrics import silhouette_score\nimport warnings\n\nwarnings.filterwarnings(\"ignore\")\n%matplotlib inline", "_____no_output_____" ], [ "df = pd.read_csv(\"datasets/household_power_consumption.csv\", \n dtype={'Global_active_power': str, 'Global_reactive_power': str, \n 'Voltage': str, 'Global_intensity': str, \n 'Sub_metering_1': str, 'Sub_metering_2': str, \n 'Sub_metering_3': str},\n sep=\";\")", "_____no_output_____" ], [ "df.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 2075259 entries, 0 to 2075258\nData columns (total 9 columns):\n # Column Dtype \n--- ------ ----- \n 0 Date object\n 1 Time object\n 2 Global_active_power object\n 3 Global_reactive_power object\n 4 Voltage object\n 5 Global_intensity object\n 6 Sub_metering_1 object\n 7 Sub_metering_2 object\n 8 Sub_metering_3 object\ndtypes: object(9)\nmemory usage: 142.5+ MB\n" ], [ "df.head()", "_____no_output_____" ], [ "df.isnull().sum()", "_____no_output_____" ] ], [ [ "# Preprocessing", "_____no_output_____" ] ], [ [ "# correção do tipo date e junção (data hora)\ndf[\"Datetime\"] = df[\"Date\"] + \" \" + df[\"Time\"]\ndf[\"Datetime\"] = pd.to_datetime(df[\"Datetime\"], format=\"%d/%m/%Y %H:%M:%S\")\ndf = df.drop(columns=[\"Date\", \"Time\"])", "_____no_output_____" ], [ "# correção de tipos e padronização de nulos pela moda\nfor col in df.select_dtypes(include=\"object\").columns:\n df.loc[(df[col] == \"?\"), [col]] = np.nan\n df[col] = df[col].astype(float)\n mode = df[col].mode()[0]\n df[col] = df[col].replace(np.nan, mode)", "_____no_output_____" ], [ "df.isnull().sum()", "_____no_output_____" ], [ "# obter valores para entrada no modelo K-means\n# tudo menos a coluna de data\nloc_X = df.iloc[0:, :7]", "_____no_output_____" ], [ "loc_X.head()", "_____no_output_____" ], [ "# valores de entrada convertidos em np.array\nX = loc_X.values", "_____no_output_____" ], [ "# gerando 1% dos dados totais para amostra do modelo\ndf_sample, sample = train_test_split(X, train_size = .01)\ndf_sample.shape", "_____no_output_____" ], [ "# Aplicando redução de dimensionalidade no array \n# das 7 colunas de variaveis para 2 componentes principais\npca = PCA(n_components = 2).fit_transform(df_sample)", "_____no_output_____" ], [ "# Determinando um range de Hyperparâmetro \"K\" do Kmeans\nk_range = range(1, 12)\nk_range", "_____no_output_____" ], [ "# Aplicando o modelo K-means para cada valor de K\nk_means_var = [KMeans(n_clusters = k).fit(pca) for k in k_range]", "_____no_output_____" ], [ "# Curva de Elbow\n\n#Ajustando o centroide do cluster para cada modelo\ncentroids = [X.cluster_centers_ for X in k_means_var]\n\n# Calculando a distancia euclidiana de cada ponto de dado para o centroide\nk_euclid = [cdist(pca, cent, 'euclidean') for cent in centroids]\ndist = [np.min(ke, axis = 1) for ke in k_euclid]\n\n# Soma dos quadrados das distancias dentro do cluster\nsoma_quadrados_intra_cluster = [sum(d**2) for d in dist]\n\n# Soma total dos quadrados\nsoma_total = sum(pdist(pca)**2)/pca.shape[0]\n\n# Soma dos quadrados entre clusters\nsoma_quadrados_inter_cluster = soma_total - soma_quadrados_intra_cluster\n\n# Curva de 
Elbow\nfig = plt.figure()\nax = fig.add_subplot(111)\nax.plot(k_range, soma_quadrados_inter_cluster/soma_total*100, \"b*-\")\nax.set_ylim((0, 100))\nplt.grid(True)\nplt.xlabel(\"N° de Clusters\")\nplt.ylabel(\"% de Variância Explicada\")\nplt.title(\"Variância Explicada para cada valor de K\")", "_____no_output_____" ], [ "# Escolhendo um valor de K (igual a 8) para avaliaçâo de maquina preditiva\n\n# criando um modelo com k = 8\nmodel_v1 = KMeans(n_clusters=8)\nmodel_v1.fit(pca)", "_____no_output_____" ], [ "# Obtendo os valores minimos e maximos e organiza o shape\nx_min, x_max = pca[:, 0].min() - 5, pca[:, 0].max() - 1\ny_min, y_max = pca[:, 1].min() + 1, pca[:, 1].max() + 1\nxx, yy = np.meshgrid(np.arange(x_min, x_max, .02), np.arange(y_min, y_max, .02))\nZ = model_v1.predict(np.c_[xx.ravel(), yy.ravel()])\nZ = Z.reshape(xx.shape)", "_____no_output_____" ], [ "# Plot ds areas dos clusters\nplt.figure(1)\nplt.clf()\nplt.imshow(\n Z,\n interpolation='nearest',\n extent=(xx.min(), xx.max(), yy.min(), yy.max()),\n cmap=plt.cm.Paired,\n aspect='auto',\n origin='lower'\n );", "_____no_output_____" ], [ "# Metrica de avaliaçao para Clusterização\n# The best value is 1 and the worst value is -1\n?silhouette_score", "_____no_output_____" ], [ "# Silhouette_score\nlabels = model_v1.labels_\n\nsilhouette_score(pca, labels, metric='euclidean')", "_____no_output_____" ], [ "# Marcando os clusters da Maquina Preditiva Final\n\n# Lista de nomes de colunas\nnames = loc_X.columns.tolist()\n\n# Incluindo o n° do cluster na base de clientes\ncluster_map = pd.DataFrame(df_sample, columns=names)\ncluster_map[\"cluster\"] = model_v1.labels_", "_____no_output_____" ], [ "cluster_map.head()", "_____no_output_____" ], [ "cluster_map.dtypes", "_____no_output_____" ], [ "cluster_map.groupby(\"cluster\")[\"Global_active_power\"].mean().rename(\"Mean_GAP\").reset_index()", "_____no_output_____" ], [ "# Conclusão: Grupo 5 tem maior consumo de energia dentre os 8 grupos\n# Grupo 0 tem menor consumo, assim, possivel falha na transmissão de energia?", "_____no_output_____" ] ] ]
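As a cross-check on the manual elbow computation above: scikit-learn already exposes the within-cluster sum of squares as `inertia_`, the same quantity as `soma_quadrados_intra_cluster`. A shorter sketch, assuming the `pca` array from this notebook:

```python
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans

k_range = range(1, 12)
# inertia_ = within-cluster sum of squared distances to the centroids
inertias = [KMeans(n_clusters=k).fit(pca).inertia_ for k in k_range]

plt.plot(list(k_range), inertias, "b*-")
plt.grid(True)
plt.xlabel("Number of clusters")
plt.ylabel("Within-cluster sum of squares")
plt.show()
```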
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb9bc1566bb6050cf96f421a946ee0cdfa3ca409
91,645
ipynb
Jupyter Notebook
hw-2-C0deMonkee.ipynb
chapman-phys227-2016s/hw-2-C0deMonkee
2db1f30afecb9f2d09e29a15c6f47656e8582deb
[ "MIT" ]
null
null
null
hw-2-C0deMonkee.ipynb
chapman-phys227-2016s/hw-2-C0deMonkee
2db1f30afecb9f2d09e29a15c6f47656e8582deb
[ "MIT" ]
null
null
null
hw-2-C0deMonkee.ipynb
chapman-phys227-2016s/hw-2-C0deMonkee
2db1f30afecb9f2d09e29a15c6f47656e8582deb
[ "MIT" ]
null
null
null
305.483333
15,324
0.922647
[ [ [ "import fit_pendulum_data as p1\nimport midpoint_vec as p2\nimport Lagrange_poly1 as p3\nimport Lagrange_poly2 as p4\nimport Lagrange_poly2b as p5\nimport sympy as sp\nimport numpy as np\nimport matplotlib.pyplot as plt", "_____no_output_____" ] ], [ [ "# Exercise 5.18\n## Plots L vs T from a source file and fits polynomials of varying degrees to it", "_____no_output_____" ] ], [ [ "p1.part_a()", "_____no_output_____" ] ], [ [ "### Below are the polynomials being fit to the data", "_____no_output_____" ] ], [ [ "p1.part_b()", "_____no_output_____" ] ], [ [ "# Exercise 5.22\n## Computes the midpoint rule in different forms", "_____no_output_____" ] ], [ [ "p2.midpointint(p2.function, 1, 3, 50)[0]", "_____no_output_____" ], [ "p2.sum_vectorized(p2.function, 1, 3, 50)", "_____no_output_____" ], [ "p2.sum_numpy(p2.function, 1, 3, 50)", "_____no_output_____" ] ], [ [ "# Exercise 5.23, 5.24, 5.25 (3 part)\n## Lagrange Interpolation", "_____no_output_____" ] ], [ [ "p4.graph(p4.sin, 20, 0, 10, [0,10,-2,2])", "_____no_output_____" ], [ "p5.problem_5_25()", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
cb9bc327eafc927afc536f72bf7d693812bfe988
6,132
ipynb
Jupyter Notebook
pydefect/analyzer/dash_components/cpd_energy_dash_demo.ipynb
KazMorita/pydefect
681e4bfe92c53edfe8b50cb72768114b28daabc9
[ "MIT" ]
1
2021-09-10T05:07:39.000Z
2021-09-10T05:07:39.000Z
pydefect/analyzer/dash_components/cpd_energy_dash_demo.ipynb
obaica/pydefect-1
31e5ad774845f436554ef15000b8eba3b168a65c
[ "MIT" ]
null
null
null
pydefect/analyzer/dash_components/cpd_energy_dash_demo.ipynb
obaica/pydefect-1
31e5ad774845f436554ef15000b8eba3b168a65c
[ "MIT" ]
1
2022-01-07T10:14:16.000Z
2022-01-07T10:14:16.000Z
33.145946
240
0.548761
[ [ [ "from crystal_toolkit.helpers.layouts import Columns, Column\nfrom crystal_toolkit.settings import SETTINGS\nfrom jupyter_dash import JupyterDash\nfrom pydefect.analyzer.calc_results import CalcResults\nfrom pydefect.analyzer.dash_components.cpd_energy_dash import CpdEnergy2D3DComponent, CpdEnergyOtherComponent\nfrom pydefect.chem_pot_diag.chem_pot_diag import ChemPotDiag, CpdPlotInfo, \\\n CompositionEnergy\nfrom pydefect.corrections.manual_correction import ManualCorrection\nfrom pydefect.input_maker.defect_entry import DefectEntry\nfrom pymatgen import Composition, Structure, Lattice, Element\nimport dash_html_components as html\nimport crystal_toolkit.components as ctc\nfrom dash.dependencies import Input, Output, State\nimport json\n\napp = JupyterDash(suppress_callback_exceptions=True,\n assets_folder=SETTINGS.ASSETS_PATH)\nfrom vise.analyzer.band_edge_properties import BandEdge\n\ncomp_energies = [\n CompositionEnergy(Composition(\"Mg\"), 0.0, \"a\"),\n CompositionEnergy(Composition(\"Ca\"), 0.0, \"a\"),\n CompositionEnergy(Composition(\"Sr\"), 0.0, \"a\"),\n CompositionEnergy(Composition(\"O\"), 0.0, \"a\"),\n CompositionEnergy(Composition(\"H\"), 0.0, \"a\"),\n# CompositionEnergy(Composition(\"MgCaO3\"), -100.0, \"a\"),\n CompositionEnergy(Composition(\"MgCaSrO3\"), -100.0, \"a\"),\n]\n#cpd = ChemPotDiag(comp_energies, target=Composition(\"MgCaO3\"))\ncpd = ChemPotDiag(comp_energies, target=Composition(\"MgCaSrO3\"))\ncpd_plot_info = CpdPlotInfo(cpd)", "dash_mp_components\n" ], [ "print(cpd.target.elements)\nprint(cpd.dim)\nprint(cpd.target_vertices)\nprint(cpd.all_compounds)\nprint(cpd.impurity_abs_energy(Element.H, label=\"A\"))", "[Element Mg, Element Ca, Element Sr, Element O]\n4\n{'A': array([ 0., -100., 0., 0.]), 'B': array([ 0., 0., -100., 0.]), 'C': array([ 0. , 0. , 0. , -33.33333333]), 'D': array([-100., 0., 0., 0.])}\n[Comp: Mg1, Comp: Ca1, Comp: Sr1, Comp: O2, Comp: H2, Comp: Sr1 Ca1 Mg1 O3]\n(CompositionEnergy(composition=Comp: H1, energy=0.0, source='a'), 0.0)\n" ], [ "structure = Structure(Lattice.cubic(1), species=[\"O\"] * 2, coords=[[0]*3]*2)\ndefect_structure = Structure(Lattice.cubic(1), species=[\"O\"] * 1, coords=[[0]*3])\n\ncommon = dict(site_symmetry=\"1\",\n magnetization=0.0,\n kpoint_coords=[[0]*3],\n kpoint_weights=[1.0],\n potentials=[0.0],\n vbm_info=BandEdge(0.0),\n cbm_info=BandEdge(1.0),\n fermi_level=0.0)\n\nperfect = CalcResults(structure=structure,energy=0, **common)\ndefects = [CalcResults(structure=defect_structure, energy=1.0, **common),\n CalcResults(structure=defect_structure, energy=0.5, **common)]\n\nde_common = dict(name=\"Va_O1\",\n structure=defect_structure, site_symmetry=\"1\",\n perturbed_structure=defect_structure, defect_center=[[0]*3])\n\ndefect_entries = [DefectEntry(charge=0, **de_common),\n DefectEntry(charge=1, **de_common)]\n\ncorrections = [ManualCorrection(correction_energy=1.0),\n ManualCorrection(correction_energy=1.0)]\n\ncpd_e_component = CpdEnergyOtherComponent(cpd_plot_info,\n perfect,\n defects,\n defect_entries,\n corrections)\n\nmy_layout = html.Div([Column(cpd_e_component.layout)])\nctc.register_crystal_toolkit(app=app, layout=my_layout, cache=None)", "_____no_output_____" ], [ "app.run_server(port=8097)\n#app.run_server(mode='inline', port=8094)\n", "Dash app running on http://127.0.0.1:8097/\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code" ] ]
cb9bdfa23a20710d23a9f7edd54ba87e7d62bb20
55,474
ipynb
Jupyter Notebook
knn_model_no_feature_filter/knn_model.ipynb
yang5hi/machine-learning-challenge
f2fb7e7953305b177d66a8622ae012ddcb16b249
[ "ADSL" ]
null
null
null
knn_model_no_feature_filter/knn_model.ipynb
yang5hi/machine-learning-challenge
f2fb7e7953305b177d66a8622ae012ddcb16b249
[ "ADSL" ]
null
null
null
knn_model_no_feature_filter/knn_model.ipynb
yang5hi/machine-learning-challenge
f2fb7e7953305b177d66a8622ae012ddcb16b249
[ "ADSL" ]
null
null
null
53.858252
16,812
0.612629
[ [ [ "# Update sklearn to prevent version mismatches\n!pip install sklearn --upgrade", "_____no_output_____" ], [ "# install joblib. This will be used to save your model. \n# Restart your kernel after installing \n!pip install joblib", "_____no_output_____" ] ], [ [ "import pandas as pd", "_____no_output_____" ] ], [ [ "# Read the CSV and Perform Basic Data Cleaning", "_____no_output_____" ] ], [ [ "df = pd.read_csv(\"../data/exoplanet_data.csv\")\n# Drop the null columns where all values are null\ndf = df.dropna(axis='columns', how='all')\n# Drop the null rows\ndf = df.dropna()\ndf.head()", "_____no_output_____" ], [ "df.tail()", "_____no_output_____" ], [ "df.info()", "<class 'pandas.core.frame.DataFrame'>\nInt64Index: 6991 entries, 0 to 6990\nData columns (total 41 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 koi_disposition 6991 non-null object \n 1 koi_fpflag_nt 6991 non-null int64 \n 2 koi_fpflag_ss 6991 non-null int64 \n 3 koi_fpflag_co 6991 non-null int64 \n 4 koi_fpflag_ec 6991 non-null int64 \n 5 koi_period 6991 non-null float64\n 6 koi_period_err1 6991 non-null float64\n 7 koi_period_err2 6991 non-null float64\n 8 koi_time0bk 6991 non-null float64\n 9 koi_time0bk_err1 6991 non-null float64\n 10 koi_time0bk_err2 6991 non-null float64\n 11 koi_impact 6991 non-null float64\n 12 koi_impact_err1 6991 non-null float64\n 13 koi_impact_err2 6991 non-null float64\n 14 koi_duration 6991 non-null float64\n 15 koi_duration_err1 6991 non-null float64\n 16 koi_duration_err2 6991 non-null float64\n 17 koi_depth 6991 non-null float64\n 18 koi_depth_err1 6991 non-null float64\n 19 koi_depth_err2 6991 non-null float64\n 20 koi_prad 6991 non-null float64\n 21 koi_prad_err1 6991 non-null float64\n 22 koi_prad_err2 6991 non-null float64\n 23 koi_teq 6991 non-null int64 \n 24 koi_insol 6991 non-null float64\n 25 koi_insol_err1 6991 non-null float64\n 26 koi_insol_err2 6991 non-null float64\n 27 koi_model_snr 6991 non-null float64\n 28 koi_tce_plnt_num 6991 non-null int64 \n 29 koi_steff 6991 non-null int64 \n 30 koi_steff_err1 6991 non-null int64 \n 31 koi_steff_err2 6991 non-null int64 \n 32 koi_slogg 6991 non-null float64\n 33 koi_slogg_err1 6991 non-null float64\n 34 koi_slogg_err2 6991 non-null float64\n 35 koi_srad 6991 non-null float64\n 36 koi_srad_err1 6991 non-null float64\n 37 koi_srad_err2 6991 non-null float64\n 38 ra 6991 non-null float64\n 39 dec 6991 non-null float64\n 40 koi_kepmag 6991 non-null float64\ndtypes: float64(31), int64(9), object(1)\nmemory usage: 2.2+ MB\n" ] ], [ [ "# Select your features (columns)", "_____no_output_____" ] ], [ [ "# Set features. 
This will also be used as x values.\ntarget = df[\"koi_disposition\"]\ntarget_names = ['CANDIDATE','CONFIRMED','FALSE POSITIVE']\nfeatures = df.drop(\"koi_disposition\", axis=1)\nfeature_names=features.columns", "_____no_output_____" ] ], [ [ "# Create a Train Test Split\n\nUse `koi_disposition` for the y values", "_____no_output_____" ] ], [ [ "from sklearn.preprocessing import LabelEncoder\nfrom tensorflow.keras.utils import to_categorical\n\n# Step 1: Label-encode data set\nlabel_encoder = LabelEncoder()\nlabel_encoder.fit(target)\nencoded_y = label_encoder.transform(target)\nencoded_y", "_____no_output_____" ], [ "from sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(features, encoded_y, random_state=42)\nX_train.shape, y_train.shape", "_____no_output_____" ] ], [ [ "# Pre-processing\n\nScale the data using the MinMaxScaler and perform some feature selection", "_____no_output_____" ] ], [ [ "# Scale the data\nfrom sklearn.preprocessing import MinMaxScaler\nX_scaler = MinMaxScaler().fit(X_train)\nX_train_scaled = X_scaler.transform(X_train)\nX_test_scaled = X_scaler.transform(X_test)", "_____no_output_____" ] ], [ [ "# Train the Model\n\n", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\nfrom sklearn.neighbors import KNeighborsClassifier\n# Loop through different k values to see which has the highest accuracy\n# Note: We only use odd numbers because we don't want any ties\ntrain_scores = []\ntest_scores = []\nfor k in range(1, 20, 2):\n knn = KNeighborsClassifier(n_neighbors=k)\n knn.fit(X_train_scaled, y_train)\n train_score = knn.score(X_train_scaled, y_train)\n test_score = knn.score(X_test_scaled, y_test)\n train_scores.append(train_score)\n test_scores.append(test_score)\n print(f\"k: {k}, Train/Test Score: {train_score:.3f}/{test_score:.3f}\")\n \n \nplt.plot(range(1, 20, 2), train_scores, marker='o')\nplt.plot(range(1, 20, 2), test_scores, marker=\"x\")\nplt.xlabel(\"k neighbors\")\nplt.ylabel(\"Testing accuracy Score\")\nplt.show()", "k: 1, Train/Test Score: 1.000/0.790\nk: 3, Train/Test Score: 0.897/0.811\nk: 5, Train/Test Score: 0.878/0.820\nk: 7, Train/Test Score: 0.866/0.823\nk: 9, Train/Test Score: 0.856/0.828\nk: 11, Train/Test Score: 0.848/0.828\nk: 13, Train/Test Score: 0.849/0.822\nk: 15, Train/Test Score: 0.841/0.824\nk: 17, Train/Test Score: 0.838/0.827\nk: 19, Train/Test Score: 0.836/0.824\n" ], [ "knn = KNeighborsClassifier(n_neighbors=19)\nknn.fit(X_train_scaled, y_train)\npredictions = knn.predict(X_test_scaled)\n\nprint(f\"k = 19: Training Data Score: {knn.score(X_train_scaled, y_train):.5f}\")\nprint(f\"k = 19: Testing Data Score: {knn.score(X_test_scaled, y_test):.5f}\")", "k = 19: Training Data Score: 0.83635\nk = 19: Testing Data Score: 0.82437\n" ], [ "from sklearn.metrics import classification_report\npredictions = knn.predict(X_test_scaled)\nprint(classification_report(y_test, predictions,\n target_names=list(label_encoder.inverse_transform([0,1,2]))))", " precision recall f1-score support\n\n CANDIDATE 0.69 0.52 0.59 411\n CONFIRMED 0.66 0.78 0.72 484\nFALSE POSITIVE 0.98 1.00 0.99 853\n\n accuracy 0.82 1748\n macro avg 0.78 0.76 0.76 1748\n weighted avg 0.82 0.82 0.82 1748\n\n" ] ], [ [ "# Hyperparameter Tuning\n\nUse `GridSearchCV` to tune the model's parameters", "_____no_output_____" ] ], [ [ "# Create the GridSearchCV model\n# Create the GridSearch estimator along with a parameter object containing the values to adjust\nfrom sklearn.model_selection import GridSearchCV\nparam_grid = 
{'n_neighbors': [3,5,7,9,11,13,15,17,19],\n 'weights':['uniform','distance']}\ngrid = GridSearchCV(KNeighborsClassifier(), param_grid, verbose=3)", "_____no_output_____" ], [ "# Train the model with GridSearch\ngrid.fit(X_train_scaled, y_train)", "Fitting 5 folds for each of 18 candidates, totalling 90 fits\n[CV 1/5] END ....n_neighbors=3, weights=uniform;, score=0.827 total time= 0.0s\n[CV 2/5] END ....n_neighbors=3, weights=uniform;, score=0.803 total time= 0.0s\n[CV 3/5] END ....n_neighbors=3, weights=uniform;, score=0.800 total time= 0.0s\n[CV 4/5] END ....n_neighbors=3, weights=uniform;, score=0.805 total time= 0.0s\n[CV 5/5] END ....n_neighbors=3, weights=uniform;, score=0.807 total time= 0.0s\n[CV 1/5] END ...n_neighbors=3, weights=distance;, score=0.827 total time= 0.0s\n[CV 2/5] END ...n_neighbors=3, weights=distance;, score=0.804 total time= 0.0s\n[CV 3/5] END ...n_neighbors=3, weights=distance;, score=0.799 total time= 0.0s\n[CV 4/5] END ...n_neighbors=3, weights=distance;, score=0.807 total time= 0.0s\n[CV 5/5] END ...n_neighbors=3, weights=distance;, score=0.808 total time= 0.0s\n[CV 1/5] END ....n_neighbors=5, weights=uniform;, score=0.825 total time= 0.0s\n[CV 2/5] END ....n_neighbors=5, weights=uniform;, score=0.811 total time= 0.0s\n[CV 3/5] END ....n_neighbors=5, weights=uniform;, score=0.812 total time= 0.0s\n[CV 4/5] END ....n_neighbors=5, weights=uniform;, score=0.806 total time= 0.0s\n[CV 5/5] END ....n_neighbors=5, weights=uniform;, score=0.812 total time= 0.0s\n[CV 1/5] END ...n_neighbors=5, weights=distance;, score=0.827 total time= 0.0s\n[CV 2/5] END ...n_neighbors=5, weights=distance;, score=0.806 total time= 0.0s\n[CV 3/5] END ...n_neighbors=5, weights=distance;, score=0.810 total time= 0.0s\n[CV 4/5] END ...n_neighbors=5, weights=distance;, score=0.810 total time= 0.0s\n[CV 5/5] END ...n_neighbors=5, weights=distance;, score=0.813 total time= 0.0s\n[CV 1/5] END ....n_neighbors=7, weights=uniform;, score=0.828 total time= 0.0s\n[CV 2/5] END ....n_neighbors=7, weights=uniform;, score=0.807 total time= 0.0s\n[CV 3/5] END ....n_neighbors=7, weights=uniform;, score=0.810 total time= 0.0s\n[CV 4/5] END ....n_neighbors=7, weights=uniform;, score=0.807 total time= 0.0s\n[CV 5/5] END ....n_neighbors=7, weights=uniform;, score=0.809 total time= 0.0s\n[CV 1/5] END ...n_neighbors=7, weights=distance;, score=0.827 total time= 0.0s\n[CV 2/5] END ...n_neighbors=7, weights=distance;, score=0.806 total time= 0.0s\n[CV 3/5] END ...n_neighbors=7, weights=distance;, score=0.807 total time= 0.0s\n[CV 4/5] END ...n_neighbors=7, weights=distance;, score=0.810 total time= 0.0s\n[CV 5/5] END ...n_neighbors=7, weights=distance;, score=0.807 total time= 0.0s\n[CV 1/5] END ....n_neighbors=9, weights=uniform;, score=0.833 total time= 0.0s\n[CV 2/5] END ....n_neighbors=9, weights=uniform;, score=0.816 total time= 0.0s\n[CV 3/5] END ....n_neighbors=9, weights=uniform;, score=0.807 total time= 0.0s\n[CV 4/5] END ....n_neighbors=9, weights=uniform;, score=0.800 total time= 0.0s\n[CV 5/5] END ....n_neighbors=9, weights=uniform;, score=0.823 total time= 0.0s\n[CV 1/5] END ...n_neighbors=9, weights=distance;, score=0.834 total time= 0.0s\n[CV 2/5] END ...n_neighbors=9, weights=distance;, score=0.814 total time= 0.0s\n[CV 3/5] END ...n_neighbors=9, weights=distance;, score=0.806 total time= 0.0s\n[CV 4/5] END ...n_neighbors=9, weights=distance;, score=0.805 total time= 0.0s\n[CV 5/5] END ...n_neighbors=9, weights=distance;, score=0.823 total time= 0.0s\n[CV 1/5] END ...n_neighbors=11, 
weights=uniform;, score=0.824 total time= 0.0s\n[CV 2/5] END ...n_neighbors=11, weights=uniform;, score=0.819 total time= 0.0s\n[CV 3/5] END ...n_neighbors=11, weights=uniform;, score=0.819 total time= 0.0s\n[CV 4/5] END ...n_neighbors=11, weights=uniform;, score=0.793 total time= 0.0s\n[CV 5/5] END ...n_neighbors=11, weights=uniform;, score=0.816 total time= 0.0s\n[CV 1/5] END ..n_neighbors=11, weights=distance;, score=0.827 total time= 0.0s\n[CV 2/5] END ..n_neighbors=11, weights=distance;, score=0.819 total time= 0.0s\n[CV 3/5] END ..n_neighbors=11, weights=distance;, score=0.819 total time= 0.0s\n[CV 4/5] END ..n_neighbors=11, weights=distance;, score=0.801 total time= 0.0s\n[CV 5/5] END ..n_neighbors=11, weights=distance;, score=0.814 total time= 0.0s\n[CV 1/5] END ...n_neighbors=13, weights=uniform;, score=0.835 total time= 0.0s\n[CV 2/5] END ...n_neighbors=13, weights=uniform;, score=0.815 total time= 0.0s\n[CV 3/5] END ...n_neighbors=13, weights=uniform;, score=0.821 total time= 0.0s\n[CV 4/5] END ...n_neighbors=13, weights=uniform;, score=0.801 total time= 0.0s\n[CV 5/5] END ...n_neighbors=13, weights=uniform;, score=0.812 total time= 0.0s\n[CV 1/5] END ..n_neighbors=13, weights=distance;, score=0.832 total time= 0.0s\n[CV 2/5] END ..n_neighbors=13, weights=distance;, score=0.810 total time= 0.0s\n[CV 3/5] END ..n_neighbors=13, weights=distance;, score=0.819 total time= 0.0s\n[CV 4/5] END ..n_neighbors=13, weights=distance;, score=0.802 total time= 0.0s\n[CV 5/5] END ..n_neighbors=13, weights=distance;, score=0.814 total time= 0.0s\n[CV 1/5] END ...n_neighbors=15, weights=uniform;, score=0.832 total time= 0.0s\n[CV 2/5] END ...n_neighbors=15, weights=uniform;, score=0.811 total time= 0.0s\n[CV 3/5] END ...n_neighbors=15, weights=uniform;, score=0.831 total time= 0.0s\n[CV 4/5] END ...n_neighbors=15, weights=uniform;, score=0.808 total time= 0.0s\n[CV 5/5] END ...n_neighbors=15, weights=uniform;, score=0.813 total time= 0.0s\n[CV 1/5] END ..n_neighbors=15, weights=distance;, score=0.836 total time= 0.0s\n[CV 2/5] END ..n_neighbors=15, weights=distance;, score=0.814 total time= 0.0s\n[CV 3/5] END ..n_neighbors=15, weights=distance;, score=0.830 total time= 0.0s\n[CV 4/5] END ..n_neighbors=15, weights=distance;, score=0.808 total time= 0.0s\n[CV 5/5] END ..n_neighbors=15, weights=distance;, score=0.812 total time= 0.0s\n[CV 1/5] END ...n_neighbors=17, weights=uniform;, score=0.838 total time= 0.0s\n[CV 2/5] END ...n_neighbors=17, weights=uniform;, score=0.811 total time= 0.0s\n[CV 3/5] END ...n_neighbors=17, weights=uniform;, score=0.830 total time= 0.0s\n[CV 4/5] END ...n_neighbors=17, weights=uniform;, score=0.803 total time= 0.0s\n[CV 5/5] END ...n_neighbors=17, weights=uniform;, score=0.813 total time= 0.0s\n[CV 1/5] END ..n_neighbors=17, weights=distance;, score=0.838 total time= 0.0s\n[CV 2/5] END ..n_neighbors=17, weights=distance;, score=0.810 total time= 0.0s\n[CV 3/5] END ..n_neighbors=17, weights=distance;, score=0.829 total time= 0.0s\n[CV 4/5] END ..n_neighbors=17, weights=distance;, score=0.806 total time= 0.0s\n[CV 5/5] END ..n_neighbors=17, weights=distance;, score=0.813 total time= 0.0s\n[CV 1/5] END ...n_neighbors=19, weights=uniform;, score=0.834 total time= 0.0s\n[CV 2/5] END ...n_neighbors=19, weights=uniform;, score=0.810 total time= 0.0s\n[CV 3/5] END ...n_neighbors=19, weights=uniform;, score=0.837 total time= 0.0s\n[CV 4/5] END ...n_neighbors=19, weights=uniform;, score=0.805 total time= 0.0s\n[CV 5/5] END ...n_neighbors=19, weights=uniform;, score=0.820 
total time= 0.0s\n[CV 1/5] END ..n_neighbors=19, weights=distance;, score=0.835 total time= 0.0s\n[CV 2/5] END ..n_neighbors=19, weights=distance;, score=0.815 total time= 0.0s\n[CV 3/5] END ..n_neighbors=19, weights=distance;, score=0.835 total time= 0.0s\n[CV 4/5] END ..n_neighbors=19, weights=distance;, score=0.807 total time= 0.0s\n[CV 5/5] END ..n_neighbors=19, weights=distance;, score=0.821 total time= 0.0s\n" ], [ "print(grid.best_params_)\nprint(grid.best_score_)", "{'n_neighbors': 19, 'weights': 'distance'}\n0.822617323659756\n" ], [ "from sklearn.metrics import classification_report\npredictions = grid.predict(X_test_scaled)\nprint(classification_report(y_test, predictions,\n target_names=list(label_encoder.inverse_transform([0,1,2]))))", " precision recall f1-score support\n\n CANDIDATE 0.68 0.53 0.60 411\n CONFIRMED 0.67 0.77 0.72 484\nFALSE POSITIVE 0.98 1.00 0.99 853\n\n accuracy 0.83 1748\n macro avg 0.78 0.77 0.77 1748\n weighted avg 0.82 0.83 0.82 1748\n\n" ] ], [ [ "# Save the Model", "_____no_output_____" ] ], [ [ "# save your model by updating \"your_name\" with your name\n# and \"your_model\" with your model variable\n# be sure to turn this in to BCS\n# if joblib fails to import, try running the command to install in terminal/git-bash\nimport joblib\nfilename = 'knn_model.sav'\njoblib.dump(knn, filename)", "_____no_output_____" ] ] ]
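One caveat on the save step above: the fitted `MinMaxScaler` is needed at inference time too, so it is common to persist both objects together. A sketch reusing this notebook's `X_scaler`, `knn`, and `X_test`:

```python
import joblib

# Bundle the scaler with the model so new samples can be transformed
# exactly as the training data was before calling predict().
joblib.dump({"scaler": X_scaler, "model": knn}, "knn_pipeline.sav")

bundle = joblib.load("knn_pipeline.sav")
scaled = bundle["scaler"].transform(X_test)
preds = bundle["model"].predict(scaled)
```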
[ "raw", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "raw", "raw" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
cb9be07394c6b49493279394a4a267829481d8b6
13,419
ipynb
Jupyter Notebook
content/notebooks/vampyr-scf.ipynb
Gabrielgerez/multiwavelet-tutorial
4170f5771d7d067cfb11a975dd972a0a47498c2f
[ "CC-BY-4.0" ]
1
2022-03-07T12:22:57.000Z
2022-03-07T12:22:57.000Z
content/notebooks/vampyr-scf.ipynb
Gabrielgerez/multiwavelet-tutorial
4170f5771d7d067cfb11a975dd972a0a47498c2f
[ "CC-BY-4.0" ]
8
2021-10-14T12:29:36.000Z
2022-01-19T11:42:21.000Z
content/notebooks/vampyr-scf.ipynb
Gabrielgerez/multiwavelet-tutorial
4170f5771d7d067cfb11a975dd972a0a47498c2f
[ "CC-BY-4.0" ]
3
2021-10-14T18:17:06.000Z
2022-01-17T10:08:47.000Z
35.784
261
0.556226
[ [ [ "# V2: SCF optimization with VAMPyR", "_____no_output_____" ], [ "## V2.1: Hydrogen atom\n\nIn order to solve the one-electron Schr\\\"{o}dinger equation in MWs we reformulate them in an integral form [1].\n\n\\begin{equation}\n \\phi = -2\\hat{G}_{\\mu}\\hat{V}\\phi\n\\end{equation}\n\nWhere $\\hat{V}$ is the potential acting on the system, $\\phi$ is the wavefunction, $\\hat{G}$ is the Helmholtz integral operator, where its kernel is defined as $G_\\mu(r - r') = \\frac{\\exp(-\\mu |r - r'|)}{4\\pi |r - r'|}$\nand $\\mu$ is a parameter defined above through the energy.", "_____no_output_____" ], [ "The Helmholtz operator is already implemented in vampyr, therefore the only things you need are the integral KS equation and the definition of $\\mu$ \n\n\\begin{equation}\n \\mu = \\sqrt{-2E}\n\\end{equation}\n\nThe way you initialize the Helmholtz operator is as follows\n```\nH = vp.HelmholtzOperator( mra, exp=mu, prec=eps )\n```\nwhere `mu` is the $\\mu$ is the parameter defined above, mra you have seen before, and `eps` is the desired threshold precision. This operator is applied the same way you applied the vp.ScalingProjector earlier.", "_____no_output_____" ], [ "In this exercise you will be solving the KS equation iteratively for a simple system, the Hydrogen atom. This means that you only have the nuclear potential to take into account for the potential term in the KS equation.\n\n$$ V_{nuc}(\\mathbf{r}) = -\\frac{1}{|\\mathbf{r}|}$$\n\nWe will also be working with a single orbital, of which the initial guess is\n\n$$ \\phi_0(\\mathbf{r}) = e^{-|\\mathbf{r}|^2} $$\nwhere \n$$ |\\mathbf{r}| = \\sqrt{x^2 + y^2 + z^2}$$\n", "_____no_output_____" ], [ "The orbital update is defined as follows\n\\begin{align}\n \\Delta\\tilde{\\phi}^n &= -2\\hat{G}[V_{nuc}\\phi^n] - \\phi^n \\\\\n \\Delta\\tilde{\\phi}^n &= \\tilde{\\phi}^{n+1} - \\phi^n\n\\end{align}\nwhere we use \\~ to denote a function that is **not** normalized, and $n$ is the iteration index.", "_____no_output_____" ], [ "#### Implementation exercise:\n\n1. Make a nuclear potential as a python function `f_nuc(r)`\n2. Make an initial guess for the orbital as a python function `f_phi(r)` (hint use `np.exp` to get an exponetial function)\n3. Create a Helmholtz operator $G_\\mu$ with $\\mu$ as shown above, use the exact value of $E = -0.5 a.u.$ for a hydrogen atom\n4. Project both nuclear potential ($V$) and orbital ($\\phi_n$) to the MW basis using a `vp.ScalingProjector` with precision $\\epsilon=1.0e-3$\n5. Compute new orbital through application of the Helmholtz operator\n6. Compute the size of the orbital update $||\\tilde{\\phi}^{n+1} - \\phi^n||$\n7. Normalize the orbital $\\phi^{n+1} = \\tilde{\\phi}^{n+1}/||\\tilde{\\phi}^{n+1}||$\n8. Update orbital $\\phi^{n+1} \\rightarrow \\phi^{n}$ for next iteration\n9. 
Repeat steps 5-8 until your wavefunction has converged\n\nThe convergence criterion is the norm of $\\Delta \\phi^n$, but you should start by looping a set amount of times before trying the threshold.", "_____no_output_____" ] ], [ [ "from vampyr import vampyr3d as vp\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nr_x = np.linspace(-0.99, 0.99, 1000) # create an evenly spaced set of points between -0.99 and 0.99\nr_y = np.zeros(1000)\nr_z = np.zeros(1000)\nr = [r_x, r_y, r_z]\n\n# Analytic nuclear potential\ndef f_nuc(r):\n # TODO: implement the nuclear potential\n return\n\n# Analytic guess for solution\ndef f_phi(r):\n # TODO: implement the initial guess for the orbital\n return\n\n# Prepare Helmholtz operator\nE = -0.5\nmu = np.sqrt(-2*E)\nG_mu = # TODO: Construct BSH operator from mu)\n\nV = # TODO: Project nuclear potential V from f_nuc\nphi_n = # TODO: Project starting guess phi_n from f_phi\nphi_n.normalize()\n\n# Optimization loop\nthrs = 1.0e-3\nupdate = 1.0\ni = 0\nwhile (i < 3): # switch to (update > thrs) later\n # TODO:\n # Compute product of potential V and wavefunction phi_n\n # Apply Helmholtz operator to obtain phi_np1\n # Compute norm = ||phi^{n+1}||\n # Compute update = ||phi^{n+1} - phi^{n}||\n \n # this will plot the wavefunction at each iteration\n phi_n_plt = [phi_n([x, 0.0, 0.0]) for x in r_x]\n plt.plot(r_x, phi_n_plt) \n \n # this will print some info, you need to compute in the loop:\n print(\"iteration: {} Norm: {} Update: {}\".format(i, norm, update))\n i += 1\n\nplt.show()", "_____no_output_____" ] ], [ [ "## V2.2 Extension to Helium", "_____no_output_____" ], [ "A few things change when you go from Hydrogen to Helium:\n1. The energy is no longer known exactly, and thus will have to be computed from the wavefunction\n2. The Helmholtz operator which depends on the energy through $\\mu = \\sqrt{-2E}$ needs to be updated in every iteration\n3. The potential operator $V$ depends on the wavefunction and must be updated in every iteration", "_____no_output_____" ], [ "In this example we will use the Hartree-Fock model, which for a single-orbital system like Helium, reduces to the following potential operator:\n\\begin{align}\n \\hat{V} &= \\hat{V}_{nuc} + 2\\hat{J} - \\hat{K}\\\\\n &= \\hat{V}_{nuc} + \\hat{J}\n\\end{align}\nsince $\\hat{K} = \\hat{J}$ for a doubly occupied single orbital.", "_____no_output_____" ], [ "The Coulomb potential $\\hat{J}$ can be computed by application of the Poisson operator $P$:\n\\begin{equation}\n \\hat{J}(r) = P\\left[4\\pi\\rho\\right]\n\\end{equation}\nWhere $\\rho$ is the square of the orbital\n\\begin{equation}\n \\rho = \\phi*\\phi\n\\end{equation}", "_____no_output_____" ], [ "#### Pen and paper exercise:\n\nUse the fact that\n\\begin{equation}\n \\tilde{\\phi}^{n+1} = -\\Big[\\hat{T} - E^n\\Big]^{-1} V^n\\phi^n \\end{equation}\nto show that\n\\begin{equation}\n E^{n+1} = \\frac{\\langle\\tilde{\\phi}^{n+1}|\\hat{T} +\n \\hat{V}^{n+1}|\\tilde{\\phi}^{n+1}\\rangle}\n {||\\tilde{\\phi}^{n+1}||^2}\n\\end{equation}\ncan be written as a pure update $dE^n$ involving only the potentials $\\hat{V}^{n+1}$, $\\hat{V}^n$ as well as the orbitals $\\tilde{\\phi}^{n+1}$ and $\\phi^n$\n\\begin{equation}\n E^{n+1} = E^{n} + dE^n\n\\end{equation}\n", "_____no_output_____" ], [ "#### Implementation exercise:\n1. Make a nuclear potential function `f_nuc(r)` for the Helium atom\n2. Make an initial guess for the orbital as a python function `f_phi(r)` (hint use `np.exp` to get an exponetial function)\n3. 
Project both nuclear potential ($V$) and orbital ($\\phi_n$) to the MW basis using a `vp.ScalingProjector` with precision $\\epsilon=1.0e-3$\n4. Create a Helmholtz operator $G^n$ with $\\mu^n$ using the current energy $E^n$\n5. Compute total potential $\\hat{V^n} = \\hat{V}_{nuc} + \\hat{J^n}$, where the Coulomb potential is computed using the `vp.PoissonOperator` on the current squared orbital $\\rho^n = ||\\phi^n||^2$\n6. Compute new orbital through application of the Helmholtz operator on $\\phi^{n+1} = -2\\hat{G}^n\\hat{V}^n\\phi^n$\n7. Compute the size of the orbital update $||\\tilde{\\phi}^{n+1} - \\phi^n||$\n8. Normalize the orbital $\\phi^{n+1} = \\tilde{\\phi}^{n+1}/||\\tilde{\\phi}^{n+1}||$\n9. Update orbital $\\phi^{n+1} \\rightarrow \\phi^{n}$ for next iteration\n10. Repeat steps 4-9 until your wavefunction has converged\n\nThe convergence criterion is the norm of $\\Delta \\phi^n$, but you should start by looping a set amount of times before trying the threshold.", "_____no_output_____" ] ], [ [ "from vampyr import vampyr3d as vp\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nr_x = np.linspace(-0.99, 0.99, 1000) # create an evenly spaced set of points between -0.99 and 0.99\nr_y = np.zeros(1000)\nr_z = np.zeros(1000)\nr = [r_x, r_y, r_z]\n\n# Analytic nuclear potential Helium\ndef f_nuc(r):\n #implement the nuclear potential\n return\n\n# Analytic guess for solution (same as for Hydrogen)\ndef f_phi(r):\n # implement the initial guess for the orbital\n return\n\n\n# TODO:\n# Project nuclear potential V_nuc from f_nuc\n# Project starting guess phi_n from f_phi\n# Set a starting guess E_n for the energy\n\n\n# Optimization loop\nthrs = 1.0e-3\nupdate = 1.0\ni = 0\nwhile (i < 3): # switch to (update > thrs) later\n \n # Prepare Helmholtz operator from current energy\n mu_n = np.sqrt(-2*E_n)\n G_n = # TODO: Construct BSH operator from mu_n)\n \n # TODO:\n # Compute rho\n # Initialize vp.PoissonOperator and compute J\n # Compute total potential V = V_nuc + J\n # Iterate Helmholtz operator to get new orbital phi^{n+1}\n \n dE_n = # TODO: insert energy expression from above\n\n # Prepare for next iteration\n E_n += dE_n\n phi_n += dPhi_n\n \n # This will plot the wavefunction at each iteration\n phi_n_plt = [phi_n([x, 0.0, 0.0]) for x in r_x]\n plt.plot(r_x, phi_n_plt) \n \n # this will print some info, you need to compute in the loop:\n # norm = ||phi^{n+1}||\n # update = ||phi^{n+1} - phi^{n}||\n print(\"iteration: {} Energy: {} Norm: {} Update: {}\".format(i, E_n, norm, update))\n i += 1\n\n \nplt.show()", "_____no_output_____" ] ], [ [ "You should expect the orbital energy to converge towards\n$E_n \\approx -0.918$.", "_____no_output_____" ], [ "#### Bonus exercise:\nThe total energy can be computed after convergence as\n$E_{tot} = 2E_n - \\langle\\rho|J\\rangle$, should be around $E_{tot} \\approx -2.86$.", "_____no_output_____" ], [ "\n## Sources\n\n\n[1] Stig Rune Jensen, Santanu Saha, José A. Flores-Livas, William Huhn, Volker Blum, Stefan Goedecker, and Luca Frediani The Elephant in the Room of Density Functional Theory Calculations. The Journal of Physical Chemistry Letters 2017 8 (7), 1449-1457\nDOI: 10.1021/acs.jpclett.7b00255\n", "_____no_output_____" ] ] ]
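For reference, one possible shape of the hydrogen loop from exercise V2.1 (spoiler warning). This is a sketch, not the reference solution: it assumes an `mra` object built as in the earlier notebooks, that `vp.ScalingProjector` takes `(mra, prec=...)`, and that function trees support `*`, `-`, and scalar multiplication as the hints above suggest.

```python
import numpy as np
from vampyr import vampyr3d as vp

eps = 1.0e-3
P_eps = vp.ScalingProjector(mra, prec=eps)  # assumed signature

V = P_eps(lambda r: -1.0 / np.sqrt(r[0]**2 + r[1]**2 + r[2]**2))
phi_n = P_eps(lambda r: np.exp(-(r[0]**2 + r[1]**2 + r[2]**2)))
phi_n.normalize()

# Exact hydrogen energy E = -0.5 a.u., so mu = sqrt(-2E) = 1
G_mu = vp.HelmholtzOperator(mra, exp=np.sqrt(-2 * (-0.5)), prec=eps)

update = 1.0
while update > 1.0e-3:
    phi_np1 = -2 * G_mu(V * phi_n)     # integral form of the KS equation
    update = (phi_np1 - phi_n).norm()  # ||phi~^{n+1} - phi^{n}||
    phi_np1.normalize()
    phi_n = phi_np1
```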
[ "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ] ]
cb9bf268254e4827004522b7ab314c0c0313c608
60,800
ipynb
Jupyter Notebook
predicoes/predictflightdelays.ipynb
silvioedu/MachineLearning-Practice
0acc998f435f99eaa6bf5daa533072fd4e62b531
[ "MIT" ]
null
null
null
predicoes/predictflightdelays.ipynb
silvioedu/MachineLearning-Practice
0acc998f435f99eaa6bf5daa533072fd4e62b531
[ "MIT" ]
null
null
null
predicoes/predictflightdelays.ipynb
silvioedu/MachineLearning-Practice
0acc998f435f99eaa6bf5daa533072fd4e62b531
[ "MIT" ]
null
null
null
60,800
60,800
0.788569
[ [ [ "# Predição de atraso de voos\n\nhttps://docs.microsoft.com/en-us/learn/modules/predict-flight-delays-with-python/0-introduction", "_____no_output_____" ], [ "### Importando o arquivo", "_____no_output_____" ] ], [ [ "!curl https://topcs.blob.core.windows.net/public/FlightData.csv -o flightdata.csv", " % Total % Received % Xferd Average Speed Time Time Time Current\n Dload Upload Total Spent Left Speed\n100 1552k 100 1552k 0 0 833k 0 0:00:01 0:00:01 --:--:-- 833k\n" ], [ "import pandas as pd\ndf = pd.read_csv('flightdata.csv')\ndf.head()", "_____no_output_____" ], [ "observacoes, features = df.shape\nprint(\"O dataframe possui {} observacoes e {} features.\".format(observacoes, features))", "O dataframe possui 11231 observacoes e 26 features.\n" ] ], [ [ "* Column\tDescription\n* YEAR\tYear that the flight took place\n* QUARTER\tQuarter that the flight took place (1-4)\n* MONTH\tMonth that the flight took place (1-12)\n* DAY_OF_MONTH\tDay of the month that the flight took place (1-31)\n* DAY_OF_WEEK\tDay of the week that the flight took place (1=Monday, 2=Tuesday, etc.)\n* UNIQUE_CARRIER\tAirline carrier code (e.g., DL)\n* TAIL_NUM\tAircraft tail number\n* FL_NUM\tFlight number\n* ORIGIN_AIRPORT_ID\tID of the airport of origin\n* ORIGIN\tOrigin airport code (ATL, DFW, SEA, etc.)\n* DEST_AIRPORT_ID\tID of the destination airport\n* DEST\tDestination airport code (ATL, DFW, SEA, etc.)\n* CRS_DEP_TIME\tScheduled departure time\n* DEP_TIME\tActual departure time\n* DEP_DELAY\tNumber of minutes departure was delayed\n* DEP_DEL15\t0=Departure delayed less than 15 minutes, 1=Departure delayed 15 minutes or more\n* CRS_ARR_TIME\tScheduled arrival time\n* ARR_TIME\tActual arrival time\n* ARR_DELAY\tNumber of minutes flight arrived late\n* ARR_DEL15\t0=Arrived less than 15 minutes late, 1=Arrived 15 minutes or more late\n* CANCELLED\t0=Flight was not cancelled, 1=Flight was cancelled\n* DIVERTED\t0=Flight was not diverted, 1=Flight was diverted\n* CRS_ELAPSED_TIME\tScheduled flight time in minutes\n* ACTUAL_ELAPSED_TIME\tActual flight time in minutes\n* DISTANCE\tDistance traveled in miles", "_____no_output_____" ], [ "### Limpeza e preparação dos dados", "_____no_output_____" ] ], [ [ "# Possui valores nulos?\ndf.isnull().values.any()", "_____no_output_____" ], [ "# Quais colunas?\ndf.isnull().sum()", "_____no_output_____" ], [ "# A coluna 'Unnamed: 25' não possui valores para nenhuma observação, então vou excluir\ndf = df.drop('Unnamed: 25', axis=1)", "_____no_output_____" ], [ "# Selecionar dados para trabalhar\ndf = df[[\"MONTH\", \"DAY_OF_MONTH\", \"DAY_OF_WEEK\", \"ORIGIN\", \"DEST\", \"CRS_DEP_TIME\", \"ARR_DEL15\"]]\ndf.isnull().sum()", "_____no_output_____" ], [ "# Verificar alguns valores nulos\ndf[df.isnull().values.any(axis=1)].head()", "_____no_output_____" ], [ "# Como os valores nulos correspondem a voos atrasados, vamos sinalizá-los como tal\ndf = df.fillna({'ARR_DEL15': 1})", "_____no_output_____" ], [ "# Como a coluna CRS_DEP_TIME possui horário mas sem o formato apropriado, vamos vazer uma conta para diminuir o range e evitar afetar as análises\nimport math\n\nfor index, row in df.iterrows():\n df.loc[index, 'CRS_DEP_TIME'] = math.floor(row['CRS_DEP_TIME'] / 100)\ndf.head()", "_____no_output_____" ], [ "# Tratando colunas Origem e Destino devido a serem valores categóricos\ndf = pd.get_dummies(df, columns=['ORIGIN', 'DEST'])\ndf.head()", "_____no_output_____" ] ], [ [ "### Realizando a separação dos dados em treino e teste", "_____no_output_____" ] ], [ [ "from 
sklearn.model_selection import train_test_split\ntrain_x, test_x, train_y, test_y = train_test_split(df.drop('ARR_DEL15', axis=1), df['ARR_DEL15'], test_size=0.2, random_state=42)\nprint(\"{} observações para treino e {} observações para teste\".format(train_x.shape[0],test_x.shape[0]))", "8984 observações para treino e 2247 observações para teste\n" ] ], [ [ "### Realizando o treinamento com o RandomForest", "_____no_output_____" ] ], [ [ "from sklearn.ensemble import RandomForestClassifier\n\nmodel = RandomForestClassifier(random_state=13)\nmodel.fit(train_x, train_y)", "_____no_output_____" ], [ "# Predição\npredicted = model.predict(test_x)", "_____no_output_____" ] ], [ [ "### Verificando métricas da predição", "_____no_output_____" ] ], [ [ "from sklearn.metrics import roc_auc_score\nfrom sklearn.metrics import precision_score\nfrom sklearn.metrics import recall_score\n\npred_accuracy = model.score(test_x, test_y)\nprint(\"Acurácia {}\".format(pred_accuracy))\n\nprobabilities = model.predict_proba(test_x)\npred_roc = roc_auc_score(test_y, probabilities[:, 1])\nprint(\"Curva ROC {}\".format(pred_roc))\n\ntrain_predictions = model.predict(train_x)\npred_precision = precision_score(train_y, train_predictions)\nprint(\"Precisão {}\".format(pred_precision))\n\npred_recall = recall_score(train_y, train_predictions)\nprint(\"Recall {}\".format(pred_recall))\n", "Acurácia 0.8642634623943035\nCurva ROC 0.7014819895830565\nPrecisão 1.0\nRecall 0.9992012779552716\n" ], [ "from sklearn.metrics import confusion_matrix\ncf_matrix = confusion_matrix(test_y, predicted)\nprint(\"Flights on time:\")\nprint(\" - and predicted on time:\", cf_matrix[0][0])\nprint(\" - but predicted delayed:\", cf_matrix[0][1])\nprint(\"Flights delayed:\")\nprint(\" - but predicted on time:\", cf_matrix[1][0])\nprint(\" - and predicted delayed:\", cf_matrix[1][1])", "Flights on time:\n - and predicted on time: 1903\n - but predicted delayed: 33\nFlights delayed:\n - but predicted on time: 272\n - and predicted delayed: 39\n" ] ], [ [ "### Visualizando a saída do modelo preditivo", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nsns.set()", "_____no_output_____" ], [ "from sklearn.metrics import roc_curve\n\nfpr, tpr, _ = roc_curve(test_y, probabilities[:, 1])\nplt.plot(fpr, tpr)\nplt.plot([0, 1], [0, 1], color='grey', lw=1, linestyle='--')\nplt.xlabel('False Positive Rate')\nplt.ylabel('True Positive Rate')", "_____no_output_____" ] ], [ [ "### Expondo o modelo", "_____no_output_____" ] ], [ [ "def predict_delay(departure_date_time, origin, destination):\n from datetime import datetime\n\n try:\n departure_date_time_parsed = datetime.strptime(departure_date_time, '%d/%m/%Y %H:%M:%S')\n except ValueError as e:\n return 'Error parsing date/time - {}'.format(e)\n\n month = departure_date_time_parsed.month\n day = departure_date_time_parsed.day\n day_of_week = departure_date_time_parsed.isoweekday()\n hour = departure_date_time_parsed.hour\n\n origin = origin.upper()\n destination = destination.upper()\n\n input = [{'MONTH': month,\n 'DAY': day,\n 'DAY_OF_WEEK': day_of_week,\n 'CRS_DEP_TIME': hour,\n 'ORIGIN_ATL': 1 if origin == 'ATL' else 0,\n 'ORIGIN_DTW': 1 if origin == 'DTW' else 0,\n 'ORIGIN_JFK': 1 if origin == 'JFK' else 0,\n 'ORIGIN_MSP': 1 if origin == 'MSP' else 0,\n 'ORIGIN_SEA': 1 if origin == 'SEA' else 0,\n 'DEST_ATL': 1 if destination == 'ATL' else 0,\n 'DEST_DTW': 1 if destination == 'DTW' else 0,\n 'DEST_JFK': 1 if destination == 'JFK' else 0,\n 
'DEST_MSP': 1 if destination == 'MSP' else 0,\n 'DEST_SEA': 1 if destination == 'SEA' else 0 }]\n\n return model.predict_proba(pd.DataFrame(input))[0][0]", "_____no_output_____" ] ], [ [ "### Executando testes do modelo", "_____no_output_____" ] ], [ [ "predict_delay('1/10/2018 21:45:00', 'JFK', 'ATL')", "_____no_output_____" ], [ "import numpy as np\n\nlabels = ('Oct 1', 'Oct 2', 'Oct 3', 'Oct 4', 'Oct 5', 'Oct 6', 'Oct 7')\nvalues = (predict_delay('1/10/2018 21:45:00', 'JFK', 'ATL'),\n predict_delay('2/10/2018 21:45:00', 'JFK', 'ATL'),\n predict_delay('3/10/2018 21:45:00', 'JFK', 'ATL'),\n predict_delay('4/10/2018 21:45:00', 'JFK', 'ATL'),\n predict_delay('5/10/2018 21:45:00', 'JFK', 'ATL'),\n predict_delay('6/10/2018 21:45:00', 'JFK', 'ATL'),\n predict_delay('7/10/2018 21:45:00', 'JFK', 'ATL'))\nalabels = np.arange(len(labels))\n\nplt.bar(alabels, values, align='center', alpha=0.5)\nplt.xticks(alabels, labels)\nplt.ylabel('Probability of On-Time Arrival')\nplt.ylim((0.0, 1.0))", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
cb9bf86583e08d2a7d661fb71ff868cdaf584dd3
3,107
ipynb
Jupyter Notebook
02-configuracion.ipynb
map0logo/git-novice-nb-es
e33fec766286dd242972f254f62e5347d7254255
[ "Unlicense" ]
null
null
null
02-configuracion.ipynb
map0logo/git-novice-nb-es
e33fec766286dd242972f254f62e5347d7254255
[ "Unlicense" ]
null
null
null
02-configuracion.ipynb
map0logo/git-novice-nb-es
e33fec766286dd242972f254f62e5347d7254255
[ "Unlicense" ]
null
null
null
36.552941
162
0.568394
[ [ [ "# Configurando Git\n\nCuando usamos Git por primera vez en una computadora necesitamos configurar algunas cosas.\n\nAquí está como Dracula configuró su nuevo laptop:\n\n```bash\ngit config --global user.name \"Vlad Dracula\"\ngit config --global user.email \"[email protected]\"\ngit config --global color.ui \"auto\"\n```\n\n(Por favor utiliza tu propio nombre y dirección email en lugar del de Dracula.)\n\nÉl también configuró su editor de texto favorito, siguiendo esta tabla:\n\n| Editor | Comando de configuración |\n|:-------------------|:-------------------------------------------------|\n| nano | `git config --global core.editor \"nano -w\"` |\n| Text Wrangler | `git config --global core.editor \"edit -w\"` |\n| Sublime Text (Mac) | `git config --global core.editor \"subl -n -w\"` |\n| Sublime Text (Win) | `git config --global core.editor \"'c:/program files/sublime text 2/sublime_text.exe' -w\"` |\n| Notepad++ (Win) | `git config --global core.editor \"'c:/program files (x86)/Notepad++/notepad++.exe' -multiInst -notabbar -nosession -noPlugin\"`|\n| Kate (Linux) | `git config --global core.editor \"kate\"` |\n| Gedit (Linux) | `git config --global core.editor \"gedit -s -w\"` |\n\nLos comandos de Git se escriben `git verbo`, donde `verbo` es lo que queremos que haga.\nEn este caso, le estamos diciendo a Git:\n\n* nuestro nombre y dirección email,\n* que coloree la salida,\n* cual es nuestro editor de texto favorito, y\n* que queremos utilizar estas configuraciones globalmente (esto es, para todos los proyectos),\n\nSolo necesitamos ejecutar los cuatro comandos de arriba una sola vez: la bandera `--global` le dice a Git\nque use estas configuraciones para todos los proyectos, en tu cuenta de usuario, en esta computadora.\n\nPuedes verificar tu configuración en cualquier momento:\n\n```bash\ngit config --list\n```\n\nPuedes cambiar tu configuración tantas veces como quieras: tan solo utiliza los mismos comandos para escoger otro editor o actualizar tu dirección de email.", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown" ] ]
cb9bfa63d3f86b47fc61a3f9d1952bdf61bbcf5d
6,223
ipynb
Jupyter Notebook
research_process/Graph_creation.ipynb
Bahusson/ai
bfc2427b86d6fb50f344937d8c2e8ce341c4aea4
[ "MIT" ]
null
null
null
research_process/Graph_creation.ipynb
Bahusson/ai
bfc2427b86d6fb50f344937d8c2e8ce341c4aea4
[ "MIT" ]
null
null
null
research_process/Graph_creation.ipynb
Bahusson/ai
bfc2427b86d6fb50f344937d8c2e8ce341c4aea4
[ "MIT" ]
null
null
null
19.030581
72
0.482886
[ [ [ "import tensorflow as tf", "_____no_output_____" ], [ "graph = tf.get_default_graph()", "_____no_output_____" ], [ "graph.get_operations()", "_____no_output_____" ], [ "#Tworzenie stałej widocznej na Tensor Board\na = tf.constant(10,name=\"a\")\n#Tworzymy stałą 'a' o nazwie 'a', \n#która dzięki parametrowi name='a'\n#będzie widoczna w TensorBoard.", "_____no_output_____" ], [ "#Pokazuje jakie działania mamy przypisane do grafu.\noperations = graph.get_operations()\noperations\n#Na razie jak widać jedną stałą.", "_____no_output_____" ], [ "b = tf.constant(20,name=\"b\")", "_____no_output_____" ], [ "operations = graph.get_operations()\noperations\n#Dwie stałe...", "_____no_output_____" ], [ "#Tworzenie nowej stałej \"c\" z sumy \"a\" i \"b\".\nc = tf.add(a,b,name=\"c\")", "_____no_output_____" ], [ "c", "_____no_output_____" ], [ "operations = graph.get_operations()\noperations\n#Poniżej widać wszystkie działania jakie wykonujemy podczas sesji.", "_____no_output_____" ], [ "# Tworzenie stałej \"d\" z mnożenia axb.\nd = tf.multiply(a,b,name=\"d\")", "_____no_output_____" ], [ "operations = graph.get_operations()\noperations", "_____no_output_____" ], [ "e = tf.multiply(c,d,name=\"e\")\noperations = graph.get_operations()\noperations", "_____no_output_____" ], [ "#W tym miejscu powinien iść kod inicjalizacji zmiennych\n#Ale pracujemy wyjątkowo na stałych więc nie jest potrzebny.\n#Kod jest w centralnym projekcie.", "_____no_output_____" ], [ "sess = tf.Session()", "_____no_output_____" ], [ "print(sess.run(e))", "6000\n" ], [ "#Pomaga się odnaleźć - drukuje zawartość grafu.\nfor op in graph.get_operations(): print(op.name)", "a\nb\nc\nd\ne\n" ], [ "sess.close()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb9bffc15bf164e667ce40e2406db5349db41836
691,015
ipynb
Jupyter Notebook
aviation/snow_rulelist.ipynb
ajmendez/explore
367e96fca29fef8a9afa490798b922fa0fc303e1
[ "MIT" ]
null
null
null
aviation/snow_rulelist.ipynb
ajmendez/explore
367e96fca29fef8a9afa490798b922fa0fc303e1
[ "MIT" ]
null
null
null
aviation/snow_rulelist.ipynb
ajmendez/explore
367e96fca29fef8a9afa490798b922fa0fc303e1
[ "MIT" ]
null
null
null
1,025.244807
615,609
0.939176
[ [ [ "import os\nimport glob\nimport LatLon \nimport numpy as np\nimport pandas as pd\npd.set_option('display.max_rows', 10)\n\n# plot\n%matplotlib inline\nimport matplotlib.pyplot as plt\nimport pylab\nimport seaborn as sns\nsns.set_style(\"whitegrid\")\nfrom pysurvey.plot import setup, legend, icolorbar, density, minmax\n\nimport geoplotlib\nimport geoplotlib.colors", "_____no_output_____" ], [ "clean = pd.DataFrame.from_csv('/Users/ajmendez/tmp/flight/flight_clean_3.csv')", "_____no_output_____" ], [ "clean['flightid'] = flights['flight']+'.'+flights['flightindex'].apply(str)", "_____no_output_____" ], [ "clean[clean['flightpoints'] > 100].groupby(['flight'], as_index=False).count().sort('date')", "/Users/ajmendez/.local/canopy/User/lib/python2.7/site-packages/ipykernel/__main__.py:1: FutureWarning: sort(columns=....) is deprecated, use sort_values(by=.....)\n if __name__ == '__main__':\n" ], [ "isgood = ( (clean['flight'] == 'A719AA') | \n# (clean['flight'] == 'A9C737') |\n# (clean['flight'] == 'A313B4') |\n (clean['flight'] == '406696') )\nflights = clean[isgood]\nprint len(flights)\n# flights.plot('flightindex', 'flightpoints', kind='scatter')\nflights.plot('lon', 'lat', kind='scatter', c='datenum', \n cmap=pylab.cm.Spectral, lw=0, alpha=0.5)\n\n# pylab.axhline(10)", "15371\n" ], [ "colors = geoplotlib.colors.create_set_cmap(flights['flightnum'], pylab.cm.jet)\n\ngeoplotlib.tiles_provider('darkmatter')\nfor fi in np.unique(flights['flightnum']):\n geoplotlib.scatter(flights[flights['flightnum'] == fi], color=colors[fi])\nbbox = geoplotlib.utils.BoundingBox(40.5,-78.0,38.5,-76)\ngeoplotlib.set_bbox(bbox)\ngeoplotlib.inline(800)", "_____no_output_____" ], [ "import sys\nsys.path.append('/Users/ajmendez/tmp/sklearn-expertsys/')\nimport RuleListClassifier\nreload(RuleListClassifier)\nfrom RuleListClassifier import *\nfrom sklearn.datasets.mldata import fetch_mldata\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.ensemble import RandomForestClassifier", "_____no_output_____" ], [ "feature_labels = [\"#Pregnant\",\"Glucose concentration test\",\"Blood pressure(mmHg)\",\"Triceps skin fold thickness(mm)\",\"2-Hour serum insulin (mu U/ml)\",\"Body mass index\",\"Diabetes pedigree function\",\"Age (years)\"]\ndata = fetch_mldata(\"diabetes\") # get dataset\n\ny = (data.target+1)/2\nXtrain, Xtest, ytrain, ytest = train_test_split(data.data, y) # split", "_____no_output_____" ], [ "clf = RuleListClassifier(max_iter=100, class1label=\"diabetes\", verbose=False)\nclf.fit2(Xtrain, ytrain, feature_labels=feature_labels)\nprint \"RuleListClassifier Accuracy:\", clf.score(Xtest, ytest), \"Learned interpretable model:\\n\", clf\n", "_____no_output_____" ], [ "clf = RuleListClassifier(max_iter=10000, class1label=\"diabetes\", verbose=False)\nclf.fit(Xtrain, ytrain, feature_labels=feature_labels)\nprint \"RuleListClassifier Accuracy:\", clf.score(Xtest, ytest), \"Learned interpretable model:\\n\", clf\nprint \"RandomForestClassifier Accuracy:\", RandomForestClassifier().fit(Xtrain, ytrain).score(Xtest, ytest)\n", "RuleListClassifier Accuracy: 0.697916666667 Learned interpretable model:\nTrained RuleListClassifier for detecting diabetes\n==================================================\nIF Glucose concentration test : -inf_to_99.5 THEN probability of diabetes: 8.4% (4.6%-13.3%)\nELSE IF Body mass index : -inf_to_27.8499995 THEN probability of diabetes: 15.5% (9.4%-22.7%)\nELSE IF 2-Hour serum insulin (mu U/ml) : 36.5_to_119.5 THEN probability of diabetes: 20.0% (9.8%-32.7%)\nELSE 
IF Glucose concentration test : 167.0_to_inf THEN probability of diabetes: 92.9% (84.9%-98.0%)\nELSE IF #Pregnant : 6.5_to_inf THEN probability of diabetes: 75.4% (64.0%-85.3%)\nELSE IF Glucose concentration test : 99.5_to_130.5 THEN probability of diabetes: 32.3% (23.4%-41.9%)\nELSE probability of diabetes: 54.4% (42.6%-66.0%)\n=================================================\n\nRandomForestClassifier Accuracy: 0.734375\n" ], [ "feature_labels = ['lat', 'lon', 'alt', 'datenum']\nXtrain = flights[feature_labels]\nytrain = flights['flight'] == 'A719AA'\nclf = RuleListClassifier(max_iter=1000, \n class1label=\"flight\", \n verbose=False)\nclf.fit(Xtrain, ytrain, feature_labels=feature_labels)\n", "_____no_output_____" ], [ "print \"RuleListClassifier Accuracy:\", clf.score(Xtrain, ytrain), \"Learned interpretable model:\\n\", clf\nprint \"RandomForestClassifier Accuracy:\", RandomForestClassifier().fit(Xtrain, ytrain).score(Xtrain, ytrain)", " RuleListClassifier Accuracy: 0.982499512068 Learned interpretable model:\nTrained RuleListClassifier for detecting flight\n================================================\nIF alt : -inf_to_32962.5 THEN probability of flight: 0.0% (0.0%-0.1%)\nELSE IF lon : -77.015515_to_-76.948455 THEN probability of flight: 100.0% (99.8%-100.0%)\nELSE IF datenum : 11.4166205208_to_11.8940003819 THEN probability of flight: 0.6% (0.0%-2.0%)\nELSE IF lon : -76.948455_to_-76.8153 THEN probability of flight: 100.0% (99.9%-100.0%)\nELSE IF datenum : 13.6763937904_to_13.8865028008 AND lat : 39.474705_to_39.782185 THEN probability of flight: 0.3% (0.0%-1.2%)\nELSE IF datenum : 4.07091896405_to_4.9111564004 THEN probability of flight: 0.4% (0.0%-1.4%)\nELSE IF datenum : 8.23224320595_to_9.6481313657 THEN probability of flight: 99.8% (99.3%-100.0%)\nELSE IF datenum : 3.72189303814_to_3.91504727426 THEN probability of flight: 0.5% (0.0%-1.9%)\nELSE IF lon : -77.204215_to_-77.039575 THEN probability of flight: 99.8% (99.2%-100.0%)\nELSE IF datenum : 1.40955446754_to_3.01352711225 THEN probability of flight: 99.8% (99.2%-100.0%)\nELSE IF alt : 36962.5_to_37012.5 THEN probability of flight: 99.9% (99.7%-100.0%)\nELSE IF lon : -77.51456_to_-77.25565 THEN probability of flight: 0.6% (0.0%-2.4%)\nELSE IF lon : -77.808515_to_-77.51456 THEN probability of flight: 0.7% (0.0%-2.5%)\nELSE IF lat : 39.474705_to_39.782185 THEN probability of flight: 99.1% (96.9%-100.0%)\nELSE IF lat : 39.327735_to_39.407935 THEN probability of flight: 1.5% (0.0%-5.4%)\nELSE IF lon : -76.639945_to_inf THEN probability of flight: 50.0% (2.5%-97.5%)\nELSE IF datenum : 13.1880469386_to_13.6763937904 THEN probability of flight: 92.3% (73.5%-99.8%)\nELSE IF datenum : 6.19013145251_to_6.87653632521 THEN probability of flight: 95.5% (83.9%-99.9%)\nELSE probability of flight: 52.9% (41.2%-64.4%)\n===============================================\n\nRandomForestClassifier Accuracy: 1.0\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb9c05024e4e0118ac4e1d81c85e9cd1ea6b054a
2,748
ipynb
Jupyter Notebook
examples/ChessBoard/.ipynb_checkpoints/Untitled-checkpoint.ipynb
MPF-Optimization-Laboratory/AtomicOpt.jl
a03f6a0ed152bad9b518548fafa936667deb8a67
[ "MIT" ]
1
2022-02-01T01:26:04.000Z
2022-02-01T01:26:04.000Z
examples/ChessBoard/.ipynb_checkpoints/Untitled-checkpoint.ipynb
ZhenanFanUBC/AtomicOpt.jl
a03f6a0ed152bad9b518548fafa936667deb8a67
[ "MIT" ]
null
null
null
examples/ChessBoard/.ipynb_checkpoints/Untitled-checkpoint.ipynb
ZhenanFanUBC/AtomicOpt.jl
a03f6a0ed152bad9b518548fafa936667deb8a67
[ "MIT" ]
null
null
null
21.809524
63
0.417758
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
cb9c065243816fbbe103025c83c5b92a7365fc2d
243,051
ipynb
Jupyter Notebook
Code/Scenario2 original.ipynb
thinkhow/Market-Prediction-with-Macroeconomics-features
feac711017739ea6ffe46a7fcac6b4b0c265e0b5
[ "MIT" ]
null
null
null
Code/Scenario2 original.ipynb
thinkhow/Market-Prediction-with-Macroeconomics-features
feac711017739ea6ffe46a7fcac6b4b0c265e0b5
[ "MIT" ]
1
2021-05-24T00:26:34.000Z
2021-05-24T00:26:34.000Z
Code/Scenario2 original.ipynb
thinkhow/Market-Prediction-with-Macroeconomics-features
feac711017739ea6ffe46a7fcac6b4b0c265e0b5
[ "MIT" ]
null
null
null
320.225296
111,020
0.917495
[ [ [ "import pandas as pd\nimport numpy as np\n\n## For plotting\nimport matplotlib.pyplot as plt\nfrom matplotlib import style\nimport datetime as dt\nimport seaborn as sns\nsns.set_style(\"whitegrid\")", "_____no_output_____" ], [ "path = '../Data/dff1.csv'", "_____no_output_____" ], [ "df= pd.read_csv(path, parse_dates=['ds'])\n# df = df.rename(columns = {\"Date\":\"ds\",\"Close\":\"y\"}) \ndf = df[['ds', 'y','fbsp', 'diff','tby', 'ffr', 'fta', 'eps', 'div', 'une', 'wti', 'ppi',\n 'rfs']]\n# df", "_____no_output_____" ], [ "df['fbsp_tby'] = df['fbsp'] * df['tby']\ndf['fbsp_ffr'] = df['fbsp'] * df['ffr']\ndf['fbsp_div'] = df['fbsp'] * df['div']\ndf['eps_tby'] = df['eps'] * df['tby']\ndf['eps_ffr'] = df['eps'] * df['ffr']\ndf['eps_div'] = df['eps'] * df['div']", "_____no_output_____" ], [ "# cutoff between test and train data\ncutoff = len(df) - 252\ndf_train = df[:cutoff].copy()\ndf_test = df[cutoff:].copy()\nprint(cutoff)", "2300\n" ], [ "df_train.columns", "_____no_output_____" ], [ "possible_features = ['tby', 'ffr', 'fta', 'eps', 'div', 'une', 'wti',\n 'ppi', 'rfs', 'fbsp_tby', 'fbsp_ffr', 'fbsp_div', 'eps_tby',\n 'eps_ffr', 'eps_div']\n\nfrom itertools import chain, combinations\n\ndef powerset(iterable):\n #\"powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)\"\n s = list(iterable)\n return chain.from_iterable(combinations(s, r) for r in range(len(s)+1))\n\n#print(list(powerset(possible_features)))", "_____no_output_____" ], [ "len(possible_features)", "_____no_output_____" ], [ "from statsmodels.regression.linear_model import OLS\n\nreg_new = OLS((df_train['diff']).copy(),df_train[possible_features].copy()).fit()\nprint(reg_new.params)\n\n#from the output, we can see it's consistent with sklearn output", "tby 305.980303\nffr 183.608289\nfta 0.000065\neps -1.775823\ndiv -142.530009\nune -80.692625\nwti 3.378638\nppi -7.923858\nrfs 0.006087\nfbsp_tby -0.043153\nfbsp_ffr -0.146870\nfbsp_div -0.391168\neps_tby -2.122760\neps_ffr 2.390346\neps_div 3.950103\ndtype: float64\n" ], [ "new_coef = reg_new.params\nnew_possible_feats = new_coef[abs(new_coef)>0].index\n\npower_feats = list(powerset(new_possible_feats))\npower_feats.remove(())\n\npower_feats = [ list(feats) for feats in power_feats]\nlen(power_feats)\n", "_____no_output_____" ], [ "AIC_scores = []\nparameters = []\n\nfor feats in power_feats:\n tmp_reg = OLS((df_train['diff']).copy(),df_train[feats].copy()).fit()\n AIC_scores.append(tmp_reg.aic)\n parameters.append(tmp_reg.params)\n\n \nMin_AIC_index = AIC_scores.index(min(AIC_scores))\nMin_AIC_feats = power_feats[Min_AIC_index] \nMin_AIC_params = parameters[Min_AIC_index]\nprint(Min_AIC_feats)\nprint(Min_AIC_params) ", "['tby', 'ffr', 'fta', 'div', 'une', 'wti', 'ppi', 'rfs', 'fbsp_tby', 'fbsp_ffr', 'fbsp_div', 'eps_tby', 'eps_ffr', 'eps_div']\ntby 313.791358\nffr 187.937377\nfta 0.000057\ndiv -78.607290\nune -87.125044\nwti 3.455364\nppi -8.104355\nrfs 0.005916\nfbsp_tby -0.046476\nfbsp_ffr -0.132070\nfbsp_div -0.388510\neps_tby -2.157124\neps_ffr 2.033978\neps_div 3.252783\ndtype: float64\n" ], [ "len(Min_AIC_feats)", "_____no_output_____" ], [ "###After selecting the best features, we report the testing error, and make the plot \nAIC_df_test = df_test[Min_AIC_feats]\nAIC_pred_test = AIC_df_test.dot(Min_AIC_params)+df_test.fbsp\n\nAIC_df_train = df_train[Min_AIC_feats]\nAIC_pred_train = AIC_df_train.dot(Min_AIC_params)+ df_train.fbsp\n\n", "_____no_output_____" ], [ "from sklearn.metrics import mean_squared_error as MSE\n\nmse_train = MSE(df_train.y, 
AIC_pred_train) \nmse_test = MSE(df_test.y, AIC_pred_test)\n\n\n#compare with fbprophet()\n\nfb_mse_train = MSE(df_train.y, df_train.fbsp) \nfb_mse_test = MSE(df_test.y, df_test.fbsp)\n\n\nprint(mse_train,fb_mse_train)\n\nprint(mse_test,fb_mse_test)", "2543.6188296247346 22303.563608543613\n11928.655041711927 15247.912341091072\n" ], [ "df_train.ds", "_____no_output_____" ], [ "plt.figure(figsize=(18,10))\n\n# plot the training data\nplt.plot(df_train.ds,df_train.y,'b',\n label = \"Training Data\")\n\nplt.plot(df_train.ds, AIC_pred_train,'r-',\n label = \"Improved Fitted Values by Best_AIC\")\n\n# # plot the fit\nplt.plot(df_train.ds, df_train.fbsp,'g-',\n label = \"FB Fitted Values\")\n\n# # plot the forecast\nplt.plot(df_test.ds, df_test.fbsp,'g--',\n label = \"FB Forecast\")\nplt.plot(df_test.ds, AIC_pred_test,'r--',\n label = \"Improved Forecast by Best_AIC\")\nplt.plot(df_test.ds,df_test.y,'b--',\n label = \"Test Data\")\n\nplt.legend(fontsize=14)\n\nplt.xlabel(\"Date\", fontsize=16)\nplt.ylabel(\"S&P 500 Close Price\", fontsize=16)\n\nplt.show()", "_____no_output_____" ], [ "plt.figure(figsize=(18,10))\nplt.plot(df_test.y,label=\"Test Data\")\nplt.plot(df_test.fbsp,label=\"FB Forecast\")\nplt.plot(AIC_pred_test,label=\"Improved Forecast by Best_AIC\")\nplt.legend(fontsize = 14)\nplt.show()", "_____no_output_____" ], [ "column = ['tby', 'ffr', 'fta', 'eps', 'div', 'une',\n 'wti', 'ppi', 'rfs', 'fbsp_tby', 'fbsp_ffr', 'fbsp_div', 'eps_tby', 'eps_ffr', 'eps_div']", "_____no_output_____" ], [ "from sklearn import preprocessing\ndf1_train = df_train[['diff', 'tby', 'ffr', 'fta', 'eps', 'div', 'une', 'wti', 'ppi', 'rfs', 'fbsp_tby', 'fbsp_ffr', 'fbsp_div', 'eps_tby', 'eps_ffr', 'eps_div']]\n\nX = preprocessing.scale(df1_train)\nfrom statsmodels.regression.linear_model import OLS\n\nreg_new = OLS((X[:,0]).copy(),X[:,1:].copy()).fit()\nprint(reg_new.params)", "[ 1.50405129 1.03228322 0.27409454 1.17073571 0.31243092 -0.75747342\n 0.46988206 -0.39944639 2.10369448 -0.69112943 -2.1804296 -2.38576385\n -1.14196633 1.41832903 -0.34501927]\n" ], [ "# Before Covid\n# pd.Series(reg_new.params, index=['tby', 'ffr', 'fta', 'eps', 'div', 'une',\n# 'wti', 'ppi', 'rfs', 'fbsp_tby', 'fbsp_ffr', 'fbsp_div', 'eps_tby', 'eps_ffr', 'eps_div'] )", "_____no_output_____" ], [ "# before covid\ncoef1 = [ 1.50405129, 1.03228322, 0.27409454, 1.17073571, 0.31243092,\n -0.75747342, 0.46988206, -0.39944639, 2.10369448, -0.69112943,\n -2.1804296 , -2.38576385, -1.14196633, 1.41832903, -0.34501927]\n# include covid\ncoef2 = [ 0.65150054, 1.70457239, -0.1573802 , -0.18007979, -0.15221931,\n -0.62326075, 0.45065894, -0.38972706, 2.87210843, -1.17604495,\n -4.92858316, -2.15459111, 0.11418468, 2.74829778, 0.55520382]", "_____no_output_____" ], [ "# Include Covid\n# pd.Series( np.append( ['coefficients (before covid)'], np.round(coef1,3)), index= np.append(['features'], column) ) \n ", "_____no_output_____" ], [ "index = ['10 Year U.S. Treasury Bond Yield Rates (tby)', 'Federal Funds Rates (ffr)',\n 'Federal Total Assets (fta)', 'Earnings-Per-Share of S&P 500 (eps)', 'Dividend Yield of S&P 500 (div)',\n 'Unemployment Rates (une) ', 'West Texas Intermediate oil index (wti)', 'Producer Price Index (ppi)',\n 'Retail and Food Services Sales (rfs)', \n 'fbsp_tby', 'fbsp_ffr', 'fbsp_div', 'eps_tby', 'eps_ffr', 'eps_div'\n ]", "_____no_output_____" ], [ "len(index)", "_____no_output_____" ], [ "pd.Series(coef2, index =index)", "_____no_output_____" ], [ "df3 = pd.DataFrame(coef1, index = index, columns = ['coefficients (before covid)'])\ndf3['coefficients (include covid)'] =pd.Series(coef2, index =index)\ndf3", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb9c0771c603d0b61970bb8495e13e3522b8a13c
52,772
ipynb
Jupyter Notebook
modulo_1/M1-part2.ipynb
Wan-essa/curso-de-Python
43d09888174e490d30c0699baee0d59447aee113
[ "MIT" ]
null
null
null
modulo_1/M1-part2.ipynb
Wan-essa/curso-de-Python
43d09888174e490d30c0699baee0d59447aee113
[ "MIT" ]
null
null
null
modulo_1/M1-part2.ipynb
Wan-essa/curso-de-Python
43d09888174e490d30c0699baee0d59447aee113
[ "MIT" ]
null
null
null
53.412955
1,232
0.542617
[ [ [ "# Escola de Dados - Python para inovação cívica\n## Módulo 1: Por dentro do python\n\n- [Aula 6](#Aula-6:-Carregando-pacotes)\n- [Aula 7](#Aula-7:-Carregando-tabelas)\n- [Aula 8](#Aula-8:-Carregando-várias-tabelas)\n\n\n### Aula 6: Carregando pacotes\n\nInstalaremos todos os pacotes que usaremos nesse módulo; Pandas, Matplotlib, Numpy, Jupiter Widgets e Pandas Profiling", "_____no_output_____" ] ], [ [ "pip install pandas", "Requirement already satisfied: pandas in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (1.3.0)\nRequirement already satisfied: numpy>=1.17.3 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from pandas) (1.21.1)\nRequirement already satisfied: python-dateutil>=2.7.3 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from pandas) (2.8.2)\nRequirement already satisfied: pytz>=2017.3 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from pandas) (2021.1)\nRequirement already satisfied: six>=1.5 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from python-dateutil>=2.7.3->pandas) (1.16.0)\nNote: you may need to restart the kernel to use updated packages.\n" ], [ "pip install matplotlib", "Requirement already satisfied: matplotlib in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (3.4.2)\nRequirement already satisfied: python-dateutil>=2.7 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from matplotlib) (2.8.2)\nRequirement already satisfied: cycler>=0.10 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from matplotlib) (0.10.0)\nRequirement already satisfied: kiwisolver>=1.0.1 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from matplotlib) (1.3.1)\nRequirement already satisfied: numpy>=1.16 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from matplotlib) (1.21.1)\nRequirement already satisfied: pyparsing>=2.2.1 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from matplotlib) (2.4.7)\nRequirement already satisfied: pillow>=6.2.0 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from matplotlib) (8.3.1)\nRequirement already satisfied: six in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from cycler>=0.10->matplotlib) (1.16.0)\nNote: you may need to restart the kernel to use updated packages.\n" ], [ "pip install numpy", "Requirement already satisfied: numpy in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (1.21.1)\nNote: you may need to restart the kernel to use updated packages.\n" ], [ "pip install ipywidgets", "Requirement already satisfied: ipywidgets in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (7.6.3)\nRequirement already satisfied: ipykernel>=4.5.1 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from ipywidgets) (6.0.3)\nRequirement already satisfied: jupyterlab-widgets>=1.0.0 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from ipywidgets) (1.0.0)\nRequirement already satisfied: traitlets>=4.3.1 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from ipywidgets) (5.0.5)\nRequirement 
already satisfied: ipython>=4.0.0 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from ipywidgets) (7.25.0)\nRequirement already satisfied: nbformat>=4.2.0 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from ipywidgets) (5.1.3)\nRequirement already satisfied: widgetsnbextension~=3.5.0 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from ipywidgets) (3.5.1)\nRequirement already satisfied: debugpy<2.0,>=1.0.0 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from ipykernel>=4.5.1->ipywidgets) (1.4.1)\nRequirement already satisfied: tornado<7.0,>=4.2 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from ipykernel>=4.5.1->ipywidgets) (6.1)\nRequirement already satisfied: jupyter-client<7.0 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from ipykernel>=4.5.1->ipywidgets) (6.1.12)\nRequirement already satisfied: matplotlib-inline<0.2.0,>=0.1.0 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from ipykernel>=4.5.1->ipywidgets) (0.1.2)\nRequirement already satisfied: pickleshare in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from ipython>=4.0.0->ipywidgets) (0.7.5)\nRequirement already satisfied: setuptools>=18.5 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from ipython>=4.0.0->ipywidgets) (57.0.0)\nRequirement already satisfied: decorator in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from ipython>=4.0.0->ipywidgets) (4.4.2)\nRequirement already satisfied: prompt-toolkit!=3.0.0,!=3.0.1,<3.1.0,>=2.0.0 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from ipython>=4.0.0->ipywidgets) (3.0.19)\nRequirement already satisfied: pygments in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from ipython>=4.0.0->ipywidgets) (2.9.0)\nRequirement already satisfied: jedi>=0.16 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from ipython>=4.0.0->ipywidgets) (0.18.0)\nRequirement already satisfied: pexpect>4.3 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from ipython>=4.0.0->ipywidgets) (4.8.0)\nRequirement already satisfied: backcall in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from ipython>=4.0.0->ipywidgets) (0.2.0)\nRequirement already satisfied: parso<0.9.0,>=0.8.0 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from jedi>=0.16->ipython>=4.0.0->ipywidgets) (0.8.2)\nRequirement already satisfied: python-dateutil>=2.1 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from jupyter-client<7.0->ipykernel>=4.5.1->ipywidgets) (2.8.2)\nRequirement already satisfied: pyzmq>=13 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from jupyter-client<7.0->ipykernel>=4.5.1->ipywidgets) (22.1.0)\nRequirement already satisfied: jupyter-core>=4.6.0 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from jupyter-client<7.0->ipykernel>=4.5.1->ipywidgets) (4.7.1)\nRequirement already satisfied: ipython-genutils in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from 
nbformat>=4.2.0->ipywidgets) (0.2.0)\nRequirement already satisfied: jsonschema!=2.5.0,>=2.4 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from nbformat>=4.2.0->ipywidgets) (3.2.0)\nRequirement already satisfied: attrs>=17.4.0 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from jsonschema!=2.5.0,>=2.4->nbformat>=4.2.0->ipywidgets) (21.2.0)\nRequirement already satisfied: pyrsistent>=0.14.0 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from jsonschema!=2.5.0,>=2.4->nbformat>=4.2.0->ipywidgets) (0.18.0)\nRequirement already satisfied: six>=1.11.0 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from jsonschema!=2.5.0,>=2.4->nbformat>=4.2.0->ipywidgets) (1.16.0)\nRequirement already satisfied: ptyprocess>=0.5 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from pexpect>4.3->ipython>=4.0.0->ipywidgets) (0.7.0)\nRequirement already satisfied: wcwidth in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from prompt-toolkit!=3.0.0,!=3.0.1,<3.1.0,>=2.0.0->ipython>=4.0.0->ipywidgets) (0.2.5)\nRequirement already satisfied: notebook>=4.4.1 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from widgetsnbextension~=3.5.0->ipywidgets) (6.4.0)\nRequirement already satisfied: jinja2 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from notebook>=4.4.1->widgetsnbextension~=3.5.0->ipywidgets) (3.0.1)\nRequirement already satisfied: prometheus-client in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from notebook>=4.4.1->widgetsnbextension~=3.5.0->ipywidgets) (0.11.0)\nRequirement already satisfied: terminado>=0.8.3 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from notebook>=4.4.1->widgetsnbextension~=3.5.0->ipywidgets) (0.10.1)\nRequirement already satisfied: Send2Trash>=1.5.0 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from notebook>=4.4.1->widgetsnbextension~=3.5.0->ipywidgets) (1.7.1)\nRequirement already satisfied: argon2-cffi in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from notebook>=4.4.1->widgetsnbextension~=3.5.0->ipywidgets) (20.1.0)\nRequirement already satisfied: nbconvert in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from notebook>=4.4.1->widgetsnbextension~=3.5.0->ipywidgets) (6.1.0)\nRequirement already satisfied: cffi>=1.0.0 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from argon2-cffi->notebook>=4.4.1->widgetsnbextension~=3.5.0->ipywidgets) (1.14.6)\nRequirement already satisfied: pycparser in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from cffi>=1.0.0->argon2-cffi->notebook>=4.4.1->widgetsnbextension~=3.5.0->ipywidgets) (2.20)\nRequirement already satisfied: MarkupSafe>=2.0 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from jinja2->notebook>=4.4.1->widgetsnbextension~=3.5.0->ipywidgets) (2.0.1)\nRequirement already satisfied: testpath in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from nbconvert->notebook>=4.4.1->widgetsnbextension~=3.5.0->ipywidgets) (0.5.0)\nRequirement already satisfied: bleach in 
/home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from nbconvert->notebook>=4.4.1->widgetsnbextension~=3.5.0->ipywidgets) (3.3.1)\nRequirement already satisfied: pandocfilters>=1.4.1 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from nbconvert->notebook>=4.4.1->widgetsnbextension~=3.5.0->ipywidgets) (1.4.3)\nRequirement already satisfied: entrypoints>=0.2.2 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from nbconvert->notebook>=4.4.1->widgetsnbextension~=3.5.0->ipywidgets) (0.3)\nRequirement already satisfied: nbclient<0.6.0,>=0.5.0 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from nbconvert->notebook>=4.4.1->widgetsnbextension~=3.5.0->ipywidgets) (0.5.3)\nRequirement already satisfied: mistune<2,>=0.8.1 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from nbconvert->notebook>=4.4.1->widgetsnbextension~=3.5.0->ipywidgets) (0.8.4)\nRequirement already satisfied: defusedxml in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from nbconvert->notebook>=4.4.1->widgetsnbextension~=3.5.0->ipywidgets) (0.7.1)\nRequirement already satisfied: jupyterlab-pygments in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from nbconvert->notebook>=4.4.1->widgetsnbextension~=3.5.0->ipywidgets) (0.1.2)\n" ], [ "pip install pandas-profiling", "Requirement already satisfied: pandas-profiling in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (3.0.0)\nRequirement already satisfied: pydantic>=1.8.1 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from pandas-profiling) (1.8.2)\nRequirement already satisfied: visions[type_image_path]==0.7.1 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from pandas-profiling) (0.7.1)\nRequirement already satisfied: numpy>=1.16.0 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from pandas-profiling) (1.21.1)\nRequirement already satisfied: joblib in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from pandas-profiling) (1.0.1)\nRequirement already satisfied: matplotlib>=3.2.0 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from pandas-profiling) (3.4.2)\nRequirement already satisfied: tangled-up-in-unicode==0.1.0 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from pandas-profiling) (0.1.0)\nRequirement already satisfied: missingno>=0.4.2 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from pandas-profiling) (0.5.0)\nRequirement already satisfied: seaborn>=0.10.1 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from pandas-profiling) (0.11.1)\nRequirement already satisfied: scipy>=1.4.1 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from pandas-profiling) (1.7.0)\nRequirement already satisfied: jinja2>=2.11.1 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from pandas-profiling) (3.0.1)\nRequirement already satisfied: PyYAML>=5.0.0 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from pandas-profiling) (5.4.1)\nRequirement already satisfied: phik>=0.11.1 in 
/home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from pandas-profiling) (0.11.2)\nRequirement already satisfied: tqdm>=4.48.2 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from pandas-profiling) (4.61.2)\nRequirement already satisfied: pandas!=1.0.0,!=1.0.1,!=1.0.2,!=1.1.0,>=0.25.3 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from pandas-profiling) (1.3.0)\nRequirement already satisfied: requests>=2.24.0 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from pandas-profiling) (2.26.0)\nRequirement already satisfied: htmlmin>=0.1.12 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from pandas-profiling) (0.1.12)\nRequirement already satisfied: attrs>=19.3.0 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from visions[type_image_path]==0.7.1->pandas-profiling) (21.2.0)\nRequirement already satisfied: multimethod==1.4 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from visions[type_image_path]==0.7.1->pandas-profiling) (1.4)\nRequirement already satisfied: networkx>=2.4 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from visions[type_image_path]==0.7.1->pandas-profiling) (2.5.1)\nRequirement already satisfied: bottleneck in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from visions[type_image_path]==0.7.1->pandas-profiling) (1.3.2)\nRequirement already satisfied: imagehash in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from visions[type_image_path]==0.7.1->pandas-profiling) (4.2.1)\nRequirement already satisfied: Pillow in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from visions[type_image_path]==0.7.1->pandas-profiling) (8.3.1)\nRequirement already satisfied: MarkupSafe>=2.0 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from jinja2>=2.11.1->pandas-profiling) (2.0.1)\nRequirement already satisfied: pyparsing>=2.2.1 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from matplotlib>=3.2.0->pandas-profiling) (2.4.7)\nRequirement already satisfied: python-dateutil>=2.7 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from matplotlib>=3.2.0->pandas-profiling) (2.8.2)\nRequirement already satisfied: cycler>=0.10 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from matplotlib>=3.2.0->pandas-profiling) (0.10.0)\nRequirement already satisfied: kiwisolver>=1.0.1 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from matplotlib>=3.2.0->pandas-profiling) (1.3.1)\nRequirement already satisfied: six in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from cycler>=0.10->matplotlib>=3.2.0->pandas-profiling) (1.16.0)\nRequirement already satisfied: decorator<5,>=4.3 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from networkx>=2.4->visions[type_image_path]==0.7.1->pandas-profiling) (4.4.2)\nRequirement already satisfied: pytz>=2017.3 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from pandas!=1.0.0,!=1.0.1,!=1.0.2,!=1.1.0,>=0.25.3->pandas-profiling) (2021.1)\nRequirement already 
satisfied: typing-extensions>=3.7.4.3 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from pydantic>=1.8.1->pandas-profiling) (3.10.0.0)\nRequirement already satisfied: urllib3<1.27,>=1.21.1 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from requests>=2.24.0->pandas-profiling) (1.26.6)\nRequirement already satisfied: certifi>=2017.4.17 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from requests>=2.24.0->pandas-profiling) (2021.5.30)\nRequirement already satisfied: idna<4,>=2.5 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from requests>=2.24.0->pandas-profiling) (3.2)\nRequirement already satisfied: charset-normalizer~=2.0.0 in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from requests>=2.24.0->pandas-profiling) (2.0.3)\nRequirement already satisfied: PyWavelets in /home/cecivieira/Projetos/python-inovacao-civica/.venv/lib/python3.8/site-packages (from imagehash->visions[type_image_path]==0.7.1->pandas-profiling) (1.1.1)\nNote: you may need to restart the kernel to use updated packages.\n" ] ], [ [ "A instalação é só realizada 1 vez, mas a importação das bibliotecas deve ocorrer no inicío do código, como na próxima linha:", "_____no_output_____" ] ], [ [ "import pandas as pd", "_____no_output_____" ] ], [ [ "### Aula 7: Carregando tabela\nVamos aprender a ler arquivos do tipo CSV e converter para dataframe Pandas.\n\nOs dados que usaremos nesse notebook pode ser encontrado em: [TSE. Candidaturas em 2020](https://www.tse.jus.br/eleicoes/estatisticas/repositorio-de-dados-eleitorais-1)", "_____no_output_____" ] ], [ [ "candidaturas_pe = pd.read_csv('../conjunto_dados/consulta_cand_2020_PE.csv', sep=';', encoding='latin_1')", "_____no_output_____" ], [ "# o comando abaixo retira a limitação da quantidade de colunas impressa na tela\n\npd.options.display.max_columns = None", "_____no_output_____" ], [ "candidaturas_pe.head()", "_____no_output_____" ], [ "# informação sobre o tamanho do dataframe (qnt. linhas, qnt. colunas)\ncandidaturas_pe.shape", "_____no_output_____" ] ], [ [ "## Quantas candidaturas houveram no seu município nas eleições de 2020?", "_____no_output_____" ], [ "## Aula 8: Carregando várias tabelas\n\nÉ possível ler vários arquivos CSV e converter de uma só vez para dataframe Pandas. 
Para isso, além de usar as técnicas vistas nas aulas anteriores, importaremos a biblioteca glob.", "_____no_output_____" ] ], [ [ "import glob", "_____no_output_____" ], [ "dados_todos_estados = glob.glob('../conjunto_dados/consulta_*.csv')\nprint(dados_todos_estados)", "['../conjunto_dados/consulta_cand_2020_PI.csv', '../conjunto_dados/consulta_cand_2020_MT.csv', '../conjunto_dados/consulta_cand_2020_PB.csv', '../conjunto_dados/consulta_cand_2020_PA.csv', '../conjunto_dados/consulta_cand_2020_SP.csv', '../conjunto_dados/consulta_cand_2020_AL.csv', '../conjunto_dados/consulta_cand_2020_PE.csv', '../conjunto_dados/consulta_cand_2020_RR.csv', '../conjunto_dados/consulta_cand_2020_MS.csv', '../conjunto_dados/consulta_cand_2020_AP.csv', '../conjunto_dados/consulta_cand_2020_MG.csv', '../conjunto_dados/consulta_cand_2020_PR.csv', '../conjunto_dados/consulta_cand_2020_ES.csv', '../conjunto_dados/consulta_cand_2020_RJ.csv', '../conjunto_dados/consulta_cand_2020_MA.csv', '../conjunto_dados/consulta_cand_2020_AC.csv', '../conjunto_dados/consulta_cand_2020_RN.csv', '../conjunto_dados/consulta_cand_2020_CE.csv', '../conjunto_dados/consulta_cand_2020_RS.csv', '../conjunto_dados/consulta_cand_2020_SE.csv', '../conjunto_dados/consulta_cand_2020_GO.csv', '../conjunto_dados/consulta_cand_2020_BA.csv', '../conjunto_dados/consulta_cand_2020_AM.csv', '../conjunto_dados/consulta_cand_2020_TO.csv', '../conjunto_dados/consulta_cand_2020_SC.csv', '../conjunto_dados/consulta_cand_2020_RO.csv']\n" ] ], [ [ "Vamos ler todos os arquivos e reunir em uma lista.", "_____no_output_____" ] ], [ [ "lista_df = []\n\nfor dados_estado in dados_todos_estados:\n df_estado = pd.read_csv(dados_estado, sep=';', encoding='latin_1')\n lista_df.append(df_estado)", "_____no_output_____" ] ], [ [ "Com o método `pd.concat` poderemos reunir os dataframes de cada estado em um único", "_____no_output_____" ] ], [ [ "df_completo = pd.concat(lista_df, axis=0, ignore_index=True)\ndf_completo.shape", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb9c091c1aa0455aeb8bf3c208ac3c3d60519037
50,895
ipynb
Jupyter Notebook
notebooks/pdf_ingestion.ipynb
zaxmks/demo-data-compliance-service
372e612c570aaf5b512bec17627f825e880add67
[ "CNRI-Python", "CECILL-B" ]
null
null
null
notebooks/pdf_ingestion.ipynb
zaxmks/demo-data-compliance-service
372e612c570aaf5b512bec17627f825e880add67
[ "CNRI-Python", "CECILL-B" ]
null
null
null
notebooks/pdf_ingestion.ipynb
zaxmks/demo-data-compliance-service
372e612c570aaf5b512bec17627f825e880add67
[ "CNRI-Python", "CECILL-B" ]
null
null
null
37.727947
642
0.438059
[ [ [ "!pwd\nimport sys\nsys.path.append('/workspace')\n", "/workspace/notebooks\r\n" ], [ "from src.core.db.config import DatabaseEnum\nfrom src.core.db.models.pdf_models import Fincen8300Rev4\nfrom src.core.db.models.main_models import EmployeeToDocument\nfrom src.core.db.session import DBContext, DbQuery", "[23/03/2021 03:01:08] [ INFO] [dotenv.main] [_get_stream():77] [PID:135 TID:140482163009344] -> Python-dotenv could not find configuration file /workspace/src/core/env/.local.env.local.\n" ] ], [ [ "# Read the pdf data into a data source", "_____no_output_____" ] ], [ [ "db = DbQuery(DatabaseEnum.PDF_INGESTION_DB)\nresult = db.execute(\"SELECT * from public.fincen8300_rev4\")\n", "[23/03/2021 03:01:08] [ INFO] [sqlalchemy.engine.base.Engine] [_execute_context():1235] [PID:135 TID:140482163009344] -> select version()\n[23/03/2021 03:01:08] [ INFO] [sqlalchemy.engine.base.Engine] [_execute_context():1240] [PID:135 TID:140482163009344] -> {}\n[23/03/2021 03:01:08] [ INFO] [sqlalchemy.engine.base.Engine] [_execute_context():1235] [PID:135 TID:140482163009344] -> select current_schema()\n[23/03/2021 03:01:08] [ INFO] [sqlalchemy.engine.base.Engine] [_execute_context():1240] [PID:135 TID:140482163009344] -> {}\n[23/03/2021 03:01:08] [ INFO] [sqlalchemy.engine.base.Engine] [_cursor_execute():1341] [PID:135 TID:140482163009344] -> SELECT CAST('test plain returns' AS VARCHAR(60)) AS anon_1\n[23/03/2021 03:01:08] [ INFO] [sqlalchemy.engine.base.Engine] [_cursor_execute():1342] [PID:135 TID:140482163009344] -> {}\n[23/03/2021 03:01:08] [ INFO] [sqlalchemy.engine.base.Engine] [_cursor_execute():1341] [PID:135 TID:140482163009344] -> SELECT CAST('test unicode returns' AS VARCHAR(60)) AS anon_1\n[23/03/2021 03:01:08] [ INFO] [sqlalchemy.engine.base.Engine] [_cursor_execute():1342] [PID:135 TID:140482163009344] -> {}\n[23/03/2021 03:01:08] [ INFO] [sqlalchemy.engine.base.Engine] [_execute_context():1235] [PID:135 TID:140482163009344] -> show standard_conforming_strings\n[23/03/2021 03:01:08] [ INFO] [sqlalchemy.engine.base.Engine] [_execute_context():1240] [PID:135 TID:140482163009344] -> {}\n[23/03/2021 03:01:08] [ INFO] [sqlalchemy.engine.base.Engine] [_execute_context():1235] [PID:135 TID:140482163009344] -> SELECT * from public.fincen8300_rev4\n[23/03/2021 03:01:08] [ INFO] [sqlalchemy.engine.base.Engine] [_execute_context():1240] [PID:135 TID:140482163009344] -> {}\n" ], [ "type(result)", "_____no_output_____" ], [ "from pandas import DataFrame\ndf = DataFrame(result.fetchall())\ndf.columns = result.keys()", "_____no_output_____" ], [ "df.head(100)", "_____no_output_____" ], [ "df.columns", "_____no_output_____" ], [ "from src.sources.data_source import DataSource\nfincen = DataSource(df)", "_____no_output_____" ] ], [ [ "# Read employee data into data source", "_____no_output_____" ] ], [ [ "db = DbQuery(DatabaseEnum.MAIN_INGESTION_DB)\nresult = db.execute(\"SELECT * from public.employee\")", "[23/03/2021 03:02:35] [ INFO] [sqlalchemy.engine.base.Engine] [_execute_context():1235] [PID:135 TID:140482163009344] -> SELECT * from public.employee\n[23/03/2021 03:02:35] [ INFO] [sqlalchemy.engine.base.Engine] [_execute_context():1240] [PID:135 TID:140482163009344] -> {}\n" ], [ "print(result.keys())", "['id', 'prefix_name', 'first_name', 'last_name', 'middle_name', 'suffix_name', 'date_of_birth', 'dod_id', 'ssn']\n" ], [ "df_employee = DataFrame(result.fetchall())\ndf_employee.columns = result.keys()\ndf_employee.head(20)", "_____no_output_____" ], [ "employee = DataSource(df_employee)", 
"_____no_output_____" ] ], [ [ "# Map the columns", "_____no_output_____" ] ], [ [ "from src.sources.structured_data_source import StructuredDataSource\nfrom src.mapping.pdfs.pdf_field_name_classifier import FieldNameClassifier\nfrom src.mapping.pdfs.pdf_field_label_catalog import FieldLabelCatalog\nfrom src.mapping.pdfs.pseudofield_generator import PseudofieldGenerator\n\npseudofield_generator = PseudofieldGenerator(fincen)\npseudofield_generator.generate()\n", "_____no_output_____" ], [ "def _create_column_relations_for(source, target):\n \"\"\"Create column relations from canonical column identifiers.\"\"\"\n gold_id_info = FieldNameClassifier.get_id_info_from_df(target.get_data())\n data_id_info = FieldNameClassifier.get_id_info_from_df(source.get_data())\n for identifier in FieldLabelCatalog:\n if identifier in gold_id_info and identifier in data_id_info:\n g_id = gold_id_info[identifier]\n d_id = data_id_info[identifier]\n source.create_column_relation(\n d_id.field_name, g_id.field_name, target\n )\n print(\n \"New relation detected: %s\" % str(source.column_relations[-1])\n )", "_____no_output_____" ], [ "_create_column_relations_for(fincen, employee)", "New relation detected: tin -> pandas DataFrame (hash -8826298284233990544): ssn (1.00 conf)\nNew relation detected: dob -> pandas DataFrame (hash -8826298284233990544): date_of_birth (1.00 conf)\nNew relation detected: first_name -> pandas DataFrame (hash -8826298284233990544): first_name (1.00 conf)\nNew relation detected: last_name -> pandas DataFrame (hash -8826298284233990544): last_name (1.00 conf)\n" ] ], [ [ "# Find Row Mappings", "_____no_output_____" ] ], [ [ "from src.mapping.rows.row_mapping_configuration import RowMappingConfiguration\nfrom src.mapping.values.value_matching_configuration import ValueMatchingConfiguration", "_____no_output_____" ], [ "import json\ndef load_config(path):\n \"\"\"\n Read JSON from a filepath\n \"\"\"\n with open(path, \"r\") as F:\n return json.load(F)", "_____no_output_____" ], [ "value_matching_config_json = load_config('../config/mapping/levenshtein_default.json')\nrow_mapping_config_json = load_config('../config/mapping/weighted_linear_default.json')\nvalue_matching_config = ValueMatchingConfiguration(**value_matching_config_json)\nrow_mapping_config = RowMappingConfiguration(**row_mapping_config_json)", "_____no_output_____" ], [ "x = fincen.get_column_relations()\nprint(x)", "[ColumnRelation(target_data_source=pandas DataFrame (hash -8826298284233990544), source_column_name='tin', target_column_name='ssn', confidence=1.0), ColumnRelation(target_data_source=pandas DataFrame (hash -8826298284233990544), source_column_name='dob', target_column_name='date_of_birth', confidence=1.0), ColumnRelation(target_data_source=pandas DataFrame (hash -8826298284233990544), source_column_name='first_name', target_column_name='first_name', confidence=1.0), ColumnRelation(target_data_source=pandas DataFrame (hash -8826298284233990544), source_column_name='last_name', target_column_name='last_name', confidence=1.0)]\n" ], [ "fincen.map_rows_to(\n employee, value_matching_config, row_mapping_config\n)\n", "_____no_output_____" ], [ "import pandas as pd\ndef _generate_structured_row_matches(source, employee):\n \"\"\"Generate structured row matches.\"\"\"\n rows = {\n 'first_name': [], # just for sanity check\n 'last_name': [], # just for sanity check \n 'emp_uuid': [],\n 'doc_uuid': []\n }\n target_df = employee.get_data()\n for relation in source.row_relations:\n source_index = relation.source_index\n 
target_index = relation.target_index\n source_row = source.get_data().iloc[source_index]\n target_row = employee.get_data().iloc[target_index]\n rows['emp_uuid'].append(target_row.id)\n rows[\"doc_uuid\"].append(source_row.id)\n rows[\"first_name\"].append(source_row.first_name)\n rows[\"last_name\"].append(source_row.last_name)\n return pd.DataFrame(rows)", "_____no_output_____" ], [ "results_df = _generate_structured_row_matches(fincen, employee)\nresults_df.head()", "_____no_output_____" ], [ "fincen.get_data().iloc[2].first_name\n", "_____no_output_____" ] ], [ [ "# Write final dataframe to Main database", "_____no_output_____" ] ], [ [ "num_records = results_df.shape[0]\nwith DBContext(DatabaseEnum.MAIN_INGESTION_DB) as main_db:\n for i in range(num_records):\n row = results_df.iloc[i]\n main_db.add(EmployeeToDocument(employee_id=str(row.emp_uuid),\n document_ingestion_id=str(row.doc_uuid)\n ))", "[23/03/2021 03:01:14] [ INFO] [sqlalchemy.engine.base.Engine] [_begin_impl():730] [PID:135 TID:140482163009344] -> BEGIN (implicit)\n[23/03/2021 03:01:14] [ INFO] [sqlalchemy.engine.base.Engine] [_execute_context():1235] [PID:135 TID:140482163009344] -> INSERT INTO employee_to_document (employee_id, document_ingestion_id, related_employee_id) VALUES (%(employee_id)s, %(document_ingestion_id)s, %(related_employee_id)s) RETURNING employee_to_document.id\n[23/03/2021 03:01:14] [ INFO] [sqlalchemy.engine.base.Engine] [_execute_context():1240] [PID:135 TID:140482163009344] -> {'employee_id': UUID('33bafc44-4752-45d8-bea4-3f9df9a8b965'), 'document_ingestion_id': UUID('d48ed690-d5f5-45c0-bc58-693458e8b20c'), 'related_employee_id': None}\n[23/03/2021 03:01:14] [ INFO] [sqlalchemy.engine.base.Engine] [_commit_impl():772] [PID:135 TID:140482163009344] -> COMMIT\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
cb9c1040c526da647c1ae0d814326cd139cc0d4e
394,054
ipynb
Jupyter Notebook
src/notebooks/gridworld_ev.ipynb
ZmeiGorynych/generative_playground
5c336dfbd14235e4fd97b21778842a650e733275
[ "MIT" ]
9
2018-09-23T17:34:23.000Z
2021-07-29T09:48:55.000Z
src/notebooks/gridworld_ev.ipynb
ZmeiGorynych/generative_playground
5c336dfbd14235e4fd97b21778842a650e733275
[ "MIT" ]
2
2020-04-15T17:52:18.000Z
2020-04-15T18:26:27.000Z
src/notebooks/gridworld_ev.ipynb
ZmeiGorynych/generative_playground
5c336dfbd14235e4fd97b21778842a650e733275
[ "MIT" ]
6
2019-04-30T22:01:43.000Z
2021-11-22T02:20:18.000Z
64.599016
31,160
0.613578
[ [ [ "\nimport matplotlib.pyplot as plt\nimport torch.nn as nn\nimport sys\nsys.path.append('..')\nfrom gridworld.agent import Agent\nfrom gridworld.environment import Gridworld\nfrom gridworld.q_function import QFunction, QDistFunction\n\n%matplotlib inline", "_____no_output_____" ], [ "distributions = True\nthompson = True\ngamma = 0.9\nif distributions:\n q_function = QDistFunction(gamma=gamma, thompson=thompson)\nelse:\n q_function = QFunction()\n\nagent = Agent(\n Gridworld.deterministic(),\n epsilon=0.001,\n loss_fun=nn.MSELoss(),\n q_function=q_function,\n gamma=gamma,\n lr=0.001\n)", "_____no_output_____" ], [ "rewards = agent.train(100)", "_____no_output_____" ], [ "rewards.plot()", "_____no_output_____" ], [ "agent.run_model()", "[[' ' '@' ' ' ' ']\n [' ' '^' ' ' ' ']\n [' ' ' ' 'W' ' ']\n [' ' ' ' ' ' '+']]\n[[' ' ' ' '@' ' ']\n [' ' '^' ' ' ' ']\n [' ' ' ' 'W' ' ']\n [' ' ' ' ' ' '+']]\n[[' ' ' ' ' ' ' ']\n [' ' '^' '@' ' ']\n [' ' ' ' 'W' ' ']\n [' ' ' ' ' ' '+']]\n[[' ' ' ' ' ' ' ']\n [' ' '^' ' ' '@']\n [' ' ' ' 'W' ' ']\n [' ' ' ' ' ' '+']]\n[[' ' ' ' ' ' ' ']\n [' ' '^' ' ' ' ']\n [' ' ' ' 'W' '@']\n [' ' ' ' ' ' '+']]\n[[' ' ' ' ' ' ' ']\n [' ' '^' ' ' ' ']\n [' ' ' ' 'W' ' ']\n [' ' ' ' ' ' '@']]\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code" ] ]
cb9c13df9e22b3e1edf59f64a13a54fa39423ac3
24,270
ipynb
Jupyter Notebook
zeros_no_faturamento.ipynb
lsawakuchi/x1
564a135b4fdaa687a4ef6d470ddaa4730932d429
[ "MIT" ]
null
null
null
zeros_no_faturamento.ipynb
lsawakuchi/x1
564a135b4fdaa687a4ef6d470ddaa4730932d429
[ "MIT" ]
null
null
null
zeros_no_faturamento.ipynb
lsawakuchi/x1
564a135b4fdaa687a4ef6d470ddaa4730932d429
[ "MIT" ]
null
null
null
27.178052
1,056
0.469427
[ [ [ "import pandas as pd\nimport numpy as np\nfrom datetime import datetime\nfrom sqlalchemy import create_engine\nfrom dateutil.relativedelta import relativedelta\nfrom pricing.service.scoring.lscore import LScoring\nfrom plotly.offline import init_notebook_mode, iplot\nimport plotly.graph_objs as go\ninit_notebook_mode(connected=True)\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n%matplotlib inline", "_____no_output_____" ] ], [ [ "- Avaliar meses em que o faturamento foi zero (antes dos ultimos 6 meses)\n- Avaliar o periodo em que o faturamento ficou x% abaixo da media\n- Avaliar a amplitude do faturamento", "_____no_output_____" ], [ "#### Qual a probabilidade de se observar faturamento nulo nos ultimos 12 meses?", "_____no_output_____" ] ], [ [ "lista_teste = ['26998230000185', '17160880000166', '13.919.916/0001-91']", "_____no_output_____" ], [ "engine = create_engine(\"mysql+pymysql://capMaster:#jackpot123#@captalys.cmrbivuuu7sv.sa-east-1.rds.amazonaws.com:23306/credito-digital\")\ncon = engine.connect()\ndf = pd.read_sql(\"select * from tb_Fluxo where cnpj in {}\".format(tuple(lista_teste)), con)\ncon.close()", "_____no_output_____" ], [ "df = df[['cnpj', 'dataFluxo', 'valorFluxo']]", "_____no_output_____" ], [ "df.columns = ['cnpj', 'data', 'valor']", "_____no_output_____" ], [ "dt = df[df['cnpj']=='26998230000185']", "_____no_output_____" ], [ "dt['data'] = dt.apply(lambda x : x['data'] + relativedelta(months=5), axis=1)", "_____no_output_____" ], [ "dt", "_____no_output_____" ], [ "body = {'dados' : dt[['data', 'valor']].to_dict(\"records\"), 'id_produto' : 'tomatico'}\nls = LScoring(body)\nls.calcula()", "_____no_output_____" ], [ "trace = go.Scatter(\n x = dt['data'],\n y = dt['valor'],\n)\nlayout = go.Layout(title='faturamento')\nfig = go.Figure(data = [trace], layout = layout)\niplot(fig)", "_____no_output_____" ], [ "dt = df[df['cnpj']=='17160880000166']", "_____no_output_____" ], [ "body = {'dados' : dt[['data', 'valor']].to_dict(\"records\"), 'id_produto' : 'tomatico'}", "_____no_output_____" ], [ "ls = LScoring(body)\nls.calcula()", "_____no_output_____" ], [ "trace = go.Scatter(\n x = dt['data'],\n y = dt['valor'],\n)\nlayout = go.Layout(title='faturamento')\nfig = go.Figure(data = [trace], layout = layout)\niplot(fig)", "_____no_output_____" ], [ "_df = df[df['cnpj']=='26998230000185']", "_____no_output_____" ], [ "_df['valor'].mean()", "_____no_output_____" ], [ "abs( - 0) < 0.1", "_____no_output_____" ], [ "_df['valor'].min()/_df['valor'].mean()", "_____no_output_____" ], [ "_df['low'] = _df['valor']/_df['valor'].mean()", "_____no_output_____" ], [ "_df['low'].max()/_df['low'].min()", "_____no_output_____" ], [ "_df['valor'].max()/_df['valor'].min()", "_____no_output_____" ], [ "_df[_df['low']< 1]['low'].mean()", "_____no_output_____" ], [ "len(_df[_df['low']< 1])/len(_df)", "_____no_output_____" ], [ "engine = create_engine(\"mysql+pymysql://capMaster:#jackpot123#@captalys.cmrbivuuu7sv.sa-east-1.rds.amazonaws.com:23306/varejo\")\ncon = engine.connect()\ndf = pd.read_sql(\"select * from fluxo_pv\", con)\ncon.close()", "_____no_output_____" ], [ "df['data'].iloc[10]", "_____no_output_____" ], [ "df.drop(index=0, inplace=True)", "_____no_output_____" ], [ "df = df[df['data']>datetime(2018,2,1).date()]", "_____no_output_____" ], [ "df = df[df['cpf_cnpj']!='00.000.000/0001-91']", "_____no_output_____" ], [ "df.groupby('cpf_cnpj').count().max()", "_____no_output_____" ], [ "df['zeros'] = df.apply(lambda x : int(x['valor'] == 0), axis=1)\n ", 
"_____no_output_____" ], [ "df_zeros = df.groupby('cpf_cnpj').sum().reset_index()[['cpf_cnpj', 'zeros']]", "_____no_output_____" ], [ "df_zeros[df_zeros['zeros']>0]", "_____no_output_____" ], [ "dt = df[df['cpf_cnpj']=='13.919.916/0001-91'][['cpf_cnpj', 'data', 'valor']]", "_____no_output_____" ], [ "dt['cpf_cnpj'] = dt.apply(lambda x : x['cpf_cnpj'].replace(\".\", \"\").replace(\"/\", \"\").replace(\"-\", \"\"), axis=1)", "_____no_output_____" ], [ "dt.columns = ['cnpj', 'data', 'valor']", "_____no_output_____" ], [ "df = pd.concat([df, dt])", "_____no_output_____" ], [ "df.to_excel(\"base_zeros.xlsx\")", "_____no_output_____" ], [ "from datetime import datetime", "_____no_output_____" ], [ "datetime.now().date().replace(day=1)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb9c17ecea04ff1ecd2da162a66899bd51c6ffec
40,218
ipynb
Jupyter Notebook
site/en/tutorials/load_data/tf-records.ipynb
suphoff/docs
d9735cae89723b8868c75a78306d5f0031bad977
[ "Apache-2.0" ]
1
2019-01-23T14:44:21.000Z
2019-01-23T14:44:21.000Z
site/en/tutorials/load_data/tf-records.ipynb
simoneparisotto/docs
87edfc365e4e17926d3c9cc752eb30670a73049c
[ "Apache-2.0" ]
null
null
null
site/en/tutorials/load_data/tf-records.ipynb
simoneparisotto/docs
87edfc365e4e17926d3c9cc752eb30670a73049c
[ "Apache-2.0" ]
null
null
null
32.460048
474
0.536153
[ [ [ "##### Copyright 2018 The TensorFlow Authors.", "_____no_output_____" ] ], [ [ "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "_____no_output_____" ] ], [ [ "# Using TFRecords and `tf.Example`\n\n<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://www.tensorflow.org/tutorials/load_data/tf-records\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />View on TensorFlow.org</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/load_data/tf-records.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/docs/blob/master/site/en/tutorials/load_data/tf-records.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\n </td>\n</table>", "_____no_output_____" ], [ "To read data efficiently it can be helpful to serialize your data and store it in a set of files (100-200MB each) that can each be read linearly. This is especially true if the data is being streamed over a network. This can also be useful for caching any data-preprocessing.\n\nThe TFRecord format is a simple format for storing a sequence of binary records.\n\n[Protocol buffers](https://developers.google.com/protocol-buffers/) are a cross-platform, cross-language library for efficient serialization of structured data.\n\nProtocol messages are defined by `.proto` files, these are often the easiest way to understand a message type. \n\nThe `tf.Example` message (or protobuf) is a flexible message type that represents a `{\"string\": value}` mapping. It is designed for use with TensorFlow and is used throughout the higher-level APIs such as [TFX](https://www.tensorflow.org/tfx/).", "_____no_output_____" ], [ "\nThis notebook will demonstrate how to create, parse, and use the `tf.Example` message, and then serialize, write, and read `tf.Example` messages to and from `.tfrecord` files.\n\nNote: While useful, these structures are optional. There is no need to convert existing code to use TFRecords, unless you are using [`tf.data`](https://www.tensorflow.org/guide/datasets) and reading data is still the bottleneck to training. 
See [Data Input Pipeline Performance](https://www.tensorflow.org/guide/performance/datasets) for dataset performance tips.", "_____no_output_____" ], [ "## Setup", "_____no_output_____" ] ], [ [ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\ntf.enable_eager_execution()\n\nimport numpy as np\nimport IPython.display as display", "_____no_output_____" ] ], [ [ "## `tf.Example`", "_____no_output_____" ], [ "### Data types for `tf.Example`", "_____no_output_____" ], [ "Fundamentally a `tf.Example` is a `{\"string\": tf.train.Feature}` mapping.\n\nThe `tf.train.Feature` message type can accept one of the following three types (see the [`.proto` file](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/example/feature.proto) for reference). Most other generic types can be coerced into one of these.\n\n1. `tf.train.BytesList` (the following types can be coerced)\n\n - `string`\n - `byte`\n\n1. `tf.train.FloatList` (the following types can be coerced)\n\n - `float` (`float32`)\n - `double` (`float64`)\n\n1. `tf.train.Int64List` (the following types can be coerced)\n\n - `bool`\n - `enum`\n - `int32`\n - `uint32`\n - `int64`\n - `uint64`", "_____no_output_____" ], [ "In order to convert a standard TensorFlow type to a `tf.Example`-compatible `tf.train.Feature`, you can use the following shortcut functions:\n\nEach function takes a scalar input value and returns a `tf.train.Feature` containing one of the three `list` types above.", "_____no_output_____" ] ], [ [ "# The following functions can be used to convert a value to a type compatible\n# with tf.Example.\n\ndef _bytes_feature(value):\n \"\"\"Returns a bytes_list from a string / byte.\"\"\"\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\ndef _float_feature(value):\n \"\"\"Returns a float_list from a float / double.\"\"\"\n return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))\n\ndef _int64_feature(value):\n \"\"\"Returns an int64_list from a bool / enum / int / uint.\"\"\"\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))", "_____no_output_____" ] ], [ [ "Note: To stay simple, this example only uses scalar inputs. The simplest way to handle non-scalar features is to use `tf.serialize_tensor` to convert tensors to binary-strings. Strings are scalars in TensorFlow. Use `tf.parse_tensor` to convert the binary-string back to a tensor.", "_____no_output_____" ] ]
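, [ [ "As a quick illustration of that note (a sketch, assuming eager execution as enabled in Setup, so `.numpy()` is available):", "_____no_output_____" ] ], [ [ "# Sketch: handling a non-scalar feature by serializing it to a scalar\n# binary-string first, per the note above (assumes eager execution).\nt = tf.constant([[1.0, 2.0], [3.0, 4.0]])\nserialized_t = tf.serialize_tensor(t)  # a scalar binary-string tensor\nnon_scalar_feature = _bytes_feature(serialized_t.numpy())  # now tf.Example-compatible\nprint(tf.parse_tensor(serialized_t, out_type=tf.float32))  # round trip back to a tensor", "_____no_output_____" ] ]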
, [ [ "Below are some examples of how these functions work. Note the varying input types and the standardized output types. If the input type for a function does not match one of the coercible types stated above, the function will raise an exception (e.g. `_int64_feature(1.0)` will error out, since `1.0` is a float, so should be used with the `_float_feature` function instead).", "_____no_output_____" ] ], [ [ "print(_bytes_feature(b'test_string'))\nprint(_bytes_feature(u'test_bytes'.encode('utf-8')))\n\nprint(_float_feature(np.exp(1)))\n\nprint(_int64_feature(True))\nprint(_int64_feature(1))", "_____no_output_____" ] ], [ [ "All proto messages can be serialized to a binary-string using the `.SerializeToString` method.", "_____no_output_____" ] ], [ [ "feature = _float_feature(np.exp(1))\n\nfeature.SerializeToString()", "_____no_output_____" ] ], [ [ "### Creating a `tf.Example` message", "_____no_output_____" ], [ "Suppose you want to create a `tf.Example` message from existing data. In practice, the dataset may come from anywhere, but the procedure of creating the `tf.Example` message from a single observation will be the same. \n\n1. Within each observation, each value needs to be converted to a `tf.train.Feature` containing one of the 3 compatible types, using one of the functions above. \n\n1. We create a map (dictionary) from the feature name string to the encoded feature value produced in #1.\n\n1. The map produced in #2 is converted to a [`Features` message](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/example/feature.proto#L85).", "_____no_output_____" ], [ "In this notebook, we will create a dataset using NumPy. \n\nThis dataset will have 4 features.\n- a boolean feature, `False` or `True` with equal probability\n- a random bytes feature, uniform across the entire support\n- an integer feature uniformly randomly chosen from `[0, 5)`\n- a float feature from a standard normal distribution\n\nConsider a sample consisting of 10,000 independently and identically distributed observations from each of the above distributions.", "_____no_output_____" ] ], [ [ "# the number of observations in the dataset\nn_observations = int(1e4)\n\n# boolean feature, encoded as False or True\nfeature0 = np.random.choice([False, True], n_observations)\n\n# integer feature, random between 0 and 4\nfeature1 = np.random.randint(0, 5, n_observations)\n\n# bytes feature\nstrings = np.array([b'cat', b'dog', b'chicken', b'horse', b'goat'])\nfeature2 = strings[feature1]\n\n# float feature, from a standard normal distribution\nfeature3 = np.random.randn(n_observations)", "_____no_output_____" ] ], [ [ "Each of these features can be coerced into a `tf.Example`-compatible type using one of `_bytes_feature`, `_float_feature`, `_int64_feature`. We can then create a `tf.Example` message from these encoded features.", "_____no_output_____" ] ], [ [ "def serialize_example(feature0, feature1, feature2, feature3):\n \"\"\"\n Creates a tf.Example message ready to be written to a file.\n \"\"\"\n \n # Create a dictionary mapping the feature name to the tf.Example-compatible\n # data type.\n \n feature = {\n 'feature0': _int64_feature(feature0),\n 'feature1': _int64_feature(feature1),\n 'feature2': _bytes_feature(feature2),\n 'feature3': _float_feature(feature3),\n }\n \n # Create a Features message using tf.train.Example.\n \n example_proto = tf.train.Example(features=tf.train.Features(feature=feature))\n return example_proto.SerializeToString()", "_____no_output_____" ] ], [ [ "For example, suppose we have a single observation from the dataset, `[False, 4, bytes('goat'), 0.9876]`. We can create and print the `tf.Example` message for this observation using `serialize_example()`. Each single observation will be written as a `Features` message as per the above. 
Note that the `tf.Example` [message](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/example/example.proto#L88) is just a wrapper around the `Features` message.", "_____no_output_____" ] ], [ [ "# This is an example observation from the dataset.\n\nexample_observation = []\n\nserialized_example = serialize_example(False, 4, b'goat', 0.9876)\nserialized_example", "_____no_output_____" ] ], [ [ "To decode the message use the `tf.train.Example.FromString` method.", "_____no_output_____" ] ], [ [ "example_proto = tf.train.Example.FromString(serialized_example)\nexample_proto", "_____no_output_____" ] ], [ [ "## TFRecord files using `tf.data`", "_____no_output_____" ], [ "The `tf.data` module also provides tools for reading and writing data in TensorFlow.", "_____no_output_____" ], [ "### Writing a TFRecord file\n\nThe easiest way to get the data into a dataset is to use the `from_tensor_slices` method.\n\nApplied to an array, it returns a dataset of scalars.", "_____no_output_____" ] ], [ [ "tf.data.Dataset.from_tensor_slices(feature1)", "_____no_output_____" ] ], [ [ "Applied to a tuple of arrays, it returns a dataset of tuples:", "_____no_output_____" ] ], [ [ "features_dataset = tf.data.Dataset.from_tensor_slices((feature0, feature1, feature2, feature3))\nfeatures_dataset", "_____no_output_____" ], [ "# Use `take(1)` to only pull one example from the dataset.\nfor f0,f1,f2,f3 in features_dataset.take(1):\n print(f0)\n print(f1)\n print(f2)\n print(f3)", "_____no_output_____" ] ], [ [ "Use the `tf.data.Dataset.map` method to apply a function to each element of a `Dataset`.\n\nThe mapped function must operate in TensorFlow graph mode: It must operate on and return `tf.Tensors`. A non-tensor function, like `serialize_example`, can be wrapped with `tf.py_func` to make it compatible.\n\nUsing `tf.py_func` requires that you specify the shape and type information that is otherwise unavailable:", "_____no_output_____" ] ], [ [ "def tf_serialize_example(f0,f1,f2,f3):\n tf_string = tf.py_func(\n serialize_example, \n (f0,f1,f2,f3), # pass these args to the above function.\n tf.string) # the return type is `tf.string`.\n return tf.reshape(tf_string, ()) # The result is a scalar", "_____no_output_____" ] ], [ [ "Apply this function to each element in the dataset:", "_____no_output_____" ] ], [ [ "serialized_features_dataset = features_dataset.map(tf_serialize_example)\nserialized_features_dataset", "_____no_output_____" ] ], [ [ "And write them to a TFRecord file:", "_____no_output_____" ] ], [ [ "filename = 'test.tfrecord'\nwriter = tf.data.experimental.TFRecordWriter(filename)\nwriter.write(serialized_features_dataset)", "_____no_output_____" ] ], [ [ "### Reading a TFRecord file", "_____no_output_____" ], [ "We can also read the TFRecord file using the `tf.data.TFRecordDataset` class. \n\nMore information on consuming TFRecord files using `tf.data` can be found [here](https://www.tensorflow.org/guide/datasets#consuming_tfrecord_data). \n\nUsing `TFRecordDataset`s can be useful for standardizing input data and optimizing performance.", "_____no_output_____" ] ], [ [ "filenames = [filename]\nraw_dataset = tf.data.TFRecordDataset(filenames)\nraw_dataset", "_____no_output_____" ] ], [ [ "At this point the dataset contains serialized `tf.train.Example` messages. When iterated over it returns these as scalar string tensors. \n\nUse the `.take` method to only show the first 10 records.\n\nNote: iterating over a `tf.data.Dataset` only works with eager execution enabled.", "_____no_output_____" ] ], [ [ "for raw_record in raw_dataset.take(10):\n print(repr(raw_record))", "_____no_output_____" ] ], [ [ "These tensors can be parsed using the function below.\n\nNote: The `feature_description` is necessary here because datasets use graph-execution, and need this description to build their shape and type signature.", "_____no_output_____" ] ], [ [ "# Create a description of the features. \nfeature_description = {\n 'feature0': tf.FixedLenFeature([], tf.int64, default_value=0),\n 'feature1': tf.FixedLenFeature([], tf.int64, default_value=0),\n 'feature2': tf.FixedLenFeature([], tf.string, default_value=''),\n 'feature3': tf.FixedLenFeature([], tf.float32, default_value=0.0),\n}\n\ndef _parse_function(example_proto):\n # Parse the input tf.Example proto using the dictionary above.\n return tf.parse_single_example(example_proto, feature_description)", "_____no_output_____" ] ], [ [ "Or use `tf.parse_example` to parse a whole batch at once.", "_____no_output_____" ] ]
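, [ [ "A minimal sketch of that batched route (hypothetical — the notebook continues with the per-example `_parse_function` below — reusing `raw_dataset` and the `feature_description` just defined):", "_____no_output_____" ] ], [ [ "# Sketch (hypothetical): batch the serialized protos, then parse each batch\n# in a single tf.parse_example call.\nbatch_parsed_dataset = raw_dataset.batch(32).map(\n    lambda protos: tf.parse_example(protos, feature_description))\nbatch_parsed_dataset", "_____no_output_____" ] ]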
, [ [ "Apply this function to each item in the dataset using the `tf.data.Dataset.map` method:", "_____no_output_____" ] ], [ [ "parsed_dataset = raw_dataset.map(_parse_function)\nparsed_dataset", "_____no_output_____" ] ], [ [ "Use eager execution to display the observations in the dataset. There are 10,000 observations in this dataset, but we only display the first 10. The data is displayed as a dictionary of features. Each item is a `tf.Tensor`, and the `numpy` element of this tensor displays the value of the feature.", "_____no_output_____" ] ], [ [ "for parsed_record in parsed_dataset.take(10):\n print(repr(parsed_record))", "_____no_output_____" ] ], [ [ "Here, the `tf.parse_single_example` function unpacks the `tf.Example` fields into standard tensors.", "_____no_output_____" ], [ "## TFRecord files using tf.python_io", "_____no_output_____" ], [ "The `tf.python_io` module also contains pure-Python functions for reading and writing TFRecord files. ", "_____no_output_____" ], [ "### Writing a TFRecord file", "_____no_output_____" ], [ "Now write the 10,000 observations to the file `test.tfrecord`. Each observation is converted to a `tf.Example` message, then written to file. We can then verify that the file `test.tfrecord` has been created.", "_____no_output_____" ] ], [ [ "# Write the `tf.Example` observations to the file.\nwith tf.python_io.TFRecordWriter(filename) as writer:\n for i in range(n_observations):\n example = serialize_example(feature0[i], feature1[i], feature2[i], feature3[i])\n writer.write(example)", "_____no_output_____" ], [ "!ls", "_____no_output_____" ] ]
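, [ [ "As an aside, TFRecord files can also be compressed. A hypothetical sketch (kept separate from the uncompressed `test.tfrecord` above) of a GZIP round trip:", "_____no_output_____" ] ], [ [ "# Hypothetical sketch: write and read GZIP-compressed TFRecords.\ngzip_options = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.GZIP)\nwith tf.python_io.TFRecordWriter('test_gzip.tfrecord', options=gzip_options) as gzip_writer:\n    for i in range(10):\n        gzip_writer.write(serialize_example(feature0[i], feature1[i], feature2[i], feature3[i]))\n\n# The reader must be told about the compression explicitly.\ngzip_dataset = tf.data.TFRecordDataset(['test_gzip.tfrecord'], compression_type='GZIP')\nfor gzip_record in gzip_dataset.take(1):\n    print(repr(gzip_record))", "_____no_output_____" ] ]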
, [ [ "### Reading a TFRecord file", "_____no_output_____" ], [ "Suppose we now want to read this data back, to be input as data into a model.\n\nThe following example imports the data as is, as a `tf.Example` message. This can be useful to verify that the file contains the data that we expect. This can also be useful if the input data is stored as TFRecords but you would prefer to input NumPy data (or some other input data type), for example [here](https://www.tensorflow.org/guide/datasets#consuming_numpy_arrays), since this example allows us to read the values themselves.\n\nWe iterate through the TFRecords in the input file, extract the `tf.Example` message, and can read/store the values within.", "_____no_output_____" ] ], [ [ "record_iterator = tf.python_io.tf_record_iterator(path=filename)\n\nfor string_record in record_iterator:\n example = tf.train.Example()\n example.ParseFromString(string_record)\n \n print(example)\n \n # Exit after 1 iteration as this is purely demonstrative.\n break", "_____no_output_____" ] ], [ [ "The features of the `example` object (created above of type `tf.Example`) can be accessed using its getters (similarly to any protocol buffer message). `example.features` returns a `repeated feature` message, then getting the `feature` message returns a map of feature name to feature value (stored in Python as a dictionary).", "_____no_output_____" ] ], [ [ "print(dict(example.features.feature))", "_____no_output_____" ] ], [ [ "From this dictionary, you can get any given value as with a dictionary.", "_____no_output_____" ] ], [ [ "print(example.features.feature['feature3'])", "_____no_output_____" ] ], [ [ "Now, we can access the value using the getters again.", "_____no_output_____" ] ], [ [ "print(example.features.feature['feature3'].float_list.value)", "_____no_output_____" ] ], [ [ "## Walkthrough: Reading/Writing Image Data", "_____no_output_____" ], [ "This is an example of how to read and write image data using TFRecords. The purpose of this is to show, end to end, how to take input data (in this case an image), write it to a TFRecord file, then read the file back and display the image.\n\nThis can be useful if, for example, you want to use several models on the same input dataset. Instead of storing the image data raw, it can be preprocessed into the TFRecords format, and that can be used in all further processing and modelling. 
\n\nFirst, let's download [this image](https://commons.wikimedia.org/wiki/File:Felis_catus-cat_on_snow.jpg) of a cat in the snow and [this photo](https://upload.wikimedia.org/wikipedia/commons/f/fe/New_East_River_Bridge_from_Brooklyn_det.4a09796u.jpg) of the Williamsburg Bridge, NYC under construction.", "_____no_output_____" ], [ "### Fetch the images", "_____no_output_____" ] ], [ [ "cat_in_snow = tf.keras.utils.get_file('320px-Felis_catus-cat_on_snow.jpg', 'https://storage.googleapis.com/download.tensorflow.org/example_images/320px-Felis_catus-cat_on_snow.jpg')\nwilliamsburg_bridge = tf.keras.utils.get_file('194px-New_East_River_Bridge_from_Brooklyn_det.4a09796u.jpg','https://storage.googleapis.com/download.tensorflow.org/example_images/194px-New_East_River_Bridge_from_Brooklyn_det.4a09796u.jpg')", "_____no_output_____" ], [ "display.display(display.Image(filename=cat_in_snow))\ndisplay.display(display.HTML('Image cc-by: <a href=\"https://commons.wikimedia.org/wiki/File:Felis_catus-cat_on_snow.jpg\">Von.grzanka</a>'))", "_____no_output_____" ], [ "display.display(display.Image(filename=williamsburg_bridge))\ndisplay.display(display.HTML('<a href=\"https://commons.wikimedia.org/wiki/File:New_East_River_Bridge_from_Brooklyn_det.4a09796u.jpg\">source</a>'))", "_____no_output_____" ] ], [ [ "### Write the TFRecord file", "_____no_output_____" ], [ "As we did earlier, we can now encode the features as types compatible with `tf.Example`. In this case, we will not only store the raw image string as a feature, but we will store the height, width, depth, and an arbitrary `label` feature, which is used when we write the file to distinguish between the cat image and the bridge image. We will use `0` for the cat image, and `1` for the bridge image. ", "_____no_output_____" ] ], [ [ "image_labels = {\n cat_in_snow : 0,\n williamsburg_bridge : 1,\n}", "_____no_output_____" ], [ "# This is an example, just using the cat image.\nimage_string = open(cat_in_snow, 'rb').read()\n\nlabel = image_labels[cat_in_snow]\n\n# Create a dictionary with features that may be relevant.\ndef image_example(image_string, label):\n image_shape = tf.image.decode_jpeg(image_string).shape\n\n feature = {\n 'height': _int64_feature(image_shape[0]),\n 'width': _int64_feature(image_shape[1]),\n 'depth': _int64_feature(image_shape[2]),\n 'label': _int64_feature(label),\n 'image_raw': _bytes_feature(image_string),\n }\n\n return tf.train.Example(features=tf.train.Features(feature=feature))\n\nfor line in str(image_example(image_string, label)).split('\\n')[:15]:\n print(line)\nprint('...')", "_____no_output_____" ] ], [ [ "We see that all of the features are now stored in the `tf.Example` message. Now, we functionalize the code above and write the example messages to a file, `images.tfrecords`.", "_____no_output_____" ] ], [ [ "# Write the raw image files to images.tfrecords.\n# First, process the two images into tf.Example messages.\n# Then, write to a .tfrecords file.\n\nwith tf.python_io.TFRecordWriter('images.tfrecords') as writer:\n for filename, label in image_labels.items():\n image_string = open(filename, 'rb').read()\n tf_example = image_example(image_string, label)\n writer.write(tf_example.SerializeToString())", "_____no_output_____" ], [ "!ls", "_____no_output_____" ] ], [ [ "### Read the TFRecord file\n\nWe now have the file `images.tfrecords`. We can now iterate over the records in the file to read back what we wrote. 
Since, for our use case, we will just reproduce the image, the only feature we need is the raw image string. We can extract that using the getters described above, namely `example.features.feature['image_raw'].bytes_list.value[0]`. We also use the labels to determine which record is the cat as opposed to the bridge.", "_____no_output_____" ] ], [ [ "raw_image_dataset = tf.data.TFRecordDataset('images.tfrecords')\n\n# Create a dictionary describing the features. \nimage_feature_description = {\n 'height': tf.FixedLenFeature([], tf.int64),\n 'width': tf.FixedLenFeature([], tf.int64),\n 'depth': tf.FixedLenFeature([], tf.int64),\n 'label': tf.FixedLenFeature([], tf.int64),\n 'image_raw': tf.FixedLenFeature([], tf.string),\n}\n\ndef _parse_image_function(example_proto):\n # Parse the input tf.Example proto using the dictionary above.\n return tf.parse_single_example(example_proto, image_feature_description)\n\nparsed_image_dataset = raw_image_dataset.map(_parse_image_function)\nparsed_image_dataset", "_____no_output_____" ] ], [ [ "Recover the images from the TFRecord file:", "_____no_output_____" ] ], [ [ "for image_features in parsed_image_dataset:\n image_raw = image_features['image_raw'].numpy()\n display.display(display.Image(data=image_raw))", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb9c1cc78c8eafb40ca62a771def94edb5ea5fbb
27,703
ipynb
Jupyter Notebook
modulo - 2 Modelos Preditivos e Séries Temporais/desafio_final2.ipynb
k3ybladewielder/bootcamp_igti_ml
d8e929af62b3202e24e7d5ccaf14549a38dc6139
[ "MIT" ]
1
2021-01-14T01:41:28.000Z
2021-01-14T01:41:28.000Z
modulo - 2 Modelos Preditivos e Séries Temporais/desafio_final2.ipynb
k3ybladewielder/bootcamp_igti_ml
d8e929af62b3202e24e7d5ccaf14549a38dc6139
[ "MIT" ]
null
null
null
modulo - 2 Modelos Preditivos e Séries Temporais/desafio_final2.ipynb
k3ybladewielder/bootcamp_igti_ml
d8e929af62b3202e24e7d5ccaf14549a38dc6139
[ "MIT" ]
1
2021-07-16T22:39:14.000Z
2021-07-16T22:39:14.000Z
30.54355
120
0.355341
[ [ [ "# Módulo 2 - Modelos preditivos e séries temporais\n\n# Desafio do Módulo 2", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np", "_____no_output_____" ], [ "base = pd.read_csv('https://pycourse.s3.amazonaws.com/banknote_authentication.txt', header=None)\nbase.head()", "_____no_output_____" ], [ "#labels:\n#variance, skewness, curtosis e entropy)\nbase.columns=['variance', 'skewness', 'curtosis', 'entropy', 'class']\nbase.head()", "_____no_output_____" ], [ "#Qual o tamanho desse dataset (número de linhas, número de colunas)?\nbase.shape", "_____no_output_____" ], [ "#Qual variável possui o maior range (diferença entre valor máximo e mínimo)?\n#Qual a média da coluna skewness?\n#Qual a média da coluna entropy?\n#Qual a desvio padrão da coluna curtosis?\n\n\nbase.describe()", "_____no_output_____" ], [ "#Qual a mediana da coluna variance?\nbase.median()", "_____no_output_____" ], [ "#Qual a porcentagem de exemplos do dataset que são cédulas falsas (class=1)?\n\nfalsas = (base['class'] == 1).sum()\ntotal = base.shape[0]\nfalsas / total * 100", "_____no_output_____" ], [ "#Qual o valor da correlação de Pearson entre as variáveis skewness e curtosis?\nimport scipy as sp\nfrom scipy import stats\n\nsp.stats.pearsonr(base['skewness'], base['curtosis'])\n", "_____no_output_____" ], [ "#Qual a acurácia do KNN no conjunto de teste?\nfrom sklearn.model_selection import train_test_split\n\nx = base.drop('class', axis=1)\n\ny = base['class']\n\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.30, random_state=1)\n\nimport sklearn\n\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.neural_network import MLPClassifier\n\nfrom sklearn.metrics import accuracy_score #Medir acurária abaixo", "_____no_output_____" ], [ "#a. Algoritmo KNN:\nclf_KNN = KNeighborsClassifier(n_neighbors=5)\n\n#b. Algoritmo Árvore de Decisão (Decision Tree):\nclf_arvore = DecisionTreeClassifier(random_state=1)\n\n#c. Algoritmo Floresta Aleatória (Random Forest):\nclf_floresta = RandomForestClassifier(max_depth=8, random_state=1)\n\n#d. Algoritmo SVM:\nclf_svm = SVC(gamma='auto',kernel='rbf', random_state=1)\n\n#e. 
, [ [ "#What is the accuracy of KNN on the test set?\n\nclf_KNN.fit(x_train, y_train)\nknn_predict = clf_KNN.predict(x_test)\n\naccuracy_score(y_test, knn_predict)\n", "_____no_output_____" ], [ "#What is the accuracy of the Decision Tree on the test set?\n\nclf_arvore.fit(x_train, y_train)\narvore_predict = clf_arvore.predict(x_test)\n\naccuracy_score(y_test, arvore_predict)", "_____no_output_____" ], [ "#What is the accuracy of the Random Forest on the test set?\n\nclf_floresta.fit(x_train, y_train)\nfloresta_predict = clf_floresta.predict(x_test)\n\naccuracy_score(y_test, floresta_predict)", "_____no_output_____" ], [ "#What is the accuracy of the SVM on the test set?\n\nclf_svm.fit(x_train, y_train)\nsvm_predict = clf_svm.predict(x_test)\n\naccuracy_score(y_test, svm_predict)", "_____no_output_____" ], [ "#What is the accuracy of the MLP network on the test set?\n\nclf_mlp.fit(x_train, y_train)\nmlp_predict = clf_mlp.predict(x_test)\n\naccuracy_score(y_test, mlp_predict)", "_____no_output_____" ], [ "#Looking at the relative importance of the Random Forest features (the feature_importances_ attribute),\n#which feature contributed most to predicting class?\n\n#What is the relative importance of the skewness feature?\n\nrelativos = clf_floresta.feature_importances_\nbase.head()", "_____no_output_____" ], [ "relativos", "_____no_output_____" ], [ "", "_____no_output_____" ] ], [ [ "# The End\n\n# Visit my [github](https://github.com/k3ybladewielder) <3", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ] ]
cb9c266ac560212996ba7e2fd57f114762a5f8f3
3,373
ipynb
Jupyter Notebook
Bifurcation.ipynb
cadavisfour/animated-plots
d1b9a127f950988d5f56467bb5ec0f70b002dc84
[ "MIT" ]
null
null
null
Bifurcation.ipynb
cadavisfour/animated-plots
d1b9a127f950988d5f56467bb5ec0f70b002dc84
[ "MIT" ]
2
2021-03-29T21:55:19.000Z
2021-03-29T22:06:35.000Z
Bifurcation.ipynb
cadavisfour/animated-plots
d1b9a127f950988d5f56467bb5ec0f70b002dc84
[ "MIT" ]
null
null
null
31.231481
320
0.55292
[ [ [ "The following generates a gif displaying the normal form of a pitchfork bifurcation. The normal form can be thought of the simplest version of a vector field whose solution captures the desired behavior. Other vector fields containing pitchfork bifurcation behavior can be locally transformed to the normal form.\n", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.integrate import odeint\nimport imageio\nimport os\nimport matplotlib.style as style\nstyle.use('ggplot')\nfilenames = []\nR=np.arange(-1,3,.05)\nfor i in range(R.size):#Loops over values of the parameter\n # plot the line chart\n r=R[i]\n def f(state,t):#Defines the vector field x-dot.\n x = state\n return r*x-x**3\n #r*x+ for subcritical r*x- for supercritical, subcritical needs work.\n a = np.arange(-1.2,1.2,.05)\n t = np.arange(0,2,.01)\n for j in a: #Loops over initial conditions\n state0=[j]\n states=odeint(f,state0,t)#integrator \n plt.figure(i)\n plt.xlim(0,2)\n plt.ylim(-3,3)\n plt.plot(t,states[:,0])\n \n # create file name and append it to a list\n filename = f'{i}.png'\n #print(filename)\n filenames.append(filename)\n \n # save frame\n plt.savefig(filename)\n plt.xlabel('t')\n plt.ylabel('x')\n plt.title('Pitchfork Bifurcation')\n plt.close()\n# build gif\nwith imageio.get_writer('pitchfork2.gif', mode='I') as writer:\n for filename in filenames:\n image = imageio.imread(filename)\n writer.append_data(image)\n \n# Remove png files\nfor filename in set(filenames):\n os.remove(filename)", "_____no_output_____" ] ], [ [ "![SegmentLocal](pitchfork2.gif \"segment\")", "_____no_output_____" ], [ "The above is given by generating solution curves to the vector field\n$$\\dot{x}=rx-x^3$$\nwe have \\begin{array}\narx-x^3=0\\\\\n-x(x^2-r)=0\n\\end{array}\nso the equilibria of the ode are at $x=0,$ and $x=\\pm\\sqrt{r}.$\nthe values of r in the animation range over the interval $[-1,3]$, and when they change from negative to positive sign, the values $\\pm\\sqrt{r}$ change from imaginary to real, and the number of real equilibria changes from 1 to 3 smoothly.\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
cb9c2b6bbe688a1e172d997d135494bfc860f1fc
144,989
ipynb
Jupyter Notebook
CI_Data_Science_Lesson_10_WO_Answers.ipynb
MaxDGU/datasciencenotebooks
8f48f0049de23e20016260f43c0d9037109897d1
[ "BSD-Source-Code" ]
null
null
null
CI_Data_Science_Lesson_10_WO_Answers.ipynb
MaxDGU/datasciencenotebooks
8f48f0049de23e20016260f43c0d9037109897d1
[ "BSD-Source-Code" ]
null
null
null
CI_Data_Science_Lesson_10_WO_Answers.ipynb
MaxDGU/datasciencenotebooks
8f48f0049de23e20016260f43c0d9037109897d1
[ "BSD-Source-Code" ]
null
null
null
40.063277
509
0.365945
[ [ [ "<a href=\"https://codeimmersives.com\"><img src = \"https://www.codeimmersives.com/wp-content/uploads/2019/09/CodeImmersives_Logo_RGB_NYC_BW.png\" width = 400> </a>\n\n\n<h1 align=center><font size = 5>Agenda</font></h1>", "_____no_output_____" ], [ "### \n<div class=\"alert alert-block alert-info\" style=\"margin-top: 20px\">\n\n1. [Review](#0)<br>\n2. [Pandas continued](#2)<br>\n2. [Exercise](#10)<br> \n3. [Exercise](#12)<br> \n</div>\n<hr>", "_____no_output_____" ], [ "<h2>Review</h2>", "_____no_output_____" ], [ "<h2>Pandas Series continued</h2>", "_____no_output_____" ], [ "We can sort a series by using the sort_values method.<br>\n<code>\nimport pandas as pd\nx = [27,4,14,23,33,14,22,11]\nps = pd.Series(x).sort_values(ascending = False)\nps\n</code><br>\nNOTE: We can use the ascending = True to sort the series in ascending order", "_____no_output_____" ] ], [ [ "import pandas as pd\nx = [27,4,14,23,33,14,22,11]\nps = pd.Series(x).sort_values(ascending = False)\nps", "_____no_output_____" ] ], [ [ "We can also sort a series using a lambda expression. Consider the <br>\nfollowing list of strings:<br>\n<code>\nps = pd.Series(['a', 'B', 'c', 'D', 'e'])\nps.sort_values(key=lambda x: x.str.lower())\nps\n</code><br>\nNOTE: We can use the str method to change all values in a series to a string", "_____no_output_____" ] ], [ [ "ps = pd.Series(['a', 'B', 'c', 'D', 'e'])\nps.sort_values(key=lambda x: x.str.lower())\nps", "_____no_output_____" ] ], [ [ "<H2>Exercise</h2>\nTake the following list of cities and alphabetize them and put them<br>\nIn the proper case: ALBANY ---> Albany<br>\n<case>\ncities = ['PROVIDENCE', 'HARTFORD','BOSTON','ALBANY','TRENTON']\n\n</case>", "_____no_output_____" ], [ "<h4>Solution</h4>", "_____no_output_____" ] ], [ [ "", "_____no_output_____" ] ], [ [ "<h4>Pandas Series.str methods</h4>\nHere is a list of str methods to try:<br>\nstr.contains<br>\nstr.count<br>\nstr.endswith<br>\nstr.find<br>\nstr.index<br>\nstr.join<br>\nstr.get<br>\nstr.len<br>\nstr.split<br>\nstr.isalnum<br> \n\n\n", "_____no_output_____" ], [ "<h2> Exercise </h2> \n1) use pd.str to get a boolean matrix for which words in the series <code> ['Mouse', 'dog', 'house and parrot', '23', np.NaN] </code> contain 'og' - use pd.str.contains <br>\n2) Use pd.str.count to get the count of 'a' in the series <code> ['A', 'B', 'Aaba', 'Baca', np.nan, 'CABA', 'cat','aardvark'] </code> <br>\n3) Use pd.str.count to get the count of dollar signs in the series *hint* special characters need a double backslash before them ('\\\\') <code>\n['$', 'B', 'Aab$', '$$ca', 'C$B$', 'cat', 'canna$', 'findme_', 'dollarsign$', 'infinite', 'lo$'] </code>\n <br>\n4) Use pd.str.endswith() to see how many strings end with 't' in the series <code> ['bat', 'bear', 'caT', np.nan, 'lost', 'later', 'tattletale', 'tops', 'taste', 'tart', 'tango', 'taint', 'tarentino', 'tot'] </code> <br>\n5) Use pd.str.join() to join all elements in the series <code> [['lion', 'elephant', 'zebra'],\n [1.1, 2.2, 3.3],\n ['cat', 'gerbil', 'dog'],\n ['cow', 4.5, 'goat'],\n ['duck', 'swan', 'fish', 'guppy']] </code> using a hyphen '-' to join. 
", "_____no_output_____" ], [ "Solution:", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\n\ns1 = pd.Series(['Mouse', 'dog', 'house and parrot', '23', np.NaN])\nprint(s1.str.contains('og'))", "0 False\n1 True\n2 False\n3 False\n4 NaN\ndtype: object\n" ] ], [ [ "We can split the data in a series and unpack the results into columns<br>\n<br>\n<code>\ncities = ['PROVIDENCE, Rhode Island', 'Harford, Connecticut','Boston, Massachusetts']\nps = pd.Series(cities).str.split(expand = True)\nps\n</code><br>\nNOTE: We use the expand key word argument to create the multiple columns", "_____no_output_____" ] ], [ [ "cities = ['PROVIDENCE, Rhode Island', 'Harford, Connecticut','Boston, Massachusetts']\nps = pd.Series(cities).str.split(expand = True)\nps", "_____no_output_____" ] ], [ [ "We can remove a row by dropping its index<br>\n<br>\n<code>\nimport pandas as pd\nfavorites = {'1st place':'Pineapple',\n '2nd place': 'Grapes',\n '3rd place': 'Granny Smith Apples',\n '4th place': 'Strawberries'}\nps = pd.Series(favorites.values(), index = favorites.keys())\nnew_ps = ps.drop(labels='4th place')\nnew_ps\n</code>", "_____no_output_____" ] ], [ [ "import pandas as pd\nfavorites = {'1st place':'Pineapple',\n '2nd place': 'Grapes',\n '3rd place': 'Granny Smith Apples',\n '4th place': 'Strawberries'}\nps = pd.Series(favorites)\nnew_ps = ps.drop(labels='4th place')\nnew_ps", "_____no_output_____" ] ], [ [ "<h4>Pandas Series sum</h4>\nWe can sum up all of our values in the series<br>\n<code>\nimport pandas as pd\ndata = [90,93,97,88, 89]\nps = pd.Series(data)\nprint(ps.sum())\n</code>", "_____no_output_____" ] ], [ [ "import pandas as pd\ndata = [90,93,97,88, 89]\nps = pd.Series(data)\nprint(ps.sum())", "457\n" ], [ "", "_____no_output_____" ] ], [ [ "### DataFrame", "_____no_output_____" ] ], [ [ "data = {'state': ['Ohio', 'Ohio', 'Ohio', 'Nevada', 'Nevada', 'Nevada'],\n 'year': [2000, 2001, 2002, 2001, 2002, 2003],\n 'pop': [1.5, 1.7, 3.6, 2.4, 2.9, 3.2]}\nframe = pd.DataFrame(data)", "_____no_output_____" ], [ "frame", "_____no_output_____" ], [ "frame.head()", "_____no_output_____" ], [ "pd.DataFrame(data, columns=['year', 'state', 'pop'])", "_____no_output_____" ], [ "frame.loc[1]", "_____no_output_____" ], [ "import numpy as np\nframe = pd.DataFrame(data, columns = ['year', 'state', 'pop', 'area'])\nframe\nframe['area'] = np.arange(6)\nframe", "_____no_output_____" ] ], [ [ "<h2> Exercise </h2>\n1) Edit the states dataframe to create a new dataframe frame2 that adds a 'debt' column <br> \n2) change the indices to be string numbers from one to six ('one', ... 
'six') <br>\n3) Print all the values in the years column for viewing <br>\n4) Print row 'three' with all its values using loc[] <br>\n5) Use numpy's arange() function to fill the debt column frame2['debt'] with a range of numbers (Hint: you need to match the range to the number of rows in the frame for broadcasting to work).", "_____no_output_____" ], [ "Solution", "_____no_output_____" ] ], [ [ "# pd.DataFrame(data=[], columns=[], index=[])", "_____no_output_____" ] ], [ [ "Now, instead of an arange, suppose we want to specify the debt column manually. We can do this by creating another series and assigning the column to it (remember how dataframes contain series).", "_____no_output_____" ] ], [ [ "val = pd.Series([-1.2, -1.5, -1.7], index=['two', 'four', 'five'])\nframe2['debt'] = val\nframe2", "_____no_output_____" ], [ "frame2['Eastern'] = frame2.state == 'Ohio'\nframe2\n#del frame2['Eastern']", "_____no_output_____" ] ], [ [ "<h2> Exercise </h2>\n1) Add a western state column that reads True for western states. <br> \n2) Print the resulting table. <br>\n3) Add an Eastern_Debt column that is populated with random integers from 0 to 100. Print the result. <br>\n4) Delete all western and eastern columns using del \n", "_____no_output_____" ], [ "Solution", "_____no_output_____" ] ], [ [ "import numpy as np\nframe2['Western'] = frame2.state == 'Nevada'\nframe2['Eastern_Debt'] = np.random.randint(0,101,6)\nframe2", "_____no_output_____" ], [ "del frame2['Eastern'], frame2['Eastern_Debt'], frame2['Western']\nframe2", "_____no_output_____" ] ], [ [ "<h4> Transposing </h4>", "_____no_output_____" ] ], [ [ "pop = {'Nevada': {2001: 2.4, 2002: 2.9},\n 'Ohio': {2000: 1.5, 2001: 1.7, 2002: 3.6}}", "_____no_output_____" ], [ "frame3 = pd.DataFrame(pop)\nframe3", "_____no_output_____" ], [ "frame4 = pd.DataFrame(frame3.T)\nframe4", "_____no_output_____" ], [ "pd.DataFrame(pop, index=[2001, 2002, 2003])", "_____no_output_____" ], [ "pdata = {'Ohio': frame3['Ohio'][:-1],\n 'Nevada': frame3['Nevada'][:2]}\npd.DataFrame(pdata)", "_____no_output_____" ], [ "frame3.index.name = 'year'; frame3.columns.name = 'state'\nframe3\n#to add header: rows: df.index.attribute, columns: df.columns.attribute", "_____no_output_____" ], [ "frame3.values", "_____no_output_____" ], [ "frame2.values", "_____no_output_____" ], [ "", "_____no_output_____" ] ], [ [ "### Correlation and Covariance", "_____no_output_____" ], [ "conda install pandas-datareader", "_____no_output_____" ] ], [ [ "price = pd.read_pickle('yahoo_price.pkl')\nvolume = pd.read_pickle('yahoo_volume.pkl')", "_____no_output_____" ] ], [ [ "import pandas_datareader.data as web\nall_data = {ticker: web.get_data_yahoo(ticker)\n for ticker in ['AAPL', 'IBM', 'MSFT', 'GOOG']}\n\nprice = pd.DataFrame({ticker: data['Adj Close']\n for ticker, data in all_data.items()})\nvolume = pd.DataFrame({ticker: data['Volume']\n for ticker, data in all_data.items()})", "_____no_output_____" ] ], [ [ "returns = price.pct_change()\nreturns.tail()", "_____no_output_____" ], [ "#returns['MSFT'].corr(returns['IBM'])\nreturns['MSFT'].cov(returns['IBM'])", "_____no_output_____" ], [ "returns.MSFT.corr(returns.IBM)", "_____no_output_____" ], [ "returns.corr()\n#returns.cov()", "_____no_output_____" ], [ "returns.corrwith(returns.IBM)", "_____no_output_____" ], [ "returns.corrwith(volume)", "_____no_output_____" ] ], [ [ "Exercise!", "_____no_output_____" ], [ "This notebook is part of a course at www.codeimmersives.com called Data Science. 
If you accessed this notebook outside the course, you can get more information about this course online by clicking here.", "_____no_output_____" ], [ "<hr>\n\nCopyright &copy; 2021 Code Immersives", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
cb9c2d334707528d982da84fe8c21bef993f963e
119,536
ipynb
Jupyter Notebook
1_2_Convolutional_Filters_Edge_Detection/4. Fourier Transform of Filters.ipynb
binshi/udacity_cvnd
a37f0b12f1bd882d4d3aae9a55d17b0c46409249
[ "MIT" ]
null
null
null
1_2_Convolutional_Filters_Edge_Detection/4. Fourier Transform of Filters.ipynb
binshi/udacity_cvnd
a37f0b12f1bd882d4d3aae9a55d17b0c46409249
[ "MIT" ]
3
2021-03-19T04:53:14.000Z
2022-01-13T01:46:26.000Z
1_2_Convolutional_Filters_Edge_Detection/4. Fourier Transform of Filters.ipynb
binshi/udacity_cvnd
a37f0b12f1bd882d4d3aae9a55d17b0c46409249
[ "MIT" ]
null
null
null
707.313609
107,796
0.947539
[ [ [ "## High and Low Pass Filters\n\nNow, you might be wondering, what makes filters high and low-pass; why is a Sobel filter high-pass and a Gaussian filter low-pass?\n\nWell, you can actually visualize the frequencies that these filters block out by taking a look at their fourier transforms. The frequency components of any image can be displayed after doing a Fourier Transform (FT). An FT looks at the components of an image (edges that are high-frequency, and areas of smooth color as low-frequency), and plots the frequencies that occur as points in spectrum. So, let's treat our filters as small images, and display them in the frequency domain!", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as plt\nimport cv2\n\n%matplotlib inline\n\n# Define gaussian, sobel, and laplacian (edge) filters\n\ngaussian = (1/9)*np.array([[1, 1, 1],\n [1, 1, 1],\n [1, 1, 1]])\n\nsobel_x= np.array([[-1, 0, 1],\n [-2, 0, 2],\n [-1, 0, 1]])\n\nsobel_y= np.array([[-1,-2,-1],\n [0, 0, 0],\n [1, 2, 1]])\n\n# laplacian, edge filter\nlaplacian=np.array([[0, 1, 0],\n [1,-4, 1],\n [0, 1, 0]])\n\nfilters = [gaussian, sobel_x, sobel_y, laplacian]\nfilter_name = ['gaussian','sobel_x', \\\n 'sobel_y', 'laplacian']\n\n\n# perform a fast fourier transform on each filter\n# and create a scaled, frequency transform image\nf_filters = [np.fft.fft2(x) for x in filters]\nfshift = [np.fft.fftshift(y) for y in f_filters]\nfrequency_tx = [np.log(np.abs(z)+1) for z in fshift]\n\n# display 4 filters\nfor i in range(len(filters)):\n plt.subplot(2,2,i+1),plt.imshow(frequency_tx[i],cmap = 'gray')\n plt.title(filter_name[i]), plt.xticks([]), plt.yticks([])\n\nplt.show()", "_____no_output_____" ] ], [ [ "Areas of white or light gray, allow that part of the frequency spectrum through! Areas of black mean that part of the spectrum is blocked out of the image. \n\nRecall that the low frequencies in the frequency spectrum are at the center of the frequency transform image, and high frequencies are at the edges. You should see that the Gaussian filter allows only low-pass frequencies through, which is the center of the frequency transformed image. The sobel filters block out frequencies of a certain orientation and a laplace (detects edges regardless of orientation) filter, should block out low-frequencies!\n\nYou are encouraged to load in an image, apply a filter to it using `filter2d` then visualize what the fourier transform of that image looks like before and after a filter is applied.", "_____no_output_____" ] ], [ [ "## TODO: load in an image, and filter it using a kernel of your choice\n## apply a fourier transform to the original *and* filtered images and compare them\nimage = cv2.imread('images/birds.jpg')\n# Change color to RGB (from BGR)\nimage = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n# convert to grayscale\ngray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n# normalize the image\nnorm_image = gray/255.0\n\nfiltered_images = [cv2.filter2D(gray, -1, w) for w in filters]\nf_filters = [np.fft.fft2(x) for x in filtered_images]\nfshift = [np.fft.fftshift(y) for y in f_filters]\nfrequency_tx = [np.log(np.abs(z)+1) for z in fshift]\n\nfor i in range(len(filters)):\n plt.subplot(2,2,i+1),plt.imshow(frequency_tx[i],cmap = 'gray')\n plt.title(filter_name[i]), plt.xticks([]), plt.yticks([])\n\nplt.show()\n\nplt.imshow(filtered_image_y, cmap='gray')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb9c3934653597f56205109c558a4bba34349d7d
597,276
ipynb
Jupyter Notebook
src/analysis/complexity_analysis/Complexity _ analysis_NLP Group .ipynb
nemo-rui-chen/nlp-ferrari
9aeda193593f8a7ae25fd288d21b8f2505916dcf
[ "MIT" ]
null
null
null
src/analysis/complexity_analysis/Complexity _ analysis_NLP Group .ipynb
nemo-rui-chen/nlp-ferrari
9aeda193593f8a7ae25fd288d21b8f2505916dcf
[ "MIT" ]
null
null
null
src/analysis/complexity_analysis/Complexity _ analysis_NLP Group .ipynb
nemo-rui-chen/nlp-ferrari
9aeda193593f8a7ae25fd288d21b8f2505916dcf
[ "MIT" ]
null
null
null
1,204.185484
351,492
0.950954
[ [ [ "import pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\nfrom matplotlib.pyplot import MultipleLocator", "_____no_output_____" ], [ "complexity = pd.read_csv('C:/Users/34433/Desktop/MFFT/Courses/MFIN7036 NLP/Group project/complexity_220318.csv')\nticker_industry = pd.read_excel('C:/Users/34433/Desktop/MFFT/Courses/MFIN7036 NLP/Group project/Ticker-Industry.xlsx')", "_____no_output_____" ], [ "ticker_industry['Company'] = [x.split(':')[-1] for x in ticker_industry['Exchange:Ticker']]\ncomplexity_ind = pd.merge(complexity,ticker_industry.loc[:,['Company','Primary Industry']], how='left',on='Company') \ncomplexity_ind ['Year'] = [x[:4] for x in complexity_ind['Date']]\ncomplexity_ind ['Quarter'] = [x[4:] for x in complexity_ind['Date']]\ncomplexity_ind = complexity_ind.rename (columns={'Primary Industry':'Industry'})\n", "_____no_output_____" ], [ "complexity_ind", "_____no_output_____" ] ], [ [ "#### Data re-organization for Yannan's portfolio construction\nThe following codes in this part is to merge sentiment score with complexity score for further analysis.", "_____no_output_____" ] ], [ [ "sentiment = pd.read_csv('C:/Users/34433/Desktop/MFFT/Courses/MFIN7036 NLP/Group project/sentiment_data.csv')\nsentiment['TickerQuarter'] = sentiment['Ticker']+sentiment['Quarter']\ncomplexity_ind['TickerQuarter'] = complexity_ind['Company']+complexity_ind['Date']\ncomplexity_ind.query(\"Company == 'AAPL'\")\n# merge two dataframes on Quarter number\nsentiment_complexity = pd.merge(sentiment,complexity_ind.loc[:,['TickerQuarter','Flesch score','Polysyllable number per sentence']], how='left',on='TickerQuarter')\nsenti_complex_combine = sentiment_complexity.drop ('TickerQuarter',axis=1)\nsenti_complex_combine.to_csv(\"C:/Users/34433/Desktop/MFFT/Courses/MFIN7036 NLP/Group project/senti_complex_combine_220318.csv\",index=False)", "_____no_output_____" ], [ "# We got mean score for each quarter. 
\n# We would like to see the cumulative return as a time series, so we forward-fill the quarterly score onto each day.\nall_factors = pd.read_csv('C:/Users/34433/Desktop/MFFT/Courses/MFIN7036 NLP/Group project/all_factors_220318.csv')\nfactor_cols = ['Mean', 'Total_Words', 'Sum', 'Median', 'Std', '%Positive',\n               '%Negative', '%Neutral', 'Flesch score',\n               'Polysyllable number per sentence']\nfor col in factor_cols:\n    all_factors[col] = all_factors[col].fillna(method='pad', axis=0)\n\nall_factors.to_csv(\"C:/Users/34433/Desktop/MFFT/Courses/MFIN7036 NLP/Group project/all_factors_value_insert_220318_v2.csv\",index=False)", "_____no_output_____" ] ], [ [ "### QoQ & YoY Plot (for one company/industry)", "_____no_output_____" ] ], [ [ "# slice the frame to get a pivot table to view QoQ & YoY change\nindustry = 'all industries'\nfactor = 'Polysyllable number per sentence'\n#qoq_sliced_frame = complexity_ind.query(\"Industry == 'Soft Drinks' \")\nqoq_sliced_frame = complexity_ind\npivot_flesch = pd.pivot_table(qoq_sliced_frame, values = factor , index = ['Quarter'], columns = ['Year'], aggfunc=np.mean)\n\n# Plot in one figure\nsns.set(rc = {'figure.figsize':(10,10)}) # set figure size\nplt.ylim(ymin=2.5,ymax=4) # set y axis limits\nx_major_locator = MultipleLocator(1) # set axis tick density\nax = plt.gca()\nax.xaxis.set_major_locator(x_major_locator)\ng = sns.lineplot(data=pivot_flesch,markers=True)\ng.set(title = industry+\" - \"+factor)\ng.legend(loc=\"upper left\", bbox_to_anchor=(1, 1)) # move legend to a proper position", "_____no_output_____" ], [ "# To compare the year-end transcripts' complexity across industries\nQ4_complexity_ind = complexity_ind.query(\"Quarter == 'Q4'\")\nquarter = 'Q4'\nfactor = 'Reading time'\nsns.set(rc = {'figure.figsize':(10,10)}) # set figure size\nplt.ylim(ymin=200,ymax=1000) # set y axis limits\nx_major_locator = MultipleLocator(1) # set axis tick density\nax = plt.gca()\nax.xaxis.set_major_locator(x_major_locator)\ng = sns.lineplot(data=Q4_complexity_ind,x='Year',y=factor, hue='Industry',markers=True, legend='brief',style=\"Industry\",ci=None)\ng.set(title = quarter + \" - \"+ factor)\ng.legend(loc=\"upper left\", bbox_to_anchor=(1, 1)) # move legend to a proper position", "_____no_output_____" ], [ "# To compare complexity across industries\nfactor = 'Polysyllable number per sentence'\nall_ind_pivot_flesch = pd.pivot_table(complexity_ind, values = factor, columns = ['Industry'], aggfunc=np.median)\ng = sns.barplot(data=all_ind_pivot_flesch, palette=\"Blues_d\")\nplt.ylim(ymin=2,ymax=5) # set y axis limits\nplt.xticks(fontsize=14, rotation=90)\ng.set(title = factor + \" - \"+ \"Industry Comparison\")", "_____no_output_____" ] ]
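, [ [ "As a small follow-up to the industry comparison above, the same pivot table can be ranked so the most and least complex industries are easier to read off. This sketch reuses all_ind_pivot_flesch and factor from the previous cell.", "_____no_output_____" ] ], [ [ "# follow-up sketch: rank the industry medians behind the barplot above\nranked = all_ind_pivot_flesch.T.sort_values(by=factor, ascending=False)\nprint(ranked.head())  # highest-complexity industries\nprint(ranked.tail())  # lowest-complexity industries", "_____no_output_____" ] ] ]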
[ "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
cb9c603b922b29dd11eb19099db570383e6a352c
13,792
ipynb
Jupyter Notebook
notebooks/trees_classification.ipynb
ThomasBourgeois/scikit-learn-mooc
1c4bd0fb9a8466d396dd5daa64ee500546c9d834
[ "CC-BY-4.0" ]
null
null
null
notebooks/trees_classification.ipynb
ThomasBourgeois/scikit-learn-mooc
1c4bd0fb9a8466d396dd5daa64ee500546c9d834
[ "CC-BY-4.0" ]
null
null
null
notebooks/trees_classification.ipynb
ThomasBourgeois/scikit-learn-mooc
1c4bd0fb9a8466d396dd5daa64ee500546c9d834
[ "CC-BY-4.0" ]
null
null
null
32.528302
183
0.608324
[ [ [ "# Build a classification decision tree\n\nWe will illustrate how decision tree fit data with a simple classification\nproblem using the penguins dataset.", "_____no_output_____" ], [ "<div class=\"admonition note alert alert-info\">\n<p class=\"first admonition-title\" style=\"font-weight: bold;\">Note</p>\n<p class=\"last\">If you want a deeper overview regarding this dataset, you can refer to the\nAppendix - Datasets description section at the end of this MOOC.</p>\n</div>", "_____no_output_____" ] ], [ [ "import pandas as pd\n\npenguins = pd.read_csv(\"../datasets/penguins_classification.csv\")\nculmen_columns = [\"Culmen Length (mm)\", \"Culmen Depth (mm)\"]\ntarget_column = \"Species\"", "_____no_output_____" ] ], [ [ "Besides, we split the data into two subsets to investigate how trees will\npredict values based on an out-of-samples dataset.", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import train_test_split\n\ndata, target = penguins[culmen_columns], penguins[target_column]\ndata_train, data_test, target_train, target_test = train_test_split(\n data, target, random_state=0)\nrange_features = {\n feature_name: (data[feature_name].min() - 1, data[feature_name].max() + 1)\n for feature_name in data.columns}", "_____no_output_____" ] ], [ [ "<div class=\"admonition caution alert alert-warning\">\n<p class=\"first admonition-title\" style=\"font-weight: bold;\">Caution!</p>\n<p class=\"last\">Here and later, we use the name <tt class=\"docutils literal\">data</tt> and <tt class=\"docutils literal\">target</tt> to be explicit. In\nscikit-learn documentation, <tt class=\"docutils literal\">data</tt> is commonly named <tt class=\"docutils literal\">X</tt> and <tt class=\"docutils literal\">target</tt> is\ncommonly called <tt class=\"docutils literal\">y</tt>.</p>\n</div>", "_____no_output_____" ], [ "In a previous notebook, we learnt that a linear classifier will define a\nlinear separation to split classes using a linear combination of the input\nfeatures. In our 2-dimensional space, it means that a linear classifier will\ndefine some oblique lines that best separate our classes. We define a\nfunction below that, given a set of data points and a classifier, will plot\nthe decision boundaries learnt by the classifier.", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef plot_decision_function(fitted_classifier, range_features, ax=None):\n \"\"\"Plot the boundary of the decision function of a classifier.\"\"\"\n from sklearn.preprocessing import LabelEncoder\n\n feature_names = list(range_features.keys())\n # create a grid to evaluate all possible samples\n plot_step = 0.02\n xx, yy = np.meshgrid(\n np.arange(*range_features[feature_names[0]], plot_step),\n np.arange(*range_features[feature_names[1]], plot_step),\n )\n\n # compute the associated prediction\n Z = fitted_classifier.predict(np.c_[xx.ravel(), yy.ravel()])\n Z = LabelEncoder().fit_transform(Z)\n Z = Z.reshape(xx.shape)\n\n # make the plot of the boundary and the data samples\n if ax is None:\n _, ax = plt.subplots()\n ax.contourf(xx, yy, Z, alpha=0.4, cmap=\"RdBu\")\n\n return ax", "_____no_output_____" ] ], [ [ "Thus, for a linear classifier, we will obtain the following decision\nboundaries. 
These boundary lines indicate where the model changes its\nprediction from one class to another.", "_____no_output_____" ] ], [ [ "from sklearn.linear_model import LogisticRegression\n\nlinear_model = LogisticRegression()\nlinear_model.fit(data_train, target_train)", "_____no_output_____" ], [ "import seaborn as sns\n\n# create a palette to be used in the scatterplot\npalette = [\"tab:red\", \"tab:blue\", \"black\"]\n\nax = sns.scatterplot(data=penguins, x=culmen_columns[0], y=culmen_columns[1],\n                     hue=target_column, palette=palette)\nplot_decision_function(linear_model, range_features, ax=ax)\n# put the legend outside the plot\nplt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n_ = plt.title(\"Decision boundary using a logistic regression\")", "_____no_output_____" ] ], [ [ "We see that the lines are a combination of the input features since they are\nnot perpendicular to a specific axis. Indeed, this is due to the model\nparametrization that we saw in the previous notebook, controlled by the\nmodel's weights and intercept.\n\nBesides, it seems that the linear model would be a good candidate for\nsuch a problem as it gives good accuracy.", "_____no_output_____" ] ], [ [ "linear_model.fit(data_train, target_train)\ntest_score = linear_model.score(data_test, target_test)\nprint(f\"Accuracy of the LogisticRegression: {test_score:.2f}\")", "_____no_output_____" ] ], [ [ "Unlike linear models, decision trees are non-parametric models: they are not\ncontrolled by a mathematical decision function and do not have weights or\nintercept to be optimized.\n\nIndeed, decision trees will partition the space by considering a single\nfeature at a time. Let's illustrate this behaviour by having a decision\ntree make a single split to partition the feature space.", "_____no_output_____" ] ], [ [ "from sklearn.tree import DecisionTreeClassifier\n\ntree = DecisionTreeClassifier(max_depth=1)\ntree.fit(data_train, target_train)", "_____no_output_____" ], [ "ax = sns.scatterplot(data=penguins, x=culmen_columns[0], y=culmen_columns[1],\n                     hue=target_column, palette=palette)\nplot_decision_function(tree, range_features, ax=ax)\nplt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n_ = plt.title(\"Decision boundary using a decision tree\")", "_____no_output_____" ] ], [ [ "The partition found by the algorithm separates the data along the axis\n\"Culmen Depth\", discarding the feature \"Culmen Length\". Thus, it highlights\nthat a decision tree does not use a combination of features when making a\nsplit. We can look more in depth at the tree structure.", "_____no_output_____" ] ], [ [ "from sklearn.tree import plot_tree\n\n_, ax = plt.subplots(figsize=(8, 6))\n_ = plot_tree(tree, feature_names=culmen_columns,\n              class_names=tree.classes_, impurity=False, ax=ax)", "_____no_output_____" ] ], [ [ "<div class=\"admonition tip alert alert-warning\">\n<p class=\"first admonition-title\" style=\"font-weight: bold;\">Tip</p>\n<p class=\"last\">We are using the function <tt class=\"docutils literal\">fig, ax = <span class=\"pre\">plt.subplots(figsize=(8,</span> 6))</tt> to create\na figure and an axis with a specific size. Then, we can pass the axis to the\n<tt class=\"docutils literal\">sklearn.tree.plot_tree</tt> function such that the drawing happens in this axis.</p>\n</div>", "_____no_output_____" ], [ "We see that the split was done on the culmen depth feature. 
The original\ndataset was subdivided into 2 sets based on the culmen depth\n(inferior or superior to 16.45 mm).\n\nThis partition of the dataset minimizes the class diversities in each\nsub-partition. This measure is also known as a **criterion**,\nand is a settable parameter.\n\nIf we look more closely at the partition, we see that the samples superior to\n16.45 mm belong mainly to the Adelie class. Looking at the values, we indeed\nobserve 103 Adelie individuals in this space. We also count 52 Chinstrap\nsamples and 6 Gentoo samples. We can make a similar interpretation for the\npartition defined by a threshold inferior to 16.45 mm. In this case, the most\nrepresented class is the Gentoo species.\n\nLet's see how our tree would work as a predictor. Let's start by checking the\nclass predicted when the culmen depth is inferior to the threshold.", "_____no_output_____" ] ], [ [ "tree.predict([[0, 15]])", "_____no_output_____" ] ], [ [ "The class predicted is the Gentoo. We can now check what happens if we pass a culmen\ndepth superior to the threshold.", "_____no_output_____" ] ], [ [ "tree.predict([[0, 17]])", "_____no_output_____" ] ], [ [ "In this case, the tree predicts the Adelie species.\n\nThus, we can conclude that a decision tree classifier will predict the most\nrepresented class within a partition.\n\nDuring training, we have a count of samples in each partition, so we can\nalso compute the probability of belonging to a specific class within this\npartition.", "_____no_output_____" ] ], [ [ "y_pred_proba = tree.predict_proba([[0, 17]])\ny_proba_class_0 = pd.Series(y_pred_proba[0], index=tree.classes_)", "_____no_output_____" ], [ "y_proba_class_0.plot.bar()\nplt.ylabel(\"Probability\")\n_ = plt.title(\"Probability to belong to a penguin class\")", "_____no_output_____" ] ], [ [ "We will manually compute the different probabilities directly from the tree\nstructure.", "_____no_output_____" ] ], [ [ "adelie_proba = 103 / 161\nchinstrap_proba = 52 / 161\ngentoo_proba = 6 / 161\nprint(f\"Probabilities for the different classes:\\n\"\n      f\"Adelie: {adelie_proba:.3f}\\n\"\n      f\"Chinstrap: {chinstrap_proba:.3f}\\n\"\n      f\"Gentoo: {gentoo_proba:.3f}\\n\")", "_____no_output_____" ] ], [ [ "It is also important to note that the culmen length has been disregarded for\nthe moment. This means that whatever value is given, it will not be used\nduring the prediction.", "_____no_output_____" ] ], [ [ "tree.predict_proba([[10000, 17]])", "_____no_output_____" ] ], [ [ "Going back to our classification problem, the split found with a maximum\ndepth of 1 is not powerful enough to separate the three species, and the model\naccuracy is low when compared to the linear model.", "_____no_output_____" ] ], [ [ "tree.fit(data_train, target_train)\ntest_score = tree.score(data_test, target_test)\nprint(f\"Accuracy of the DecisionTreeClassifier: {test_score:.2f}\")", "_____no_output_____" ] ], [ [ "Indeed, it is not a surprise. We saw earlier that a single feature will not\nbe able to separate all three species. However, from the previous analysis we\nsaw that by using both features we should be able to get fairly good results.\n\nIn the next exercise, you will increase the size of the tree depth. You will\nget intuitions on how the space partitioning is repeated over time.", "_____no_output_____" ] ]
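, [ [ "As a quick preview, the short sketch below fits a slightly deeper tree and plots its decision boundary. It assumes the objects defined above (data_train, target_train, range_features, plot_decision_function) are still available.", "_____no_output_____" ] ], [ [ "# a quick preview sketch: two levels of splits instead of one, reusing the\n# training data and the plotting helper defined earlier in this notebook\ntree_depth_2 = DecisionTreeClassifier(max_depth=2)\ntree_depth_2.fit(data_train, target_train)\n\nax = sns.scatterplot(data=penguins, x=culmen_columns[0], y=culmen_columns[1],\n                     hue=target_column, palette=palette)\nplot_decision_function(tree_depth_2, range_features, ax=ax)\nplt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n_ = plt.title(\"Decision boundary using a decision tree (max_depth=2)\")\n\ntest_score = tree_depth_2.score(data_test, target_test)\nprint(f\"Accuracy of the max_depth=2 DecisionTreeClassifier: {test_score:.2f}\")", "_____no_output_____" ] ] ]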
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
cb9c655b8625616975c27603fd45759371a8edf4
203,192
ipynb
Jupyter Notebook
ExcelAPI.ipynb
Andrewzekid/MCExcelAPI
1bbcb584a81aa31acb5f8b1ee5a42a6821dedf92
[ "MIT" ]
null
null
null
ExcelAPI.ipynb
Andrewzekid/MCExcelAPI
1bbcb584a81aa31acb5f8b1ee5a42a6821dedf92
[ "MIT" ]
null
null
null
ExcelAPI.ipynb
Andrewzekid/MCExcelAPI
1bbcb584a81aa31acb5f8b1ee5a42a6821dedf92
[ "MIT" ]
null
null
null
35.748065
1,658
0.342725
[ [ [ "<a href=\"https://colab.research.google.com/github/Andrewzekid/MCExcelAPI/blob/main/ExcelAPI.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\nimport math\nimport datetime\nfrom openpyxl import Workbook", "_____no_output_____" ], [ "def clean_names(origional_name):\n #removes any unwanted whitespace from a name\n # \" Juri Mikoshiba \" turns into \"Juri Mikoshiba\"\n\n split_on_whitespace = origional_name.split(\" \")\n for i in split_on_whitespace:\n i = i.replace(\"\\t\",\"\")\n i = i.replace(\"\\n\",\"\")\n\n name = [i for i in split_on_whitespace if i != \"\"]\n cleaned_name = \" \".join(name)\n\n return cleaned_name", "_____no_output_____" ], [ "def clean_df(df):\n df[\"Name\"] = df.apply(lambda x: clean_names(x[\"Name\"]),axis=1)\n df[\"Score\"] = df.apply(lambda x: clean_scores(x[\"Score\"]),axis=1)\n df[\"Class\"] = df.apply(lambda x: clean_names(x[\"Class\"]),axis=1)\n\n return df", "_____no_output_____" ], [ "clean_df(df3)", "_____no_output_____" ], [ "clean_names(\"Arita Mana\")", "_____no_output_____" ], [ "clean_names(\"Arita Mana Angelina\")", "_____no_output_____" ], [ "\ndef clean_scores(origional_score):\n #removes any unwanted whitespace from a score\n if type(origional_score) == str:\n #if the score is a string\n #process a string (I.e: turn a score from \" 750 \" into 750)\n origional_score = int(origional_score)\n\n return origional_score", "_____no_output_____" ], [ "def clean_grade_string(string):\n #code to clean the strings for grade values i.e: \" 8A \" becomes \"G8A\"\n\n #code to replace extra whitespaces\n string = string.replace(\" \",\"\")\n if \"G\" not in string:\n string = \"G\" + string\n return string", "_____no_output_____" ], [ "SCORES_LIST = [750,500,250,100,100,100,100,100,100,100,100,100]\n#define the global variable \"Scores List\"", "_____no_output_____" ], [ "from openpyxl import load_workbook", "_____no_output_____" ], [ "from openpyxl.utils import get_column_letter", "_____no_output_____" ], [ "def read_excel_file(path,rowstart,rowend,colstart,colend):\n \"\"\"\n read(path,rowstart,rowend,colstart,colend)\n\n Description: \n Prints out the values from every cell within the rowstart,rowend,colstart,colend parameters, from a workbook that can be found via path.\n\n Parameters:\n Path(str):\n path to the workbook\n \n rowstart,rowend(int):\n indicate what rows to read from\n \n colstart,colend(int): \n indicate what columns to read from\n\n \"\"\"\n wb = load_workbook(path)\n ws = wb.active\n for row in range(rowstart,rowend):\n for column in range(colstart,colend):\n col = get_column_letter(column)\n print(ws[\"{}{}\".format(col,row)].value)\n print(\"=====================================\")\n df = pd.read_excel(path)\n return wb,df\n", "_____no_output_____" ], [ "wb,df = read_excel_file(\"Math Commitee Data (3).xlsx\",1,11,1,4)", "Name\nClass\nScore\n=====================================\nJuri Mikoshiba\nG6A\n\t3000\n=====================================\nManato Tanaka\nG6B\n3000\n=====================================\nTyler Glanville\nG6A\n1350\n=====================================\nDivyansh Gupta\nG8A\n1100\n=====================================\nShoukei Hada\nG6A\n250\n=====================================\nOta Horii\nG6A\n\t200\n=====================================\nYuvraj Jadia\nG6B\n100\n=====================================\nArita Mana 
Angelina\nG6B\n100\n=====================================\nShi Xintong\nG8B\n\t100\n=====================================\n" ], [ "df", "_____no_output_____" ], [ "wb1,df1 = read_excel_file(\"Math Commitee Week 6(1-5).xlsx\",1,4,1,20)", "ID\nStart time\nCompletion time\nEmail\nName\nTotal points\nQuiz feedback\nName (Please type your name)\nPoints - Name (Please type your name)\nFeedback - Name (Please type your name)\nHomeroom\nPoints - Homeroom\nFeedback - Homeroom\nYour answer\nPoints - Your answer\nFeedback - Your answer\n(Optional) Your Working in text - It may be featured in the Daily Notices as the solution of the week!\nPoints - (Optional) Your Working in text - It may be featured in the Daily Notices as the solution of the week!\nFeedback - (Optional) Your Working in text - It may be featured in the Daily Notices as the solution of the week!\n=====================================\n1\n2021-11-27 18:01:46.999998\n2021-11-27 18:06:55.999999\nanonymous\nNone\nNone\nNone\nManato Tanaka\nNone\nNone\nG6B\nNone\nNone\n11\nNone\nNone\nIt says its optional but it's required so I'm writing this so I can submit.\nNone\nNone\n=====================================\n2\n2021-11-27 18:13:15.000001\n2021-11-27 18:14:10.999997\nanonymous\nNone\nNone\nNone\nManato Tanaka\nNone\nNone\nG6B\nNone\nNone\n11\nNone\nNone\nI think I already submited it but I can submit again, so I sumbit again just in case.\nNone\nNone\n=====================================\n" ], [ "df1", "_____no_output_____" ], [ "df1", "_____no_output_____" ], [ "def process_results_file(path):\n wb1,df1 = read_excel_file(path,1,10,1,20)\n df1 = df1.dropna(axis=1)\n df1 = df1.drop([\"Email\",\"Start time\",\"ID\",\"(Optional) Your Working in text - It may be featured in the Daily Notices as the solution of the week!\"],axis=1)\n df1 = df1.drop_duplicates(subset=\"Name (Please type your name)\",keep=\"last\")\n return df1", "_____no_output_____" ], [ "def check_answers_and_assign_scores(df,answer,booster=1):\n correct_df_1 = df[df[\"Your answer\"] == answer]\n\n #rename the \"Name\" column of the dataframe containing the info about the people who got the correct answers\n correct_df = correct_df_1.rename({\"Name (Please type your name)\":\"Name\",\"Homeroom\":\"Class\"},axis=1)\n\n #main code to assign the scores\n correct_df[\"Completion time\"] = pd.to_datetime(correct_df[\"Completion time\"])\n correct_df = correct_df.sort_values(\"Completion time\")\n scores_to_be_used = SCORES_LIST[0:len(correct_df)] * booster\n correct_df[\"Score\"] = scores_to_be_used\n #as we no longer need the completion time and the answer columns anymore, drop them\n correct_df = correct_df.drop([\"Completion time\",\"Your answer\"],axis=1)\n correct_df = correct_df.reset_index()\n correct_df = correct_df.drop(\"index\",axis=1)\n print(correct_df)\n return correct_df", "_____no_output_____" ], [ "def check_and_remove_descrepancies(df,df2):\n indexes = list(df[\"Name\"])\n indexes_new = list(df2[\"Name\"])\n df = df.set_index(\"Name\")\n df2 = df2.set_index(\"Name\")\n print(indexes)\n print(indexes_new)\n print(df)\n print(df2)\n for i in range(len(indexes_new)):\n #loop through the newly answered questions\n if indexes_new[i] in indexes:\n #the same person has answered another question\n df.loc[indexes_new[i],\"Score\"] = df.loc[indexes_new[i],\"Score\"] + df2.loc[indexes_new[i],\"Score\"]\n else:\n \n #a new person has answered\n print(df2.loc[indexes_new[i]])\n df = df.append(df2.loc[indexes_new[i]],ignore_index=False)\n df = 
df.sort_values(\"Score\",ascending=False)\n return df", "_____no_output_____" ], [ "process_results_file(\"Math Commitee Week 6(1-5).xlsx\")", "ID\nStart time\nCompletion time\nEmail\nName\nTotal points\nQuiz feedback\nName (Please type your name)\nPoints - Name (Please type your name)\nFeedback - Name (Please type your name)\nHomeroom\nPoints - Homeroom\nFeedback - Homeroom\nYour answer\nPoints - Your answer\nFeedback - Your answer\n(Optional) Your Working in text - It may be featured in the Daily Notices as the solution of the week!\nPoints - (Optional) Your Working in text - It may be featured in the Daily Notices as the solution of the week!\nFeedback - (Optional) Your Working in text - It may be featured in the Daily Notices as the solution of the week!\n=====================================\n1\n2021-11-27 18:01:46.999998\n2021-11-27 18:06:55.999999\nanonymous\nNone\nNone\nNone\nManato Tanaka\nNone\nNone\nG6B\nNone\nNone\n11\nNone\nNone\nIt says its optional but it's required so I'm writing this so I can submit.\nNone\nNone\n=====================================\n2\n2021-11-27 18:13:15.000001\n2021-11-27 18:14:10.999997\nanonymous\nNone\nNone\nNone\nManato Tanaka\nNone\nNone\nG6B\nNone\nNone\n11\nNone\nNone\nI think I already submited it but I can submit again, so I sumbit again just in case.\nNone\nNone\n=====================================\n3\n2021-11-28 19:32:16.999996\n2021-11-28 20:08:50.000001\nanonymous\nNone\nNone\nNone\nJuri Mikoshiba\nNone\nNone\nG6A\nNone\nNone\n11\nNone\nNone\nFirst, change the required formula by making the denominator ab. Then we know that the required formula can also be a^2-bc-c^2/ab. As we know that ab=2, this formula can be further expanded. So it would be 4(a+c)-bc/2. a+b+c equals to 6, so (a+c) can be 6-b. So the formula would be expanded into 24-4b-bc/2. Summarizing until now, a+c=6-b, a=4+c, b(4+c)=2, 4b+4c=2, bc=2-4b. 24-4b-2+4b/2=22/2=11. The answer is 11.\nNone\nNone\n=====================================\n4\n2021-11-28 21:54:50\n2021-11-28 21:55:46.999998\nanonymous\nNone\nNone\nNone\nDivyansh Gupta\nNone\nNone\nG8A\nNone\nNone\n11\nNone\nNone\nUm, I don't have any. 
\nNone\nNone\n=====================================\n5\n2021-11-29 11:35:47.999996\n2021-11-29 11:36:18.000003\nanonymous\nNone\nNone\nNone\nAryan Sokhiya \nNone\nNone\nG9B\nNone\nNone\n11\nNone\nNone\nLOL\nNone\nNone\n=====================================\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\n=====================================\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\n=====================================\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\n=====================================\n" ], [ "def update_leaderboard(path_to_leaderboard,path_to_new_data,answer,booster=1):\n wb,df1 = read_excel_file(path_to_leaderboard,1,11,1,4)\n df1 = clean_df(df1)\n # df1 = clean_df(df1)\n \n #after we have processed the leaderboard dataframe, process the dataframe containing the new submissions\n df3 = process_results_file(path_to_new_data)\n df3 = check_answers_and_assign_scores(df3,answer=answer,booster=booster)\n df3 = clean_df(df3)\n print(df3)\n\n final_df = check_and_remove_descrepancies(df1,df3)\n #it is now done!!1\n return final_df", "_____no_output_____" ], [ "final_df = update_leaderboard(\"Math Commitee Data (3).xlsx\",\"Math Commitee Week 6(1-5).xlsx\",answer=11)", "Name\nClass\nScore\n=====================================\nJuri Mikoshiba\nG6A\n\t3000\n=====================================\nManato Tanaka\nG6B\n3000\n=====================================\nTyler Glanville\nG6A\n1350\n=====================================\nDivyansh Gupta\nG8A\n1100\n=====================================\nShoukei Hada\nG6A\n250\n=====================================\nOta Horii\nG6A\n\t200\n=====================================\nYuvraj Jadia\nG6B\n100\n=====================================\nArita Mana Angelina\nG6B\n100\n=====================================\nShi Xintong\nG8B\n\t100\n=====================================\nID\nStart time\nCompletion time\nEmail\nName\nTotal points\nQuiz feedback\nName (Please type your name)\nPoints - Name (Please type your name)\nFeedback - Name (Please type your name)\nHomeroom\nPoints - Homeroom\nFeedback - Homeroom\nYour answer\nPoints - Your answer\nFeedback - Your answer\n(Optional) Your Working in text - It may be featured in the Daily Notices as the solution of the week!\nPoints - (Optional) Your Working in text - It may be featured in the Daily Notices as the solution of the week!\nFeedback - (Optional) Your Working in text - It may be featured in the Daily Notices as the solution of the week!\n=====================================\n1\n2021-11-27 18:01:46.999998\n2021-11-27 18:06:55.999999\nanonymous\nNone\nNone\nNone\nManato Tanaka\nNone\nNone\nG6B\nNone\nNone\n11\nNone\nNone\nIt says its optional but it's required so I'm writing this so I can submit.\nNone\nNone\n=====================================\n2\n2021-11-27 18:13:15.000001\n2021-11-27 18:14:10.999997\nanonymous\nNone\nNone\nNone\nManato Tanaka\nNone\nNone\nG6B\nNone\nNone\n11\nNone\nNone\nI think I already submited it but I can submit again, so I sumbit again just in case.\nNone\nNone\n=====================================\n3\n2021-11-28 19:32:16.999996\n2021-11-28 20:08:50.000001\nanonymous\nNone\nNone\nNone\nJuri Mikoshiba\nNone\nNone\nG6A\nNone\nNone\n11\nNone\nNone\nFirst, change the required formula by making the denominator ab. 
Then we know that the required formula can also be a^2-bc-c^2/ab. As we know that ab=2, this formula can be further expanded. So it would be 4(a+c)-bc/2. a+b+c equals to 6, so (a+c) can be 6-b. So the formula would be expanded into 24-4b-bc/2. Summarizing until now, a+c=6-b, a=4+c, b(4+c)=2, 4b+4c=2, bc=2-4b. 24-4b-2+4b/2=22/2=11. The answer is 11.\nNone\nNone\n=====================================\n4\n2021-11-28 21:54:50\n2021-11-28 21:55:46.999998\nanonymous\nNone\nNone\nNone\nDivyansh Gupta\nNone\nNone\nG8A\nNone\nNone\n11\nNone\nNone\nUm, I don't have any. \nNone\nNone\n=====================================\n5\n2021-11-29 11:35:47.999996\n2021-11-29 11:36:18.000003\nanonymous\nNone\nNone\nNone\nAryan Sokhiya \nNone\nNone\nG9B\nNone\nNone\n11\nNone\nNone\nLOL\nNone\nNone\n=====================================\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\n=====================================\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\n=====================================\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\n=====================================\n Name Class Score\n0 Manato Tanaka G6B 750\n1 Juri Mikoshiba G6A 500\n2 Divyansh Gupta G8A 250\n3 Aryan Sokhiya G9B 100\n Name Class Score\n0 Manato Tanaka G6B 750\n1 Juri Mikoshiba G6A 500\n2 Divyansh Gupta G8A 250\n3 Aryan Sokhiya G9B 100\n['Juri Mikoshiba', 'Manato Tanaka', 'Tyler Glanville', 'Divyansh Gupta', 'Shoukei Hada', 'Ota Horii', 'Yuvraj Jadia', 'Arita Mana Angelina', 'Shi Xintong', 'Zhang Tongxi']\n['Manato Tanaka', 'Juri Mikoshiba', 'Divyansh Gupta', 'Aryan Sokhiya']\n Class Score\nName \nJuri Mikoshiba G6A 3000\nManato Tanaka G6B 3000\nTyler Glanville G6A 1350\nDivyansh Gupta G8A 1100\nShoukei Hada G6A 250\nOta Horii G6A 200\nYuvraj Jadia G6B 100\nArita Mana Angelina G6B 100\nShi Xintong G8B 100\nZhang Tongxi G7A 100\n Class Score\nName \nManato Tanaka G6B 750\nJuri Mikoshiba G6A 500\nDivyansh Gupta G8A 250\nAryan Sokhiya G9B 100\nClass G9B\nScore 100\nName: Aryan Sokhiya, dtype: object\n" ], [ "final_df", "_____no_output_____" ], [ "import re", "_____no_output_____" ], [ "", "_____no_output_____" ], [ "clean_grade_string(\" 9A \")\n#normally the whitespace in \" 9A \" would ca", "_____no_output_____" ], [ "wb,df = read_excel_file(\"Math Commitee Data (3).xlsx\",1,11,1,4)", "Name\nClass\nScore\n=====================================\nJuri Mikoshiba\nG6A\n\t3000\n=====================================\nManato Tanaka\nG6B\n3000\n=====================================\nTyler Glanville\nG6A\n1350\n=====================================\nDivyansh Gupta\nG8A\n1100\n=====================================\nShoukei Hada\nG6A\n250\n=====================================\nOta Horii\nG6A\n\t200\n=====================================\nYuvraj Jadia\nG6B\n100\n=====================================\nArita Mana Angelina\nG6B\n100\n=====================================\nShi Xintong\nG8B\n\t100\n=====================================\n" ], [ "", "_____no_output_____" ], [ "df2 = process_results_file(\"Math Commitee Week 6(1-5).xlsx\")", "ID\nStart time\nCompletion time\nEmail\nName\nTotal points\nQuiz feedback\nName (Please type your name)\nPoints - Name (Please type your name)\nFeedback - Name (Please type your name)\nHomeroom\nPoints - Homeroom\nFeedback - Homeroom\nYour answer\nPoints - Your answer\nFeedback - Your answer\n(Optional) Your 
Working in text - It may be featured in the Daily Notices as the solution of the week!\nPoints - (Optional) Your Working in text - It may be featured in the Daily Notices as the solution of the week!\nFeedback - (Optional) Your Working in text - It may be featured in the Daily Notices as the solution of the week!\n=====================================\n1\n2021-11-27 18:01:46.999998\n2021-11-27 18:06:55.999999\nanonymous\nNone\nNone\nNone\nManato Tanaka\nNone\nNone\nG6B\nNone\nNone\n11\nNone\nNone\nIt says its optional but it's required so I'm writing this so I can submit.\nNone\nNone\n=====================================\n2\n2021-11-27 18:13:15.000001\n2021-11-27 18:14:10.999997\nanonymous\nNone\nNone\nNone\nManato Tanaka\nNone\nNone\nG6B\nNone\nNone\n11\nNone\nNone\nI think I already submited it but I can submit again, so I sumbit again just in case.\nNone\nNone\n=====================================\n" ], [ "df2", "_____no_output_____" ], [ "def modify_score(path,value,name,cellstr=None,df=None):\n    \"\"\"\n    Docstring:\n      modify one value in the excel spreadsheet and save it\n\n    Parameters:\n      
path:\n        Path to the excel file that you want to modify\n      \n      cellstr:\n        Index of the cell\n      \n      value: \n        Value that you want to write in\n      \n      name:\n        Name of person that has their score being modified\n\n\n    \"\"\"\n    wb = load_workbook(path)\n    if cellstr is not None:\n        #a workbook is indexed by sheet name, so address the cell through a worksheet\n        ws = wb.active\n        ws[cellstr] = value\n        print(\"Modified Value of Cell {} to: {}\".format(cellstr,value))\n        wb.save(\"Math Commitee Data modified.xlsx\")\n    else:\n        #we do not know the precise location of the cell to be edited, so we will edit the dataframe and save that instead.\n        if df is None:\n            #no df provided, trigger error (== comparison would be elementwise on a DataFrame)\n            raise ValueError(\"Please provide a valid dataframe, or provide a cellstring\")\n        else:\n            #edit the dataframe\n            df.loc[name,\"Score\"] = value\n            print(\"{}'s score has been changed to {}!\".format(name,value))\n    return df", "_____no_output_____" ], [ "grade_student_pairs = {\"Juri Mikoshiba\":\"G6A\",\"Manato Tanaka\":\"G6B\",\"Tyler Glanville\":\"G6A\",\"Shoukei Hada\":\"G6A\",\"Ota Horii\":\"G6B\",\"Divyansh Gupta\":\"G8A\",\"Yuvraj Jadia\":\"G6B\",\"Arita Mana Angelina\":\"G6B\",\"Shi Xintong\":\"G8B\",\"Zhang Tongxi\":\"G7A\"}", "_____no_output_____" ], [ "", "_____no_output_____" ], [ "", "_____no_output_____" ], [ "wb,df1 = read_excel_file(\"Math Commitee Data (3).xlsx\",1,4,1,12)", "Name\nClass\nScore\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\n=====================================\nJuri Mikoshiba\nG6A\n\t3000\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\n=====================================\nManato Tanaka\nG6B\n3000\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\n=====================================\n" ], [ "df1", "_____no_output_____" ], [ "df3", "_____no_output_____" ], [ "df4 = df3.set_index(\"Name\")", "_____no_output_____" ], [ "df4.index", "_____no_output_____" ], [ "df4.append(df4.loc[\"Aryan Sokhiya \"],ignore_index=False)", "_____no_output_____" ], [ "df = check_and_remove_descrepancies(df1,df3)", "['Juri Mikoshiba', 'Manato Tanaka', 'Tyler Glanville', 'Divyansh Gupta', 'Shoukei Hada', 'Ota Horii', 'Yuvraj Jadia', 'Arita Mana Angelina', 'Shi Xintong', 'Zhang Tongxi']\n['Manato Tanaka', 'Juri Mikoshiba', 'Divyansh Gupta', 'Aryan Sokhiya ']\n                    Class  Score\nName                            \nJuri Mikoshiba        G6A   3000\nManato Tanaka         G6B   3000\nTyler Glanville       G6A   1350\nDivyansh Gupta        G8A   1100\nShoukei Hada          G6A    250\nOta Horii             G6A    200\nYuvraj Jadia          G6B    100\nArita Mana Angelina   G6B    100\nShi Xintong           G8B    100\nZhang Tongxi          G7A    100\n                Class  Score\nName                        \nManato Tanaka     G6B    750\nJuri Mikoshiba    G6A    500\nDivyansh Gupta    G8A    250\nAryan Sokhiya     G9B    100\nClass    G9B\nScore    100\nName: Aryan Sokhiya , dtype: object\n" ], [ "df", "_____no_output_____" ], [ "print(df1,df3)", "                  Name Class  Score\n0       Juri Mikoshiba   G6A   3000\n1        Manato Tanaka   G6B   3000\n2      Tyler Glanville   G6A   1350\n3       Divyansh Gupta   G8A   1100\n4         Shoukei Hada   G6A    250\n5            Ota Horii   G6A    200\n6         Yuvraj Jadia   G6B    100\n7  Arita Mana Angelina   G6B    100\n8          Shi Xintong   G8B    100\n9         Zhang Tongxi   G7A    100              Name Homeroom  Score\n0   Manato Tanaka      G6B    750\n1  Juri Mikoshiba      G6A    500\n" ], [ "df1", "_____no_output_____" ], [ "df1.loc[len(df1) + 1] = {\"Andrew\":[1,3]}", "_____no_output_____" ], [ "", "_____no_output_____" ], [ "df3 = pd.DataFrame({\"Name\":[\"Andrew Wang\"],\"Class\":[\"G9B\"],\"Score\":[1500]})", "_____no_output_____" ], [ "df3.set_index(\"Name\")", "_____no_output_____" ], [ "pd.concat([df1,df3])", "_____no_output_____" ], [ "df1", "_____no_output_____" ], [ "df1", "_____no_output_____" ], [ "test_1,df2 = read_excel_file(\"Updated scores 
(1).xlsx\",1,4,1,12)", "Name\nScore\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\n=====================================\nJuri Mikoshiba\n3000\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\n=====================================\nManato Tanaka \n3000\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\n=====================================\n" ], [ "df2 = clean_df(df2)", "_____no_output_____" ], [ "df2", "_____no_output_____" ], [ "df1", "_____no_output_____" ], [ "df = check_and_remove_descrepancies(df1,df2)", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "df2", "_____no_output_____" ], [ "df1", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "test_1,df2 = read_excel_file(\"Math Commitee Data (3).xlsx\",1,2,1,12)", "Name\nClass\nScore\nNone\nNone\nNone\nNone\nNone\nNone\nNone\nNone\n=====================================\n" ], [ "df2", "_____no_output_____" ], [ "df1", "_____no_output_____" ], [ "test_1", "_____no_output_____" ], [ "df.index", "_____no_output_____" ], [ "df.index[2] = \"Tyler Glanville\"", "_____no_output_____" ], [ "list(df.index)", "_____no_output_____" ], [ "indexes = list(df.index)", "_____no_output_____" ], [ "for i in range(len(indexes)):\n    print(i)\n    x = indexes[i].replace(\"\\t\",\"\")\n    x = x.replace(\"\\n\",\"\")\n    indexes[i] = x\n\n", "0\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n" ], [ "df = df.set_index(\"Score\")\ndf", "_____no_output_____" ], [ "df[\"Name\"] = indexes", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "df.set_index(\"Name\")", "_____no_output_____" ], [ "df.Score", "_____no_output_____" ], [ "", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "df1 = df.copy(deep=True)", "_____no_output_____" ], [ "df1", "_____no_output_____" ], [ "df1 = clean_df(df1)", "0\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n['Juri Mikoshiba', 'Manato Tanaka ', 'Tyler Glanville', 'Divyansh Gupta', 'Shoukei Hada', 'Ota Horii', 'Yuvraj Jadia', 'Arita Mana Angelina', 'Shi Xintong', 'Zhang Tongxi', 'Andrew Wang']\nEmpty DataFrame\nColumns: []\nIndex: [3000, 3000, 1350, 1100, 250, 200, 100, 100, 100, 100, 50]\n                     Name\nScore                    \n3000       Juri Mikoshiba\n3000       Manato Tanaka \n1350      Tyler Glanville\n1100       Divyansh Gupta\n250          Shoukei Hada\n200             Ota Horii\n100          Yuvraj Jadia\n100   Arita Mana Angelina\n100           Shi Xintong\n100          Zhang Tongxi\n50            Andrew Wang\nEmpty DataFrame\nColumns: []\nIndex: [Juri Mikoshiba, Manato Tanaka , Tyler Glanville, Divyansh Gupta, Shoukei Hada, Ota Horii, Yuvraj Jadia, Arita Mana Angelina, Shi Xintong, Zhang Tongxi, Andrew Wang]\n" ], [ "df1", "_____no_output_____" ], [ "indexes", "_____no_output_____" ], [ "", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "", "_____no_output_____" ], [ "", "_____no_output_____" ], [ "df.index", "_____no_output_____" ]
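, [ "# A possible final step (sketch only): write the updated leaderboard back to\n# Excel. Assumptions: final_df is the frame returned by update_leaderboard\n# above, and the output filename is just an example; change it as needed.\nfinal_df.to_excel(\"Math Commitee Data updated.xlsx\")", "_____no_output_____" ] ] ]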
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb9c7742eaf67e10e7880d02e15c3e5da428c9e4
118,695
ipynb
Jupyter Notebook
01. Python/04. Advanced/05.2 Python's property.ipynb
rostamimahdi1997/CS-Tutorial
19aeba72f012dab16a0db4a4ece308af401e769b
[ "MIT" ]
1
2022-02-21T11:51:34.000Z
2022-02-21T11:51:34.000Z
01. Python/04. Advanced/05.2 Python's property.ipynb
rostamimahdi1997/CS-Tutorial
19aeba72f012dab16a0db4a4ece308af401e769b
[ "MIT" ]
null
null
null
01. Python/04. Advanced/05.2 Python's property.ipynb
rostamimahdi1997/CS-Tutorial
19aeba72f012dab16a0db4a4ece308af401e769b
[ "MIT" ]
null
null
null
30.325754
1,071
0.57534
[ [ [ "<img src=\"../../images/banners/python-advanced.png\" width=\"600\"/>", "_____no_output_____" ], [ "# <img src=\"../../images/logos/python.png\" width=\"23\"/> Python's property(): Add Managed Attributes to Your Classes \n", "_____no_output_____" ], [ "## <img src=\"../../images/logos/toc.png\" width=\"20\"/> Table of Contents \n* [Managing Attributes in Your Classes](#managing_attributes_in_your_classes)\n * [The Getter and Setter Approach in Python](#the_getter_and_setter_approach_in_python)\n * [The Pythonic Approach](#the_pythonic_approach)\n* [Getting Started With Python’s `property()`](#getting_started_with_python’s_`property()`)\n * [Creating Attributes With `property()`](#creating_attributes_with_`property()`)\n * [Using `property()` as a Decorator](#using_`property()`_as_a_decorator)\n* [Providing Read-Only Attributes](#providing_read-only_attributes)\n* [Creating Read-Write Attributes](#creating_read-write_attributes)\n* [Providing Write-Only Attributes](#providing_write-only_attributes)\n* [Putting Python’s `property()` Into Action](#putting_python’s_`property()`_into_action)\n * [Validating Input Values](#validating_input_values)\n * [Providing Computed Attributes](#providing_computed_attributes)\n * [Caching Computed Attributes](#caching_computed_attributes)\n * [Logging Attribute Access and Mutation](#logging_attribute_access_and_mutation)\n * [Managing Attribute Deletion](#managing_attribute_deletion)\n * [Creating Backward-Compatible Class APIs](#creating_backward-compatible_class_apis)\n* [Overriding Properties in Subclasses](#overriding_properties_in_subclasses)\n* [Conclusion](#conclusion)\n\n---", "_____no_output_____" ], [ "With Python’s [`property()`](https://docs.python.org/3/library/functions.html#property), you can create **managed attributes** in your classes. You can use managed attributes, also known as **properties**, when you need to modify their internal implementation without changing the public [API](https://en.wikipedia.org/wiki/API) of the class. Providing stable APIs can help you avoid breaking your users’ code when they rely on your classes and objects.", "_____no_output_____" ], [ "Properties are arguably the most popular way to create managed attributes quickly and in the purest [Pythonic](https://realpython.com/learning-paths/writing-pythonic-code/) style.", "_____no_output_____" ], [ "**In this tutorial, you’ll learn how to:**", "_____no_output_____" ], [ "- Create **managed attributes** or **properties** in your classes\n- Perform **lazy attribute evaluation** and provide **computed attributes**\n- Avoid **setter** and **getter** methods to make your classes more Pythonic\n- Create **read-only**, **read-write**, and **write-only** properties\n- Create consistent and **backward-compatible APIs** for your classes\n", "_____no_output_____" ], [ "You’ll also write a few practical examples that use `property()` for validating input data, computing attribute values dynamically, logging your code, and more. 
To get the most out of this tutorial, you should know the basics of [object-oriented](https://realpython.com/python3-object-oriented-programming/) programming and [decorators](https://realpython.com/primer-on-python-decorators/) in Python.", "_____no_output_____" ], [ "<a class=\"anchor\" id=\"managing_attributes_in_your_classes\"></a>\n\n## Managing Attributes in Your Classes", "_____no_output_____" ], [ "When you define a class in an [object-oriented](https://en.wikipedia.org/wiki/Object-oriented_programming) programming language, you’ll probably end up with some instance and class [attributes](https://realpython.com/python3-object-oriented-programming/#class-and-instance-attributes). In other words, you’ll end up with variables that are accessible through the instance, class, or even both, depending on the language. Attributes represent or hold the internal [state](https://en.wikipedia.org/wiki/State_(computer_science)) of a given object, which you’ll often need to access and mutate.", "_____no_output_____" ], [ "Typically, you have at least two ways to manage an attribute. Either you can access and mutate the attribute directly or you can use **methods**. Methods are functions attached to a given class. They provide the behaviors and actions that an object can perform with its internal data and attributes.", "_____no_output_____" ], [ "If you expose your attributes to the user, then they become part of the public [API](https://en.wikipedia.org/wiki/API) of your classes. Your user will access and mutate them directly in their code. The problem comes when you need to change the internal implementation of a given attribute.", "_____no_output_____" ], [ "Say you’re working on a `Circle` class. The initial implementation has a single attribute called `.radius`. You finish coding the class and make it available to your end users. They start using `Circle` in their code to create a lot of awesome projects and applications. Good job!", "_____no_output_____" ], [ "Now suppose that you have an important user that comes to you with a new requirement. They don’t want `Circle` to store the radius any longer. They need a public `.diameter` attribute.", "_____no_output_____" ], [ "At this point, removing `.radius` to start using `.diameter` could break the code of some of your end users. You need to manage this situation in a way other than removing `.radius`.", "_____no_output_____" ], [ "Programming languages such as [Java](https://realpython.com/oop-in-python-vs-java/) and [C++](https://en.wikipedia.org/wiki/C%2B%2B) encourage you to never expose your attributes to avoid this kind of problem. Instead, you should provide **getter** and **setter** methods, also known as [accessors](https://en.wikipedia.org/wiki/Accessor_method) and [mutators](https://en.wikipedia.org/wiki/Mutator_method), respectively. These methods offer a way to change the internal implementation of your attributes without changing your public API.", "_____no_output_____" ], [ "In the end, these languages need getter and setter methods because they don’t provide a suitable way to change the internal implementation of an attribute if a given requirement changes. 
Changing the internal implementation would require an API modification, which can break your end users’ code.", "_____no_output_____" ], [ "<a class=\"anchor\" id=\"the_getter_and_setter_approach_in_python\"></a>\n\n### The Getter and Setter Approach in Python", "_____no_output_____" ], [ "Technically, there’s nothing that stops you from using getter and setter [methods](https://realpython.com/python3-object-oriented-programming/#instance-methods) in Python. Here’s how this approach would look:", "_____no_output_____" ] ], [ [ "# point.py\n\nclass Point:\n def __init__(self, x, y):\n self._x = x\n self._y = y\n\n def get_x(self):\n return self._x\n\n def set_x(self, value):\n self._x = value\n\n def get_y(self):\n return self._y\n\n def set_y(self, value):\n self._y = value", "_____no_output_____" ] ], [ [ "In this example, you create `Point` with two **non-public attributes** `._x` and `._y` to hold the [Cartesian coordinates](https://en.wikipedia.org/wiki/Cartesian_coordinate_system) of the point at hand.", "_____no_output_____" ], [ "To access and mutate the value of either `._x` or `._y`, you can use the corresponding getter and setter methods. Go ahead and save the above definition of `Point` in a Python [module](https://realpython.com/python-modules-packages/) and [import](https://realpython.com/python-import/) the class into your [interactive shell](https://realpython.com/interacting-with-python/).", "_____no_output_____" ], [ "Here’s how you can work with `Point` in your code:", "_____no_output_____" ] ], [ [ "point = Point(12, 5)\npoint.get_x()", "_____no_output_____" ], [ "point.get_y()", "_____no_output_____" ], [ "point.set_x(42)\npoint.get_x()", "_____no_output_____" ], [ "# Non-public attributes are still accessible\npoint._x", "_____no_output_____" ], [ "point._y", "_____no_output_____" ] ], [ [ "With `.get_x()` and `.get_y()`, you can access the current values of `._x` and `._y`. You can use the setter method to store a new value in the corresponding managed attribute. From this code, you can confirm that Python doesn’t restrict access to non-public attributes. Whether or not you do so is up to you.", "_____no_output_____" ], [ "<a class=\"anchor\" id=\"the_pythonic_approach\"></a>\n\n### The Pythonic Approach", "_____no_output_____" ], [ "Even though the example you just saw uses the Python coding style, it doesn’t look Pythonic. In the example, the getter and setter methods don’t perform any further processing with `._x` and `._y`. You can rewrite `Point` in a more concise and Pythonic way:", "_____no_output_____" ] ], [ [ "class Point:\n def __init__(self, x, y):\n self.x = x\n self.y = y", "_____no_output_____" ], [ "point = Point(12, 5)\npoint.x", "_____no_output_____" ], [ "point.y", "_____no_output_____" ], [ "point.x = 42\npoint.x", "_____no_output_____" ] ], [ [ "This code uncovers a fundamental principle. Exposing attributes to the end user is normal and common in Python. You don’t need to clutter your classes with getter and setter methods all the time, which sounds pretty cool! However, how can you handle requirement changes that would seem to involve API changes?", "_____no_output_____" ], [ "Unlike Java and C++, Python provides handy tools that allow you to change the underlying implementation of your attributes without changing your public API. 
The most popular approach is to turn your attributes into **properties**.", "_____no_output_____" ], [ "[Properties](https://en.wikipedia.org/wiki/Property_(programming)) represent an intermediate functionality between a plain attribute (or field) and a method. In other words, they allow you to create methods that behave like attributes. With properties, you can change how you compute the target attribute whenever you need to do so.", "_____no_output_____" ], [ "For example, you can turn both `.x` and `.y` into properties. With this change, you can continue accessing them as attributes. You’ll also have an underlying method holding `.x` and `.y` that will allow you to modify their internal implementation and perform actions on them right before your users access and mutate them.", "_____no_output_____" ], [ "The main advantage of Python properties is that they allow you to expose your attributes as part of your public API. If you ever need to change the underlying implementation, then you can turn the attribute into a property at any time without much pain. ", "_____no_output_____" ], [ "In the following sections, you’ll learn how to create properties in Python.", "_____no_output_____" ], [ "<a class=\"anchor\" id=\"getting_started_with_python’s_`property()`\"></a>\n\n## Getting Started With Python’s `property()`", "_____no_output_____" ], [ "Python’s [`property()`](https://docs.python.org/3/library/functions.html#property) is the Pythonic way to avoid formal getter and setter methods in your code. This function allows you to turn [class attributes](https://realpython.com/python3-object-oriented-programming/#class-and-instance-attributes) into **properties** or **managed attributes**. Since `property()` is a built-in function, you can use it without importing anything. Additionally, `property()` was [implemented in C](https://github.com/python/cpython/blob/main/Objects/descrobject.c#L1460) to ensure optimal performance.", "_____no_output_____" ], [ "With `property()`, you can attach getter and setter methods to given class attributes. This way, you can handle the internal implementation for that attribute without exposing getter and setter methods in your API. You can also specify a way to handle attribute deletion and provide an appropriate [docstring](https://realpython.com/documenting-python-code/) for your properties.", "_____no_output_____" ], [ "Here’s the full signature of `property()`:", "_____no_output_____" ] ], [ [ "property(fget=None, fset=None, fdel=None, doc=None)", "_____no_output_____" ] ], [ [ "The first two arguments take function objects that will play the role of getter (`fget`) and setter (`fset`) methods. Here’s a summary of what each argument does:", "_____no_output_____" ], [ "|Argument|Description|\n|:--|:--|\n|`fget`|Function that returns the value of the managed attribute|\n|`fset`|Function that allows you to set the value of the managed attribute|\n|`fdel`|Function to define how the managed attribute handles deletion|\n|`doc`|String representing the property’s docstring|\n", "_____no_output_____" ], [ "The [return](https://realpython.com/python-return-statement/) value of `property()` is the managed attribute itself. If you access the managed attribute, as in `obj.attr`, then Python automatically calls `fget()`. If you assign a new value to the attribute, as in `obj.attr = value`, then Python calls `fset()` using the input `value` as an argument. 
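In other words, ordinary attribute syntax on a property dispatches to these functions behind the scenes. Schematically, and glossing over the exact descriptor call chain, the first two mappings look like this:

```python
value = obj.attr   # roughly: value = type(obj).attr.fget(obj)
obj.attr = value   # roughly: type(obj).attr.fset(obj, value)
```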
Finally, if you run a `del obj.attr` statement, then Python automatically calls `fdel()`.", "_____no_output_____" ], [ "You can use `doc` to provide an appropriate docstring for your properties. You and your fellow programmers will be able to read that docstring using Python’s [`help()`](https://docs.python.org/3/library/functions.html#help). The `doc` argument is also useful when you’re working with [code editors and IDEs](https://realpython.com/python-ides-code-editors-guide/) that support docstring access.", "_____no_output_____" ], [ "You can use `property()` either as a [function](https://realpython.com/defining-your-own-python-function/) or a [decorator](https://realpython.com/primer-on-python-decorators/) to build your properties. In the following two sections, you’ll learn how to use both approaches. However, you should know up front that the decorator approach is more popular in the Python community.", "_____no_output_____" ], [ "<a class=\"anchor\" id=\"creating_attributes_with_`property()`\"></a>\n\n### Creating Attributes With `property()`", "_____no_output_____" ], [ "You can create a property by calling `property()` with an appropriate set of arguments and assigning its return value to a class attribute. All the arguments to `property()` are optional. However, you typically provide at least a **setter function**.", "_____no_output_____" ], [ "The following example shows how to create a `Circle` class with a handy property to manage its radius:", "_____no_output_____" ] ], [ [ "# circle.py\n\nclass Circle:\n def __init__(self, radius):\n self._radius = radius\n\n def _get_radius(self):\n print(\"Get radius\")\n return self._radius\n\n def _set_radius(self, value):\n print(\"Set radius\")\n self._radius = value\n\n def _del_radius(self):\n print(\"Delete radius\")\n del self._radius\n\n radius = property(\n fget=_get_radius,\n fset=_set_radius,\n fdel=_del_radius,\n doc=\"The radius property.\"\n )", "_____no_output_____" ] ], [ [ "In this code snippet, you create `Circle`. The class initializer, `.__init__()`, takes `radius` as an argument and stores it in a non-public attribute called `._radius`. Then you define three non-public methods:", "_____no_output_____" ], [ "1. **`._get_radius()`** returns the current value of `._radius`\n2. **`._set_radius()`** takes `value` as an argument and assigns it to `._radius`\n3. **`._del_radius()`** deletes the instance attribute `._radius`\n", "_____no_output_____" ], [ "Once you have these three methods in place, you create a class attribute called `.radius` to store the property object. To initialize the property, you pass the three methods as arguments to `property()`. You also pass a suitable docstring for your property.", "_____no_output_____" ], [ "In this example, you use [keyword arguments](https://realpython.com/defining-your-own-python-function/#keyword-arguments) to improve the code readability and prevent confusion. That way, you know exactly which method goes into each argument.", "_____no_output_____" ], [ "To give `Circle` a try, run the following code:", "_____no_output_____" ] ], [ [ "circle = Circle(42.0)\n\ncircle.radius", "Get radius\n" ], [ "circle.radius = 100.0\n\ncircle.radius", "Set radius\nGet radius\n" ], [ "del circle.radius\n\ncircle.radius", "Delete radius\nGet radius\n" ], [ "help(circle)", "Help on Circle in module __main__ object:\n\nclass Circle(builtins.object)\n | Circle(radius)\n | \n | Methods defined here:\n | \n | __init__(self, radius)\n | Initialize self. 
See help(type(self)) for accurate signature.\n | \n | ----------------------------------------------------------------------\n | Data descriptors defined here:\n | \n | __dict__\n | dictionary for instance variables (if defined)\n | \n | __weakref__\n | list of weak references to the object (if defined)\n | \n | radius\n | The radius property.\n\n" ] ], [ [ "The `.radius` property hides the non-public instance attribute `._radius`, which is now your managed attribute in this example. You can access and assign `.radius` directly. Internally, Python automatically calls `._get_radius()` and `._set_radius()` when needed. When you execute `del circle.radius`, Python calls `._del_radius()`, which deletes the underlying `._radius`.", "_____no_output_____" ], [ "<div class=\"alert alert-success\" role=\"alert\">\n Properties are <strong>class attributes</strong> that manage <strong>instance attributes</strong>.\n</div>", "_____no_output_____" ], [ "You can think of a property as a collection of methods bundled together. If you inspect `.radius` carefully, then you can find the raw methods you provided as the `fget`, `fset`, and `fdel` arguments:", "_____no_output_____" ] ], [ [ "Circle.radius.fget", "_____no_output_____" ], [ "Circle.radius.fset", "_____no_output_____" ], [ "Circle.radius.fdel", "_____no_output_____" ], [ "dir(Circle.radius)", "_____no_output_____" ] ], [ [ "You can access the getter, setter, and deleter methods in a given property through the corresponding `.fget`, `.fset`, and `.fdel`.", "_____no_output_____" ], [ "Properties are also **overriding descriptors**. If you use [`dir()`](https://realpython.com/python-scope-legb-rule/#dir) to check the internal members of a given property, then you’ll find `.__set__()` and `.__get__()` in the list. These methods provide a default implementation of the [descriptor protocol](https://docs.python.org/3/howto/descriptor.html#descriptor-protocol).", "_____no_output_____" ], [ "The default implementation of `.__set__()`, for example, runs when you don’t provide a custom setter method. In this case, you get an `AttributeError` because there’s no way to set the underlying property.", "_____no_output_____" ], [ "<a class=\"anchor\" id=\"using_`property()`_as_a_decorator\"></a>\n\n### Using `property()` as a Decorator", "_____no_output_____" ], [ "Decorators are everywhere in Python. They’re functions that take another function as an argument and return a new function with added functionality. With a decorator, you can attach pre- and post-processing operations to an existing function.", "_____no_output_____" ], [ "When [Python 2.2](https://docs.python.org/3/whatsnew/2.2.html#attribute-access) introduced `property()`, the decorator syntax wasn’t available. The only way to define properties was to pass getter, setter, and deleter methods, as you learned before. The decorator syntax was added in [Python 2.4](https://docs.python.org/3/whatsnew/2.4.html#pep-318-decorators-for-functions-and-methods), and nowadays, using `property()` as a decorator is the most popular practice in the Python community.", "_____no_output_____" ], [ "The decorator syntax consists of placing the name of the decorator function with a leading `@` symbol right before the definition of the function you want to decorate:", "_____no_output_____" ] ], [ [ "@decorator\ndef func(a):\n return a", "_____no_output_____" ] ], [ [ "In this code fragment, `@decorator` can be a function or class intended to decorate `func()`. 
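For instance, here is a tiny concrete decorator (a hypothetical `announce` wrapper, not one of this tutorial’s examples) that attaches a post-processing step to a function:

```python
def announce(func):
    def wrapper(*args, **kwargs):
        result = func(*args, **kwargs)
        print(f"{func.__name__} returned {result!r}")  # post-processing step
        return result
    return wrapper

@announce
def add(a, b):
    return a + b

add(2, 3)  # Prints: add returned 5
```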
This syntax is equivalent to the following:", "_____no_output_____" ] ], [ [ "def func(a):\n return a\n\nfunc = decorator(func)", "_____no_output_____" ] ], [ [ "The final line of code reassigns the name `func` to hold the result of calling `decorator(func)`. Note that this is the same syntax you used to create a property in the section above.", "_____no_output_____" ], [ "Python’s `property()` can also work as a decorator, so you can use the `@property` syntax to create your properties quickly:", "_____no_output_____" ] ], [ [ "# circle.py\n\nclass Circle:\n def __init__(self, radius):\n self._radius = radius\n\n @property\n def radius(self):\n \"\"\"The radius property.\"\"\"\n print(\"Get radius\")\n return self._radius\n\n @radius.setter\n def radius(self, value):\n print(\"Set radius\")\n self._radius = value\n\n @radius.deleter\n def radius(self):\n print(\"Delete radius\")\n del self._radius", "_____no_output_____" ] ], [ [ "This code looks pretty different from the getter and setter methods approach. `Circle` now looks more Pythonic and clean. You don’t need to use method names such as `._get_radius()`, `._set_radius()`, and `._del_radius()` anymore. Now you have three methods with the same clean and descriptive attribute-like name. How is that possible?", "_____no_output_____" ], [ "The decorator approach for creating properties requires defining a first method using the public name for the underlying managed attribute, which is `.radius` in this case. This method should implement the getter logic.", "_____no_output_____" ], [ "Then you define the setter method for `.radius`. In this case, the syntax is fairly different. Instead of using `@property` again, you use `@radius.setter`. Why do you need to do that? Take another look at the `dir()` output:", "_____no_output_____" ] ], [ [ "dir(Circle.radius)", "_____no_output_____" ] ], [ [ "Besides `.fget`, `.fset`, `.fdel`, and a bunch of other special attributes and methods, `property` also provides `.deleter()`, `.getter()`, and `.setter()`. These three methods each return a new property.", "_____no_output_____" ], [ "When you decorate the second `.radius()` method with `@radius.setter`, you create a new property and reassign the class-level name `.radius` (line 8) to hold it. This new property contains the same set of methods of the initial property at line 8 with the addition of the new setter method provided on line 14. Finally, the decorator syntax reassigns the new property to the `.radius` class-level name.", "_____no_output_____" ], [ "The mechanism to define the deleter method is similar. This time, you need to use the `@radius.deleter` decorator. At the end of the process, you get a full-fledged property with the getter, setter, and deleter methods.", "_____no_output_____" ], [ "Finally, how can you provide suitable docstrings for your properties when you use the decorator approach? 
If you check `Circle` again, you’ll note that you already did so by adding a docstring to the getter method on line 9.", "_____no_output_____" ], [ "The new `Circle` implementation works the same as the example in the section above:", "_____no_output_____" ] ], [ [ "circle = Circle(42.0)\n\ncircle.radius", "Get radius\n" ], [ "circle.radius = 100.0\n\ncircle.radius", "Set radius\nGet radius\n" ], [ "del circle.radius", "Delete radius\n" ], [ "# This should raise AttributeError cause we deleted circle.radius above\ncircle.radius", "Get radius\n" ], [ "help(circle)", "Help on Circle in module __main__ object:\n\nclass Circle(builtins.object)\n | Circle(radius)\n | \n | Methods defined here:\n | \n | __init__(self, radius)\n | Initialize self. See help(type(self)) for accurate signature.\n | \n | ----------------------------------------------------------------------\n | Data descriptors defined here:\n | \n | __dict__\n | dictionary for instance variables (if defined)\n | \n | __weakref__\n | list of weak references to the object (if defined)\n | \n | radius\n | The radius property.\n\n" ] ], [ [ "You don’t need to use a pair of parentheses for calling `.radius()` as a method. Instead, you can access `.radius` as you would access a regular attribute, which is the primary use of properties. They allow you to treat methods as attributes, and they take care of calling the underlying set of methods automatically.", "_____no_output_____" ], [ "Here’s a recap of some important points to remember when you’re creating properties with the decorator approach:\n\n- The `@property` decorator must decorate the **getter method**.\n- The docstring must go in the **getter method**.\n- The **setter and deleter methods** must be decorated with the name of the getter method plus `.setter` and `.deleter`, respectively.", "_____no_output_____" ], [ "Up to this point, you’ve created managed attributes using `property()` as a function and as a decorator. If you check your `Circle` implementations so far, then you’ll note that their getter and setter methods don’t add any real extra processing on top of your attributes.", "_____no_output_____" ], [ "In general, you should avoid turning attributes that don’t require extra processing into properties. Using properties in those situations can make your code:", "_____no_output_____" ], [ "- Unnecessarily verbose\n- Confusing to other developers\n- Slower than code based on regular attributes\n", "_____no_output_____" ], [ "Unless you need something more than bare attribute access, don’t write properties. They’re a waste of [CPU](https://en.wikipedia.org/wiki/Central_processing_unit) time, and more importantly, they’re a waste of *your* time. Finally, you should avoid writing explicit getter and setter methods and then wrapping them in a property. Instead, use the `@property` decorator. That’s currently the most Pythonic way to go.", "_____no_output_____" ], [ "<a class=\"anchor\" id=\"providing_read-only_attributes\"></a>\n\n## Providing Read-Only Attributes", "_____no_output_____" ], [ "Probably the most elementary use case of `property()` is to provide **read-only attributes** in your classes. Say you need an [immutable](https://docs.python.org/3/glossary.html#term-immutable) `Point` class that doesn’t allow the user to mutate the original value of its coordinates, `x` and `y`. 
To achieve this goal, you can create `Point` like in the following example:", "_____no_output_____" ] ], [ [ "# point.py\n\nclass Point:\n def __init__(self, x, y):\n self._x = x\n self._y = y\n\n @property\n def x(self):\n return self._x\n\n @property\n def y(self):\n return self._y", "_____no_output_____" ] ], [ [ "Here, you store the input arguments in the attributes `._x` and `._y`. As you already learned, using the leading underscore (`_`) in names tells other developers that they’re non-public attributes and shouldn’t be accessed using dot notation, such as in `point._x`. Finally, you define two getter methods and decorate them with `@property`.", "_____no_output_____" ], [ "Now you have two read-only properties, `.x` and `.y`, as your coordinates:", "_____no_output_____" ] ], [ [ "point = Point(12, 5)", "_____no_output_____" ], [ "# Read coordinates\npoint.x", "_____no_output_____" ], [ "point.y", "_____no_output_____" ], [ "# Write coordinates\npoint.x = 42", "_____no_output_____" ] ], [ [ "Here, `point.x` and `point.y` are bare-bone examples of read-only properties. Their behavior relies on the underlying descriptor that `property` provides. As you already saw, the default `.__set__()` implementation raises an `AttributeError` when you don’t define a proper setter method.", "_____no_output_____" ], [ "You can take this implementation of `Point` a little bit further and provide explicit setter methods that raise a custom exception with more elaborate and specific messages:", "_____no_output_____" ] ], [ [ "# point.py\n\nclass WriteCoordinateError(Exception):\n pass\n\nclass Point:\n def __init__(self, x, y):\n self._x = x\n self._y = y\n\n @property\n def x(self):\n return self._x\n\n @x.setter\n def x(self, value):\n raise WriteCoordinateError(\"x coordinate is read-only\")\n\n @property\n def y(self):\n return self._y\n\n @y.setter\n def y(self, value):\n raise WriteCoordinateError(\"y coordinate is read-only\")", "_____no_output_____" ] ], [ [ "In this example, you define a custom exception called `WriteCoordinateError`. This exception allows you to customize the way you implement your immutable `Point` class. Now, both setter methods raise your custom exception with a more explicit message. Go ahead and give your improved `Point` a try!", "_____no_output_____" ], [ "<a class=\"anchor\" id=\"creating_read-write_attributes\"></a>\n\n## Creating Read-Write Attributes", "_____no_output_____" ], [ "You can also use `property()` to provide managed attributes with **read-write** capabilities. In practice, you just need to provide the appropriate getter method (“read”) and setter method (“write”) to your properties in order to create read-write managed attributes.", "_____no_output_____" ], [ "Say you want your `Circle` class to have a `.diameter` attribute. However, taking the radius and the diameter in the class initializer seems unnecessary because you can compute the one using the other. Here’s a `Circle` that manages `.radius` and `.diameter` as read-write attributes:", "_____no_output_____" ] ], [ [ "# circle.py\n\nimport math\n\nclass Circle:\n def __init__(self, radius):\n self.radius = radius\n\n @property\n def radius(self):\n return self._radius\n\n @radius.setter\n def radius(self, value):\n self._radius = float(value)\n\n @property\n def diameter(self):\n return self.radius * 2\n\n @diameter.setter\n def diameter(self, value):\n self.radius = value / 2", "_____no_output_____" ] ], [ [ "Here, you create a `Circle` class with a read-write `.radius`. 
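Before stepping through the details, a quick check shows what the setter buys you in practice: every value assigned as a radius ends up stored as a `float`:

```python
circle = Circle(42)          # Pass an int...
print(type(circle.radius))   # <class 'float'> -- the setter coerced it
circle.radius = "7.5"        # Numeric strings are converted too
print(circle.radius)         # 7.5
```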
In this case, the getter method just returns the radius value. The setter method converts the input value for the radius and assigns it to the non-public `._radius`, which is the variable you use to store the final data.", "_____no_output_____" ], [ "> **Note:** There is a subtle detail to note in this new implementation of `Circle` and its `.radius` attribute. In this case, the class initializer assigns the input value to the `.radius` property directly instead of storing it in a dedicated non-public attribute, such as `._radius`.", "_____no_output_____" ], [ "Why? Because you need to make sure that every value provided as a radius, including the initialization value, goes through the setter method and gets converted to a floating-point number.", "_____no_output_____" ], [ "`Circle` also implements a `.diameter` attribute as a property. The getter method computes the diameter using the radius. The setter method does something curious. Instead of storing the input diameter `value` in a dedicated attribute, it calculates the radius and writes the result into `.radius`.", "_____no_output_____" ], [ "Here’s how your `Circle` works:", "_____no_output_____" ] ], [ [ "circle = Circle(42)\ncircle.radius", "_____no_output_____" ], [ "circle.diameter", "_____no_output_____" ], [ "circle.diameter = 100\ncircle.diameter", "_____no_output_____" ], [ "circle.radius", "_____no_output_____" ] ], [ [ "Both `.radius` and `.diameter` work as normal attributes in these examples, providing a clean and Pythonic public API for your `Circle` class.", "_____no_output_____" ], [ "<a class=\"anchor\" id=\"providing_write-only_attributes\"></a>\n\n## Providing Write-Only Attributes", "_____no_output_____" ], [ "You can also create **write-only** attributes by tweaking how you implement the getter method of your properties. For example, you can make your getter method raise an exception every time a user accesses the underlying attribute value.", "_____no_output_____" ], [ "Here’s an example of handling passwords with a write-only property:", "_____no_output_____" ] ], [ [ "# users.py\n\nimport hashlib\nimport os\n\nclass User:\n def __init__(self, name, password):\n self.name = name\n self.password = password\n\n @property\n def password(self):\n raise AttributeError(\"Password is write-only\")\n\n @password.setter\n def password(self, plaintext):\n salt = os.urandom(32)\n self._hashed_password = hashlib.pbkdf2_hmac(\n \"sha256\", plaintext.encode(\"utf-8\"), salt, 100_000\n )", "_____no_output_____" ] ], [ [ "The initializer of `User` takes a username and a password as arguments and stores them in `.name` and `.password`, respectively. You use a property to manage how your class processes the input password. The getter method raises an `AttributeError` whenever a user tries to retrieve the current password. This turns `.password` into a write-only attribute:", "_____no_output_____" ] ], [ [ "john = User(\"John\", \"secret\")\n\njohn._hashed_password", "_____no_output_____" ], [ "john.password", "_____no_output_____" ], [ "john.password = \"supersecret\"\njohn._hashed_password", "_____no_output_____" ] ], [ [ "In this example, you create `john` as a `User` instance with an initial password. The setter method hashes the password and stores it in `._hashed_password`. Note that when you try to access `.password` directly, you get an `AttributeError`. 
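One practical gap in the class as written: the random salt is discarded after hashing, so there is no way to verify a login attempt later. Here is a hedged sketch of a verification-friendly variant; the `._salt` attribute and the `check_password()` helper are hypothetical additions, not part of the original example:

```python
import hashlib
import hmac
import os

class User:
    def __init__(self, name, password):
        self.name = name
        self.password = password

    @property
    def password(self):
        raise AttributeError("Password is write-only")

    @password.setter
    def password(self, plaintext):
        self._salt = os.urandom(32)  # Keep the salt so the hash can be recomputed
        self._hashed_password = hashlib.pbkdf2_hmac(
            "sha256", plaintext.encode("utf-8"), self._salt, 100_000
        )

    def check_password(self, plaintext):
        candidate = hashlib.pbkdf2_hmac(
            "sha256", plaintext.encode("utf-8"), self._salt, 100_000
        )
        # Constant-time comparison avoids leaking timing information
        return hmac.compare_digest(candidate, self._hashed_password)
```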
Finally, assigning a new value to `.password` triggers the setter method and creates a new hashed password.", "_____no_output_____" ], [ "In the setter method of `.password`, you use `os.urandom()` to generate a 32-byte random [string](https://realpython.com/python-strings/) as your hashing function’s [salt](https://en.wikipedia.org/wiki/Salt_(cryptography)). To generate the hashed password, you use [`hashlib.pbkdf2_hmac()`](https://docs.python.org/3/library/hashlib.html#hashlib.pbkdf2_hmac). Then you store the resulting hashed password in the non-public attribute `._hashed_password`. Doing so ensures that you never save the plaintext password in any retrievable attribute.", "_____no_output_____" ], [ "<a class=\"anchor\" id=\"putting_python’s_`property()`_into_action\"></a>\n\n## Putting Python’s `property()` Into Action", "_____no_output_____" ], [ "So far, you’ve learned how to use Python’s `property()` built-in function to create managed attributes in your classes. You used `property()` as a function and as a decorator and learned about the differences between these two approaches. You also learned how to create read-only, read-write, and write-only attributes.", "_____no_output_____" ], [ "In the following sections, you’ll code a few examples that will help you get a better practical understanding of common use cases of `property()`.", "_____no_output_____" ], [ "<a class=\"anchor\" id=\"validating_input_values\"></a>\n\n### Validating Input Values", "_____no_output_____" ], [ "One of the most common use cases of `property()` is building managed attributes that validate the input data before storing or even accepting it as a secure input. [Data validation](https://en.wikipedia.org/wiki/Data_validation) is a common requirement in code that takes input from users or other information sources that you consider untrusted.", "_____no_output_____" ], [ "Python’s `property()` provides a quick and reliable tool for dealing with input data validation. For example, thinking back to the `Point` example, you may require the values of `.x` and `.y` to be valid [numbers](https://realpython.com/python-numbers/). Since your users are free to enter any type of data, you need to make sure that your point only accepts numbers.", "_____no_output_____" ], [ "Here’s an implementation of `Point` that manages this requirement:", "_____no_output_____" ] ], [ [ "# point.py\n\nclass Point:\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n @property\n def x(self):\n return self._x\n\n @x.setter\n def x(self, value):\n try:\n self._x = float(value)\n print(\"Validated!\")\n except ValueError:\n raise ValueError('\"x\" must be a number') from None\n\n @property\n def y(self):\n return self._y\n\n @y.setter\n def y(self, value):\n try:\n self._y = float(value)\n print(\"Validated!\")\n except ValueError:\n raise ValueError('\"y\" must be a number') from None", "_____no_output_____" ] ], [ [ "The setter methods of `.x` and `.y` use [`try` … `except`](https://realpython.com/python-exceptions/#the-try-and-except-block-handling-exceptions) blocks that validate input data using the Python [EAFP](https://docs.python.org/3/glossary.html#term-eafp) style. If the call to `float()` succeeds, then the input data is valid, and you get `Validated!` on your screen. 
If `float()` raises a `ValueError`, then the user gets a `ValueError` with a more specific message.", "_____no_output_____" ], [ "It’s important to note that assigning the `.x` and `.y` properties directly in `.__init__()` ensures that the validation also occurs during object initialization. Not doing so is a common mistake when using `property()` for data validation.", "_____no_output_____" ], [ "Here’s how your `Point` class works now:", "_____no_output_____" ] ], [ [ "point = Point(12, 5)", "Validated!\nValidated!\n" ], [ "point.x", "_____no_output_____" ], [ "point.y", "_____no_output_____" ], [ "point.x = 42\n\npoint.x", "Validated!\n" ], [ "point.y = 100.0\n\npoint.y", "Validated!\n" ], [ "point.x = \"one\"", "_____no_output_____" ], [ "point.y = \"1o\"", "_____no_output_____" ] ], [ [ "If you assign `.x` and `.y` values that `float()` can turn into floating-point numbers, then the validation is successful, and the value is accepted. Otherwise, you get a `ValueError`.", "_____no_output_____" ], [ "This implementation of `Point` uncovers a fundamental weakness of `property()`. Did you spot it?", "_____no_output_____" ], [ "That’s it! You have repetitive code that follows specific patterns. This repetition breaks the [DRY (Don’t Repeat Yourself)](https://en.wikipedia.org/wiki/Don%27t_repeat_yourself) principle, so you would want to [refactor](https://realpython.com/python-refactoring/) this code to avoid it. To do so, you can abstract out the repetitive logic using a descriptor:", "_____no_output_____" ] ], [ [ "# point.py\n\nclass Coordinate:\n def __set_name__(self, owner, name):\n self._name = name\n\n def __get__(self, instance, owner):\n return instance.__dict__[self._name]\n\n def __set__(self, instance, value):\n try:\n instance.__dict__[self._name] = float(value)\n print(\"Validated!\")\n except ValueError:\n raise ValueError(f'\"{self._name}\" must be a number') from None\n\nclass Point:\n x = Coordinate()\n y = Coordinate()\n\n def __init__(self, x, y):\n self.x = x\n self.y = y", "_____no_output_____" ] ], [ [ "Now your code is a bit shorter. You managed to remove repetitive code by defining `Coordinate` as a [descriptor](https://realpython.com/python-descriptors/) that manages your data validation in a single place. The code works just like your earlier implementation. Go ahead and give it a try!", "_____no_output_____" ], [ "In general, if you find yourself copying and pasting property definitions all around your code or if you spot repetitive code like in the example above, then you should consider using a proper descriptor.", "_____no_output_____" ], [ "<a class=\"anchor\" id=\"providing_computed_attributes\"></a>\n\n### Providing Computed Attributes", "_____no_output_____" ], [ "If you need an attribute that builds its value dynamically whenever you access it, then `property()` is the way to go. These kinds of attributes are commonly known as **computed attributes**. They’re handy when you need them to look like [eager](https://en.wikipedia.org/wiki/Eager_evaluation) attributes, but you want them to be [lazy](https://en.wikipedia.org/wiki/Lazy_evaluation).", "_____no_output_____" ], [ "The main reason for creating eager attributes is to optimize computation costs when you access the attribute often. 
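An eager attribute simply pays that cost up front, at creation time. A quick hypothetical sketch, not one of this tutorial’s examples:

```python
class EagerRectangle:
    def __init__(self, width, height):
        self.width = width
        self.height = height
        self.area = width * height  # Computed once, immediately
        # Caveat: .area goes stale if .width or .height is mutated later
```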
On the other hand, if you rarely use a given attribute, then a lazy property can postpone its computation until needed, which can make your programs more efficient.", "_____no_output_____" ], [ "Here’s an example of how to use `property()` to create a computed attribute `.area` in a `Rectangle` class:", "_____no_output_____" ] ], [ [ "class Rectangle:\n def __init__(self, width, height):\n self.width = width\n self.height = height\n\n @property\n def area(self):\n return self.width * self.height", "_____no_output_____" ] ], [ [ "In this example, the `Rectangle` initializer takes `width` and `height` as arguments and stores them in regular instance attributes. The read-only property `.area` computes and returns the area of the current rectangle every time you access it.", "_____no_output_____" ], [ "Another common use case of properties is to provide an auto-formatted value for a given attribute:", "_____no_output_____" ] ], [ [ "class Product:\n def __init__(self, name, price):\n self._name = name\n self._price = float(price)\n\n @property\n def price(self):\n return f\"${self._price:,.2f}\"", "_____no_output_____" ] ], [ [ "In this example, `.price` is a property that formats and returns the price of a particular product. To provide a currency-like format, you use an [f-string](https://realpython.com/python-f-strings/) with appropriate formatting options.", "_____no_output_____" ], [ "As a final example of computed attributes, say you have a `Point` class that uses `.x` and `.y` as Cartesian coordinates. You want to provide [polar coordinates](https://en.wikipedia.org/wiki/Polar_coordinate_system) for your point so that you can use them in a few computations. The polar coordinate system represents each point using the distance to the origin and the angle with the horizontal coordinate axis.", "_____no_output_____" ], [ "Here’s a Cartesian coordinates `Point` class that also provides computed polar coordinates:", "_____no_output_____" ] ], [ [ "# point.py\n\nimport math\n\nclass Point:\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n @property\n def distance(self):\n return round(math.dist((0, 0), (self.x, self.y)))\n\n @property\n def angle(self):\n return round(math.degrees(math.atan(self.y / self.x)), 1)\n\n def as_cartesian(self):\n return self.x, self.y\n\n def as_polar(self):\n return self.distance, self.angle", "_____no_output_____" ] ], [ [ "This example shows how to compute the distance and angle of a given `Point` object using its `.x` and `.y` Cartesian coordinates. Here’s how this code works in practice:", "_____no_output_____" ] ], [ [ "point = Point(12, 5)", "_____no_output_____" ], [ "point.x", "_____no_output_____" ], [ "point.y", "_____no_output_____" ], [ "point.distance", "_____no_output_____" ], [ "point.angle", "_____no_output_____" ], [ "point.as_cartesian()", "_____no_output_____" ], [ "point.as_polar()", "_____no_output_____" ] ], [ [ "When it comes to providing computed or lazy attributes, `property()` is a pretty handy tool. However, if you’re creating an attribute that you use frequently, then computing it every time can be costly and wasteful. A good strategy is to [cache](https://realpython.com/lru-cache-python/) them once the computation is done.", "_____no_output_____" ], [ "<a class=\"anchor\" id=\"caching_computed_attributes\"></a>\n\n### Caching Computed Attributes", "_____no_output_____" ], [ "Sometimes you have a given computed attribute that you use frequently. Constantly repeating the same computation may be unnecessary and expensive. 
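You can make the repeated cost visible with a quick timing check. This is a hedged sketch; the `Report.summary` property is hypothetical:

```python
import timeit

class Report:
    @property
    def summary(self):
        # Stand-in for an expensive computation
        return sum(i * i for i in range(100_000))

report = Report()
# Every attribute access repeats the full computation from scratch
print(timeit.timeit(lambda: report.summary, number=100))
```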
To work around this problem, you can cache the computed value and save it in a non-public dedicated attribute for further reuse.", "_____no_output_____" ], [ "To prevent unexpected behaviors, you need to think of the mutability of the input data. If you have a property that computes its value from constant input values, then the result will never change. In that case, you can compute the value just once:", "_____no_output_____" ] ], [ [ "# circle.py\n\nfrom time import sleep\n\nclass Circle:\n def __init__(self, radius):\n self.radius = radius\n self._diameter = None\n\n @property\n def diameter(self):\n if self._diameter is None:\n sleep(0.5) # Simulate a costly computation\n self._diameter = self.radius * 2\n return self._diameter", "_____no_output_____" ] ], [ [ "Even though this implementation of `Circle` properly caches the computed diameter, it has the drawback that if you ever change the value of `.radius`, then `.diameter` won’t return a correct value:", "_____no_output_____" ] ], [ [ "circle = Circle(42.0)\ncircle.radius", "_____no_output_____" ], [ "circle.diameter # With delay", "_____no_output_____" ], [ "circle.diameter # Without delay", "_____no_output_____" ], [ "circle.radius = 100.0\ncircle.diameter # Wrong diameter", "_____no_output_____" ] ], [ [ "In these examples, you create a circle with a radius equal to `42.0`. The `.diameter` property computes its value only the first time you access it. That’s why you see a delay in the first execution and no delay in the second. Note that even though you change the value of the radius, the diameter stays the same.", "_____no_output_____" ], [ "If the input data for a computed attribute mutates, then you need to recalculate the attribute:", "_____no_output_____" ] ], [ [ "# circle.py\n\nfrom time import sleep\n\nclass Circle:\n def __init__(self, radius):\n self.radius = radius\n\n @property\n def radius(self):\n return self._radius\n\n @radius.setter\n def radius(self, value):\n self._diameter = None\n self._radius = value\n\n @property\n def diameter(self):\n if self._diameter is None:\n sleep(0.5) # Simulate a costly computation\n self._diameter = self._radius * 2\n return self._diameter", "_____no_output_____" ] ], [ [ "The setter method of the `.radius` property resets `._diameter` to [`None`](https://realpython.com/null-in-python/) every time you change the value of the radius. With this little update, `.diameter` recalculates its value the first time you access it after every mutation of `.radius`:", "_____no_output_____" ] ], [ [ "circle = Circle(42.0)", "_____no_output_____" ], [ "circle.radius", "_____no_output_____" ], [ "circle.diameter # With delay", "_____no_output_____" ], [ "circle.diameter # Without delay", "_____no_output_____" ], [ "circle.radius = 100.0\ncircle.diameter # With delay", "_____no_output_____" ], [ "circle.diameter # Without delay", "_____no_output_____" ] ], [ [ "Cool! `Circle` works correctly now! It computes the diameter the first time you access it and also every time you change the radius.", "_____no_output_____" ], [ "Another option to create cached properties is to use [`functools.cached_property()`](https://docs.python.org/3/library/functools.html#functools.cached_property) from the standard library. This function works as a decorator that allows you to transform a method into a cached property. 
The property computes its value only once and caches it as a normal attribute during the lifetime of the instance:", "_____no_output_____" ] ], [ [ "# circle.py\n\nfrom functools import cached_property\nfrom time import sleep\n\nclass Circle:\n def __init__(self, radius):\n self.radius = radius\n\n @cached_property\n def diameter(self):\n sleep(0.5) # Simulate a costly computation\n return self.radius * 2", "_____no_output_____" ] ], [ [ "Here, `.diameter` computes and caches its value the first time you access it. This kind of implementation is suitable for those computations in which the input values don’t mutate. Here’s how it works:", "_____no_output_____" ] ], [ [ "circle = Circle(42.0)\ncircle.diameter # With delay", "_____no_output_____" ], [ "circle.diameter # Without delay", "_____no_output_____" ], [ "circle.radius = 100\ncircle.diameter # Wrong diameter", "_____no_output_____" ], [ "# Allow direct assignment\ncircle.diameter = 200\ncircle.diameter # Cached value", "_____no_output_____" ] ], [ [ "When you access `.diameter`, you get its computed value. That value remains the same from this point on. However, unlike `property()`, `cached_property()` doesn’t block attribute mutations unless you provide a proper setter method. That’s why you can update the diameter to `200` in the last couple of lines.", "_____no_output_____" ], [ "If you want to create a cached property that doesn’t allow modification, then you can use `property()` and [`functools.cache()`](https://docs.python.org/3/library/functools.html#functools.cache) like in the following example:", "_____no_output_____" ] ], [ [ "# circle.py\n\n# this import works in python 3.9+\nfrom functools import cache\nfrom time import sleep\n\nclass Circle:\n def __init__(self, radius):\n self.radius = radius\n\n @property\n @cache\n def diameter(self):\n sleep(0.5) # Simulate a costly computation\n return self.radius * 2", "_____no_output_____" ] ], [ [ "This code stacks `@property` on top of `@cache`. The combination of both decorators builds a cached property that prevents mutations:", "_____no_output_____" ] ], [ [ "circle = Circle(42.0)", "_____no_output_____" ], [ "circle.diameter # With delay", "_____no_output_____" ], [ "circle.diameter # Without delay", "_____no_output_____" ], [ "circle.radius = 100\ncircle.diameter", "_____no_output_____" ], [ "circle.diameter = 200", "_____no_output_____" ] ], [ [ "In these examples, when you try to assign a new value to `.diameter`, you get an `AttributeError` because the setter functionality comes from the internal descriptor of `property`.", "_____no_output_____" ], [ "<a class=\"anchor\" id=\"logging_attribute_access_and_mutation\"></a>\n\n### Logging Attribute Access and Mutation", "_____no_output_____" ], [ "Sometimes you need to keep track of what your code does and how your programs flow. A way to do that in Python is to use [`logging`](https://realpython.com/python-logging/). This module provides all the functionality you would require for logging your code. 
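A minimal setup takes only a couple of lines; for example, this hypothetical configuration sends `INFO`-and-above records to a file:

```python
import logging

logging.basicConfig(filename="app.log", level=logging.INFO)
logging.info("Application started")  # Written to app.log
```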
It’ll allow you to constantly watch the code and generate useful information about how it works.", "_____no_output_____" ], [ "If you ever need to keep track of how and when you access and mutate a given attribute, then you can take advantage of `property()` for that, too:", "_____no_output_____" ] ], [ [ "# circle.py\n\nimport logging\n\nlogging.basicConfig(\n format=\"%(asctime)s: %(message)s\",\n level=logging.INFO,\n datefmt=\"%H:%M:%S\"\n)\n\nclass Circle:\n def __init__(self, radius):\n self._msg = '\"radius\" was {state}. Current value: {value}'\n self.radius = radius\n\n @property\n def radius(self):\n \"\"\"The radius property.\"\"\"\n logging.info(self._msg.format(state=\"accessed\", value=str(self._radius)))\n return self._radius\n\n @radius.setter\n def radius(self, value):\n try:\n self._radius = float(value)\n logging.info(self._msg.format(state=\"mutated\", value=str(self._radius)))\n except ValueError:\n logging.info('validation error while mutating \"radius\"')", "_____no_output_____" ] ], [ [ "Here, you first import `logging` and define a basic configuration. Then you implement `Circle` with a managed attribute `.radius`. The getter method generates log information every time you access `.radius` in your code. The setter method logs each mutation that you perform on `.radius`. It also logs those situations in which you get an error because of bad input data.", "_____no_output_____" ], [ "Here’s how you can use `Circle` in your code:", "_____no_output_____" ] ], [ [ "circle = Circle(42.0)\n\ncircle.radius", "17:25:24: \"radius\" was mutated. Current value: 42.0\n17:25:24: \"radius\" was accessed. Current value: 42.0\n" ], [ "circle.radius = 100", "17:25:24: \"radius\" was mutated. Current value: 100.0\n" ], [ "circle.radius", "17:25:25: \"radius\" was accessed. Current value: 100.0\n" ], [ "circle.radius = \"value\"", "17:25:25: validation error while mutating \"radius\"\n" ] ], [ [ "Logging useful data from attribute access and mutation can help you debug your code. Logging can also help you identify sources of problematic data input, analyze the performance of your code, spot usage patterns, and more.", "_____no_output_____" ], [ "<a class=\"anchor\" id=\"managing_attribute_deletion\"></a>\n\n### Managing Attribute Deletion", "_____no_output_____" ], [ "You can also create properties that implement deleting functionality. This might be a rare use case of `property()`, but having a way to delete an attribute can be handy in some situations.", "_____no_output_____" ], [ "Say you’re implementing your own [tree](https://en.wikipedia.org/wiki/Tree_(data_structure)) data type. A tree is an [abstract data type](https://en.wikipedia.org/wiki/Abstract_data_type) that stores elements in a hierarchy. The tree components are commonly known as **nodes**. Each node in a tree has a parent node, except for the root node. Nodes can have zero or more children.", "_____no_output_____" ], [ "Now suppose you need to provide a way to delete or clear the list of children of a given node. 
Here’s an example that implements a tree node that uses `property()` to provide most of its functionality, including the ability to clear the list of children of the node at hand:", "_____no_output_____" ] ], [ [ "# tree.py\n\nclass TreeNode:\n def __init__(self, data):\n self._data = data\n self._children = []\n\n @property\n def children(self):\n return self._children\n\n @children.setter\n def children(self, value):\n if isinstance(value, list):\n self._children = value\n else:\n del self.children\n self._children.append(value)\n\n @children.deleter\n def children(self):\n self._children.clear()\n\n def __repr__(self):\n return f'{self.__class__.__name__}(\"{self._data}\")'", "_____no_output_____" ] ], [ [ "In this example, `TreeNode` represents a node in your custom tree data type. Each node stores its children in a Python [list](https://realpython.com/python-lists-tuples/). Then you implement `.children` as a property to manage the underlying list of children. The deleter method calls `.clear()` on the list of children to remove them all:", "_____no_output_____" ] ], [ [ "root = TreeNode(\"root\")\nchild1 = TreeNode(\"child 1\")\nchild2 = TreeNode(\"child 2\")", "_____no_output_____" ], [ "root.children = [child1, child2]", "_____no_output_____" ], [ "root.children", "_____no_output_____" ], [ "del root.children\nroot.children", "_____no_output_____" ] ], [ [ "Here, you first create a `root` node to start populating the tree. Then you create two new nodes and assign them to `.children` using a list. The [`del`](https://realpython.com/python-keywords/#the-del-keyword) statement triggers the internal deleter method of `.children` and clears the list.", "_____no_output_____" ], [ "<a class=\"anchor\" id=\"creating_backward-compatible_class_apis\"></a>\n\n### Creating Backward-Compatible Class APIs", "_____no_output_____" ], [ "As you already know, properties turn method calls into direct attribute lookups. This feature allows you to create clean and Pythonic APIs for your classes. You can expose your attributes publicly without the need for getter and setter methods.", "_____no_output_____" ], [ "If you ever need to modify how you compute a given public attribute, then you can turn it into a property. Properties make it possible to perform extra processing, such as data validation, without having to modify your public APIs.", "_____no_output_____" ], [ "Suppose you’re creating an accounting application and you need a base class to manage currencies. To this end, you create a `Currency` class that exposes two attributes, `.units` and `.cents`:", "_____no_output_____" ] ], [ [ "class Currency:\n def __init__(self, units, cents):\n self.units = units\n self.cents = cents\n\n # Currency implementation...", "_____no_output_____" ] ], [ [ "This class looks clean and Pythonic. Now say that your requirements change, and you decide to store the total number of cents instead of the units and cents. Removing `.units` and `.cents` from your public API to use something like `.total_cents` would break more than one client’s code.", "_____no_output_____" ], [ "In this situation, `property()` can be an excellent option to keep your current API unchanged. 
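To see what is at stake, imagine the kind of client code that may already exist in the wild (a hypothetical snippet):

```python
price = Currency(5, 75)          # Clients construct with units and cents...
price.units += 1                 # ...and read and write both attributes freely
print(price.units, price.cents)  # 6 75
```

Any redesign has to keep snippets like this working unchanged.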
Here’s how you can work around the problem and avoid breaking your clients’ code:", "_____no_output_____" ] ], [ [ "# currency.py\n\nCENTS_PER_UNIT = 100\n\nclass Currency:\n def __init__(self, units, cents):\n self._total_cents = units * CENTS_PER_UNIT + cents\n\n @property\n def units(self):\n return self._total_cents // CENTS_PER_UNIT\n\n @units.setter\n def units(self, value):\n self._total_cents = self.cents + value * CENTS_PER_UNIT\n\n @property\n def cents(self):\n return self._total_cents % CENTS_PER_UNIT\n\n @cents.setter\n def cents(self, value):\n self._total_cents = self.units * CENTS_PER_UNIT + value\n\n # Currency implementation...", "_____no_output_____" ] ], [ [ "Now your class stores the total number of cents instead of independent units and cents. However, your users can still access and mutate `.units` and `.cents` in their code and get the same result as before. Go ahead and give it a try!", "_____no_output_____" ], [ "When you write something upon which many people are going to build, you need to guarantee that modifications to the internal implementation don’t affect how end users work with your classes.", "_____no_output_____" ], [ "<a class=\"anchor\" id=\"overriding_properties_in_subclasses\"></a>\n\n## Overriding Properties in Subclasses", "_____no_output_____" ], [ "When you create Python classes that include properties and release them in a package or library, you should expect your users to do a lot of different things with them. One of those things could be **subclassing** them to customize their functionalities. In these cases, your users have to be careful and be aware of a subtle gotcha. If you partially override a property, then you lose the non-overridden functionality.", "_____no_output_____" ], [ "For example, suppose you’re coding an `Employee` class to manage employee information in your company’s internal accounting system. You already have a class called `Person`, and you think about subclassing it to reuse its functionalities.", "_____no_output_____" ], [ "`Person` has a `.name` attribute implemented as a property. The current implementation of `.name` doesn’t meet the requirement of returning the name in uppercase letters. Here’s how you end up solving this:", "_____no_output_____" ] ], [ [ "# persons.py\n\nclass Person:\n def __init__(self, name):\n self._name = name\n\n @property\n def name(self):\n return self._name\n\n @name.setter\n def name(self, value):\n self._name = value\n\n # Person implementation...\n\nclass Employee(Person):\n @property\n def name(self):\n return super().name.upper()\n\n # Employee implementation...", "_____no_output_____" ] ], [ [ "In `Employee`, you override `.name` to make sure that when you access the attribute, you get the employee name in uppercase:", "_____no_output_____" ] ], [ [ "person = Person(\"John\")\nperson.name", "_____no_output_____" ], [ "person.name = \"John Doe\"\nperson.name", "_____no_output_____" ], [ "employee = Employee(\"John\")\nemployee.name", "_____no_output_____" ] ], [ [ "Great! `Employee` works as you need! It returns the name using uppercase letters. However, subsequent tests uncover an unexpected behavior:", "_____no_output_____" ] ], [ [ "# this should raise AttributeError\nemployee.name = \"John Doe\"", "_____no_output_____" ] ], [ [ "What happened? Well, when you override an existing property from a parent class, you override the whole functionality of that property. In this example, you reimplemented the getter method only. 
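You can verify exactly what the override contains by inspecting the new property object:

```python
print(Employee.name.fget)  # <function Employee.name at 0x...>
print(Employee.name.fset)  # None -- the setter did not carry over
```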
Because of that, `.name` lost the rest of the functionality from the base class. You don’t have a setter method any longer.", "_____no_output_____" ], [ "The idea is that if you ever need to override a property in a subclass, then you should provide all the functionality you need in the new version of the property at hand.", "_____no_output_____" ], [ "<a class=\"anchor\" id=\"conclusion\"></a>\n\n## Conclusion", "_____no_output_____" ], [ "A **property** is a special type of class member that provides functionality that’s somewhere in between regular attributes and methods. Properties allow you to modify the implementation of instance attributes without changing the public API of the class. Being able to keep your APIs unchanged helps you avoid breaking code your users wrote on top of older versions of your classes.", "_____no_output_____" ], [ "Properties are the [Pythonic](https://realpython.com/learning-paths/writing-pythonic-code/) way to create **managed attributes** in your classes. They have several use cases in real-world programming, making them a great addition to your skill set as a Python developer.", "_____no_output_____" ], [ "**In this tutorial, you learned how to:**", "_____no_output_____" ], [ "- Create **managed attributes** with Python’s `property()`\n- Perform **lazy attribute evaluation** and provide **computed attributes**\n- Avoid **setter** and **getter** methods with properties\n- Create **read-only**, **read-write**, and **write-only** attributes\n- Create consistent and **backward-compatible APIs** for your classes\n", "_____no_output_____" ], [ "You also wrote several practical examples that walked you through the most common use cases of `property()`. Those examples include input [data validation](#validating-input-values), computed attributes, [logging](https://realpython.com/python-logging/) your code, and more.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
cb9c8cd7f5bc4191f171cfce9d62e4003bb4c79a
4,933
ipynb
Jupyter Notebook
doc/lt.ipynb
KoichiYasuoka/deplacy
bb9c19bb8cf00ccf192aa9243666504ddc773018
[ "MIT" ]
93
2020-03-30T02:34:39.000Z
2022-03-15T19:36:32.000Z
doc/lt.ipynb
KoichiYasuoka/deplacy
bb9c19bb8cf00ccf192aa9243666504ddc773018
[ "MIT" ]
2
2022-01-03T00:43:42.000Z
2022-01-09T23:52:22.000Z
doc/lt.ipynb
KoichiYasuoka/deplacy
bb9c19bb8cf00ccf192aa9243666504ddc773018
[ "MIT" ]
4
2020-12-03T11:15:19.000Z
2022-03-15T19:36:34.000Z
30.263804
169
0.523008
[ [ [ "# [deplacy](https://koichiyasuoka.github.io/deplacy/) sintaksinei analizei\n", "_____no_output_____" ], [ "## su [spaCy](https://spacy.io/)\n", "_____no_output_____" ] ], [ [ "!pip install deplacy\n!sudo pip install -U spacy\n!sudo python -m spacy download lt_core_news_md\nimport pkg_resources,imp\nimp.reload(pkg_resources)\nimport spacy\nnlp=spacy.load(\"lt_core_news_md\")\ndoc=nlp(\"Dievas davė dantis, duos ir duonos.\")\nimport deplacy\ndeplacy.render(doc)\ndeplacy.serve(doc,port=None)\n# import graphviz\n# graphviz.Source(deplacy.dot(doc))", "_____no_output_____" ] ], [ [ "## su [Trankit](https://github.com/nlp-uoregon/trankit)\n", "_____no_output_____" ] ], [ [ "!pip install deplacy trankit transformers\nimport trankit\nnlp=trankit.Pipeline(\"lithuanian\")\ndoc=nlp(\"Dievas davė dantis, duos ir duonos.\")\nimport deplacy\ndeplacy.render(doc)\ndeplacy.serve(doc,port=None)\n# import graphviz\n# graphviz.Source(deplacy.dot(doc))", "_____no_output_____" ] ], [ [ "## su [Stanza](https://stanfordnlp.github.io/stanza)\n", "_____no_output_____" ] ], [ [ "!pip install deplacy stanza\nimport stanza\nstanza.download(\"lt\")\nnlp=stanza.Pipeline(\"lt\")\ndoc=nlp(\"Dievas davė dantis, duos ir duonos.\")\nimport deplacy\ndeplacy.render(doc)\ndeplacy.serve(doc,port=None)\n# import graphviz\n# graphviz.Source(deplacy.dot(doc))", "_____no_output_____" ] ], [ [ "## su [Camphr-Udify](https://camphr.readthedocs.io/en/latest/notes/udify.html)\n", "_____no_output_____" ] ], [ [ "!pip install deplacy camphr en-udify@https://github.com/PKSHATechnology-Research/camphr_models/releases/download/0.7.0/en_udify-0.7.tar.gz\nimport pkg_resources,imp\nimp.reload(pkg_resources)\nimport spacy\nnlp=spacy.load(\"en_udify\")\ndoc=nlp(\"Dievas davė dantis, duos ir duonos.\")\nimport deplacy\ndeplacy.render(doc)\ndeplacy.serve(doc,port=None)\n# import graphviz\n# graphviz.Source(deplacy.dot(doc))", "_____no_output_____" ] ], [ [ "## su [UDPipe 2](http://ufal.mff.cuni.cz/udpipe/2)\n", "_____no_output_____" ] ], [ [ "!pip install deplacy\ndef nlp(t):\n import urllib.request,urllib.parse,json\n with urllib.request.urlopen(\"https://lindat.mff.cuni.cz/services/udpipe/api/process?model=lt&tokenizer&tagger&parser&data=\"+urllib.parse.quote(t)) as r:\n return json.loads(r.read())[\"result\"]\ndoc=nlp(\"Dievas davė dantis, duos ir duonos.\")\nimport deplacy\ndeplacy.render(doc)\ndeplacy.serve(doc,port=None)\n# import graphviz\n# graphviz.Source(deplacy.dot(doc))", "_____no_output_____" ] ], [ [ "## su [spacy-udpipe](https://github.com/TakeLab/spacy-udpipe)\n", "_____no_output_____" ] ], [ [ "!pip install deplacy spacy-udpipe\nimport spacy_udpipe\nspacy_udpipe.download(\"lt\")\nnlp=spacy_udpipe.load(\"lt\")\ndoc=nlp(\"Dievas davė dantis, duos ir duonos.\")\nimport deplacy\ndeplacy.render(doc)\ndeplacy.serve(doc,port=None)\n# import graphviz\n# graphviz.Source(deplacy.dot(doc))", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb9c8f07b123751707c053bd3e48017ac4d0cdde
486,509
ipynb
Jupyter Notebook
Lending Club Loan Data Kernel.ipynb
TomasMantero/Minimizing-risks-for-loan-investments-Keras-ANN
d86dfa61d4664c6b116aadd7e1f42c4e063759d8
[ "Apache-2.0" ]
null
null
null
Lending Club Loan Data Kernel.ipynb
TomasMantero/Minimizing-risks-for-loan-investments-Keras-ANN
d86dfa61d4664c6b116aadd7e1f42c4e063759d8
[ "Apache-2.0" ]
null
null
null
Lending Club Loan Data Kernel.ipynb
TomasMantero/Minimizing-risks-for-loan-investments-Keras-ANN
d86dfa61d4664c6b116aadd7e1f42c4e063759d8
[ "Apache-2.0" ]
null
null
null
195.071772
118,204
0.883762
[ [ [ "---\n\n# Minimizing risks for loan investments - (Keras - Artificial Neural Network)\n[by Tomas Mantero](https://www.kaggle.com/tomasmantero)\n\n---\n\n### Table of Contents\n1. [Overview](#ch1)\n1. [Dataset](#ch2)\n1. [Exploratory Data Analysis](#ch3)\n1. [Data PreProcessing](#ch4)\n1. [Categorical Variables and Dummy Variables](#ch5)\n1. [Scaling and Train Test Split](#ch6)\n1. [Creating a Model](#ch7)\n1. [Training the Model](#ch8)\n1. [Evaluation on Test Data](#ch9)\n1. [Predicting on a New Customer](#ch10)", "_____no_output_____" ], [ "<a id=\"ch1\"></a>\n## Overview \n---\n\nOne of the objectives of this notebook is to **show step-by-step how to visualize the dataset and assess whether or not a new customer is likely to pay back the loan.**\n\nLendingClub is a US peer-to-peer lending company, headquartered in San Francisco, California. It was the first peer-to-peer lender to register its offerings as securities with the Securities and Exchange Commission, and to offer loan trading on a secondary market. LendingClub is the world's largest peer-to-peer lending platform.\n\nGiven historical data on loans given out with information on whether or not the borrower defaulted (charge-off), can we build a model that can predict wether or nor a borrower will pay back their loan? This way in the future when we get a new potential customer **we can assess whether or not they are likely to pay back the loan.**\n\nThe following questions will be answered throughout the Kernel:\n* ***Which features are available in the dataset?***\n* ***What is the distribution of numerical feature values across the samples?***\n* ***What is the length of the dataframe?***\n* ***What is the total count of missing values per column?***\n* ***How many unique employment job titles are there?***\n* ***Would you offer this person a loan?***\n* ***Did this person actually end up paying back their loan?***\n\nIf you have a question or feedback, do not hesitate to write and if you like this kernel,<b><font color='green'> please upvote! </font></b>\n\n<img src=\"https://images.pexels.com/photos/259165/pexels-photo-259165.jpeg?auto=compress&cs=tinysrgb&h=750&w=1260\" title=\"source: www.pexels.com\" width=\"500\" height=\"500\"/>\n<br>", "_____no_output_____" ], [ "<a id=\"ch2\"></a>\n## Dataset\n---\n\n* We will be using a subset of the LendingClub DataSet obtained from Kaggle: https://www.kaggle.com/wordsforthewise/lending-club\n\nThere are many LendingClub data sets on Kaggle. Here is the information on this particular data set:\n\n<table border=\"1\" class=\"dataframe\">\n <thead>\n <tr style=\"text-align: right;\">\n <th></th>\n <th>LoanStatNew</th>\n <th>Description</th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <th>0</th>\n <td>loan_amnt</td>\n <td>The listed amount of the loan applied for by the borrower. If at some point in time, the credit department reduces the loan amount, then it will be reflected in this value.</td>\n </tr>\n <tr>\n <th>1</th>\n <td>term</td>\n <td>The number of payments on the loan. 
Values are in months and can be either 36 or 60.</td>\n </tr>\n <tr>\n <th>2</th>\n <td>int_rate</td>\n <td>Interest Rate on the loan</td>\n </tr>\n <tr>\n <th>3</th>\n <td>installment</td>\n <td>The monthly payment owed by the borrower if the loan originates.</td>\n </tr>\n <tr>\n <th>4</th>\n <td>grade</td>\n <td>LC assigned loan grade</td>\n </tr>\n <tr>\n <th>5</th>\n <td>sub_grade</td>\n <td>LC assigned loan subgrade</td>\n </tr>\n <tr>\n <th>6</th>\n <td>emp_title</td>\n <td>The job title supplied by the Borrower when applying for the loan.*</td>\n </tr>\n <tr>\n <th>7</th>\n <td>emp_length</td>\n <td>Employment length in years. Possible values are between 0 and 10 where 0 means less than one year and 10 means ten or more years.</td>\n </tr>\n <tr>\n <th>8</th>\n <td>home_ownership</td>\n <td>The home ownership status provided by the borrower during registration or obtained from the credit report. Our values are: RENT, OWN, MORTGAGE, OTHER</td>\n </tr>\n <tr>\n <th>9</th>\n <td>annual_inc</td>\n <td>The self-reported annual income provided by the borrower during registration.</td>\n </tr>\n <tr>\n <th>10</th>\n <td>verification_status</td>\n <td>Indicates if income was verified by LC, not verified, or if the income source was verified</td>\n </tr>\n <tr>\n <th>11</th>\n <td>issue_d</td>\n <td>The month which the loan was funded</td>\n </tr>\n <tr>\n <th>12</th>\n <td>loan_status</td>\n <td>Current status of the loan</td>\n </tr>\n <tr>\n <th>13</th>\n <td>purpose</td>\n <td>A category provided by the borrower for the loan request.</td>\n </tr>\n <tr>\n <th>14</th>\n <td>title</td>\n <td>The loan title provided by the borrower</td>\n </tr>\n <tr>\n <th>15</th>\n <td>zip_code</td>\n <td>The first 3 numbers of the zip code provided by the borrower in the loan application.</td>\n </tr>\n <tr>\n <th>16</th>\n <td>addr_state</td>\n <td>The state provided by the borrower in the loan application</td>\n </tr>\n <tr>\n <th>17</th>\n <td>dti</td>\n <td>A ratio calculated using the borrower’s total monthly debt payments on the total debt obligations, excluding mortgage and the requested LC loan, divided by the borrower’s self-reported monthly income.</td>\n </tr>\n <tr>\n <th>18</th>\n <td>earliest_cr_line</td>\n <td>The month the borrower's earliest reported credit line was opened</td>\n </tr>\n <tr>\n <th>19</th>\n <td>open_acc</td>\n <td>The number of open credit lines in the borrower's credit file.</td>\n </tr>\n <tr>\n <th>20</th>\n <td>pub_rec</td>\n <td>Number of derogatory public records</td>\n </tr>\n <tr>\n <th>21</th>\n <td>revol_bal</td>\n <td>Total credit revolving balance</td>\n </tr>\n <tr>\n <th>22</th>\n <td>revol_util</td>\n <td>Revolving line utilization rate, or the amount of credit the borrower is using relative to all available revolving credit.</td>\n </tr>\n <tr>\n <th>23</th>\n <td>total_acc</td>\n <td>The total number of credit lines currently in the borrower's credit file</td>\n </tr>\n <tr>\n <th>24</th>\n <td>initial_list_status</td>\n <td>The initial listing status of the loan. 
Possible values are – W, F</td>\n </tr>\n <tr>\n <th>25</th>\n <td>application_type</td>\n <td>Indicates whether the loan is an individual application or a joint application with two co-borrowers</td>\n </tr>\n <tr>\n <th>26</th>\n <td>mort_acc</td>\n <td>Number of mortgage accounts.</td>\n </tr>\n <tr>\n <th>27</th>\n <td>pub_rec_bankruptcies</td>\n <td>Number of public record bankruptcies</td>\n </tr>\n </tbody>\n</table>\n\n---", "_____no_output_____" ], [ "### Imports", "_____no_output_____" ] ], [ [ "# data analysis and wrangling\nimport pandas as pd\nimport numpy as np\nimport random as rnd\n\n# visualization\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\n# scaling and train test split\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import MinMaxScaler\n\n# creating a model\nimport tensorflow as tf\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Dropout, Activation\nfrom tensorflow.keras.constraints import max_norm\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.callbacks import EarlyStopping\nfrom tensorflow.keras.models import load_model\n\n# evaluation on test data\nfrom sklearn.metrics import classification_report,confusion_matrix", "_____no_output_____" ] ], [ [ "Before starting, let us make a function to get feature information on the data as a .csv file for easy lookup throughout the notebook. ", "_____no_output_____" ] ], [ [ "data_info = pd.read_csv('C:/Users/Tomas/Desktop/Carpetas/Programación/Python/Kaggle/Lending Club Loan Data/lending_club_info.csv',index_col='LoanStatNew')\n\ndef feat_info(col_name):\n print(data_info.loc[col_name]['Description'])\n\n# example\nfeat_info('mort_acc')", "Number of mortgage accounts.\n" ] ], [ [ "### Loading the data", "_____no_output_____" ] ], [ [ "df = pd.read_csv('C:/Users/Tomas/Desktop/Carpetas/Programación/Python/Kaggle/Lending Club Loan Data/lending_club_loan_two.csv')", "_____no_output_____" ] ], [ [ "**Which features are available in the dataset?**", "_____no_output_____" ] ], [ [ "print(df.info())", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 396030 entries, 0 to 396029\nData columns (total 27 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 loan_amnt 396030 non-null float64\n 1 term 396030 non-null object \n 2 int_rate 396030 non-null float64\n 3 installment 396030 non-null float64\n 4 grade 396030 non-null object \n 5 sub_grade 396030 non-null object \n 6 emp_title 373103 non-null object \n 7 emp_length 377729 non-null object \n 8 home_ownership 396030 non-null object \n 9 annual_inc 396030 non-null float64\n 10 verification_status 396030 non-null object \n 11 issue_d 396030 non-null object \n 12 loan_status 396030 non-null object \n 13 purpose 396030 non-null object \n 14 title 394275 non-null object \n 15 dti 396030 non-null float64\n 16 earliest_cr_line 396030 non-null object \n 17 open_acc 396030 non-null float64\n 18 pub_rec 396030 non-null float64\n 19 revol_bal 396030 non-null float64\n 20 revol_util 395754 non-null float64\n 21 total_acc 396030 non-null float64\n 22 initial_list_status 396030 non-null object \n 23 application_type 396030 non-null object \n 24 mort_acc 358235 non-null float64\n 25 pub_rec_bankruptcies 395495 non-null float64\n 26 address 396030 non-null object \ndtypes: float64(12), object(15)\nmemory usage: 81.6+ MB\nNone\n" ] ], [ [ "**Preview the data**", "_____no_output_____" ] ], [ [ "df.head()", "_____no_output_____" ] ], [ [ 
"**What is the distribution of numerical feature values across the samples?**", "_____no_output_____" ] ], [ [ "df.describe().transpose()", "_____no_output_____" ] ], [ [ "<a id=\"ch3\"></a>\n## Exploratory Data Analysis\n---\n\n### Analyze by visualizing data\nGet an understanding for which variables are important, view summary statistics, and visualize the data.", "_____no_output_____" ], [ "### Pearson correlation matrix\nWe use the Pearson correlation coefficient to examine the strength and direction of the linear relationship between two continuous variables.\n\nThe correlation coefficient can range in value from −1 to +1. The larger the absolute value of the coefficient, the stronger the relationship between the variables. For the Pearson correlation, an absolute value of 1 indicates a perfect linear relationship. A correlation close to 0 indicates no linear relationship between the variables. \n\nThe sign of the coefficient indicates the direction of the relationship. If both variables tend to increase or decrease together, the coefficient is positive, and the line that represents the correlation slopes upward. If one variable tends to increase as the other decreases, the coefficient is negative, and the line that represents the correlation slopes downward.\n\n* We can see a strong correlation between loan_amnt and installment. (The monthly payment owed by the borrower if the loan originates)", "_____no_output_____" ] ], [ [ "sns.set(style=\"whitegrid\", font_scale=1)\n\nplt.figure(figsize=(12,12))\nplt.title('Pearson Correlation Matrix',fontsize=25)\nsns.heatmap(df.corr(),linewidths=0.25,vmax=0.7,square=True,cmap=\"GnBu\",linecolor='w',\n annot=True, annot_kws={\"size\":10}, cbar_kws={\"shrink\": .7})", "_____no_output_____" ] ], [ [ "### Loan status and loan amount distribution\n* This is an imbalance problem, because we have a lot more entries of people that fully paid their loans then people that did not pay back.\n* We can expect to probably do very well in terms of accuracy but our precision and recall are going to be the true metrics that we will have to evaluate our model based off of.\n* In the loan amount distribution we can see spikes in even ten thousend dollar, so this is indicating that there are certain amounts that are basically standard loans.", "_____no_output_____" ] ], [ [ "f, axes = plt.subplots(1, 2, figsize=(15,5))\nsns.countplot(x='loan_status', data=df, ax=axes[0])\nsns.distplot(df['loan_amnt'], kde=False, bins=40, ax=axes[1])\nsns.despine()\naxes[0].set(xlabel='Status', ylabel='')\naxes[0].set_title('Count of Loan Status', size=20)\naxes[1].set(xlabel='Loan Amount', ylabel='')\naxes[1].set_title('Loan Amount Distribution', size=20)", "_____no_output_____" ] ], [ [ "### Relationship between loan_amnt, loan_status and installment", "_____no_output_____" ] ], [ [ "f, axes = plt.subplots(1, 2, figsize=(15,5))\nsns.scatterplot(x='installment', y='loan_amnt', data=df, ax=axes[0])\nsns.boxplot(x='loan_status', y='loan_amnt', data=df, ax=axes[1])\nsns.despine()\naxes[0].set(xlabel='Installment', ylabel='Loan Amount')\naxes[0].set_title('Scatterplot between Loan Amount and Installment', size=15)\naxes[1].set(xlabel='Loan Status', ylabel='Loan Amount')\naxes[1].set_title('Boxplot between Loan Amount and Loan Status', size=15)", "_____no_output_____" ] ], [ [ "In case that the boxplot is a little hard to read you can always compare the averages here: \n* So you can see the charged off average price is a little higher than the fully paid loan.", "_____no_output_____" ] 
], [ [ "df.groupby('loan_status')['loan_amnt'].describe()", "_____no_output_____" ] ], [ [ "### Countplot per grade and subgrade\n* Essentially this is showing the percentage of charged off loans.\n* Looks like it is increasing as the letter grade gets higher.\n* Better grades are bluer and the worse grades are redder.", "_____no_output_____" ] ], [ [ "f, axes = plt.subplots(1, 2, figsize=(15,5), gridspec_kw={'width_ratios': [1, 2]})\nsns.countplot(x='grade', hue='loan_status', data=df, order=sorted(df['grade'].unique()), palette='seismic', ax=axes[0])\nsns.countplot(x='sub_grade', data=df, palette='seismic', order=sorted(df['sub_grade'].unique()), ax=axes[1])\nsns.despine()\naxes[0].set(xlabel='Grade', ylabel='Count')\naxes[0].set_title('Count of Loan Status per Grade', size=20)\naxes[1].set(xlabel='Sub Grade', ylabel='Count')\naxes[1].set_title('Count of Loan Status per Sub Grade', size=20)\nplt.tight_layout()", "_____no_output_____" ] ], [ [ "Now we are going to create a new column called 'loan_repaid' which will contain a 1 if the loan status was \"Fully Paid\" and a 0 if it was \"Charged Off\".", "_____no_output_____" ] ], [ [ "df['loan_repaid'] = df['loan_status'].map({'Fully Paid':1,'Charged Off':0})\ndf[['loan_repaid','loan_status']].head()", "_____no_output_____" ] ], [ [ "* The interest rate has essentially the highest negative correlation with whether or not someone is going to repay their loan.\n* If you have a extremely high interest rate you are going to find it harder to pay off that loan.", "_____no_output_____" ] ], [ [ "df.corr()['loan_repaid'].sort_values(ascending=True).drop('loan_repaid').plot.bar(color='green')", "_____no_output_____" ] ], [ [ "<a id=\"ch4\"></a>\n## Data preprocessing\n---\nRemove or fill any missing data. Remove unnecessary or repetitive features. Convert categorical string features to dummy variables.\n\n### Missing data\n***What is the length of the dataframe?***", "_____no_output_____" ] ], [ [ "print(len(df))", "396030\n" ] ], [ [ "***What is the total count of missing values per column?***\n\nWe have missing values in emp_title, emp_length, title, revol_util, mort_acc and pub_rec_bankruptcies. ", "_____no_output_____" ] ], [ [ "df.isnull().sum()", "_____no_output_____" ], [ "feat_info('emp_title')\nprint('\\n')\nfeat_info('emp_length')\nprint('\\n')\nfeat_info('title')\nprint('\\n')\nfeat_info('revol_util')\nprint('\\n')\nfeat_info('mort_acc')\nprint('\\n')\nfeat_info('pub_rec_bankruptcies')", "The job title supplied by the Borrower when applying for the loan.*\n\n\nEmployment length in years. Possible values are between 0 and 10 where 0 means less than one year and 10 means ten or more years. \n\n\nThe loan title provided by the borrower\n\n\nRevolving line utilization rate, or the amount of credit the borrower is using relative to all available revolving credit.\n\n\nNumber of mortgage accounts.\n\n\nNumber of public record bankruptcies\n" ] ], [ [ "### Percentage of missing values per column\n* In the plot we can see how much data is missing as a percentage of the total data.\n* Notice that there is missing almost 10% of mortgage accounts, so we can not drop all those rows. 
\n* On the other hand, we could drop missing values in revol_util or pub_rec_bankruptcies.", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(10,5))\n((df.isnull().sum())/len(df)*100).plot.bar(title='Percentage of missing values per column', color='green')", "_____no_output_____" ] ], [ [ "Let's examine emp_title and emp_length to see whether it will be okay to drop them.\n\n***How many unique employment job titles are there?***\n\nRealistically there are too many unique job titles to try to convert this to a dummy variable feature.", "_____no_output_____" ] ], [ [ "print(df['emp_title'].nunique())\ndf['emp_title'].value_counts()", "173105\n" ] ], [ [ "Let's drop emp_title:", "_____no_output_____" ] ], [ [ "df = df.drop('emp_title',axis=1)", "_____no_output_____" ] ], [ [ "Now we want the percentage of charge offs per category. Essentially informing us what percent of people per employment category didn't pay back their loan. \n* We can see that across the extremes it looks to be extremely similar.\n* Looks like this particular feature of employment length doesn't actually have some extreme differences on the charge off rates.\n* Looks like regardless of what actual employment length you have if you were to pick someone, about 20% of them are going to have not paid back their loans.", "_____no_output_____" ] ], [ [ "per_charge_off = df[df['loan_repaid'] == 0]['emp_length'].value_counts() / df[df['loan_repaid'] == 1]['emp_length'].value_counts()\nper_charge_off.plot.bar(color='green')", "_____no_output_____" ] ], [ [ "Let's drop emp_length:", "_____no_output_____" ] ], [ [ "df = df.drop('emp_length', axis=1)", "_____no_output_____" ] ], [ [ "If we review the title column vs the purpose column looks like there is repeated information", "_____no_output_____" ] ], [ [ "df[['title', 'purpose']].head(10)", "_____no_output_____" ] ], [ [ "The title column is simply a string subcategory/description of the purpose column. Let's drop the column.", "_____no_output_____" ] ], [ [ "df = df.drop('title', axis=1)", "_____no_output_____" ] ], [ [ "Now we are going to deal with the missing data of mort_acc. Since mort_acc has a strong correlation with total_acc we will group the dataframe by the total_acc and calculate the mean value for the mort_acc per total_acc entry. To get the result below:", "_____no_output_____" ] ], [ [ "print(\"Mean of mort_acc column per total_acc\")\ntotal_acc_avg = df.groupby('total_acc').mean()['mort_acc']\nprint(total_acc_avg)", "Mean of mort_acc column per total_acc\ntotal_acc\n2.0 0.000000\n3.0 0.052023\n4.0 0.066743\n5.0 0.103289\n6.0 0.151293\n ... \n124.0 1.000000\n129.0 1.000000\n135.0 3.000000\n150.0 2.000000\n151.0 0.000000\nName: mort_acc, Length: 118, dtype: float64\n" ] ], [ [ "Let's fill in the missing mort_acc values based on their total_acc value. If the mort_acc is missing, then we will fill in that missing value with the mean value corresponding to its total_acc value from the Series we created above. 
This involves using an .apply() method with two columns.", "_____no_output_____" ] ], [ [ "total_acc_avg = df.groupby('total_acc').mean()['mort_acc']\n\ndef fill_mort_acc(total_acc,mort_acc):\n '''\n Accepts the total_acc and mort_acc values for the row.\n Checks if the mort_acc is NaN , if so, it returns the avg mort_acc value\n for the corresponding total_acc value for that row.\n \n total_acc_avg here should be a Series or dictionary containing the mapping of the\n groupby averages of mort_acc per total_acc values.\n '''\n if np.isnan(mort_acc):\n return total_acc_avg[total_acc]\n else:\n return mort_acc\n \ndf['mort_acc'] = df.apply(lambda x: fill_mort_acc(x['total_acc'], x['mort_acc']), axis=1)", "_____no_output_____" ] ], [ [ "revol_util and the pub_rec_bankruptcies have missing data points, but they account for less than 0.5% of the total data. Let's remove the rows that are missing those values in those columns with dropna().", "_____no_output_____" ] ], [ [ "df = df.dropna()", "_____no_output_____" ], [ "# check for missing values\ndf.isnull().sum()", "_____no_output_____" ] ], [ [ "<a id=\"ch5\"></a>\n## Categorical variables and dummy variables\n---\nWe're done working with the missing data! Now we just need to deal with the string values due to the categorical columns.", "_____no_output_____" ], [ "### Term feature\nConvert the term feature into either a 36 or 60 integer numeric data type using .apply() or .map().", "_____no_output_____" ] ], [ [ "print(df['term'].value_counts())\nprint('\\n')\nprint('\\n')\n\ndf['term'] = df['term'].apply(lambda term: int(term[:3]))\n\nprint(df['term'].value_counts())", " 36 months 301247\n 60 months 93972\nName: term, dtype: int64\n\n\n\n\n36 301247\n60 93972\nName: term, dtype: int64\n" ] ], [ [ "### Grade feature\nWe already know grade is part of sub_grade, so just drop the grade feature.", "_____no_output_____" ] ], [ [ "df = df.drop('grade', axis=1)", "_____no_output_____" ] ], [ [ "Let's convert the subgrade into dummy variables. Then concatenate these new columns to the original dataframe. Remember to drop the original subgrade column and to add drop_first=True to your get_dummies call.", "_____no_output_____" ] ], [ [ "subgrade_dummies = pd.get_dummies(df['sub_grade'],drop_first=True)\ndf = pd.concat([df.drop('sub_grade',axis=1),subgrade_dummies],axis=1)", "_____no_output_____" ] ], [ [ "### Verification_status, application_type, initial_list_status, purpose features\nLet's convert these columns into dummy variables and concatenate them with the original dataframe.", "_____no_output_____" ] ], [ [ "dummies = pd.get_dummies(df[['verification_status', 'application_type','initial_list_status','purpose']], drop_first=True)\n\ndf = df.drop(['verification_status', 'application_type','initial_list_status','purpose'],axis=1)\n\ndf = pd.concat([df,dummies],axis=1)", "_____no_output_____" ] ], [ [ "### Home_ownership feature\nConvert these to dummy variables, but replace NONE and ANY with OTHER, so that we end up with just 4 categories, MORTGAGE, RENT, OWN, OTHER. Then concatenate them with the original dataframe.", "_____no_output_____" ] ], [ [ "df['home_ownership'] = df['home_ownership'].replace(['NONE', 'ANY'], 'OTHER')\ndummies = pd.get_dummies(df['home_ownership'],drop_first=True)\ndf = df.drop('home_ownership',axis=1)\ndf = pd.concat([df,dummies],axis=1)", "_____no_output_____" ] ], [ [ "### Address feature\nLet's feature engineer a zip code column from the address in the data set. 
Create a column called 'zip_code' that extracts the zip code from the address column.", "_____no_output_____" ] ], [ [ "df['zip_code'] = df['address'].apply(lambda address:address[-5:])\n\ndummies = pd.get_dummies(df['zip_code'],drop_first=True)\ndf = df.drop(['zip_code','address'],axis=1)\ndf = pd.concat([df,dummies],axis=1)", "_____no_output_____" ] ], [ [ "### Issue_d feature\nThis would be data leakage, we wouldn't know beforehand whether or not a loan would be issued when using our model, so in theory we wouldn't have an issue_date, drop this feature.", "_____no_output_____" ] ], [ [ "df = df.drop('issue_d', axis=1)", "_____no_output_____" ] ], [ [ "### Earliest_cr_line feature\nThis appears to be a historical time stamp feature. Extract the year from this feature using a .apply function, then convert it to a numeric feature. Set this new data to a feature column called 'earliest_cr_year'. Then drop the earliest_cr_line feature.", "_____no_output_____" ] ], [ [ "df['earliest_cr_year'] = df['earliest_cr_line'].apply(lambda date:int(date[-4:]))\ndf = df.drop('earliest_cr_line', axis=1)\n\ndf.select_dtypes(['object']).columns", "_____no_output_____" ], [ "df = df.drop('loan_status',axis=1)", "_____no_output_____" ] ], [ [ "<a id=\"ch6\"></a>\n## Scaling and train test split\n---", "_____no_output_____" ] ], [ [ "# Features\nX = df.drop('loan_repaid',axis=1).values\n\n# Label\ny = df['loan_repaid'].values\n\n# Split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=101)", "_____no_output_____" ], [ "print(X_train.shape)\nprint(X_test.shape)\nprint(y_train.shape)\nprint(y_test.shape)", "(316175, 78)\n(79044, 78)\n(316175,)\n(79044,)\n" ] ], [ [ "### Normalizing / scaling the data\nWe scale the feature data. To prevent data leakage from the test set, we only fit our scaler to the training set.", "_____no_output_____" ] ], [ [ "scaler = MinMaxScaler()\n\n# fit and transfrom\nX_train = scaler.fit_transform(X_train)\nX_test = scaler.transform(X_test)\n\n# everything has been scaled between 1 and 0\nprint('Max: ',X_train.max())\nprint('Min: ', X_train.min())", "Max: 1.0\nMin: 0.0\n" ] ], [ [ "<a id=\"ch7\"></a>\n## Creating a model\n---\n**Dropout Layers**\n* Dropout is a technique where randomly selected neurons are ignored during training. They are “dropped-out” randomly.\n* Simply put, dropout refers to ignoring units (i.e. neurons) during the training phase of certain set of neurons which is chosen at random.\n* Helps prevent overfitting.", "_____no_output_____" ] ], [ [ "model = Sequential()\n\n# input layer\nmodel.add(Dense(78,activation='relu'))\nmodel.add(Dropout(0.2))\n\n# hidden layer\nmodel.add(Dense(39,activation='relu'))\nmodel.add(Dropout(0.2))\n\n# hidden layer\nmodel.add(Dense(19,activation='relu'))\nmodel.add(Dropout(0.2))\n\n# output layer\nmodel.add(Dense(1, activation='sigmoid'))\n\n# compile model\nmodel.compile(optimizer=\"adam\", loss='binary_crossentropy')", "_____no_output_____" ] ], [ [ "### Early stopping\nThis callback allows you to specify the performance measure to monitor, the trigger, and once triggered, it will stop the training process. 
Basically, it stop training when a monitored quantity has stopped improving.", "_____no_output_____" ] ], [ [ "early_stop = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=25)", "_____no_output_____" ] ], [ [ "<a id=\"ch8\"></a>\n## Training the model\n---\nNow that the model is ready, we can fit the model into the data.", "_____no_output_____" ] ], [ [ "model.fit(x=X_train, \n y=y_train, \n epochs=400,\n batch_size=256,\n validation_data=(X_test, y_test),\n callbacks=[early_stop])", "Epoch 1/400\n1236/1236 [==============================] - 3s 2ms/step - loss: 0.2980 - val_loss: 0.2646\nEpoch 2/400\n1236/1236 [==============================] - 3s 2ms/step - loss: 0.2659 - val_loss: 0.2644\nEpoch 3/400\n1236/1236 [==============================] - 3s 2ms/step - loss: 0.2627 - val_loss: 0.2620\nEpoch 4/400\n1236/1236 [==============================] - 3s 2ms/step - loss: 0.2615 - val_loss: 0.2625\nEpoch 5/400\n1236/1236 [==============================] - 3s 2ms/step - loss: 0.2608 - val_loss: 0.2616\nEpoch 6/400\n1236/1236 [==============================] - 3s 2ms/step - loss: 0.2602 - val_loss: 0.2620\nEpoch 7/400\n1236/1236 [==============================] - 3s 2ms/step - loss: 0.2596 - val_loss: 0.2618\nEpoch 8/400\n1236/1236 [==============================] - 3s 2ms/step - loss: 0.2595 - val_loss: 0.2615\nEpoch 9/400\n1236/1236 [==============================] - 3s 2ms/step - loss: 0.2591 - val_loss: 0.2614\nEpoch 10/400\n1236/1236 [==============================] - 3s 2ms/step - loss: 0.2588 - val_loss: 0.2613\nEpoch 11/400\n1236/1236 [==============================] - 3s 2ms/step - loss: 0.2586 - val_loss: 0.2614\nEpoch 12/400\n1236/1236 [==============================] - 3s 2ms/step - loss: 0.2581 - val_loss: 0.2613\nEpoch 13/400\n1236/1236 [==============================] - 3s 3ms/step - loss: 0.2582 - val_loss: 0.2611\nEpoch 14/400\n1236/1236 [==============================] - 4s 3ms/step - loss: 0.2578 - val_loss: 0.2611\nEpoch 15/400\n1236/1236 [==============================] - 3s 2ms/step - loss: 0.2574 - val_loss: 0.2608\nEpoch 16/400\n1236/1236 [==============================] - 3s 2ms/step - loss: 0.2575 - val_loss: 0.2614\nEpoch 17/400\n1236/1236 [==============================] - 3s 2ms/step - loss: 0.2575 - val_loss: 0.2613\nEpoch 18/400\n1236/1236 [==============================] - 3s 2ms/step - loss: 0.2572 - val_loss: 0.2611\nEpoch 19/400\n1236/1236 [==============================] - 3s 2ms/step - loss: 0.2568 - val_loss: 0.2614\nEpoch 20/400\n1236/1236 [==============================] - 3s 2ms/step - loss: 0.2567 - val_loss: 0.2611\nEpoch 21/400\n1236/1236 [==============================] - 3s 2ms/step - loss: 0.2566 - val_loss: 0.2612\nEpoch 22/400\n1236/1236 [==============================] - 3s 2ms/step - loss: 0.2565 - val_loss: 0.2618\nEpoch 23/400\n1236/1236 [==============================] - 3s 2ms/step - loss: 0.2563 - val_loss: 0.2613\nEpoch 24/400\n1236/1236 [==============================] - 3s 2ms/step - loss: 0.2561 - val_loss: 0.2612\nEpoch 25/400\n1236/1236 [==============================] - 3s 2ms/step - loss: 0.2558 - val_loss: 0.2612\nEpoch 26/400\n1236/1236 [==============================] - 3s 2ms/step - loss: 0.2555 - val_loss: 0.2613\nEpoch 27/400\n1236/1236 [==============================] - 3s 2ms/step - loss: 0.2556 - val_loss: 0.2613\nEpoch 28/400\n1236/1236 [==============================] - 3s 2ms/step - loss: 0.2555 - val_loss: 0.2611\nEpoch 29/400\n1236/1236 [==============================] - 3s 2ms/step - loss: 0.2554 - 
val_loss: 0.2610\nEpoch 30/400\n1236/1236 [==============================] - 3s 2ms/step - loss: 0.2554 - val_loss: 0.2610\nEpoch 31/400\n1236/1236 [==============================] - 3s 2ms/step - loss: 0.2552 - val_loss: 0.2609\nEpoch 32/400\n1236/1236 [==============================] - 3s 2ms/step - loss: 0.2552 - val_loss: 0.2615\nEpoch 33/400\n1236/1236 [==============================] - 3s 2ms/step - loss: 0.2548 - val_loss: 0.2611\nEpoch 34/400\n1236/1236 [==============================] - 3s 2ms/step - loss: 0.2547 - val_loss: 0.2618\nEpoch 35/400\n1236/1236 [==============================] - 3s 2ms/step - loss: 0.2544 - val_loss: 0.2611\nEpoch 36/400\n1236/1236 [==============================] - 3s 2ms/step - loss: 0.2545 - val_loss: 0.2611\nEpoch 37/400\n1236/1236 [==============================] - 3s 2ms/step - loss: 0.2545 - val_loss: 0.2616\nEpoch 38/400\n1236/1236 [==============================] - 3s 2ms/step - loss: 0.2544 - val_loss: 0.2610\nEpoch 39/400\n1236/1236 [==============================] - 3s 2ms/step - loss: 0.2539 - val_loss: 0.2612\nEpoch 40/400\n1236/1236 [==============================] - 3s 2ms/step - loss: 0.2543 - val_loss: 0.2621\nEpoch 00040: early stopping\n" ] ], [ [ "### Training loss per epoch\n* This plot shows the training loss per epoch.\n* This plot helps us to see if there is overfitting in the model. In this case there is no overfitting because both lines go down at the same time. ", "_____no_output_____" ] ], [ [ "losses = pd.DataFrame(model.history.history)\n\nplt.figure(figsize=(15,5))\nsns.lineplot(data=losses,lw=3)\nplt.xlabel('Epochs')\nplt.ylabel('')\nplt.title('Training Loss per Epoch')\nsns.despine()", "_____no_output_____" ] ], [ [ "<a id=\"ch9\"></a>\n## Evaluation on test data\n---\n***Classification Report***\n* **Accuracy** is just the actual percent that we got right, in this case it was 89%.\n* Note that since the data is imbalanced, if we were to make a model that approves all the loans, it would have an 80% accuracy.\n* For example: (317696/len(df)) = 0.80 where 317696 is the count of repaid loans from `df['loan_repaid'].value_counts()`\n* So do not be fooled by a model that returns 80% accuracy: a model that always predicts the loan will be repaid would by itself be 80% accurate on this actual test data set.\n\n\n* The **recall** means \"how many of this class you find over the whole number of elements of this class\"\n* The **precision** will be \"how many are correctly classified among that class\"\n* The **f1-score** is the harmonic mean between precision & recall\n* The **support** is the number of occurrences of the given class in your dataset.\n\n\n* Precision is 0.97, which is really good. On the other hand, recall is not good. \n* We should focus on improving the f1-score in the 0 class. We should improve the 0.61.\n\n***Confusion Matrix***\n* A confusion matrix is a technique for summarizing the performance of a classification algorithm.\n* Classification accuracy alone can be misleading if you have an unequal number of observations in each class, which is our case. \n\n\n* We have 230 Type I errors (False Positive) and 8698 Type II errors (False Negative). \n* 6960 True Positive and 63156 True Negative.", "_____no_output_____" ] ], [ [ "predictions = model.predict_classes(X_test)\n\nprint('Classification Report:')\nprint(classification_report(y_test, predictions))\nprint('\\n')\nprint('Confusion Matrix:')\nprint(confusion_matrix(y_test, predictions))", "Classification Report:\n precision recall f1-score support\n\n 0 0.97 0.44 0.61 15658\n 1 0.88 1.00 0.93 63386\n\n accuracy 0.89 79044\n macro avg 0.92 0.72 0.77 79044\nweighted avg 0.90 0.89 0.87 79044\n\n\n\nConfusion Matrix:\n[[ 6960 8698]\n [ 230 63156]]\n" ] ], [ [ "<a id=\"ch10\"></a>\n## Predicting on a new customer\n---\n***Would you offer this person a loan?***", "_____no_output_____" ] ], [ [ "rnd.seed(101)\nrandom_ind = rnd.randint(0,len(df))\n\nnew_customer = df.drop('loan_repaid',axis=1).iloc[random_ind]\nnew_customer", "_____no_output_____" ], [ "# we need to reshape this to the same shape as the training data that the model was trained on\nmodel.predict_classes(new_customer.values.reshape(1,78))", "_____no_output_____" ] ], [ [ "***Did this person actually end up paying back their loan?***", "_____no_output_____" ] ], [ [ "# the prediction was right\ndf.iloc[random_ind]['loan_repaid']", "_____no_output_____" ] ], [ [ "## References\n* [An Introduction to Statistical Learning with Applications in R](http://faculty.marshall.usc.edu/gareth-james/ISL/) - This book provides an introduction to statistical learning methods.\n* [Python for Data Science and Machine Learning Bootcamp](https://www.udemy.com/course/python-for-data-science-and-machine-learning-bootcamp/) - Use Python for Data Science and Machine Learning.", "_____no_output_____" ], [ "**Thank you** for taking the time to read through my exploration of a Kaggle dataset. I look forward to doing more!\n\nIf you have a question or feedback, do not hesitate to comment and if you like this kernel,<b><font color='green'> please upvote! </font></b>", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
cb9ca5a6a53a97dee40e02102674ca1cec3a93a3
579,143
ipynb
Jupyter Notebook
P1.ipynb
AakashGupta1993/SSUALF
f4fd467eb4c8de17700fa13cc2212ea7645b1ea6
[ "MIT" ]
null
null
null
P1.ipynb
AakashGupta1993/SSUALF
f4fd467eb4c8de17700fa13cc2212ea7645b1ea6
[ "MIT" ]
null
null
null
P1.ipynb
AakashGupta1993/SSUALF
f4fd467eb4c8de17700fa13cc2212ea7645b1ea6
[ "MIT" ]
null
null
null
545.332392
116,880
0.942427
[ [ [ "# Self-Driving Car Engineer Nanodegree\n\n\n## Project: **Finding Lane Lines on the Road** \n***\n### GOAL :\n Make a pipeline that finds lane lines on the road for basic understanding of concepts\n Reflect on your work in a written report\n\n---", "_____no_output_____" ], [ "## Pipeline :\nTo achieve the first goal to find lane lines I am using canny edge detection technique.\n\n**The main steps involved in canny edge detection are as follow:**\n\n1. Grayscale Conversion\n2. Gaussian Blur\n3. Determine the Intensity Gradients\n4. Non Maximum Suppression - thins the edges\n5. Double Thresholding - reduces the noise (takes out some edges that were detected which were acually not present)\n6. Edge Tracking by Hysteresis - to connect weak edges to strong ones\n\nSteps followed for canny edge detction in this project :\n1. Grayscle conversion using cv2\n2. Gaussian Blur using cv2\n3. Canny edge using cv2\n - Noise Reduction\n - Finding Intensity Gradient of the Image\n - Non-maximum Suppression\n - Hysteresis Thresholding\n4. Masking area of interest\n5. Hough Lines - to identify the lane lines\n6. addWeighted function - to show lines on the original image\n\n---\nHere it is what to expect after completion of pipeline :\n<figure>\n <img src=\"test_images/solidWhiteRight.jpg\" width=\"380\" alt=\"Combined Image\" />\n <figcaption>\n <p></p> \n <p style=\"text-align: center;\"> Before pipeline process </p> \n </figcaption>\n</figure>\n <p></p> \n<figure>\n <img src=\"examples/laneLines_thirdPass.jpg\" width=\"380\" alt=\"Combined Image\" />\n <figcaption>\n <p></p> \n <p style=\"text-align: center;\"> After pipeline process</p> \n </figcaption>\n</figure>", "_____no_output_____" ], [ "## Import Packages", "_____no_output_____" ] ], [ [ "#importing some useful packages\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport numpy as np\nimport cv2\n%matplotlib inline", "_____no_output_____" ] ], [ [ "## Read in an Image", "_____no_output_____" ] ], [ [ "#reading in an image\nimage = mpimg.imread('test_images/solidWhiteRight.jpg')\n\n#printing out some stats and plotting\nprint('This image is:', type(image), 'with dimensions:', image.shape)\nplt.imshow(image) # if you wanted to show a single color channel image called 'gray', for example, call as plt.imshow(gray, cmap='gray')", "This image is: <class 'numpy.ndarray'> with dimensions: (540, 960, 3)\n" ] ], [ [ "## Ideas for Lane Detection Pipeline", "_____no_output_____" ], [ "**Some OpenCV functions (beyond those introduced in the lesson) that might be useful for this project are:**\n\n`cv2.inRange()` for color selection \n`cv2.fillPoly()` for regions selection \n`cv2.line()` to draw lines on an image given endpoints \n`cv2.addWeighted()` to coadd / overlay two images\n`cv2.cvtColor()` to grayscale or change color\n`cv2.imwrite()` to output images to file \n`cv2.bitwise_and()` to apply a mask to an image\n\n**Check out the OpenCV documentation to learn about these and discover even more awesome functionality!**", "_____no_output_____" ], [ "## Helper Functions", "_____no_output_____" ], [ "Below are some helper functions to help get you started. 
They should look familiar from the lesson!", "_____no_output_____" ] ], [ [ "import math\n\ndef grayscale(img):\n \"\"\"Applies the Grayscale transform\n This will return an image with only one color channel\n but NOTE: to see the returned image as grayscale\n (assuming your grayscaled image is called 'gray')\n you should call plt.imshow(gray, cmap='gray')\"\"\"\n return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n # Or use BGR2GRAY if you read an image with cv2.imread()\n # return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n \ndef canny(img, low_threshold, high_threshold):\n \"\"\"Applies the Canny transform\"\"\"\n return cv2.Canny(img, low_threshold, high_threshold)\n\ndef gaussian_blur(img, kernel_size):\n \"\"\"Applies a Gaussian Noise kernel\"\"\"\n return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)\n\ndef region_of_interest(img, vertices):\n \"\"\"\n Applies an image mask.\n \n Only keeps the region of the image defined by the polygon\n formed from `vertices`. The rest of the image is set to black.\n \"\"\"\n #defining a blank mask to start with\n mask = np.zeros_like(img) \n \n #defining a 3 channel or 1 channel color to fill the mask with depending on the input image\n if len(img.shape) > 2:\n channel_count = img.shape[2] # i.e. 3 or 4 depending on your image\n ignore_mask_color = (255,) * channel_count\n else:\n ignore_mask_color = 255\n \n #filling pixels inside the polygon defined by \"vertices\" with the fill color \n cv2.fillPoly(mask, vertices, ignore_mask_color)\n \n #returning the image only where mask pixels are nonzero\n masked_image = cv2.bitwise_and(img, mask)\n return masked_image\n\n\nymax_left_g=539\nymin_left_g=0\n\nxmax_left_g=0\nxmin_left_g=0\n\nymax_right_g=539\nymin_right_g=0\n\nxmax_right_g=0\nxmin_right_g=0\ncounter=0\ndef draw_lines(img, lines, color=[255, 0, 0], thickness=10):\n \"\"\"\n NOTE: this is the function you might want to use as a starting point once you want to \n average/extrapolate the line segments you detect to map out the full\n extent of the lane (going from the result shown in raw-lines-example.mp4\n to that shown in P1_example.mp4). \n \n Think about things like separating line segments by their \n slope ((y2-y1)/(x2-x1)) to decide which segments are part of the left\n line vs. the right line. Then, you can average the position of each of \n the lines and extrapolate to the top and bottom of the lane.\n \n This function draws `lines` with `color` and `thickness`. 
\n Lines are drawn on the image inplace (mutates the image).\n If you want to make the lines semi-transparent, think about combining\n this function with the weighted_img() function below\n \"\"\"\n slope1=0\n slope2=0\n count1=0\n count2=0\n left_x=[]\n left_y=[]\n right_x=[]\n right_y=[]\n \n global counter\n \n global ymax_left_g\n global ymin_left_g\n \n global xmax_left_g\n global xmin_left_g\n\n global ymax_right_g\n global ymin_right_g\n \n global xmax_right_g\n global xmin_right_g\n for vertices in lines :\n slope=(vertices[0][3]-vertices[0][1])/(vertices[0][2]-vertices[0][0])\n #print('Slope : ', slope )\n #print('Slope : ', (lines[3]-vertices[1])/(vertices[2]-vertices[0]) )\n #print('vertices : ' , vertices) #==========================\n\n if abs(slope)<0.4 or abs(slope)>0.7:\n # Ignore invalid lines\n #cv2.line(img, (xmin_left_g, ymin_left_g), (xmax_left_g, ymax_left_g), color, thickness)\n #cv2.line(img, (xmin_right_g,ymin_right_g), (xmax_right_g, ymax_right_g), color, thickness)\n continue\n elif slope>0 :\n slope1=slope1+round(slope, 4)\n count1=count1+1\n left_x.append(vertices[0][0])\n left_x.append(vertices[0][2])\n left_y.append(vertices[0][1])\n left_y.append(vertices[0][3])\n else :\n slope2=slope2+round(slope,4)\n count2=count2+1\n right_x.append(vertices[0][0])\n right_x.append(vertices[0][2])\n right_y.append(vertices[0][1])\n right_y.append(vertices[0][3])\n \n if len(left_x)==0 and len(right_x)==0 :\n #right line\n cv2.line(img, (xmin_left_g, ymin_left_g), (xmax_left_g, ymax_left_g), color, thickness)\n #left line\n cv2.line(img, (xmin_right_g,ymin_right_g), (xmax_right_g, ymax_right_g), color, thickness)\n elif len(left_x)==0 :\n slope_c_right=np.polyfit(right_x,right_y,1)\n ymax_right=539\n ymin_right=np.min(right_y)\n\n xmax_right= int((ymax_right-slope_c_right[1])/slope_c_right[0])\n xmin_right= int((ymin_right-slope_c_right[1])/slope_c_right[0])\n \n #right line\n cv2.line(img, (xmin_left_g, ymin_left_g), (xmax_left_g, ymax_left_g), color, thickness)\n #left line\n cv2.line(img, (xmin_right,ymin_right), (xmax_right, ymax_right), color, thickness)\n elif len(right_x)==0:\n slope_c_left=np.polyfit(left_x,left_y,1)\n ymax_left=539\n ymin_left=np.min(left_y)\n\n xmax_left= int((ymax_left-slope_c_left[1])/slope_c_left[0])\n xmin_left= int((ymin_left-slope_c_left[1])/slope_c_left[0])\n\n #right line\n cv2.line(img, (xmin_left, ymin_left), (xmax_left, ymax_left), color, thickness)\n #left line\n cv2.line(img, (xmin_right_g,ymin_right_g), (xmax_right_g, ymax_right_g), color, thickness)\n else:\n slope_c_left=np.polyfit(left_x,left_y,1)\n ymax_left=539\n ymin_left=325 #np.min(left_y)\n\n xmax_left= int((ymax_left-slope_c_left[1])/slope_c_left[0])\n xmin_left= int((ymin_left-slope_c_left[1])/slope_c_left[0])\n\n slope_c_right=np.polyfit(right_x,right_y,1)\n ymax_right=539\n ymin_right=325 #np.min(right_y)\n\n xmax_right= int((ymax_right-slope_c_right[1])/slope_c_right[0])\n xmin_right= int((ymin_right-slope_c_right[1])/slope_c_right[0])\n\n ymax_left_g=ymax_left\n ymin_left_g=ymin_left\n\n xmax_left_g=xmax_left\n xmin_left_g=xmin_left\n\n ymax_right_g=ymax_right\n ymin_right_g=ymin_right\n\n xmax_right_g=xmax_right\n xmin_right_g=xmin_right\n\n counter=1\n #right line\n cv2.line(img, (xmin_left, ymin_left), (xmax_left, ymax_left), color, thickness)\n #left line\n cv2.line(img, (xmin_right,ymin_right), (xmax_right, ymax_right), color, thickness)\n\ndef hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):\n \"\"\"\n `img` should be the output of a Canny 
transform.\n \n Returns an image with hough lines drawn.\n \"\"\"\n lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)\n line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)\n \n draw_lines(line_img, lines)\n return line_img\n\n# Python 3 has support for cool math symbols.\n\ndef weighted_img(img, initial_img, α=0.8, β=1., γ=0.):\n \"\"\"\n `img` is the output of the hough_lines(), An image with lines drawn on it.\n Should be a blank image (all black) with lines drawn on it.\n \n `initial_img` should be the image before any processing.\n \n The result image is computed as follows:\n \n initial_img * α + img * β + γ\n NOTE: initial_img and img must be the same shape!\n \"\"\"\n return cv2.addWeighted(initial_img, α, img, β, γ)", "_____no_output_____" ] ], [ [ "## Test Images\n\nBuild your pipeline to work on the images in the directory \"test_images\" \n**You should make sure your pipeline works well on these images before you try the videos.**", "_____no_output_____" ] ], [ [ "import os\nos.listdir(\"test_images/\")", "_____no_output_____" ] ], [ [ "## Build a Lane Finding Pipeline\n\n", "_____no_output_____" ], [ "Build the pipeline and run your solution on all test_images. Make copies into the `test_images_output` directory, and you can use the images in your writeup report.\n\nTry tuning the various parameters, especially the low and high Canny thresholds as well as the Hough lines parameters.", "_____no_output_____" ] ], [ [ "# TODO: Build your pipeline that will draw lane lines on the test_images\n# then save them to the test_images_output directory.\n\nplt.imshow(image)\nplt.figure() #to print multiple images", "_____no_output_____" ], [ "#image_working= np.copy(image) #to copy an image\n\ngray = grayscale(image) #grayscale conversion\nplt.imshow(gray, cmap='gray')\nplt.figure() #multiple images in same output\nkernel_size = 7\nblur_gray = gaussian_blur(gray,kernel_size)\n\n#setting threshold values\nhighThreshold = 180*0.7;\nlowThreshold = highThreshold*0.3;\n\nedges=cv2.Canny(blur_gray,lowThreshold,highThreshold)\nplt.imshow(edges, cmap='gray')\n\n", "_____no_output_____" ], [ "## perform the region_of_interest, here or earlier\n#taking region_of_interest as traiangle\n\nupper_left=[400,320]\nupper_right=[550,320]\nleft_bottom=[150,539]\nright_bottom=[900,539]\ntriangle = np.array([ upper_left, left_bottom , right_bottom,upper_right])\nmasked_image=region_of_interest(edges,[triangle])\nplt.imshow(masked_image,cmap='gray')\nprint (image.shape[1])", "960\n" ], [ "#hough_transform, hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap)\nimage_hough=hough_lines(masked_image,2,np.pi/180,25,25,10)\n\nplt.imshow(image_hough,cmap='gray')", "_____no_output_____" ], [ "#weighted_img\n\nw_img=weighted_img(image_hough,image)\nplt.imshow(w_img,cmap='gray')\nmpimg.imsave(\"test-after.png\", w_img)", "_____no_output_____" ] ], [ [ "## Test on Videos\n\nYou know what's cooler than drawing lanes over images? Drawing lanes over video!\n\nWe can test our solution on two provided videos:\n\n`solidWhiteRight.mp4`\n\n`solidYellowLeft.mp4`\n\n**Note: if you get an import error when you run the next cell, try changing your kernel (select the Kernel menu above --> Change Kernel). Still have problems? Try relaunching Jupyter Notebook from the terminal prompt. Also, consult the forums for more troubleshooting tips.**\n\n**If you get an error that looks like this:**\n```\nNeedDownloadError: Need ffmpeg exe. 
\nYou can download it by calling: \nimageio.plugins.ffmpeg.download()\n```\n**Follow the instructions in the error message and check out [this forum post](https://discussions.udacity.com/t/project-error-of-test-on-videos/274082) for more troubleshooting tips across operating systems.**", "_____no_output_____" ] ], [ [ "# Import everything needed to edit/save/watch video clips\nfrom moviepy.editor import VideoFileClip\nfrom IPython.display import HTML", "_____no_output_____" ] ], [ [ "## Reading in an image from a video clip", "_____no_output_____" ] ], [ [ "# Reading in an image from a video clip\nclipTest = VideoFileClip('test_videos/solidWhiteRight.mp4')\n#clipTest = VideoFileClip('test_videos/solidYellowLeft.mp4')\n\nn_frames = sum(1 for x in clipTest.iter_frames())\n\nprint(n_frames)\ncount=0\nfor frame in clipTest.iter_frames():\n count=count+1\n if count==174:\n image=frame\n break\n# printing out some stats and plotting\nprint('This image is:', type(image), 'with dimensions:', image.shape)\nplt.imshow(image) # if you wanted to show a single color channel image called 'gray', for example, call as plt.imshow(gray, cmap='gray')", "221\nThis image is: <class 'numpy.ndarray'> with dimensions: (540, 960, 3)\n" ], [ "def process_image(image):\n gray = grayscale(image) # grayscale conversion\n kernel_size = 7\n blur_gray = gaussian_blur(gray, kernel_size) # Gaussian blur\n\n # setting Canny threshold values\n highThreshold = 180*0.7\n lowThreshold = highThreshold*0.3\n\n edges = cv2.Canny(blur_gray, lowThreshold, highThreshold) # Canny edge detection\n #plt.imshow(edges, cmap='gray')\n\n # trapezoidal region of interest\n upper_left=[450,320]\n upper_right=[550,320]\n left_bottom=[130,539]\n right_bottom=[900,539]\n mask_area = np.array([upper_left, left_bottom, right_bottom, upper_right])\n masked_image = region_of_interest(edges, [mask_area])\n\n # Hough transform: hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap)\n image_hough = hough_lines(masked_image, 2, np.pi/180, 25, 25, 5)\n\n # weighted_img - draw the detected lines on top of the original image\n result = weighted_img(image_hough, image)\n return result", "_____no_output_____" ] ], [ [ "Let's try the one with the solid white lane on the right first ...", "_____no_output_____" ] ], [ [ "global counter\ncounter=0\nwhite_output = 'test_videos_output/solidWhiteRight.mp4'\n## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video\n## To do so add .subclip(start_second,end_second) to the end of the line below\n## Where start_second and end_second are integer values representing the start and end of the subclip\n## You may also uncomment the following line for a subclip of the first 5 seconds\n##clip1 = VideoFileClip(\"test_videos/solidWhiteRight.mp4\").subclip(0,5)\nclip1 = VideoFileClip(\"test_videos/solidWhiteRight.mp4\")\nwhite_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!!\n%time white_clip.write_videofile(white_output, audio=False)", "[MoviePy] >>>> Building video test_videos_output/solidWhiteRight.mp4\n[MoviePy] Writing video test_videos_output/solidWhiteRight.mp4\n" ] ], [ [ "Play the video inline, or if you prefer find the video in your filesystem (should be in the same directory) and play it in your video player of choice.", "_____no_output_____" ] ], [ [ "HTML(\"\"\"\n<video width=\"960\" height=\"540\" controls>\n <source src=\"{0}\">\n</video>\n\"\"\".format(white_output))", "_____no_output_____" ] ], [ [ "## Improve the draw_lines() function\n\n**At this point, if you were successful with making the pipeline and tuning parameters, you probably have the Hough line segments drawn onto the road, but what about identifying the full extent of the lane and marking it clearly as in the example video (P1_example.mp4)? Think about defining a line to run the full length of the visible lane based on the line segments you identified with the Hough Transform. As mentioned previously, try to average and/or extrapolate the line segments you've detected to map out the full extent of the lane lines. You can see an example of the result you're going for in the video \"P1_example.mp4\".**\n\n**Go back and modify your draw_lines function accordingly and try re-running your pipeline. The new output should draw a single, solid line over the left lane line and a single, solid line over the right lane line. The lines should start from the bottom of the image and extend out to the top of the region of interest.**", "_____no_output_____" ], [ "Now for the one with the solid yellow lane on the left. This one's more tricky!", "_____no_output_____" ] ], [ [ "global counter\ncounter=0\nyellow_output = 'test_videos_output/solidYellowLeft.mp4'\n## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video\n## To do so add .subclip(start_second,end_second) to the end of the line below\n## Where start_second and end_second are integer values representing the start and end of the subclip\n## You may also uncomment the following line for a subclip of the first 5 seconds\n##clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4').subclip(0,5)\nclip2 = VideoFileClip('test_videos/solidYellowLeft.mp4')\nyellow_clip = clip2.fl_image(process_image)\n%time yellow_clip.write_videofile(yellow_output, audio=False)", "[MoviePy] >>>> Building video test_videos_output/solidYellowLeft.mp4\n[MoviePy] Writing video test_videos_output/solidYellowLeft.mp4\n" ], [ "HTML(\"\"\"\n<video width=\"960\" height=\"540\" controls>\n <source src=\"{0}\">\n</video>\n\"\"\".format(yellow_output))", "_____no_output_____" ] ], [ [ "## Reflections and Thoughts\n\nThere are a number of ways in which the current implementation of the pipeline can be improved. A running average of the lane lines can be maintained in case an erroneous frame appears. The pipeline also does not take into account the dark and light patches on the road caused by shadows, rain, etc.; handling for these could be added as well.\n\nA running average can be implemented by storing the values as global variables. To take care of light and dark patches, we can work in the HSV colorspace. A sketch of one way to average and extrapolate the detected segments is given after this notebook's cells.", "_____no_output_____" ], [ "## Optional Challenge\n\nTry your lane finding pipeline on the video below. Does it still work? Can you figure out a way to make it more robust? If you're up for the challenge, modify your pipeline so it works with this video and submit it along with the rest of your project!", "_____no_output_____" ] ], [ [ "'''challenge_output = 'test_videos_output/challenge.mp4'\n## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video\n## To do so add .subclip(start_second,end_second) to the end of the line below\n## Where start_second and end_second are integer values representing the start and end of the subclip\n## You may also uncomment the following line for a subclip of the first 5 seconds\n##clip3 = VideoFileClip('test_videos/challenge.mp4').subclip(0,5)\nclip3 = VideoFileClip('test_videos/challenge.mp4')\nchallenge_clip = clip3.fl_image(process_image)\n%time challenge_clip.write_videofile(challenge_output, audio=False)'''", "_____no_output_____" ], [ "'''HTML(\"\"\"\n<video width=\"960\" height=\"540\" controls>\n <source src=\"{0}\">\n</video>\n\"\"\".format(challenge_output))'''", "_____no_output_____" ] ] ]
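The markdown cells above ask for an averaged/extrapolated `draw_lines()`, but the pipeline in this notebook still draws the raw Hough segments. A minimal sketch of one way to do the averaging, assuming the Udacity helper convention in which `lines` is the array returned by `cv2.HoughLinesP`; the 0.5 slope cutoff and the ROI top at 60% of the image height are illustrative choices, not values taken from this notebook:

```python
import numpy as np
import cv2

def draw_lines_averaged(img, lines, color=(255, 0, 0), thickness=10):
    """Collapse Hough segments into one extrapolated line per lane side."""
    y_bottom = img.shape[0]          # bottom of the frame
    y_top = int(img.shape[0] * 0.6)  # approximate top of the region of interest
    left, right = [], []
    for line in lines:
        for x1, y1, x2, y2 in line:
            if x1 == x2:
                continue  # skip vertical segments (undefined slope)
            slope = (y2 - y1) / (x2 - x1)
            if abs(slope) < 0.5:
                continue  # reject near-horizontal noise
            intercept = y1 - slope * x1
            weight = np.hypot(x2 - x1, y2 - y1)  # longer segments count more
            (left if slope < 0 else right).append((slope, intercept, weight))
    for group in (left, right):
        if not group:
            continue
        slopes, intercepts, weights = map(np.array, zip(*group))
        slope = np.average(slopes, weights=weights)
        intercept = np.average(intercepts, weights=weights)
        cv2.line(img,
                 (int((y_bottom - intercept) / slope), y_bottom),
                 (int((y_top - intercept) / slope), y_top),
                 color, thickness)
```

The running average mentioned in the reflections could then be layered on top by keeping the last few (slope, intercept) pairs per side in module-level state and averaging them across frames.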
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ] ]
cb9ca5f180eb68047f784f7ad97a68c45996e28d
303,471
ipynb
Jupyter Notebook
demos/demo1/houses_mexico_city.ipynb
rafrodriguez/elnortescrapper
4fe3a64cfc19027f0e4a60d75ef67e1232fad71c
[ "BSD-3-Clause" ]
null
null
null
demos/demo1/houses_mexico_city.ipynb
rafrodriguez/elnortescrapper
4fe3a64cfc19027f0e4a60d75ef67e1232fad71c
[ "BSD-3-Clause" ]
null
null
null
demos/demo1/houses_mexico_city.ipynb
rafrodriguez/elnortescrapper
4fe3a64cfc19027f0e4a60d75ef67e1232fad71c
[ "BSD-3-Clause" ]
null
null
null
186.751385
486
0.753367
[ [ [ "# Plotting the cheapest and the most expensive houses for sale in Mexico City", "_____no_output_____" ], [ "[elnortescrapper](https://github.com/rafrodriguez/elnortescrapper) is a custom-made Python web scrapper for advertisements of houses for sale in Mexico that are listed in [Avisos de ocasión](http://www.avisosdeocasion.com).\n\nIt was used to retrieve the advertisements of houses for sale in Mexico City into a CSV file.\n\nThis notebook shows how to use that information to plot the 300 cheapest and the 300 most expensive houses for sale in Mexico City\n\n\n", "_____no_output_____" ] ], [ [ "import pandas\nfrom pandas import DataFrame", "_____no_output_____" ], [ "df = pandas.read_csv('mexico_city_houses.csv')", "_____no_output_____" ], [ "df.count()", "_____no_output_____" ], [ "df.columns", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ], [ "# Prepare the column \"precio\" (price)\n\n# Remove the word 'pesos'\ndf_with_pesos_removed = df[ df['precio'].str.contains(' pesos') ]['precio'].str.replace(' pesos', '')\ndf.ix[df['precio'].str.contains(' pesos'), 'precio'] = df_with_pesos_removed\n\n # Remove the commas\ndf.ix[:,'precio'] = df['precio'].str.replace(',','')\n\n# Convert the prices in USD (\"dólares\") to MXN\nfrom forex_python.converter import CurrencyRates\nexchange_rates = CurrencyRates()\nUSD_to_MXN = exchange_rates.get_rate('USD','MXN')\ndf.ix[df['precio'].str.contains('dólares'),'precio'] = df[ df['precio'].str.contains('dólares')]['precio'].str.replace(' dólares','').str.replace(',','').astype('int')*USD_to_MXN\n\n# Convert the 'precio' column to numeric\ndf.ix[:,'precio'] = pandas.to_numeric(df['precio'])\n\nprint(\"(Converted USD prices with 1 USD =\",USD_to_MXN,\" MXN)\")", "(Converted USD prices with 1 USD = 18.724 MXN)\n" ], [ "# We are going to plot the n most expensive and n cheapest houses for sale\nn = 300", "_____no_output_____" ], [ "from string import Template", "_____no_output_____" ], [ "# Template for the final html with the embedded map\n# It has two placeholders: one for the javascript code of the map and one for the google maps api key\n\nhtml_template = Template(\"\"\"\n<!DOCTYPE html>\n <html>\n <head>\n <meta name=\"viewport\" content=\"initial-scale=1.0, user-scalable=no\">\n <meta charset=\"utf-8\">\n <title>Map</title>\n <style>\n #map {\n height: 500px;\n width: 1000px;\n }\n html, body {\n height: 100%;\n margin: 0;\n padding: 0;\n }\n </style>\n </head>\n <body>\n <div id=\"map\"></div>\n <script>\n $map_js\n </script>\n $google_maps_api_key\n </body>\n </html>\n\"\"\")", "_____no_output_____" ], [ "# Template for the javascript code for the map\n\nmap_js_template = Template(\"\"\"\nfunction initMap() {\n var map = new google.maps.Map(document.getElementById('map'));\n\n var bounds = new google.maps.LatLngBounds();\n\n // Markers of most expensive houses\n var markers_set_1 = [$markers_1];\n\n for( i = 0; i < markers_set_1.length; i++ ) {\n marker_position = new google.maps.LatLng(markers_set_1[i][1], markers_set_1[i][2]);\n var marker = new google.maps.Marker({\n position: marker_position,\n map: map,\n title: markers_set_1[i][0],\n url: markers_set_1[i][3]\n });\n marker.setIcon('http://maps.google.com/mapfiles/ms/icons/green-dot.png')\n\n // Update the bounds\n bounds.extend(marker_position)\n\n // Add the InfoWindow as a property of each marker in order\n // to ensure that it is displayed next to it\n marker.info = new google.maps.InfoWindow({\n content: markers_set_1[i][0] + '<br> <a target=\"_blank\" 
href=\"'+markers_set_1[i][3]+'\">Open ad</a>'\n });\n\n // Listener to open the InfoWindow\n google.maps.event.addListener(marker, 'click', function() {\n this.info.open(map, this);\n });\n\n // If closing an opened InfoWindow when another part of the map is clicked,\n // add a listener to the map here, and keep track of the last opened InfoWindow\n }\n\n // Markers of cheapest houses\n var markers_set_2 = [$markers_2];\n\n for( i = 0; i < markers_set_2.length; i++ ) {\n marker_position = new google.maps.LatLng(markers_set_2[i][1], markers_set_2[i][2]);\n var marker = new google.maps.Marker({\n position: marker_position,\n map: map,\n title: markers_set_1[i][0],\n url: markers_set_1[i][3]\n });\n marker.setIcon('http://maps.google.com/mapfiles/ms/icons/purple-dot.png')\n\n // Update the bounds\n bounds.extend(marker_position)\n\n // Add the InfoWindow as a property of each marker in order\n // to ensure that it is displayed next to the marker\n // and not next to the last marker\n marker.info = new google.maps.InfoWindow({\n content: markers_set_2[i][0] + '<br> <a target=\"_blank\" href=\"'+markers_set_2[i][3]+'\">Open ad</a>'\n });\n\n google.maps.event.addListener(marker, 'click', function() {\n this.info.open(map, this);\n });\n\n // Adjust the bounds of the map\n map.fitBounds(bounds);\n map.setCenter(bounds.getCenter());\n }\n }\"\"\"\n)", "_____no_output_____" ], [ "# Choose the entries that have location and sort the DataFrame by price\ndf_by_price_asc = df[ pandas.notnull(df['latitude'])].sort_values(['precio'], ascending=True)\n\n# Markers for the most expensive houses, in the form of a javascript list\nmarkers_1 = \"\"\nfor index, element in df_by_price_asc.ix[:,['precio', 'colonia', 'latitude', 'longitude', 'url']].tail(n).iterrows():\n precio = \"$\"+\"{:,}\".format(int(element['precio']))\n colonia = str(element['colonia'])\n latitude = str(element['latitude'])\n longitude = str(element['longitude'])\n url = element['url']\n markers_1 += \"['\"+precio+\" (\"+colonia+\")',\"+latitude+\",\"+longitude+\",'\"+url+\"'],\\n\"\n\n# Markers for the cheapest houses, in the form of a javascript list\nmarkers_2 = \"\"\nfor index, element in df_by_price_asc.ix[:,['precio', 'colonia', 'latitude', 'longitude', 'url']].head(n).iterrows():\n precio = \"$\"+\"{:,}\".format(int(element['precio']))\n colonia = str(element['colonia'])\n latitude = str(element['latitude'])\n longitude = str(element['longitude'])\n url = element['url']\n markers_2 += \"['\"+precio+\" (\"+colonia+\")',\"+latitude+\",\"+longitude+\",'\"+url+\"'],\\n\"\n \n# Replace in the template of the map\nmap_js = map_js_template.safe_substitute({'markers_1':markers_1,'markers_2':markers_2})", "_____no_output_____" ], [ "# Replace the key and the javascript of the map in the final html template\n\ngoogle_maps_api_key = '''<script async defer\n src=\"https://maps.googleapis.com/maps/api/js?key=AIzaSyByfLrvUSff1YaEZq1r1vDT9xhW8-6nZOc&callback=initMap\">\n </script>'''\n\nfinal_html = html_template.safe_substitute({'map_js':map_js, 'google_maps_api_key':google_maps_api_key})\n", "_____no_output_____" ] ], [ [ "<b style=\"color:purple;\">Purple:</b> The cheapest houses for sale\n\n<b style=\"color:green;\">Green:</b> The most expensive houses for sale", "_____no_output_____" ] ], [ [ "from IPython.display import HTML\nHTML(final_html)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
cb9cbe96e743d192d6a4620644e1369fc4fb118f
67,804
ipynb
Jupyter Notebook
notebooks/graph_algos/deep graph infomax/corpus 2020 audience_overlap lvl data 1.ipynb
us241098/News-Media-Peers
3d30e8b018ef7b60d80dbc1b7f21546e1f15327e
[ "MIT" ]
null
null
null
notebooks/graph_algos/deep graph infomax/corpus 2020 audience_overlap lvl data 1.ipynb
us241098/News-Media-Peers
3d30e8b018ef7b60d80dbc1b7f21546e1f15327e
[ "MIT" ]
null
null
null
notebooks/graph_algos/deep graph infomax/corpus 2020 audience_overlap lvl data 1.ipynb
us241098/News-Media-Peers
3d30e8b018ef7b60d80dbc1b7f21546e1f15327e
[ "MIT" ]
1
2020-08-03T10:22:33.000Z
2020-08-03T10:22:33.000Z
108.4864
25,754
0.688027
[ [ [ "import pandas as pd", "_____no_output_____" ], [ "%pwd", "_____no_output_____" ], [ "node_features_file = \"../../generate_node_features/corpus_2020_audience_overlap_level_0_and_1_node_features.csv\"\nedge_file = \"../../generate_node_features/combined_data_corpus_2020_level_0_1_df_edges.csv\"", "_____no_output_____" ], [ "node_features_df = pd.read_csv(node_features_file, index_col=0)", "_____no_output_____" ], [ "node_features_df.head()", "_____no_output_____" ], [ "node_features_df.info()", "<class 'pandas.core.frame.DataFrame'>\nIndex: 12303 entries, gradescope.com to growveg.com\nData columns (total 5 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 alexa_ranks 9128 non-null float64\n 1 daily_pageviews_per_visitors 9129 non-null float64\n 2 daily_time_on_sites 6780 non-null float64\n 3 total_sites_linking_ins 11966 non-null float64\n 4 bounce_rate 6300 non-null float64\ndtypes: float64(5)\nmemory usage: 576.7+ KB\n" ], [ "node_features_df.alexa_ranks = node_features_df.alexa_ranks.fillna(0)\nnode_features_df.total_sites_linking_ins = node_features_df.total_sites_linking_ins.fillna(0)", "_____no_output_____" ], [ "node_features_df.info()", "<class 'pandas.core.frame.DataFrame'>\nIndex: 12303 entries, gradescope.com to growveg.com\nData columns (total 5 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 alexa_ranks 12303 non-null float64\n 1 daily_pageviews_per_visitors 9129 non-null float64\n 2 daily_time_on_sites 6780 non-null float64\n 3 total_sites_linking_ins 12303 non-null float64\n 4 bounce_rate 6300 non-null float64\ndtypes: float64(5)\nmemory usage: 576.7+ KB\n" ] ], [ [ "# Normalizing features", "_____no_output_____" ] ], [ [ "node_features_df['normalized_alexa_rank'] = node_features_df['alexa_ranks'].apply(lambda x: 1/x if x else 0)", "_____no_output_____" ], [ "import math\n\nnode_features_df['normalized_total_sites_linked_in'] = node_features_df['total_sites_linking_ins'].apply(lambda x: math.log2(x) if x else 0)", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ] ], [ [ "edge_df = pd.read_csv(edge_file)\n\nedge_df.head()", "_____no_output_____" ], [ "edge_df.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 28381 entries, 0 to 28380\nData columns (total 2 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 source 28381 non-null object\n 1 target 28381 non-null object\ndtypes: object(2)\nmemory usage: 443.6+ KB\n" ], [ "import stellargraph as sg", "_____no_output_____" ], [ "G = sg.StellarGraph(node_features_df[['normalized_alexa_rank', 'normalized_total_sites_linked_in']], edge_df)\nprint(G.info())", "StellarGraph: Undirected multigraph\n Nodes: 12303, Edges: 28381\n\n Node types:\n default: [12303]\n Features: float32 vector, length 2\n Edge types: default-default->default\n\n Edge types:\n default-default->default: [28381]\n Weights: all 1 (default)\n Features: none\n" ] ], [ [ "# Unsupervised Deep Graph Infomax", "_____no_output_____" ] ], [ [ "from stellargraph.mapper import (\n CorruptedGenerator,\n FullBatchNodeGenerator,\n GraphSAGENodeGenerator,\n HinSAGENodeGenerator,\n ClusterNodeGenerator,\n)\nfrom stellargraph import StellarGraph\nfrom stellargraph.layer import GCN, DeepGraphInfomax, GraphSAGE, GAT, APPNP, HinSAGE\n\nfrom tensorflow import keras", "_____no_output_____" ] ], [ [ "1. 
Specify the walk parameters: the nodes to start from, the number of walks per node, and the length of each walk. (These values are kept from an unsupervised-sampling setup for reference; Deep Graph Infomax itself does not use them.)", "_____no_output_____" ] ], [ [ "nodes = list(G.nodes())\nnumber_of_walks = 1\nlength = 5", "_____no_output_____" ] ], [ [ "2. Create the full-batch generator and the base GCN model, then wrap the generator in a `CorruptedGenerator`, which yields the shuffled (corrupted) node features that Deep Graph Infomax contrasts against the true ones.", "_____no_output_____" ] ], [ [ "fullbatch_generator = FullBatchNodeGenerator(G, sparse=False)\ngcn_model = GCN(layer_sizes=[128], activations=[\"relu\"], generator=fullbatch_generator)\n\ncorrupted_generator = CorruptedGenerator(fullbatch_generator)\ngen = corrupted_generator.flow(G.nodes())", "Using GCN (local pooling) filters...\n" ], [ "from tensorflow.keras import Model\nimport tensorflow as tf\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.callbacks import EarlyStopping", "_____no_output_____" ] ], [ [ "3. Build the Deep Graph Infomax model on top of the GCN and compile it:", "_____no_output_____" ] ], [ [ "infomax = DeepGraphInfomax(gcn_model, corrupted_generator)\nx_in, x_out = infomax.in_out_tensors()\n\ndeep_graph_infomax_model = Model(inputs=x_in, outputs=x_out)\ndeep_graph_infomax_model.compile(loss=tf.nn.sigmoid_cross_entropy_with_logits, optimizer=Adam(lr=1e-3))", "_____no_output_____" ], [ "from stellargraph.utils import plot_history", "_____no_output_____" ], [ "epochs = 100\n\nes = EarlyStopping(monitor=\"loss\", min_delta=0, patience=20)\nhistory = deep_graph_infomax_model.fit(gen, epochs=epochs, verbose=0, callbacks=[es])\nplot_history(history)", "_____no_output_____" ], [ "x_emb_in, x_emb_out = gcn_model.in_out_tensors()\n\n# for full batch models, squeeze out the batch dim (which is 1)\nx_out = tf.squeeze(x_emb_out, axis=0)\nemb_model = Model(inputs=x_emb_in, outputs=x_out)", "_____no_output_____" ], [ "node_features_fullbatch_generator = fullbatch_generator.flow(node_features_df.index)\nnode_embeddings = emb_model.predict(node_features_fullbatch_generator)", "_____no_output_____" ], [ "embeddings_wv = dict(zip(node_features_df.index.tolist(), node_embeddings))", "_____no_output_____" ], [ "embeddings_wv['crooked.com']", "_____no_output_____" ], [ "class ModelWrapper:\n def __init__(self, embeddings_wv):\n self.wv = embeddings_wv\n \n def __str__(self):\n return 'Unsupervised Deep Graph Infomax'", "_____no_output_____" ], [ "import sys, os\nsys.path.append(\"/home/panayot/Documents/site_similarity\")", "_____no_output_____" ], [ "from utils.notebook_utils import train_model", "_____no_output_____" ], [ "data_year = '2020'\nnode2vec_model = ModelWrapper(embeddings_wv)", "_____no_output_____" ], [ "%pwd", "C:\\Users\\Paco\\Documents\\site_similarity\\notebooks\\node_features_graphs\n" ], [ "%run ../../utils/notebook_utils.py", "_____no_output_____" ], [ "from sklearn import svm\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.linear_model import LogisticRegressionCV", "_____no_output_____" ], [ "result_report = []\n\nclf = LogisticRegressionCV(Cs=10, cv=5, scoring=\"accuracy\", multi_class=\"ovr\", max_iter=300, random_state=42)\nresult_report.append([\n str(node2vec_model),\n 'LogisticRegression CV = 5',\n *list(train_model(clf, node2vec_model=node2vec_model, data_year=data_year).values())\n]);\n\nclf2 = LogisticRegressionCV(Cs=10, cv=10, scoring=\"accuracy\", multi_class=\"ovr\", max_iter=300, random_state=42)\nresult_report.append([\n str(node2vec_model),\n 'LogisticRegression CV = 10',\n *list(train_model(clf2, node2vec_model=node2vec_model, data_year=data_year).values())\n]);\n\ntree_clf = GradientBoostingClassifier(random_state=42)\nresult_report.append([\n str(node2vec_model),\n 'GradientBoostingClassifier',\n *list(train_model(tree_clf, node2vec_model=node2vec_model, data_year=data_year).values())\n]);\n\nsvm_clf = svm.SVC(decision_function_shape='ovo', probability=True, random_state=42)\nresult_report.append([\n str(node2vec_model),\n 'SVC ovo',\n *list(train_model(svm_clf, node2vec_model=node2vec_model, data_year=data_year).values())\n]);\n\nmodel_res = pd.DataFrame(result_report,\n columns=[\"Feature\", \"Classifier\", \"Accuracy\", \"Balanced Accuracy score\",\n \"F1 micro score\", \"F1 macro score\", \"F1 weighted score\", \"MAE\", \"Confusion matrix\"])", "Start training...\nStart training...\nStart training...\nStart training...\n" ], [ "model_res.head()", "_____no_output_____" ] ] ]
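The evaluation above depends on `train_model` from the repository's private `utils` package, so it cannot be rerun from this notebook alone. A self-contained sketch of the same idea (fitting a classifier directly on the Deep Graph Infomax embeddings), assuming a hypothetical `labels` Series mapping site name to class, which is not part of this notebook:

```python
import numpy as np
from sklearn.linear_model import LogisticRegressionCV
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split

# `labels` is a hypothetical pandas Series: site name -> class label
sites = [s for s in labels.index if s in embeddings_wv]  # keep sites that have embeddings
X = np.stack([embeddings_wv[s] for s in sites])
y = labels.loc[sites].values

X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.2, stratify=y, random_state=42)
clf = LogisticRegressionCV(Cs=10, cv=5, max_iter=300, random_state=42).fit(X_tr, y_tr)
print("held-out accuracy:", accuracy_score(y_te, clf.predict(X_te)))
```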
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb9cc3798f3b62147832ed1e4ce4b9a1448b3f09
3,656
ipynb
Jupyter Notebook
examples/String_Initialization.ipynb
Sirlupinwatson1/tensortrade
f58b642419e6d001398786f1f44b97ddc77dfd96
[ "Apache-2.0" ]
6
2019-10-18T17:36:29.000Z
2021-11-24T03:06:42.000Z
examples/String_Initialization.ipynb
Sirlupinwatson1/tensortrade
f58b642419e6d001398786f1f44b97ddc77dfd96
[ "Apache-2.0" ]
null
null
null
examples/String_Initialization.ipynb
Sirlupinwatson1/tensortrade
f58b642419e6d001398786f1f44b97ddc77dfd96
[ "Apache-2.0" ]
3
2019-12-24T21:40:22.000Z
2020-07-27T00:05:44.000Z
26.302158
340
0.57686
[ [ [ "# String Initialization\n\nAfter having some experience with the library and its capabilities, it becomes common to import the same types of components. As more components get added to the library it will be hard to keep track of them, therefore being able to call components by string can make experimenting with different exchanges and strategies much easier.", "_____no_output_____" ], [ "### Calling components from packages\nIn order for a component to be callable from a package, it first must be submitted into the registry located in the ```__init__.py``` file that defines the components package. Once the component is registered it is then callable by running ```<component-package>.get(<component-name>)```.", "_____no_output_____" ] ], [ [ "import tensortrade as td\n\nexchange = td.exchanges.get('fbm')\naction_strategy = td.actions.get('discrete')\nreward_strategy = td.rewards.get('simple')\n\nexchange", "_____no_output_____" ] ], [ [ "Given this capability you can simply instantiate an environment using the following code.", "_____no_output_____" ] ], [ [ "from tensortrade.environments import TradingEnvironment\n\nenv = TradingEnvironment(exchange='fbm',\n action_strategy='discrete',\n reward_strategy='simple')\n\nenv.exchange", "_____no_output_____" ] ], [ [ "Once default trading environment configurations are developed they can be added to the environment registry in order to be called by only one line.", "_____no_output_____" ] ], [ [ "import tensortrade.environments as envs\n\nenv = envs.get('basic')\n\n### Equivalent creation\n\nenv = envs.TradingEnvironment(exchange='simulated',\n action_strategy='discrete',\n reward_strategy='simple')\n\nenv.exchange", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb9cc3860f51e98d664f5dc57d869afd390ce331
300,339
ipynb
Jupyter Notebook
iMCSpec.ipynb
SwastikC/MCMCSPEC
2047f1fec7dcb77b42cf279b6e82063d97af4f0f
[ "MIT" ]
2
2021-03-15T20:14:22.000Z
2021-03-15T20:14:24.000Z
iMCSpec.ipynb
SwastikC/MCMCSPEC
2047f1fec7dcb77b42cf279b6e82063d97af4f0f
[ "MIT" ]
null
null
null
iMCSpec.ipynb
SwastikC/MCMCSPEC
2047f1fec7dcb77b42cf279b6e82063d97af4f0f
[ "MIT" ]
1
2021-03-14T16:12:22.000Z
2021-03-14T16:12:22.000Z
326.810664
62,596
0.917859
[ [ [ "# iMCSpec (iSpec+emcee)", "_____no_output_____" ], [ "iMCSpec is a tool which combines iSpec(https://www.blancocuaresma.com/s/iSpec) and emcee(https://emcee.readthedocs.io/en/stable/) into a single unit to perform Bayesian analysis of spectroscopic data to estimate stellar parameters. For more details on the individual code please refer to the links above. This code have been tested on Syntehtic dataset as well as GAIA BENCHMARK stars (https://www.blancocuaresma.com/s/benchmarkstars). The example shown here is for the grid genarated MARCS.GES_atom_hfs. If you want to use any other grid, just download it from the https://www.cfa.harvard.edu/~sblancoc/iSpec/grid/ and make the necessary changes in the line_regions.", "_____no_output_____" ] ], [ [ "Dependencies :\niSpec : iSpec v2020.10.01\npython = 3+ Branch\nnumpy>=1.18.5\nscipy>=1.5.0\nmatplotlib>=3.2.2\nastropy>=4.0.1.post1\nlockfile>=0.12.2\nCython>=0.29.21\npandas>=1.0.5\nstatsmodels>=0.11.1\ndill>=0.3.2\nemcee>=3.0.2\ncorner>=2.1.1", "_____no_output_____" ] ], [ [ "Let us import all the necessary packages that are required for this analysis. ", "_____no_output_____" ] ], [ [ "import os\nimport sys\nimport numpy as np\nimport pandas as pd\nimport emcee\nfrom multiprocessing import Pool\nimport matplotlib.pyplot as plt\n\nos.environ[\"OMP_NUM_THREADS\"] = \"1\"\nos.environ['QT_QPA_PLATFORM']='offscreen'\nos.environ[\"NUMEXPR_MAX_THREADS\"] = \"8\" #CHECK NUMBER OF CORES ON YOUR MACHINE AND CHOOSE APPROPRIATELY \n\nispec_dir = '/home/swastik/iSpec' #MENTION YOUR DIRECTORY WHERE iSPEC is present \nsys.path.insert(0, os.path.abspath(ispec_dir))\n\nimport ispec\n#np.seterr(all=\"ignore\") #FOR MCMC THE WARNING COMES FOR RED BLUE MOVES WHEN ANY PARTICULAR WALKER VALUE DONOT LIE IN THE PARAMETER SPACE", "_____no_output_____" ] ], [ [ "Let us read the input spectra. Here I have the input spectrum in .txt format for reading the spectra. You can use the .fits format also for reading the spectra using Astropy (https://docs.astropy.org/en/stable/io/fits/). Please note that my input spectra is normalized and radial velocity (RV) corrected. For normalization and RV correction you can used iSpec or iraf. ", "_____no_output_____" ] ], [ [ "df = pd.read_csv('/home/swastik/Downloads/test/HPArcturus.txt', sep ='\\s+') #ENTER YOUR INPUT SPECTRA\ndf = df[df.flux != 0] #FOR SOME SPECTROGRAPH PARTS OF SPECTRA ARE MISSING AND THE CORRESPONDING FLUX VALUES ARE LABELLED AS ZEROS. 
WE WANT TO IGNORE SUCH POINTS\n\nx = df['waveobs'].values\ny = df['flux'].values\nyerr = df['err'].values\ndf = np.array(df,dtype=[('waveobs', '<f8'), ('flux', '<f8'), ('err', '<f8')])", "_____no_output_____" ] ], [ [ "You can perform the analysis on the entire spectrum or choose specific regions/segments for which you want to perform the analysis for.", "_____no_output_____" ] ], [ [ "#--- Read lines with atomic data ------------------------------------------------\n\n# line_regions = ispec.read_line_regions(ispec_dir + \"/input/regions/47000_GES/grid_synth_good_for_params_all.txt\") #CHANGE THIS ACCORDINGLY FOR THE INPUT GRID \n# line_regions = ispec.adjust_linemasks(df, line_regions, max_margin=0.5)\n# segments = ispec.create_segments_around_lines(line_regions, margin=0.5) \n\n\n# ### Add also regions from the wings of strong lines:\n# ## H beta\n# hbeta_segments = ispec.read_segment_regions(ispec_dir + \"/input/regions/wings_Hbeta_segments.txt\")\n# #segments = hbeta_segments\n# segments = np.hstack((segments, hbeta_segments))\n# ## H ALPHA\n# halpha_segments = ispec.read_segment_regions(ispec_dir + \"/input/regions/wings_Halpha_segments.txt\")\n# segments = np.hstack((segments, halpha_segments))\n# ## MG TRIPLET\n# mgtriplet_segments = ispec.read_segment_regions(ispec_dir + \"/input/regions/wings_MgTriplet_segments.txt\")\n# segments = np.hstack((segments, mgtriplet_segments))\n##IRON\n# fe_segments = ispec.read_segment_regions(ispec_dir + \"/input/regions/fe_lines_segments.txt\")\n# segments = np.hstack((segments, fe_segments))\n##CALCIUM TRIPLET\n# catriplet_segments = ispec.read_segment_regions(ispec_dir + \"/input/regions/Calcium_Triplet_segments.txt\")\n# segments = np.hstack((segments, catriplet_segments))\n##Na doublet\n# NaDoublet_segments = ispec.read_segment_regions(ispec_dir + \"/input/regions/Calcium_Triplet_segments.txt\")\n# segments = np.hstack((segments, NaDoublet_segments_segments))", "_____no_output_____" ], [ "# for j in range(len(segments)):\n# segments[j][0] = segments[j][0]+0.05\n# segments[j][1] = segments[j][1]-0.05\n#YOU CAN CHANGE THE STARTING AND ENDING POINTS OF THE SEGEMENT", "_____no_output_____" ] ], [ [ "I will create a mask all false values with the same dimension as my original spectra in 1D. I will keep only those values of wavelength and flux for which the value falls in the segments (i.e, Mask is True).", "_____no_output_____" ] ], [ [ "# mask =np.zeros(x.shape,dtype =bool)\n# for i in range(len(segments)):\n# mask|= (x>segments[i][0])&(x<segments[i][1])", "_____no_output_____" ], [ "# x = x[mask] #SELECTING THOSE VALUES ONLY FOR WHICH MASK VALUE IS TRUE\n# y = y[mask]\n# #yerr = yerr[mask]\nyerr = y*0.0015 #IF ERROR IS NOT SPECIFIED YOU CAN CHOOSE ACCORDINGLY", "_____no_output_____" ] ], [ [ "Now let us interpolate the spectrum using iSpec. Here for simplicity I have considered only Teff, log g and [M/H] as free parameters. 
Vmic and Vmac are obtained from emperical relations by Jofre et al.2013 and Maria Bergemann", "_____no_output_____" ] ], [ [ "def synthesize_spectrum(theta):\n teff ,logg ,MH = theta\n# alpha = ispec.determine_abundance_enchancements(MH)\n alpha =0.0\n microturbulence_vel = ispec.estimate_vmic(teff, logg, MH) \n macroturbulence = ispec.estimate_vmac(teff, logg, MH) \n limb_darkening_coeff = 0.6\n resolution = 47000\n vsini = 1.6 #CHANGE HERE\n code = \"grid\"\n precomputed_grid_dir = ispec_dir + \"/input/grid/SPECTRUM_MARCS.GES_GESv6_atom_hfs_iso.480_680nm/\"\n# precomputed_grid_dir = ispec_dir + \"/input/grid/SPECTRUM_MARCS.GES_GESv6_atom_hfs_iso.480_680nm_light/\"\n# The light grid comes bundled with iSpec. It is just for testing purpose. Donot use it for Scientific purpose.\n grid = ispec.load_spectral_grid(precomputed_grid_dir)\n\n atomic_linelist = None\n isotopes = None\n modeled_layers_pack = None\n solar_abundances = None\n fixed_abundances = None\n abundances = None\n atmosphere_layers = None\n regions = None\n \n if not ispec.valid_interpolated_spectrum_target(grid, {'teff':teff, 'logg':logg, 'MH':MH, 'alpha':alpha, 'vmic': microturbulence_vel}):\n msg = \"The specified effective temperature, gravity (log g) and metallicity [M/H] \\\n fall out of the spectral grid limits.\"\n print(msg)\n\n # Interpolation\n synth_spectrum = ispec.create_spectrum_structure(x)\n synth_spectrum['flux'] = ispec.generate_spectrum(synth_spectrum['waveobs'], \\\n atmosphere_layers, teff, logg, MH, alpha, atomic_linelist, isotopes, abundances, \\\n fixed_abundances, microturbulence_vel = microturbulence_vel, \\\n macroturbulence=macroturbulence, vsini=vsini, limb_darkening_coeff=limb_darkening_coeff, \\\n R=resolution, regions=regions, verbose=1,\n code=code, grid=grid)\n return synth_spectrum", "_____no_output_____" ] ], [ [ "You can also synthesize the spectrum directly from various atmospheric models. A skeleton of the code taken from iSpec is shown below. 
For more details check example.py in iSpec.", "_____no_output_____" ] ], [ [ "# def synthesize_spectrum(theta,code=\"spectrum\"):\n# teff ,logg ,MH = theta\n# resolution = 47000\n# alpha = ispec.determine_abundance_enchancements(MH)\n# microturbulence_vel = ispec.estimate_vmic(teff, logg, MH)\n# macroturbulence = ispec.estimate_vmac(teff, logg, MH)\n\n# limb_darkening_coeff = 0.6\n# regions = None\n\n# # Selected model amtosphere, linelist and solar abundances\n \n# #model = ispec_dir + \"/input/atmospheres/MARCS/\"\n# #model = ispec_dir + \"/input/atmospheres/MARCS.GES/\"\n# #model = ispec_dir + \"/input/atmospheres/MARCS.APOGEE/\"\n# #model = ispec_dir + \"/input/atmospheres/ATLAS9.APOGEE/\"\n# model = ispec_dir + \"/input/atmospheres/ATLAS9.Castelli/\"\n# #model = ispec_dir + \"/input/atmospheres/ATLAS9.Kurucz/\"\n# #model = ispec_dir + \"/input/atmospheres/ATLAS9.Kirby/\" \n\n# #atomic_linelist_file = ispec_dir + \"/input/linelists/transitions/VALD.300_1100nm/atomic_lines.tsv\"\n# #atomic_linelist_file = ispec_dir + \"/input/linelists/transitions/VALD.1100_2400nm/atomic_lines.tsv\"\n# atomic_linelist_file = ispec_dir + \"/input/linelists/transitions/GESv6_atom_hfs_iso.420_920nm/atomic_lines.tsv\"\n# #atomic_linelist_file = ispec_dir + \"/input/linelists/transitions/GESv6_atom_nohfs_noiso.420_920nm/atomic_lines.tsv\"\n \n# isotope_file = ispec_dir + \"/input/isotopes/SPECTRUM.lst\"\n \n\n# atomic_linelist = ispec.read_atomic_linelist(atomic_linelist_file, wave_base=wave_base, wave_top=wave_top)\n# atomic_linelist = atomic_linelist[atomic_linelist['theoretical_depth'] >= 0.01]\n\n# isotopes = ispec.read_isotope_data(isotope_file)\n\n# if \"ATLAS\" in model:\n# solar_abundances_file = ispec_dir + \"/input/abundances/Grevesse.1998/stdatom.dat\"\n# else:\n# # MARCS\n# solar_abundances_file = ispec_dir + \"/input/abundances/Grevesse.2007/stdatom.dat\"\n# #solar_abundances_file = ispec_dir + \"/input/abundances/Asplund.2005/stdatom.dat\"\n# #solar_abundances_file = ispec_dir + \"/input/abundances/Asplund.2009/stdatom.dat\"\n# #solar_abundances_file = ispec_dir + \"/input/abundances/Anders.1989/stdatom.dat\"\n\n# modeled_layers_pack = ispec.load_modeled_layers_pack(model)\n# solar_abundances = ispec.read_solar_abundances(solar_abundances_file)\n \n# ## Custom fixed abundances\n# #fixed_abundances = ispec.create_free_abundances_structure([\"C\", \"N\", \"O\"], chemical_elements, solar_abundances)\n# #fixed_abundances['Abund'] = [-3.49, -3.71, -3.54] # Abundances in SPECTRUM scale (i.e., x - 12.0 - 0.036) and in the same order [\"C\", \"N\", \"O\"]\n# ## No fixed abundances\n# fixed_abundances = None\n\n# atmosphere_layers = ispec.interpolate_atmosphere_layers(modeled_layers_pack, {'teff':teff, 'logg':logg, 'MH':MH, 'alpha':alpha}, code=code)\n# synth_spectrum = ispec.create_spectrum_structure(x)\n# synth_spectrum['flux'] = ispec.generate_spectrum(synth_spectrum['waveobs'],\n# atmosphere_layers, teff, logg, MH, alpha, atomic_linelist, isotopes, solar_abundances,\n# fixed_abundances, microturbulence_vel = microturbulence_vel,\n# macroturbulence=macroturbulence, vsini=vsini, limb_darkening_coeff=limb_darkening_coeff,\n# R=resolution, regions=regions, verbose=0,\n# code=code)\n\n# return synth_spectrum", "_____no_output_____" ] ], [ [ "So far we have discussed about reading the input original spectra and interpolating the synthetic spectra from iSpec. Now the important part that comes into picture is to compare the original spectra and the interpolated spectra. 
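For reference, the quantity returned by the `log_likelihood` function defined below is the Gaussian log-likelihood (up to an additive constant), under the notebook's assumption of independent errors $\sigma_i$ on each flux point:

$$\ln \mathcal{L}(\theta) = -\frac{1}{2}\sum_{i} \frac{\left[\,y_i - m_i(\theta)\,\right]^{2}}{\sigma_i^{2}} + \mathrm{const},$$

where $y_i$ is the observed flux at wavelength point $i$, $m_i(\theta)$ is the interpolated synthetic flux, and $\theta = (T_{\mathrm{eff}}, \log g, \mathrm{[M/H]})$.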
[ [ "So far we have discussed reading the input (observed) spectrum and interpolating the synthetic spectrum from iSpec. The important part that comes next is to compare the observed spectrum with the interpolated one. For this we will use the Markov chain Monte Carlo (MCMC) method, via the emcee package by \nDan Foreman-Mackey.", "_____no_output_____" ] ], [ [ "walkers = eval(input(\"Enter Walkers: \")) #EACH WALKER IS AN INDEPENDENT, RANDOMLY INITIALIZED PARAMETER SET. NOTE: THERE SHOULD BE AT LEAST TWICE AS MANY WALKERS AS FREE PARAMETERS\nIter = eval(input(\"Enter Iterations: \")) #THE NUMBER OF STEPS OVER WHICH THE PARAMETERS ARE CHECKED FOR CONVERGENCE. FOR MOST CASES 250-300 SHOULD DO.\n", "Enter Walkers: 8\nEnter Iterations: 120\n" ] ], [ [ "We will be creating four functions for this MCMC run. The first is straightforward, and is known as the model: it takes as an argument a list representing our θ vector and returns the model evaluated at that θ (here, synthesize_spectrum above). For completeness, your model function should also take your parameter array as an input. The form of the likelihood function below comes from the Gaussian probability distribution P(x)dx.", "_____no_output_____" ] ], [ [ "def log_likelihood(theta):\n model = synthesize_spectrum(theta) #GENERATING THE SPECTRUM FOR A GIVEN VALUE OF THETA\n sigma2 = yerr ** 2 # FINDING THE VARIANCE\n return -0.5 * np.sum((y - (model['flux'])) ** 2/ sigma2) # returns the -chi^2/2 value ", "_____no_output_____" ] ], [ [ "There is no unique way to set up your prior function. For this simple case, we have chosen a log-prior that returns zero if the randomly generated input values lie within the specified ranges, and -infinity otherwise (every parameter must satisfy its range). You can choose your own prior function as well.", "_____no_output_____" ] ], [ [ "def log_prior(theta):\n teff, logg, MH = theta\n if 3200 < teff < 6900 and 1.1 < logg < 4.8 and -2.49 < MH <= 0.49 : #CHANGE HERE\n return 0.0\n return -np.inf", "_____no_output_____" ] ], [ [ "The last function we need to define is lnprob(). This function combines the steps above by running the lnprior function and, if it returned -np.inf, passing that through as a return; if not (if all priors are good), returning the lnlike for that model (by convention we say it's the lnprior output + lnlike output, since lnprior's output should be zero if the priors are good). lnprob needs access to theta, x, y, and yerr, since these get passed through to lnlike; in this notebook x, y and yerr are globals, so log_probability takes only theta.", "_____no_output_____" ] ], [ [ "def log_probability(theta):\n lp = log_prior(theta)\n if not np.isfinite(lp):\n return -np.inf\n return lp + log_likelihood(theta)", "_____no_output_____" ] ], [ [ "Select input guess values and create an initial set of stellar parameters RANDOMLY.", "_____no_output_____" ] ], [ [ "initial = np.array([4650,1.8,-0.7]) #INPUT GUESS VALUES \npos = initial + np.array([100,0.1,0.1])*np.random.randn(walkers, 3) # YOU CAN CHOOSE A UNIFORM RANDOM FUNCTION OR A GAUSSIAN RANDOM NUMBER GENERATOR\nnwalkers, ndim = pos.shape", "_____no_output_____" ] ], [ [ "Now we will run the emcee sampler. This will take some time depending on your system. But don't worry :)", "_____no_output_____" ] ], [ [ "sampler = emcee.EnsembleSampler(nwalkers, ndim, log_probability)\nsampler.run_mcmc(pos,Iter, progress=True)", "100%|██████████| 120/120 [2:10:57<00:00, 65.48s/it] \n" ] ], [ [ "Let us plot the walkers against iterations. Check for convergence in this plot; if you see convergence, you are good to go.", "_____no_output_____" ] ], [ [ "fig, axes = plt.subplots(3, figsize=(10, 7), sharex=True)\nsamples = sampler.get_chain()\naccepted = sampler.backend.accepted.astype(bool) #Here 'accepted' indicates that a walker has moved at least once.\n\nlabels = [\"teff\",\"logg\",\"MH\"]\nfor i in range(ndim):\n ax = axes[i]\n ax.plot(samples[:, :, i], \"k\", alpha=0.3)\n ax.set_ylabel(labels[i])\n ax.yaxis.set_label_coords(-0.1, 0.5)\naxes[-1].set_xlabel(\"step number\");", "_____no_output_____" ] ], [ [ "Let us check how good the fit is.", "_____no_output_____" ] ], [ [ "fig, ax = plt.subplots(1, figsize=(10, 7), sharex=True)\nsamples = sampler.flatchain\ntheta_max = samples[np.argmax(sampler.flatlnprobability)]\nbest_fit_model = synthesize_spectrum(theta_max)\nax.plot(x,y,alpha=0.3)\nax.plot(x,best_fit_model['flux'],alpha =0.3)\nax.plot(x,y-best_fit_model['flux'],alpha =0.3)\nplt.savefig('t2.pdf') #CHANGE HERE\nprint(('Theta max: ',theta_max)) # Generating the spectrum for the maximum-likelihood parameters.\n#NOTE THE SPIKES IN THE PLOT BELOW. THESE ARE DUE TO THE FACT THAT THE END POINTS OF THE SPECTRA ARE EXTRAPOLATED", "('Theta max: ', array([ 4.32736776e+03, 1.58554183e+00, -4.36054183e-01]))\n" ] ], [ [ "During the first few steps the walkers are still exploring the parameter space and convergence has not yet been achieved. We will discard these steps; this is known as \"burn-in\".", "_____no_output_____" ] ], [ [ "new_samples = sampler.get_chain(discard=100, thin=1, flat=False) \nnew_samples = new_samples[:,accepted,:] # WE KEEP ONLY THE WALKERS THAT MOVED AT LEAST ONCE DURING THE RUN. A stagnant walker indicates that the prior function may have returned -inf. ", "_____no_output_____" ] ], [ [ "Checking the convergence after the burn-in... If the chains appear converged, we are done.", "_____no_output_____" ] ], [ [ "fig, axes = plt.subplots(3, figsize=(10, 7), sharex=True)\nfor i in range(ndim):\n ax = axes[i]\n ax.plot(new_samples[:, :, i], \"k\", alpha=0.3)\n ax.set_ylabel(labels[i])\n ax.yaxis.set_label_coords(-0.1, 0.5)\naxes[-1].set_xlabel(\"step number\")\nplt.savefig('t3.pdf') #CHANGE HERE\nflat_samples = new_samples.reshape(-1,new_samples.shape[2])\nnp.savetxt(\"RNtesto.txt\",flat_samples,delimiter='\\t') #CHANGE HERE\n", "_____no_output_____" ] ], [ [ "# DATA VISUALIZATION", "_____no_output_____" ], [ "Now that we have the final list of stellar parameter samples, it is important to visualise their distributions and to check for any correlations among the parameters. Here I show two methods for doing this. Note: I have used very few points for this analysis; for a proper plot you need a much larger dataset (40 walkers x 300 iterations with 150 burn-in steps, at minimum).", "_____no_output_____" ] ], [ [ "import corner\nfrom pandas.plotting import scatter_matrix ", "_____no_output_____" ], [ "df = pd.read_csv('/home/swastik/RNtesto.txt',delimiter='\\t',header = None) \ndf.columns = [\"$T_{eff}$\", \"logg\", \"[M/H]\"]\ndf.hist() #Plotting a histogram for each stellar parameter. THESE NEED NOT BE GAUSSIAN", "_____no_output_____" ], [ "#df = df[df.logg < 4.451 ] #REMOVE ANY OUTLIER DISTRIBUTION", "_____no_output_____" ], [ "scatter_matrix(df, alpha=0.2, figsize=(6, 6), diagonal='kde') #PLOTTING THE SCATTER MATRIX. I HAVE USED A VERY LIGHT DATASET FOR TESTING; USE MORE WALKERS x ITERATIONS FOR A BETTER RESULT", "_____no_output_____" ], [ "samples = np.vstack([df]) #IT IS NECESSARY TO STACK THE DATA VERTICALLY TO OBTAIN THE DISTRIBUTION FROM THE DATA FRAME", "_____no_output_____" ], [ "value2 = np.mean(samples, axis=0) \nplt.rcParams[\"font.size\"] = \"10\" #THIS CHANGES THE FONT SIZE OF THE LABELS (NOT THE LEGEND)", "_____no_output_____" ], [ "#FINALLY... MAKING THE CORNER PLOT\n\n\n#fig = corner.corner(df,show_titles=True,plot_datapoints=True,quantiles=[0.16, 0.5, 0.84],color ='black',levels=(1-np.exp(-0.5),),label_kwargs=dict(fontsize=20,color = 'black'),hist_kwargs=dict(fill = True,color = 'dodgerblue'),alpha =0.2)\nfig = corner.corner(df,show_titles=True,plot_datapoints=True,quantiles=[0.16, 0.5, 0.84],color ='black',label_kwargs=dict(fontsize=20,color = 'black'),hist_kwargs=dict(fill = True,color = 'dodgerblue'),alpha =0.2)\naxes = np.array(fig.axes).reshape((3, 3))\nfor i in range(3):\n ax = axes[i, i]\n ax.axvline(value2[i], color=\"r\",alpha =0.8)\nfor yi in range(3):\n for xi in range(yi):\n ax = axes[yi, xi]\n ax.axvline(value2[xi], color=\"r\",alpha =0.8,linestyle = 'dashed')\n ax.axhline(value2[yi], color=\"r\",alpha =0.8,linestyle = 'dashed')\n ax.plot(value2[xi], value2[yi], \"r\")\n# plt.tight_layout()\n#THE CORNER PLOT DOES NOT LOOK GREAT: THE REASON IS THE SMALL NUMBER OF DATA POINTS AND SHARP CONVERGENCE", "_____no_output_____" ] ], [ [ "I would like to thank Sergi Blanco-Cuaresma for the valuable suggestions and feedback regarding the iSpec code and its integration with emcee. I would also like to thank \nDan Foreman-Mackey for his insightful comments on using emcee, and Aritra Chakraborty and Dr. Ravinder Banyal for their comments and suggestions on improving the code, which would not have been possible without their help.", "_____no_output_____" ] ] ]
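To turn the flattened chain into quotable parameter values, a common emcee convention is the median with 16th/84th-percentile uncertainties; these are the same quantiles drawn on the corner plot. A minimal sketch using the `flat_samples` array saved above:

```python
import numpy as np

labels = ["Teff", "logg", "[M/H]"]
for i, name in enumerate(labels):
    p16, p50, p84 = np.percentile(flat_samples[:, i], [16, 50, 84])
    print(f"{name} = {p50:.3f} (+{p84 - p50:.3f} / -{p50 - p16:.3f})")
```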
[ "markdown", "raw", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "raw" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ] ]
cb9cc91a89c5c78876822622cb37043ca7b5b9e9
8,159
ipynb
Jupyter Notebook
module2-loadingdata/LS_DS_112_Loading_Data_Assignment.ipynb
Anthonyyoyo/DS-Unit-1-Sprint-1-Dealing-With-Data
0328605877606ad98d9c6d6dbae933c2f942bcad
[ "MIT" ]
1
2019-08-06T22:12:18.000Z
2019-08-06T22:12:18.000Z
module2-loadingdata/LS_DS_112_Loading_Data_Assignment.ipynb
Nutritiousfacts/DS-Unit-1-Sprint-1-Dealing-With-Data
39afbd43aef12d724b270b45dfe2c19dd5d5353c
[ "MIT" ]
null
null
null
module2-loadingdata/LS_DS_112_Loading_Data_Assignment.ipynb
Nutritiousfacts/DS-Unit-1-Sprint-1-Dealing-With-Data
39afbd43aef12d724b270b45dfe2c19dd5d5353c
[ "MIT" ]
null
null
null
50.055215
463
0.627283
[ [ [ "<a href=\"https://colab.research.google.com/github/ryanleeallred/DS-Unit-1-Sprint-1-Dealing-With-Data/blob/master/module2-loadingdata/LS_DS_112_Loading_Data_Assignment.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "# Practice Loading Datasets\n\nThis assignment is purposely semi-open-ended you will be asked to load datasets both from github and also from CSV files from the [UC Irvine Machine Learning Repository](https://archive.ics.uci.edu/ml/index.php). \n\nRemember that the UCI datasets may not have a file type of `.csv` so it's important that you learn as much as you can about the dataset before you try and load it. See if you can look at the raw text of the file either locally, on github, using the `!curl` shell command, or in some other way before you try and read it in as a dataframe, this will help you catch what would otherwise be unforseen problems.\n", "_____no_output_____" ], [ "## 1) Load a dataset from Github (via its *RAW* URL)\n\nPick a dataset from the following repository and load it into Google Colab. Make sure that the headers are what you would expect and check to see if missing values have been encoded as NaN values:\n\n<https://github.com/ryanleeallred/datasets>", "_____no_output_____" ] ], [ [ "# TODO your work here!\n# And note you should write comments, descriptions, and add new\n# code and text blocks as needed", "_____no_output_____" ] ], [ [ "## 2) Load a dataset from your local machine\nDownload a dataset from the [UC Irvine Machine Learning Repository](https://archive.ics.uci.edu/ml/index.php) and then upload the file to Google Colab either using the files tab in the left-hand sidebar or by importing `files` from `google.colab` The following link will be a useful resource if you can't remember the syntax: <https://towardsdatascience.com/3-ways-to-load-csv-files-into-colab-7c14fcbdcb92>\n\nWhile you are free to try and load any dataset from the UCI repository, I strongly suggest starting with one of the most popular datasets like those that are featured on the right-hand side of the home page. \n\nSome datasets on UCI will have challenges associated with importing them far beyond what we have exposed you to in class today, so if you run into a dataset that you don't know how to deal with, struggle with it for a little bit, but ultimately feel free to simply choose a different one. \n\n- Make sure that your file has correct headers, and the same number of rows and columns as is specified on the UCI page. If your dataset doesn't have headers use the parameters of the `read_csv` function to add them. Likewise make sure that missing values are encoded as `NaN`.", "_____no_output_____" ] ], [ [ "# TODO your work here!\n# And note you should write comments, descriptions, and add new\n# code and text blocks as needed", "_____no_output_____" ] ], [ [ "## 3) Load a dataset from UCI using `!wget`\n\n\"Shell Out\" and try loading a file directly into your google colab's memory using the `!wget` command and then read it in with `read_csv`.\n\nWith this file we'll do a bit more to it.\n\n- Read it in, fix any problems with the header as make sure missing values are encoded as `NaN`.\n- Use the `.fillna()` method to fill any missing values. 
\n - [.fillna() documentation](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.fillna.html)\n- Create one of each of the following plots using the Pandas plotting functionality:\n - Scatterplot\n - Histogram\n - Density Plot\n", "_____no_output_____" ], [ "## Stretch Goals - Other types and sources of data\n\nNot all data comes in a nice single file - for example, image classification involves handling lots of image files. You still will probably want labels for them, so you may have tabular data in addition to the image blobs - and the images may be reduced in resolution and even fit in a regular csv as a bunch of numbers.\n\nIf you're interested in natural language processing and analyzing text, that is another example where, while it can be put in a csv, you may end up loading much larger raw data and generating features that can then be thought of in a more standard tabular fashion.\n\nOverall you will in the course of learning data science deal with loading data in a variety of ways. Another common way to get data is from a database - most modern applications are backed by one or more databases, which you can query to get data to analyze. We'll cover this more in our data engineering unit.\n\nHow does data get in the database? Most applications generate logs - text files with lots and lots of records of each use of the application. Databases are often populated based on these files, but in some situations you may directly analyze log files. The usual way to do this is with command line (Unix) tools - command lines are intimidating, so don't expect to learn them all at once, but depending on your interests it can be useful to practice.\n\nOne last major source of data is APIs: https://github.com/toddmotto/public-apis\n\nAPI stands for Application Programming Interface, and while originally meant e.g. the way an application interfaced with the GUI or other aspects of an operating system, now it largely refers to online services that let you query and retrieve data. You can essentially think of most of them as \"somebody else's database\" - you have (usually limited) access.\n\n*Stretch goal* - research one of the above extended forms of data/data loading. See if you can get a basic example working in a notebook. Image, text, or (public) APIs are probably more tractable - databases are interesting, but there aren't many publicly accessible and they require a great deal of setup.", "_____no_output_____" ] ], [ [ "", "_____no_output_____" ] ] ]
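A minimal sketch of part 3, using the classic UCI iris file (its URL may move over time; check the dataset page). The column names follow `iris.names`, and since iris happens to have no missing values, the `fillna` call is shown only for form:

```python
!wget https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data

import pandas as pd

cols = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width', 'species']
df = pd.read_csv('iris.data', header=None, names=cols, na_values='?')

df = df.fillna(df.median(numeric_only=True))  # fill numeric NaNs with column medians

df.plot.scatter(x='sepal_length', y='petal_length')  # scatterplot
df['sepal_width'].plot.hist(bins=20)                 # histogram
df['petal_width'].plot.density()                     # density plot
```

In a notebook, each plot call goes in its own cell so the figures render separately.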
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ] ]
cb9cc98daa7f16097ac9a29cda7cd8b9cf20e162
102,416
ipynb
Jupyter Notebook
python/pyechonest-json-tests.ipynb
ruohoruotsi/ruohoreviews
a00e724ccb98dcf51ae4ec541665d3059a29b27f
[ "MIT" ]
1
2020-07-28T22:03:55.000Z
2020-07-28T22:03:55.000Z
python/pyechonest-json-tests.ipynb
ruohoruotsi/ruohoreviews
a00e724ccb98dcf51ae4ec541665d3059a29b27f
[ "MIT" ]
null
null
null
python/pyechonest-json-tests.ipynb
ruohoruotsi/ruohoreviews
a00e724ccb98dcf51ae4ec541665d3059a29b27f
[ "MIT" ]
null
null
null
50.801587
1,012
0.613732
[ [ [ "###################################\n# Test cell, pyechonest - IO HAVOC\n###################################\n\nimport os\nimport sys\nsys.path.append(os.environ[\"HOME\"] + \"/github/pyechonest\")\nimport pyechonest.track as track\nimport pyechonest.artist as artist\nimport pyechonest.util as util\nimport pyechonest.song as song\n\nimport sys, pprint\n# pprint.pprint(sys.modules)\n\ndef get_tempo(artist):\n \"gets the tempo for a song\"\n results = song.search(artist=artist, results=1, buckets=['audio_summary'])\n if len(results) > 0:\n return results[0].audio_summary['tempo']\n else:\n return None\n\nfor hottt_artist in artist.top_hottt(results=1):\n print(hottt_artist.name, hottt_artist.id, hottt_artist.hotttnesss)\n # print(hottt_artist.name + \" \" + str(get_tempo(hottt_artist.name)))\n # a = artist.Artist(hottt_artist.name)\n \n a = artist.Artist(\"Red Foo\")\n images = a.get_images(results=2)\n print(images[0]['url'])\n# print(images[1]['url'])\n\n # test spotify (spotifpy)\n images_b = a.get_spotify_images()\n print(str(len(images_b)) + \" images found on Spotify\")\n print(images_b[0]['url'])", "Justin Bieber ARFCWSZ123526A0AFD 0.97552\n [pyechonest] found 0 pruned images\n [spotipy] found 0 images\n0 images found on Spotify\n" ], [ "###################################\n# Generate new pyechonest cache\n###################################\n\nimport time\nimport json\nfrom pyechonest import config\nfrom pyechonest import artist\n\nconfig.ECHO_NEST_API_KEY=\"2VN1LKJEQUBPUKXEC\"\nhotttArtistsCache = []\n\nfave4x4ArtistsList = [\n 'Juan Atkins','Faithless', 'Ruoho Ruotsi', 'Maurice Fulton',\n 'Leftfield', 'Frivolous', 'Basement Jaxx','Glitch Mob', \n 'Hollis P. Monroe', 'Frankie Knuckles', 'Francois K',\n 'Trentemøller', 'Chelonis R. Jones', 'Steve Bug', \n 'Jimpster', 'Jeff Samuel', 'Ian Pooley',\n 'Luomo', 'Kerri Chandler', 'Charles Webster',\n 'Roy Davis Jr.', 'Robert Owens',\n 'Black Science Orchestra', 'Mr. Fingers', 'Saint Etienne',\n 'Masters at Work', 'Theo Parrish', 'Moodymann', \n 'Basic Channel', 'Rhythm & Sound', 'Roman Flügel', \n 'Joe Lewis', 'DJ Said', 'Recloose', 'Kate Simko', 'Aschka', \n 'Maya Jane Coles', 'Gys', 'Deadbeat', 'Soultek',\n 'DeepChord', 'Vladislav Delay', 'Andy Stott', 'Intrusion', \n 'Rod Modell', 'Kassem Mosse', 'Murcof', 'Marc Leclair',\n 'Fax', 'Monolake', 'Kit Clayton', 'Bvdub', 'Swayzak', \n 'Wookie', 'Artful Dodger', 'MJ Cole',\n 'Les Rythmes Digitales', 'Fischerspooner', 'Cassius',\n 'Miguel Migs', 'Osunlade', 'Metro Area', 'Dennis Ferrer',\n 'Ron Trent', 'Larry Heard', 'Alton Miller', 'King Britt',\n 'Bougie Soliterre', 'Todd Terry', 'Black Coffee', \n 'Richie Hawtin', 'Speedy J', 'Kenny Larkin', 'Laurent Garnier',\n 'Carl Craig', 'Robert Hood', 'John Tejada', 'Thomas P. Heckmann',\n 'Aril Brikha', 'Tiefschwarz', 'Funk D\\'Void', 'A Guy Called Gerald',\n 'Jeff Mills', 'Aaron Carl', 'Josh Wink', 'Derrick May', \n 'Frankie Bones', 'DJ Assault', 'AUX 88', 'Fumiya Tanaka',\n 'The Lady Blacktronika', 'Junior Lopez', 'Someone Else', 'Noah Pred',\n 'Danny Tenaglia', 'Pete Tong', 'Booka Shade', 'Paul Kalkbrenner',\n 'Dapayk & Padberg', 'Igor O. 
Vlasov', 'Dreem Teem', 'Todd Edwards',\n '187 Lockdown', 'Serious Danger', 'Deep Dish', 'Ellen Allien',\n 'Matias Aguayo', 'Alex Smoke', 'Modeselektor', 'Mike Shannon', \n 'Radio Slave', 'Jonas Bering', 'Glitterbug', 'Justus Köhncke',\n 'Wolfgang Voigt', 'Ripperton', 'Isolée', 'Alex Under',\n 'Phonique', 'James Holden', 'Minilogue', 'Michael Mayer', \n 'Pantha Du Prince', 'Håkan Lidbo', 'Lusine', 'Kalabrese', \n 'Matthew Herbert', 'Jan Jelinek', 'Lucien-N-Luciano', 'Closer Musik',\n 'Apparat', 'Guillaume & The Coutu Dumonts', 'Thomas Brinkmann',\n 'The Soft Pink Truth', 'Ada', 'Wighnomy Brothers', 'Ricardo Villalobos',\n 'Jesse Somfay','Falko Brocksieper', 'Damian Lazarus', 'Superpitcher',\n 'Catz N\\' Dogz', 'Pan/Tone', 'Broker/Dealer', 'Dinky', 'T.Raumschmiere',\n 'Stephen Beaupré', 'Konrad Black', 'Claude VonStroke', 'DJ Koze',\n 'Cobblestone Jazz', 'Robag Wruhme', 'Seth Troxler', 'Stewart Walker', \n 'Farben', 'Pier Bucci', 'Mathew Jonson', 'LoSoul', 'Safety Scissors',\n 'Anja Schneider', 'Markus Guentner', 'Fuckpony', 'Onur Özer', 'Mossa',\n 'Kenneth James Gibson', 'Butane', 'Mikael Stavöstrand', 'Franklin de Costa',\n 'Quantec', 'Jin Choi', 'The Mountain People', 'Château Flight', 'Havantepe',\n 'Tomas Jirku', 'Limaçon', 'Redshape', 'Mike Huckaby', 'Taylor Deupree', \n 'Substance & Vainqueur'\n ]\n\nfaveBassArtistsList = [ \n 'Photek', 'Zomby', 'Kode9', 'Vex\\'d', 'Plastician', 'Joy Orbison', \n 'Eskmo', 'Tes La Rok', 'DFRNT', 'Africa HiTech', 'King Midas Sound',\n 'Skream', 'Djunya', '2562', 'Fantastic Mr. Fox', 'Ikonika', \n 'Timeblind', 'Mark Pritchard', 'Appleblim', 'Ramadanman', 'D1', \n 'Matty G', 'Peverelist', 'Untold', 'Roska', 'El-B', 'Mala', \n 'Coki',' Hijak', 'Mount Kimbie', 'Chrissy Murderbot', 'Scuba',\n 'Kush Arora', 'Meesha', 'Martyn'\n ]\n\n# Currently no image resources on EN, lastfm or Spotify for 'Terre Thaemlitz'\nfaveClassicArtistsList = [\n 'Björk', 'Kraftwerk', 'DJ Shadow', 'Radiohead', 'The Orb', \n 'Jean-Michel Jarre', 'Aphex Twin', 'Tangerine Dream', \n 'Boards of Canada', 'Amon Tobin', 'Ratatat', 'Massive Attack',\n 'Röyksopp', 'LCD Soundsystem', 'Gotan Project', \n 'Gus-Gus', 'Everything but the Girl', 'Ursula 1000', 'Llorca',\n 'UNKLE', 'The Future Sound of London', 'The Avalanches', \n 'Laika', 'Thievery Corporation', 'Groove Armada', 'Bonobo', \n 'DJ Food','Tricky', 'Dirty Vegas', 'Télépopmusik', 'Hooverphonic', \n 'dZihan & Kamien', 'Talvin Singh', 'DJ Vadim', 'Cibo Matto', \n 'Esthero', 'Martina Topley-Bird', 'Dimitri From Paris', \n 'Coldcut', 'Death in Vegas', 'Róisín Murphy', 'Nitin Sawhney',\n 'José Padilla', 'Jimi Tenor', 'Mr. 
Scruff', 'Dub Pistols', \n 'Morcheeba', 'Supreme Beings of Leisure', 'Air', 'DJ Krush', 'RJD2',\n 'Underworld', 'jenn mierau', 'Einstürzende Neubauten',\n 'Nurse with Wound', 'The Legendary Pink Dots', 'Skinny Puppy', \n 'Atari Teenage Riot', 'Venetian Snares', 'µ-Ziq', 'Richard Devine',\n 'Squarepusher', 'Autechre', 'Le Tigre', 'Queens of the Stone Age',\n 'Xiu Xiu', 'Baby Dee', 'Alastair Galbraith', '不失者', 'I Am Robot and Proud',\n 'Meg Baird'\n ]\n\nfaveElectroacousticArtistsList = [\n 'Arthur Russell', 'Jon Appleton', 'Charles Dodge', 'Morton Subotnick', \n 'James Tenney', 'David Tudor', 'Vladimir Ussachevsky', \n 'Pauline Oliveros', 'Robert Ashley', 'Nam June Paik', 'La Monte Young', \n 'Phill Niblock', 'François Bayle', 'James Tenney', 'Tim Hecker', 'Pamela Z',\n 'Christian Wolff', 'Jean-Claude Risset', 'Paul Lansky', 'Laurie Spiegel',\n 'Antye Greie', 'Ryoji Ikeda', 'alva noto', 'Ryuichi Sakamoto', 'Lawrence English',\n 'Tujiko Noriko', 'Arvo Pärt', 'Fennesz', 'Christopher Willits', 'Colleen',\n 'Ben Frost', 'Jóhann Jóhannsson', 'Sylvain Chauveau'\n ]\n\nfavedubArtistsList = [\n 'King Tubby', 'Scientist', 'Lee \"Scratch\" Perry', 'Augustus Pablo', \n 'Prince Jammy', 'Mad Professor', 'Roots Radics', 'The Upsetters', \n 'Sly Dunbar', 'Robbie Shakespeare', 'Keith Hudson', 'Tappa Zukie', 'Big Youth', \n 'The Aggrovators', 'U-Roy', 'Prince Far I', \n 'Black Uhuru', 'Horace Andy', 'I-Roy', 'The Abyssinians', \n 'Pablo Moses', 'Max Romeo', 'The Heptones', 'Burning Spear',\n 'Dennis Brown', 'Jacob Miller', 'Barrington Levy', 'Sugar Minnot',\n 'Yellowman', 'Gregory Isaacs', 'John Holt', 'Alton Ellis',\n 'Ken Boothe', 'The Ethiopians', 'Joe Higgs', 'Tommy McCook', \n 'The Melodians', 'Delroy Wilson', 'Isaac Haile Selassie', 'Polycubist'\n ]\n\nfaveAfricanArtistsList = [\n 'Manu Dibango', 'Baaba Maal',\n 'Antibalas Afrobeat Orchestra', 'Orlando Julius', 'William Onyeabor', \n 'Orchestre Poly-Rythmo', 'Sir Victor Uwaifo', \n 'Tony Allen & His Afro Messengers', 'Sahara All Stars Band Jos', \n 'Lijadu Sisters', 'King Sunny Ade', 'Ebo Taylor', \n 'Gasper Lawal', 'Tunji Oyelana and the Benders', '2 Face', 'P Square',\n 'Shina Williams & His African Percussionists', 'Weird MC', 'Plantashun Boiz',\n 'Paul I.K. 
Dairo', 'D\\'banj', 'Ruggedman', 'Eedris Abdulkareem', \n 'Styl-Plus', 'Tony Tetuila', 'Olamide', 'Ebenezer Obey', \n 'Haruna Ishola', 'Lágbájá', 'Prince Nico Mbarga', 'West African Highlife Band',\n 'Modenine', 'Terry tha Rapman', 'Olu Maintain', 'Majek Fashek', 'Konono N°1',\n 'Koffi Olomidé', 'Les Bantous de la Capitale', 'Thomas Mapfumo', 'Oliver Mtukudzi',\n 'Chiwoniso Maraire', 'Thomas Mapfumo & The Blacks Unlimited', 'Angélique Kidjo',\n 'Oumou Sangare', 'Ismaël Lô', 'Geoffrey Oryema', 'Salif Keita', 'Amadou & Mariam',\n 'Orchestra Baobab', 'Bembeya Jazz National', 'Tiwa Savage'\n ]\n\n\ndef addArtist(artist):\n print(artist.name, artist.id)\n artistToAdd = { \"name\": artist.name,\n \"id\": artist.id,\n \"images\": artist.get_images(results=25), \n \"URLs\": artist.urls,\n \"genres\": artist.terms,\n \"twitter_id\": artist.get_twitter_id()\n }\n \n hotttArtistsCache.append(artistToAdd)\n time.sleep(10) # delays for 20 seconds\n\n \ndef addFaveArtistList(artistList):\n for fave_artist in artistList:\n a = artist.Artist(fave_artist, buckets=['images', 'urls', 'terms'])\n addArtist(a)\n \n \ndef writeArtistsCaches():\n \n # HOTTT artists\n for hottt_artist in artist.top_hottt(results=300):\n a = artist.Artist(hottt_artist.id, buckets=['images', 'urls', 'terms'])\n addArtist(a)\n\n # FAVE 4x4 artists\n addFaveArtistList(fave4x4ArtistsList)\n\n # FAVE Bass artists\n addFaveArtistList(faveBassArtistsList)\n \n # FAVE Classic artists\n addFaveArtistList(faveClassicArtistsList)\n \n # FAVE Electroacoustic artists\n addFaveArtistList(faveElectroacousticArtistsList)\n\n # FAVE Dub artists\n addFaveArtistList(favedubArtistsList)\n \n # FAVE African artists\n addFaveArtistList(faveAfricanArtistsList)\n\n \n with open('artistMetaData.js', 'w') as outfile:\n outfile.write(\"var hotttArtistsCache = \")\n json.dump(hotttArtistsCache, outfile)\n outfile.write(\";\")\n outfile.close()\n\n print (\"\\n\" + \"Fini - writeArtistsCaches\") \n \n\n###################### \nprint('fave4x4ArtistsList: ' + str(len(fave4x4ArtistsList)))\nprint('faveBassArtistsList: ' + str(len(faveBassArtistsList)))\nprint('faveClassicArtistsList: ' + str(len(faveClassicArtistsList)))\nprint('faveElectroacousticArtistsList: ' + str(len(faveElectroacousticArtistsList)))\nprint('favedubArtistsList: ' + str(len(favedubArtistsList)))\nprint('faveAfricanArtistsList: ' + str(len(faveAfricanArtistsList)))\nprint('\\n\\n')\n\nwriteArtistsCaches() ", "fave4x4ArtistsList: 179\nfaveBassArtistsList: 35\nfaveClassicArtistsList: 71\nfaveElectroacousticArtistsList: 33\nfavedubArtistsList: 40\nfaveAfricanArtistsList: 51\n\n\n\nJustin Bieber ARFCWSZ123526A0AFD\n [pyechonest] found 5 pruned images\nOne Direction ARZHFAY130708EE366\n [pyechonest] found 0 pruned images\n[{'height': 1000, 'url': 'https://i.scdn.co/image/5bb443424a1ad71603c43d67f5af1a04da6bb3c8', 'width': 1000}]\n [spotipy] found 1 images\nKygo ARTZYQN13EEEF973E7\n [pyechonest] found 1 pruned images\nAdele AR7J9AP1187FB5BD64\n [pyechonest] found 5 pruned images\nCalvin Harris ARJRB241187FB556A3\n [pyechonest] found 5 pruned images\nEllie Goulding ARKTTJV12592CDA07F\n [pyechonest] found 0 pruned images\n[{'height': 1000, 'url': 'https://i.scdn.co/image/cdfa418a53726ce2255cd543d4be873af49b5499', 'width': 1000}]\n [spotipy] found 1 images\nTaylor Swift ARS54I31187FB46721\n [pyechonest] found 7 pruned images\nAvicii ARWLAEE122BCFCA245\n [pyechonest] found 1 pruned images\nEd Sheeran ARSDWSZ122ECCB706A\n [pyechonest] found 1 pruned images\nLost Frequencies ARDAMPQ14681604720\n 
[pyechonest] found 0 pruned images\n[{'height': 1000, 'url': 'https://i.scdn.co/image/5161f528e78f8818f844c38253adaa052320dc1f', 'width': 1000}]\n [spotipy] found 1 images\nThe Weeknd ARYUDWF12F2B89BB33\n [pyechonest] found 0 pruned images\n[{'height': 1000, 'url': 'https://i.scdn.co/image/2adb640684adf725303a89f6e132f036f30c82e0', 'width': 1000}]\n [spotipy] found 1 images\nDrake ARODZUF11F4C841E1F\n [pyechonest] found 1 pruned images\nDavid Guetta ARH2QI91187FB3788D\n [pyechonest] found 5 pruned images\nRobin Schulz ARYQOAT1359C24C8BB\n [pyechonest] found 1 pruned images\nSam Smith ARUZM8A11C8A41519C\n [pyechonest] found 3 pruned images\nMeghan Trainor ARGUQXQ12D5CD78CB1\n [pyechonest] found 6 pruned images\nMajor Lazer ARLGOUD12298900ACE\n [pyechonest] found 1 pruned images\nSia AR6ENUY1187B994158\n [pyechonest] found 3 pruned images\nColdplay ARJ7KF01187B98D717\n [pyechonest] found 2 pruned images\nSelena Gomez ARZ5NER11A348F0B33\n [pyechonest] found 3 pruned images\nAriana Grande AROHQCR13244CF7152\n [pyechonest] found 1 pruned images\nZara Larsson ARVJAER13C7A655CBB\n [pyechonest] found 4 pruned images\nWiz Khalifa ARN0GFV1187FB508CC\n [pyechonest] found 4 pruned images\nOmi ARZ2B1F1187FB3EAAD\n [pyechonest] found 4 pruned images\nNicky Jam ARSTQ2A1187B99CF14\n [pyechonest] found 3 pruned images\nFelix Jaehn ARFCEEW140A1ED1875\n [pyechonest] found 1 pruned images\nMaroon 5 ARF5M7Q1187FB501E8\n [pyechonest] found 1 pruned images\nShawn Mendes ARPPLQX146B59A9035\n [pyechonest] found 0 pruned images\n[{'height': 500, 'url': 'https://i.scdn.co/image/31fb1351a95eac56ea900ce4f52e7339d75e8e53', 'width': 500}]\n [spotipy] found 1 images\nHozier ARKLCET1407EC5357C\n [pyechonest] found 1 pruned images\nPharrell Williams ARLIWDY1456895A560\n [pyechonest] found 4 pruned images\nMaître Gims ARQYBQA1480E5F2085\n [pyechonest] found 1 pruned images\nJason Derülo ARICFET123E29C2452\n [pyechonest] found 1 pruned images\nRudimental ARPXEKG121318C5A25\n [pyechonest] found 2 pruned images\nJames Bay ARHEQXN13EB4582BB9\n [pyechonest] found 2 pruned images\nMadcon ARNOG4Q1187B9A5A62\n [pyechonest] found 1 pruned images\nJess Glynne ARDVNQH14408EC0258\n [pyechonest] found 0 pruned images\n[{'height': 1000, 'url': 'https://i.scdn.co/image/616ed21d1ed5b88e0b184c26092a1f4095bcc2f1', 'width': 1000}]\n [spotipy] found 1 images\nCharlie Puth ARCOVHW12CEB2758FC\n [pyechonest] found 1 pruned images\nDisclosure ARPCATO12B3B3540E2\n [pyechonest] found 4 pruned images\n5 Seconds Of Summer AREXQVQ13B20C75202\n [pyechonest] found 8 pruned images\nSkrillex ARUAMGA123E29C15AD\n [pyechonest] found 0 pruned images\n[{'height': 667, 'url': 'https://i.scdn.co/image/39e1d16745912f0ae73e7967f062a4414479775b', 'width': 1000}]\n [spotipy] found 1 images\nWalk the Moon ARAZEGJ11E905798ED\n [pyechonest] found 3 pruned images\nDiplo ARQDK391187FB3CA23\n [pyechonest] found 1 pruned images\nNaughty Boy ARDWQVL1187FB3771D\n [pyechonest] found 14 pruned images\nDemi Lovato ARUCF6P11A348F0B17\n [pyechonest] found 4 pruned images\nFifth Harmony ARJMGRN13F3ABCE000\n [pyechonest] found 6 pruned images\nJessie J ARWWTYW11F4C842642\n [pyechonest] found 4 pruned images\nImagine Dragons ARRVRFP126FE025327\n [pyechonest] found 3 pruned images\nPitbull ARK9BHE1187FB3AC9D\n [pyechonest] found 2 pruned images\nTove Lo ARBUIGY13A709C567F\n [pyechonest] found 2 pruned images\nFlo Rida AR1IJ1Z11C8A41500D\n [pyechonest] found 3 pruned images\nChris Brown ARXOTQH1187FB57084\n [pyechonest] found 4 pruned images\nJohn Legend ARHDI7O1187B9B6850\n 
[pyechonest] found 3 pruned images\nRihanna ARKU3Z61187FB51DCA\n [pyechonest] found 2 pruned images\nMacklemore & Ryan Lewis ARZEHLK12DDD1882BF\n [pyechonest] found 0 pruned images\n[{'height': 667, 'url': 'https://i.scdn.co/image/e759528b0f02b598d603d33a33c7e93e52d904e7', 'width': 1000}]\n [spotipy] found 1 images\nLana Del Rey ARICLSK131AAE25F5E\n [pyechonest] found 0 pruned images\n[{'height': 1000, 'url': 'https://i.scdn.co/image/15f34b9bf2867358d36a6e9c60ed0dacf29a6340', 'width': 1000}]\n [spotipy] found 1 images\nLittle Mix ARONVKP134198E55ED\n [pyechonest] found 0 pruned images\n[{'height': 973, 'url': 'https://i.scdn.co/image/dd11273cbb4101ead697b679644a39da2079a75e', 'width': 973}]\n [spotipy] found 1 images\nGalantis ARFKZRQ1391FF1B17F\n [pyechonest] found 1 pruned images\nZedd ARTQFNZ12E2A671BBF\n [pyechonest] found 3 pruned images\nNico & Vinz ARPYLSV143823E2416\n [pyechonest] found 3 pruned images\nRachel Platten ARVBMUD122988F3D45\n [pyechonest] found 0 pruned images\n[{'height': 667, 'url': 'https://i.scdn.co/image/a6e40711bd9934f9bf953053314ac8c2e6681afe', 'width': 1000}]\n [spotipy] found 1 images\nThe Avener ARHNOTL141EC88437C\n [pyechonest] found 0 pruned images\n[{'height': 1000, 'url': 'https://i.scdn.co/image/3e89639fb36e18b79f733b12020bb60335deac31', 'width': 1000}]\n [spotipy] found 1 images\nAdam Lambert ARIGTAO11FED0C4411\n [pyechonest] found 3 pruned images\nSam Hunt AR0IVTI1187B9B47ED\n [pyechonest] found 1 pruned images\nP!nk ARZRYXF1187FB555CC\n [pyechonest] found 5 pruned images\nDJ Snake AR22ZMN1187B9B6203\n [pyechonest] found 2 pruned images\nArmin van Buuren AR0W7561187FB57131\n [pyechonest] found 2 pruned images\nMåns Zelmerlöw ARLSB9Q1187B9B8D62\n [pyechonest] found 0 pruned images\n[{'height': 563, 'url': 'https://i.scdn.co/image/ef44764b30143160fbfb04ea71a728c7f4ea2de1', 'width': 1000}]\n [spotipy] found 1 images\nMagic! ARYUWLF1473F667067\n [pyechonest] found 1 pruned images\nJ Balvin ARMVSKH130DCC6F6D7\n [pyechonest] found 4 pruned images\nGrimes ARJUURB12AF7D91356\n [pyechonest] found 0 pruned images\n[{'height': 750, 'url': 'https://i.scdn.co/image/bf093be59129a2cc4e2e37a01f544130567b4d49', 'width': 1000}]\n [spotipy] found 1 images\nHardwell ARTARKC1241B9C4789\n [pyechonest] found 7 pruned images\nOtto Knows ARYUPIQ131AAE260D1\n [pyechonest] found 3 pruned images\nBigBang ARNVEFF1187FB37369\n [pyechonest] found 24 pruned images\nDimitri Vegas ARPMQIO1241B9C506A\n [pyechonest] found 5 pruned images\nNick Jonas AR0SJVA11C8A414FDE\n [pyechonest] found 2 pruned images\nTiësto ARIW30O1187FB5A29A\n [pyechonest] found 10 pruned images\nMark Ronson ARPM1O31187B9A0ECD\n [pyechonest] found 2 pruned images\nX Ambassadors AROZWDS13DA2F9F488\n [pyechonest] found 0 pruned images\n[{'height': 563, 'url': 'https://i.scdn.co/image/f0b476c460e92861b37918d4cdc7cda1dda65f27', 'width': 1000}]\n [spotipy] found 1 images\nKaty Perry AR0IVTL1187B9AD520\n [pyechonest] found 13 pruned images\nKlingande ARFRFJX13DFF684D25\n [pyechonest] found 2 pruned images\nThe Notorious B.I.G. ARSZ6MA1187FB43D29\n [pyechonest] found 2 pruned images\nEminem ARTH9041187FB43E1F\n [pyechonest] found 2 pruned images\nMr. 
Probz ARMTPRF1269FCD5C94\n [pyechonest] found 2 pruned images\nMartin Garrix ARAGGUA136E57105F8\n [pyechonest] found 1 pruned images\nFlorence + The Machine ARNCHOP121318C56B8\n [pyechonest] found 3 pruned images\nCarly Rae Jepsen ARRWGYU12086C17800\n [pyechonest] found 1 pruned images\nJuan Magan AR912GD1187B9B9B0F\n [pyechonest] found 2 pruned images\nDuke Dumont ARZNPPG1187FB4C845\n [pyechonest] found 4 pruned images\nMartin Solveig ARAJHQQ1187B98B369\n [pyechonest] found 1 pruned images\nZhu ARZLHMW12AF7DABF36\n [pyechonest] found 1 pruned images\nEnrique Iglesias ARDCFNZ1187FB3DDB0\n [pyechonest] found 4 pruned images\nAndy Grammer ARGTGFO11EBCD751DA\n [pyechonest] found 3 pruned images\nPrince Royce ARASIMV1257509D792\n [pyechonest] found 1 pruned images\nMaluma ARUZWFS131529ED7B6\n [pyechonest] found 1 pruned images\nHalsey ARUXOSI133F5CF830B\n [pyechonest] found 0 pruned images\n[{'height': 500, 'url': 'https://i.scdn.co/image/94f16d3edb81d13f210e8c9edfd9cfded7b3549e', 'width': 500}]\n [spotipy] found 1 images\nAronChupa ARGYKIB142EE80AF67\n [pyechonest] found 0 pruned images\n[{'height': 750, 'url': 'https://i.scdn.co/image/43fbd31068bc54e4b634fd409f670f1a0d2bc358', 'width': 1000}]\n [spotipy] found 1 images\nLuke Bryan ARNNM56119B86686EA\n [pyechonest] found 2 pruned images\nRicky Martin AR2FSX31187B98E3EF\n [pyechonest] found 4 pruned images\nDaddy Yankee ARBNXYA1187FB51C50\n [pyechonest] found 1 pruned images\nLukas Graham ARAKRAI136EBF89A5A\n [pyechonest] found 4 pruned images\nTroye Sivan ARPFLJR12FC14E461E\n [pyechonest] found 0 pruned images\n[{'height': 1000, 'url': 'https://i.scdn.co/image/a1d744336c4449c3e44669dec631852f35e24382', 'width': 1000}]\n [spotipy] found 1 images\nKendrick Lamar AREJQVO12C1DF51FFE\n [pyechonest] found 2 pruned images\nEva Simons ARO7WS11187FB3A2A9\n [pyechonest] found 0 pruned images\n[{'height': 615, 'url': 'https://i.scdn.co/image/b171d21ed601e7ab40f7d93eb6be2a194186f647', 'width': 485}]\n [spotipy] found 1 images\nVance Joy ARXGKXE13CABFC507C\n [pyechonest] found 2 pruned images\nBruno Mars ARJHCSL123E29C21E8\n [pyechonest] found 0 pruned images\n[{'height': 500, 'url': 'https://i.scdn.co/image/f22774ca7d636e724164a65b2601ab39538a3aed', 'width': 500}]\n [spotipy] found 1 images\nClean Bandit ARIRBAA133D0D26D78\n [pyechonest] found 2 pruned images\nKendji Girac ARUMBXB1469BE6C175\n [pyechonest] found 0 pruned images\n[{'height': 272, 'url': 'https://i.scdn.co/image/2e71765b6bb085ab156f7f7ebbf30a85bb24ee16', 'width': 413}]\n [spotipy] found 1 images\n소녀시대 AR7ASIS11C8A416335\n [pyechonest] found 0 pruned images\n[{'height': 640, 'url': 'https://i.scdn.co/image/19262c59fca03e0a3547577941a59b0fe05c4337', 'width': 640}]\n [spotipy] found 1 images\nWisin ARCON5N1187B99EE79\n [pyechonest] found 12 pruned images\nBeyoncé AR65K7A1187FB4DAA4\n [pyechonest] found 24 pruned images\nPentatonix ARLXJMY13385835016\n [pyechonest] found 8 pruned images\nChristine and the Queens ARFJRXE13559201595\n [pyechonest] found 0 pruned images\n[{'height': 640, 'url': 'https://i.scdn.co/image/8a74a29c3a4f07399ba41481921e08e167ea00d3', 'width': 640}]\n [spotipy] found 1 images\nThe 1975 AROJAOG1391FF1AFE1\n [pyechonest] found 2 pruned images\nCro ARIGDMX1269FCD4CFB\n [pyechonest] found 2 pruned images\nCarrie Underwood ARLE2071187FB3A270\n [pyechonest] found 5 pruned images\nLuan Santana ARBDJZA129CDD17965\n [pyechonest] found 0 pruned images\n[{'height': 1000, 'url': 'https://i.scdn.co/image/2eda0c9a4ada974cd07634e3ab2cf5ce3fb4f9bf', 'width': 1000}]\n [spotipy] found 1 
images\nJohn Newman ARNWGMK1241B9C9CE2\n [pyechonest] found 3 pruned images\nNelly ARCBD0U1187FB466EF\n [pyechonest] found 2 pruned images\nAndreas Bourani ARUDDNT130708ED1B1\n [pyechonest] found 2 pruned images\nFall Out Boy ARSG7NQ1187FB57482\n [pyechonest] found 1 pruned images\nDaft Punk ARF8HTQ1187B9AE693\n [pyechonest] found 0 pruned images\n[{'height': 751, 'url': 'https://i.scdn.co/image/e52651f03da8c9bf264f75cdabf39cf039606ddc', 'width': 999}]\n [spotipy] found 1 images\nMika AR8QWS71187B9A7BD5\n [pyechonest] found 4 pruned images\nPassenger ARMTZR11187B9AD0C2\n [pyechonest] found 1 pruned images\nGente De Zona ARMHTOS11F50C48448\n [pyechonest] found 4 pruned images\nDillon Francis ARYHZBN12E7C5F3265\n [pyechonest] found 4 pruned images\nBlack M ARYUCME13C1C299823\n [pyechonest] found 0 pruned images\n[{'height': 800, 'url': 'https://i.scdn.co/image/142e68a7b0677d8d24686d5e1e85c2392edc1f51', 'width': 531}]\n [spotipy] found 1 images\nSam Feldt ARCDGZR146DFC82BDC\n [pyechonest] found 0 pruned images\n[{'height': 960, 'url': 'https://i.scdn.co/image/29470c951e9fac1834894792b3d3f88203a1f53f', 'width': 640}]\n [spotipy] found 1 images\nSarah Connor ARNAM6D1187B9B2718\n [pyechonest] found 1 pruned images\nAdam Levine ARSVDDS1187FB36855\n [pyechonest] found 3 pruned images\nPablo Alboran AROOLMX12AF7DA3659\n [pyechonest] found 0 pruned images\n[{'height': 563, 'url': 'https://i.scdn.co/image/4bc0794b544ec4d7f53a9662affa64573c579487', 'width': 1000}]\n [spotipy] found 1 images\nOneRepublic AR73S4G1187B9A03C2\n [pyechonest] found 2 pruned images\nLil Jon ARHLGDG11F4C846A37\n [pyechonest] found 2 pruned images\nAmerican Authors ARECGUA13B4C995FC8\n [pyechonest] found 0 pruned images\n[{'height': 667, 'url': 'https://i.scdn.co/image/8530b5d5a0c974aaa4a0cea395750a8c5b839438', 'width': 1000}]\n [spotipy] found 1 images\nOf Monsters and Men ARJLJNQ131B421B438\n [pyechonest] found 4 pruned images\nGeorge Ezra ARKCGJK141A92CD71D\n [pyechonest] found 1 pruned images\nVigiland ARFSXOZ14185A12FF9\n [pyechonest] found 0 pruned images\n[{'height': 640, 'url': 'https://i.scdn.co/image/b124d2e3635dd47b5c0e45fb0743b17325c05809', 'width': 640}]\n [spotipy] found 1 images\nDeorro ARSVRID139A2DB2C5B\n [pyechonest] found 0 pruned images\n[{'height': 1000, 'url': 'https://i.scdn.co/image/c32d8fb6bc98dd13950570bcc14232f3b1900c2c', 'width': 667}]\n [spotipy] found 1 images\nBring Me the Horizon ARC8U4E1187FB5BFA9\n [pyechonest] found 0 pruned images\n[{'height': 1000, 'url': 'https://i.scdn.co/image/f03a66bded86fdcacb337236db0df7a4d8db4b82', 'width': 1000}]\n [spotipy] found 1 images\nOliver Heldens ARSWVFF13F2EC8D3F6\n [pyechonest] found 0 pruned images\n[{'height': 563, 'url': 'https://i.scdn.co/image/d597f64d6765e9c9eddbc4da372b03e47137323f', 'width': 1000}]\n [spotipy] found 1 images\nShakira AR6PJ8R1187FB5AD70\n [pyechonest] found 0 pruned images\n[{'height': 1500, 'url': 'https://i.scdn.co/image/2f3e5d32ac142276452b97fd388c1fed49563b24', 'width': 1000}]\n [spotipy] found 1 images\nMacklemore AR2XDHU1187B9B484F\n [pyechonest] found 3 pruned images\nMumford and Sons ARJERLY11E2835D7DC\n [pyechonest] found 2 pruned images\nf(x) AROBEGP12406A89D26\n [pyechonest] found 0 pruned images\n[{'height': 640, 'url': 'https://i.scdn.co/image/9753f6ca4c3df55b19302ac745cdb66a5504c1fa', 'width': 640}]\n [spotipy] found 1 images\nMadonna ARBEOHF1187B9B044D\n [pyechonest] found 2 pruned images\nMalú AR0R2CT1187FB4E64E\n [pyechonest] found 0 pruned images\n[{'height': 1081, 'url': 
'https://i.scdn.co/image/260c9e030d795f1c7b25d609335fc70863d2edf4', 'width': 1000}]\n [spotipy] found 1 images\nSigma ARMM06Q1187FB37202\n [pyechonest] found 4 pruned images\nJesse & Joy ARMHAS71187FB5AFE9\n [pyechonest] found 1 pruned images\nRed Foo AREL9S41187FB38F83\n [pyechonest] found 0 pruned images\n [spotipy] found 0 images\nRita Ora ARLXLHI134198E77C6\n [pyechonest] found 3 pruned images\nExo ARNBGK01187B98EF43\n [pyechonest] found 4 pruned images\nLorde ARUXAKW13D610B0A9B\n [pyechonest] found 17 pruned images\nLena AR38CVO1187FB50DDD\n [pyechonest] found 2 pruned images\nAdel Tawil ARQI82O1187B9A81FA\n [pyechonest] found 3 pruned images\nGestört aber Geil ARDBOZH13D7B67D8C5\n [pyechonest] found 0 pruned images\n[{'height': 1000, 'url': 'https://i.scdn.co/image/a22aa5e4fa039b91c3b74399b2dc466d38a62360', 'width': 1000}]\n [spotipy] found 1 images\nA Great Big World ARGPAWB13B0169D793\n [pyechonest] found 3 pruned images\nMarina Kaye ARPZMVW145D815BF82\n [pyechonest] found 1 pruned images\nBanda Sinaloense MS de Sergio Lizárraga ARLQFWU12AA0D9194B\n [pyechonest] found 3 pruned images\nMotrip ARZXHMI134CD511FFE\n [pyechonest] found 1 pruned images\nDr. Dre ARZER7I1187FB385AF\n [pyechonest] found 4 pruned images\nStromae ARLGNCZ1269FCD4F62\n [pyechonest] found 2 pruned images\nLady Gaga ARX6TAQ11C8A415850\n [pyechonest] found 15 pruned images\nSido ARW1D6G1187FB490CD\n [pyechonest] found 2 pruned images\nMarvin Gaye ARVNNXD1187B9AE50D\n [pyechonest] found 0 pruned images\n[{'height': 1547, 'url': 'https://i.scdn.co/image/703fe6e6d231364377958c1cd0725a8e6e1d7f6c', 'width': 1000}]\n [spotipy] found 1 images\nTwenty One Pilots ARZLNTM12AF7DB4839\n [pyechonest] found 2 pruned images\nNicki Minaj ARDYVEQ122BCFCC19E\n [pyechonest] found 4 pruned images\nR.City ARBIZYD1241B9C906D\n [pyechonest] found 0 pruned images\n [spotipy] found NOTHING\nJulion Alvarez y Su Norteno Banda ARXFFTB122988FD1A5\n [pyechonest] found 4 pruned images\nFleetwood Mac AR6BJ1V1187B9AE3B7\n [pyechonest] found 7 pruned images\nAlesso ARFBFYB12C60E4E610\n [pyechonest] found 5 pruned images\nJustin Timberlake ARWK5QH1187B9A9B7F\n [pyechonest] found 6 pruned images\nGlasperlenspiel ARUJFDO132698ABCC0\n [pyechonest] found 3 pruned images\nBob Dylan AR5FP401187FB523C9\n [pyechonest] found 13 pruned images\nCHVRCHES ARCZOJQ13A5B8886AB\n [pyechonest] found 3 pruned images\nYears & Years ARPDYVR13A6C15570C\n [pyechonest] found 1 pruned images\nLouane ARTIMFV13CFD709D06\n [pyechonest] found 1 pruned images\nLed Zeppelin ARDIBRT1187B9AF176\n [pyechonest] found 5 pruned images\nBlake Shelton ARV9LSS1187B98C471\n [pyechonest] found 6 pruned images\nArctic Monkeys ARM0P6Z1187FB4D466\n [pyechonest] found 4 pruned images\nInna AROPZVY11E2835D3D3\n [pyechonest] found 3 pruned images\nAnna Naklab ARQYBLG14303B7418C\n [pyechonest] found 3 pruned images\nBryan Adams ARRJES71187FB4D09E\n [pyechonest] found 0 pruned images\n[{'height': 1000, 'url': 'https://i.scdn.co/image/a536bf733956e47aba8cdaa17d974b4496c5cc55', 'width': 1000}]\n [spotipy] found 1 images\nJasmine Thompson ARHOHRN12D5CD7C67A\n [pyechonest] found 3 pruned images\nZac Brown Band AR478R21187FB42BC4\n [pyechonest] found 6 pruned images\nAlejandro Sanz ARQATCR1187FB4D3E6\n [pyechonest] found 2 pruned images\nBirdy ARRJRIS1187B9AD39C\n [pyechonest] found 1 pruned images\nElle King ARPNRSK13687E71F38\n [pyechonest] found 0 pruned images\n[{'height': 1469, 'url': 'https://i.scdn.co/image/15078e584a84382bb92de4d051d94ce080af2922', 'width': 1000}]\n [spotipy] found 1 
images\nAndrea Bocelli ARV481W1187FB38CD9\n [pyechonest] found 11 pruned images\nEfecto Pasillo ARYWMSK12CEB27583D\n [pyechonest] found 1 pruned images\nKiesza ARHJSZQ11EBCD7BE27\n [pyechonest] found 5 pruned images\nSimple Plan ARLYTW71187FB3809D\n [pyechonest] found 3 pruned images\nDimitri Vegas and Like Mike ARXBNMD12496DAB7D2\n [pyechonest] found 1 pruned images\nRobin Thicke AROETUE1187FB4A677\n [pyechonest] found 2 pruned images\nDawin ARTRFCX1359C24CC98\n [pyechonest] found 0 pruned images\n[{'height': 432, 'url': 'https://i.scdn.co/image/e1c002b490d8df48d56045a165f444b8d1f5883d', 'width': 999}]\n [spotipy] found 1 images\nShaggy ARTYXQC1187B9ACAB2\n [pyechonest] found 2 pruned images\nSoprano ARI9DDK1187FB4FD68\n [pyechonest] found 1 pruned images\nTim McGraw ARDR3K91187B9B04EB\n [pyechonest] found 5 pruned images\nA$AP Rocky ARVKMMF133F5CF74A6\n [pyechonest] found 2 pruned images\nGorgon City ARCJPWR136658E907C\n [pyechonest] found 1 pruned images\nRomeo Santos AREUJEI12FE0876ED5\n [pyechonest] found 5 pruned images\nFedez ARXARGE1269FB357A8\n [pyechonest] found 2 pruned images\nImany ARJATFR130708ED2D0\n [pyechonest] found 3 pruned images\nMuse ARR3ONV1187B9A2F59\n [pyechonest] found 2 pruned images\nFlorida Georgia Line ARIUOBQ12BE4890CD5\n [pyechonest] found 4 pruned images\nLabrinth ARANUHH12A57A06AEF\n [pyechonest] found 0 pruned images\n[{'height': 750, 'url': 'https://i.scdn.co/image/60c57cce09b04c0dd99264dda95138b856904860', 'width': 1000}]\n [spotipy] found 1 images\nDvbbs ARUXSAC136E570E79A\n [pyechonest] found 0 pruned images\n[{'height': 460, 'url': 'https://i.scdn.co/image/8e8b43412b0b39cefcb97902e61f7d0f956566ce', 'width': 650}]\n [spotipy] found 1 images\nThe Vamps AROSSCZ148A8DE9F11\n [pyechonest] found 0 pruned images\n[{'height': 720, 'url': 'https://i.scdn.co/image/0956d6f06e9de9d9cc87036b65c701a93761e3e0', 'width': 720}]\n [spotipy] found 1 images\nMichael Jackson ARXPPEY1187FB51DF4\n [pyechonest] found 21 pruned images\nThomas Rhett ARNPFHG13544FB4F3E\n [pyechonest] found 2 pruned images\nWanda ARR35051187B99CED8\n [pyechonest] found 1 pruned images\nPlan B ARXS6PG1187FB4195A\n [pyechonest] found 0 pruned images\n[{'height': 372, 'url': 'https://i.scdn.co/image/745a51df66eddd146cc02f1c9af60da957f9a3f7', 'width': 261}]\n [spotipy] found 1 images\nFetty Wap ARXCQFR14690BA8487\n [pyechonest] found 0 pruned images\n[{'height': 563, 'url': 'https://i.scdn.co/image/0d56632017e9a02e8a5d5f8f7babf002f019a27b', 'width': 1000}]\n [spotipy] found 1 images\nMarco Mengoni ARATWKK1271F576926\n [pyechonest] found 0 pruned images\n[{'height': 300, 'url': 'https://i.scdn.co/image/fff282fc5a66a111be63e1d2e80195437268cf96', 'width': 300}]\n [spotipy] found 1 images\nFirst Aid Kit ARBDKOI12086C14B65\n [pyechonest] found 1 pruned images\nEchosmith ARXDZWY137E74B02FB\n [pyechonest] found 3 pruned images\nDJ Antoine AR2E3LI1187FB38797\n [pyechonest] found 2 pruned images\nFréro Delavega ARUTYVT13DA2EF94FB\n [pyechonest] found 1 pruned images\nFeder ARBUOJU13361805FF0\n [pyechonest] found 1 pruned images\nOlly Murs ARGLFXO12592CDB6B4\n [pyechonest] found 1 pruned images\nFoo Fighters AR6XPWV1187B9ADAEB\n [pyechonest] found 6 pruned images\nIcona Pop ARMXUUI12E9676D850\n [pyechonest] found 2 pruned images\nCharli XCX ARMWDYO11F50C49B36\n [pyechonest] found 1 pruned images\nGwen Stefani ARVBRGZ1187FB4675A\n [pyechonest] found 5 pruned images\nAfrojack ARX6MR511C8A42C159\n [pyechonest] found 0 pruned images\n[{'height': 768, 'url': 
'https://i.scdn.co/image/e2e0b8df2086b8e4b66bc6920509d07c6180587b', 'width': 768}]\n [spotipy] found 1 images\nElvis Presley ARULZ741187B9AD2EF\n [pyechonest] found 7 pruned images\nAnirudh Ravichander AREMXDB134EC8C13F2\n [pyechonest] found 2 pruned images\nTori Kelly ARJEMDG11EBCD7AB46\n [pyechonest] found 2 pruned images\nEric Church AROED381187B99AE76\n [pyechonest] found 6 pruned images\nTame Impala ARGWWVA11E2835DEF5\n [pyechonest] found 1 pruned images\nMo ARKSSSF1187FB3B4B9\n [pyechonest] found 3 pruned images\nKeith Urban AR95IYB1187FB3FE0B\n [pyechonest] found 4 pruned images\nThe Game ARPBTRI1187FB52457\n [pyechonest] found 10 pruned images\nYellow Claw ARSWPLI138257457AE\n [pyechonest] found 1 pruned images\nMadilyn Bailey ARTEOXV12F6A860BA3\n [pyechonest] found 1 pruned images\nThe Beatles AR6XZ861187FB4CECD\n [pyechonest] found 10 pruned images\nKwabs ARACFYF143393707B2\n [pyechonest] found 0 pruned images\n[{'height': 1000, 'url': 'https://i.scdn.co/image/42a9677ab3e9d850e748d01deeb6117c4cd22d73', 'width': 1000}]\n [spotipy] found 1 images\nJanet Jackson ART4QZC1187FB51612\n [pyechonest] found 4 pruned images\n周杰倫 AR9ZP1S1187FB52513\n [pyechonest] found 5 pruned images\nALTJ ARDLXKN122ECCB85EC\n [pyechonest] found 2 pruned images\nLydia AR0HT2Z1187B9952F5\n [pyechonest] found 2 pruned images\nPearl Jam ARFVYJI1187B9B8E13\n [pyechonest] found 4 pruned images\nPanic! At the Disco ARL6FF81187FB5A916\n [pyechonest] found 4 pruned images\nManá ARX2F7M1187B9A5563\n [pyechonest] found 2 pruned images\nBritney Spears AR03BDP1187FB5B324\n [pyechonest] found 7 pruned images\nCœur De Pirate ARFSBHR12086C16CCA\n [pyechonest] found 0 pruned images\n[{'height': 1183, 'url': 'https://i.scdn.co/image/9c0f5c303a0ccbfa631386be16d99ae91addcbdd', 'width': 964}]\n [spotipy] found 1 images\nBeck ARC2XR11187FB5CC95\n [pyechonest] found 4 pruned images\nEazy-E AR0D6EL1187B9AF46B\n [pyechonest] found 3 pruned images\nWinner ARNN95J1187B99296D\n [pyechonest] found 2 pruned images\nJosef Salvat ARCZTBF13CD5136547\n [pyechonest] found 0 pruned images\n[{'height': 563, 'url': 'https://i.scdn.co/image/4e4e847ef9b0f6cb796cb0c611455e9290d960ad', 'width': 1000}]\n [spotipy] found 1 images\nSDP ARMIN011187B9947EB\n [pyechonest] found 2 pruned images\nHenrique & Juliano ARLZKAF13D8AAAC3B4\n [pyechonest] found 0 pruned images\n[{'height': 800, 'url': 'https://i.scdn.co/image/02305ca9bfa3e4b6b2aa7e10802a3089679063c9', 'width': 800}]\n [spotipy] found 1 images\nMiriam Bryant ARDBLSF1382574524F\n [pyechonest] found 1 pruned images\nLoreen ARUZUYU12EEE6497CB\n [pyechonest] found 5 pruned images\nMatt Simons ARPCQSV12D5CD781B4\n [pyechonest] found 1 pruned images\nKadebostany ARYCDTS13C83629FCD\n [pyechonest] found 1 pruned images\nKylie Minogue AR8V1X71187B9B919B\n [pyechonest] found 3 pruned images\nAlan Jackson AROWH4C1187B9B11CB\n [pyechonest] found 5 pruned images\nAtif Aslam ARJIFDG1187FB462F1\n [pyechonest] found 7 pruned images\nhurts AR79GT01187FB5CC51\n [pyechonest] found 0 pruned images\n[{'height': 1000, 'url': 'https://i.scdn.co/image/24656952adb2db2cc047a1a81e1d4bf081cafe17', 'width': 1000}]\n [spotipy] found 1 images\nMichel Teló ARWERSW129CDD1796E\n [pyechonest] found 0 pruned images\n[{'height': 873, 'url': 'https://i.scdn.co/image/fc878eb2318dcd2710227d8dd3ce65002ff3751f', 'width': 606}]\n [spotipy] found 1 images\nEnya AREACYD1187FB566DE\n [pyechonest] found 1 pruned images\nLunchmoney Lewis ARIYYFR14B5A84137A\n [pyechonest] found 0 pruned images\n[{'height': 1000, 'url': 
'https://i.scdn.co/image/852e7e4c646e094557b7a5710b0af977bcdce6fc', 'width': 1000}]\n [spotipy] found 1 images\nCéline Dion ARFWL8S1187B9B4B44\n [pyechonest] found 3 pruned images\nAmy Winehouse ARWD25M1187FB4C563\n [pyechonest] found 3 pruned images\nAC/DC ARWR05M1187B9951A2\n [pyechonest] found 14 pruned images\nDJ Khaled ARPVZTA1187B9A63A8\n [pyechonest] found 2 pruned images\nPerfume ARDPL611187FB3D4F7\n [pyechonest] found 0 pruned images\n[{'height': 667, 'url': 'https://i.scdn.co/image/cb39546243936f1c0a63b4fcdd7bbb307cb7ca2a', 'width': 1000}]\n [spotipy] found 1 images\nThe XX ARNDRBI122BCFCC2E3\n [pyechonest] found 3 pruned images\nRöyksopp ARDA9NZ1187FB3A825\n [pyechonest] found 1 pruned images\nLittle Big Town ARLNMV01187FB3ECA8\n [pyechonest] found 3 pruned images\nNathaniel Rateliff ARHNZSA1269FCD3329\n [pyechonest] found 3 pruned images\nChris Young ARG8E041187B98B941\n [pyechonest] found 2 pruned images\nGeorge Strait ARK4ZEK1187B990A4F\n [pyechonest] found 4 pruned images\nMylène Farmer ARWYYZP1187FB4FDB7\n [pyechonest] found 1 pruned images\nMariah Carey ARKSZW81187B9B695D\n [pyechonest] found 6 pruned images\nBig Sean ARGG7S81187FB511E9\n [pyechonest] found 1 pruned images\nLa Arrolladora Banda El Limón ARLNZDA122988EDC3E\n [pyechonest] found 4 pruned images\nThe Chainsmokers ARSUSSG13B864F22D8\n [pyechonest] found 0 pruned images\n[{'height': 1000, 'url': 'https://i.scdn.co/image/d49d5744e44a51ee6dd5afcd0e3269c25ef76243', 'width': 1000}]\n [spotipy] found 1 images\nSilento ARQLUKU14A3B1DDF99\n [pyechonest] found 0 pruned images\n[{'height': 640, 'url': 'https://i.scdn.co/image/a278e30b0e371b3ba2f6d0116cae1d4c2ca6c893', 'width': 640}]\n [spotipy] found 1 images\nSynapson ARSLMZP12FE0874F36\n [pyechonest] found 2 pruned images\nSteve Aoki ARG420G1187FB5B636\n [pyechonest] found 2 pruned images\nKanye West ARRH63Y1187FB47783\n [pyechonest] found 11 pruned images\nQueen ARL4TII1187B9B46E1\n [pyechonest] found 12 pruned images\nAntti Tuisku ARGJXJD1187FB413B1\n [pyechonest] found 1 pruned images\nOmarion AR7GVU91187B99F7E2\n [pyechonest] found 1 pruned images\nMark Forster ARKMCGL13586C70E3E\n [pyechonest] found 0 pruned images\n[{'height': 667, 'url': 'https://i.scdn.co/image/44277e5b808274938ff500067d873e3d2c976bd0', 'width': 1000}]\n [spotipy] found 1 images\nChayanne AR8B79N1187B98FB4A\n [pyechonest] found 1 pruned images\nArijit Singh ARIZOQJ139FF6C8C58\n [pyechonest] found 6 pruned images\nLykke Li ARGHEC01187FB597B0\n [pyechonest] found 1 pruned images\nCarlos Vives ARLAWDD1187FB48367\n [pyechonest] found 6 pruned images\nMike Posner AREHKZU122E5C4FC81\n [pyechonest] found 1 pruned images\nA-Trak ARV87Z21187B98932E\n [pyechonest] found 0 pruned images\n[{'height': 563, 'url': 'https://i.scdn.co/image/7a91faa4b4d2e545478d6aac9729d528cc7165a5', 'width': 1000}]\n [spotipy] found 1 images\nPia Mia ARZPSCD13833B91374\n [pyechonest] found 1 pruned images\nSHINee ARD4ZPF11A348F0ACB\n [pyechonest] found 2 pruned images\nHillsong Worship ARBAXYQ1462590F668\n [pyechonest] found 0 pruned images\n[{'height': 387, 'url': 'https://i.scdn.co/image/8e21b02289a355a31d0c4fddb5105bf456e3a0df', 'width': 999}]\n [spotipy] found 1 images\nAKB48 ARNUUVP1187FB3D618\n [pyechonest] found 0 pruned images\n[{'height': 473, 'url': 'https://i.scdn.co/image/f45e90373480faad453733929bb366013b2612d6', 'width': 640}]\n [spotipy] found 1 images\nTravie McCoy ARYJHCE127D395D4BB\n [pyechonest] found 1 pruned images\nRod Stewart ARYGO6R1187B9A7822\n [pyechonest] found 4 pruned images\nGotye 
ARKBXMX1187B9AA807\n [pyechonest] found 0 pruned images\n[{'height': 665, 'url': 'https://i.scdn.co/image/d733af188f577d5b91c393633484c0ec6530a017', 'width': 999}]\n [spotipy] found 1 images\nCalibre 50 ARPAUWX12BEF24F888\n [pyechonest] found 3 pruned images\nMarc Anthony ARXM0CX1187B98FD56\n [pyechonest] found 1 pruned images\nJuan Atkins ARFEZLZ1187B9AEEED\n [pyechonest] found 2 pruned images\nFaithless ARXDQJB1187FB57154\n [pyechonest] found 3 pruned images\nRuoho Ruotsi ARPYOHF1257509B5C7\n [pyechonest] found 1 pruned images\nMaurice Fulton ARRWMKN1187FB43F91\n [pyechonest] found 2 pruned images\nLeftfield ARQLGB41187FB4F7EB\n [pyechonest] found 0 pruned images\n[{'height': 926, 'url': 'https://i.scdn.co/image/10981bce6373cfcfa2bcceaaaeaa89bb02094b86', 'width': 926}]\n [spotipy] found 1 images\nFrivolous ARN1TD11187B9A38EB\n [pyechonest] found 3 pruned images\nBasement Jaxx ARFSPOV1187B9B2363\n [pyechonest] found 3 pruned images\nThe Glitch Mob ARBBFJR126FE02537E\n [pyechonest] found 1 pruned images\nHollis P. Monroe AR5S2ZK1187B9B4C8F\n [pyechonest] found 1 pruned images\nFrankie Knuckles AR3X0GS1187FB3F714\n [pyechonest] found 5 pruned images\nFrançois K AREJDX21187FB42421\n [pyechonest] found 2 pruned images\nTrentemøller AR9S8551187FB3EA81\n [pyechonest] found 1 pruned images\nChelonis R. Jones ARNJWA01187FB4276C\n [pyechonest] found 1 pruned images\nSteve Bug ARFJHZU1187B98ACA1\n [pyechonest] found 1 pruned images\nJimpster AR5ZHV81187FB501C5\n [pyechonest] found 3 pruned images\nJeff Samuel AR9M3K01187FB381B2\n [pyechonest] found 3 pruned images\nIan Pooley AR7OO5F1187B9B84A3\n [pyechonest] found 3 pruned images\nLuomo AREKQXQ1187FB45D0E\n [pyechonest] found 1 pruned images\nKerri Chandler ARK0N891187B99BBC7\n [pyechonest] found 3 pruned images\nCharles Webster AROM3UD1187FB37B35\n [pyechonest] found 1 pruned images\nRoy Davis Jr. AR9C5VS1187FB377FE\n [pyechonest] found 4 pruned images\nRobert Owens ARZ2XLT1187B995BAE\n [pyechonest] found 3 pruned images\nBlack Science Orchestra ARAFW1Q1187FB49A74\n [pyechonest] found 2 pruned images\nMr. 
Fingers ARVNGQW1187FB4B235\n [pyechonest] found 0 pruned images\n[{'height': 640, 'url': 'https://i.scdn.co/image/6ae93db7f36f9431d13daf269983c876036b5425', 'width': 640}]\n [spotipy] found 1 images\nSaint Etienne ARJFNSC1187B9939BD\n [pyechonest] found 1 pruned images\nMasters at Work ARFLL2D1187FB52A62\n [pyechonest] found 3 pruned images\nTheo Parrish AR9ZGZK1187FB47228\n [pyechonest] found 3 pruned images\nMoodymann ARYP9WB1187B98C639\n [pyechonest] found 1 pruned images\nBasic Channel AR2CM6U1187B999EB7\n [pyechonest] found 6 pruned images\nRhythm & Sound AR8UPM11187FB3B707\n [pyechonest] found 2 pruned images\nRoman Flügel AR78FOI1187FB43573\n [pyechonest] found 2 pruned images\nJoe Lewis ARXLXHL1187B9967D0\n [pyechonest] found 1 pruned images\nDj Said ARKDABZ11F4C83F271\n [pyechonest] found 1 pruned images\nRecloose AR7300X1187B99ABFD\n [pyechonest] found 4 pruned images\nKate Simko ARI799Z1187FB4AC71\n [pyechonest] found 3 pruned images\nAschka ARQFAJV11F4C8410C2\n [pyechonest] found 1 pruned images\nMaya Jane Coles ARYVDGE123526A0EC0\n [pyechonest] found 2 pruned images\nGys AR75CQ71187B991C70\n [pyechonest] found 2 pruned images\nDeadbeat ARB3JMR1187FB4227B\n [pyechonest] found 3 pruned images\nSoultek ARU6K871187B9962B7\n [pyechonest] found 2 pruned images\nDeepChord ARP0AFR1187FB4BBB9\n [pyechonest] found 3 pruned images\nVladislav Delay ARRKV0P1187B98D4FE\n [pyechonest] found 1 pruned images\nAndy Stott AROEOHN1187B9B2736\n [pyechonest] found 1 pruned images\nIntrusion ARAIQJH11C8A421D75\n [pyechonest] found 4 pruned images\nRod Modell ARHM80M1187FB3713C\n [pyechonest] found 1 pruned images\nKassem Mosse AR6PAIJ119B8667EEB\n [pyechonest] found 2 pruned images\nMurcof AR6RHW31187B9A9698\n [pyechonest] found 2 pruned images\nMarc Leclair ARU1WAP1187B9AA0FA\n [pyechonest] found 1 pruned images\nFax AR13E8A1187FB3931D\n [pyechonest] found 5 pruned images\nMonolake AROVIZH1187FB37B1A\n [pyechonest] found 3 pruned images\nKit Clayton AROI7W91187B9AFD08\n [pyechonest] found 0 pruned images\n[{'height': 640, 'url': 'https://i.scdn.co/image/7bc2d7f84ab3fb7af0adfad1f5ac5b3ac008710f', 'width': 640}]\n [spotipy] found 1 images\nBvdub ARZLWJ21187FB57513\n [pyechonest] found 2 pruned images\nSwayzak AR36ITB1187B9A636D\n [pyechonest] found 4 pruned images\nWookie AR7SVON1187B9A7FEC\n [pyechonest] found 3 pruned images\nArtful Dodger AR18XYZ1187FB4CD22\n [pyechonest] found 7 pruned images\nMJ Cole ARK5MMZ1187FB466D1\n [pyechonest] found 4 pruned images\nLes Rythmes Digitales AR3NPVS1187FB5108F\n [pyechonest] found 3 pruned images\nFischerspooner AR9Z2O31187B9B49F9\n [pyechonest] found 2 pruned images\nCassius ARCGHBU1187FB4B637\n [pyechonest] found 0 pruned images\n[{'height': 279, 'url': 'https://i.scdn.co/image/5e2603e98cc1701f3c947e4bae1e8b278fb8ed9a', 'width': 445}]\n [spotipy] found 1 images\nMiguel Migs ARY2ZUX1187B9A266C\n [pyechonest] found 4 pruned images\nOsunlade ARM29941187FB42945\n [pyechonest] found 4 pruned images\nMetro Area ARXX2TR1187FB5B420\n [pyechonest] found 6 pruned images\nDennis Ferrer AR31Y6Y1187B99A541\n [pyechonest] found 3 pruned images\nRon Trent AR62SFY1187FB49528\n [pyechonest] found 3 pruned images\nLarry Heard ARA1SZ31187FB3F149\n [pyechonest] found 0 pruned images\n[{'height': 408, 'url': 'https://i.scdn.co/image/f72d7297ffa9abffb22cba12756168560272147c', 'width': 600}]\n [spotipy] found 1 images\nAlton Miller ARM625W1187FB53403\n [pyechonest] found 1 pruned images\nKing Britt ARCKUD71187FB3BC0A\n [pyechonest] found 3 pruned images\nBougie Soliterre 
ARA52PX1187B9971A0\n [pyechonest] found 1 pruned images\nTodd Terry ARERYQ81187FB46B0C\n [pyechonest] found 1 pruned images\nBlack Coffee AREQQCV12472CE3116\n [pyechonest] found 3 pruned images\nRichie Hawtin AR2LV0Q1187B9AA3B1\n [pyechonest] found 4 pruned images\nSpeedy J ARJZX6S1187B9927C6\n [pyechonest] found 4 pruned images\nKenny Larkin ARKRUMS1187B9BA1B6\n [pyechonest] found 2 pruned images\nLaurent Garnier ARRYBYF1187FB3929E\n [pyechonest] found 1 pruned images\nCarl Craig ARQ3EJL1187B98BF06\n [pyechonest] found 1 pruned images\nRobert Hood ARDNSXI1187B993545\n [pyechonest] found 6 pruned images\nJohn Tejada ARFO65E1187B9ACA27\n [pyechonest] found 6 pruned images\nThomas P. Heckmann ARON6EK1187B9AEE57\n [pyechonest] found 1 pruned images\nAril Brikha ARX0RMO1187B99425D\n [pyechonest] found 4 pruned images\nTiefschwarz ARBSSV01187FB3A0C4\n [pyechonest] found 4 pruned images\nFunk D'Void ARJH2B61187B9B9465\n [pyechonest] found 0 pruned images\n[{'height': 600, 'url': 'https://i.scdn.co/image/dd87c13e799ba586b87d54cc54e5832e26e331d8', 'width': 399}]\n [spotipy] found 1 images\nA Guy Called Gerald AR3V8ZM1187FB4CFA5\n [pyechonest] found 4 pruned images\nJeff Mills ARVLHYF1187B9BA2DA\n [pyechonest] found 2 pruned images\nAaron Carl ARIMXIP11F4C8425C0\n [pyechonest] found 1 pruned images\nJosh Wink ARAAZN9119B86688D9\n [pyechonest] found 5 pruned images\nDerrick May AR08VFK1187FB505A3\n [pyechonest] found 2 pruned images\nFrankie Bones ARZ4DLQ1187B9B9106\n [pyechonest] found 2 pruned images\nDJ Assault ARHODHZ1187B9AF76A\n [pyechonest] found 4 pruned images\nAUX 88 ARE7BYR1187FB4CCB3\n [pyechonest] found 3 pruned images\nFumiya Tanaka ARE7QZJ1187FB560DA\n [pyechonest] found 1 pruned images\nThe Lady Blacktronika ARDTRSA1269FB32C51\n [pyechonest] found 2 pruned images\nJunior Lopez ARZFYWS1241B9C8AE5\n [pyechonest] found 1 pruned images\nSomeone Else ARMQJOP1187B98CFD6\n [pyechonest] found 2 pruned images\nNoah Pred AR7YZ221187FB3B9A3\n [pyechonest] found 1 pruned images\nDanny Tenaglia ARYSPFJ1187B99B6F1\n [pyechonest] found 4 pruned images\nPete Tong ARIDID61187B99B31A\n [pyechonest] found 4 pruned images\nBooka Shade ARAPRD01187FB52527\n [pyechonest] found 3 pruned images\nPaul Kalkbrenner ARH33CP1187FB3836B\n [pyechonest] found 1 pruned images\nDapayk & Padberg ARK4U291187FB3A297\n [pyechonest] found 0 pruned images\n[{'height': 640, 'url': 'https://i.scdn.co/image/16242934603dbb80e539231e02b913a4a49a71c7', 'width': 640}]\n [spotipy] found 1 images\nIgor O. 
Vlasov AR9VKG21187B9B7492\n [pyechonest] found 0 pruned images\n[{'height': 640, 'url': 'https://i.scdn.co/image/1cf7b5b4c40cbb5360e35e5dbf5acbd2d8e47ad6', 'width': 640}]\n [spotipy] found 1 images\nDreem Teem ARSLFJI1187B9B344D\n [pyechonest] found 1 pruned images\nTodd Edwards ARZ6G0I1187FB47969\n [pyechonest] found 5 pruned images\n187 Lockdown ARYH88P1187FB4D19E\n [pyechonest] found 0 pruned images\n[{'height': 640, 'url': 'https://i.scdn.co/image/aa679b2395b3e4aa12b8e8b2c7d4ab6f738299ae', 'width': 640}]\n [spotipy] found 1 images\nSerious Danger ARXZPGN1187B9959A3\n [pyechonest] found 1 pruned images\nDeep Dish ARVPLEV1187B9AEBF1\n [pyechonest] found 2 pruned images\nEllen Allien AR5HM7D1187FB4D2B7\n [pyechonest] found 2 pruned images\nMatias Aguayo ARXDV6O1187B9B635F\n [pyechonest] found 1 pruned images\nAlex Smoke AR1WEZI1187B9AA902\n [pyechonest] found 4 pruned images\nModeselektor ARA6BH81187B98F3AF\n [pyechonest] found 3 pruned images\nMike Shannon ARUTPVX1187B9B15A2\n [pyechonest] found 3 pruned images\nRadio Slave ARKVKKG1187FB3B49A\n [pyechonest] found 3 pruned images\nJonas Bering ARUIVWJ1187B99C325\n [pyechonest] found 0 pruned images\n[{'height': 640, 'url': 'https://i.scdn.co/image/e692ad09aa258ac2e0475cdf1d1f0b57f1b9679b', 'width': 640}]\n [spotipy] found 1 images\nGlitterbug ARJYWGI12086C14F6F\n [pyechonest] found 2 pruned images\nJustus Köhncke ARDZXJ01187FB4FA1C\n [pyechonest] found 3 pruned images\nWolfgang Voigt ARO6SRC1187FB4DC59\n [pyechonest] found 0 pruned images\n[{'height': 436, 'url': 'https://i.scdn.co/image/2eefffe6087825d9a32780a37bbf24f6d043e627', 'width': 292}]\n [spotipy] found 1 images\nRipperton AR82IDY1187FB4979A\n [pyechonest] found 0 pruned images\n[{'height': 645, 'url': 'https://i.scdn.co/image/ad3abb0cc935b096dddd1bfdc556870ef8783cff', 'width': 1000}]\n [spotipy] found 1 images\nIsolée ARVYGJF1187B9A9555\n [pyechonest] found 4 pruned images\nAlex Under ARR8ZE31187FB381C6\n [pyechonest] found 2 pruned images\nPhonique ARJPEOF1187B998D00\n [pyechonest] found 3 pruned images\nJames Holden ARCU5PR1187B995F02\n [pyechonest] found 2 pruned images\nMinilogue ARO9FTG1187FB49832\n [pyechonest] found 4 pruned images\nMichael Mayer ARNNUFY1187FB58880\n [pyechonest] found 2 pruned images\nPantha Du Prince ARFDPOU1187FB36386\n [pyechonest] found 1 pruned images\nHåkan Lidbo ARPLEU11187FB5ABF8\n [pyechonest] found 2 pruned images\nLusine ARBERAU1187FB45567\n [pyechonest] found 4 pruned images\nKalabrese AROM0O71187FB425D7\n [pyechonest] found 2 pruned images\nMatthew Herbert ARG6SK51187B99A03C\n [pyechonest] found 3 pruned images\nJan Jelinek ARDPRO11187B9B0C61\n [pyechonest] found 3 pruned images\nLucien-N-Luciano ARGNUTA1187FB397C6\n [pyechonest] found 0 pruned images\n[{'height': 635, 'url': 'https://i.scdn.co/image/e8a21c0a87d737f877b169c7fae4eb0096e782dd', 'width': 640}]\n [spotipy] found 1 images\nCloser Musik ARAUI561187FB3CE99\n [pyechonest] found 1 pruned images\nApparat ARAKSQE1187B9AFE6C\n [pyechonest] found 2 pruned images\nGuillaume & The Coutu Dumonts AR7QN2J1187FB4BE2D\n [pyechonest] found 1 pruned images\nThomas Brinkmann ARDD6EI1187B9AFC4E\n [pyechonest] found 3 pruned images\nThe Soft Pink Truth ARRGBU11187B98A7A3\n [pyechonest] found 1 pruned images\nAda ARWL02J1187FB38DA8\n [pyechonest] found 2 pruned images\nWighnomy Brothers ARB6R5V1187B9B65AD\n [pyechonest] found 2 pruned images\nRicardo Villalobos AR0LB821187B9B90F1\n [pyechonest] found 1 pruned images\nJesse Somfay ARUTCX41187FB43DF8\n [pyechonest] found 2 pruned images\nFalko 
Brocksieper ARXT4DN1187FB389C1\n [pyechonest] found 3 pruned images\nDamian Lazarus ARLDECI1187B9B89E1\n [pyechonest] found 1 pruned images\nSuperpitcher ARB4AWX1187B9B0563\n [pyechonest] found 3 pruned images\nCatz N' Dogz AR5YPV81187FB5B7E4\n [pyechonest] found 0 pruned images\n[{'height': 640, 'url': 'https://i.scdn.co/image/5c405282ae1236ce08bee242975be44562685480', 'width': 640}]\n [spotipy] found 1 images\nPan/Tone ARTRD4D1187FB38CBD\n [pyechonest] found 0 pruned images\n[{'height': 615, 'url': 'https://i.scdn.co/image/3ad37ff0b79c4d876f25b9fafa2f95bd7ed5badd', 'width': 640}]\n [spotipy] found 1 images\nBroker/Dealer ARTE3YT1187FB42D1A\n [pyechonest] found 1 pruned images\nDinky ARVL7X81187B989EED\n [pyechonest] found 2 pruned images\nT.Raumschmiere ARIW0KX1187FB5BA38\n [pyechonest] found 2 pruned images\nStephen Beaupré AR942TI1187FB43470\n [pyechonest] found 1 pruned images\nKonrad Black AR9YBRW1187B99A8DE\n [pyechonest] found 1 pruned images\nClaude VonStroke AR7EWST1187FB45890\n [pyechonest] found 0 pruned images\n[{'height': 667, 'url': 'https://i.scdn.co/image/75cd94141dc76debb9a5d96d1512891e07ec8389', 'width': 1000}]\n [spotipy] found 1 images\nDJ Koze AR3O2UJ1187FB38B8B\n [pyechonest] found 1 pruned images\nCobblestone Jazz AR5NYUD1187B988D9C\n [pyechonest] found 3 pruned images\nRobag Wruhme ARW3CB71187B9B69FE\n [pyechonest] found 2 pruned images\nSeth Troxler ARKSM3X1187FB4ADDA\n [pyechonest] found 1 pruned images\nStewart Walker ARWTENB1187B99E043\n [pyechonest] found 2 pruned images\nFarben ARL1QN11187FB39407\n [pyechonest] found 0 pruned images\n[{'height': 667, 'url': 'https://i.scdn.co/image/01ecf4d63fc57504fca26fd3dd8c78fca89fd6e2', 'width': 1000}]\n [spotipy] found 1 images\nPier Bucci ARM93971187FB42EED\n [pyechonest] found 1 pruned images\nMathew Jonson AR88M7P1187B9B7192\n [pyechonest] found 3 pruned images\nLoSoul ARSM0321187FB567B0\n [pyechonest] found 2 pruned images\nSafety Scissors ARG1UZV1187FB37E88\n [pyechonest] found 2 pruned images\nAnja Schneider ARGRM841187B99D8A0\n [pyechonest] found 0 pruned images\n[{'height': 538, 'url': 'https://i.scdn.co/image/2803d0c2234f7487484510b6f0228517385a95dc', 'width': 600}]\n [spotipy] found 1 images\nMarkus Guentner AR2NMDJ1187FB392B9\n [pyechonest] found 0 pruned images\n[{'height': 640, 'url': 'https://i.scdn.co/image/a4dcc1a8cebaa7f5c4c6e0d89315cf8787b72688', 'width': 640}]\n [spotipy] found 1 images\nFuckpony ARVFCVA1187B992D8A\n [pyechonest] found 2 pruned images\nOnur Özer AR07YJO1187FB3EA2F\n [pyechonest] found 2 pruned images\nMossa ARS68LC1187B9A3443\n [pyechonest] found 1 pruned images\nKenneth James Gibson ARUUNI81187FB51C80\n [pyechonest] found 0 pruned images\n[{'height': 640, 'url': 'https://i.scdn.co/image/6bc229d160fcdc6ba24d490e47ad8e42c375af4a', 'width': 640}]\n [spotipy] found 1 images\nButane ARC8SY01187FB49FC7\n [pyechonest] found 11 pruned images\nMikael Stavöstrand ARIGP6W1187B99AF87\n [pyechonest] found 1 pruned images\nFranklin de Costa ARUSA7K1187FB38211\n [pyechonest] found 2 pruned images\nQuantec AR3LLO61187FB4BBCF\n [pyechonest] found 4 pruned images\nJin Choi ARZPL3F1187B9B21F9\n [pyechonest] found 1 pruned images\nThe Mountain People AR6VAR71187FB5CE81\n [pyechonest] found 1 pruned images\nChâteau Flight AR5PG7K1187FB4E0A6\n [pyechonest] found 2 pruned images\nHavantepe ARWOTCH123526A0F43\n [pyechonest] found 1 pruned images\nTomas Jirku ARMNBNR1187B9B1B54\n [pyechonest] found 0 pruned images\n[{'height': 640, 'url': 'https://i.scdn.co/image/645099954bbf46435e60c1040be9f0fd735edc44', 
'width': 640}]\n [spotipy] found 1 images\nLimaçon AR5WADS1187B9B73B5\n [pyechonest] found 3 pruned images\nRedshape ARVH0FS1187B996447\n [pyechonest] found 3 pruned images\nMike Huckaby AR9MS421187B99106A\n [pyechonest] found 1 pruned images\nTaylor Deupree AR226CG1187B9B1748\n [pyechonest] found 2 pruned images\nSubstance & Vainqueur ARVUK8D1187FB4C0A6\n [pyechonest] found 0 pruned images\n[{'height': 640, 'url': 'https://i.scdn.co/image/624bfc246b33298f16f56535957766a3cf0fce67', 'width': 640}]\n [spotipy] found 1 images\nPhotek ARKQ6QN1187B9B016D\n [pyechonest] found 1 pruned images\nZomby ARGGXTD11C8A42D729\n [pyechonest] found 0 pruned images\n[{'height': 640, 'url': 'https://i.scdn.co/image/91a5da3abe1f5ee714f10e0a27579c7f396e4126', 'width': 640}]\n [spotipy] found 1 images\nKode9 ARJDPS41187FB3E84A\n [pyechonest] found 4 pruned images\nVex'd ARXAHU31187B9B1520\n [pyechonest] found 1 pruned images\nPlastician ARC2C0O1187B9A9869\n [pyechonest] found 7 pruned images\nJoy Orbison ARLBHBE12472CE2A35\n [pyechonest] found 2 pruned images\nEskmo ARMXXTP12086C118B8\n [pyechonest] found 2 pruned images\nTes La Rok AR54F671187FB507B4\n [pyechonest] found 0 pruned images\n[{'height': 640, 'url': 'https://i.scdn.co/image/88cd075352d3975fe8e6852b6266e4298c73ec34', 'width': 640}]\n [spotipy] found 1 images\nDFRNT ARHMRGY1241B9C7E9E\n [pyechonest] found 3 pruned images\nAfrica HiTech ARWONCX12592CD91AB\n [pyechonest] found 3 pruned images\nKing Midas Sound ARBAND51187FB50C7F\n [pyechonest] found 0 pruned images\n[{'height': 240, 'url': 'https://i.scdn.co/image/8a96479fc93ae9e4302cafba06b85d0dba4951c3', 'width': 368}]\n [spotipy] found 1 images\nSkream ARNXHP01187FB585FE\n [pyechonest] found 2 pruned images\nDjunya ARSZKFN12086C11883\n [pyechonest] found 1 pruned images\n2562 AR8Y3UG1187B9A3468\n [pyechonest] found 2 pruned images\nFantastic Mr. 
Fox ARFQXHA12086C131AB\n [pyechonest] found 2 pruned images\nIkonika ARH9FYH11A348F0D1D\n [pyechonest] found 0 pruned images\n[{'height': 1485, 'url': 'https://i.scdn.co/image/3e77042ede69316fb879a0f2bcfc4b941077e265', 'width': 1000}]\n [spotipy] found 1 images\nTimeblind AR5K6AA1187B994927\n [pyechonest] found 2 pruned images\nMark Pritchard AR6OVD21187B9A62F9\n [pyechonest] found 1 pruned images\nAppleblim ARCG0KG1187FB50B9A\n [pyechonest] found 2 pruned images\nRamadanman ARXGTYM1187FB5613A\n [pyechonest] found 2 pruned images\nD1 ARZDIO01187B98EDA3\n [pyechonest] found 3 pruned images\nMatty G ARR77TQ1187FB507C2\n [pyechonest] found 4 pruned images\nPeverelist AR7EAKO11A348F0D21\n [pyechonest] found 1 pruned images\nUntold ARDNHTQ11C8A4153C2\n [pyechonest] found 2 pruned images\nRoska ARNSHBI123E29C2853\n [pyechonest] found 1 pruned images\nEl-B AROMBZC1187B988FEA\n [pyechonest] found 1 pruned images\nMala ARZ4G1D1187FB4643B\n [pyechonest] found 2 pruned images\nCoki ARFELYQ1187FB5830C\n [pyechonest] found 4 pruned images\nHijak ARTPLNM1187FB507C4\n [pyechonest] found 2 pruned images\nMount Kimbie ARHDFGD12472CE34AC\n [pyechonest] found 1 pruned images\nChrissy Murderbot AR53LQT1187B9B4ADC\n [pyechonest] found 1 pruned images\nScuba AR26LAR1187FB4846B\n [pyechonest] found 0 pruned images\n[{'height': 1200, 'url': 'https://i.scdn.co/image/87f606bfe23565338785bd15958be3eb4049de39', 'width': 800}]\n [spotipy] found 1 images\nKush Arora AR91M1O119B340318B\n [pyechonest] found 2 pruned images\nMeesha ARTUHCW1242078044A\n [pyechonest] found 1 pruned images\nMartyn ARMSTWE1187FB3C8EB\n [pyechonest] found 2 pruned images\nBjörk ARAOQ5T1187FB435AB\n [pyechonest] found 9 pruned images\nKraftwerk AR1ZKBE1187FB53629\n [pyechonest] found 3 pruned images\nDJ Shadow ARME7101187FB3ECDD\n [pyechonest] found 1 pruned images\nRadiohead ARH6W4X1187B99274F\n [pyechonest] found 4 pruned images\nThe Orb AR414I51187B99D330\n [pyechonest] found 4 pruned images\nJean Michel Jarre ARGCQH71187B9B6921\n [pyechonest] found 9 pruned images\nAphex Twin ARYPTWE1187FB49D64\n [pyechonest] found 0 pruned images\n[{'height': 563, 'url': 'https://i.scdn.co/image/cdec8941332a899a09854dbcd56fc65ce4548b58', 'width': 1000}]\n [spotipy] found 1 images\nTangerine Dream AR2L9A61187B9ADDBC\n [pyechonest] found 1 pruned images\nBoards of Canada AROG07L1187FB4C826\n [pyechonest] found 0 pruned images\n[{'height': 902, 'url': 'https://i.scdn.co/image/74b3d0a50dc8f149e62fa642a6ab3f96b85e3232', 'width': 1000}]\n [spotipy] found 1 images\nAmon Tobin AR2AVSC1187B991634\n [pyechonest] found 0 pruned images\n[{'height': 988, 'url': 'https://i.scdn.co/image/5cca719982db81d271fb18a0a5b353a068d787bb', 'width': 999}]\n [spotipy] found 1 images\nRatatat AREPZK61187B990670\n [pyechonest] found 1 pruned images\nMassive Attack ARNF13I1187FB562A5\n [pyechonest] found 3 pruned images\nRöyksopp ARDA9NZ1187FB3A825\n [pyechonest] found 1 pruned images\nLCD Soundsystem ARFIU2R1187B9927F9\n [pyechonest] found 2 pruned images\nGotan Project ARBYYT61187FB37B64\n [pyechonest] found 3 pruned images\nGusGus ARDKBAV1187FB4AF61\n [pyechonest] found 1 pruned images\nEverything but the Girl ARTMP681187B9B77CD\n [pyechonest] found 0 pruned images\n[{'height': 1249, 'url': 'https://i.scdn.co/image/942a51071f256ec10e2b2983adc783899fb72ade', 'width': 1000}]\n [spotipy] found 1 images\nUrsula 1000 ARHMH7Q1187B988F38\n [pyechonest] found 4 pruned images\nLlorca ARRRORY1187B9A139D\n [pyechonest] found 2 pruned images\nUNKLE ARQG2JT1187B99F211\n [pyechonest] found 2 
pruned images\nThe Future Sound of London ARUMQ5P1187B9AC811\n [pyechonest] found 0 pruned images\n[{'height': 979, 'url': 'https://i.scdn.co/image/1825794be8dff85c852ad3e6f5fc8fb9e1a4f522', 'width': 1000}]\n [spotipy] found 1 images\nThe Avalanches ARKTCFU1187FB57FA4\n [pyechonest] found 3 pruned images\nLaika ARI8GYL1187B98EB12\n [pyechonest] found 3 pruned images\nThievery Corporation ARVTCS91187FB44DF6\n [pyechonest] found 1 pruned images\nGroove Armada ARWRMUE1187B9AC2CB\n [pyechonest] found 2 pruned images\nBonobo ARRB71R1187FB5751C\n [pyechonest] found 3 pruned images\nDJ Food ARM9UI31187B99D115\n [pyechonest] found 5 pruned images\nTricky AR065TW1187FB4C3A5\n [pyechonest] found 3 pruned images\nDirty Vegas ARV86KJ1187B9AE6D7\n [pyechonest] found 3 pruned images\nTélépopmusik ARQ5CQ21187B9B855B\n [pyechonest] found 4 pruned images\nHooverphonic AR6GPHG1187FB5608B\n [pyechonest] found 2 pruned images\ndZihan & Kamien ARUD8TE1187FB5B489\n [pyechonest] found 1 pruned images\nTalvin Singh AR0N0DX1187FB41327\n [pyechonest] found 3 pruned images\nDJ Vadim AR58HSY1187FB3E218\n [pyechonest] found 0 pruned images\n[{'height': 1500, 'url': 'https://i.scdn.co/image/acd45b5731da98d4556e5ac5663db7704c39c47e', 'width': 1000}]\n [spotipy] found 1 images\nCibo Matto ARPOXHP1187B9B8112\n [pyechonest] found 2 pruned images\nEsthero ARR1EM41187B98D816\n [pyechonest] found 2 pruned images\nMartina Topley-Bird ARUS5MJ1187B9B0A5D\n [pyechonest] found 1 pruned images\nDimitri From Paris ARVNR9N1187B9B88A7\n [pyechonest] found 5 pruned images\nColdcut ARM7LTM1187B9B7EA3\n [pyechonest] found 7 pruned images\nDeath in Vegas ARSY3S31187B9AFA0E\n [pyechonest] found 0 pruned images\n[{'height': 798, 'url': 'https://i.scdn.co/image/42a64219885a3931903703920d68f5a9289fca79', 'width': 1000}]\n [spotipy] found 1 images\nRóisín Murphy ARF799P1187B9A97D5\n [pyechonest] found 3 pruned images\nNitin Sawhney AR0V19Y1187FB5A73C\n [pyechonest] found 2 pruned images\nJosé Padilla ARI8BV61187B9B2993\n [pyechonest] found 3 pruned images\nJimi Tenor AR0G6AY1187B990CD8\n [pyechonest] found 3 pruned images\nMr. 
Scruff AR74WCA1187FB555D7\n [pyechonest] found 2 pruned images\nDub Pistols ARXK4YK1187FB4D0C4\n [pyechonest] found 2 pruned images\nMorcheeba ARU3C671187FB3F71B\n [pyechonest] found 2 pruned images\nSupreme Beings of Leisure AR20YMY1187B990B28\n [pyechonest] found 3 pruned images\nAir ARZZ5ZR1187FB4D149\n [pyechonest] found 3 pruned images\nDJ Krush ARH7O4C1187B9A5837\n [pyechonest] found 1 pruned images\nRJD2 ARQG4O41187B98A03B\n [pyechonest] found 2 pruned images\nUnderworld ARQ985T1187FB48640\n [pyechonest] found 2 pruned images\njenn mierau ARBKBKK11EB9C81612\n [pyechonest] found 1 pruned images\nEinstürzende Neubauten ARVQ0YD1187B9BA5B4\n [pyechonest] found 2 pruned images\nNurse With Wound AR5OKRL1187B9B04FC\n [pyechonest] found 4 pruned images\nThe Legendary Pink Dots AR80D5M1187FB3846F\n [pyechonest] found 1 pruned images\nSkinny Puppy ARAXOCH1187B9AD3F3\n [pyechonest] found 2 pruned images\nAtari Teenage Riot AR3TG3X1187FB4D577\n [pyechonest] found 0 pruned images\n[{'height': 682, 'url': 'https://i.scdn.co/image/795018b60fac67ebfff410aaccaa7bf499df13fc', 'width': 1000}]\n [spotipy] found 1 images\nVenetian Snares ARNCTJ91187B98D813\n [pyechonest] found 3 pruned images\nµ-Ziq AR3DAT71187FB40CCB\n [pyechonest] found 2 pruned images\nRichard Devine ARW81LA1187B9AFA8D\n [pyechonest] found 2 pruned images\nSquarepusher ARLIQ281187FB3DC05\n [pyechonest] found 1 pruned images\nAutechre AR4GKTH1187FB4C8DE\n [pyechonest] found 2 pruned images\nLe Tigre ARZPYZ11187FB4938A\n [pyechonest] found 1 pruned images\nQueens of the Stone Age AR6G4V01187B9AD086\n [pyechonest] found 10 pruned images\nXiu Xiu AR2O4CE1187B9B7C7A\n [pyechonest] found 2 pruned images\nBaby Dee ARPYH891187B9AA43D\n [pyechonest] found 1 pruned images\nAlastair Galbraith ARVVRB51187FB4D305\n [pyechonest] found 4 pruned images\n不失者 AR2FO651187FB3A4AF\n [pyechonest] found 0 pruned images\n[{'height': 600, 'url': 'https://i.scdn.co/image/fd3cdc9385f5c14ecf49025221f1a116a333503f', 'width': 600}]\n [spotipy] found 1 images\nI Am Robot and Proud ARJDX1G1187FB5C9D7\n [pyechonest] found 4 pruned images\nMeg Baird AR8JZZ41187FB4FE0D\n [pyechonest] found 4 pruned images\nArthur Russell ARUNCN01187FB426B7\n [pyechonest] found 0 pruned images\n[{'height': 667, 'url': 'https://i.scdn.co/image/c8443c8dc49d6c4b59dac7d7153871a66aa6a6db', 'width': 1000}]\n [spotipy] found 1 images\nJon Appleton ARC89TA1187B98A3FC\n [pyechonest] found 0 pruned images\n[{'height': 640, 'url': 'https://i.scdn.co/image/d23f754e1c76362062c4d50ee4d5742a2670c529', 'width': 640}]\n [spotipy] found 1 images\nCharles Dodge ARB14C01187B98D49A\n [pyechonest] found 0 pruned images\n[{'height': 640, 'url': 'https://i.scdn.co/image/e0ee3c295b5d88ac776f77d773fb05b65646f35a', 'width': 640}]\n [spotipy] found 1 images\nMorton Subotnick ARKYOJT1187B9A7505\n [pyechonest] found 3 pruned images\nJames Tenney ARU6IWB1187B993AB5\n [pyechonest] found 3 pruned images\nDavid Tudor ARZ7ME31187B992254\n [pyechonest] found 2 pruned images\nVladimir Ussachevsky ARBWZC31187B9AA211\n [pyechonest] found 1 pruned images\nPauline Oliveros ARV2AKI1187B9A894C\n [pyechonest] found 3 pruned images\nRobert Ashley ARDBC241187B9A9F6C\n [pyechonest] found 3 pruned images\nNam June Paik ARX23CK1187B98FA4F\n [pyechonest] found 5 pruned images\nLa Monte Young ARQR1AI1187B9A9E2F\n [pyechonest] found 2 pruned images\nPhill Niblock AREMT0M1187B9B2623\n [pyechonest] found 4 pruned images\nFrançois Bayle ARODW4K1187B9AA0B6\n [pyechonest] found 0 pruned images\n[{'height': 319, 'url': 
'https://i.scdn.co/image/3cb58dda6c5ef75e91092f3f3fdbba37c0726d4b', 'width': 600}]\n [spotipy] found 1 images\nJames Tenney ARU6IWB1187B993AB5\n [pyechonest] found 3 pruned images\nTim Hecker ARRAY5D1187B9AC11C\n [pyechonest] found 0 pruned images\n[{'height': 311, 'url': 'https://i.scdn.co/image/5f35d95bed1352960b559006b9332c2f5b70cca7', 'width': 422}]\n [spotipy] found 1 images\nPamela Z ARRLXDA1187B9B405F\n [pyechonest] found 2 pruned images\nChristian Wolff ARR3PFT1187FB4B051\n [pyechonest] found 1 pruned images\nJean-Claude Risset ARVQUVR1187B9A9E0F\n [pyechonest] found 0 pruned images\n[{'height': 429, 'url': 'https://i.scdn.co/image/7dc2f56444d3b5ac96385709760e07c9a6ff7191', 'width': 600}]\n [spotipy] found 1 images\nPaul Lansky ARSFENR1187B9AA1BA\n [pyechonest] found 1 pruned images\nLaurie Spiegel ARJVNMF1187B9AA2B7\n [pyechonest] found 0 pruned images\n[{'height': 640, 'url': 'https://i.scdn.co/image/709fdac0aa45e2af83957a385ee46978ab3af164', 'width': 640}]\n [spotipy] found 1 images\nAntye Greie ARBTMZH1269FB2DEFB\n [pyechonest] found 1 pruned images\nRyoji Ikeda AREQSQF11F33DFFFE4\n [pyechonest] found 0 pruned images\n[{'height': 640, 'url': 'https://i.scdn.co/image/77007b608d81c3d57ddf8423c0888368b9c35385', 'width': 640}]\n [spotipy] found 1 images\nAlva Noto ARI9LZI1187B9A3A53\n [pyechonest] found 0 pruned images\n[{'height': 671, 'url': 'https://i.scdn.co/image/6291ff70ff7bc1907c76211140f4585881cae777', 'width': 1000}]\n [spotipy] found 1 images\nRYuichi Sakamoto ARXSFIQ14A06F40E63\n [pyechonest] found 0 pruned images\n[{'height': 1333, 'url': 'https://i.scdn.co/image/7e69d413dae85a33a3709cbb09cefec5b2694591', 'width': 1000}]\n [spotipy] found 1 images\nLawrence English AR6N1NW1187B9ADC26\n [pyechonest] found 0 pruned images\n[{'height': 395, 'url': 'https://i.scdn.co/image/fa0897aaca6ecdccc4424bad9aa0f6d95f3178c8', 'width': 591}]\n [spotipy] found 1 images\nTujiko Noriko AROYEB51187FB512C7\n [pyechonest] found 0 pruned images\n[{'height': 250, 'url': 'https://i.scdn.co/image/f6f4d3838fe8fc659abde3ae645dd5739ff20d04', 'width': 250}]\n [spotipy] found 1 images\nArvo Pärt ARSOI001187B9B565A\n [pyechonest] found 0 pruned images\n[{'height': 1223, 'url': 'https://i.scdn.co/image/d943ef02d45e3c51a7b24412284bbc3ca07e875a', 'width': 1000}]\n [spotipy] found 1 images\nFennesz ARAEZSI1187B9A8D1E\n [pyechonest] found 1 pruned images\nChristopher Willits ARHSHU81187B9AFF15\n [pyechonest] found 7 pruned images\nColleen ARMWIHP1187B98C7D5\n [pyechonest] found 2 pruned images\nBen Frost ARTYO6G1187FB40ABC\n [pyechonest] found 2 pruned images\nJóhann Jóhannsson ARXO7AU1187B9B249F\n [pyechonest] found 0 pruned images\n[{'height': 1002, 'url': 'https://i.scdn.co/image/d874b8d4da66db3a21cee321ab23751f749c1542', 'width': 1000}]\n [spotipy] found 1 images\nSylvain Chauveau ARS6V5F1187FB49CE9\n [pyechonest] found 0 pruned images\n[{'height': 640, 'url': 'https://i.scdn.co/image/d3c275e12632912fed3b5b6bcb815859b37bf56e', 'width': 640}]\n [spotipy] found 1 images\nKing Tubby ARA4Y4F1187FB4275C\n [pyechonest] found 3 pruned images\nScientist AR4Q5ZA1187FB598A6\n [pyechonest] found 3 pruned images\nLee \"Scratch\" Perry ARS5NKR1187FB4EE96\n [pyechonest] found 2 pruned images\nAugustus Pablo AR1D1ES1187FB57228\n [pyechonest] found 0 pruned images\n[{'height': 666, 'url': 'https://i.scdn.co/image/85202891a6984679c483b50bf6f8104a32c9e4cc', 'width': 1000}]\n [spotipy] found 1 images\nPrince Jammy ARZ87AU1187FB4A24D\n [pyechonest] found 0 pruned images\n[{'height': 640, 'url': 
'https://i.scdn.co/image/62da32a7a57e0e1efedc733a4aa243f4d535929f', 'width': 640}]\n [spotipy] found 1 images\nMad Professor ARD2GXE1187B9A2E26\n [pyechonest] found 5 pruned images\nRoots Radics ARS9WQL1187FB3FCE5\n [pyechonest] found 3 pruned images\nThe Upsetters ARXCYYI1187FB51D67\n [pyechonest] found 3 pruned images\nSly Dunbar ARGBVEY11F4C84226B\n [pyechonest] found 4 pruned images\nRobbie Shakespeare ARG8FT21187B9B8579\n [pyechonest] found 3 pruned images\nKeith Hudson ART02IC1187B994301\n [pyechonest] found 3 pruned images\nTappa Zukie AR2ELJW1187FB4AEAC\n [pyechonest] found 0 pruned images\n[{'height': 640, 'url': 'https://i.scdn.co/image/553c210fd3861cc6d0f65e050303ff8ea4576c20', 'width': 640}]\n [spotipy] found 1 images\nBig Youth AR6BZPG1187FB59B8D\n [pyechonest] found 3 pruned images\nThe Aggrovators ARZIXW21187B9B7174\n [pyechonest] found 2 pruned images\nU-Roy ARCETNJ11F4C83C829\n [pyechonest] found 2 pruned images\nPrince Far I ARBZCLP1187B99C35B\n [pyechonest] found 2 pruned images\nBlack Uhuru ARMDWND1187B9AEBE2\n [pyechonest] found 3 pruned images\nHorace Andy AR3PN3R1187FB4CEBD\n [pyechonest] found 3 pruned images\nI-Roy ARRQVWY11F4C83CD57\n [pyechonest] found 0 pruned images\n[{'height': 640, 'url': 'https://i.scdn.co/image/2fcd0284ef004f37c3aa667660d0e9927c95dd02', 'width': 640}]\n [spotipy] found 1 images\nThe Abyssinians ARVY5HZ1187B9B3815\n [pyechonest] found 5 pruned images\nPablo Moses ARRA7K41187FB45DAB\n [pyechonest] found 4 pruned images\nMax Romeo ARYCUWT1187B98AFF4\n [pyechonest] found 2 pruned images\nThe Heptones ARQT15C1187FB401DD\n [pyechonest] found 3 pruned images\nBurning Spear ARJ2PMY1187FB5B563\n [pyechonest] found 2 pruned images\nDennis Brown ARSTA431187B9A3599\n [pyechonest] found 4 pruned images\nJacob Miller ARJEAL11187B9A876B\n [pyechonest] found 2 pruned images\nBarrington Levy AR1SUNB1187FB41BC8\n [pyechonest] found 6 pruned images\nSugar Minnot ARNDZAG11F4C8408ED\n [pyechonest] found 0 pruned images\n [spotipy] found 0 images\nYellowman ARZKNHT1187FB4F5FD\n [pyechonest] found 4 pruned images\nGregory Isaacs AR57Z841187B9B7D7D\n [pyechonest] found 7 pruned images\nJohn Holt ARJPZSG1187FB3E985\n [pyechonest] found 3 pruned images\nAlton Ellis AR7DNHO1187B992B0B\n [pyechonest] found 5 pruned images\nKen Boothe ARL53RS1187B9B19AD\n [pyechonest] found 4 pruned images\nThe Ethiopians ARHKFR71187FB3AFAC\n [pyechonest] found 3 pruned images\nJoe Higgs ARLYGIM1187FB4376E\n [pyechonest] found 3 pruned images\nTommy McCook ARLXRDN1187FB567C1\n [pyechonest] found 3 pruned images\nThe Melodians ARVODOV1187B9AE0CC\n [pyechonest] found 3 pruned images\nDelroy Wilson ARZZRK91187B9A5CA5\n [pyechonest] found 3 pruned images\nIsaac Haile Selassie ARFQOGS122988EF5E6\n [pyechonest] found 1 pruned images\nPolycubist ARWTXMS12AF7D98C10\n [pyechonest] found 1 pruned images\nManu Dibango ARKA5831187FB41206\n [pyechonest] found 5 pruned images\nBaaba Maal ARZ1F401187FB5B0A3\n [pyechonest] found 5 pruned images\nAntibalas Afrobeat Orchestra ARVNNLP14C56E0A0EF\n [pyechonest] found 1 pruned images\nOrlando Julius ARA872I1187B997CFF\n [pyechonest] found 1 pruned images\nWilliam Onyeabor ARQASDQ1187B99CFDF\n [pyechonest] found 1 pruned images\nOrchestre Poly-Rythmo ARLFEAC1269FCCE57C\n [pyechonest] found 0 pruned images\n[{'height': 286, 'url': 'https://i.scdn.co/image/d45626a9dc629e15b30866e454ddc0f7bb8a560a', 'width': 201}]\n [spotipy] found 1 images\nSir Victor Uwaifo ARQRLU71187B997CF7\n [pyechonest] found 0 pruned images\n[{'height': 640, 'url': 
'https://i.scdn.co/image/f5c1fd977f9b74745d478b9fe13afe86e2701d31', 'width': 640}]\n [spotipy] found 1 images\nTony Allen & His Afro Messengers ARK8PJE1187FB36150\n [pyechonest] found 0 pruned images\n [spotipy] found 0 images\nSahara All Stars Band Jos ARWQ4CL1187B99CFD1\n [pyechonest] found 0 pruned images\n [spotipy] found 0 images\nLijadu Sisters ARDNMGK1187B99CFD5\n [pyechonest] found 1 pruned images\nKing Sunny Ade ARK4ZVF1187B9B5992\n [pyechonest] found 3 pruned images\nEbo Taylor AR8R0C81187B9A5781\n [pyechonest] found 3 pruned images\nGasper Lawal ARS6NR51187FB48FFF\n [pyechonest] found 0 pruned images\n [spotipy] found 0 images\nTunji Oyelana and the Benders ARQR7011187B9AF92C\n [pyechonest] found 0 pruned images\n [spotipy] found 0 images\n2 Face ARSDY741187B9B86AC\n [pyechonest] found 1 pruned images\nP-Square ARDSCLE1187B9AD790\n [pyechonest] found 2 pruned images\nShina Williams & His African Percussionists ARG2I9K1187B99CFD6\n [pyechonest] found 0 pruned images\n [spotipy] found 0 images\nWeird MC ARCXBWS1187B9A82E2\n [pyechonest] found 1 pruned images\nPlantashun Boiz ARRLXGD124207807B3\n [pyechonest] found 1 pruned images\nPaul I.K. Dairo ARCCS8O1187B999691\n [pyechonest] found 0 pruned images\n[{'height': 640, 'url': 'https://i.scdn.co/image/98246567670296b7dec3dea1fb8f87b137f56b30', 'width': 640}]\n [spotipy] found 1 images\nD'banj AR9KOTY11C8A414BAD\n [pyechonest] found 1 pruned images\nRuggedman ARDSOKW11C8A41514F\n [pyechonest] found 2 pruned images\nEedris Abdulkareem ARPGTO81187FB38710\n [pyechonest] found 0 pruned images\n[{'height': 640, 'url': 'https://i.scdn.co/image/9bbc70dc741eab9d697acbd69ee62789f3dd2c00', 'width': 640}]\n [spotipy] found 1 images\nStyl-Plus ARGXGVN1187FB45B3D\n [pyechonest] found 1 pruned images\nTony Tetuila ARWPTRZ11F4C8471B4\n [pyechonest] found 1 pruned images\nOlamide ARRYCHW12F2CC6A3C8\n [pyechonest] found 2 pruned images\nEbenezer Obey ARNT1LI1187B99EB6E\n [pyechonest] found 0 pruned images\n[{'height': 1347, 'url': 'https://i.scdn.co/image/f2c0975d57d6d26cb4387506e999207bd9a30104', 'width': 1000}]\n [spotipy] found 1 images\nHaruna Ishola ARHVMHO122988F06F6\n [pyechonest] found 0 pruned images\n[{'height': 640, 'url': 'https://i.scdn.co/image/338dcabb59d98c132dc87d83db6824109c146e90', 'width': 640}]\n [spotipy] found 1 images\nLágbájá AR3F3SG1187B98EED5\n [pyechonest] found 1 pruned images\nPrince Nico Mbarga AR4ZHA51187B9B92B4\n [pyechonest] found 1 pruned images\nWest African Highlife Band ARLIG1S1187FB4BD2F\n [pyechonest] found 0 pruned images\n[{'height': 640, 'url': 'https://i.scdn.co/image/17c65ebdfab20cd27c5c183ec3659ea5bad02cfa', 'width': 640}]\n [spotipy] found 1 images\nModenine AROGIKF1242077FBEF\n [pyechonest] found 1 pruned images\nTerry tha Rapman AR95CBB11C8A414A12\n [pyechonest] found 2 pruned images\nOlu Maintain ARPRORY12D5CD7AD71\n [pyechonest] found 0 pruned images\n[{'height': 640, 'url': 'https://i.scdn.co/image/da35fc347118227a280b31de6b32f9044bdbe98d', 'width': 640}]\n [spotipy] found 1 images\nMajek Fashek ARLSD651187FB4A01E\n [pyechonest] found 3 pruned images\nKonono N°1 ARJAFTG149675A9128\n [pyechonest] found 1 pruned images\nKoffi Olomidé ARCQ3LZ1187FB49B42\n [pyechonest] found 2 pruned images\nLes Bantous de la Capitale AR7LN7G1187FB395BB\n [pyechonest] found 2 pruned images\nThomas Mapfumo ARQ7KGD1187B989CE4\n [pyechonest] found 4 pruned images\nOliver Mtukudzi ARS79SF1187FB5BB6D\n [pyechonest] found 4 pruned images\nChiwoniso Maraire ARIXGNA1269FCD1352\n [pyechonest] found 2 pruned images\nThomas 
Mapfumo & The Blacks Unlimited AR43NC91187FB3852A\n [pyechonest] found 0 pruned images\n[{'height': 640, 'url': 'https://i.scdn.co/image/f4ed8c8a426122dcb215f27f7b2baea316f5a2fb', 'width': 640}]\n [spotipy] found 1 images\nAngélique Kidjo ARJTAX61187FB40B19\n [pyechonest] found 4 pruned images\nOumou Sangare ARKDRUN1187B9B8A4D\n [pyechonest] found 5 pruned images\nIsmaël Lô ARTM6SK1187B9B804D\n [pyechonest] found 3 pruned images\nGeoffrey Oryema ARAENMM1187FB4064C\n [pyechonest] found 4 pruned images\nSalif Keita ARC1WKD1187FB5A532\n [pyechonest] found 3 pruned images\nAmadou & Mariam AREXOET1187B9B80A3\n [pyechonest] found 3 pruned images\nOrchestra Baobab AROCST41187FB53EE3\n [pyechonest] found 4 pruned images\nBembeya Jazz National AR7TGNH1187B9999C2\n [pyechonest] found 4 pruned images\nTiwa Savage ARJZYUB13443D17058\n [pyechonest] found 3 pruned images\n\nFini - writeArtistsCaches\n" ] ] ]
[ "code" ]
[ [ "code", "code" ] ]
cb9cca712ef865b92e6a50cf65e968affcdcf459
30,661
ipynb
Jupyter Notebook
Amazon A2I and Amazon Fraud Detector.ipynb
barisyasin/amazon-a2i-sample-jupyter-notebooks
4ecb9d327f4ce645aafa4077bd84f7ae2a126dc8
[ "Apache-2.0" ]
null
null
null
Amazon A2I and Amazon Fraud Detector.ipynb
barisyasin/amazon-a2i-sample-jupyter-notebooks
4ecb9d327f4ce645aafa4077bd84f7ae2a126dc8
[ "Apache-2.0" ]
null
null
null
Amazon A2I and Amazon Fraud Detector.ipynb
barisyasin/amazon-a2i-sample-jupyter-notebooks
4ecb9d327f4ce645aafa4077bd84f7ae2a126dc8
[ "Apache-2.0" ]
null
null
null
36.940964
728
0.603307
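For orientation, the scalar fields in the row above (size, avg_line_length, max_line_length, alphanum_fraction) are simple per-file text statistics. The sketch below shows one plausible way to derive them; it illustrates what the fields mean and is not the corpus's actual extraction code, so details such as byte-versus-character counting are assumptions.

```python
def file_stats(text: str) -> dict:
    """Plausible derivation of the per-file statistics in the schema above."""
    lines = text.splitlines()
    n_alnum = sum(c.isalnum() for c in text)
    return {
        "size": len(text.encode("utf-8")),  # assumed to be a byte count
        "avg_line_length": sum(map(len, lines)) / max(len(lines), 1),
        "max_line_length": max(map(len, lines), default=0),
        "alphanum_fraction": n_alnum / max(len(text), 1),
    }

# Example: file_stats(open("notebook.ipynb").read())
```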
[ [ [ "# Amazon Augmented AI (Amazon A2I) integration with Amazon Fraud Detector", "_____no_output_____" ], [ "# Visit https://github.com/aws-samples/amazon-a2i-sample-jupyter-notebooks for all A2I Sample Notebooks\n", "_____no_output_____" ], [ "1. [Introduction](#Introduction)\n2. [Prerequisites](#Setup)\n 1. [Workteam](#Workteam)\n 2. [Notebook Permission](#Notebook-Permission)\n3. [Client Setup](#Client-Setup)\n4. [Create Control Plane Resources](#Create-Control-Plane-Resources)\n 1. [Create Human Task UI](#Create-Human-Task-UI)\n 2. [Create Flow Definition](#Create-Flow-Definition)\n5. Scenario: When Activation Conditions are met, and a Human Loop is created\n 1. [Check Status of Human Loop](#Check-Status-of-Human-Loop)\n 2. [Wait For Workers to Complete Task](#Wait-For-Workers-to-Complete-Task)\n 3. [Check Status of Human Loop](#Check-Status-of-Human-Loop)\n 4. [View Task Results](#View-Task-Results)", "_____no_output_____" ], [ "# Introduction\n\nAmazon Augmented AI (Amazon A2I) makes it easy to build the workflows required for human review of ML predictions. Amazon A2I brings human review to all developers, removing the undifferentiated heavy lifting associated with building human review systems or managing large numbers of human reviewers. \n\nAmazon A2I provides built-in human review workflows for common machine learning use cases, such as content moderation and text extraction from documents, which allows predictions from Amazon Rekognition and Amazon Textract to be reviewed easily. You can also create your own workflows for ML models built on Amazon SageMaker or any other tools. Using Amazon A2I, you can allow human reviewers to step in when a model is unable to make a high confidence prediction or to audit its predictions on an on-going basis. Learn more here: https://aws.amazon.com/augmented-ai/\n\nIn this tutorial, we will show how you can use Amazon A2I directly with Amazon Fraud Detector to check for high confidence fraud predictions\n\nFor more in depth instructions, visit https://docs.aws.amazon.com/sagemaker/latest/dg/a2i-getting-started.html", "_____no_output_____" ], [ "To incorporate Amazon A2I into your human review workflows, you need three resources:\n\n* A **worker task template** to create a worker UI. The worker UI displays your input data, such as documents or images, and instructions to workers. It also provides interactive tools that the worker uses to complete your tasks. For more information, see https://docs.aws.amazon.com/sagemaker/latest/dg/a2i-instructions-overview.html\n\n* A **human review workflow**, also referred to as a flow definition. You use the flow definition to configure your human workforce and provide information about how to accomplish the human review task. For built-in task types, you also use the flow definition to identify the conditions under which a review human loop is triggered. You can use the flow definition to specify that a model prediction will be sent to a human for review based on the threshold defined by you for Fraud detection. You can create a flow definition in the Amazon Augmented AI console or with the Amazon A2I APIs. To learn more about both of these options, see https://docs.aws.amazon.com/sagemaker/latest/dg/a2i-create-flow-definition.html\n\n* A **human loop** to start your human review workflow. 
When you use one of the built-in task types, the corresponding AWS service creates and starts a human loop on your behalf when the conditions specified in your flow definition are met, or for each object if no conditions were specified. When a human loop is triggered, human review tasks are sent to the workers as specified in the flow definition.\n\nWhen using a custom task type, you start a human loop using the Amazon Augmented AI Runtime API. When you call StartHumanLoop in your custom application, a task is sent to human reviewers.", "_____no_output_____" ], [ "### Install Latest SDK", "_____no_output_____" ] ], [ [ "# First, let's get the latest installations of our dependencies\n!pip install --upgrade pip\n!pip install botocore --upgrade\n!pip install boto3 --upgrade", "_____no_output_____" ] ], [ [ "## Setup\nWe need to set up the following data:\n* `region` - Region to call A2I\n* `bucket` - An S3 bucket accessible by the given role\n * Used to store the sample images & output results\n * Must be within the same region A2I is called from\n* `role` - The IAM role used as part of StartHumanLoop. By default, this notebook will use the execution role\n* `workteam` - Group of people to send the work to", "_____no_output_____" ] ], [ [ "import boto3\nimport botocore\n\nREGION = boto3.session.Session().region_name", "_____no_output_____" ] ], [ [ "#### Create and Setup S3 Bucket and Paths\nCreate your own S3 bucket and replace the following with that bucket name ", "_____no_output_____" ] ], [ [ "# Replace the following with your bucket name\nBUCKET = 'your Amazon S3 bucket name'", "_____no_output_____" ] ], [ [ "Your bucket, `BUCKET`, must be located in the same AWS Region that you are using to run this notebook. This cell checks if they are located in the same Region. ", "_____no_output_____" ] ], [ [ "# Amazon S3 (S3) client\ns3 = boto3.client('s3', REGION)\nbucket_region = s3.head_bucket(Bucket=BUCKET)['ResponseMetadata']['HTTPHeaders']['x-amz-bucket-region']\nassert bucket_region == REGION, \"Your S3 bucket {} and this notebook need to be in the same region.\".format(BUCKET)", "_____no_output_____" ] ], [ [ "### Notebook Permission\n\nThe AWS IAM Role used to execute the notebook needs to have the following permissions:\n\n* FraudDetectorFullAccess\n* SagemakerFullAccess\n* AmazonSageMakerMechanicalTurkAccess (if using MechanicalTurk as your Workforce)\n* S3 Read and Write Access to the bucket you specified in `BUCKET`. \n", "_____no_output_____" ] ], [ [ "from sagemaker import get_execution_role\n\n# Setting Role to the default SageMaker Execution Role\nROLE = get_execution_role()\ndisplay(ROLE)", "_____no_output_____" ] ], [ [ "Visit: https://docs.aws.amazon.com/sagemaker/latest/dg/a2i-permissions-security.html to add the necessary permissions to your role", "_____no_output_____" ], [ "### Workteam or Workforce", "_____no_output_____" ], [ "A workforce is the group of workers that you have selected to label your dataset. You can choose either the Amazon Mechanical Turk workforce, a vendor-managed workforce, or you can create your own private workforce for human reviews. Whichever workforce type you choose, Amazon Augmented AI takes care of sending tasks to workers. \n\nWhen you use a private workforce, you also create work teams, a group of workers from your workforce that are assigned to Amazon Augmented AI human review tasks. 
You can have multiple work teams and can assign one or more work teams to each job.", "_____no_output_____" ], [ "# To create your Workteam, visit the instructions here: https://docs.aws.amazon.com/sagemaker/latest/dg/sms-workforce-management.html\n\nNOTE: After you have created your workteam, replace WORKTEAM_ARN below with your own Workteam ARN\n", "_____no_output_____" ] ], [ [ "WORKTEAM_ARN = \"your workteam ARN\"\n", "_____no_output_____" ] ], [ [ "## Client Setup", "_____no_output_____" ], [ "Here we are going to set up the clients. ", "_____no_output_____" ] ], [ [ "from IPython.core.display import display, HTML\nfrom IPython.display import clear_output\ndisplay(HTML(\"<style>.container { width:90% }</style>\"))\n# ------------------------------------------------------------------\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\npd.set_option('display.max_rows', 500)\npd.set_option('display.max_columns', 500)\npd.set_option('display.width', 1000)\n\nimport os\nimport sys\nimport time\nimport json\nimport uuid \nfrom datetime import datetime\nimport io\n\n# -- Client setup -- \nimport boto3\nimport sagemaker\n\n# -- sklearn --\nfrom sklearn.metrics import roc_curve, roc_auc_score, auc\n%matplotlib inline", "_____no_output_____" ], [ "import pprint\n\n# Pretty print setup\npp = pprint.PrettyPrinter(indent=2)\n\n# Function to pretty-print AWS SDK responses\ndef print_response(response):\n if 'ResponseMetadata' in response:\n del response['ResponseMetadata']\n pp.pprint(response)", "_____no_output_____" ], [ "# Amazon SageMaker client\nsagemaker = boto3.client('sagemaker', REGION)\n\n\n# Amazon Augmented AI (A2I) Runtime client\na2i_runtime_client = boto3.client('sagemaker-a2i-runtime', REGION)\n\n\n# -- initialize the Amazon Fraud Detector client \nclient = boto3.client('frauddetector')", "_____no_output_____" ] ], [ [ "# Amazon Fraud Detector Setup", "_____no_output_____" ], [ "To generate fraud predictions, Amazon Fraud Detector uses machine learning models that are trained with your historical fraud data. Each model is trained using a model type, which is a specialized recipe to build a fraud detection model for a specific fraud use case. Deployed models are imported to detectors, where you can configure decision logic (for example, rules) to interpret the model’s score and assign outcomes such as pass or send transaction to a human investigator for review.\n\nYou can use the AWS Console to create and manage models and detector versions. Alternatively, you can use the AWS Command Line Interface (AWS CLI) or one of the Amazon Fraud Detector SDKs.\nAmazon Fraud Detector components include events, entities, labels, models, rules, variables, outcomes, and detectors. 
Using these components, you can build an evaluation that contains your fraud detection logic.\n ", "_____no_output_____" ], [ "### To create a fraud detector model using the console, please refer to the link below\n https://docs.aws.amazon.com/frauddetector/latest/ug/frauddetector.pdf\n \n ### To create a fraud detector model using an SDK / Python notebook, please refer to the link below\nhttps://github.com/aws-samples/aws-fraud-detector-samples\n#### NOTE:\nThe following model is created using the default data set provided by Amazon Fraud Detector (at https://docs.aws.amazon.com/frauddetector/latest/ug/samples/training_data.zip)\n\nAfter you create your own Fraud Detector model, replace MODEL_NAME, DETECTOR_NAME, EVENT_TYPE, and ENTITY_TYPE with your fraud detector model's values\n ", "_____no_output_____" ] ], [ [ "MODEL_NAME = 'sample_fraud_detection'\nDETECTOR_NAME = 'fraud_detector'\nEVENT_TYPE = 'registration'\nENTITY_TYPE = 'customer'\n\n# -- model performance summary -- \nauc = client.describe_model_versions(\n modelId= MODEL_NAME,\n modelVersionNumber='1.0',\n modelType='ONLINE_FRAUD_INSIGHTS',\n maxResults=10\n)['modelVersionDetails'][0]['trainingResult']['trainingMetrics']['auc']\n\n\ndf_model = pd.DataFrame(client.describe_model_versions(\n modelId= MODEL_NAME,\n modelVersionNumber='1.0',\n modelType='ONLINE_FRAUD_INSIGHTS',\n maxResults=10\n)['modelVersionDetails'][0]['trainingResult']['trainingMetrics']['metricDataPoints'])\n\n\nplt.figure(figsize=(10,10))\nplt.plot(df_model[\"fpr\"], df_model[\"tpr\"], color='darkorange',\n lw=2, label='ROC curve (area = %0.3f)' % auc)\nplt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')\nplt.xlabel('False Positive Rate')\nplt.ylabel('True Positive Rate')\nplt.title( MODEL_NAME + ' ROC Chart')\nplt.legend(loc=\"lower right\",fontsize=12)\nplt.axvline(x = 0.02 ,linewidth=2, color='r')\nplt.axhline(y = 0.73 ,linewidth=2, color='r')\nplt.show()", "_____no_output_____" ] ], [ [ "### Test the fraud detector with a sample data record\nUsing the fraud detector client, invoke the model endpoint with a sample record and examine the results, including the fraud detection score", "_____no_output_____" ] ], [ [ "\neventId = uuid.uuid1()\ntimestampStr = '2013-07-16T19:00:00Z'\n\n# Construct a sample data record\n\nrec = {\n 'ip_address': '36.72.99.64',\n 'email_address': '[email protected]',\n 'billing_state' : 'NJ',\n 'user_agent' : 'Mozilla',\n 'billing_postal' : '32067',\n 'phone_number' :'703-989-7890',\n 'billing_address' :'12351 Amanda Knolls Fake St'\n}\n\n\npred = client.get_event_prediction(detectorId=DETECTOR_NAME, \n detectorVersionId='1',\n eventId = str(eventId),\n eventTypeName = EVENT_TYPE,\n eventTimestamp = timestampStr, \n entities = [{'entityType': ENTITY_TYPE, 'entityId':str(eventId.int)}],\n eventVariables=rec) ", "_____no_output_____" ], [ "pred", "_____no_output_____" ], [ "# Extract/print the model score\npred['modelScores'][0]['scores']['sample_fraud_detection_insightscore']", "_____no_output_____" ] ], [ [ "# Create Control Plane Resources\n\n## Create Human Task UI\n\nCreate a human task UI resource, giving a UI template in Liquid HTML. 
This template will be rendered to the human workers whenever a human loop is required.\n\nFor over 70 pre-built UIs, check: https://github.com/aws-samples/amazon-a2i-sample-task-uis.\n\nHere we'll be constructing the following control plane resources: Human Task UI and Flow Definition, using the SageMaker CreateTaskUI and CreateFlowDefinition APIs, respectively.\n\nThese resources can be created once and used to drive any subsequent A2I human loops.\n\nNOTE: The following template models a \"Claim\" - i.e. mark if a given claim is fraudulent, a valid claim, or needs further investigation", "_____no_output_____" ] ], [ [ "template=\"\"\"<script src=\"https://assets.crowd.aws/crowd-html-elements.js\"></script>\n\n<crowd-form>\n <crowd-classifier\n name=\"category\"\n categories=\"['Fraudulent Claim', 'Valid Claim', 'Needs Further Investigation']\"\n header=\"Select the most relevant category\"\n >\n <classification-target>\n <h3><strong>Risk Score (out of 1000): </strong><span style=\"color: #ff9900;\">{{ task.input.score.sample_fraud_detection_insightscore }}</span></h3>\n <hr>\n\t<h3> Claim Details </h3>\n <p style=\"padding-left: 50px;\"><strong>Email Address : </strong>{{ task.input.taskObject.email_address }}</p>\n <p style=\"padding-left: 50px;\"><strong>Billing Address : </strong>{{ task.input.taskObject.billing_address }}</p>\n <p style=\"padding-left: 50px;\"><strong>Billing State : </strong>{{ task.input.taskObject.billing_state }}</p>\n <p style=\"padding-left: 50px;\"><strong>Billing Zip : </strong>{{ task.input.taskObject.billing_postal }}</p>\n <p style=\"padding-left: 50px;\"><strong>Originating IP : </strong>{{ task.input.taskObject.ip_address }}</p>\n <p style=\"padding-left: 50px;\"><strong>Phone Number : </strong>{{ task.input.taskObject.phone_number }}</p>\n <p style=\"padding-left: 50px;\"><strong>User Agent : </strong>{{ task.input.taskObject.user_agent }}</p>\n \n </classification-target>\n \n <full-instructions header=\"Claim Verification instructions\">\n <ol>\n <li><strong>Review</strong> the claim application and documents carefully.</li>\n <li>Mark the claim as valid or fraudulent</li>\n </ol>\n </full-instructions>\n\n <short-instructions>\n Choose the most relevant category for the claim shown. \n </short-instructions>\n </crowd-classifier>\n\n</crowd-form>\n\"\"\"", "_____no_output_____" ], [ "def create_task_ui(task_ui_name, template):\n '''\n Creates a Human Task UI resource.\n\n Returns:\n struct: HumanTaskUiArn\n '''\n response = sagemaker.create_human_task_ui(\n HumanTaskUiName=task_ui_name,\n UiTemplate={'Content': template})\n return response", "_____no_output_____" ] ], [ [ "### Create an Augmented AI task UI", "_____no_output_____" ] ], [ [ "# Task UI name - this value is unique per account and region. You can also provide your own value here.\ntaskUIName = 'fraud'+ str(uuid.uuid1())\n\n# Create task UI\nhumanTaskUiResponse = create_task_ui(taskUIName, template)\nhumanTaskUiArn = humanTaskUiResponse['HumanTaskUiArn']\nprint(humanTaskUiArn)", "_____no_output_____" ] ], [ [ "# Create the Flow Definition\n\nIn this section, we're going to create a flow definition. Flow Definitions allow us to specify:\n\n* The workforce that your tasks will be sent to.\n* The instructions that your workforce will receive. 
This is called a worker task template.\n* The configuration of your worker tasks, including the number of workers that receive a task and time limits to complete tasks.\n* Where your output data will be stored.\n\nThis demo is going to use the API, but you can optionally create this workflow definition in the console as well.\n\nFor more details and instructions, see: https://docs.aws.amazon.com/sagemaker/latest/dg/a2i-create-flow-definition.html.\n", "_____no_output_____" ] ], [ [ "OUTPUT_PATH = f's3://{BUCKET}/a2i-results'\n\ndef create_flow_definition(flow_definition_name):\n '''\n Creates a Flow Definition resource\n\n Returns:\n struct: FlowDefinitionArn\n '''\n response = sagemaker.create_flow_definition(\n FlowDefinitionName= flow_definition_name,\n RoleArn= ROLE,\n HumanLoopConfig= {\n \"WorkteamArn\": WORKTEAM_ARN,\n \"HumanTaskUiArn\": humanTaskUiArn,\n \"TaskCount\": 1,\n \"TaskDescription\": \"Please review the claim data and flag for potential fraud\",\n \"TaskTitle\": \"Review and Approve / Reject claim.\"\n },\n OutputConfig={\n \"S3OutputPath\" : OUTPUT_PATH\n }\n )\n \n return response['FlowDefinitionArn']", "_____no_output_____" ], [ "# Flow definition name - this value is unique per account and region. You can also provide your own value here.\n#uniqueId = str(uuid.uuid4())\nuniqueId = str(int(round(time.time() * 1000)))\nflowDefinitionName = f'fraud-detector-a2i-{uniqueId}'\n#flowDefinitionName = 'fraud-detector-a2i' \n\nflowDefinitionArn = create_flow_definition(flowDefinitionName)\nprint(flowDefinitionArn)", "_____no_output_____" ] ], [ [ "# Starting Human Loops\n\nNow that we have set up our Flow Definition, we are ready to call Amazon Fraud Detector and start our human loops. \nIn this tutorial, we are interested in starting a HumanLoop only if the risk score returned by our model is greater than a threshold of 900. When that condition is met, we will kick off a HumanLoop to engage our workforce for a human review.\n# Start a human loop if the model risk score exceeds a certain threshold", "_____no_output_____" ] ], [ [ "\nOUTPUT_PATH = f's3://{BUCKET}/a2i-results'\n\nFraudScore= pred['modelScores'][0]['scores']['sample_fraud_detection_insightscore']\nprint(FraudScore)\n\n## SET YOUR OWN THRESHOLD HERE\nSCORE_THRESHOLD = 900\n\nif FraudScore > SCORE_THRESHOLD :\n # Create the human loop input JSON object\n humanLoopInput = {\n 'score' : pred['modelScores'][0]['scores'],\n 'taskObject': rec\n }\n\n print(json.dumps(humanLoopInput))\n humanLoopName = 'Fraud-detector-' + str(int(round(time.time() * 1000)))\n print('Starting human loop - ' + humanLoopName)\n\n response = a2i_runtime_client.start_human_loop(\n HumanLoopName=humanLoopName,\n FlowDefinitionArn= flowDefinitionArn,\n HumanLoopInput={\n 'InputContent': json.dumps(humanLoopInput)\n }\n )\n\n", "_____no_output_____" ] ], [ [ "### Check Status of Human Loop", "_____no_output_____" ] ], [ [ "all_human_loops_in_workflow = a2i_runtime_client.list_human_loops(FlowDefinitionArn=flowDefinitionArn)['HumanLoopSummaries']\n\nfor human_loop in all_human_loops_in_workflow:\n print(f'\\nHuman Loop Name: {human_loop[\"HumanLoopName\"]}')\n print(f'Human Loop Status: {human_loop[\"HumanLoopStatus\"]} \\n')\n print('\\n')\n", "_____no_output_____" ] ], [ [ "# Wait For Workers to Complete Task\n\nSince we are using a private workteam, we should go to the labeling UI to perform the inspection ourselves.\n", "_____no_output_____" ] ], [ [ "workteamName = WORKTEAM_ARN[WORKTEAM_ARN.rfind('/') + 1:]\nprint(\"Navigate to the 
private worker portal and do the tasks. Make sure you've invited yourself to your workteam!\")\nprint('https://' + sagemaker.describe_workteam(WorkteamName=workteamName)['Workteam']['SubDomain'])", "_____no_output_____" ] ], [ [ "### Check Status of Human Loop", "_____no_output_____" ] ], [ [ "all_human_loops_in_workflow = a2i_runtime_client.list_human_loops(FlowDefinitionArn=flowDefinitionArn)['HumanLoopSummaries']\n\ncompleted_loops = []\nfor human_loop in all_human_loops_in_workflow:\n print(f'\\nHuman Loop Name: {human_loop[\"HumanLoopName\"]}')\n print(f'Human Loop Status: {human_loop[\"HumanLoopStatus\"]} \\n')\n print('\\n')\n if human_loop['HumanLoopStatus'] == 'Completed':\n completed_loops.append(human_loop['HumanLoopName'])\n", "_____no_output_____" ], [ "print(completed_loops)", "_____no_output_____" ] ], [ [ "### View Task Results ", "_____no_output_____" ], [ "Once work is completed, Amazon A2I stores results in your S3 bucket and sends a CloudWatch event. Your results should be available in the S3 OUTPUT_PATH when all work is completed.", "_____no_output_____" ] ], [ [ "import re\nimport pprint\npp = pprint.PrettyPrinter(indent=2)\n\ndef retrieve_a2i_results_from_output_s3_uri(bucket, a2i_s3_output_uri):\n '''\n Gets the JSON file published by A2I and returns a deserialized object\n '''\n splitted_string = re.split('s3://' + bucket + '/', a2i_s3_output_uri)\n output_bucket_key = splitted_string[1]\n\n response = s3.get_object(Bucket=bucket, Key=output_bucket_key)\n content = response[\"Body\"].read()\n return json.loads(content)\n \n\nfor human_loop_name in completed_loops:\n\n describe_human_loop_response = a2i_runtime_client.describe_human_loop(\n HumanLoopName=human_loop_name\n )\n \n print(f'\\nHuman Loop Name: {describe_human_loop_response[\"HumanLoopName\"]}')\n print(f'Human Loop Status: {describe_human_loop_response[\"HumanLoopStatus\"]}')\n print(f'Human Loop Output Location: {describe_human_loop_response[\"HumanLoopOutput\"][\"OutputS3Uri\"]} \\n')\n \n # Print out the A2I human answers\n pp.pprint(retrieve_a2i_results_from_output_s3_uri(BUCKET, describe_human_loop_response['HumanLoopOutput']['OutputS3Uri']))\n", "_____no_output_____" ] ], [ [ "# Cleanup\nTo avoid incurring unnecessary charges, delete the resources used in this walkthrough when not in use. For instructions, see the following:\n\nHow do I delete an S3 Bucket? https://docs.aws.amazon.com/AmazonS3/latest/user-guide/delete-bucket.html\n\nDelete a Flow Definition https://docs.aws.amazon.com/sagemaker/latest/dg/a2i-delete-flow-definition.html\n\nCleanup: SageMaker Resources https://sagemaker-workshop.com/cleanup/sagemaker.html\n\nDelete Amazon Fraud Detector resources https://docs.aws.amazon.com/frauddetector/latest/ug/delete-detector.html", "_____no_output_____" ] ] ]
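The Cleanup section that closes the notebook above links to documentation but shows no code. As a rough sketch, assuming the boto3 SageMaker client and the `flowDefinitionName`/`taskUIName` values created earlier, the A2I control-plane resources could be removed as follows; both delete calls are real SageMaker APIs, though deleting a flow definition may fail while human loops started from it are still in progress.

```python
import boto3

sm = boto3.client("sagemaker")

def cleanup_a2i(flow_definition_name, task_ui_name):
    # Delete the human review workflow created with create_flow_definition.
    sm.delete_flow_definition(FlowDefinitionName=flow_definition_name)
    # Delete the worker task UI created with create_human_task_ui.
    sm.delete_human_task_ui(HumanTaskUiName=task_ui_name)

# e.g. cleanup_a2i(flowDefinitionName, taskUIName) with the names from earlier cells
```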
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ] ]
cb9cd958c7831df4d55aa4e43c64e40f4c7dc7ec
190,303
ipynb
Jupyter Notebook
python/.ipynb_checkpoints/analyzing dense output for one trial condition-checkpoint.ipynb
timtyree/bgmc
891e003a9594be9e40c53822879421c2b8c44eed
[ "MIT" ]
null
null
null
python/.ipynb_checkpoints/analyzing dense output for one trial condition-checkpoint.ipynb
timtyree/bgmc
891e003a9594be9e40c53822879421c2b8c44eed
[ "MIT" ]
null
null
null
python/.ipynb_checkpoints/analyzing dense output for one trial condition-checkpoint.ipynb
timtyree/bgmc
891e003a9594be9e40c53822879421c2b8c44eed
[ "MIT" ]
null
null
null
99.844176
48,444
0.785831
[ [ [ "# analyzing dense output\nTimothy Tyree<br>\n3.26.2021", "_____no_output_____" ] ], [ [ "# darkmode=True\nfrom lib.my_initialization import *\n# For darkmode plots\nfrom jupyterthemes import jtplot\njtplot.style(theme='monokai', context='notebook', ticks=True, grid=False)\n", "_____no_output_____" ] ], [ [ "## plot the collision times", "_____no_output_____" ] ], [ [ "#load the example data\n# os.chdir(nb_dir)\n# data_dir='data/out.csv'\n# assert ( os.path.exists(data_dir) ) \n# df=pd.read_csv(data_dir,lineterminator=None)\n\n#load a log file\ninput_folder=f\"{nb_dir}/data\"\ninput_fn='example.log'\nos.chdir(input_folder)\ndf=parse_output_log(input_fn, include_inputs=False,printing=False)\nNtrials=df.index.values.shape[0]\nNmin=11\ndf.drop(columns=[f'{i}' for i in range(Nmin)], inplace=True)\n#any duds still there?\nassert ( not (df.values<-9000).any() )\ndf.head()", "_____no_output_____" ], [ "dfd=df.describe().T.drop(columns=['count'])\ndfd.head()", "_____no_output_____" ], [ "#extract the values\nx_values=np.array([eval(x) for x in dfd.index.values])\ny_values=np.array(dfd['50%'].values)\nyerr1_values=np.array(dfd['25%'].values)\nyerr2_values=np.array(dfd['75%'].values)\n\ny2_values=np.array(dfd['mean'].values)\ny2err1_values=y2_values-np.array(dfd['std'].values)\ny2err2_values=y2_values+np.array(dfd['std'].values)", "_____no_output_____" ], [ "#plot the values\nfigsize=(12,6)\nfontsize=18\nsaving=True\nsavefig_folder=f'{nb_dir}/../fig'\nsavefig_fn='colltimes.png'\nfig, axs = plt.subplots(ncols=2,figsize=figsize)\nax=axs[1]\nax.fill_between(x_values,yerr1_values,yerr2_values,alpha=0.3)\nax.plot(x_values,y_values,lw=2)\nax.set_xlabel('n', fontsize=fontsize)\nax.set_ylabel(r'time to first collision (sec)', fontsize=fontsize)\nax.tick_params(axis='both', which='major', labelsize=fontsize)\nax.tick_params(axis='both', which='minor', labelsize=0)\nax.set_title('median', fontsize=fontsize)\nax=axs[0]\nax.fill_between(x_values,y2err1_values,y2err2_values,alpha=0.3)\nax.plot(x_values,y2_values,lw=2)\nax.set_xlabel('n', fontsize=fontsize)\nax.set_ylabel(r'time to first collision (sec)', fontsize=fontsize)\nax.tick_params(axis='both', which='major', labelsize=fontsize)\nax.tick_params(axis='both', which='minor', labelsize=0)\nax.set_title('mean', fontsize=fontsize)\nfig.tight_layout()\nif not saving:\n plt.show()\nelse:\n plt.tight_layout()\n os.chdir(savefig_folder)\n plt.savefig(savefig_fn, dpi=300)\n print(f\"saved figure in \\n\\t{savefig_fn}\")\n# plt.close()", "saved figure in \n\tcolltimes.png\n" ] ], [ [ "## plot the collision rates", "_____no_output_____" ] ], [ [ "#plot the values\nfigsize=(12,6)\nfontsize=18\nsaving=True\nsavefig_folder=f'{nb_dir}/../fig'\nsavefig_fn='collrates.png'\nfig, axs = plt.subplots(ncols=2,figsize=figsize)\nax=axs[1]\nax.fill_between(x_values,1/yerr2_values,1/yerr1_values,alpha=0.3)\nax.plot(x_values,1/y_values,lw=2)\nax.set_xlabel('n', fontsize=fontsize)\nax.set_ylabel(r'$W_{{-2}}$ (1/sec)', fontsize=fontsize)\nax.tick_params(axis='both', which='major', labelsize=fontsize)\nax.tick_params(axis='both', which='minor', labelsize=0)\nax.set_title('median', fontsize=fontsize)\nax=axs[0]\n# ax.fill_between(x_values,1/y2err2_values,1/y2err1_values,alpha=0.3)\nax.plot(x_values,1/y2_values,lw=2)\nax.set_xlabel('n', fontsize=fontsize)\nax.set_ylabel(r'$W_{{-2}}$ (1/sec)', fontsize=fontsize)\nax.tick_params(axis='both', which='major', labelsize=fontsize)\nax.tick_params(axis='both', which='minor', labelsize=0)\n# ax.set_ylim([0,1])\nax.set_title('mean', 
fontsize=fontsize)\nfig.tight_layout()\nif not saving:\n plt.show()\nelse:\n plt.tight_layout()\n os.chdir(savefig_folder)\n plt.savefig(savefig_fn, dpi=300)\n print(f\"saved figure in \\n\\t{savefig_fn}\")\n# plt.close()", "saved figure in \n\tcollrates.png\n" ] ], [ [ "## Results\nCollision rates/times are being generated with uncertainty that cannot be reliably quantified in terms of mean and standard deviation. Therefore, output logs must include dense output.", "_____no_output_____" ], [ "- DONE: dev parsing output logs\n- DONE: generate run_1.dat and run_test.dat\n", "_____no_output_____" ] ], [ [ "def parse_output_log(input_fn,include_inputs=True,printing=False):\n with open(input_fn) as f:\n trgt1='Printing Inputs...\\n'\n trgt2='Printing Outputs...\\n'\n for n,line in enumerate(f):\n if trgt1 == line:\n if printing:\n print(f'found inputs starting after line {n}')\n n_input=n\n if trgt2 == line:\n if printing:\n print(f'found outputs starting after line {n}')\n n_output=n\n\n with open(input_fn) as f:\n inputs=f.readlines()[n_input+1:n_output-1]\n col_name_lst=[]\n col_value_lst=[]\n for line in inputs:\n string=line.split(' ')[-1]\n eid=string.find('=')\n if eid!=-1:\n col_name=string[:eid]\n col_value=eval(string[eid+1:-2])\n col_name_lst.append(col_name)\n col_value_lst.append(col_value)\n df=pd.read_csv(input_fn,header=n_output-2)\n #drop that 'Unnamed: {Nmax}' column \n df.drop(columns=[df.columns[-1]], inplace=True)\n if include_inputs:\n if printing:\n print(\"input parameters were:\")\n print(col_name_lst)\n print(col_value_lst)\n print(\"returning outputs as pandas.DataFrame instance\")\n for name,value in zip ( col_name_lst, col_value_lst):\n df[name]=value\n return df", "_____no_output_____" ], [ "input_folder=f\"{nb_dir}/data\"\ninput_fn='example.log'\nos.chdir(input_folder)\ndf=parse_output_log(input_fn, include_inputs=True,printing=True)\ndf.head()", "found inputs starting after line 10\nfound outputs starting after line 17\ninput parameters were:\n['r', 'D', 'L', 'kappa', 'dt']\n[5.0, 1.56, 2.025, 1.0, 1.0]\nreturning outputs as pandas.DataFrame instance\n" ] ], [ [ "## generate run_1.dat \n", "_____no_output_____" ] ], [ [ "A_values=np.array([20.25,25,39,50,56.25,100,156.25,189])[::-1]\nL_values=np.sqrt(A_values)\nL_values", "_____no_output_____" ], [ "niter=250 #trials per worker\nr_values=np.array([0.1,0.2,0.3,0.4,0.5,1.0])#cm\nD_values=np.array([0.25,0.5,0.75,1.0,1.25,1.5,1.75,2.0])#cm^2/s\nA_values=np.array([20.25,25,39,50,56.25,100,156.25,189])[::-1]#cm^2\nL_values=np.sqrt(A_values)#cm\nkappa_values=np.array([1,10,100])#1/s\nnum_trials_per_setting=6\n#iterate over settings, scheduling the longest jobs first\ncount=0\nfor r in r_values:\n for D in D_values:\n for L in L_values:\n for kappa in kappa_values:\n num_trials=0\n while num_trials<num_trials_per_setting:\n num_trials+=1\n count=count+1\nprint(count)", "6912\n" ] ], [ [ "## debugging log parsing", "_____no_output_____" ] ], [ [ "input_fn='/home/timothytyree/Documents/GitHub/bgmc/python/data/osg_output/Log/job.out.8623404.5130'\n# def parse_output_log(input_fn,\ninclude_inputs=True,\nprinting=True\n# ):\nwith open(input_fn) as f:\n trgt1='Printing Inputs...\\n'\n trgt2='Printing Outputs...\\n'\n for n,line in enumerate(f):\n if trgt1 == line:\n if printing:\n print(f'found inputs starting after line {n}')\n n_input=n\n if trgt2 == line:\n if printing:\n print(f'found outputs starting after line {n}')\n n_output=n\n\nwith open(input_fn) as f:\n 
inputs=f.readlines()[n_input+1:n_output-1]\ncol_name_lst=[]\ncol_value_lst=[]\nfor line in inputs:\n string=line.split(' ')[-1]\n eid=string.find('=')\n if eid!=-1:\n col_name=string[:eid]\n col_value=eval(string[eid+1:-1])\n col_name_lst.append(col_name)\n col_value_lst.append(col_value)\ndf=pd.read_csv(input_fn,header=n_output-1)\n#drop that 'Unammed: {Nmax}' column \ndf.drop(columns=[df.columns[-1]], inplace=True)\nif include_inputs:\n if printing:\n print(\"input parameters were:\")\n print(col_name_lst)\n print(col_value_lst)\n print(\"returning outputs as pandas.DataFrame instance\")\n for name,value in zip ( col_name_lst, col_value_lst):\n df[name]=value\n# return df", "found inputs starting after line 11\nfound outputs starting after line 18\n" ], [ "eid=string.find('=')\nif eid!=-1:\n col_name=string[:eid]\n col_value=eval(string[eid+1:-2])\n col_name_lst.append(col_name)\n col_value_lst.append(col_value)", "_____no_output_____" ], [ "string[eid+1:-1]", "_____no_output_____" ] ], [ [ "# (not meant for this ipynb) analyze manual output", "_____no_output_____" ] ], [ [ "#preliminary?\narr1=np.array([6.62549e-05,6.329412e-05,6.154902e-05,5.390196e-05,4.927451e-05,4.609804e-05,4.245098e-05,4.035294e-05,3.492157e-05,2.927451e-05,2.776471e-05,2.539216e-05,2.288235e-05,1.94902e-05,1.717647e-05,1.590196e-05,1.413725e-05,1.362745e-05,1.109804e-05,1.011765e-05,9.235294e-06,8.941176e-06,7.941176e-06,7.039216e-06,6.313725e-06,6.176471e-06,6.078431e-06,6e-06,5.823529e-06,5.45098e-06,4.529412e-06,4.509804e-06,4.352941e-06,4.313725e-06,4e-06,3.921569e-06,3.862745e-06,3.745098e-06,3.588235e-06,3.411765e-06,3.333333e-06,2.627451e-06,2.529412e-06,2.490196e-06,2.313725e-06,2.313725e-06,2.294118e-06,2.27451e-06,2.254902e-06,2.078431e-06,1.960784e-06,1.803922e-06,1.72549e-06,1.705882e-06,1.647059e-06,1.627451e-06,1.588235e-06,1.568627e-06,1.568627e-06,1.529412e-06,1.45098e-06,1.45098e-06,1.45098e-06,1.45098e-06,1.45098e-06,1.431373e-06,1.431373e-06,1.431373e-06,1.431373e-06,1.431373e-06,1.411765e-06,1.392157e-06,1.392157e-06,1.352941e-06,1.333333e-06,1.333333e-06,1.313725e-06,1.313725e-06,1.27451e-06,1.215686e-06,1.215686e-06,1.176471e-06,1.176471e-06,1.176471e-06,1.176471e-06,1.176471e-06,1.176471e-06,1.176471e-06,1.176471e-06,1.137255e-06,1.137255e-06,1.137255e-06,1.137255e-06,1.137255e-06,1.137255e-06,1.117647e-06,1.098039e-06,1.078431e-06,1.078431e-06,1.078431e-06,1.078431e-06,1.078431e-06,1.078431e-06,1.078431e-06,1.078431e-06,1.078431e-06,1.078431e-06,1.078431e-06,1.078431e-06,1.078431e-06,1.058824e-06,1.058824e-06])\narr2=np.array([4.143137e-05,4.027451e-05,4.027451e-05,4.015686e-05,3.792157e-05,3.439216e-05,2.94902e-05,2.560784e-05,2.513725e-05,2.264706e-05,2.058824e-05,1.770588e-05,1.670588e-05,1.55098e-05,1.523529e-05,1.347059e-05,1.168627e-05,1.009804e-05,9.529412e-06,9.058824e-06,8.431373e-06,8e-06,7.666667e-06,7.666667e-06,7.215686e-06,6.509804e-06,5.901961e-06,5.666667e-06,5e-06,4.627451e-06,4.156863e-06,3.960784e-06,3.784314e-06,3.72549e-06,3.607843e-06,3.529412e-06,3.529412e-06,3.333333e-06,3.313725e-06,3.294118e-06,3.039216e-06,2.980392e-06,2.745098e-06,2.490196e-06,2.352941e-06,2.333333e-06,2.333333e-06,2.333333e-06,2.235294e-06,2.215686e-06,2.137255e-06,2.039216e-06,1.960784e-06,1.921569e-06,1.882353e-06,1.882353e-06,1.862745e-06,1.803922e-06,1.72549e-06,1.705882e-06,1.666667e-06,1.490196e-06,1.333333e-06,1.333333e-06,1.313725e-06,1.294118e-06,1.27451e-06,1.27451e-06,1.254902e-06,1.254902e-06,1.254902e-06,1.254902e-06,1.254902e-06,1.254902e-06,1.235294e-06,1.235294e-06,1.235294e-06,1
.215686e-06,1.215686e-06,1.215686e-06,1.196078e-06,1.176471e-06,1.156863e-06,1.156863e-06,1.117647e-06,1.117647e-06,1.117647e-06,1.117647e-06,1.117647e-06,1.117647e-06,1.117647e-06,1.098039e-06,1.098039e-06,1.058824e-06,1.039216e-06,1.039216e-06,1.039216e-06,1.039216e-06,1.039216e-06,1.039216e-06,1.039216e-06,1.039216e-06,1.039216e-06,1.039216e-06,1.039216e-06,1.039216e-06,1.039216e-06,1.039216e-06,1.039216e-06,1.019608e-06,1.019608e-06,1.019608e-06,1.019608e-06,1.019608e-06,1.019608e-06,1.019608e-06])", "_____no_output_____" ], [ "# test-3-7.input all equal :( niter=30\narr1=np.array([0.00263,0.001522,0.002293333,0.002127857,0.00177,0.001630476,0.001752917,0.001701538,0.001588077,0.001218889,0.001012857,0.0008446429,0.0007289655,0.0006366667,0.0005946667,0.000502,0.0004183333,0.000389,0.000378,0.0003736667,0.000344,0.00033,0.0003193333,0.0003073333,0.000291,0.0002713333,0.000257,0.0002026667,0.000162,0.0001593333,0.0001453333,0.0001386667,0.0001386667,0.0001386667,0.00012,0.0001093333,0.000109,0.0001063333,0.0001036667,9.266667e-05,8.633333e-05,8.633333e-05,8.566667e-05,8.1e-05,5.766667e-05,4.733333e-05,3.833333e-05,3.833333e-05,3.833333e-05,3.833333e-05,3.533333e-05,3.466667e-05,3.466667e-05,3.466667e-05,3.466667e-05,3.466667e-05,3.266667e-05,3.133333e-05,3.133333e-05,3.133333e-05,3.133333e-05,3.133333e-05,3.133333e-05,3.133333e-05,2.933333e-05,2.933333e-05,2.933333e-05,2.933333e-05,2.933333e-05,2.766667e-05,2.766667e-05,2.7e-05,2.633333e-05,2.633333e-05,2.566667e-05,2.533333e-05,2.5e-05,2.5e-05,2.5e-05,2.5e-05,2.5e-05,2.4e-05,2.4e-05,2.366667e-05,2.333333e-05,2.333333e-05,2.166667e-05,2.1e-05,2.1e-05,2.1e-05,2.1e-05,2.033333e-05,2e-05,1.966667e-05,1.966667e-05,1.833333e-05,1.833333e-05,1.833333e-05,1.833333e-05,1.833333e-05,1.833333e-05,1.833333e-05,1.833333e-05,1.833333e-05,1.733333e-05,1.733333e-05,1.666667e-05,1.666667e-05,1.633333e-05,1.633333e-05,1.6e-05,1.6e-05,1.6e-05,1.566667e-05,1.566667e-05,1.566667e-05,1.566667e-05,1.566667e-05,1.533333e-05,1.466667e-05,1.466667e-05,1.466667e-05,1.466667e-05,1.466667e-05,1.4e-05,1.3e-05,1.3e-05,1.3e-05,1.3e-05,1.3e-05,1.3e-05,1.3e-05,1.3e-05,1.266667e-05,1.266667e-05,1.266667e-05,1.266667e-05,1.266667e-05,1.266667e-05,1.266667e-05,1.266667e-05,1.266667e-05,1.266667e-05,1.266667e-05,1.266667e-05,1.266667e-05,1.266667e-05,1.233333e-05,1.2e-05,1.133333e-05,1.133333e-05,1.133333e-05,1.133333e-05,1.1e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05])\narr2=np.array([0.00263,0.001522,0.002293333,0.002127857,0.00177,0.001630476,0.001752917,0.001701538,0.001588077,0.001218889,0.001012857,0.0008446429,0.0007289655,0.0006366667,0.0005946667,0.000502,0.0004183333,0.000389,0.000378,0.0003736667,0.000344,0.00033,0.0003193333,0.0003073333,0.000291,0.0002713333,0.000257,0.0002026667,0.000162,0.0001593333,0.0001453333,0.0001386667,0.0001386667,0.0001386667,0.00012,0.0001093333,0.000109,0.0001063333,0.0001036667,9.266667e-05,8.633333e-05,8.633333e-05,8.566667e-05,8.1e-05,5.766667e-05,4.733333e-05,3.833333e-05,3.833333e-05,3.833333e-05,3.833333e-05,3.533333e-05,3.466667e-05,3.466667e-05,3.466667e-05,3.466667e-05,3.466667
e-05,3.266667e-05,3.133333e-05,3.133333e-05,3.133333e-05,3.133333e-05,3.133333e-05,3.133333e-05,3.133333e-05,2.933333e-05,2.933333e-05,2.933333e-05,2.933333e-05,2.933333e-05,2.766667e-05,2.766667e-05,2.7e-05,2.633333e-05,2.633333e-05,2.566667e-05,2.533333e-05,2.5e-05,2.5e-05,2.5e-05,2.5e-05,2.5e-05,2.4e-05,2.4e-05,2.366667e-05,2.333333e-05,2.333333e-05,2.166667e-05,2.1e-05,2.1e-05,2.1e-05,2.1e-05,2.033333e-05,2e-05,1.966667e-05,1.966667e-05,1.833333e-05,1.833333e-05,1.833333e-05,1.833333e-05,1.833333e-05,1.833333e-05,1.833333e-05,1.833333e-05,1.833333e-05,1.733333e-05,1.733333e-05,1.666667e-05,1.666667e-05,1.633333e-05,1.633333e-05,1.6e-05,1.6e-05,1.6e-05,1.566667e-05,1.566667e-05,1.566667e-05,1.566667e-05,1.566667e-05,1.533333e-05,1.466667e-05,1.466667e-05,1.466667e-05,1.466667e-05,1.466667e-05,1.4e-05,1.3e-05,1.3e-05,1.3e-05,1.3e-05,1.3e-05,1.3e-05,1.3e-05,1.3e-05,1.266667e-05,1.266667e-05,1.266667e-05,1.266667e-05,1.266667e-05,1.266667e-05,1.266667e-05,1.266667e-05,1.266667e-05,1.266667e-05,1.266667e-05,1.266667e-05,1.266667e-05,1.266667e-05,1.233333e-05,1.2e-05,1.133333e-05,1.133333e-05,1.133333e-05,1.133333e-05,1.1e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05])\narr1=np.array([0.00263,0.001522,0.002293333,0.002127857,0.00177,0.001630476,0.001752917,0.001701538,0.001588077,0.001218889,0.001012857,0.0008446429,0.0007289655,0.0006366667,0.0005946667,0.000502,0.0004183333,0.000389,0.000378,0.0003736667,0.000344,0.00033,0.0003193333,0.0003073333,0.000291,0.0002713333,0.000257,0.0002026667,0.000162,0.0001593333,0.0001453333,0.0001386667,0.0001386667,0.0001386667,0.00012,0.0001093333,0.000109,0.0001063333,0.0001036667,9.266667e-05,8.633333e-05,8.633333e-05,8.566667e-05,8.1e-05,5.766667e-05,4.733333e-05,3.833333e-05,3.833333e-05,3.833333e-05,3.833333e-05,3.533333e-05,3.466667e-05,3.466667e-05,3.466667e-05,3.466667e-05,3.466667e-05,3.266667e-05,3.133333e-05,3.133333e-05,3.133333e-05,3.133333e-05,3.133333e-05,3.133333e-05,3.133333e-05,2.933333e-05,2.933333e-05,2.933333e-05,2.933333e-05,2.933333e-05,2.766667e-05,2.766667e-05,2.7e-05,2.633333e-05,2.633333e-05,2.566667e-05,2.533333e-05,2.5e-05,2.5e-05,2.5e-05,2.5e-05,2.5e-05,2.4e-05,2.4e-05,2.366667e-05,2.333333e-05,2.333333e-05,2.166667e-05,2.1e-05,2.1e-05,2.1e-05,2.1e-05,2.033333e-05,2e-05,1.966667e-05,1.966667e-05,1.833333e-05,1.833333e-05,1.833333e-05,1.833333e-05,1.833333e-05,1.833333e-05,1.833333e-05,1.833333e-05,1.833333e-05,1.733333e-05,1.733333e-05,1.666667e-05,1.666667e-05,1.633333e-05,1.633333e-05,1.6e-05,1.6e-05,1.6e-05,1.566667e-05,1.566667e-05,1.566667e-05,1.566667e-05,1.566667e-05,1.533333e-05,1.466667e-05,1.466667e-05,1.466667e-05,1.466667e-05,1.466667e-05,1.4e-05,1.3e-05,1.3e-05,1.3e-05,1.3e-05,1.3e-05,1.3e-05,1.3e-05,1.3e-05,1.266667e-05,1.266667e-05,1.266667e-05,1.266667e-05,1.266667e-05,1.266667e-05,1.266667e-05,1.266667e-05,1.266667e-05,1.266667e-05,1.266667e-05,1.266667e-05,1.266667e-05,1.266667e-05,1.233333e-05,1.2e-05,1.133333e-05,1.133333e-05,1.133333e-05,1.133333e-05,1.1e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1
.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05])\narr2=np.array([0.00263,0.001522,0.002293333,0.002127857,0.00177,0.001630476,0.001752917,0.001701538,0.001588077,0.001218889,0.001012857,0.0008446429,0.0007289655,0.0006366667,0.0005946667,0.000502,0.0004183333,0.000389,0.000378,0.0003736667,0.000344,0.00033,0.0003193333,0.0003073333,0.000291,0.0002713333,0.000257,0.0002026667,0.000162,0.0001593333,0.0001453333,0.0001386667,0.0001386667,0.0001386667,0.00012,0.0001093333,0.000109,0.0001063333,0.0001036667,9.266667e-05,8.633333e-05,8.633333e-05,8.566667e-05,8.1e-05,5.766667e-05,4.733333e-05,3.833333e-05,3.833333e-05,3.833333e-05,3.833333e-05,3.533333e-05,3.466667e-05,3.466667e-05,3.466667e-05,3.466667e-05,3.466667e-05,3.266667e-05,3.133333e-05,3.133333e-05,3.133333e-05,3.133333e-05,3.133333e-05,3.133333e-05,3.133333e-05,2.933333e-05,2.933333e-05,2.933333e-05,2.933333e-05,2.933333e-05,2.766667e-05,2.766667e-05,2.7e-05,2.633333e-05,2.633333e-05,2.566667e-05,2.533333e-05,2.5e-05,2.5e-05,2.5e-05,2.5e-05,2.5e-05,2.4e-05,2.4e-05,2.366667e-05,2.333333e-05,2.333333e-05,2.166667e-05,2.1e-05,2.1e-05,2.1e-05,2.1e-05,2.033333e-05,2e-05,1.966667e-05,1.966667e-05,1.833333e-05,1.833333e-05,1.833333e-05,1.833333e-05,1.833333e-05,1.833333e-05,1.833333e-05,1.833333e-05,1.833333e-05,1.733333e-05,1.733333e-05,1.666667e-05,1.666667e-05,1.633333e-05,1.633333e-05,1.6e-05,1.6e-05,1.6e-05,1.566667e-05,1.566667e-05,1.566667e-05,1.566667e-05,1.566667e-05,1.533333e-05,1.466667e-05,1.466667e-05,1.466667e-05,1.466667e-05,1.466667e-05,1.4e-05,1.3e-05,1.3e-05,1.3e-05,1.3e-05,1.3e-05,1.3e-05,1.3e-05,1.3e-05,1.266667e-05,1.266667e-05,1.266667e-05,1.266667e-05,1.266667e-05,1.266667e-05,1.266667e-05,1.266667e-05,1.266667e-05,1.266667e-05,1.266667e-05,1.266667e-05,1.266667e-05,1.266667e-05,1.233333e-05,1.2e-05,1.133333e-05,1.133333e-05,1.133333e-05,1.133333e-05,1.1e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05,1.066667e-05])\n# (each leading with -9999,-9999,-9999,-9999,-9999,-9999,-9999,-9999,)\n\n# test-3-7.input all equal :( 
niter=3\narr1=np.array([0.0013,0.00164,0.00164,0.00164,0.001596667,0.00137,0.00068,0.00068,0.00068,0.00068,0.00068,0.00068,0.00068,0.0006433333,0.0006433333,0.0006433333,0.0004233333,0.0004233333,0.0004233333,0.0004233333,0.0004233333,0.0002966667,0.0002966667,0.0002966667,0.0002333333,0.0002333333,0.0001666667,0.0001666667,0.0001666667,0.0001666667,0.0001666667,0.0001166667,0.0001166667,0.0001166667,0.0001166667,4.666667e-05,3.333333e-05,3.333333e-05,3.333333e-05,3.333333e-05,3.333333e-05,3.333333e-05,3.333333e-05,3.333333e-05,3.333333e-05,3.333333e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,2e-05,2e-05,2e-05,2e-05,2e-05,2e-05,2e-05,2e-05,2e-05,2e-05,2e-05,2e-05,2e-05,2e-05,2e-05,2e-05,2e-05,2e-05,2e-05,2e-05,2e-05,2e-05,1.666667e-05,1.666667e-05,1.666667e-05,1.666667e-05,1.666667e-05,1.666667e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05])\narr2=np.array([0.0013,0.00164,0.00164,0.00164,0.001596667,0.00137,0.00068,0.00068,0.00068,0.00068,0.00068,0.00068,0.00068,0.0006433333,0.0006433333,0.0006433333,0.0004233333,0.0004233333,0.0004233333,0.0004233333,0.0004233333,0.0002966667,0.0002966667,0.0002966667,0.0002333333,0.0002333333,0.0001666667,0.0001666667,0.0001666667,0.0001666667,0.0001666667,0.0001166667,0.0001166667,0.0001166667,0.0001166667,4.666667e-05,3.333333e-05,3.333333e-05,3.333333e-05,3.333333e-05,3.333333e-05,3.333333e-05,3.333333e-05,3.333333e-05,3.333333e-05,3.333333e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,3e-05,2e-05,2e-05,2e-05,2e-05,2e-05,2e-05,2e-05,2e-05,2e-05,2e-05,2e-05,2e-05,2e-05,2e-05,2e-05,2e-05,2e-05,2e-05,2e-05,2e-05,2e-05,2e-05,1.666667e-05,1.666667e-05,1.666667e-05,1.666667e-05,1.666667e-05,1.666667e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05,1.333333e-05])", "_____no_output_____" ], [ 
"navg=1\nx1_values=np.arange(11,11+arr1.size-navg)\nx2_values=np.arange(11,11+arr2.size-navg)\n#take a moving average\ny1_values=1./(arr1[navg:]+arr1[:-navg])\ny2_values=1./(arr2[navg:]+arr2[:-navg])\n", "_____no_output_____" ] ], [ [ "I turned off forces, but the effect on collision times was zero to machine precision...\n\n^this means either (i) I'm not in a parameter regime where collision times are significantly effected by forces or (ii) there's a bug in my code", "_____no_output_____" ] ], [ [ "fontsize=18\nplt.plot(x1_values, y1_values,lw=2,label='unbiased random walk')\nplt.plot(x2_values, y2_values,lw=2,label='attractive forces')\nplt.xscale('log')\nplt.yscale('log')\n# plt.xlim([1e-0,1e3])\n# plt.ylim([1e4,6e6])\n# plt.title('attractive forces increased exponent\\n',fontsize=fontsize)\n\n\n# plt.title(u'comparison to simulation\\nwith two hybrid modes',fontsize=fontsize)\nplt.xlabel(r'N',fontsize=fontsize)\nplt.ylabel(r'$W_{-2}$ (Hz)', fontsize=fontsize)\n# plt.xlabel(r'q (cm$^{-2}$)',fontsize=fontsize)\n# plt.ylabel(r'w (Hz cm$^{-2}$)', fontsize=fontsize)\nplt.tick_params(axis='both', which='major', labelsize=fontsize)\nplt.tick_params(axis='both', which='minor', labelsize=0)\nplt.legend(fontsize=fontsize-5,ncol=1,loc='upper left')\nplt.show()", "_____no_output_____" ], [ "# from numpy import linspace\n# from scipy.integrate import odeint\n\n# #you can choose a better guess using fsolve instead of 0\n# def integral(y, _, F_l, M, cache=[0]):\n# v, preva = y[1], cache[0]\n# #use value for 'a' from the previous timestep\n# F_r = (((1 - preva) / 3) ** 2 + (2 * (1 + preva) / 3) ** 2) * v \n# #calculate the new value\n# a = (F_l - F_r) / M\n# cache[0] = a\n# return [v, a]\n\n# y0 = [0, 5]\n# time = linspace(0., 10., 1000)\n# F_lon = 100.\n# mass = 1000.\n\n# dydt = odeint(integral, y0, time, args=(F_lon, mass))", "_____no_output_____" ], [ "# plt.scatter(x=time,y=time*(10-time),c=time,cmap='jet')\n# # plt.scatter(x=time,y=dydt[:,0],c=time,cmap='jet')\n# # plt.scatter(x=time,y=dydt[:,1])\n\n# # plt.xscale('log')\n# # plt.yscale('log')\n# # plt.xlim([1e-1,1e1])\n# # plt.ylim([1e4,6e4])\n# # plt.title('accelerated attraction/annihilation trials only')\n# plt.show()", "_____no_output_____" ], [ "1", "_____no_output_____" ], [ "#load the example data\n# os.chdir(nb_dir)\n# data_dir='data/out.csv'\n# assert ( os.path.exists(data_dir) ) \n# df=pd.read_csv(data_dir,lineterminator=None)\n\n#load a log file\ninput_folder=f\"{nb_dir}/data\"\ninput_fn='example.log'\nos.chdir(input_folder)\ndf=parse_output_log(input_fn, include_inputs=False,printing=False)\nNtrials=df.index.values.shape[0]\nNmin=11\ndf.drop(columns=[f'{i}' for i in range(Nmin)], inplace=True)\n#any duds still there?\nassert ( not (df.values<-9000).any() )\ndf.head()", "/home/timothytyree/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:16: RuntimeWarning: invalid value encountered in less\n app.launch_new_instance()\n" ] ], [ [ "# notes/eqns for ou process", "_____no_output_____" ], [ "Suppose we have two particles that have been nearest neighbors since time, $t=0$.\nLet $X_t=$ the distance between two particles at time $t$. 
We may then model $X_t$ reverting to (from) some preferred distance, $x_0\ge0$, by setting $\varkappa>0$ ($\varkappa<0$) in \n$$\ndX_t=\varkappa(x_0-X_t)dt+\sqrt{2D}dW_t.\n$$\n\nFor $\varkappa>0$, the expected distance is \n$$\n\mathbb{E}[X_t]=X_0e^{-\varkappa t} + x_0(1-e^{-\varkappa t})\n$$\nand the covariance is\n$$\n\text{cov}(X_s,X_t)=\frac{D}{\varkappa}\Big(e^{-\varkappa |t-s|}-e^{-\varkappa (t+s)}\Big).\n$$\n", "_____no_output_____" ], [ "# DONT: dev add-result.pl", "_____no_output_____" ], [ "# TODO: dev routine that uses dask to do a sweep over N\nHINT: descend from Nmax to Nmin", "_____no_output_____" ] ], [ [ "#######################################################\n### TODO: implement in PYTHON (and later) with DASK ###\n#######################################################\n#TODO: open a file in append mode\n#TODO: print N, from Nmax descending to $Nmin\n#TODO: write params.input for this Ntips\n#TODO: system(./xrun < params.input)\n#TODO: change params.input to params/params_N_{N}.input\n#TODO: do all ^this in dask, but not with a daskbag... this should update a preallocated vector as tasks are finished\n#HINT: preallocation means save in current increasing N format", "_____no_output_____" ], [ "# #DONT(do perl stuff in python): initialize line \n# i=37\n# string.split(',')[i]", "_____no_output_____" ], [ "#TODO(goal): get item_lst from a daskbag for item = (n,Tavg)\nNmax=60;Nmin=6\nn_values=np.arange(Nmax,Nmin,-1)\nn_values", "_____no_output_____" ], [ "#template arguments for simulation\nx=np.array([0.1, 2, 5, 500, 0., 0., 1e-5, 1e-5, 8, 500, 1234, 0, 0, 0, 0])\nlog_dir="/home/timothytyree/Documents/GitHub/bgmc/c/ou"\nos.chdir(log_dir)\n#make routine that generates Tavg for a given n\ndef routine(n):\n    fn_out=f"Log/out_n_{n}.output"\n    #TODO: integrate return_CollTime with routine\n    os.system(f"/return_CollTime.x < 1-control.input | grep 'Tavg=' | grep -Eo '[+-]?[0-9]+([.][0-9]+)?' 
> Log/1-control.output\")\n# os.system(f\"./xrun.sh ${x[0]} ${x[1]} ${x[2]} ${x[3]} ${x[4]} ${x[5]} ${x[6]} ${x[7]} {n} ${x[9]} ${x[10]} ${x[11]} > {fn_out}\")\n #TODO: parse fn_out for Tavg\n retval=os.system(f'grep \"Tavg=\" {fn_out}')#' #| grep -Eo \"[+-]?[0-9]+([.][0-9]+)?\"')\n# retval=os.system(f'grep \"Tavg=\" {fn_out} #| grep -Eo \"[+-]?[0-9]+([.][0-9]+)?\"')\n #TODO: return Tavg\n return retval", "_____no_output_____" ], [ "n=32\n# os.system(f\"./xrun.sh ${x[0]} ${x[1]} ${x[2]} ${x[3]} ${x[4]} ${x[5]} ${x[6]} ${x[7]} {n} ${x[9]} ${x[10]} ${x[11]} > a.out\")\nroutine(n)", "_____no_output_____" ], [ "!ls", "1-test.input\t output.txt\t\t runs\r\n1-test.output\t params.input\t\t src\r\nadd-results.pl\t post_process.sh\t summarize.sh\r\na.txt\t\t prepare-CollTimes.pl\t test-3.input\r\nclean-log.sh\t python\t\t\t test-4.input\r\nCommonDefines.h requirements.sh\t test-5.input\r\ndev\t\t return_CollTime.c\t test-6.input\r\nfktest.input\t return_CollTimes_Archived.c test.input\r\ngcc.sh\t\t return_CollTimes.c\t testpl.sh\r\nkey.input\t return_CollTimes.sh\t test.sh\r\nlib\t\t return-CollTimes.submit test.txt\r\nlocal-main.sh\t return-CollTimes-test.submit testy.sh\r\nLog\t\t return_CollTimes.x\t xrun.sh\r\nmyoutput.txt\t return_CollTime.x\r\n" ], [ "import numpy as np\nniter=1500 #trials per worker\nr_values=np.array([0.1,1.0])#,0.2,0.3,0.4,0.5,.6,.7,.8,.9,1.,2.])#cm\nD_values=np.array([2.,20.])#0.2,1.0,1.5,2.0,3.,4.,5.])#cm^2/s\nA_values=np.array([25.])#20.25,25,39,50,56.25,100,156.25,189,250])[::-1]#cm^2\nL_values=np.sqrt(A_values)#cm\nkappa_values=np.array([500,1500])#5,10,15,20,25,30,35,40,45,50,55,60,65,70,75,80,85,90,95,100,250,500])#1/s\nvarkappa_values=np.array([-5,-1.5,-0.2,0,0.2,1.5,5])#1/s\nx0_values=np.array([0.,0.1,0.2,0.3,0.4,0.5,1.0])#cm\nDt_values=np.array([1e-5,1e-4])#10**-i for i in range(6)])\ndt=1e-5\nNmax=100\nnum_trials_per_setting=1\nreflect_values=np.array([0])\nset_second_values=np.array([0])\nno_repulsion_values=np.array([0,1])\nno_attraction_values=np.array([0,1])\n#iterate over settings, scheduling the longest jobs first\ncount=0\nfor set_second in set_second_values:\n for reflect in reflect_values:\n for r in r_values:\n for D in D_values:\n for L in L_values:\n for kappa in kappa_values:\n for varkappa in varkappa_values:\n for x0 in x0_values:\n for Dt in Dt_values:\n for no_repulsion in no_repulsion_values:\n for no_attraction in no_attraction_values:\n num_trials=0\n while num_trials<num_trials_per_setting:\n num_trials+=1\n count=count+1\n print(f\"{r} {D} {L} {kappa} {varkappa} {x0} {Dt} {dt} {Nmax} {niter} {reflect} {set_second} {no_repulsion} {no_attraction}\")\n# print(count)", "3136\n" ] ] ]
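The mean-reverting model in the notes above is easy to sanity-check numerically. The sketch below is illustrative only: the actual trials in this notebook run through the compiled `return_CollTime.x` binary, and the parameter values here are placeholders. It discretizes $dX_t=\varkappa(x_0-X_t)dt+\sqrt{2D}dW_t$ with an Euler-Maruyama step, so for $\varkappa>0$ the sample mean should relax toward $x_0$ and the stationary variance toward $D/\varkappa$, consistent with the covariance formula above.

```python
import numpy as np

def simulate_ou(x_start, varkappa, x0, D, dt=1e-4, n_steps=200_000, seed=0):
    """Euler-Maruyama integration of dX = varkappa*(x0 - X)*dt + sqrt(2D)*dW."""
    rng = np.random.default_rng(seed)
    x = np.empty(n_steps)
    x[0] = x_start
    dw = rng.standard_normal(n_steps - 1) * np.sqrt(2.0 * D * dt)
    for i in range(1, n_steps):
        x[i] = x[i - 1] + varkappa * (x0 - x[i - 1]) * dt + dw[i - 1]
    return x

# For varkappa > 0 the trajectory relaxes toward x0 on a timescale 1/varkappa,
# and the stationary variance should approach D/varkappa (= 0.4 here).
x = simulate_ou(x_start=1.0, varkappa=5.0, x0=0.2, D=2.0)
print(x[len(x) // 2:].mean(), x[len(x) // 2:].var(), 2.0 / 5.0)
```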
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ] ]
cb9ce4d3301c4230009d63dff9c9d68a8c977ef7
392,907
ipynb
Jupyter Notebook
casing3D/DC_Approximating_Steel_Cased_Wells.ipynb
lheagy/casingNotebooks
70c96a20db0f6e5b749438c7aca3ada311a7c5ea
[ "MIT" ]
null
null
null
casing3D/DC_Approximating_Steel_Cased_Wells.ipynb
lheagy/casingNotebooks
70c96a20db0f6e5b749438c7aca3ada311a7c5ea
[ "MIT" ]
null
null
null
casing3D/DC_Approximating_Steel_Cased_Wells.ipynb
lheagy/casingNotebooks
70c96a20db0f6e5b749438c7aca3ada311a7c5ea
[ "MIT" ]
null
null
null
292.995526
160,178
0.909523
[ [ [ "# Approximating Steel Cased Wells - DC\n\n[Lindsey Heagy](http://github.com/lheagy)\n\nIn this example, we examine the impact of upscaling the well using\n- the assumption that the well is a solid rod of steel\n- averaging conductivity such that the $\\sigma A$ is the same in both cases\n\nThese experiments are conducted at DC. The initial model we consider is based on that shown Um, 2015, *Finite element modelling of transient electromagnetic fields near steel-cased wells* https://doi.org/10.1093/gji/ggv193 \n\n### Reproducing these results\nTo run these examples, you need to have installed \n- [discretize](http://discretize.simpeg.xyz) (branch: `feat/3Dcylmesh`)\n```\npip install git+https://github.com/simpeg/discretize.git@feat/3Dcylmesh\n```\n- [SimPEG](http://docs.simpeg.xyz) (branch: `em/feat/galvanic-hj`)\n```\npip install git+https://github.com/simpeg/simpeg.git@em/feat/galvanic-hj\n```\n- [casingSimulations](https://github.com/lheagy/casingSimulations) (branch: `master`)\n```\npip install git+https://github.com/lheagy/casingSimulations.git\n```\n- [pymatsolver](https://github.com/rowanc1/pymatsolver)\n```\npip install pymatsolver\n```\n\nSimPEG and the implementation for electrical and electromagnetic methods is described in ([Cockett et al., 2015](https://doi.org/10.1016/j.cageo.2015.09.015) and [Heagy et al., 2017](https://arxiv.org/abs/1610.00804)).", "_____no_output_____" ] ], [ [ "import discretize\nfrom discretize import utils\nimport numpy as np\nimport scipy.sparse as sp\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import LogNorm\nfrom scipy.constants import mu_0, inch, foot\nimport ipywidgets\n\nfrom SimPEG.EM import TDEM\nfrom SimPEG import Utils, Maps\n\nimport casingSimulations as casingSim\n\nfrom pymatsolver import Pardiso\n\n%matplotlib inline", "_____no_output_____" ] ], [ [ "## Parameters\n\nTo examine the impact of replacing the steel cased well with a solid well, I have run a few simulations using SimPEG and simulating the DC resistivity equations using a finite volume approach on a 3D cylindrical mesh. \n\nThe model setup follows . The electrical conductivities used are:\n- Casing $1\\times 10^6$ S/m\n- Air $3\\times10^{-7}$ S/m\n- Background $3\\times10^{-2}$ S/m\n- Inside the casing - same as background\n\nIn the first example, the well is 200m long. The thickness of the casing is 12mm. Here, we are interested in examining a top casing source, where one electrode is connected to the top of the casing and a return electrode is some radial distance away on the surface. In the following examples, we will put the return electrode a distance of 2x the length of the well (for the first example, it is 400 m away). For datam we measure the radial electric field along a line $90^\\circ$ from the source wire, starting from the well and measuring out to a distance of 400m away. \n\n### Experiments\n\nFrom the base example, there are 3 that are setup, you can select one by changing the `experiment` variable or setting your own values for `casing_l`, `sigma_back` and `sigma_air`.", "_____no_output_____" ] ], [ [ "simDir = 'DC_approximations' # create a simulation directory where results can be saved. 
", "_____no_output_____" ], [ "# casing parameters by experiment \nexperiments = {\n 1: {'casing_l': 200, 'sigma_back': 3e-2},\n 2: {'casing_l': 200, 'sigma_back': 3e-1},\n 3: {'casing_l': 1000, 'sigma_back': 3e-2},\n}", "_____no_output_____" ] ], [ [ "### select an experiment", "_____no_output_____" ] ], [ [ "experiment = 3", "_____no_output_____" ], [ "casing_l = experiments[experiment]['casing_l']\nsigma_back = experiments[experiment]['sigma_back']\n\nprint(\n 'Setting up experiment {}. \\nThe casing is {}m long, and the '\n 'conductivity of the background is {} S/m'.format(\n experiment, casing_l, sigma_back\n )\n)", "Setting up experiment 3. \nThe casing is 1000m long, and the conductivity of the background is 0.03 S/m\n" ], [ "casing_t = 10e-3 # 10mm thick casing\ncasing_d = 100e-3 # 10cm diameter\n\nmodel = casingSim.model.CasingInHalfspace(\n directory = simDir,\n sigma_casing = 1e6, # conductivity of the casing (S/m)\n sigma_back = sigma_back, # conductivity of the background (S/m)\n sigma_inside = sigma_back, # fluid inside the well has same conductivity as the background\n sigma_air = 3e-7, # conductivity of the air\n casing_d = casing_d-casing_t, # 135mm is outer casing diameter\n casing_l = casing_l,\n casing_t = casing_t, \n src_a = np.r_[0., np.pi, -1.25], # put the A electrode just below the surface\n src_b = np.r_[2*casing_l, np.pi, -1.25] # put the return electrode at a distance of 2 x length of well away\n)\n\n# adjust the src_a location so it is connected to well\nmodel.src_a = np.r_[model.casing_r, np.pi, -1.25] ", "_____no_output_____" ], [ "# Here we print the parameters being used to set up the simulation\nmodel.serialize()", "_____no_output_____" ] ], [ [ "## Mesh\n\nHere we set up a 3D cylindrical mesh, discretizing in $x$, $\\theta$ and $z$. \n\nTo discretize in x, we start by defining the finest region of the mesh, ensuring that we have 4 cells across the thickness of the casing. From there, we expand the cell sizes until we reach the second uniform cell size we want to model at (`csx2`). We then use a constant cell spacing of `csx2` until we have meshed out to the end of the domain in which we want to examine data (`domainx2`). Beyond that, we add padding cells to *\"infinity\"*", "_____no_output_____" ] ], [ [ "# parameters defining the core region of the mesh\n# note that the finest re\ncsx2 = 25. 
# cell size in the x-direction in the second uniform region of the mesh (where we measure data)\ncsz = 2.5 # cell size in the z-direction \ndomainx2 = 500 # go out 500m from the well\n\n# padding parameters\nnpadx, npadz = 15, 22 # number of padding cells\npfx2 = 1.4 # expansion factor for the padding to infinity in the x-direction\npfz = 1.4\n\n# discretization in theta\nncy = 1\nnstretchy = 5\nstretchfact = 1.5\nhy = utils.meshTensor([(1, nstretchy, -stretchfact), (1, ncy), (1, nstretchy, stretchfact)])\nhy = hy * 2*np.pi/hy.sum()\n\n# set up a mesh generator which will build a mesh based on the provided parameters\n# and casing geometry\ncylMeshGen = casingSim.CasingMeshGenerator(\n directory=simDir, # directory where we can save things\n modelParameters=model, # casing parameters\n npadx=npadx, # number of padding cells in the x-direction\n npadz=npadz, # number of padding cells in the z-direction\n domain_x=domainx2, # extent of the second uniform region of the mesh \n hy=hy, # cell spacings in the \n csx1=model.casing_t/4., # use at least 4 cells per across the thickness of the casing\n csx2=csx2, # second core cell size\n csz=csz, # cell size in the z-direction\n pfx2=pfx2, # padding factor to \"infinity\"\n pfz=pfz # padding factor to \"infinity\" for the z-direction\n)", "_____no_output_____" ], [ "cylMeshGen.mesh.plotGrid()", "_____no_output_____" ], [ "# Plot the source location\n\nax = plt.subplot(111, projection='polar')\ncylMeshGen.mesh.plotGrid(ax=ax, slice='z')\nax.plot(model.src_a[1], model.src_a[0], 'ro')\nax.plot(model.src_b[1], model.src_b[0], 'rs')\nax.set_ylim([0., 3*model.casing_l])", "_____no_output_____" ] ], [ [ "## Create models to compare to\n\nHere, we create two more models which we want to simulate on\n- `solid` fills in the center of the casing with a conductivity equal to that of steel\n- `sigma_A` approximates the casing as a solid rod with conductivity found by preserving the conductivity - cross sectional area product. ", "_____no_output_____" ] ], [ [ "# Solid steel\nmodel_solid = model.copy()\nmodel_solid.sigma_inside = model_solid.sigma_casing # fill in the center of the well with steel", "_____no_output_____" ], [ "# average so that we preserve the conductivity * cross sectional area\nsigmaA = model.sigma_casing * (model.casing_b**2 - model.casing_a**2) / (model.casing_b**2) # times pi / pi\n\nmodel_sigma_A = model.copy()\nmodel_sigma_A.sigma_inside = sigmaA\nmodel_sigma_A.sigma_casing = sigmaA\n\nprint(\"Preserving sigma * A gives a conductivity of {} S/m\".format(sigmaA))", "Preserving sigma * A gives a conductivity of 360000.0 S/m\n" ], [ "# put the models in a dictionary for convienence \nmodel_names = ['baseline', 'solid', 'sigma_A']\nmodelDict = dict(zip(model_names, [model, model_solid, model_sigma_A]))", "_____no_output_____" ], [ "# Assign physical properties on the mesh\nphyspropsDict = {\n 'baseline': casingSim.model.PhysicalProperties(cylMeshGen, model),\n 'solid': casingSim.model.PhysicalProperties(cylMeshGen, model_solid),\n 'sigma_A': casingSim.model.PhysicalProperties(cylMeshGen, model_sigma_A)\n}", "_____no_output_____" ], [ "# Plot the models\nxlim = np.r_[-1, 1] # x-limits in meters\nzlim = np.r_[-1.5*model.casing_l, 10.] # z-limits in meters. 
(z-positive up)\n\nfig, ax = plt.subplots(1, 3, figsize=(18, 5), dpi=350)\nfor a, title in zip(ax, model_names):\n pp = physpropsDict[title]\n pp.plot_sigma(\n ax=a, \n pcolorOpts={'norm':LogNorm()} # plot on a log-scale\n )\n a.set_title('{} \\n\\n $\\sigma$ = {:1.2e}S/m'.format(title, pp.modelParameters.sigma_casing), fontsize=13)\n# cylMeshGen.mesh.plotGrid(ax=a, slice='theta') # uncomment to plot the mesh on top of this\n a.set_xlim(xlim)\n a.set_ylim(zlim)", "_____no_output_____" ], [ "model.src_a", "_____no_output_____" ], [ "# Set up 4 sources, top casing, top & not coupled, downhole, downhole not coupled\n\nsrc_a = np.vstack([\n [model.casing_r, np.pi, -1.25],\n [model.casing_r + 1., np.pi, -1.25],\n [model.casing_r, np.pi, -casing_l + 5.],\n [0., np.pi, -casing_l + 5.],\n [0., np.pi, -casing_l - 5.]\n])\n\nsrc_b = np.ones((src_a.shape[0],1)) * model.src_b\n\nsrc_names = [\n \"top casing\",\n \"surface, disconnected\",\n \"downhole\",\n \"downhole, disconnected\",\n \"below hole\"\n]", "_____no_output_____" ], [ "# Plot the source location\n\nsymbols = ['ro', 'ws', 'k>', 'mo', 'c*', 'yx']\n\nfig, ax = plt.subplots(1, 1, figsize=(4, 5))\n# cylMeshGen.mesh.plotGrid(ax=ax, slice='theta')\nphyspropsDict['baseline'].plot_sigma(\n ax=ax, \n pcolorOpts={'norm':LogNorm()} # plot on a log-scale\n)\n\nfor i in range(src_a.shape[0]):\n ax.plot(src_a[i, 0], src_a[i, 2], symbols[i])\n ax.plot(src_b[i, 0], src_b[i, 2], symbols[i])\n \nax.set_xlim([-0.1, 1.25]) #src_b[:, 0].max()])\nax.set_ylim([ -1.1*model.casing_l, 1])\n", "_____no_output_____" ] ], [ [ "## set up a DC simulation", "_____no_output_____" ] ], [ [ "simDict = {}\nfor title in model_names:\n simDict[title] = casingSim.run.SimulationDC(\n modelParameters=modelDict[title], directory=simDir, \n meshGenerator=cylMeshGen, \n src_a=src_a, src_b=src_b\n )\n", "Homogeneous Dirichlet is the natural BC for this CC discretization.\nHomogeneous Dirichlet is the natural BC for this CC discretization.\nHomogeneous Dirichlet is the natural BC for this CC discretization.\n" ], [ "%%time\nfieldsDict = {}\nfor title in model_names:\n print('--- Running {} ---'.format(title))\n fieldsDict[title] = simDict[title].run()\n print('\\n')", "--- Running baseline ---\nValidating parameters...\n max x: 14124.7083613, min z: -15352.8558054, max z: 14352.8558054, nC: 459448\nSaved DC_approximations/simulationParameters.json\nStarting SimulationDC\nUsing <class 'pymatsolver.direct.Pardiso'> Solver\n ... Done. Elapsed time : 16.3764960766\n\n\n--- Running solid ---\nValidating parameters...\n max x: 14124.7083613, min z: -15352.8558054, max z: 14352.8558054, nC: 459448\nSaved DC_approximations/simulationParameters.json\nStarting SimulationDC\nUsing <class 'pymatsolver.direct.Pardiso'> Solver\n ... Done. Elapsed time : 17.7007148266\n\n\n--- Running sigma_A ---\nValidating parameters...\n max x: 14124.7083613, min z: -15352.8558054, max z: 14352.8558054, nC: 459448\nSaved DC_approximations/simulationParameters.json\nStarting SimulationDC\nUsing <class 'pymatsolver.direct.Pardiso'> Solver\n ... Done. Elapsed time : 19.5516200066\n\n\nCPU times: user 1min 17s, sys: 6.72 s, total: 1min 24s\nWall time: 53.7 s\n" ] ], [ [ "## Plot Results\n\nHere we plot the radial electric field along a line $90^{\\circ}$ from the source. 
", "_____no_output_____" ] ], [ [ "# plot e-field on surface\n\nepsilon = 1e-16\n\ndef plot_ex_field(theta_ind=1, src_ind=0, xmin=10, xmax=500, zloc=0):\n xlim = [xmin, xmax]\n src_baseline = simDict['baseline'].survey.srcList[src_ind]\n\n fig, ax = plt.subplots(1, 2, figsize=(10, 4), dpi=400)\n for i, key in enumerate(model_names):\n f = fieldsDict[key]\n src = simDict[key].survey.srcList[src_ind]\n casingSim.view.plotLinesFx(\n cylMeshGen.mesh, field=f[src,'e'],\n pltType='semilogy', ax=ax[0],\n theta_ind=theta_ind, xlim=xlim, \n color_ind=i, label=key, \n zloc=zloc\n )\n\n if key != 'baseline':\n f = fieldsDict[key]\n casingSim.view.plotLinesFx(\n cylMeshGen.mesh,\n field=100*(\n np.absolute(f[src,'e'] - fieldsDict['baseline'][src_baseline,'e'])/\n (np.absolute(fieldsDict['baseline'][src_baseline,'e']) + epsilon)\n ),\n pltType='plot', ax=ax[1],\n theta_ind=theta_ind, xlim=xlim, \n color_ind=i, label=key, zloc=zloc\n )\n \n# print('src_a = {}'.format(src_a[src_ind, :]))\n ax[0].legend()\n ax[0].set_ylabel('Electric Field (V/m)') \n ax[0].set_title(src_names[src_ind])\n\n ax[1].legend()\n ax[1].set_ylabel('percent difference')\n plt.tight_layout()\n plt.show()\n", "_____no_output_____" ], [ "ipywidgets.interact(\n plot_ex_field,\n theta_ind = ipywidgets.IntSlider(min=0, max=len(cylMeshGen.hy)-1, value=1),\n src_ind = ipywidgets.IntSlider(min=0, max=src_a.shape[0]-1, value=0),\n xmin = ipywidgets.FloatText(value=10), \n xmax = ipywidgets.FloatText(value=500),\n zloc = ipywidgets.FloatText(value=0)\n)", "_____no_output_____" ] ], [ [ "## Plot the casing currents", "_____no_output_____" ] ], [ [ "# plot currents in casing\n\nepsilon = 1e-16\n\ndef plot_casing_currents(src_ind=0):\n \n src_baseline = simDict['baseline'].survey.srcList[src_ind]\n\n fig, ax = plt.subplots(1, 2, figsize=(10, 4), dpi=400)\n ax = discretize.utils.mkvc(ax)\n\n baseline_src = simDict['baseline'].survey.srcList[src_ind]\n ix_baseline, iz_baseline = casingSim.physics.CasingCurrents(\n fieldsDict['baseline'][baseline_src, 'j'], cylMeshGen.mesh, \n# modelDict['baseline'].casing_a, \n 0,\n modelDict['baseline'].casing_b, modelDict['baseline'].casing_z,\n )\n \n for i, key in enumerate(model_names):\n f = fieldsDict[key]\n src = simDict[key].survey.srcList[src_ind]\n mod = modelDict[key]\n \n ix, iz = casingSim.physics.CasingCurrents(\n f[src, 'j'], cylMeshGen.mesh, 0, \n# mod.casing_a if key == 'baseline' else 0, \n mod.casing_b, mod.casing_z,\n )\n \n ax[0].plot(cylMeshGen.mesh.vectorNz, -iz, label=key, color=\"C{}\".format(i))\n# ax[2].plot(cylMeshGen.mesh.vectorCCz, ix, label=key, color=\"C{}\".format(i))\n\n if key != 'baseline':\n \n ax[1].plot(\n cylMeshGen.mesh.vectorNz, \n np.absolute(iz-iz_baseline)/(np.absolute(iz_baseline)+epsilon)*100,\n label=key, color=\"C{}\".format(i)\n )\n# ax[3].plot(\n# cylMeshGen.mesh.vectorCCz, \n# np.absolute(ix-ix_baseline)/(np.absolute(ix_baseline)+epsilon),\n# label=key, color=\"C{}\".format(i)\n# )\n \n \n [a.set_xlim([0., -casing_l]) for a in ax]\n [a.legend() for a in ax]\n [a.set_xlabel('depth (m)')]\n# ax[0].set_ylabel('Electric Field (V/m)') \n\n# ax[1].legend()\n# ax[1].set_ylabel('percent difference')\n\n ax[0].set_ylabel('Downward-going Current (A)')\n ax[1].set_ylabel('Percent difference from baseline')\n \n ax[0].set_title(src_names[src_ind] + ' source \\n\\n Vertical Current in Casing')\n ax[1].set_title('Difference from baseline (%)')\n \n \n plt.tight_layout()\n plt.show()", "_____no_output_____" ], [ "ipywidgets.interact(\n plot_casing_currents, \n src_ind = 
ipywidgets.IntSlider(min=0, max=src_a.shape[0]-1, value=0)\n)", "_____no_output_____" ], [ "mesh = cylMeshGen.mesh\nj = fieldsDict['baseline'][simDict['baseline'].survey.srcList[0], 'j']\nradius = 1", "_____no_output_____" ], [ "def horizontal_current_flux(mesh, j, radius=1):\n Fx_inds = np.absolute(mesh.gridFx[:,0] - radius) == np.min(np.absolute(mesh.vectorNx - radius))\n\n jA = utils.sdiag(mesh.area) * j\n jA_surface = jA[\n np.hstack([Fx_inds, np.zeros(mesh.nFy, dtype=bool), np.zeros(mesh.nFz, dtype=bool)])\n ].reshape(mesh.vnFx[1], mesh.vnFx[2], order='F')\n\n ix = jA_surface.sum(0)\n \n return ix", "_____no_output_____" ], [ "# plot currents in casing\n\nepsilon = 1e-16\n\ndef plot_formation_currents(src_ind=0, radius=1, ymax=None):\n \n src_baseline = simDict['baseline'].survey.srcList[src_ind]\n\n fig, ax = plt.subplots(1, 2, figsize=(10, 4), dpi=400)\n ax = discretize.utils.mkvc(ax)\n\n baseline_src = simDict['baseline'].survey.srcList[src_ind]\n ix_baseline = horizontal_current_flux(\n cylMeshGen.mesh, fieldsDict['baseline'][baseline_src, 'j'], radius\n )\n \n for i, key in enumerate(model_names):\n f = fieldsDict[key]\n src = simDict[key].survey.srcList[src_ind]\n \n ix = horizontal_current_flux(\n cylMeshGen.mesh, fieldsDict[key][src, 'j'], radius\n )\n \n ax[0].plot(cylMeshGen.mesh.vectorCCz, ix, label=key, color=\"C{}\".format(i))\n\n if key != 'baseline':\n \n ax[1].plot(\n cylMeshGen.mesh.vectorCCz, \n np.absolute(ix-ix_baseline)/(np.absolute(ix_baseline)+epsilon)*100,\n label=key, color=\"C{}\".format(i)\n )\n \n [a.set_xlim([0., -1.25*casing_l]) for a in ax]\n [a.legend() for a in ax]\n [a.set_xlabel('depth (m)')]\n \n if not ymax is not None or ymax !=0:\n ylim = [0, ymax]\n ax[0].set_ylim(ylim)\n\n ax[0].set_ylabel('Horizontal currents (A)')\n ax[1].set_ylabel('Percent difference from baseline')\n \n ax[0].set_title(src_names[src_ind] + ' source \\n\\n Horizontal Current in Formation, r={}'.format(radius))\n ax[1].set_title('Difference from baseline (%)')\n \n \n plt.tight_layout()\n plt.show()", "_____no_output_____" ], [ "ipywidgets.interact(\n plot_formation_currents,\n src_ind=ipywidgets.IntSlider(min=0, max=len(src_names)-1, value=0), \n radius=ipywidgets.FloatText(value=10),\n ymax=ipywidgets.FloatText(value=0)\n)", "_____no_output_____" ] ], [ [ "## Charges\n\nHow do the charges change with different casing models?", "_____no_output_____" ] ], [ [ "# Plot the charges\nmesh2D = cylMeshGen.create_2D_mesh().mesh\n\ndef plotScalar(\n theta_ind=0, src_ind=0, clim_max=None, clim_min=None, \n max_depth=1.1*model.casing_l, \n max_r=0.1, top=10., view='charge'\n):\n fig, ax = plt.subplots(1, len(model_names), figsize=(len(model_names)*5, 6))\n \n assert view.lower() in ['charge', 'phi', 'j']\n\n xlim = max_r*np.r_[0, 1] # x-limits in meters\n zlim = np.r_[-max_depth, top] # z-limits in meters. 
(z-positive up)\n \n clim = None\n plotopts = {\n 'theta_ind': theta_ind,\n }\n if not clim_max is not None or clim_max != 0.:\n clim = clim_max * np.r_[-1, 1]\n plotopts['clim'] = clim\n\n if clim_min is not None or clim_min != 0.:\n plotopts['clim'][0] = clim_min\n \n for a, title in zip(ax, model_names):\n pp = physpropsDict[title]\n src = simDict[title].survey.srcList[src_ind]\n plotme = simDict[title].fields()[src, view]\n \n if view in ['charge', 'phi']:\n _, cb = pp.plot_prop(\n plotme,\n ax=a,\n pcolorOpts = {'cmap': 'bwr' if view == 'charge' else 'viridis'},\n **plotopts\n )\n elif view == 'j':\n jplt = casingSim.face3DthetaSlice(\n cylMeshGen.mesh, plotme, theta_ind=theta_ind\n )\n \n _, cb = casingSim.plotFace2D(\n mesh2D,\n jplt, real_or_imag='real', ax=a, range_x=xlim,\n range_y=zlim, sample_grid=np.r_[np.diff(xlim)/100., np.diff(zlim)/100.],\n logScale=True, clim=clim\n )\n a.set_title('{} source \\n {} model \\n\\n $\\sigma$ = {:1.2e}S/m, \\ntheta = {:1.1f} degrees'.format(\n src_names[src_ind], title, pp.modelParameters.sigma_casing, cylMeshGen.mesh.vectorCCy[theta_ind]*180/np.pi\n ), fontsize=13)\n # cylMeshGen.mesh.plotGrid(ax=a, slice='theta') # uncomment to plot the mesh on top of this\n a.set_xlim(xlim)\n a.set_ylim(zlim)\n# cb.set_label(view)\n \n # plot outline of casing\n m = modelDict[title]\n a.plot(\n np.r_[m.casing_a, m.casing_a, m.casing_b, m.casing_b, m.casing_a],\n np.r_[m.casing_z[1], m.casing_z[0], m.casing_z[0], m.casing_z[1], m.casing_z[1]],\n 'k', \n lw = 0.5\n )\n plt.tight_layout()\n plt.show()", "_____no_output_____" ] ], [ [ "### Widget for viewing charges\n\nThere are 3 panels, the true (left), if we were to replace the well with solid steel (center) and if we choose sigma to preserve the integrated conductance (right). For reference, I have plotted the true casing cross section.\n\nThe widgets available to you are\n- **theta_ind**: [0, ntheta slices] Which azimuth should we slice through\n- **clim_max**: max value on the colorbar so you can saturate it\n- **max_depth**: max z-limit for the plot\n- **top**: top of the plot (z)\n\nobservations:\n- (sanity checks): in all charges at interface between casing and formation. All positive charges (positive electrode connected to casing\n- charge distribution more uniform along length of casing for solid steel (also makes sense: better conductor)", "_____no_output_____" ] ], [ [ "# fig, ax = plt.subplots(1, 3, figsize=(18, 5), dpi=350)\n\nipywidgets.interact(\n plotScalar,\n theta_ind = ipywidgets.IntSlider(min=0, max=len(cylMeshGen.hy)-1, value=1),\n src_ind = ipywidgets.IntSlider(min=0, max=src_a.shape[0]-1, value=0),\n clim_max = ipywidgets.FloatText(value=0),\n clim_min = ipywidgets.FloatText(value=0),\n max_depth = ipywidgets.FloatText(value=np.ceil(1.25*model.casing_l)), \n max_r = ipywidgets.FloatText(value=0.1),\n top = ipywidgets.FloatText(value=10),\n view = ipywidgets.ToggleButtons(\n options=['charge', 'phi', 'j'], value='charge'\n ),\n) ", "_____no_output_____" ], [ "# Plot the models\n\ndef plotScalarDifference(\n clim_max=None, theta_ind=0, src_ind=0, max_depth=1.1*model.casing_l, max_r=0.1, top=10., view='charge', \n):\n assert view in ['charge', 'phi']\n \n fig, ax = plt.subplots(1, len(model_names)-1, figsize=(5*(len(model_names)-1), 5))\n \n xlim = max_r*np.r_[0, 1] # x-limits in meters\n zlim = np.r_[-max_depth, top] # z-limits in meters. 
(z-positive up)\n \n src_baseline = simDict['baseline'].survey.srcList[src_ind]\n \n for a, title in zip(ax, ['solid', 'sigma_A']):\n pp = physpropsDict[title]\n src = simDict[title].survey.srcList[src_ind]\n plotme = simDict[title].fields()[src, view] - simDict['baseline'].fields()[src_baseline, view]\n \n if clim_max is None or clim_max == 0.:\n clim = np.absolute(plotme).max() * np.r_[-1., 1.]\n else: \n clim = clim_max * np.r_[-1, 1]\n \n _, cb = pp.plot_prop(\n plotme,\n ax=a,\n pcolorOpts={\n 'cmap': 'bwr' if view == 'charge' else 'viridis',\n },\n clim=clim,\n theta_ind=theta_ind\n )\n a.set_title('{} \\n\\n $\\sigma$ = {:1.2e}S/m\\ntheta = {:1.2f} degree'.format(\n title, pp.modelParameters.sigma_casing, cylMeshGen.mesh.vectorCCy[theta_ind]*180/np.pi\n ), fontsize=13)\n # cylMeshGen.mesh.plotGrid(ax=a, slice='theta') # uncomment to plot the mesh on top of this\n a.set_xlim(xlim)\n a.set_ylim(zlim)\n cb.set_label('secondary ' + view)\n \n # plot outline of casing\n m = modelDict[title]\n a.plot(\n np.r_[m.casing_a, m.casing_a, m.casing_b, m.casing_b, m.casing_a],\n np.r_[m.casing_z[1], m.casing_z[0], m.casing_z[0], m.casing_z[1], m.casing_z[1]],\n 'k' if view == 'charge' else 'w', \n lw = 0.5\n )\n \n plt.tight_layout()\n plt.show()", "_____no_output_____" ] ], [ [ "### Plot the difference in charge distributions (approximation - true) \n\nobservations: \n- first: colorbar scales are different between the two! \n- solid steel: more negative at top, positive at bottom (consistent with more uniform charge distribution) ", "_____no_output_____" ] ], [ [ "ipywidgets.interact(\n plotScalarDifference,\n theta_ind = ipywidgets.IntSlider(min=0, max=len(cylMeshGen.hy)-1, value=1),\n src_ind = ipywidgets.IntSlider(min=0, max=src_a.shape[0]-1, value=0),\n clim_max = ipywidgets.FloatText(value=0), \n max_depth = ipywidgets.FloatText(value=1.25*model.casing_l), \n max_r = ipywidgets.FloatText(value=0.1),\n top = ipywidgets.FloatText(value=10),\n view = ipywidgets.ToggleButtons(options=['charge', 'phi'], value='charge'), \n)", "_____no_output_____" ] ], [ [ "### Total charge on the casing\n\n- integrate the charge on the casing. ", "_____no_output_____" ] ], [ [ "for src_ind in range(src_a.shape[0]):\n print('\\n----- src {} ------'.format(src_a[src_ind,[0,2]]))\n casing_charge = {}\n for title in model_names:\n casing_charge[title] = (\n utils.mkvc(simDict[title].fields()[simDict[title].survey.srcList[src_ind], 'charge'])\n )[modelDict[title].ind_casing(cylMeshGen.mesh)].sum()\n print('{:8s}: {:1.3e}'.format(title, casing_charge[title]))", "\n----- src [ 0.045 -1.25 ] ------\nbaseline: 1.513e-10\nsolid : 1.513e-10\nsigma_A : 1.513e-10\n\n----- src [ 1.045 -1.25 ] ------\nbaseline: -1.012e-14\nsolid : -1.065e-14\nsigma_A : -1.040e-14\n\n----- src [ 4.5e-02 -9.5e+02] ------\nbaseline: 1.513e-10\nsolid : 1.513e-10\nsigma_A : 1.513e-10\n\n----- src [ 0. -950.] ------\nbaseline: 8.301e-12\nsolid : 1.513e-10\nsigma_A : 1.513e-10\n\n----- src [ 0. -1005.] ------\nbaseline: 8.113e-16\nsolid : 1.834e-14\nsigma_A : 1.819e-14\n" ], [ "print(np.finfo(float).eps)", "2.220446049250313e-16\n" ] ] ]
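The $\sigma A$ upscaling used for `model_sigma_A` has a simple closed form, worth stating outside the notebook flow. The helper below is a sketch, not part of `casingSimulations`; the radii correspond to a 100 mm outer diameter with a 10 mm wall, which reproduces the 360000 S/m printed above.

```python
import numpy as np

def equivalent_solid_conductivity(sigma_casing, casing_a, casing_b):
    """Conductivity of a solid rod of radius casing_b carrying the same vertical
    conductance as a hollow casing with inner radius casing_a, outer casing_b:
    sigma_eq * pi * b**2 == sigma_casing * pi * (b**2 - a**2)."""
    return sigma_casing * (casing_b**2 - casing_a**2) / casing_b**2

b = 0.05          # outer radius of the casing (m)
a = b - 10e-3     # inner radius = outer radius minus the wall thickness (m)
sigma_eq = equivalent_solid_conductivity(1e6, a, b)
assert np.isclose(sigma_eq * np.pi * b**2, 1e6 * np.pi * (b**2 - a**2))
print(sigma_eq)   # 360000.0 S/m
```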
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
cb9ce89c3fe3d748c9b5a6a7ace8915836274f98
932
ipynb
Jupyter Notebook
Run Stock Analyzer Notebook.ipynb
sarahmk125/stock-analyzer
0669676dd60e1a8dd11d295967415aec4ac8376c
[ "Apache-2.0" ]
null
null
null
Run Stock Analyzer Notebook.ipynb
sarahmk125/stock-analyzer
0669676dd60e1a8dd11d295967415aec4ac8376c
[ "Apache-2.0" ]
null
null
null
Run Stock Analyzer Notebook.ipynb
sarahmk125/stock-analyzer
0669676dd60e1a8dd11d295967415aec4ac8376c
[ "Apache-2.0" ]
null
null
null
18.64
44
0.532189
[ [ [ "# Install the requirements\n# This needs to be done one time\n!pip install -r requirements.txt", "_____no_output_____" ], [ "# Import the manager\nfrom app.lib.manager import Manager\n\n# Run the function\nManager().runner()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code" ] ]
cb9d02568cd66d27c6dc46a382f0fae9b79099b3
235,238
ipynb
Jupyter Notebook
R_Notebooks/.ipynb_checkpoints/pm3-notebook-newdata-nn-checkpoint.ipynb
JannisKueck/14.38_Causal_ML
b6c0560550190b736f1760a3983c61bcf90d578d
[ "MIT" ]
null
null
null
R_Notebooks/.ipynb_checkpoints/pm3-notebook-newdata-nn-checkpoint.ipynb
JannisKueck/14.38_Causal_ML
b6c0560550190b736f1760a3983c61bcf90d578d
[ "MIT" ]
null
null
null
R_Notebooks/.ipynb_checkpoints/pm3-notebook-newdata-nn-checkpoint.ipynb
JannisKueck/14.38_Causal_ML
b6c0560550190b736f1760a3983c61bcf90d578d
[ "MIT" ]
null
null
null
104.411008
730
0.561644
[ [ [ "\n\nThis notebook contains an example for teaching.\n", "_____no_output_____" ], [ "# A Simple Case Study using Wage Data from 2015 - proceeding", "_____no_output_____" ], [ "So far we considered many machine learning method, e.g Lasso and Random Forests, to build a predictive model. In this lab, we extend our toolbox by predicting wages by a neural network.", "_____no_output_____" ], [ "## Data preparation", "_____no_output_____" ], [ "Again, we consider data from the U.S. March Supplement of the Current Population Survey (CPS) in 2015.", "_____no_output_____" ] ], [ [ "# Sys.setenv(RETICULATE_PYTHON = \"C:/Users/MSI-NB/anaconda3/envs/tensorflow_2\")", "_____no_output_____" ], [ "load(\"wage2015_subsample_inference.Rdata\")\nZ <- subset(data,select=-c(lwage,wage)) # regressors", "_____no_output_____" ] ], [ [ "Firt, we split the data first and normalize it.", "_____no_output_____" ] ], [ [ "nrow(data)", "_____no_output_____" ], [ "set.seed(1234)\ntraining <- sample(nrow(data), nrow(data)*(3/4), replace=FALSE)\ndim(data)", "_____no_output_____" ], [ "data_train <- data[training,1:16]\ndata_test <- data[-training,1:16]\ndata_train", "_____no_output_____" ], [ "# normalize the data\nmean <- apply(data_train, 2, mean)\nstd <- apply(data_train, 2, sd)", "_____no_output_____" ], [ "data_train <- scale(data_train, center = mean, scale = std)\ndata_test <- scale(data_test, center = mean, scale = std)\ndata_test", "_____no_output_____" ], [ "data_train <- as.data.frame(data_train)\ndata_test <- as.data.frame(data_test)", "_____no_output_____" ], [ "data_train", "_____no_output_____" ] ], [ [ "Then, we construct the inputs for our network.", "_____no_output_____" ] ], [ [ "X_basic <- \"sex + exp1 + shs + hsg+ scl + clg + mw + so + we\"\nformula_basic <- as.formula(paste(\"lwage\", \"~\", X_basic))\nformula_basic", "_____no_output_____" ], [ "model_X_basic_train <- model.matrix(formula_basic,data_train)\nmodel_X_basic_test <- model.matrix(formula_basic,data_test)\n\nY_train <- data_train$lwage\nY_test <- data_test$lwage", "_____no_output_____" ], [ "model_X_basic_train", "_____no_output_____" ] ], [ [ "### Neural Networks", "_____no_output_____" ], [ "First, we need to determine the structure of our network. We are using the R package *keras* to build a simple sequential neural network with three dense layers.", "_____no_output_____" ] ], [ [ "dim(model_X_basic_train)[2]", "_____no_output_____" ], [ "library(keras)\n\nbuild_model <- function() {\n model <- keras_model_sequential() %>% \n layer_dense(units = 20, activation = \"relu\", \n input_shape = dim(model_X_basic_train)[2])%>% \n layer_dense(units = 10, activation = \"relu\") %>% \n layer_dense(units = 1) \n \n model %>% compile(\n optimizer = optimizer_adam(lr = 0.005),\n loss = \"mse\", \n metrics = c(\"mae\")\n )\n}", "_____no_output_____" ] ], [ [ "Let us have a look at the structure of our network in detail.", "_____no_output_____" ] ], [ [ "model <- build_model()", "_____no_output_____" ] ], [ [ "It is worth to notice that we have in total $441$ trainable parameters.", "_____no_output_____" ], [ "Now, let us train the network. Note that this takes some computation time. Thus, we are using gpu to speed up. 
The exact speed-up varies based on a number of factors including model architecture, batch size, input pipeline complexity, etc.", "_____no_output_____" ] ], [ [ "# training the network \nnum_epochs <- 1000\nmodel %>% fit(model_X_basic_train, Y_train,\n              epochs = num_epochs, batch_size = 100, verbose = 0)", "_____no_output_____" ] ], [ [ "After training the neural network, we can evaluate the performance of our model on the test sample.", "_____no_output_____" ] ], [ [ "# evaluating the performance\nmodel %>% evaluate(model_X_basic_test, Y_test, verbose = 0)", "_____no_output_____" ], [ "# Calculating the performance measures\npred.nn <- model %>% predict(model_X_basic_test)\nMSE.nn = summary(lm((Y_test-pred.nn)^2~1))$coef[1:2]\nR2.nn <- 1-MSE.nn[1]/var(Y_test)\n# printing R^2\ncat(\"R^2 of the neural network:\",R2.nn)", "_____no_output_____" ], [ "MSE.nn = summary(lm((Y_test-pred.nn)^2~1))$coef[1:2]\nMSE.nn", "_____no_output_____" ] ] ]
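The 441 trainable parameters quoted above follow directly from the layer sizes: the model matrix has 10 columns (an intercept plus the 9 regressors), and each dense layer contributes (inputs + 1) * units weights. The notebook itself is in R, so the arithmetic below is just a Python cross-check, not part of the analysis.

```python
# Dense layer parameter count: (n_inputs + 1 bias) * n_units per layer.
layers = [(10, 20), (20, 10), (10, 1)]  # (n_inputs, n_units)
print(sum((n_in + 1) * n_out for n_in, n_out in layers))  # 220 + 210 + 11 = 441
```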
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
cb9d025a2a3bcbb855857af0a0f7d7987bb861ef
107,324
ipynb
Jupyter Notebook
lessons/.ipynb_checkpoints/03_CFL_Condition-checkpoint.ipynb
greenty5/thermoFluidPython
b20be74bc1dcae5b426530572f29f3774e33968a
[ "CC-BY-3.0" ]
1
2020-12-03T14:45:40.000Z
2020-12-03T14:45:40.000Z
lessons/.ipynb_checkpoints/03_CFL_Condition-checkpoint.ipynb
greenty5/thermoFluidPython
b20be74bc1dcae5b426530572f29f3774e33968a
[ "CC-BY-3.0" ]
null
null
null
lessons/.ipynb_checkpoints/03_CFL_Condition-checkpoint.ipynb
greenty5/thermoFluidPython
b20be74bc1dcae5b426530572f29f3774e33968a
[ "CC-BY-3.0" ]
null
null
null
195.489982
13,009
0.888636
[ [ [ "Text provided under a Creative Commons Attribution license, CC-BY. All code is made available under the FSF-approved BSD-3 license. (c) Lorena A. Barba, Gilbert F. Forsyth 2017. Thanks to NSF for support via CAREER award #1149784.", "_____no_output_____" ] ], [ [ "[@LorenaABarba](https://twitter.com/LorenaABarba)", "_____no_output_____" ], [ "12 steps to Navier–Stokes\n=====\n***", "_____no_output_____" ], [ "Did you experiment in Steps [1](./01_Step_1.ipynb) and [2](./02_Step_2.ipynb) using different parameter choices? If you did, you probably ran into some unexpected behavior. Did your solution ever blow up? (In my experience, CFD students *love* to make things blow up.)\n\nYou are probably wondering why changing the discretization parameters affects your solution in such a drastic way. This notebook complements our [interactive CFD lessons](https://github.com/barbagroup/CFDPython) by discussing the CFL condition. And learn more by watching Prof. Barba's YouTube lectures (links below). ", "_____no_output_____" ], [ "Convergence and the CFL Condition\n----\n***", "_____no_output_____" ], [ "For the first few steps, we've been using the same general initial and boundary conditions. With the parameters we initially suggested, the grid has 41 points and the timestep is 0.25 seconds. Now, we're going to experiment with increasing the size of our grid. The code below is identical to the code we used in [Step 1](./01_Step_1.ipynb), but here it has been bundled up in a function so that we can easily examine what happens as we adjust just one variable: **the grid size**.", "_____no_output_____" ] ], [ [ "import numpy #numpy is a library for array operations akin to MATLAB\nfrom matplotlib import pyplot #matplotlib is 2D plotting library\n%matplotlib inline\n\ndef linearconv(nx):\n dx = 2 / (nx - 1)\n nt = 20 #nt is the number of timesteps we want to calculate\n dt = .025 #dt is the amount of time each timestep covers (delta t)\n c = 1\n\n u = numpy.ones(nx) #defining a numpy array which is nx elements long with every value equal to 1.\n u[int(.5/dx):int(1 / dx + 1)] = 2 #setting u = 2 between 0.5 and 1 as per our I.C.s\n\n un = numpy.ones(nx) #initializing our placeholder array, un, to hold the values we calculate for the n+1 timestep\n\n for n in range(nt): #iterate through time\n un = u.copy() ##copy the existing values of u into un\n for i in range(1, nx):\n u[i] = un[i] - c * dt / dx * (un[i] - un[i-1])\n \n pyplot.plot(numpy.linspace(0, 2, nx), u);", "_____no_output_____" ] ], [ [ "Now let's examine the results of our linear convection problem with an increasingly fine mesh. ", "_____no_output_____" ] ], [ [ "linearconv(41) #convection using 41 grid points", "_____no_output_____" ] ], [ [ "This is the same result as our Step 1 calculation, reproduced here for reference.", "_____no_output_____" ] ], [ [ "linearconv(61)", "_____no_output_____" ] ], [ [ "Here, there is still numerical diffusion present, but it is less severe. ", "_____no_output_____" ] ], [ [ "linearconv(71)", "_____no_output_____" ] ], [ [ "Here the same pattern is present -- the wave is more square than in the previous runs.", "_____no_output_____" ] ], [ [ "linearconv(85)", "_____no_output_____" ] ], [ [ "This doesn't look anything like our original hat function. ", "_____no_output_____" ], [ "### What happened?", "_____no_output_____" ], [ "To answer that question, we have to think a little bit about what we're actually implementing in code. 
\n\nIn each iteration of our time loop, we use the existing data about our wave to estimate the speed of the wave in the subsequent time step. Initially, the increase in the number of grid points returned more accurate answers. There was less numerical diffusion and the square wave looked much more like a square wave than it did in our first example. \n\nEach iteration of our time loop covers a time-step of length $\\Delta t$, which we have been defining as 0.025.\n\nDuring this iteration, we evaluate the speed of the wave at each of the $x$ points we've created. In the last plot, something has clearly gone wrong. \n\nWhat has happened is that over the time period $\\Delta t$, the wave is travelling a distance which is greater than `dx`. The length `dx` of each grid box is related to the number of total points `nx`, so stability can be enforced if the $\\Delta t$ step size is calculated with respect to the size of `dx`. \n\n$$\\sigma = \\frac{u \\Delta t}{\\Delta x} \\leq \\sigma_{\\max}$$\n\nwhere $u$ is the speed of the wave; $\\sigma$ is called the **Courant number** and the value of $\\sigma_{\\max}$ that will ensure stability depends on the discretization used. \n\nIn a new version of our code, we'll use the CFL number to calculate the appropriate time-step `dt` depending on the size of `dx`. \n\n", "_____no_output_____" ] ], [ [ "import numpy\nfrom matplotlib import pyplot\n\ndef linearconv(nx):\n    dx = 2 / (nx - 1)\n    nt = 20    #nt is the number of timesteps we want to calculate\n    c = 1\n    sigma = .5\n    \n    dt = sigma * dx\n\n    u = numpy.ones(nx) \n    u[int(.5/dx):int(1 / dx + 1)] = 2\n\n    un = numpy.ones(nx)\n\n    for n in range(nt):  #iterate through time\n        un = u.copy()    ##copy the existing values of u into un\n        for i in range(1, nx):\n            u[i] = un[i] - c * dt / dx * (un[i] - un[i-1])\n        \n    pyplot.plot(numpy.linspace(0, 2, nx), u)", "_____no_output_____" ], [ "linearconv(41)", "_____no_output_____" ], [ "linearconv(61)", "_____no_output_____" ], [ "linearconv(81)", "_____no_output_____" ], [ "linearconv(101)", "_____no_output_____" ], [ "linearconv(121)", "_____no_output_____" ] ], [ [ "Notice that as the number of points `nx` increases, the wave convects a shorter and shorter distance. The number of time iterations we have advanced the solution at is held constant at `nt = 20`, but depending on the value of `nx` and the corresponding values of `dx` and `dt`, a shorter time window is being examined overall. ", "_____no_output_____" ], [ "Learn More\n-----\n***", "_____no_output_____" ], [ "It's possible to do rigorous analysis of the stability of numerical schemes, in some cases. Watch Prof. Barba's presentation of this topic in **Video Lecture 9** on YouTube.", "_____no_output_____" ] ], [ [ "from IPython.display import YouTubeVideo\nYouTubeVideo('Yw1YPBupZxU')", "_____no_output_____" ], [ "from IPython.core.display import HTML\ndef css_styling():\n    styles = open(\"../styles/custom.css\", \"r\").read()\n    return HTML(styles)\ncss_styling()", "_____no_output_____" ] ] ]
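To make the blow-up at `nx = 85` concrete, one can evaluate the Courant number directly for the fixed `dt = 0.025` used in the first version of `linearconv`. This check is an addition for illustration, not part of the original lesson:

```python
# Courant number sigma = c * dt / dx for the fixed-timestep version of linearconv.
c, dt = 1.0, 0.025
for nx in (41, 61, 71, 85):
    dx = 2 / (nx - 1)
    print(nx, c * dt / dx)
# nx=41 -> 0.500, nx=61 -> 0.750, nx=71 -> 0.875, nx=85 -> 1.050 (> 1: unstable)
```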
[ "raw", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "raw" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ] ]
cb9d05b1db2783e195dd889ab5abe29b000de858
4,894
ipynb
Jupyter Notebook
notebooks/Likelihood.ipynb
ts4051/retro
b249f713b19f7c25de16e30edbe7d2eb641cd8d3
[ "Apache-2.0" ]
1
2018-03-02T01:05:52.000Z
2018-03-02T01:05:52.000Z
notebooks/Likelihood.ipynb
ts4051/retro
b249f713b19f7c25de16e30edbe7d2eb641cd8d3
[ "Apache-2.0" ]
30
2018-01-30T21:03:28.000Z
2019-11-07T16:42:07.000Z
notebooks/Likelihood.ipynb
ts4051/retro
b249f713b19f7c25de16e30edbe7d2eb641cd8d3
[ "Apache-2.0" ]
6
2017-07-27T19:49:13.000Z
2019-11-19T13:38:27.000Z
30.02454
367
0.575398
[ [ [ "# Likelihood for Retro", "_____no_output_____" ], [ "To calculate the likelihood of a hypothesis $H$ given observed data $\\boldsymbol{k}$, we construct the extended likelihood given as:", "_____no_output_____" ], [ "$$\\large L(H|\\boldsymbol{k}) = \\prod_{i\\in\\text{DOMs}} \\frac{\\lambda_i^{k_i}} {k_i!} e^{-\\lambda_i} \\prod_{j\\in\\text{hits}}p^j(t_j|H)^{k_j}$$", "_____no_output_____" ], [ "where:\n* $\\lambda_i$ is the expected total charge in DOM $i$ given the hypothesis $H$\n* $k_i$ is the observed total charge in DOM $i$\n* $p^j(t_j|H)$ is the probability of observing a hit a time $t_j$ in a given DOM $j$ under the hypothesis $H$, raised to the power of the charge $k_j$ of that observed hit", "_____no_output_____" ], [ "We can take the logarithm of this to change the products into sums", "_____no_output_____" ], [ "$$\\large \\log L(H|\\boldsymbol{k}) = \\sum_{i\\in\\text{DOMs}} k_i\\log{\\lambda_i} -\\log{{k_i!} - \\lambda_i} +\\sum_{j\\in\\text{hits}} k_j\\log{p^j(t_j|H)} $$", "_____no_output_____" ], [ "Since we're only interested in finding the maximum likelihood, we can omit the constant terms $\\log{k!}$", "_____no_output_____" ], [ "In retro, the expected charge $\\lambda$ as well as the pdfs $p$ are decomposed into the hypothesis dependent part $N_\\gamma(t,x)$ that corresponds to the number of of photons generated by a hypothesis at any given point in space-time and the independent part $p_\\gamma(t,x)$ -- the probability that a given source photon in space-time is registered at a DOM.", "_____no_output_____" ], [ "* The probability $p^j(t_j|H)$ is then simply the sum over all space bins $\\sum_x{N_\\gamma(t_j,x)p^j_\\gamma(t_j,x)}/\\lambda_j$, where $\\lambda_j$ is the normalization to properly normalize the expression to a pdf\n* The time-independent $\\lambda_i$s can be interpreted as the total expected charge, given by $\\sum_x{\\sum_t{p^i_\\gamma(x,t)}\\sum_t{N_\\gamma(x,t)}}$", "_____no_output_____" ], [ "For many DOMs in an event we observe 0 hits, i.e. $k_i = 0$ for many $i$, this means that the sum over $i$ for these spacial cases simplifies to", "_____no_output_____" ], [ "$$\\sum_{i\\in\\text{DOMs}} -\\lambda_i$$", "_____no_output_____" ], [ "Plugging in the abvove expression for $\\lambda_i$ yields:", "_____no_output_____" ], [ "$$\\sum_{i\\in\\text{DOMs}}\\sum_x{\\sum_t{p^i_\\gamma(x,t)}\\sum_t{N_\\gamma(x,t)}}$$", "_____no_output_____" ], [ "Of course only the probabilities $p^i_\\gamma$ are dependent on the DOMs, so we can factorize:", "_____no_output_____" ], [ "$$\\sum_x{\\left(\\sum_{i\\in\\text{DOMs}}\\sum_t{p^i_\\gamma(x,t)}\\right)\\sum_t{N_\\gamma(x,t)}} = \\sum_x{p^{TDI}_\\gamma(x)\\sum_t{N_\\gamma(x,t)}}$$", "_____no_output_____" ], [ "The large sum over the DOMs can therefore be pre-computed, we call this the time-dom-independent (TDI) table, as the time and DOM sums have been evaluated", "_____no_output_____" ], [ "So we will just need to add the additional terms for DOMs with hits and the total likelihood then can be written as:", "_____no_output_____" ], [ "$$\\large \\log L(H|\\boldsymbol{k}) = \\sum_{i\\in\\text{hit DOMs}} k_i\\log{\\sum_x{\\sum_t{p^i_\\gamma(x,t)}\\sum_t{N_\\gamma(x,t)}}} -\\sum_x{p^{TDI}_\\gamma(x)\\sum_t{N_\\gamma(x,t)}} +\\sum_{j\\in\\text{hits}} k_j\\log{\\sum_x{N_\\gamma(t_j,x)p_\\gamma(t_j,x)}/\\lambda_j}$$", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
cb9d0aabced4bc3df34d6657a0695ea866d01a1c
14,529
ipynb
Jupyter Notebook
Calculadora/Calculadora.ipynb
kendiaka09n/Python
a7fff9cf4e8935786133228671fdbff58753786d
[ "MIT" ]
null
null
null
Calculadora/Calculadora.ipynb
kendiaka09n/Python
a7fff9cf4e8935786133228671fdbff58753786d
[ "MIT" ]
null
null
null
Calculadora/Calculadora.ipynb
kendiaka09n/Python
a7fff9cf4e8935786133228671fdbff58753786d
[ "MIT" ]
null
null
null
51.704626
1,800
0.571134
[ [ [ "# Calculadora", "_____no_output_____" ], [ "Purpose of this project is the creation of a simple calculator using python code.", "_____no_output_____" ] ], [ [ "def adicao(x,y):\n return x+y\n\ndef subtracao(x,y):\n return x-y\n\ndef multiplicacao(x,y):\n return x*y\n\ndef divisao(x,y):\n return x/y\n\ndef escolhadaoperacao():\n while True:\n try:\n escolha = int(input('Indique que operação você gostaria de realizar \\n'))\n except:\n print('Você não digitou um número corretamente')\n continue\n else:\n if escolha > 4 or escolha <=0:\n print('Operação não cadastrada')\n print(escolhadaoperacao())\n continue \n else:\n print('Obrigado por indicar uma operação')\n break\n break\n finally:\n print('Obrigado!\\n')\n print(primeironumero())\n return escolha\n \ndef primeironumero():\n while True:\n try:\n num1 = float(input ('Indique o primeiro número da operação que você gostaria de realizar \\n'))\n except:\n print(\"Você não digitou um número corretamente\")\n continue\n else:\n print('Obrigado por digitar um número')\n break\n finally:\n print (\"Obrigado!\")\n print (segundonumero())\n return num1\n \ndef segundonumero():\n while True:\n try:\n num2 = float(input ('Indique o sgundo número da operação que você gostaria de realizar \\n'))\n except:\n print(\"Você não digitou um número corretamente\")\n continue\n else:\n print('Obrigado por digitar um número')\n break\n finally:\n print (\"Obrigado!\")\n print (conta())\n return num2\n\ndef conta():\n if escolha == 1:\n print(\"Operação \", num1 ,\" + \", num2, \" = \", adicao(num1,num2))\n elif escolha == 2:\n print(\"Operação \", num1 ,\" - \", num2, \" = \", subtracao(num1,num2))\n elif escolha == 3:\n print(\"Operação \", num1 ,\" / \", num2, \" = \", divisao(num1,num2))\n elif escolha == 4:\n print(\"Operação \", num1 ,\" * \", num2, \" = \", multiplicacao(num1,num2))\n else:\n print(\"Opção incorreta!\")\n", "_____no_output_____" ], [ "#######################Python Calculator##########################)\nprint(\"1 - Soma \\n2 - Subtração \\n3 - Divisão \\n4 - Multiplicação\")\nescolhadaoperacao()", "1 - Soma \n2 - Subtração \n3 - Divisão \n4 - Multiplicação\nIndique que operação você gostaria de realizar \n1\nObrigado por indicar uma operação\nObrigado!\n\nIndique o primeiro número da operação que você gostaria de realizar \n1\nObrigado por digitar um número\nObrigado!\nIndique o sgundo número da operação que você gostaria de realizar \n2\nObrigado por digitar um número\nObrigado!\n" ], [ "primeironumero()", "Indique o primeiro número da operação que você gostaria de realizar \n2\nObrigado por digitar um número\nObrigado!\n" ], [ "segundonumero()", "Indique o sgundo número da operação que você gostaria de realizar \n4\nObrigado por digitar um número\nObrigado!\n" ], [ "if escolha == 1:\n print(\"Operação \", num1 ,\" + \", num2, \" = \", adicao(num1,num2))\nelif escolha == 2:\n print(\"Operação \", num1 ,\" - \", num2, \" = \", subtracao(num1,num2))\nelif escolha == 3:\n print(\"Operação \", num1 ,\" / \", num2, \" = \", divisao(num1,num2))\nelif escolha == 4:\n print(\"Operação \", num1 ,\" * \", num2, \" = \", multiplicacao(num1,num2))\nelse:\n print(\"Opção incorreta!\")", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ] ]
cb9d1a869cee28c91bc87654f385648047597115
7,152
ipynb
Jupyter Notebook
3_Materialization/Tutorial7/Materialisation_Test.ipynb
BlockResearchGroup/CSD2_2022
6ecd461937d855397b62ac3ad896b4cbe708ca93
[ "MIT" ]
null
null
null
3_Materialization/Tutorial7/Materialisation_Test.ipynb
BlockResearchGroup/CSD2_2022
6ecd461937d855397b62ac3ad896b4cbe708ca93
[ "MIT" ]
1
2022-02-21T09:09:01.000Z
2022-02-21T09:09:01.000Z
3_Materialization/Tutorial7/Materialisation_Test.ipynb
BlockResearchGroup/CSD2_2022
6ecd461937d855397b62ac3ad896b4cbe708ca93
[ "MIT" ]
null
null
null
23.84
147
0.535375
[ [ [ "print('Materialisation Data Test')", "Materialisation Data Test\n" ], [ "import os\nimport compas\nfrom compas.datastructures import Mesh, mesh_bounding_box_xy\nfrom compas.geometry import Vector, Frame, Scale\n\nHERE = os.getcwd()\n\nFILE_I = os.path.join(HERE, 'blocks and ribs_RHINO', 'sessions', 'bm_vertical_equilibrium', 'simple_tripod.rv2')\nFILE_O1 = os.path.join(HERE, 'blocks and ribs_RHINO', 'data', 'form.json')\nFILE_O2 = os.path.join(HERE, 'blocks and ribs_RHINO', 'data', 'scaled_form.json')\n\nsession = compas.json_load(FILE_I)\n\nmesh = Mesh.from_data(session['data']['form'])\nloader_mesh = Mesh.from_data(session['data']['form'])", "_____no_output_____" ] ], [ [ "### to delete extra faces(more than 4 edges) if subdivided with catmulclark or other weird subdivision that connects the mesh with the ground", "_____no_output_____" ] ], [ [ "delete_faces =[]\n\nfor fkey in mesh.faces():\n if len(mesh.face_vertices(fkey)) > 4:\n delete_faces.append(fkey)\n\nfor fkey in delete_faces: \n mesh.delete_face(fkey)\n mesh.remove_unused_vertices()", "_____no_output_____" ] ], [ [ "### scale up the form if needed", "_____no_output_____" ] ], [ [ "scaled_mesh = mesh.copy()\n\nbox_points = mesh_bounding_box_xy(scaled_mesh)\nbase_mesh = scaled_mesh.from_points(box_points)\ncentroid = base_mesh.centroid()\n#print (centroid)\nframe = Frame(centroid,Vector(1,0,0),Vector(0,1,0))\n\nS = Scale.from_factors([100, 100, 100], frame)\nscaled_mesh.transform(S)", "_____no_output_____" ] ], [ [ "### Visualise and export Initial Mesh", "_____no_output_____" ] ], [ [ "mesh.to_json(FILE_O1)\nscaled_mesh.to_json(FILE_O2)\n\nprint(mesh)", "<Mesh with 37 vertices, 24 faces, 60 edges>\n" ], [ "from pythreejs import *\nimport numpy as np\nfrom IPython.display import display\n\nmesh = scaled_mesh.copy()\n\nvertices = []\nfor face in mesh.faces():\n for v in mesh.face_vertices(face):\n xyz = mesh.vertex_attributes(v, \"xyz\")\n vertices.append(xyz)\n\n# print(vertices)", "_____no_output_____" ], [ "vertices = BufferAttribute(\n array = np.array(vertices,dtype=np.float32),\n normalized = False)\n\n# print(vertices)\n", "_____no_output_____" ], [ "geometry = BufferGeometry(\n attributes={'position': vertices})\n\nprint(geometry)", "BufferGeometry(attributes={'position': <BufferAttribute shape=(96, 3), dtype=float32>})\n" ], [ "geometry.exec_three_obj_method('computeVertexNormals')\n\nmesh_3j = Mesh(geometry=geometry,\n material=MeshLambertMaterial(color='red'),\n position=[0,0,0])", "_____no_output_____" ], [ "print(geometry)", "BufferGeometry(attributes={'position': <BufferAttribute shape=(96, 3), dtype=float32>})\n" ], [ "print(type(mesh_3j))", "<class 'pythreejs.objects.Mesh_autogen.Mesh'>\n" ], [ "c = PerspectiveCamera(position=[0, 5, 5], up=[0, 1, 0],\n children=[DirectionalLight(color='white', position=[3, 5, 1], intensity=0.5)])\n\nscene=Scene(children=[mesh_3j,c, AmbientLight(color='#777777')])\n\n\nrenderer = Renderer(camera=c, \n scene=scene,\n width=800,\n height=800,\n controls=[OrbitControls(controlling=c)])", "/opt/miniconda3/envs/csd2/lib/python3.9/site-packages/jupyter_client/session.py:716: UserWarning: Message serialization failed with:\nOut of range float values are not JSON compliant\nSupporting this message is deprecated in jupyter-client 7, please make sure your message is JSON-compliant\n content = self.pack(content)\n" ], [ "display(renderer)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb9d1e024a12e91925d1f67f9a330950663a8857
100,574
ipynb
Jupyter Notebook
code/exploratory/automate.ipynb
manuflores/grn_learn
eccaac3c589066ddbc50e5084d24ab2f46f55bb5
[ "MIT" ]
1
2019-10-19T09:21:15.000Z
2019-10-19T09:21:15.000Z
code/exploratory/automate.ipynb
manuflores/grn_learn
eccaac3c589066ddbc50e5084d24ab2f46f55bb5
[ "MIT" ]
null
null
null
code/exploratory/automate.ipynb
manuflores/grn_learn
eccaac3c589066ddbc50e5084d24ab2f46f55bb5
[ "MIT" ]
null
null
null
40.456154
200
0.48076
[ [ [ "import numpy as np\nimport pandas as pd\nfrom grn_learn.viz import set_plotting_style\nimport seaborn as sns \nimport matplotlib.pyplot as plt \n\n\nfrom grn_learn import download_and_preprocess_data\nfrom grn_learn import annot_data_trn\nfrom grn_learn import train_keras_multilabel_nn\nfrom sklearn.model_selection import StratifiedKFold\n\nfrom keras.backend import clear_session\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras import regularizers\nfrom keras.utils import np_utils\nfrom keras.metrics import categorical_accuracy\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import KFold\nfrom keras.layers import Dropout\nimport keras.backend as K\nfrom keras import regularizers\n\nfrom sklearn.model_selection import train_test_split\n\nseed = 42 \nnp.random.seed(seed)\n\nset_plotting_style()\n\n%matplotlib inline\n%config InlineBackend.figure_format = 'svg'", "_____no_output_____" ] ], [ [ "### Download, preprocess data for *P. aeru*", "_____no_output_____" ] ], [ [ "org = 'paeru'", "_____no_output_____" ] ], [ [ "g.download_and_preprocess_data('paeru',\n data_dir = 'colombos_'+ org + '_exprdata_20151029.txt')", "_____no_output_____" ], [ "### Annotate dataset using the TRN from the Martinez-Antonio lab", "_____no_output_____" ] ], [ [ "# paeru \npaeru_path = '~/Documents/uni/bioinfo/data/paeru/'\n\ng.annot_data_trn(tf_tf_net_path = paeru_path + 'paeru_tf_tf_net.csv',\n trn_path = paeru_path + 'paeru-trn.csv',\n denoised_data_path= '~/Downloads/',\n org = 'paeru', \n output_path = '~/Downloads/')\n\n#df_trn = pd.read_csv(path + 'paeru-trn.csv', comment= '#')\n\n#tfs = pd.read_csv(path+'paeru_tfs.csv')\n\n# tf_tf_df = get_gene_data(df_trn, 'Target gene', tf_list)", "_____no_output_____" ], [ "denoised = pd.read_csv('~/Downloads/denoised_hot_paeru.csv')\n\ndenoised.head()\n\nregulons_p = denoised[denoised['TG'] == 1]\n\nnon_regulons_p = denoised[denoised['TG'] == 0]\n\nnoise = non_regulons_p.sample(n = 50, replace = False)\n\nregulons_with_noise_p = pd.concat([regulons_p, noise], axis = 0)\n\nnon_regulons_wo_noise = non_regulons_p.drop(noise.index.to_list())\n\n\n#annot = regulons_with_noise.iloc[:, :3]\n\n", "_____no_output_____" ], [ "data_p = regulons_with_noise_p.iloc[:, 3:-13]\n\ntarget_p = regulons_with_noise_p.iloc[:, -13:-1]", "_____no_output_____" ], [ "target_p.head()", "_____no_output_____" ], [ "val_shape = int(data.shape[0] * 0.15)\nX_train, X_test, y_train, y_test = train_test_split(data,\n target,\n shuffle = True,\n test_size=0.2,\n random_state= seed) \n\nx_val = X_train[:val_shape]\npartial_x_train = X_train[val_shape:]\ny_val = y_train[:val_shape]\npartial_y_train = y_train[val_shape:]", "_____no_output_____" ] ], [ [ "### Run keras net on paeru dataset", "_____no_output_____" ] ], [ [ "nn, history = g.train_keras_multilabel_nn(X_train,\n y_train, \n partial_x_train,\n partial_y_train, \n x_val, \n y_val,\n n_units=64,\n epochs=20,\n n_deep_layers=3,\n batch_size=128)", "Model: \"sequential_3\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ndense_11 (Dense) (None, 64) 35840 \n_________________________________________________________________\ndense_12 (Dense) (None, 64) 4160 \n_________________________________________________________________\ndropout_7 (Dropout) (None, 64) 0 \n_________________________________________________________________\ndense_13 (Dense) 
(None, 64) 4160 \n_________________________________________________________________\ndropout_8 (Dropout) (None, 64) 0 \n_________________________________________________________________\ndense_14 (Dense) (None, 64) 4160 \n_________________________________________________________________\ndropout_9 (Dropout) (None, 64) 0 \n_________________________________________________________________\ndense_15 (Dense) (None, 12) 780 \n=================================================================\nTotal params: 49,100\nTrainable params: 49,100\nNon-trainable params: 0\n_________________________________________________________________\n" ], [ "score, accuracy = nn.evaluate(\n X_test,\n y_test, \n batch_size=64, \n verbose=2\n)\n\naccuracy", "_____no_output_____" ] ], [ [ "### B. subti data download", "_____no_output_____" ] ], [ [ "# bsubti \nbsubt_path = '~/Documents/uni/bioinfo/data/bsubti/'", "_____no_output_____" ], [ "g.download_and_preprocess_data('bsubt')\n# data_dir = 'colombos_'+ org + '_exprdata_20151029.txt')", "_____no_output_____" ] ], [ [ "### B. subti annotate dataset using TRN from the Merino Lab", "_____no_output_____" ] ], [ [ "g.annot_data_trn(tf_tf_net_path = bsubt_path + 'bsub-tf-net.csv',\n trn_path = bsubt_path + 'bsubt_trn-l.txt',\n denoised_data_path= '~/Downloads/',\n org = 'bsubt', \n output_path = '~/Downloads/')", "_____no_output_____" ], [ "denoised_b = pd.read_csv('~/Downloads/denoised_hot_bsubt.csv')", "_____no_output_____" ], [ "#denoised.head()\n\nregulons_b = denoised_b[denoised_b['TG'] == 1]\n\nnon_regulons_b = denoised_b[denoised_b['TG'] == 0]\n\nnoise = non_regulons_b.sample(n = 50, replace = False)\n\nregulons_with_noise_b = pd.concat([regulons_b, noise], axis = 0)\n\nnon_regulons_wo_noise = non_regulons_b.drop(noise.index.to_list())\n\n#annot = regulons_with_noise.iloc[:, :3]\n\n", "_____no_output_____" ], [ "data_b = regulons_with_noise_b.iloc[:, 3:-7]\n\ntarget_b = regulons_with_noise_b.iloc[:, -7:-1]", "_____no_output_____" ], [ "data_b[:5, -1]\n", "_____no_output_____" ], [ "val_shape = int(data.shape[0] * 0.15)\nX_train, X_test, y_train, y_test = train_test_split(data,\n target,\n shuffle = True,\n test_size=0.2,\n random_state= seed) \n\nx_val = X_train[:val_shape]\npartial_x_train = X_train[val_shape:]\ny_val = y_train[:val_shape]\npartial_y_train = y_train[val_shape:]", "_____no_output_____" ], [ "nn, history = g.train_keras_multilabel_nn(X_train,\n y_train, \n partial_x_train,\n partial_y_train, \n x_val, \n y_val,\n n_units=64,\n epochs=20,\n n_deep_layers=3,\n batch_size=128)", "Model: \"sequential_2\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ndense_6 (Dense) (None, 64) 80640 \n_________________________________________________________________\ndense_7 (Dense) (None, 64) 4160 \n_________________________________________________________________\ndropout_4 (Dropout) (None, 64) 0 \n_________________________________________________________________\ndense_8 (Dense) (None, 64) 4160 \n_________________________________________________________________\ndropout_5 (Dropout) (None, 64) 0 \n_________________________________________________________________\ndense_9 (Dense) (None, 64) 4160 \n_________________________________________________________________\ndropout_6 (Dropout) (None, 64) 0 \n_________________________________________________________________\ndense_10 (Dense) (None, 6) 390 
\n=================================================================\nTotal params: 93,510\nTrainable params: 93,510\nNon-trainable params: 0\n_________________________________________________________________\n" ], [ "history.head()", "_____no_output_____" ], [ "score, accuracy = nn.evaluate(\n X_test,\n y_test, \n batch_size=64, \n verbose=2\n)\n\naccuracy", "_____no_output_____" ] ], [ [ "## Upload coli data ", "_____no_output_____" ] ], [ [ "denoised = pd.read_csv('~/Downloads/denoised_hot_coli.csv')", "_____no_output_____" ], [ "regulons_e = denoised[denoised['TG'] == 1]\n\nnon_regulons_e = denoised[denoised['TG'] == 0]\n\nnoise = non_regulons_e.sample(n = 50, replace = False)\n\nregulons_with_noise_e = pd.concat([regulons_e, noise], axis = 0)\n\nnon_regulons_wo_noise = non_regulons_e.drop(noise.index.to_list())\n\n\n#annot = regulons_with_noise.iloc[:, :3]\n\n", "_____no_output_____" ], [ "data_e = regulons_with_noise_e.iloc[:, 3:-10]\n\ntarget_e = regulons_with_noise_e.iloc[:, -10:-1]\n", "_____no_output_____" ] ], [ [ "### Set up simulations for E. coli, B. subti, and P. aeru", "_____no_output_____" ] ], [ [ "organisms = ['ecoli', 'bsubti', 'paeru']\n\ndatasets = [(data_e, target_e), (data_b, target_b), (data_p, target_p)]\n\nkfold = KFold(n_splits = 5, shuffle= True, random_state=seed)\n\ncross_val_df = pd.DataFrame()\n\n# Iterate over organisms\nfor ix in range(3):\n \n # \n org = organisms[ix]\n data = datasets[ix]\n \n #Extract datasets\n X = data[0]\n y = data[1]\n \n # Iterate over folds\n for train, test in kfold.split(data[0], data[1]):\n \n # Train test split\n X_train = X.iloc[train, :]\n y_train = y.iloc[train, :]\n X_test = X.iloc[test, :]\n y_test = y.iloc[test, :]\n #print(type(X_train))\n \n # Run neural net\n nn = Sequential()\n \n #initial layer\n nn.add(Dense(128, activation='relu', input_shape=(X_train.shape[1],)))\n \n #extra deep layers\n for i in range(2):\n nn.add(Dense(64, activation='relu',\n kernel_regularizer=regularizers.l2(0.001))\n )\n nn.add(Dropout(0.25))\n \n #add final output layer\n nn.add(Dense(y_train.shape[1], activation='softmax'))\n nn.compile(optimizer='rmsprop',\n loss='binary_crossentropy', \n metrics=['accuracy'])\n \n #print neural net architecture\n nn.summary()\n\n #fit and load history\n \n history = nn.fit(X_train, y_train, epochs=20,\n batch_size= 128,\n verbose = 0)\n \n # Compute accuracy\n score, acc = nn.evaluate(X_test, y_test)\n \n # Store acc in dataframe\n sub_df = pd.DataFrame({'accuracy': [acc],\n 'organism': [org]})\n \n cross_val_df = pd.concat([cross_val_df, sub_df])\n ", "Model: \"sequential_1\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ndense_1 (Dense) (None, 128) 521984 \n_________________________________________________________________\ndense_2 (Dense) (None, 64) 8256 \n_________________________________________________________________\ndropout_1 (Dropout) (None, 64) 0 \n_________________________________________________________________\ndense_3 (Dense) (None, 64) 4160 \n_________________________________________________________________\ndropout_2 (Dropout) (None, 64) 0 \n_________________________________________________________________\ndense_4 (Dense) (None, 9) 585 \n=================================================================\nTotal params: 534,985\nTrainable params: 534,985\nNon-trainable params: 0\n_________________________________________________________________\n294/294 
[==============================] - 1s 2ms/step\nModel: \"sequential_2\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ndense_5 (Dense) (None, 128) 521984 \n_________________________________________________________________\ndense_6 (Dense) (None, 64) 8256 \n_________________________________________________________________\ndropout_3 (Dropout) (None, 64) 0 \n_________________________________________________________________\ndense_7 (Dense) (None, 64) 4160 \n_________________________________________________________________\ndropout_4 (Dropout) (None, 64) 0 \n_________________________________________________________________\ndense_8 (Dense) (None, 9) 585 \n=================================================================\nTotal params: 534,985\nTrainable params: 534,985\nNon-trainable params: 0\n_________________________________________________________________\n293/293 [==============================] - 0s 2ms/step\nModel: \"sequential_3\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ndense_9 (Dense) (None, 128) 521984 \n_________________________________________________________________\ndense_10 (Dense) (None, 64) 8256 \n_________________________________________________________________\ndropout_5 (Dropout) (None, 64) 0 \n_________________________________________________________________\ndense_11 (Dense) (None, 64) 4160 \n_________________________________________________________________\ndropout_6 (Dropout) (None, 64) 0 \n_________________________________________________________________\ndense_12 (Dense) (None, 9) 585 \n=================================================================\nTotal params: 534,985\nTrainable params: 534,985\nNon-trainable params: 0\n_________________________________________________________________\n293/293 [==============================] - 1s 2ms/step\nModel: \"sequential_4\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ndense_13 (Dense) (None, 128) 521984 \n_________________________________________________________________\ndense_14 (Dense) (None, 64) 8256 \n_________________________________________________________________\ndropout_7 (Dropout) (None, 64) 0 \n_________________________________________________________________\ndense_15 (Dense) (None, 64) 4160 \n_________________________________________________________________\ndropout_8 (Dropout) (None, 64) 0 \n_________________________________________________________________\ndense_16 (Dense) (None, 9) 585 \n=================================================================\nTotal params: 534,985\nTrainable params: 534,985\nNon-trainable params: 0\n_________________________________________________________________\n293/293 [==============================] - 0s 2ms/step\nModel: \"sequential_5\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ndense_17 (Dense) (None, 128) 521984 \n_________________________________________________________________\ndense_18 (Dense) (None, 64) 8256 \n_________________________________________________________________\ndropout_9 (Dropout) (None, 64) 0 
\n_________________________________________________________________\ndense_19 (Dense) (None, 64) 4160 \n_________________________________________________________________\ndropout_10 (Dropout) (None, 64) 0 \n_________________________________________________________________\ndense_20 (Dense) (None, 9) 585 \n=================================================================\nTotal params: 534,985\nTrainable params: 534,985\nNon-trainable params: 0\n_________________________________________________________________\n293/293 [==============================] - 0s 2ms/step\nModel: \"sequential_6\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ndense_21 (Dense) (None, 128) 161280 \n_________________________________________________________________\ndense_22 (Dense) (None, 64) 8256 \n_________________________________________________________________\ndropout_11 (Dropout) (None, 64) 0 \n_________________________________________________________________\ndense_23 (Dense) (None, 64) 4160 \n_________________________________________________________________\ndropout_12 (Dropout) (None, 64) 0 \n_________________________________________________________________\ndense_24 (Dense) (None, 6) 390 \n=================================================================\nTotal params: 174,086\nTrainable params: 174,086\nNon-trainable params: 0\n_________________________________________________________________\n97/97 [==============================] - 1s 9ms/step\nModel: \"sequential_7\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ndense_25 (Dense) (None, 128) 161280 \n_________________________________________________________________\ndense_26 (Dense) (None, 64) 8256 \n_________________________________________________________________\ndropout_13 (Dropout) (None, 64) 0 \n_________________________________________________________________\ndense_27 (Dense) (None, 64) 4160 \n_________________________________________________________________\ndropout_14 (Dropout) (None, 64) 0 \n_________________________________________________________________\ndense_28 (Dense) (None, 6) 390 \n=================================================================\nTotal params: 174,086\nTrainable params: 174,086\nNon-trainable params: 0\n_________________________________________________________________\n" ], [ "#cross_val_df.to_csv('../../data/cv_data.csv', index = False)", "_____no_output_____" ], [ "#cross_val_df = pd.read_csv('../../data/cv_data.csv')", "_____no_output_____" ], [ "sns.boxplot?", "_____no_output_____" ], [ "plt.figure(figsize = (6, 3.2)) \n\nsns.boxplot(data = cross_val_df, \n y = 'organism', \n x = 'accuracy',\n color = 'lightgray',\n saturation = 1,\n whis = 1,\n width = 0.7\n #alpha = 0.5\n )\n\nsns.stripplot(data = cross_val_df, \n y = 'organism', \n x = 'accuracy',\n palette = 'Set2',\n #edgecolor = 'gray',\n #linewidth = 0.4,\n size = 10,\n alpha = 0.7)\n\nplt.tight_layout()\nplt.xlim(0.5, 1.01)\n\nplt.savefig('cross_val_org.pdf', dpi = 600)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
cb9d2ee4ab94d9dd68e699069cee8715a76b2a65
852,394
ipynb
Jupyter Notebook
Moringa_School_IPWeek_8_2021_Greg_Katono (1).ipynb
Katonokatono/Decission-Trees-SVM
bf0c0b14bce7c7162287775a33684e1a651bf288
[ "MIT" ]
null
null
null
Moringa_School_IPWeek_8_2021_Greg_Katono (1).ipynb
Katonokatono/Decission-Trees-SVM
bf0c0b14bce7c7162287775a33684e1a651bf288
[ "MIT" ]
null
null
null
Moringa_School_IPWeek_8_2021_Greg_Katono (1).ipynb
Katonokatono/Decission-Trees-SVM
bf0c0b14bce7c7162287775a33684e1a651bf288
[ "MIT" ]
null
null
null
184.580771
320,366
0.837118
[ [ [ "## ***Defining the Question***\nProvided with the dataset from Nairobi Hospital, your are task to build a model that determines whether or not the patient's symptoms indicate that the patient has hypothyroid.", "_____no_output_____" ], [ "## ***Metric For Success***\nThe Metric of Sucess will be to find a decission tree model to determine whether or not a patient has hypothyroid based on the patient's symptoms.", "_____no_output_____" ], [ "## ***Context***\nHypothyroid is a condition in which your thyroid gland doesn't produce enough of certain crucial hormones. An example of the crucial hormone is thyroxin which plays vital roles in digestion, heart and muscle function, brain development and maintenance of bones.With that stated, Nairobi Hospital conducted a clinical camp to test for hypothyroidism.", "_____no_output_____" ], [ "## ***Experimental Design***\n\n\nThe experimental design for this project was in adherence to the CRISP-DM methodology.It will follow the CRISP-DM steps which are:\n\n1.Problem Understanding\n\n2.Data Understanding\n\n3.Data Preparation\n\n4.Modelling\n\n5.Evaluation\n", "_____no_output_____" ], [ "## **Data Relevance**\nThe health care dataset is relevant to build a model that determines whether or not the patient's symptoms indicate that the patient has hypothyroid.", "_____no_output_____" ], [ "***Import the libraries***", "_____no_output_____" ] ], [ [ "#ILl first import the necessary libraries\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n%matplotlib inline\nfrom sklearn.decomposition import PCA\nfrom sklearn.model_selection import train_test_split, GridSearchCV, RandomizedSearchCV\nfrom sklearn import metrics\nfrom sklearn.preprocessing import LabelEncoder, StandardScaler, MinMaxScaler\nfrom sklearn.ensemble import RandomForestClassifier, \\\nGradientBoostingClassifier, AdaBoostClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.tree import export_graphviz\nfrom sklearn.externals.six import StringIO\nfrom IPython.display import Image\nimport pydotplus\nimport warnings\nwarnings.filterwarnings('ignore') ", "_____no_output_____" ], [ "#ILl then load the dataset\ndf = pd.read_csv('http://bit.ly/hypothyroid_data')\ndf.head()", "_____no_output_____" ], [ "#Ill then check the tail of the dataset\ndf.tail()", "_____no_output_____" ], [ "#Ill check the the shape of the dataset\ndf.shape\n# The output is 3163 rows and 26 columns", "_____no_output_____" ], [ "# Ill then use the .info() function which prints information about a DataFrame \ndf.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 3163 entries, 0 to 3162\nData columns (total 26 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 status 3163 non-null object\n 1 age 3163 non-null object\n 2 sex 3163 non-null object\n 3 on_thyroxine 3163 non-null object\n 4 query_on_thyroxine 3163 non-null object\n 5 on_antithyroid_medication 3163 non-null object\n 6 thyroid_surgery 3163 non-null object\n 7 query_hypothyroid 3163 non-null object\n 8 query_hyperthyroid 3163 non-null object\n 9 pregnant 3163 non-null object\n 10 sick 3163 non-null object\n 11 tumor 3163 non-null object\n 12 lithium 3163 non-null object\n 13 goitre 3163 non-null object\n 14 TSH_measured 3163 non-null object\n 15 TSH 3163 non-null object\n 16 T3_measured 3163 non-null object\n 17 T3 3163 non-null object\n 18 TT4_measured 3163 non-null object\n 19 TT4 3163 non-null object\n 20 T4U_measured 3163 non-null object\n 21 T4U 3163 non-null object\n 22 
FTI_measured 3163 non-null object\n 23 FTI 3163 non-null object\n 24 TBG_measured 3163 non-null object\n 25 TBG 3163 non-null object\ndtypes: object(26)\nmemory usage: 642.6+ KB\n" ] ], [ [ "The .info() function shows index dtype and column dtypes, non-null values and memory usage.\n", "_____no_output_____" ], [ "## ***Data Cleaning***", "_____no_output_____" ] ], [ [ "#ILl then ensure uniformity in my data by changing the column names to lowercase\ndf.columns = map(str.lower,df.columns)\n", "_____no_output_____" ], [ "#ILl then change the column names to small leter\ndf.columns", "_____no_output_____" ], [ "#Ill then check for nulls in the dataset\ndf.isnull().sum()\n", "_____no_output_____" ] ], [ [ "Output shows no null values", "_____no_output_____" ] ], [ [ "#Ill then check for unique values in the dataset\n#ILl use the for loop function to iterate through the column names\nfor column in df.columns:\n print(column) \n print(df[column].unique()) \n print(\"Their are\", df[column].nunique(),\"unique values in this column\")\n #spacing purposes the \"********\" \n print(\"***************************************************\") \n", "status\n['hypothyroid' 'negative']\nTheir are 2 unique values in this column\n***************************************************\nage\n['72' '15' '24' '77' '85' '64' '20' '42' '69' '75' '53' '59' '68' '50'\n '78' '65' '28' '43' '40' '84' '?' '83' '63' '73' '36' '27' '48' '71' '60'\n '34' '79' '62' '74' '32' '41' '70' '29' '5' '58' '35' '51' '22' '16' '44'\n '39' '47' '18' '17' '88' '56' '30' '45' '66' '52' '54' '80' '33' '38'\n '92' '67' '46' '13' '89' '61' '49' '26' '57' '31' '90' '81' '25' '37'\n '76' '87' '23' '55' '86' '98' '21' '97' '1' '6' '14' '82' '11' '19' '9'\n '12' '10' '8' '7' '4' '93']\nTheir are 93 unique values in this column\n***************************************************\nsex\n['M' 'F' '?']\nTheir are 3 unique values in this column\n***************************************************\non_thyroxine\n['f' 't']\nTheir are 2 unique values in this column\n***************************************************\nquery_on_thyroxine\n['f' 't']\nTheir are 2 unique values in this column\n***************************************************\non_antithyroid_medication\n['f' 't']\nTheir are 2 unique values in this column\n***************************************************\nthyroid_surgery\n['f' 't']\nTheir are 2 unique values in this column\n***************************************************\nquery_hypothyroid\n['f' 't']\nTheir are 2 unique values in this column\n***************************************************\nquery_hyperthyroid\n['f' 't']\nTheir are 2 unique values in this column\n***************************************************\npregnant\n['f' 't']\nTheir are 2 unique values in this column\n***************************************************\nsick\n['f' 't']\nTheir are 2 unique values in this column\n***************************************************\ntumor\n['f' 't']\nTheir are 2 unique values in this column\n***************************************************\nlithium\n['f' 't']\nTheir are 2 unique values in this column\n***************************************************\ngoitre\n['f' 't']\nTheir are 2 unique values in this column\n***************************************************\ntsh_measured\n['y' 'n']\nTheir are 2 unique values in this column\n***************************************************\ntsh\n['30' '145' '0' '430' '7.30' '138' '7.70' '21' '92' '48' '36' '15' '15.30'\n '25' '61' '28' '170' '54' '216' '56' '71' '46' '70' '34' 
'53' '9.40'\n '126' '10' '530' '35' '65' '57' '125' '23' '80' '117' '49' '66' '8.20'\n '150' '?' '18' '165' '164' '24' '90' '77' '19' '58' '100' '213' '17'\n '235' '153' '13' '31' '109' '260' '43' '12' '11' '55' '6.50' '20' '7.50'\n '14' '60' '140' '33' '8.70' '0.25' '10.70' '82' '45' '42' '41' '160' '16'\n '89' '44' '176' '6.40' '183' '29' '37' '39' '7.90' '59' '68' '38' '47'\n '143' '6.60' '288' '96' '0.50' '1.20' '1.90' '0.09' '3.10' '0.40' '0.85'\n '0.30' '0.90' '1.50' '4.60' '0.20' '5.80' '4' '1.40' '2' '2.60' '2.90'\n '0.80' '4.90' '10.30' '2.30' '0.70' '0.60' '1.30' '2.50' '5.10' '1.70'\n '2.70' '22' '8.10' '3.30' '5' '3.20' '1.80' '1.10' '0.46' '1' '5.60'\n '4.40' '16.50' '2.10' '6.30' '5.50' '7.40' '5.90' '0.15' '14.90' '2.20'\n '9.70' '9.60' '1.60' '3.70' '3' '2.80' '0.10' '4.20' '27' '3.50' '0.05'\n '19.20' '8' '9.10' '3.60' '7.80' '3.80' '8.50' '9.20' '52' '2.40' '3.90'\n '0.03' '0.04' '26' '6.10' '0.65' '0.92' '7.60' '8.40' '14.40' '1.05'\n '0.37' '178' '6.90' '4.50' '4.70' '40' '6' '4.10' '14.30' '5.40' '8.30'\n '0.06' '8.80' '8.90' '7.10' '4.80' '0.43' '6.70' '200' '8.60' '86' '0.21'\n '9' '0.26' '3.40' '6.20' '17.40' '85' '1.83' '63' '13.30' '0.02' '0.42'\n '9.50' '0.84' '0.88' '4.30' '0.67' '0.81' '5.20' '9.80' '76' '0.62'\n '0.01' '11.40' '88' '0.68' '0.07' '0.69' '0.77' '9.90' '0.64' '13.20'\n '6.80' '0.28' '11.60' '0.91' '1.01' '0.23' '0.35' '50' '32' '0.83' '0.08']\nTheir are 240 unique values in this column\n***************************************************\nt3_measured\n['y' 'n']\nTheir are 2 unique values in this column\n***************************************************\nt3\n['0.60' '1.70' '0.20' '0.40' '1.20' '1.10' '1.30' '1.90' '?' '0.80' '2.20'\n '1.50' '2.70' '2' '0.30' '2.10' '0.50' '0.70' '1' '1.40' '2.30' '2.40'\n '0.90' '1.80' '0' '2.80' '1.60' '6.70' '3.30' '3' '2.50' '2.60' '4'\n '9.80' '3.90' '3.40' '3.80' '4.50' '3.20' '3.70' '4.30' '2.90' '3.10'\n '3.50' '3.60' '6.60' '4.90' '8.90' '4.10' '5' '5.30' '7.60' '7.30' '4.20'\n '4.40' '5.10' '7' '0.10' '0.05' '4.70' '6.20' '5.50' '4.60' '6.10' '8.10'\n '5.20' '5.60' '4.80' '10.20' '8.60']\nTheir are 70 unique values in this column\n***************************************************\ntt4_measured\n['y' 'n']\nTheir are 2 unique values in this column\n***************************************************\ntt4\n['15' '19' '4' '6' '57' '27' '54' '34' '39' '7.60' '53' '38' '98' '44'\n '37' '81' '13' '17' '12' '50' '23' '14' '3.90' '52' '32' '30' '84' '10'\n '55' '24' '28' '11' '6.60' '31' '33' '41' '9' '78' '42' '8.10' '65' '16'\n '9.70' '46' '22' '61' '2.30' '21' '7.50' '18' '66' '25' '43' '2' '3' '58'\n '59' '71' '74' '56' '40' '20' '48' '230' '70' '4.10' '63' '5.30' '2.90'\n '62' '75' '68' '45' '47' '36' '80' '109' '8.60' '83' '82' '101' '76'\n '103' '112' '206' '110' '93' '79' '121' '?' 
'107' '88' '187' '254' '115'\n '113' '260' '117' '164' '97' '64' '129' '111' '106' '95' '87' '105' '90'\n '67' '134' '35' '193' '143' '130' '108' '86' '96' '92' '99' '216' '152'\n '125' '138' '194' '91' '120' '102' '170' '126' '116' '140' '94' '159'\n '85' '210' '89' '151' '247' '139' '153' '178' '123' '137' '161' '7.30'\n '72' '202' '122' '124' '157' '118' '104' '135' '186' '241' '131' '119'\n '100' '220' '176' '136' '183' '155' '208' '199' '154' '169' '128' '235'\n '195' '267' '232' '197' '212' '149' '127' '245' '132' '162' '77' '142'\n '69' '173' '141' '156' '181' '189' '60' '114' '198' '147' '191' '177'\n '174' '207' '168' '73' '160' '145' '200' '182' '158' '203' '209' '217'\n '430' '213' '218' '163' '296' '167' '148' '166' '185' '248' '172' '144'\n '150' '165' '196' '308' '146' '426' '224' '29' '204' '201' '133' '180'\n '184' '450' '244' '301' '255' '252' '396' '223' '261' '250' '179' '219'\n '190' '51' '221' '171' '242' '275' '211' '49' '239' '205' '222' '256'\n '214' '282' '287' '253' '269' '175' '228' '333' '258' '236' '225' '263'\n '283' '359' '188' '240']\nTheir are 269 unique values in this column\n***************************************************\nt4u_measured\n['y' 'n']\nTheir are 2 unique values in this column\n***************************************************\nt4u\n['1.48' '1.13' '1' '1.04' '1.28' '1.19' '0.86' '1.05' '1.21' '1.02' '0.92'\n '1.29' '0.98' '1.08' '1.01' '1.18' '1.10' '1.27' '0.83' '1.11' '0.78'\n '1.39' '1.12' '0.93' '0.99' '0.90' '1.06' '0.68' '0.67' '1.14' '0.80'\n '0.96' '0.95' '1.46' '1.03' '0.97' '0.94' '1.09' '1.24' '1.23' '1.53'\n '1.26' '1.40' '1.17' '0.87' '0.61' '0.70' '1.07' '1.73' '1.45' '1.20'\n '0.89' '0.84' '0.91' '0.85' '1.35' '0.82' '1.50' '0.74' '1.71' '1.79'\n '0.73' '0.77' '1.25' '?' '0.88' '0.72' '0.31' '0' '0.79' '0.81' '0.71'\n '1.93' '1.32' '0.66' '1.68' '1.42' '1.70' '1.83' '1.41' '1.34' '1.65'\n '1.22' '0.75' '1.74' '0.69' '1.51' '0.76' '1.16' '0.35' '0.62' '0.54'\n '1.36' '1.33' '1.30' '0.48' '1.59' '1.75' '1.86' '1.38' '1.43' '1.62'\n '2.01' '0.30' '1.31' '1.15' '1.57' '0.55' '1.66' '0.52' '1.97' '1.47'\n '0.36' '1.88' '1.55' '0.34' '1.67' '0.58' '0.60' '0.59' '1.37' '1.76'\n '0.64' '1.80' '0.41' '0.20' '1.77' '0.33' '1.54' '0.46' '0.32' '1.44'\n '1.56' '0.56' '1.52' '2.03' '0.65' '0.63' '1.96' '0.53' '0.47' '0.29'\n '2.21' '1.69' '1.64' '1.49' '1.72' '1.58' '0.57' '1.61' '0.28' '2' '1.63'\n '0.27' '1.94' '0.50' '0.49' '0.40' '0.38']\nTheir are 159 unique values in this column\n***************************************************\nfti_measured\n['y' 'n']\nTheir are 2 unique values in this column\n***************************************************\nfti\n['10' '17' '0' '6' '44' '23' '63' '32' '7.50' '61' '41' '76' '45' '34'\n '81' '11' '16' '46' '22' '12' '5' '47' '29' '37' '60' '8.40' '8.50' '24'\n '28' '7' '49' '36' '15' '9' '80' '53' '7.20' '68' '6.60' '48' '21' '14'\n '2' '20' '1.40' '56' '3' '51' '13' '43' '78' '52' '64' '26' '35' '31'\n '133' '3.40' '54' '8.90' '57' '5.50' '58' '55' '2.80' '66' '4' '40' '50'\n '8.70' '87' '89' '112' '123' '99' '92' '104' '69' '196' '107' '106' '74'\n '97' '?' 
'122' '84' '114' '124' '241' '119' '120' '136' '85' '127' '839'\n '137' '197' '96' '77' '108' '82' '72' '94' '91' '240' '121' '109' '79'\n '100' '98' '73' '117' '165' '103' '88' '101' '126' '132' '128' '143'\n '152' '141' '186' '113' '102' '125' '93' '105' '159' '188' '71' '149'\n '116' '83' '176' '161' '86' '140' '111' '158' '115' '171' '90' '95' '70'\n '135' '134' '178' '110' '170' '205' '695' '155' '118' '741' '131' '129'\n '142' '153' '75' '254' '246' '139' '185' '157' '147' '164' '130' '222'\n '650' '174' '182' '144' '169' '257' '145' '345' '180' '167' '151' '154'\n '150' '219' '228' '65' '187' '166' '195' '208' '138' '33' '173' '146'\n '211' '181' '190' '247' '221' '395' '216' '212' '148' '162' '220' '370'\n '450' '160' '59' '189' '203' '168' '179' '264' '470' '184' '163' '67'\n '213' '354' '292' '359' '172' '156' '334' '177' '214' '194' '192' '215'\n '217' '271' '250' '244' '308' '881' '258' '316' '200' '711' '256' '232'\n '202' '39' '248' '237' '259' '445' '19' '852' '42' '235' '634' '283'\n '305' '204' '229' '332' '550' '249' '263' '615' '266' '198' '175' '209'\n '299' '193' '227' '223' '337' '210' '272' '702' '218' '239' '355' '482'\n '347' '485' '612' '428']\nTheir are 281 unique values in this column\n***************************************************\ntbg_measured\n['n' 'y']\nTheir are 2 unique values in this column\n***************************************************\ntbg\n['?' '28' '34' '0' '19' '30' '25' '48' '39' '31' '16' '21' '37' '40' '27'\n '3.10' '38' '108' '20' '36' '17' '23' '35' '32' '63' '9.30' '15' '26'\n '33' '41' '110' '22' '43' '53' '80' '47' '29' '13' '24' '18' '45' '86'\n '50' '8.40' '52' '11' '12' '14' '65' '7.30' '8.50' '69' '122']\nTheir are 53 unique values in this column\n***************************************************\n" ] ], [ [ "After checking the unique values, I have noticed the character \"?\" in some of the columns. I'll convert these to nulls so that I can impute them.", "_____no_output_____" ] ], [ [ "# I'll then replace the \"?\" with NaN\ndf = df.replace(['?'], np.nan)", "_____no_output_____" ], [ "# I'll then check for nulls again\ndf.isnull().sum()", "_____no_output_____" ], [ "# I'll then drop the \"tbg\" column since it has many null values\ndf.drop(['tbg', 'tbg_measured'], axis = 1, inplace = True)", "_____no_output_____" ], [ "# I'll then confirm it has been dropped\ndf.head()\n# They are dropped", "_____no_output_____" ], [ "# I'll then check the datatypes again \ndf.dtypes\n", "_____no_output_____" ] ], [ [ "The output shows that all the columns are of type object (string). I'll convert them into appropriate data types.", "_____no_output_____" ] ], [ [ "\n# I'll then use LabelEncoder because the categorical columns mostly have two levels\n# initiating the label encoder object\nlabelenc = LabelEncoder()\ndf['status'] = labelenc.fit_transform(df['status'])\n\n# replacing string values with numbers 0 and 1 \n# f/false is transformed to 0 and t/true is transformed to 1,\n# y/yes is transformed to 1 and n/no is transformed to 0. \ndf['sex'].replace({'M':0, 'F':1}, inplace=True)\ndf.replace({'f':0,'t':1, 'y':1, 'n':0}, inplace=True)\n\n# previewing the first five observations of the dataset\ndf.head()\n\n", "_____no_output_____" ] ], [ [ "***Meaning***\n\nIn the above output:\n\n1. 0 represents hypothyroid and 1 represents negative.\n2. 0 represents male and 1 represents female.\n3. f/false maps to 0 and t/true maps to 1.\n4. y/yes maps to 1 and n/no maps to 0.\n", "_____no_output_____" ] ], [ [ "# I'll then confirm that the data types have been changed\ndf.dtypes\n", "_____no_output_____" ] ], [ [ "***Imputation***\n\nIn order to choose the appropriate imputation procedure, there are things to consider, like knowing which of your variables are continuous and which are categorical.\n\n", "_____no_output_____" ] ], [ [ "# I'll first create a copy for imputation\ndf_copy = df.copy(deep=True)", "_____no_output_____" ], [ "df_copy.head()", "_____no_output_____" ], [ "# I'll then convert columns of object dtype to numeric dtype using the pd.to_numeric function\nconvert = df_copy.columns[df_copy.dtypes == object]\n\ndf_copy[convert] = df_copy[convert].apply(pd.to_numeric, errors='coerce')\n\n# I'll then confirm the change has taken effect\ndf_copy.dtypes", "_____no_output_____" ], [ "# I'll then do a value count to find the number of occurrences of each value\ndf_copy['sex'].value_counts()", "_____no_output_____" ] ], [ [ "**The output reads 2255 females and 908 males.**\n\n**For a categorical variable, imputing with the mode is generally used.**", "_____no_output_____" ] ], [ [ "# I'll then fix the dataset. In order to start imputing, I have to separate the numeric columns from the string ones\n# so as to avoid 'can only concatenate str (not \"int\") to str'. \n# Dealing with the null values\nnumerical_columns = ['age', 'tsh', 't4u', 't3', 'tt4', 'fti']\n\n# filling missing values in the numerical columns with the mean of each column\nfor col in numerical_columns:\n    df_copy[col].fillna(df_copy[col].mean(), inplace=True)\ndf_copy['sex'].fillna(1.0, inplace = True)\n\n# checking for null values after imputing\ndf_copy.isnull().sum()", "_____no_output_____" ] ], [ [ "**I used fillna(1.0) for sex because 1.0 was the most frequent value in the column.**", "_____no_output_____" ], [ "Filling sex with 1.0 is therefore equivalent to imputing the categorical column with its mode.\n", "_____no_output_____" ] ], [ [ "# I'll then confirm whether there are still null values.\ndf_copy.isnull().sum()\n", "_____no_output_____" ] ], [ [ "***Conclusion***\n\nFor imputing, I had to differentiate between categorical variables and continuous variables. Mean and median are generally used for continuous (numeric) variables, while for categorical variables we use the mode (the most repeated value).", "_____no_output_____" ], [ "## ***Exploratory Data Analysis***\nWe can define EDA as the process of performing initial investigations on data so as to unravel patterns, spot anomalies, test hypotheses and check assumptions.", "_____no_output_____" ], [ "### ***Univariate Analysis***\n", "_____no_output_____" ] ], [ [ "# I'll first check for outliers in the dataset with boxplots\ncolumn_1 = ['age', 't3', 'tsh', 'fti', 't4u', 'tt4']\n\n# I'll then craft a list of colors for the boxplots\ncolors = ['cyan','magenta','yellow','black','blue','green']\n\n# plotting the boxplots\nfig, axes = plt.subplots(2,3, figsize=(18,9))\nplt.suptitle('Boxplots Showing Outliers in Numerical Columns', fontsize=18, color='darkblue', y=0.93)\n# I'll then use a for loop for iteration\nfor ax, col, color in zip(axes.flatten(), column_1, colors):\n    sns.boxplot(df_copy[col], ax=ax, color=color)\n    ax.set_facecolor('antiquewhite')\nplt.show();", "_____no_output_____" ] ], [ [ "**I will not drop the outliers, as handling them would take up a huge percentage of the project.**", "_____no_output_____" ] ], [ [ "# I'll then plot a bar graph to show the distribution in the sex column\ndf_copy.sex.value_counts().plot(kind='bar')\nplt.xticks((np.arange(0, 2,)), ('Female', 'Male'), rotation=360)\nplt.title('Number of Men and Women in the dataset', fontsize=15, color='black')\nplt.show()\n# Females are more", "_____no_output_____" ], [ "# I'll then plot a histogram to show the distribution of the patients' ages\ndf_copy.age.plot.hist(grid=False, bins=20, rwidth=0.9,\n                      color='blue')\nplt.title('Age distribution of patients')\nplt.xlabel('Age')\nplt.ylabel('Number')\nplt.grid(axis='y', alpha=0.75)\n", "_____no_output_____" ] ], [ [ "**The output shows that there is a high number of patients between 50 and 60.**", "_____no_output_____" ], [ "### ***Bivariate Analysis***\nBivariate analysis involves two different variables. It aims to find out the causes and relationships between those two variables.\n\n", "_____no_output_____" ] ], [ [ "# I'll first preview my dataset\ndf_copy.head()", "_____no_output_____" ], [ "# I'll first check the overall correlation of the columns\ncorr = df_copy.corr()\ncorr", "_____no_output_____" ], [ "# I'll then plot a heatmap to show the correlation of the variables\ncorrelation = df_copy.corr() \nplt.figure(figsize = (20,10))\nsns.heatmap(correlation, xticklabels=correlation.columns, yticklabels=correlation.columns, annot=True)\nplt.title('A Heatmap of Variable Correlations in our Dataset', color='black')\nplt.show()\n", "_____no_output_____" ] ], [ [ "***Conclusion***\n\n1. A problem encountered was that the pairplot was not running.\n2. From the above EDA, I chose not to drop the outliers for this project because doing so would remove a high percentage of the data.\n3. I also found out that most patients were female.\n4. Most patients were between the ages of 50 and 60.\n", "_____no_output_____" ], [ "## ***Modelling***", "_____no_output_____" ], [ "### ***Random Forest Classifier***\n", "_____no_output_____" ] ], [ [ "# I'll first separate the dependent variable from the independent variables\nindependent = df_copy.drop('status', 1)\ndependent = df_copy['status']", "_____no_output_____" ], [ "# I'll then split the data into training and test sets.\nX_train, X_test, y_train, y_test = train_test_split(independent, dependent, test_size=.25, random_state=34)\n# I'll then print out the shape of the train set and test set\nprint('X_train shape: ', X_train.shape, '\\nY_Train Shape:', y_train.shape)\nprint('X_test shape: ', X_test.shape, '\\nY_Test Shape:', y_test.shape)", "X_train shape:  (2372, 23) \nY_Train Shape: (2372,)\nX_test shape:  (791, 23) \nY_Test Shape: (791,)\n" ], [ "# I'll then scale the train and test sets using the standard scaler\nscaler = StandardScaler()\nX_train = scaler.fit_transform(X_train)\nX_test = scaler.transform(X_test)", "_____no_output_____" ], [ "# I'll then instantiate the random forest classifier object\nrfc = RandomForestClassifier(n_estimators=100, random_state=42, min_samples_split=20, max_depth=5)\nrfc = rfc.fit(X_train, y_train)\nrfc_pred = rfc.predict(X_test)", "_____no_output_____" ], [ "# I'll check my model's predictions\ngreg_Predicted = pd.DataFrame({'Actual': y_test, 'Predicted': rfc_pred})\ngreg_Predicted.head()", "_____no_output_____" ], [ "# I'll then print out the error metrics and the accuracy\nprint('Mean Absolute Error:', metrics.mean_absolute_error(y_test, rfc_pred)) \nprint('Mean Squared Error:', metrics.mean_squared_error(y_test, rfc_pred)) \nprint('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test, rfc_pred)))\nprint('R2 squared:', metrics.r2_score(y_test, rfc_pred)) \nprint('Accuracy Score:', metrics.accuracy_score(y_test, rfc_pred))", "Mean Absolute Error: 0.008849557522123894\nMean Squared Error: 0.008849557522123894\nRoot Mean Squared Error: 0.09407208683835973\nR2 squared: 0.7494343379491357\nAccuracy Score: 0.9911504424778761\n" ] ], [ [ "***The model shows 99% accuracy; it could be overfitting.***", "_____no_output_____" ] ], [ [ "\n# I'll then plot one of the decision trees from the random forest\ndot_data = StringIO()\ntest_features = df_copy.columns.to_list()\ntest_features.remove('status')\ntree = rfc.estimators_[50]\n\nexport_graphviz(tree, out_file=dot_data, filled=True, rounded=True, \\\n                special_characters=True, feature_names=test_features)\ngraph = pydotplus.graph_from_dot_data(dot_data.getvalue())\ngraph.write_png('hypothyroidism_forest.png')\nImage(graph.create_png())", "_____no_output_____" ] ], [ [ "***Conclusion***\n\nThe plot shows the most important features in predicting whether a person is hypothyroid or not.\n\n**The most important feature is the FTI test, which accounts for 76% of the total feature importance.**\n", "_____no_output_____" ], [ "## ***Gradient Boosting Classifier***", "_____no_output_____" ] ], [ [ "# I'll then split the data into training and test sets.\nX_train, X_test, y_train, y_test = train_test_split(independent, dependent, test_size=.25, random_state=34)\n", "_____no_output_____" ], [ "# I'll then scale the train and test sets using MinMaxScaler\nmscaler = MinMaxScaler()\nX_train = mscaler.fit_transform(X_train)\nX_test = mscaler.transform(X_test)", "_____no_output_____" ], [ "# I'll first create a list of candidate rates to find the best learning rate\n# (named rates rather than list, to avoid shadowing the builtin)\nrates = [.05, .075, .1, .25, .5, .75, 1]\n\nfor x in rates:\n    gboost = GradientBoostingClassifier(n_estimators=100, learning_rate=x, \\\n                                        max_features=2, max_depth=5, random_state=42)\n    gboost.fit(X_train, y_train)\n\n    print('Learning rate: ', x)\n    print('Training set accuracy score: {0:.3f}' .format(gboost.score(X_train, y_train)))\n    print('Test set accuracy score: {0:.3f}' .format(gboost.score(X_test, y_test)))\n\n# The output shows the best learning rate is 0.25", "_____no_output_____" ], [ "# I'll then get the accuracy score of the model\ngb_clf = GradientBoostingClassifier(n_estimators=20, learning_rate=0.25, \\\n                                    max_features=2, max_depth=5, random_state=42)\ngb_clf.fit(X_train, y_train)\ngb_pred = gb_clf.predict(X_test)\nprint('\\nAccuracy Score: ', metrics.accuracy_score(y_test, gb_pred))\n", "_____no_output_____" ] ], [ [ "## ***ADA Boost Classifier***", "_____no_output_____" ] ], [ [ "# I'll then split the data into training and test sets.\nX_train, X_test, y_train, y_test = train_test_split(independent, dependent, test_size=.25, random_state=34)", "_____no_output_____" ], [ "# I'll then instantiate the AdaBoost classifier object\n\nADA = AdaBoostClassifier(n_estimators=50, learning_rate=1)\nADA_boost = ADA.fit(X_train, y_train)\nADApred = ADA_boost.predict(X_test)", "_____no_output_____" ], [ "# I'll then get the accuracy of the model\nfrom IPython.core.interactiveshell import InteractiveShell\nInteractiveShell.ast_node_interactivity = \"all\"\n\nprint('Ada Boost Classifier Accuracy: ', round(metrics.accuracy_score(y_test, ADApred) * 100, 2),'%')", "Ada Boost Classifier Accuracy:  99.12 %\n" ] ], [ [ "## ***Support Vector Machines***", "_____no_output_____" ] ], [ [ "# I'll then plot the data points, coloured by status\nplt.figure(figsize=(10,8))\nsample_hypo = df_copy.sample(300, random_state=308)\nsns.lmplot('tsh', 'tt4', data=sample_hypo, hue='status', legend=False, palette='Set1', fit_reg=False, scatter_kws={'s': 70})\nplt.legend({'Hypothyroidism': 0, 'Negative': 1})\nplt.title('TSH against TT4, coloured by Status')\nplt.show();\n", "_____no_output_____" ] ], [ [ "### ***Fitting the Model***", "_____no_output_____" ] ], [ [ "# I'll then select the input features for the model\nfitting = sample_hypo[['tsh', 'tt4']].values\n# specifying the label for the model\nlabel_type = sample_hypo['status'].values\nX_train, X_test, y_train, y_test = train_test_split(fitting, label_type, test_size=.25, random_state=34)", "_____no_output_____" ], [ "# fitting the model\nsvmodel = SVC(kernel = 'linear')\nsvclass = svmodel.fit(X_train, y_train)", "_____no_output_____" ], [ "# Create a mesh grid for our graph\ndef make_meshgrid(x, y, h=.02):\n    x_min, x_max = x.min() - 1, x.max() + 1\n    y_min, y_max = y.min() - 1, y.max() + 1\n    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n    return xx, yy\n\n# A function that draws the decision regions of the passed classifier\ndef plot_contours(ax, clf, xx, yy, **params):\n    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])  # use the clf argument rather than a global\n    Z = Z.reshape(xx.shape)\n    out = ax.contourf(xx, yy, Z, **params)\n    return out\n\nfig, ax = plt.subplots()\n# title for the plots\ntitle = ('Hypothyroid Classification with linear SVC')\n# Set-up grid for plotting.\nt_sam = np.random.choice(fitting[:, 0], 300)\nt_sam2 = np.random.choice(fitting[:, 1], 300)\nX0, X1 = t_sam, t_sam2\nxx, yy = make_meshgrid(X0, X1)\n\nplot_contours(ax, svclass, xx, yy, cmap=plt.cm.coolwarm, alpha=0.8)\nax.scatter(X0, X1, c=label_type, cmap=plt.cm.coolwarm, s=20, edgecolors='k')\n# ax.set_ylabel('TT4')\n# ax.set_xlabel('TSH')\nax.set_xticks(())\n# ax.legend(['Hypothyroid', 'Negative'])\nax.set_yticks(())\nax.set_title(title)\nplt.show()", "_____no_output_____" ], [ "# I'll first take a sample of the dataframe\nsample = df_copy.sample(300, random_state=308)\n", "_____no_output_____" ], [ "# input for the model\nsample_feat = sample[['tsh', 'tt4']].values\n# specifying the label for the model\nkatono = sample['status'].values\n\nsample_feat.shape\n", "_____no_output_____" ], [ "X_train, X_test, y_train, y_test = train_test_split(sample_feat, katono, test_size=.25, random_state=34)", "_____no_output_____" ], [ "model3 = SVC(kernel='poly', degree=3, gamma='auto', random_state=308)\n\n# I'll then train the model using the training sets\nmodel3.fit(X_train, y_train)", "_____no_output_____" ], [ "# visualizing the data points in the kernel\n# Create a mesh grid for our graph\ndef make_meshgrid(x, y, h=.02):\n    x_min, x_max = x.min() - 1, x.max() + 1\n    y_min, y_max = y.min() - 1, y.max() + 1\n    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n    return xx, yy\n\n# A function that draws the decision regions of the passed classifier\ndef plot_contours(ax, clf, xx, yy, **params):\n    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])  # use the clf argument rather than a global\n    Z = Z.reshape(xx.shape)\n    out = ax.contourf(xx, yy, Z, **params)\n    return out\n\nfig, ax = plt.subplots()\n# title for the plots\ntitle = ('Hypothyroid Classification with polynomial SVC')\n\n# Set-up grid for plotting.\nX0, X1 = sample_feat[:, 0], sample_feat[:, 1]\nxx, yy = make_meshgrid(X0, X1)\n\nplot_contours(ax, model3, xx, yy, cmap=plt.cm.Paired, alpha=0.8)\nax.scatter(X0, X1, c=katono, cmap=plt.cm.Paired, s=20, edgecolors='k')  # katono holds the status labels (poltar was undefined)\n# ax.set_ylabel()\n# ax.set_xlabel()\nax.set_xticks(())\nax.set_yticks(())\nax.set_title(title)\nplt.show()", "_____no_output_____" ] ], [ [ "***Conclusion***\n\nRandom forests, AdaBoost, gradient boosted trees and SVCs are generally good prediction models for testing for hypothyroidism.\n\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ] ]
cb9d2ff5d18667b91c63b32e9c04bf92558ad2d7
13,765
ipynb
Jupyter Notebook
saved1_linear.ipynb
Virantto/LinearRegression
21590c0670fd8de5a244ed5566050af2cbc3606d
[ "Apache-2.0" ]
1
2021-09-06T14:50:15.000Z
2021-09-06T14:50:15.000Z
saved1_linear.ipynb
Virantto/LinearRegression
21590c0670fd8de5a244ed5566050af2cbc3606d
[ "Apache-2.0" ]
null
null
null
saved1_linear.ipynb
Virantto/LinearRegression
21590c0670fd8de5a244ed5566050af2cbc3606d
[ "Apache-2.0" ]
null
null
null
41.838906
6,824
0.69713
[ [ [ "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns", "_____no_output_____" ], [ "data = pd.read_csv(\"data.csv\")\nx = data.iloc[:,:-1].values\ny = data.iloc[:,:4].values\ndata.head()", "_____no_output_____" ], [ "sns.heatmap(data.corr())", "_____no_output_____" ], [ "from sklearn.model_selection import train_test_split", "_____no_output_____" ], [ "x_train,x_test,y_train,y_test = train_test_split(x,y,test_size=0.2,random_state = 0)", "_____no_output_____" ], [ "from sklearn.linear_model import LinearRegression\nreg = LinearRegression()", "_____no_output_____" ], [ "reg.fit(x_train,y_train)", "_____no_output_____" ], [ "y_pred = reg.predict(x_test)\nprint(y_pred)", "[[ 3. 24. 52. 91.33978703]]\n" ], [ "reg.coef_", "_____no_output_____" ], [ "reg.intercept_", "_____no_output_____" ], [ "from sklearn.metrics import r2_score", "_____no_output_____" ], [ "r2_score(y_test,y_pred)", "C:\\ProgramData\\Anaconda3\\lib\\site-packages\\sklearn\\metrics\\_regression.py:682: UndefinedMetricWarning: R^2 score is not well-defined with less than two samples.\n warnings.warn(msg, UndefinedMetricWarning)\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb9d320ffb908baa1ae729be95dd409f138f71ea
587,023
ipynb
Jupyter Notebook
Deep.Learning/5.Generative-Adversial-Networks/2.Deep-Convolutional-Gan/Batch_Normalization_Lesson.ipynb
Scrier/udacity
1326441aa2104a641b555676ec2429d8b6eb539f
[ "MIT" ]
1
2021-09-08T02:55:34.000Z
2021-09-08T02:55:34.000Z
Deep.Learning/5.Generative-Adversial-Networks/2.Deep-Convolutional-Gan/Batch_Normalization_Lesson.ipynb
Scrier/udacity
1326441aa2104a641b555676ec2429d8b6eb539f
[ "MIT" ]
1
2018-01-14T16:34:49.000Z
2018-01-14T16:34:49.000Z
Deep.Learning/5.Generative-Adversial-Networks/2.Deep-Convolutional-Gan/Batch_Normalization_Lesson.ipynb
Scrier/udacity
1326441aa2104a641b555676ec2429d8b6eb539f
[ "MIT" ]
null
null
null
276.897642
34,176
0.905067
[ [ [ "# Batch Normalization – Lesson\n\n1. [What is it?](#theory)\n2. [What are it's benefits?](#benefits)\n3. [How do we add it to a network?](#implementation_1)\n4. [Let's see it work!](#demos)\n5. [What are you hiding?](#implementation_2)\n\n# What is Batch Normalization?<a id='theory'></a>\n\nBatch normalization was introduced in Sergey Ioffe's and Christian Szegedy's 2015 paper [Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift](https://arxiv.org/pdf/1502.03167.pdf). The idea is that, instead of just normalizing the inputs to the network, we normalize the inputs to _layers within_ the network. It's called \"batch\" normalization because during training, we normalize each layer's inputs by using the mean and variance of the values in the current mini-batch.\n\nWhy might this help? Well, we know that normalizing the inputs to a _network_ helps the network learn. But a network is a series of layers, where the output of one layer becomes the input to another. That means we can think of any layer in a neural network as the _first_ layer of a smaller network.\n\nFor example, imagine a 3 layer network. Instead of just thinking of it as a single network with inputs, layers, and outputs, think of the output of layer 1 as the input to a two layer network. This two layer network would consist of layers 2 and 3 in our original network. \n\nLikewise, the output of layer 2 can be thought of as the input to a single layer network, consisting only of layer 3.\n\nWhen you think of it like that - as a series of neural networks feeding into each other - then it's easy to imagine how normalizing the inputs to each layer would help. It's just like normalizing the inputs to any other neural network, but you're doing it at every layer (sub-network).\n\nBeyond the intuitive reasons, there are good mathematical reasons why it helps the network learn better, too. It helps combat what the authors call _internal covariate shift_. This discussion is best handled [in the paper](https://arxiv.org/pdf/1502.03167.pdf) and in [Deep Learning](http://www.deeplearningbook.org) a book you can read online written by Ian Goodfellow, Yoshua Bengio, and Aaron Courville. Specifically, check out the batch normalization section of [Chapter 8: Optimization for Training Deep Models](http://www.deeplearningbook.org/contents/optimization.html).", "_____no_output_____" ], [ "# Benefits of Batch Normalization<a id=\"benefits\"></a>\n\nBatch normalization optimizes network training. It has been shown to have several benefits:\n1. **Networks train faster** – Each training _iteration_ will actually be slower because of the extra calculations during the forward pass and the additional hyperparameters to train during back propagation. However, it should converge much more quickly, so training should be faster overall. \n2. **Allows higher learning rates** – Gradient descent usually requires small learning rates for the network to converge. And as networks get deeper, their gradients get smaller during back propagation so they require even more iterations. Using batch normalization allows us to use much higher learning rates, which further increases the speed at which networks train. \n3. **Makes weights easier to initialize** – Weight initialization can be difficult, and it's even more difficult when creating deeper networks. Batch normalization seems to allow us to be much less careful about choosing our initial starting weights. \n4. 
**Makes more activation functions viable** – Some activation functions do not work well in some situations. Sigmoids lose their gradient pretty quickly, which means they can't be used in deep networks. And ReLUs often die out during training, where they stop learning completely, so we need to be careful about the range of values fed into them. Because batch normalization regulates the values going into each activation function, non-linearities that don't seem to work well in deep networks actually become viable again. \n5. **Simplifies the creation of deeper networks** – Because of the first 4 items listed above, it is easier to build and faster to train deeper neural networks when using batch normalization. And it's been shown that deeper networks generally produce better results, so that's great.\n6. **Provides a bit of regularization** – Batch normalization adds a little noise to your network. In some cases, such as in Inception modules, batch normalization has been shown to work as well as dropout. But in general, consider batch normalization as a bit of extra regularization, possibly allowing you to reduce some of the dropout you might add to a network. \n7. **May give better results overall** – Some tests seem to show batch normalization actually improves the training results. However, it's really an optimization to help train faster, so you shouldn't think of it as a way to make your network better. But since it lets you train networks faster, that means you can iterate over more designs more quickly. It also lets you build deeper networks, which are usually better. So when you factor in everything, you're probably going to end up with better results if you build your networks with batch normalization.", "_____no_output_____" ], [ "# Batch Normalization in TensorFlow<a id=\"implementation_1\"></a>\n\nThis section of the notebook shows you one way to add batch normalization to a neural network built in TensorFlow. \n\nThe following cell imports the packages we need in the notebook and loads the MNIST dataset to use in our experiments. However, the `tensorflow` package contains all the code you'll actually need for batch normalization.", "_____no_output_____" ] ], [ [ "# Import necessary packages\nimport tensorflow as tf\nimport tqdm\nimport numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\n# Import MNIST data so we have something for our experiments\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)", "/usr/local/lib/python3.5/dist-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n from ._conv import register_converters as _register_converters\n" ] ], [ [ "### Neural network classes for testing\n\nThe following class, `NeuralNet`, allows us to create identical neural networks with and without batch normalization. The code is heavily documented, but there is also some additional discussion later. 
You do not need to read through it all before going through the rest of the notebook, but the comments within the code blocks may answer some of your questions.\n\n*About the code:*\n>This class is not meant to represent TensorFlow best practices – the design choices made here are to support the discussion related to batch normalization.\n\n>It's also important to note that we use the well-known MNIST data for these examples, but the networks we create are not meant to be good for performing handwritten character recognition. We chose this network architecture because it is similar to the one used in the original paper, which is complex enough to demonstrate some of the benefits of batch normalization while still being fast to train.", "_____no_output_____" ] ], [ [ "class NeuralNet:\n def __init__(self, initial_weights, activation_fn, use_batch_norm):\n \"\"\"\n Initializes this object, creating a TensorFlow graph using the given parameters.\n \n :param initial_weights: list of NumPy arrays or Tensors\n Initial values for the weights for every layer in the network. We pass these in\n so we can create multiple networks with the same starting weights to eliminate\n training differences caused by random initialization differences.\n The number of items in the list defines the number of layers in the network,\n and the shapes of the items in the list define the number of nodes in each layer.\n e.g. Passing in 3 matrices of shape (784, 256), (256, 100), and (100, 10) would \n create a network with 784 inputs going into a hidden layer with 256 nodes,\n followed by a hidden layer with 100 nodes, followed by an output layer with 10 nodes.\n :param activation_fn: Callable\n The function used for the output of each hidden layer. The network will use the same\n activation function on every hidden layer and no activation function on the output layer.\n e.g. Pass tf.nn.relu to use ReLU activations on your hidden layers.\n :param use_batch_norm: bool\n Pass True to create a network that uses batch normalization; False otherwise\n Note: this network will not use batch normalization on layers that do not have an\n activation function.\n \"\"\"\n # Keep track of whether or not this network uses batch normalization.\n self.use_batch_norm = use_batch_norm\n self.name = \"With Batch Norm\" if use_batch_norm else \"Without Batch Norm\"\n\n # Batch normalization needs to do different calculations during training and inference,\n # so we use this placeholder to tell the graph which behavior to use.\n self.is_training = tf.placeholder(tf.bool, name=\"is_training\")\n\n # This list is just for keeping track of data we want to plot later.\n # It doesn't actually have anything to do with neural nets or batch normalization.\n self.training_accuracies = []\n\n # Create the network graph, but it will not actually have any real values until after you\n # call train or test\n self.build_network(initial_weights, activation_fn)\n \n def build_network(self, initial_weights, activation_fn):\n \"\"\"\n Build the graph. The graph still needs to be trained via the `train` method.\n \n :param initial_weights: list of NumPy arrays or Tensors\n See __init__ for description. \n :param activation_fn: Callable\n See __init__ for description. 
\n \"\"\"\n self.input_layer = tf.placeholder(tf.float32, [None, initial_weights[0].shape[0]])\n layer_in = self.input_layer\n for weights in initial_weights[:-1]:\n layer_in = self.fully_connected(layer_in, weights, activation_fn) \n self.output_layer = self.fully_connected(layer_in, initial_weights[-1])\n \n def fully_connected(self, layer_in, initial_weights, activation_fn=None):\n \"\"\"\n Creates a standard, fully connected layer. Its number of inputs and outputs will be\n defined by the shape of `initial_weights`, and its starting weight values will be\n taken directly from that same parameter. If `self.use_batch_norm` is True, this\n layer will include batch normalization, otherwise it will not. \n \n :param layer_in: Tensor\n The Tensor that feeds into this layer. It's either the input to the network or the output\n of a previous layer.\n :param initial_weights: NumPy array or Tensor\n Initial values for this layer's weights. The shape defines the number of nodes in the layer.\n e.g. Passing in a matrix of shape (784, 256) would create a layer with 784 inputs and 256 \n outputs. \n :param activation_fn: Callable or None (default None)\n The non-linearity used for the output of the layer. If None, this layer will not include \n batch normalization, regardless of the value of `self.use_batch_norm`. \n e.g. Pass tf.nn.relu to use ReLU activations on your hidden layers.\n \"\"\"\n # Since this class supports both options, only use batch normalization when\n # requested. However, do not use it on the final layer, which we identify\n # by its lack of an activation function.\n if self.use_batch_norm and activation_fn:\n # Batch normalization uses weights as usual, but does NOT add a bias term. This is because \n # its calculations include gamma and beta variables that make the bias term unnecessary.\n # (See later in the notebook for more details.)\n weights = tf.Variable(initial_weights)\n linear_output = tf.matmul(layer_in, weights)\n\n # Apply batch normalization to the linear combination of the inputs and weights\n batch_normalized_output = tf.layers.batch_normalization(linear_output, training=self.is_training)\n\n # Now apply the activation function, *after* the normalization.\n return activation_fn(batch_normalized_output)\n else:\n # When not using batch normalization, create a standard layer that multiplies\n # the inputs and weights, adds a bias, and optionally passes the result \n # through an activation function. 
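(A bias is included here because, without batch normalization, there is no beta parameter to absorb it.)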
\n weights = tf.Variable(initial_weights)\n biases = tf.Variable(tf.zeros([initial_weights.shape[-1]]))\n linear_output = tf.add(tf.matmul(layer_in, weights), biases)\n return linear_output if not activation_fn else activation_fn(linear_output)\n\n def train(self, session, learning_rate, training_batches, batches_per_sample, save_model_as=None):\n \"\"\"\n Trains the model on the MNIST training dataset.\n \n :param session: Session\n Used to run training graph operations.\n :param learning_rate: float\n Learning rate used during gradient descent.\n :param training_batches: int\n Number of batches to train.\n :param batches_per_sample: int\n How many batches to train before sampling the validation accuracy.\n :param save_model_as: string or None (default None)\n Name to use if you want to save the trained model.\n \"\"\"\n # This placeholder will store the target labels for each mini batch\n labels = tf.placeholder(tf.float32, [None, 10])\n\n # Define loss and optimizer\n cross_entropy = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=self.output_layer))\n \n # Define operations for testing\n correct_prediction = tf.equal(tf.argmax(self.output_layer, 1), tf.argmax(labels, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n if self.use_batch_norm:\n # If we don't include the update ops as dependencies on the train step, the \n # tf.layers.batch_normalization layers won't update their population statistics,\n # which will cause the model to fail at inference time\n with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):\n train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)\n else:\n train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)\n \n # Train for the appropriate number of batches. (tqdm is only for a nice timing display)\n for i in tqdm.tqdm(range(training_batches)):\n # We use batches of 60 just because the original paper did. 
You can use any size batch you like.\n batch_xs, batch_ys = mnist.train.next_batch(60)\n session.run(train_step, feed_dict={self.input_layer: batch_xs, \n labels: batch_ys, \n self.is_training: True})\n \n # Periodically test accuracy against the 5k validation images and store it for plotting later.\n if i % batches_per_sample == 0:\n test_accuracy = session.run(accuracy, feed_dict={self.input_layer: mnist.validation.images,\n labels: mnist.validation.labels,\n self.is_training: False})\n self.training_accuracies.append(test_accuracy)\n\n # After training, report accuracy against test data\n test_accuracy = session.run(accuracy, feed_dict={self.input_layer: mnist.validation.images,\n labels: mnist.validation.labels,\n self.is_training: False})\n print('{}: After training, final accuracy on validation set = {}'.format(self.name, test_accuracy))\n\n # If you want to use this model later for inference instead of having to retrain it,\n # just construct it with the same parameters and then pass this file to the 'test' function\n if save_model_as:\n tf.train.Saver().save(session, save_model_as)\n\n def test(self, session, test_training_accuracy=False, include_individual_predictions=False, restore_from=None):\n \"\"\"\n Tests a trained model on the MNIST testing dataset.\n\n :param session: Session\n Used to run the testing graph operations.\n :param test_training_accuracy: bool (default False)\n If True, perform inference with batch normalization using batch mean and variance;\n if False, perform inference with batch normalization using estimated population mean and variance.\n Note: in real life, *always* perform inference using the population mean and variance.\n This parameter exists just to support demonstrating what happens if you don't.\n :param include_individual_predictions: bool (default False)\n This function always performs an accuracy test against the entire test set. But if this parameter\n is True, it performs an extra test, doing 200 predictions one at a time, and displays the results\n and accuracy.\n :param restore_from: string or None (default None)\n Name of a saved model if you want to test with previously saved weights.\n \"\"\"\n # This placeholder will store the true labels for each mini batch\n labels = tf.placeholder(tf.float32, [None, 10])\n\n # Define operations for testing\n correct_prediction = tf.equal(tf.argmax(self.output_layer, 1), tf.argmax(labels, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n # If provided, restore from a previously saved model\n if restore_from:\n tf.train.Saver().restore(session, restore_from)\n\n # Test against all of the MNIST test data\n test_accuracy = session.run(accuracy, feed_dict={self.input_layer: mnist.test.images,\n labels: mnist.test.labels,\n self.is_training: test_training_accuracy})\n print('-'*75)\n print('{}: Accuracy on full test set = {}'.format(self.name, test_accuracy))\n\n # If requested, perform tests predicting individual values rather than batches\n if include_individual_predictions:\n predictions = []\n correct = 0\n\n # Do 200 predictions, 1 at a time\n for i in range(200):\n # This is a normal prediction using an individual test case. 
However, notice\n # we pass `test_training_accuracy` to `feed_dict` as the value for `self.is_training`.\n # Remember that will tell it whether it should use the batch mean & variance or\n # the population estimates that were calculated while training the model.\n pred, corr = session.run([tf.argmax(self.output_layer, 1), accuracy],\n feed_dict={self.input_layer: [mnist.test.images[i]],\n labels: [mnist.test.labels[i]],\n self.is_training: test_training_accuracy})\n correct += corr\n\n predictions.append(pred[0])\n\n print(\"200 Predictions:\", predictions)\n print(\"Accuracy on 200 samples:\", correct/200)\n", "_____no_output_____" ] ], [ [ "There are quite a few comments in the code, so those should answer most of your questions. However, let's take a look at the most important lines.\n\nWe add batch normalization to layers inside the `fully_connected` function. Here are some important points about that code:\n1. Layers with batch normalization do not include a bias term.\n2. We use TensorFlow's [`tf.layers.batch_normalization`](https://www.tensorflow.org/api_docs/python/tf/layers/batch_normalization) function to handle the math. (We show lower-level ways to do this [later in the notebook](#implementation_2).)\n3. We tell `tf.layers.batch_normalization` whether or not the network is training. This is an important step we'll talk about later.\n4. We add the normalization **before** calling the activation function.\n\nIn addition to that code, the training step is wrapped in the following `with` statement:\n```python\nwith tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):\n```\nThis line actually works in conjunction with the `training` parameter we pass to `tf.layers.batch_normalization`. Without it, TensorFlow's batch normalization layer will not operate correctly during inference.\n\nFinally, whenever we train the network or perform inference, we use the `feed_dict` to set `self.is_training` to `True` or `False`, respectively, like in the following line:\n```python\nsession.run(train_step, feed_dict={self.input_layer: batch_xs, \n labels: batch_ys, \n self.is_training: True})\n```\nWe'll go into more details later, but next we want to show some experiments that use this code and test networks with and without batch normalization.", "_____no_output_____" ], [ "# Batch Normalization Demos<a id='demos'></a>\nThis section of the notebook trains various networks with and without batch normalization to demonstrate some of the benefits mentioned earlier. \n\nWe'd like to thank the author of this blog post [Implementing Batch Normalization in TensorFlow](http://r2rt.com/implementing-batch-normalization-in-tensorflow.html). That post provided the idea of - and some of the code for - plotting the differences in accuracy during training, along with the idea for comparing multiple networks using the same initial weights.", "_____no_output_____" ], [ "## Code to support testing\n\nThe following two functions support the demos we run in the notebook. \n\nThe first function, `plot_training_accuracies`, simply plots the values found in the `training_accuracies` lists of the `NeuralNet` objects passed to it. If you look at the `train` function in `NeuralNet`, you'll see that while it's training the network, it periodically measures validation accuracy and stores the results in that list. It does that just to support these plots.\n\nThe second function, `train_and_test`, creates two neural nets - one with and one without batch normalization. 
It then trains them both and tests them, calling `plot_training_accuracies` to plot how their accuracies changed over the course of training. The really important thing about this function is that it initializes the starting weights for the networks _outside_ of the networks and then passes them in. This lets it train both networks from the exact same starting weights, which eliminates performance differences that might result from (un)lucky initial weights.", "_____no_output_____" ] ], [ [ "def plot_training_accuracies(*args, **kwargs):\n \"\"\"\n Displays a plot of the accuracies calculated during training to demonstrate\n how many iterations it took for the model(s) to converge.\n \n :param args: One or more NeuralNet objects\n You can supply any number of NeuralNet objects as unnamed arguments \n and this will display their training accuracies. Be sure to call `train` on\n the NeuralNets before calling this function.\n :param kwargs: \n You can supply any named parameters here, but `batches_per_sample` is the only\n one we look for. It should match the `batches_per_sample` value you passed\n to the `train` function.\n \"\"\"\n fig, ax = plt.subplots()\n\n batches_per_sample = kwargs['batches_per_sample']\n \n for nn in args:\n ax.plot(range(0,len(nn.training_accuracies)*batches_per_sample,batches_per_sample),\n nn.training_accuracies, label=nn.name)\n ax.set_xlabel('Training steps')\n ax.set_ylabel('Accuracy')\n ax.set_title('Validation Accuracy During Training')\n ax.legend(loc=4)\n ax.set_ylim([0,1])\n plt.yticks(np.arange(0, 1.1, 0.1))\n plt.grid(True)\n plt.show()\n\ndef train_and_test(use_bad_weights, learning_rate, activation_fn, training_batches=50000, batches_per_sample=500):\n \"\"\"\n Creates two networks, one with and one without batch normalization, then trains them\n with identical starting weights, layers, batches, etc. Finally tests and plots their accuracies.\n \n :param use_bad_weights: bool\n If True, initialize the weights of both networks to wildly inappropriate weights;\n if False, use reasonable starting weights.\n :param learning_rate: float\n Learning rate used during gradient descent.\n :param activation_fn: Callable\n The function used for the output of each hidden layer. The network will use the same\n activation function on every hidden layer and no activation function on the output layer.\n e.g. Pass tf.nn.relu to use ReLU activations on your hidden layers.\n :param training_batches: (default 50000)\n Number of batches to train.\n :param batches_per_sample: (default 500)\n How many batches to train before sampling the validation accuracy.\n \"\"\"\n # Use identical starting weights for each network to eliminate differences in\n # weight initialization as a cause for differences seen in training performance\n #\n # Note: The networks will use these weights to define the number and shapes of\n # their layers. The original batch normalization paper used 3 hidden layers\n # with 100 nodes in each, followed by a 10 node output layer. 
These values\n # build such a network, but feel free to experiment with different choices.\n # However, the input size should always be 784 and the final output should be 10.\n if use_bad_weights:\n # These weights should be horrible because they have such a large standard deviation\n weights = [np.random.normal(size=(784,100), scale=5.0).astype(np.float32),\n np.random.normal(size=(100,100), scale=5.0).astype(np.float32),\n np.random.normal(size=(100,100), scale=5.0).astype(np.float32),\n np.random.normal(size=(100,10), scale=5.0).astype(np.float32)\n ]\n else:\n # These weights should be good because they have such a small standard deviation\n weights = [np.random.normal(size=(784,100), scale=0.05).astype(np.float32),\n np.random.normal(size=(100,100), scale=0.05).astype(np.float32),\n np.random.normal(size=(100,100), scale=0.05).astype(np.float32),\n np.random.normal(size=(100,10), scale=0.05).astype(np.float32)\n ]\n\n # Just to make sure TensorFlow's default graph is empty before we start another\n # test, because we don't bother using different graphs or scoping and naming \n # elements carefully in this sample code.\n tf.reset_default_graph()\n\n # build two versions of the same network, 1 without and 1 with batch normalization\n nn = NeuralNet(weights, activation_fn, False)\n bn = NeuralNet(weights, activation_fn, True)\n \n # train and test the two models\n with tf.Session() as sess:\n tf.global_variables_initializer().run()\n\n nn.train(sess, learning_rate, training_batches, batches_per_sample)\n bn.train(sess, learning_rate, training_batches, batches_per_sample)\n \n nn.test(sess)\n bn.test(sess)\n \n # Display a graph of how validation accuracies changed during training\n # so we can compare how the models trained and when they converged\n plot_training_accuracies(nn, bn, batches_per_sample=batches_per_sample)\n", "_____no_output_____" ] ], [ [ "## Comparisons between identical networks, with and without batch normalization\n\nThe next series of cells train networks with various settings to show the differences with and without batch normalization. They are meant to clearly demonstrate the effects of batch normalization. We include a deeper discussion of batch normalization later in the notebook.", "_____no_output_____" ], [ "**The following creates two networks using a ReLU activation function, a learning rate of 0.01, and reasonable starting weights.**", "_____no_output_____" ] ], [ [ "train_and_test(False, 0.01, tf.nn.relu)", "WARNING:tensorflow:From <ipython-input-2-53e452de3b25>:118: softmax_cross_entropy_with_logits (from tensorflow.python.ops.nn_ops) is deprecated and will be removed in a future version.\nInstructions for updating:\n\nFuture major versions of TensorFlow will allow gradients to flow\ninto the labels input on backprop by default.\n\nSee tf.nn.softmax_cross_entropy_with_logits_v2.\n\n" ] ], [ [ "As expected, both networks train well and eventually reach similar test accuracies. However, notice that the model with batch normalization converges slightly faster than the other network, reaching accuracies over 90% almost immediately and nearing its max accuracy in 10 or 15 thousand iterations. The other network takes about 3 thousand iterations to reach 90% and doesn't near its best accuracy until 30 thousand or more iterations.\n\nIf you look at the raw speed, you can see that without batch normalization we were computing over 1100 batches per second, whereas with batch normalization that goes down to just over 500. 
However, batch normalization allows us to perform fewer iterations and converge in less time overall. (We only trained for 50 thousand batches here so we could plot the comparison.)", "_____no_output_____" ], [ "**The following creates two networks with the same hyperparameters used in the previous example, but only trains for 2000 iterations.**", "_____no_output_____" ] ], [ [ "train_and_test(False, 0.01, tf.nn.relu, 2000, 50)", "100%|██████████| 2000/2000 [00:01<00:00, 1005.06it/s]\n" ] ], [ [ "As you can see, using batch normalization produces a model with over 95% accuracy in only 2000 batches, and it was above 90% at somewhere around 500 batches. Without batch normalization, the model takes 1750 iterations just to hit 80% – the network with batch normalization hits that mark after around 200 iterations! (Note: if you run the code yourself, you'll see slightly different results each time because the starting weights - while the same for each model - are different for each run.)\n\nIn the above example, you should also notice that the networks trained fewer batches per second than what you saw in the previous example. That's because much of the time we're tracking is actually spent periodically performing inference to collect data for the plots. In this example we perform that inference every 50 batches instead of every 500, so generating the plot for this example requires 10 times the overhead for the same 2000 iterations.", "_____no_output_____" ], [ "**The following creates two networks using a sigmoid activation function, a learning rate of 0.01, and reasonable starting weights.**", "_____no_output_____" ] ], [ [ "train_and_test(False, 0.01, tf.nn.sigmoid)", "100%|██████████| 50000/50000 [00:51<00:00, 971.70it/s] \n" ] ], [ [ "With the number of layers we're using and this small learning rate, using a sigmoid activation function takes a long time to start learning. It eventually starts making progress, but it took over 45 thousand batches just to get over 80% accuracy. Using batch normalization gets to 90% in around one thousand batches. ", "_____no_output_____" ], [ "**The following creates two networks using a ReLU activation function, a learning rate of 1, and reasonable starting weights.**", "_____no_output_____" ] ], [ [ "train_and_test(False, 1, tf.nn.relu)", "100%|██████████| 50000/50000 [00:45<00:00, 1105.60it/s]\n" ] ], [ [ "Now we're using ReLUs again, but with a larger learning rate. The plot shows how training started out pretty normally, with the network with batch normalization starting out faster than the other. But the higher learning rate bounces the accuracy around a bit more, and at some point the accuracy in the network without batch normalization just completely crashes. It's likely that too many ReLUs died off at this point because of the high learning rate.\n\nThe next cell shows the same test again. The network with batch normalization performs the same way, and the other suffers from the same problem again, but it manages to train longer before it happens.", "_____no_output_____" ] ], [ [ "train_and_test(False, 1, tf.nn.relu)", "100%|██████████| 50000/50000 [00:44<00:00, 1120.13it/s]\n" ] ], [ [ "In both of the previous examples, the network with batch normalization manages to get over 98% accuracy, and gets near that result almost immediately. 
The higher learning rate allows the network to train extremely fast.", "_____no_output_____" ], [ "**The following creates two networks using a sigmoid activation function, a learning rate of 1, and reasonable starting weights.**", "_____no_output_____" ] ], [ [ "train_and_test(False, 1, tf.nn.sigmoid)", "100%|██████████| 50000/50000 [00:47<00:00, 1057.24it/s]\n" ] ], [ [ "In this example, we switched to a sigmoid activation function. It appears to handle the higher learning rate well, with both networks achieving high accuracy.\n\nThe cell below shows a similar pair of networks trained for only 2000 iterations.", "_____no_output_____" ] ], [ [ "train_and_test(False, 1, tf.nn.sigmoid, 2000, 50)", "100%|██████████| 2000/2000 [00:01<00:00, 1031.56it/s]\n" ] ], [ [ "As you can see, even though these parameters work well for both networks, the one with batch normalization gets over 90% in 400 or so batches, whereas the other takes over 1700. When training larger networks, these sorts of differences become more pronounced.", "_____no_output_____" ], [ "**The following creates two networks using a ReLU activation function, a learning rate of 2, and reasonable starting weights.**", "_____no_output_____" ] ], [ [ "train_and_test(False, 2, tf.nn.relu)", "100%|██████████| 50000/50000 [00:42<00:00, 1174.07it/s]\n" ] ], [ [ "With this very large learning rate, the network with batch normalization trains fine and almost immediately manages 98% accuracy. However, the network without normalization doesn't learn at all.", "_____no_output_____" ], [ "**The following creates two networks using a sigmoid activation function, a learning rate of 2, and reasonable starting weights.**", "_____no_output_____" ] ], [ [ "train_and_test(False, 2, tf.nn.sigmoid)", "100%|██████████| 50000/50000 [00:44<00:00, 1115.73it/s]\n" ] ], [ [ "Once again, using a sigmoid activation function with the larger learning rate works well both with and without batch normalization.\n\nHowever, look at the plot below where we train models with the same parameters but only 2000 iterations. As usual, batch normalization lets it train faster. ", "_____no_output_____" ] ], [ [ "train_and_test(False, 2, tf.nn.sigmoid, 2000, 50)", "100%|██████████| 2000/2000 [00:01<00:00, 1028.21it/s]\n" ] ], [ [ "In the rest of the examples, we use really bad starting weights. That is, normally we would use very small values close to zero. However, in these examples we choose random values with a standard deviation of 5. If you were really training a neural network, you would **not** want to do this. But these examples demonstrate how batch normalization makes your network much more resilient. ", "_____no_output_____" ], [ "**The following creates two networks using a ReLU activation function, a learning rate of 0.01, and bad starting weights.**", "_____no_output_____" ] ], [ [ "train_and_test(True, 0.01, tf.nn.relu)", "100%|██████████| 50000/50000 [00:48<00:00, 1036.75it/s]\n" ] ], [ [ "As the plot shows, without batch normalization the network never learns anything at all. But with batch normalization, it actually learns pretty well and gets to almost 80% accuracy. The starting weights obviously hurt the network, but you can see how well batch normalization does in overcoming them. 
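\n\nTo get a feel for why a standard deviation of 5 is so damaging, here is a quick back-of-the-envelope check (illustrative only; these random arrays are stand-ins, not the experiments above):\n```python\nimport numpy as np\n\nweights = np.random.normal(size=(784, 100), scale=5.0)\ninputs = np.random.uniform(0, 1, size=(60, 784))  # MNIST pixels lie in [0, 1]\npre_activations = inputs @ weights\nprint(pre_activations.std())  # roughly 80, versus roughly 0.8 when scale=0.05\n```\nPre-activation values that large swamp the useful range of any activation function, which is exactly the kind of scale problem batch normalization corrects at every layer.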
", "_____no_output_____" ], [ "**The following creates two networks using a sigmoid activation function, a learning rate of 0.01, and bad starting weights.**", "_____no_output_____" ] ], [ [ "train_and_test(True, 0.01, tf.nn.sigmoid)", "100%|██████████| 50000/50000 [00:56<00:00, 886.58it/s] \n" ] ], [ [ "Using a sigmoid activation function works better than the ReLU in the previous example, but without batch normalization it would take a tremendously long time to train the network, if it ever trained at all. ", "_____no_output_____" ], [ "**The following creates two networks using a ReLU activation function, a learning rate of 1, and bad starting weights.**<a id=\"successful_example_lr_1\"></a>", "_____no_output_____" ] ], [ [ "train_and_test(True, 1, tf.nn.relu)", "100%|██████████| 50000/50000 [00:44<00:00, 1133.28it/s]\n" ] ], [ [ "The higher learning rate used here allows the network with batch normalization to surpass 90% in about 30 thousand batches. The network without it never gets anywhere.", "_____no_output_____" ], [ "**The following creates two networks using a sigmoid activation function, a learning rate of 1, and bad starting weights.**", "_____no_output_____" ] ], [ [ "train_and_test(True, 1, tf.nn.sigmoid)", "100%|██████████| 50000/50000 [00:41<00:00, 1192.07it/s]\n" ] ], [ [ "Using sigmoid works better than ReLUs for this higher learning rate. However, you can see that without batch normalization, the network takes a long time tro train, bounces around a lot, and spends a long time stuck at 90%. The network with batch normalization trains much more quickly, seems to be more stable, and achieves a higher accuracy.", "_____no_output_____" ], [ "**The following creates two networks using a ReLU activation function, a learning rate of 2, and bad starting weights.**<a id=\"successful_example_lr_2\"></a>", "_____no_output_____" ] ], [ [ "train_and_test(True, 2, tf.nn.relu)", "100%|██████████| 50000/50000 [00:45<00:00, 1105.44it/s]\n" ] ], [ [ "We've already seen that ReLUs do not do as well as sigmoids with higher learning rates, and here we are using an extremely high rate. As expected, without batch normalization the network doesn't learn at all. But with batch normalization, it eventually achieves 90% accuracy. Notice, though, how its accuracy bounces around wildly during training - that's because the learning rate is really much too high, so the fact that this worked at all is a bit of luck.", "_____no_output_____" ], [ "**The following creates two networks using a sigmoid activation function, a learning rate of 2, and bad starting weights.**", "_____no_output_____" ] ], [ [ "train_and_test(True, 2, tf.nn.sigmoid)", "100%|██████████| 50000/50000 [00:43<00:00, 1156.02it/s]\n" ] ], [ [ "In this case, the network with batch normalization trained faster and reached a higher accuracy. Meanwhile, the high learning rate makes the network without normalization bounce around erratically and have trouble getting past 90%.", "_____no_output_____" ], [ "### Full Disclosure: Batch Normalization Doesn't Fix Everything\n\nBatch normalization isn't magic and it doesn't work every time. Weights are still randomly initialized and batches are chosen at random during training, so you never know exactly how training will go. 
Even for these tests, where we use the same initial weights for both networks, we still get _different_ weights each time we run.\n\nThis section includes two examples that show runs when batch normalization did not help at all.\n\n**The following creates two networks using a ReLU activation function, a learning rate of 1, and bad starting weights.**", "_____no_output_____" ] ], [ [ "train_and_test(True, 1, tf.nn.relu)", "100%|██████████| 50000/50000 [00:42<00:00, 1179.50it/s]\n" ] ], [ [ "When we used these same parameters [earlier](#successful_example_lr_1), we saw the network with batch normalization reach 92% validation accuracy. This time we used different starting weights, initialized using the same standard deviation as before, and the network doesn't learn at all. (Remember, an accuracy around 10% is what the network gets if it just guesses the same value all the time.)\n\n**The following creates two networks using a ReLU activation function, a learning rate of 2, and bad starting weights.**", "_____no_output_____" ] ], [ [ "train_and_test(True, 2, tf.nn.relu)", "100%|██████████| 50000/50000 [00:43<00:00, 1140.26it/s]\n" ] ], [ [ "When we trained with these parameters and batch normalization [earlier](#successful_example_lr_2), we reached 90% validation accuracy. However, this time the network _almost_ starts to make some progress in the beginning, but it quickly breaks down and stops learning. \n\n**Note:** Both of the above examples use *extremely* bad starting weights, along with learning rates that are too high. While we've shown batch normalization _can_ overcome bad values, we don't mean to encourage actually using them. The examples in this notebook are meant to show that batch normalization can help your networks train better. But these last two examples should remind you that you still want to try to use good network design choices and reasonable starting weights. It should also remind you that the results of each attempt to train a network are a bit random, even when using otherwise identical architectures.", "_____no_output_____" ], [ "# Batch Normalization: A Detailed Look<a id='implementation_2'></a>", "_____no_output_____" ], [ "The layer created by `tf.layers.batch_normalization` handles all the details of implementing batch normalization. Many students will be fine just using that and won't care about what's happening at the lower levels. However, some students may want to explore the details, so here is a short explanation of what's really happening, starting with the equations you're likely to come across if you ever read about batch normalization. ", "_____no_output_____" ], [ "In order to normalize the values, we first need to find the average value for the batch. If you look at the code, you can see that this is not the average value of the batch _inputs_, but the average value coming _out_ of any particular layer before we pass it through its non-linear activation function and then feed it as an input to the _next_ layer.\n\nWe represent the average as $\\mu_B$, which is simply the sum of all of the values $x_i$ divided by the number of values, $m$ \n\n$$\n\\mu_B \\leftarrow \\frac{1}{m}\\sum_{i=1}^m x_i\n$$\n\nWe then need to calculate the variance, or mean squared deviation, represented as $\\sigma_{B}^{2}$. If you aren't familiar with statistics, that simply means for each value $x_i$, we subtract the average value (calculated earlier as $\\mu_B$), which gives us what's called the \"deviation\" for that value. 
We square the result to get the squared deviation. Sum up the results of doing that for each of the values, then divide by the number of values, again $m$, to get the average, or mean, squared deviation.\n\n$$\n\\sigma_{B}^{2} \\leftarrow \\frac{1}{m}\\sum_{i=1}^m (x_i - \\mu_B)^2\n$$\n\nOnce we have the mean and variance, we can use them to normalize the values with the following equation. For each value, it subtracts the mean and divides by the (almost) standard deviation. (You've probably heard of standard deviation many times, but if you have not studied statistics you might not know that the standard deviation is actually the square root of the mean squared deviation.)\n\n$$\n\\hat{x_i} \\leftarrow \\frac{x_i - \\mu_B}{\\sqrt{\\sigma_{B}^{2} + \\epsilon}}\n$$\n\nAbove, we said \"(almost) standard deviation\". That's because the real standard deviation for the batch is calculated by $\\sqrt{\\sigma_{B}^{2}}$, but the above formula adds the term epsilon, $\\epsilon$, before taking the square root. The epsilon can be any small, positive constant - in our code we use the value `0.001`. It is there partially to make sure we don't try to divide by zero, but it also acts to increase the variance slightly for each batch. \n\nWhy increase the variance? Statistically, this makes sense because even though we are normalizing one batch at a time, we are also trying to estimate the population distribution – the total training set, which is itself an estimate of the larger population of inputs your network wants to handle. The variance of a population is higher than the variance for any sample taken from that population, so increasing the variance a little bit for each batch helps take that into account. \n\nAt this point, we have a normalized value, represented as $\\hat{x_i}$. But rather than use it directly, we multiply it by a gamma value, $\\gamma$, and then add a beta value, $\\beta$. Both $\\gamma$ and $\\beta$ are learnable parameters of the network and serve to scale and shift the normalized value, respectively. Because they are learnable just like weights, they give your network some extra knobs to tweak during training to help it learn the function it is trying to approximate. \n\n$$\ny_i \\leftarrow \\gamma \\hat{x_i} + \\beta\n$$\n\nWe now have the final batch-normalized output of our layer, which we would then pass to a non-linear activation function like sigmoid, tanh, ReLU, Leaky ReLU, etc. In the original batch normalization paper (linked in the beginning of this notebook), they mention that there might be cases when you'd want to perform the batch normalization _after_ the non-linearity instead of before, but it is difficult to find any uses like that in practice.\n\nIn `NeuralNet`'s implementation of `fully_connected`, all of this math is hidden inside the following line, where `linear_output` serves as the $x_i$ from the equations:\n```python\nbatch_normalized_output = tf.layers.batch_normalization(linear_output, training=self.is_training)\n```\nThe next section shows you how to implement the math directly. 
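\n\nBefore moving on, if you'd like to check your understanding of the four equations above, here is a minimal NumPy sketch of the same forward calculation (the function and variable names here are just for illustration and are not part of the network code in this notebook):\n```python\nimport numpy as np\n\ndef batch_norm_forward(x, gamma, beta, epsilon=0.001):\n    # x has shape (batch_size, num_features); statistics are computed per feature\n    mu = x.mean(axis=0)                        # mini-batch mean\n    var = x.var(axis=0)                        # mini-batch variance (mean squared deviation)\n    x_hat = (x - mu) / np.sqrt(var + epsilon)  # normalize\n    return gamma * x_hat + beta                # scale and shift\n\nx = np.random.normal(loc=3.0, scale=2.0, size=(60, 4))\ny = batch_norm_forward(x, gamma=np.ones(4), beta=np.zeros(4))\nprint(y.mean(axis=0), y.std(axis=0))  # means near 0, standard deviations near 1\n```\nWith `gamma` initialized to ones and `beta` to zeros, each output feature starts out with roughly zero mean and unit variance, exactly as the equations suggest. 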
", "_____no_output_____" ], [ "### Batch normalization without the `tf.layers` package\n\nOur implementation of batch normalization in `NeuralNet` uses the high-level abstraction [tf.layers.batch_normalization](https://www.tensorflow.org/api_docs/python/tf/layers/batch_normalization), found in TensorFlow's [`tf.layers`](https://www.tensorflow.org/api_docs/python/tf/layers) package.\n\nHowever, if you would like to implement batch normalization at a lower level, the following code shows you how.\nIt uses [tf.nn.batch_normalization](https://www.tensorflow.org/api_docs/python/tf/nn/batch_normalization) from TensorFlow's [neural net (nn)](https://www.tensorflow.org/api_docs/python/tf/nn) package.\n\n**1)** You can replace the `fully_connected` function in the `NeuralNet` class with the below code and everything in `NeuralNet` will still work like it did before.", "_____no_output_____" ] ], [ [ "def fully_connected(self, layer_in, initial_weights, activation_fn=None):\n \"\"\"\n Creates a standard, fully connected layer. Its number of inputs and outputs will be\n defined by the shape of `initial_weights`, and its starting weight values will be\n taken directly from that same parameter. If `self.use_batch_norm` is True, this\n layer will include batch normalization, otherwise it will not. \n \n :param layer_in: Tensor\n The Tensor that feeds into this layer. It's either the input to the network or the output\n of a previous layer.\n :param initial_weights: NumPy array or Tensor\n Initial values for this layer's weights. The shape defines the number of nodes in the layer.\n e.g. Passing in 3 matrix of shape (784, 256) would create a layer with 784 inputs and 256 \n outputs. \n :param activation_fn: Callable or None (default None)\n The non-linearity used for the output of the layer. If None, this layer will not include \n batch normalization, regardless of the value of `self.use_batch_norm`. \n e.g. Pass tf.nn.relu to use ReLU activations on your hidden layers.\n \"\"\"\n if self.use_batch_norm and activation_fn:\n # Batch normalization uses weights as usual, but does NOT add a bias term. This is because \n # its calculations include gamma and beta variables that make the bias term unnecessary.\n weights = tf.Variable(initial_weights)\n linear_output = tf.matmul(layer_in, weights)\n\n num_out_nodes = initial_weights.shape[-1]\n\n # Batch normalization adds additional trainable variables: \n # gamma (for scaling) and beta (for shifting).\n gamma = tf.Variable(tf.ones([num_out_nodes]))\n beta = tf.Variable(tf.zeros([num_out_nodes]))\n\n # These variables will store the mean and variance for this layer over the entire training set,\n # which we assume represents the general population distribution.\n # By setting `trainable=False`, we tell TensorFlow not to modify these variables during\n # back propagation. Instead, we will assign values to these variables ourselves. 
\n pop_mean = tf.Variable(tf.zeros([num_out_nodes]), trainable=False)\n pop_variance = tf.Variable(tf.ones([num_out_nodes]), trainable=False)\n\n # Batch normalization requires a small constant epsilon, used to ensure we don't divide by zero.\n # This is the default value TensorFlow uses.\n epsilon = 1e-3\n\n def batch_norm_training():\n # Calculate the mean and variance for the data coming out of this layer's linear-combination step.\n # The [0] defines an array of axes to calculate over.\n batch_mean, batch_variance = tf.nn.moments(linear_output, [0])\n\n # Calculate a moving average of the training data's mean and variance while training.\n # These will be used during inference.\n # Decay should be some number less than 1. tf.layers.batch_normalization uses the parameter\n # \"momentum\" to accomplish this and defaults it to 0.99\n decay = 0.99\n train_mean = tf.assign(pop_mean, pop_mean * decay + batch_mean * (1 - decay))\n train_variance = tf.assign(pop_variance, pop_variance * decay + batch_variance * (1 - decay))\n\n # The 'tf.control_dependencies' context tells TensorFlow it must calculate 'train_mean' \n # and 'train_variance' before it calculates the 'tf.nn.batch_normalization' layer.\n # This is necessary because those two operations are not actually in the graph\n # connecting the linear_output and batch_normalization layers, \n # so TensorFlow would otherwise just skip them.\n with tf.control_dependencies([train_mean, train_variance]):\n return tf.nn.batch_normalization(linear_output, batch_mean, batch_variance, beta, gamma, epsilon)\n \n def batch_norm_inference():\n # During inference, use our estimated population mean and variance to normalize the layer\n return tf.nn.batch_normalization(linear_output, pop_mean, pop_variance, beta, gamma, epsilon)\n\n # Use `tf.cond` as a sort of if-check. When self.is_training is True, TensorFlow will execute \n # the operation returned from `batch_norm_training`; otherwise it will execute the graph\n # operation returned from `batch_norm_inference`.\n batch_normalized_output = tf.cond(self.is_training, batch_norm_training, batch_norm_inference)\n \n # Pass the batch-normalized layer output through the activation function.\n # The literature states there may be cases where you want to perform the batch normalization *after*\n # the activation function, but it is difficult to find any uses of that in practice.\n return activation_fn(batch_normalized_output)\n else:\n # When not using batch normalization, create a standard layer that multiplies\n # the inputs and weights, adds a bias, and optionally passes the result \n # through an activation function. \n weights = tf.Variable(initial_weights)\n biases = tf.Variable(tf.zeros([initial_weights.shape[-1]]))\n linear_output = tf.add(tf.matmul(layer_in, weights), biases)\n return linear_output if not activation_fn else activation_fn(linear_output)\n", "_____no_output_____" ] ], [ [ "This version of `fully_connected` is much longer than the original, but once again has extensive comments to help you understand it. Here are some important points:\n\n1. It explicitly creates variables to store gamma, beta, and the population mean and variance. These were all handled for us in the previous version of the function.\n2. It initializes gamma to one and beta to zero, so they start out having no effect in this calculation: $y_i \\leftarrow \\gamma \\hat{x_i} + \\beta$. 
However, during training the network learns the best values for these variables using back propagation, just like networks normally do with weights.\n3. Unlike gamma and beta, the variables for population mean and variance are marked as untrainable. That tells TensorFlow not to modify them during back propagation. Instead, the lines that call `tf.assign` are used to update these variables directly.\n4. TensorFlow won't automatically run the `tf.assign` operations during training because it only evaluates operations that are required based on the connections it finds in the graph. To get around that, we add this line: `with tf.control_dependencies([train_mean, train_variance]):` before we run the normalization operation. This tells TensorFlow it needs to run those operations before running anything inside the `with` block. \n5. The actual normalization math is still mostly hidden from us, this time using [`tf.nn.batch_normalization`](https://www.tensorflow.org/api_docs/python/tf/nn/batch_normalization).\n6. `tf.nn.batch_normalization` does not have a `training` parameter like `tf.layers.batch_normalization` did. However, we still need to handle training and inference differently, so we run different code in each case using the [`tf.cond`](https://www.tensorflow.org/api_docs/python/tf/cond) operation.\n7. We use the [`tf.nn.moments`](https://www.tensorflow.org/api_docs/python/tf/nn/moments) function to calculate the batch mean and variance.", "_____no_output_____" ], [ "**2)** The current version of the `train` function in `NeuralNet` will work fine with this new version of `fully_connected`. However, it uses these lines to ensure population statistics are updated when using batch normalization: \n```python\nif self.use_batch_norm:\n with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):\n train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)\nelse:\n train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)\n```\nOur new version of `fully_connected` handles updating the population statistics directly. That means you can also simplify your code by replacing the above `if`/`else` condition with just this line:\n```python\ntrain_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)\n```", "_____no_output_____" ], [ "**3)** And just in case you want to implement every detail from scratch, you can replace this line in `batch_norm_training`:\n\n```python\nreturn tf.nn.batch_normalization(linear_output, batch_mean, batch_variance, beta, gamma, epsilon)\n```\nwith these lines:\n```python\nnormalized_linear_output = (linear_output - batch_mean) / tf.sqrt(batch_variance + epsilon)\nreturn gamma * normalized_linear_output + beta\n```\nAnd replace this line in `batch_norm_inference`:\n```python\nreturn tf.nn.batch_normalization(linear_output, pop_mean, pop_variance, beta, gamma, epsilon)\n```\nwith these lines:\n```python\nnormalized_linear_output = (linear_output - pop_mean) / tf.sqrt(pop_variance + epsilon)\nreturn gamma * normalized_linear_output + beta\n```\n\nAs you can see in each of the above substitutions, the two lines of replacement code simply implement the following two equations directly. 
The first line calculates the following equation, with `linear_output` representing $x_i$ and `normalized_linear_output` representing $\\hat{x_i}$: \n\n$$\n\\hat{x_i} \\leftarrow \\frac{x_i - \\mu_B}{\\sqrt{\\sigma_{B}^{2} + \\epsilon}}\n$$\n\nAnd the second line is a direct translation of the following equation:\n\n$$\ny_i \\leftarrow \\gamma \\hat{x_i} + \\beta\n$$\n\nWe still use the `tf.nn.moments` operation to implement the other two equations from earlier – the ones that calculate the batch mean and variance used in the normalization step. If you really wanted to do everything from scratch, you could replace that line, too, but we'll leave that to you. \n\n## Why the difference between training and inference?\n\nIn the original function that uses `tf.layers.batch_normalization`, we tell the layer whether or not the network is training by passing a value for its `training` parameter, like so:\n```python\nbatch_normalized_output = tf.layers.batch_normalization(linear_output, training=self.is_training)\n```\nAnd that forces us to provide a value for `self.is_training` in our `feed_dict`, like we do in this example from `NeuralNet`'s `train` function:\n```python\nsession.run(train_step, feed_dict={self.input_layer: batch_xs, \n labels: batch_ys, \n self.is_training: True})\n```\nIf you looked at the [low-level implementation](#implementation_2), you probably noticed that, just like with `tf.layers.batch_normalization`, we need to do slightly different things during training and inference. But why is that?\n\nFirst, let's look at what happens when we don't. The following function is similar to `train_and_test` from earlier, but this time we are only testing one network and instead of plotting its accuracy, we perform 200 predictions on test inputs, 1 input at a time. We can use the `test_training_accuracy` parameter to test the network in training or inference modes (the equivalent of passing `True` or `False` to the `feed_dict` for `is_training`).", "_____no_output_____" ] ], [ [ "def batch_norm_test(test_training_accuracy):\n \"\"\"\n :param test_training_accuracy: bool\n If True, perform inference with batch normalization using batch mean and variance;\n if False, perform inference with batch normalization using estimated population mean and variance.\n \"\"\"\n\n weights = [np.random.normal(size=(784,100), scale=0.05).astype(np.float32),\n np.random.normal(size=(100,100), scale=0.05).astype(np.float32),\n np.random.normal(size=(100,100), scale=0.05).astype(np.float32),\n np.random.normal(size=(100,10), scale=0.05).astype(np.float32)\n ]\n\n tf.reset_default_graph()\n\n # Train the model\n bn = NeuralNet(weights, tf.nn.relu, True)\n \n # First train the network\n with tf.Session() as sess:\n tf.global_variables_initializer().run()\n\n bn.train(sess, 0.01, 2000, 2000)\n\n bn.test(sess, test_training_accuracy=test_training_accuracy, include_individual_predictions=True)", "_____no_output_____" ] ], [ [ "In the following cell, we pass `True` for `test_training_accuracy`, which performs the same batch normalization that we normally perform **during training**.", "_____no_output_____" ] ], [ [ "batch_norm_test(True)", "100%|██████████| 2000/2000 [00:03<00:00, 581.25it/s]\n" ] ], [ [ "As you can see, the network guessed the same value every time! But why? Because during training, a network with batch normalization adjusts the values at each layer based on the mean and variance **of that batch**. 
The \"batches\" we are using for these predictions have a single input each time, so their values _are_ the means, and their variances will always be 0. That means the network will normalize the values at any layer to zero. (Review the equations from before to see why a value that is equal to the mean would always normalize to zero.) So we end up with the same result for every input we give the network, because it's the value the network produces when it applies its learned weights to zeros at every layer. \n\n**Note:** If you re-run that cell, you might get a different value from what we showed. That's because the specific weights the network learns will be different every time. But whatever value it is, it should be the same for all 200 predictions.\n\nTo overcome this problem, the network does not just normalize the batch at each layer. It also maintains an estimate of the mean and variance for the entire population. So when we perform inference, instead of letting it \"normalize\" all the values using their own means and variance, it uses the estimates of the population mean and variance that it calculated while training. \n\nSo in the following example, we pass `False` for `test_training_accuracy`, which tells the network that we want it to perform inference with the population statistics it calculated during training.", "_____no_output_____" ] ], [ [ "batch_norm_test(False)", "100%|██████████| 2000/2000 [00:03<00:00, 580.08it/s]\n" ] ], [ [ "As you can see, now that we're using the estimated population mean and variance, we get a 97% accuracy. That means it guessed correctly on 194 of the 200 samples – not too bad for something that trained in under 4 seconds. :)\n\n# Considerations for other network types\n\nThis notebook demonstrates batch normalization in a standard neural network with fully connected layers. You can also use batch normalization in other types of networks, but there are some special considerations.\n\n### ConvNets\n\nConvolution layers consist of multiple feature maps. (Remember, the depth of a convolutional layer refers to its number of feature maps.) And the weights for each feature map are shared across all the inputs that feed into the layer. Because of these differences, batch normalizing convolutional layers requires batch/population mean and variance per feature map rather than per node in the layer.\n\nWhen using `tf.layers.batch_normalization`, be sure to pay attention to the order of your convolutional dimensions.\nSpecifically, you may want to set a different value for the `axis` parameter if your layers have their channels first instead of last. \n\nIn our low-level implementations, we used the following line to calculate the batch mean and variance:\n```python\nbatch_mean, batch_variance = tf.nn.moments(linear_output, [0])\n```\nIf we were dealing with a convolutional layer, we would calculate the mean and variance with a line like this instead:\n```python\nbatch_mean, batch_variance = tf.nn.moments(conv_layer, [0,1,2], keep_dims=False)\n```\nThe second parameter, `[0,1,2]`, tells TensorFlow to calculate the batch mean and variance over each feature map. (The three axes are the batch, height, and width.) And setting `keep_dims` to `False` tells `tf.nn.moments` not to return values with the same size as the inputs.
Specifically, it ensures we get one mean/variance pair per feature map.\n\n### RNNs\n\nBatch normalization can work with recurrent neural networks, too, as shown in the 2016 paper [Recurrent Batch Normalization](https://arxiv.org/abs/1603.09025). It's a bit more work to implement, but basically involves calculating the means and variances per time step instead of per layer. You can find an example where someone extended `tf.nn.rnn_cell.RNNCell` to include batch normalization in [this GitHub repo](https://gist.github.com/spitis/27ab7d2a30bbaf5ef431b4a02194ac60).", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
cb9d351a827af6671cde46e3d9d6ac2907eb6683
39,386
ipynb
Jupyter Notebook
notebooks/Convert to Long.ipynb
rizwandel/Master-Thesis-Multilingual-Longformer
31fd783cfbffb873da655635a0cd5726e82253a0
[ "MIT" ]
14
2021-02-19T21:53:31.000Z
2022-03-30T10:34:59.000Z
notebooks/Convert to Long.ipynb
rizwandel/Master-Thesis-Multilingual-Longformer
31fd783cfbffb873da655635a0cd5726e82253a0
[ "MIT" ]
5
2021-04-29T15:59:16.000Z
2021-08-24T08:02:08.000Z
notebooks/Convert to Long.ipynb
rizwandel/Master-Thesis-Multilingual-Longformer
31fd783cfbffb873da655635a0cd5726e82253a0
[ "MIT" ]
3
2021-08-20T07:13:46.000Z
2021-10-13T14:11:57.000Z
76.925781
1,611
0.669553
[ [ [ "import logging\nimport os\nimport math\nfrom dataclasses import dataclass, field\n\nimport copy # for deep copy\n\nimport torch\nfrom torch import nn\nfrom transformers import RobertaForMaskedLM, RobertaTokenizerFast, TextDataset, DataCollatorForLanguageModeling, Trainer\nfrom transformers import TrainingArguments, HfArgumentParser\nfrom transformers.modeling_longformer import LongformerSelfAttention\n\nlogger = logging.getLogger(__name__)\nlogging.basicConfig(level=logging.INFO)", "_____no_output_____" ], [ "class RobertaLongSelfAttention(LongformerSelfAttention): \n def forward(\n self,\n hidden_states, attention_mask=None, is_index_masked=None, is_index_global_attn=None, is_global_attn=None\n ):\n return super().forward(hidden_states, attention_mask=attention_mask)\n\nclass RobertaLongForMaskedLM(RobertaForMaskedLM):\n def __init__(self, config):\n super().__init__(config)\n for i, layer in enumerate(self.roberta.encoder.layer):\n # replace the `modeling_bert.BertSelfAttention` object with `LongformerSelfAttention`\n layer.attention.self = RobertaLongSelfAttention(config, layer_id=i)", "_____no_output_____" ], [ "def create_long_model(save_model_to, attention_window, max_pos):\n model = RobertaForMaskedLM.from_pretrained('roberta-base')\n tokenizer = RobertaTokenizerFast.from_pretrained('roberta-base', model_max_length=max_pos)\n config = model.config\n\n # extend position embeddings\n tokenizer.model_max_length = max_pos\n tokenizer.init_kwargs['model_max_length'] = max_pos\n current_max_pos, embed_size = model.roberta.embeddings.position_embeddings.weight.shape\n max_pos += 2 # NOTE: RoBERTa has positions 0,1 reserved, so embedding size is max position + 2\n config.max_position_embeddings = max_pos\n assert max_pos > current_max_pos\n # allocate a larger position embedding matrix\n new_pos_embed = model.roberta.embeddings.position_embeddings.weight.new_empty(max_pos, embed_size)\n # copy position embeddings over and over to initialize the new position embeddings\n k = 2\n step = current_max_pos - 2\n while k < max_pos - 1:\n new_pos_embed[k:(k + step)] = model.roberta.embeddings.position_embeddings.weight[2:]\n k += step\n \n model.roberta.embeddings.position_embeddings.weight.data = new_pos_embed\n model.roberta.embeddings.position_ids.data = torch.tensor([i for i in range(max_pos)]).reshape(1, max_pos)\n \"\"\"\n model.roberta.embeddings.position_embeddings.weight.data = new_pos_embed # add after this line\n model.roberta.embeddings.position_embeddings.num_embeddings = len(new_pos_embed.data)\n # first, check that model.roberta.embeddings.position_embeddings.weight.data.shape is correct — has to be 4096 (default) of your desired length\n model.roberta.embeddings.position_ids = torch.arange(0, model.roberta.embeddings.position_embeddings.num_embeddings)[None]\n \"\"\"\n \n \n # replace the `modeling_bert.BertSelfAttention` object with `LongformerSelfAttention`\n config.attention_window = [attention_window] * config.num_hidden_layers\n for i, layer in enumerate(model.roberta.encoder.layer):\n longformer_self_attn = LongformerSelfAttention(config, layer_id=i)\n longformer_self_attn.query = copy.deepcopy(layer.attention.self.query)\n longformer_self_attn.key = copy.deepcopy(layer.attention.self.key)\n longformer_self_attn.value = copy.deepcopy(layer.attention.self.value)\n\n longformer_self_attn.query_global = copy.deepcopy(layer.attention.self.query)\n longformer_self_attn.key_global = copy.deepcopy(layer.attention.self.key)\n longformer_self_attn.value_global = 
copy.deepcopy(layer.attention.self.value)\n\n \"\"\"\n longformer_self_attn = LongformerSelfAttention(config, layer_id=i)\n longformer_self_attn.query = layer.attention.self.query\n longformer_self_attn.key = layer.attention.self.key\n longformer_self_attn.value = layer.attention.self.value\n\n longformer_self_attn.query_global = layer.attention.self.query\n longformer_self_attn.key_global = layer.attention.self.key\n longformer_self_attn.value_global = layer.attention.self.value\n \"\"\"\n\n layer.attention.self = longformer_self_attn\n\n logger.info(f'saving model to {save_model_to}')\n model.save_pretrained(save_model_to)\n tokenizer.save_pretrained(save_model_to)\n return model, tokenizer", "_____no_output_____" ], [ "def copy_proj_layers(model):\n for i, layer in enumerate(model.roberta.encoder.layer):\n layer.attention.self.query_global = layer.attention.self.query\n layer.attention.self.key_global = layer.attention.self.key\n layer.attention.self.value_global = layer.attention.self.value\n return model", "_____no_output_____" ], [ "def pretrain_and_evaluate(args, model, tokenizer, eval_only, model_path):\n val_dataset = TextDataset(tokenizer=tokenizer,\n file_path=args.val_datapath,\n block_size=tokenizer.max_len)\n if eval_only:\n train_dataset = val_dataset\n else:\n logger.info(f'Loading and tokenizing training data is usually slow: {args.train_datapath}')\n train_dataset = TextDataset(tokenizer=tokenizer,\n file_path=args.train_datapath,\n block_size=tokenizer.max_len)\n\n data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=True, mlm_probability=0.15)\n trainer = Trainer(model=model, args=args, data_collator=data_collator,\n train_dataset=train_dataset, eval_dataset=val_dataset, prediction_loss_only=True)\n\n eval_loss = trainer.evaluate()\n eval_loss = eval_loss['eval_loss']\n logger.info(f'Initial eval bpc: {eval_loss/math.log(2)}')\n \n if not eval_only:\n trainer.train(model_path=model_path)\n trainer.save_model()\n\n eval_loss = trainer.evaluate()\n eval_loss = eval_loss['eval_loss']\n logger.info(f'Eval bpc after pretraining: {eval_loss/math.log(2)}')", "_____no_output_____" ], [ "@dataclass\nclass ModelArgs:\n attention_window: int = field(default=512, metadata={\"help\": \"Size of attention window\"})\n max_pos: int = field(default=4096, metadata={\"help\": \"Maximum position\"})\n\nparser = HfArgumentParser((TrainingArguments, ModelArgs,))\n\n\ntraining_args, model_args = parser.parse_args_into_dataclasses(look_for_args_file=False, args=[\n '--output_dir', 'tmp',\n '--warmup_steps', '500',\n '--learning_rate', '0.00003',\n '--weight_decay', '0.01',\n '--adam_epsilon', '1e-6',\n '--max_steps', '3000',\n '--logging_steps', '500',\n '--save_steps', '500',\n '--max_grad_norm', '5.0',\n '--per_gpu_eval_batch_size', '8',\n '--per_gpu_train_batch_size', '2', # 32GB gpu with fp32\n '--gradient_accumulation_steps', '32',\n '--evaluate_during_training',\n '--do_train',\n '--do_eval',\n])\ntraining_args.val_datapath = '/workspace/data/wikitext-103-raw/wiki.valid.raw'\ntraining_args.train_datapath = '/workspace/data/wikitext-103-raw/wiki.train.raw'\n\n# Choose GPU\nimport os\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"", "_____no_output_____" ], [ "roberta_base = RobertaForMaskedLM.from_pretrained('roberta-base')\nroberta_base_tokenizer = RobertaTokenizerFast.from_pretrained('roberta-base')\nlogger.info('Evaluating roberta-base (seqlen: 512) for reference ...')\npretrain_and_evaluate(training_args, roberta_base, roberta_base_tokenizer, eval_only=True, 
model_path=None)", "Some weights of RobertaForMaskedLM were not initialized from the model checkpoint at roberta-base and are newly initialized: ['lm_head.decoder.bias']\nYou should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\nINFO:__main__:Evaluating roberta-base (seqlen: 512) for reference ...\nINFO:filelock:Lock 140125418510600 acquired on /workspace/data/wikitext-103-raw/cached_lm_RobertaTokenizerFast_510_wiki.valid.raw.lock\nINFO:filelock:Lock 140125418510600 released on /workspace/data/wikitext-103-raw/cached_lm_RobertaTokenizerFast_510_wiki.valid.raw.lock\nUsing deprecated `--per_gpu_eval_batch_size` argument which will be removed in a future version. Using `--per_device_eval_batch_size` is preferred.\n" ], [ "model_path = f'{training_args.output_dir}/roberta-base-{model_args.max_pos}'\nif not os.path.exists(model_path):\n os.makedirs(model_path)\n\nlogger.info(f'Converting roberta-base into roberta-base-{model_args.max_pos}')\nmodel, tokenizer = create_long_model(\n save_model_to=model_path, attention_window=model_args.attention_window, max_pos=model_args.max_pos)", "INFO:__main__:Converting roberta-base into roberta-base-4096\nSome weights of RobertaForMaskedLM were not initialized from the model checkpoint at roberta-base and are newly initialized: ['lm_head.decoder.bias']\nYou should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\nINFO:__main__:saving model to tmp/roberta-base-4096\n" ], [ "\"\"\"\nSelf = \n \n (query): Linear(in_features=768, out_features=768, bias=True)\n (key): Linear(in_features=768, out_features=768, bias=True)\n (value): Linear(in_features=768, out_features=768, bias=True)\n (query_global): Linear(in_features=768, out_features=768, bias=True)\n (key_global): Linear(in_features=768, out_features=768, bias=True)\n (value_global): Linear(in_features=768, out_features=768, bias=True)\n\"\"\"", "_____no_output_____" ], [ "logger.info(f'Loading the model from {model_path}')\ntokenizer = RobertaTokenizerFast.from_pretrained(model_path)\nmodel = RobertaLongForMaskedLM.from_pretrained(model_path)", "INFO:__main__:Loading the model from tmp/roberta-base-4096\n" ], [ "logger.info(f'Pretraining roberta-base-{model_args.max_pos} ... ')\n\ntraining_args.max_steps = 3 ## <<<<<<<<<<<<<<<<<<<<<<<< REMOVE THIS <<<<<<<<<<<<<<<<<<<<<<<<\n%magic\npretrain_and_evaluate(training_args, model, tokenizer, eval_only=False, model_path=training_args.output_dir)", "INFO:__main__:Pretraining roberta-base-4096 ... \nINFO:filelock:Lock 140124002609248 acquired on /workspace/data/wikitext-103-raw/cached_lm_RobertaTokenizerFast_4094_wiki.valid.raw.lock\nINFO:filelock:Lock 140124002609248 released on /workspace/data/wikitext-103-raw/cached_lm_RobertaTokenizerFast_4094_wiki.valid.raw.lock\nINFO:__main__:Loading and tokenizing training data is usually slow: /workspace/data/wikitext-103-raw/wiki.train.raw\nINFO:filelock:Lock 140125403321344 acquired on /workspace/data/wikitext-103-raw/cached_lm_RobertaTokenizerFast_4094_wiki.train.raw.lock\nINFO:filelock:Lock 140125403321344 released on /workspace/data/wikitext-103-raw/cached_lm_RobertaTokenizerFast_4094_wiki.train.raw.lock\nUsing deprecated `--per_gpu_eval_batch_size` argument which will be removed in a future version. Using `--per_device_eval_batch_size` is preferred.\n" ], [ "logger.info(f'Copying local projection layers into global projection layers ... 
')\nmodel = copy_proj_layers(model)\nlogger.info(f'Saving model to {model_path}')\nmodel.save_pretrained(model_path)\n", "_____no_output_____" ], [ "logger.info(f'Loading the model from {model_path}')\ntokenizer = RobertaTokenizerFast.from_pretrained(model_path)\nmodel = RobertaLongForMaskedLM.from_pretrained(model_path)", "_____no_output_____" ], [ "import transformers", "_____no_output_____" ], [ "transformers.__version__", "_____no_output_____" ], [ "model.roberta.embeddings", "_____no_output_____" ], [ "model.roberta.embeddings.position_embeddings", "_____no_output_____" ], [ "model.roberta.embeddings.position_embeddings.num_embeddings", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb9d39658a847f600e2ed04c31b3c55993a95e80
8,964
ipynb
Jupyter Notebook
EXP 1/PYTHON_CSV.ipynb
s-1-n-t-h/DATA-EXPLORATION-LAB
284fd8a239077b9ad68518e09b25fe2b3140f3af
[ "MIT" ]
null
null
null
EXP 1/PYTHON_CSV.ipynb
s-1-n-t-h/DATA-EXPLORATION-LAB
284fd8a239077b9ad68518e09b25fe2b3140f3af
[ "MIT" ]
null
null
null
EXP 1/PYTHON_CSV.ipynb
s-1-n-t-h/DATA-EXPLORATION-LAB
284fd8a239077b9ad68518e09b25fe2b3140f3af
[ "MIT" ]
null
null
null
35.43083
509
0.367247
[ [ [ "import pandas as pd", "_____no_output_____" ], [ "dict1 = {\n \"name\":['harry','rohan','skillf','shubh'],\n \"marks\":[12,34,24,17],\n \"city\":['rampur','kolkata','barelly','antarctica']\n}", "_____no_output_____" ], [ "df = pd.DataFrame(dict1)", "_____no_output_____" ], [ "df.to_csv('friends_index_false.csv',index=False) ", "_____no_output_____" ], [ "DATA = pd.read_csv('friends_index_false.csv')", "_____no_output_____" ], [ "DATA", "_____no_output_____" ], [ "", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code" ] ]
cb9d66d43247a9932a93a2199e708d02ab42ce65
13,113
ipynb
Jupyter Notebook
notebooks/7.ipynb
noahbjohnson/senior-project
6353b7f5aeab6d7bcb3457e77dd1d816af7ee240
[ "MIT" ]
null
null
null
notebooks/7.ipynb
noahbjohnson/senior-project
6353b7f5aeab6d7bcb3457e77dd1d816af7ee240
[ "MIT" ]
1
2019-04-29T13:13:59.000Z
2019-04-30T03:06:18.000Z
notebooks/7.ipynb
noahbjohnson/senior-project
6353b7f5aeab6d7bcb3457e77dd1d816af7ee240
[ "MIT" ]
null
null
null
61.853774
228
0.30527
[ [ [ "# Inequality Data Processing (WIID)\n\n## Data Dictionary\n\n| Variable | Definition |\n|---|---|\n| id | Identifier |\n| country | Country/area |\n| c3 | 3-digit country code in ISO 3166-1 alpha-3 format |\n| c2 | 2-digit country code in ISO 3166-1 alpha-2 format |\n| year | Year. Note that when a survey continues for more than a year, the year in which it finished is used |\n| gini_reported | Gini coefficient as reported by the source (in most cases based on microdata, in some older observations estimates derive from grouped data) |\n| q1-q5 | Quintile group shares of resource |\n| d1-d10 | Decile group shares of resource |\n| bottom5 and top5 | Bottom five and top five percent group shares of resource |\n| resource | Resource concept |\n| resource_detailed | Detailed resource concept |\n| scale | Equivalence scale |\n| scale_detailed | Detailed equivalence scale |\n| sharing_unit | Income sharing unit/statistical unit |\n| reference_unit | Unit of analysis, indicates whether the data has been weighted with a person or a household weight |\n| areacovr | Area coverage. The land area which was included in the original sample surveys etc. |\n| areacovr_detailed | Detailed area coverage |\n| popcovr | Population coverage. The population covered in the sample surveys in the land area (all, rural, urban etc.) which was included |\n| popcovr_detailed | Detailed population coverage, including age coverage information in certain cases |\n| region_un | Regional grouping based on United Nations geoscheme |\n| region_un_sub | Sub-regional grouping based on United Nations geoscheme |\n| region_wb | Regional grouping based on World Bank classification |\n| eu | Current EU member state |\n| oecd | Current OECD member state |\n| incomegroup | World Bank classification by country income |\n| mean | Survey mean given with the same underlying definitions as the Gini coefficient and the share data |\n| median | Survey median given with the same underlying definitions as the Gini coefficient and the share data |\n| currency | Currency for the mean and median values. If the reference is US$2011PPP it means that the currency is in 2011 US dollars per month, with purchasing power parity applied to it. |\n| reference_period | Time period for measuring mean and median values |\n| exchangerate | Conversion rate from local currency units (LCU) to United States Dollars (USD) |\n| mean_usd | Mean measure in United States Dollar (USD) |\n| median_usd | Median measure in United States Dollar (USD) |\n| gdp_ppp_pc_usd2011 | Gross Domestic Product (GDP) is converted to United States Dollars (USD) using purchasing power parity rates and divided by total population. 
Data are in constant 2011 United States Dollar (USD) |\n| population | Population of countries from the UN population prospects |\n| revision | Indicates the time of the revision when the observation was included in the database |\n| quality | Quality assessment |\n| quality_score | Computed quality score |\n| source | Source type |\n| source_detailed | Source from which the observation was obtained |\n| source_comments | Additional source comments |\n| survey | Originating survey information |", "_____no_output_____" ] ], [ [ "import re\n\nimport numpy as np\nimport pandas as pd\nimport pycountry\n\n%matplotlib inline\n\npd.set_option('display.float_format', lambda x: '%.3f' % x)\npd.set_option('display.max_columns', None)", "_____no_output_____" ] ], [ [ "## Load The File", "_____no_output_____" ] ], [ [ "df = pd.read_excel('../data/external/Inequality/WIID/WIID_19Dec2018.xlsx')", "_____no_output_____" ] ], [ [ "## Standardize Country Codes", "_____no_output_____" ] ], [ [ "\"\"\" Only select rows with valid country codes\n\"\"\"\ncountry_locations = []\nfor country in df['c3']:\n try:\n pycountry.countries.lookup(country)\n country_locations.append(True)\n except LookupError:\n country_locations.append(False)\ndf = df[country_locations]", "_____no_output_____" ] ], [ [ "## Standardize Indexes", "_____no_output_____" ] ], [ [ "df.rename(\n {\n \"c3\": \"Country Code\",\n \"year\": \"Year\"\n },\n axis='columns',\n inplace=True)", "_____no_output_____" ] ], [ [ "## Remove out-of-scope rows (consumption/gross)", "_____no_output_____" ] ], [ [ "df = df[(df.resource != \"Consumption\")]", "_____no_output_____" ] ], [ [ "## Remove out-of-scope rows by year", "_____no_output_____" ] ], [ [ "df = df[df[\"Year\"] > 1994]\ndf = df[df[\"Year\"] < 2018]\n\ndf = df.groupby([\"Country Code\",\"Year\"]).mean()", "_____no_output_____" ] ], [ [ "## Save Data", "_____no_output_____" ] ], [ [ "df.to_pickle(\"../data/processed/Inequality_WIID.pickle\")", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb9d74a2e907b5622a33906cae41ed958894de98
32,958
ipynb
Jupyter Notebook
site/en-snapshot/lite/tutorials/model_maker_text_classification.ipynb
teijeong/docs-l10n
e4b0e97920d58232721999faced6d2c103c48415
[ "Apache-2.0" ]
null
null
null
site/en-snapshot/lite/tutorials/model_maker_text_classification.ipynb
teijeong/docs-l10n
e4b0e97920d58232721999faced6d2c103c48415
[ "Apache-2.0" ]
null
null
null
site/en-snapshot/lite/tutorials/model_maker_text_classification.ipynb
teijeong/docs-l10n
e4b0e97920d58232721999faced6d2c103c48415
[ "Apache-2.0" ]
null
null
null
37.926352
505
0.559803
[ [ [ "##### Copyright 2019 The TensorFlow Authors.", "_____no_output_____" ] ], [ [ "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "_____no_output_____" ] ], [ [ "# Text classification with TensorFlow Lite Model Maker", "_____no_output_____" ], [ "<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://www.tensorflow.org/lite/tutorials/model_maker_text_classification\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />View on TensorFlow.org</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/lite/g3doc/tutorials/model_maker_text_classification.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/g3doc/tutorials/model_maker_text_classification.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\n </td>\n <td>\n <a href=\"https://storage.googleapis.com/tensorflow_docs/tensorflow/tensorflow/lite/g3doc/tutorials/model_maker_text_classification.ipynb\"><img src=\"https://www.tensorflow.org/images/download_logo_32px.png\" />Download notebook</a>\n </td>\n</table>", "_____no_output_____" ], [ "The TensorFlow Lite Model Maker library simplifies the process of adapting and converting a TensorFlow model to particular input data when deploying this model for on-device ML applications.\n\nThis notebook shows an end-to-end example that utilizes the Model Maker library to illustrate the adaptation and conversion of a commonly-used text classification model to classify movie reviews on a mobile device. The text classification model classifies text into predefined categories. The inputs should be preprocessed text and the outputs are the probabilities of the categories. The dataset used in this tutorial consists of positive and negative movie reviews.", "_____no_output_____" ], [ "## Prerequisites\n", "_____no_output_____" ], [ "### Install the required packages\nTo run this example, install the required packages, including the Model Maker package from the [GitHub repo](https://github.com/tensorflow/examples/tree/master/tensorflow_examples/lite/model_maker).\n\n**If you run this notebook on Colab, you may see an error message about `tensorflowjs` and `tensorflow-hub` version incompatibility. 
It is safe to ignore this error as we do not use `tensorflowjs` in this workflow.**", "_____no_output_____" ] ], [ [ "!pip install -q tflite-model-maker", "_____no_output_____" ] ], [ [ "Import the required packages.", "_____no_output_____" ] ], [ [ "import numpy as np\nimport os\n\nfrom tflite_model_maker import configs\nfrom tflite_model_maker import ExportFormat\nfrom tflite_model_maker import model_spec\nfrom tflite_model_maker import text_classifier\nfrom tflite_model_maker import TextClassifierDataLoader\n\nimport tensorflow as tf\nassert tf.__version__.startswith('2')\ntf.get_logger().setLevel('ERROR')", "_____no_output_____" ] ], [ [ "### Download the sample training data.\n\nIn this tutorial, we will use the [SST-2](https://nlp.stanford.edu/sentiment/index.html) (Stanford Sentiment Treebank) which is one of the tasks in the [GLUE](https://gluebenchmark.com/) benchmark. It contains 67,349 movie reviews for training and 872 movie reviews for testing. The dataset has two classes: positive and negative movie reviews.", "_____no_output_____" ] ], [ [ "data_dir = tf.keras.utils.get_file(\n fname='SST-2.zip',\n origin='https://dl.fbaipublicfiles.com/glue/data/SST-2.zip',\n extract=True)\ndata_dir = os.path.join(os.path.dirname(data_dir), 'SST-2')", "_____no_output_____" ] ], [ [ "The SST-2 dataset is stored in TSV format. The only difference between TSV and CSV is that TSV uses a tab `\\t` character as its delimiter instead of a comma `,` in the CSV format.\n\nHere are the first 5 lines of the training dataset. label=0 means negative, label=1 means positive.\n\n| sentence | label |\n|---|---|\n| hide new secretions from the parental units | 0 |\n| contains no wit , only labored gags | 0 |\n| that loves its characters and communicates something rather beautiful about human nature | 1 |\n| remains utterly satisfied to remain the same throughout | 0 |\n| on the worst revenge-of-the-nerds clichés the filmmakers could dredge up | 0 |\n\nNext, we will load the dataset into a Pandas dataframe and change the current label names (`0` and `1`) to more human-readable ones (`negative` and `positive`) and use them for model training.\n", "_____no_output_____" ] ], [ [ "import pandas as pd\n\ndef replace_label(original_file, new_file):\n # Load the original file to pandas. We need to specify the separator as\n # '\\t' as the training data is stored in TSV format\n df = pd.read_csv(original_file, sep='\\t')\n\n # Define how we want to change the label name\n label_map = {0: 'negative', 1: 'positive'}\n\n # Execute the label change\n df.replace({'label': label_map}, inplace=True)\n\n # Write the updated dataset to a new file\n df.to_csv(new_file)\n\n# Replace the label name for both the training and test dataset. Then write the\n# updated CSV dataset to the current folder.\nreplace_label(os.path.join(data_dir, 'train.tsv'), 'train.csv')\nreplace_label(os.path.join(data_dir, 'dev.tsv'), 'dev.csv')", "_____no_output_____" ] ], [ [ "## Quickstart\n\nThere are five steps to train a text classification model:\n\n**Step 1. 
Choose a text classification model architecture.**\n\nHere we use the average word embedding model architecture, which will produce a small and fast model with decent accuracy.", "_____no_output_____" ] ], [ [ "spec = model_spec.get('average_word_vec')", "_____no_output_____" ] ], [ [ "Model Maker also supports other model architectures such as [BERT](https://arxiv.org/abs/1810.04805). If you are interested in learning about other architectures, see the [Choose a model architecture for Text Classifier](#scrollTo=kJ_B8fMDOhMR) section below.", "_____no_output_____" ], [ "**Step 2. Load the training and test data, then preprocess them according to a specific `model_spec`.**\n\nModel Maker can take input data in the CSV format. We will load the training and test dataset with the human-readable label names that were created earlier.\n\nEach model architecture requires input data to be processed in a particular way. `TextClassifierDataLoader` reads the requirement from `model_spec` and automatically executes the necessary preprocessing.", "_____no_output_____" ] ], [ [ "train_data = TextClassifierDataLoader.from_csv(\n filename='train.csv',\n text_column='sentence',\n label_column='label',\n model_spec=spec,\n is_training=True)\ntest_data = TextClassifierDataLoader.from_csv(\n filename='dev.csv',\n text_column='sentence',\n label_column='label',\n model_spec=spec,\n is_training=False)", "_____no_output_____" ] ], [ [ "**Step 3. Train the TensorFlow model with the training data.**\n\nThe average word embedding model uses `batch_size = 32` by default. Therefore you will see that it takes 2104 steps to go through the 67,349 sentences in the training dataset. We will train the model for 10 epochs, which means going through the training dataset 10 times.", "_____no_output_____" ] ], [ [ "model = text_classifier.create(train_data, model_spec=spec, epochs=10)", "_____no_output_____" ] ], [ [ "**Step 4. Evaluate the model with the test data.**\n\nAfter training the text classification model using the sentences in the training dataset, we will use the remaining 872 sentences in the test dataset to evaluate how the model performs against new data it has never seen before.\n\nAs the default batch size is 32, it will take 28 steps to go through the 872 sentences in the test dataset.", "_____no_output_____" ] ], [ [ "loss, acc = model.evaluate(test_data)", "_____no_output_____" ] ], [ [ "**Step 5. Export as a TensorFlow Lite model.**\n\nLet's export the text classification model that we have trained in the TensorFlow Lite format. We will specify the folder to export the model to.\n\nYou may see a warning that the `vocab.txt` file does not exist in the metadata, but it can be safely ignored.", "_____no_output_____" ] ], [ [ "model.export(export_dir='average_word_vec')", "_____no_output_____" ] ], [ [ "You can download the TensorFlow Lite model file using the left sidebar of Colab. 
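\n\nIf you want to sanity-check the exported file from within the notebook before downloading it, a minimal sketch using the TensorFlow Lite `Interpreter` (assuming the `average_word_vec/model.tflite` path produced by the export above) looks like this:\n```python\nimport tensorflow as tf\n\ninterpreter = tf.lite.Interpreter(model_path='average_word_vec/model.tflite')\ninterpreter.allocate_tensors()\n\n# Inspect the tensor shapes the model expects and produces\nprint(interpreter.get_input_details()[0]['shape'])\nprint(interpreter.get_output_details()[0]['shape'])\n```\nFor actual text predictions you would also need to tokenize inputs with the bundled vocab, which is exactly what the Task Library described below handles for you.\n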
Go into the `average_word_vec` folder as we specified in the `export_dir` parameter above, right-click on the `model.tflite` file and choose `Download` to download it to your local computer.\n\nThis model can be integrated into an Android or an iOS app using the [NLClassifier API](https://www.tensorflow.org/lite/inference_with_metadata/task_library/nl_classifier) of the [TensorFlow Lite Task Library](https://www.tensorflow.org/lite/inference_with_metadata/task_library/overview).\n\nSee the [TFLite Text Classification sample app](https://github.com/tensorflow/examples/blob/master/lite/examples/text_classification/android/lib_task_api/src/main/java/org/tensorflow/lite/examples/textclassification/client/TextClassificationClient.java#L54) for more details on how the model is used in a working app.\n\n*Note 1: Android Studio Model Binding does not support text classification yet so please use the TensorFlow Lite Task Library.*\n\n*Note 2: There is a `model.json` file in the same folder with the TFLite model. It contains the JSON representation of the [metadata](https://www.tensorflow.org/lite/convert/metadata) bundled inside the TensorFlow Lite model. Model metadata helps the TFLite Task Library know what the model does and how to pre-process/post-process data for the model. You don't need to download the `model.json` file as it is only for informational purposes and its content is already inside the TFLite file.*\n\n*Note 3: If you train a text classification model using MobileBERT or BERT-Base architecture, you will need to use [BertNLClassifier API](https://www.tensorflow.org/lite/inference_with_metadata/task_library/bert_nl_classifier) instead to integrate the trained model into a mobile app.*", "_____no_output_____" ], [ "The following sections walk through the example step by step to show more details.", "_____no_output_____" ], [ "## Choose a model architecture for Text Classifier\n\nEach `model_spec` object represents a specific model for the text classifier. TensorFlow Lite Model Maker currently supports [MobileBERT](https://arxiv.org/pdf/2004.02984.pdf), averaging word embeddings and [BERT-Base](https://arxiv.org/pdf/1810.04805.pdf) models.\n\n| Supported Model | Name of model_spec | Model Description | Model size |\n|---|---|---|---|\n| Averaging Word Embedding | 'average_word_vec' | Averaging text word embeddings with RELU activation. | <1MB |\n| MobileBERT | 'mobilebert_classifier' | 4.3x smaller and 5.5x faster than BERT-Base while achieving competitive results, suitable for on-device applications. | 25MB w/ quantization <br/> 100MB w/o quantization |\n| BERT-Base | 'bert_classifier' | Standard BERT model that is widely used in NLP tasks. | 300MB |\n\nIn the quick start, we have used the average word embedding model. Let's switch to [MobileBERT](https://arxiv.org/pdf/2004.02984.pdf) to train a model with higher accuracy.", "_____no_output_____" ] ], [ [ "mb_spec = model_spec.get('mobilebert_classifier')", "_____no_output_____" ] ], [ [ "## Load training data\n\nYou can upload your own dataset to work through this tutorial. 
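\n\nIf you do, the file should have the same shape as the CSV files created earlier: one text column and one label column (plus the index column that `pandas.to_csv` writes by default). For example, a hypothetical `my_reviews.csv` could be built like this:\n```python\nimport pandas as pd\n\n# Illustrative rows only - replace with your own data\ndemo = pd.DataFrame({\n    'sentence': ['a warm and witty crowd pleaser', 'the plot never gets going'],\n    'label': ['positive', 'negative'],\n})\ndemo.to_csv('my_reviews.csv')\n```\n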
Upload your dataset by using the left sidebar in Colab.\n\n<img src=\"https://storage.googleapis.com/download.tensorflow.org/models/tflite/screenshots/model_maker_text_classification.png\" alt=\"Upload File\" width=\"800\" hspace=\"100\">\n\nIf you prefer not to upload your dataset to the cloud, you can also locally run the library by following the [guide](https://github.com/tensorflow/examples/tree/master/tensorflow_examples/lite/model_maker).", "_____no_output_____" ], [ "To keep it simple, we will reuse the SST-2 dataset downloaded earlier. Let's use the `TextClassifierDataLoader.from_csv` method to load the data.\n\nPlease note that as we have changed the model architecture, we will need to reload the training and test dataset to apply the new preprocessing logic.", "_____no_output_____" ] ], [ [ "train_data = TextClassifierDataLoader.from_csv(\n filename='train.csv',\n text_column='sentence',\n label_column='label',\n model_spec=mb_spec,\n is_training=True)\ntest_data = TextClassifierDataLoader.from_csv(\n filename='dev.csv',\n text_column='sentence',\n label_column='label',\n model_spec=mb_spec,\n is_training=False)", "_____no_output_____" ] ], [ [ "The Model Maker library also supports the `from_folder()` method to load data. It assumes that the text data of the same class are in the same subdirectory and that the subfolder name is the class name. Each text file contains one movie review sample. The `class_labels` parameter is used to specify which subfolders to include.", "_____no_output_____" ], [ "## Train a TensorFlow Model\n\nTrain a text classification model using the training data.\n\n*Note: As MobileBERT is a complex model, each training epoch will take about 10 minutes on a Colab GPU. Please make sure that you are using a GPU runtime.*", "_____no_output_____" ] ], [ [ "model = text_classifier.create(train_data, model_spec=mb_spec, epochs=3)", "_____no_output_____" ] ], [ [ "Examine the detailed model structure.", "_____no_output_____" ] ], [ [ "model.summary()", "_____no_output_____" ] ], [ [ "## Evaluate the model\n\nEvaluate the model that we have just trained using the test data and measure the loss and accuracy value.", "_____no_output_____" ] ], [ [ "loss, acc = model.evaluate(test_data)", "_____no_output_____" ] ], [ [ "## Quantize the model\n\nIn many on-device ML applications, the model size is an important factor. Therefore, it is recommended that you quantize the model to make it smaller and potentially run faster. Model Maker automatically applies the recommended quantization scheme for each model architecture but you can customize the quantization config as below.", "_____no_output_____" ] ], [ [ "config = configs.QuantizationConfig.create_dynamic_range_quantization(optimizations=[tf.lite.Optimize.DEFAULT])\nconfig.experimental_new_quantizer = True", "_____no_output_____" ] ], [ [ "## Export as a TensorFlow Lite model\n\nConvert the trained model to TensorFlow Lite model format with [metadata](https://www.tensorflow.org/lite/convert/metadata) so that you can later use it in an on-device ML application. The label file and the vocab file are embedded in metadata. 
The default TFLite filename is `model.tflite`.", "_____no_output_____" ] ], [ [ "model.export(export_dir='mobilebert/', quantization_config=config)", "_____no_output_____" ] ], [ [ "The TensorFlow Lite model file can be integrated into a mobile app using the [BertNLClassifier API](https://www.tensorflow.org/lite/inference_with_metadata/task_library/bert_nl_classifier) in [TensorFlow Lite Task Library](https://www.tensorflow.org/lite/inference_with_metadata/task_library/overview). Please note that this is **different** from the `NLClassifier` API used to integrate the text classification trained with the average word vector model architecture.", "_____no_output_____" ], [ "The export formats can be one or a list of the following:\n\n* `ExportFormat.TFLITE`\n* `ExportFormat.LABEL`\n* `ExportFormat.VOCAB`\n* `ExportFormat.SAVED_MODEL`\n\nBy default, it exports only the TensorFlow Lite model file containing the model metadata. You can also choose to export other files related to the model for better examination. For instance, exporting only the label file and vocab file as follows:", "_____no_output_____" ] ], [ [ "model.export(export_dir='mobilebert/', export_format=[ExportFormat.LABEL, ExportFormat.VOCAB])", "_____no_output_____" ] ], [ [ "You can evaluate the TFLite model with the `evaluate_tflite` method to measure its accuracy. Converting the trained TensorFlow model to TFLite format and applying quantization can affect its accuracy, so it is recommended to evaluate the TFLite model accuracy before deployment.", "_____no_output_____" ] ], [ [ "accuracy = model.evaluate_tflite('mobilebert/model.tflite', test_data)\nprint('TFLite model accuracy: ', accuracy)", "_____no_output_____" ] ], [ [ "## Advanced Usage\n\nThe `create` function is the driver function that the Model Maker library uses to create models. The `model_spec` parameter defines the model specification. The `AverageWordVecModelSpec` and `BertClassifierModelSpec` classes are currently supported. The `create` function comprises the following steps:\n\n1. Creates the model for the text classifier according to `model_spec`.\n2. Trains the classifier model. The default epochs and the default batch size are set by the `default_training_epochs` and `default_batch_size` variables in the `model_spec` object.\n\nThis section covers advanced usage topics like adjusting the model and the training hyperparameters.", "_____no_output_____" ], [ "### Customize the MobileBERT model hyperparameters\n\nThe model parameters you can adjust are:\n\n* `seq_len`: Length of the sequence to feed into the model.\n* `initializer_range`: The standard deviation of the `truncated_normal_initializer` for initializing all weight matrices.\n* `trainable`: Boolean that specifies whether the pre-trained layer is trainable.\n\nThe training pipeline parameters you can adjust are:\n\n* `model_dir`: The location of the model checkpoint files. If not set, a temporary directory will be used.\n* `dropout_rate`: The dropout rate.\n* `learning_rate`: The initial learning rate for the Adam optimizer.\n* `tpu`: TPU address to connect to.\n\nFor instance, you can set the `seq_len=256` (default is 128). 
This allows the model to classify longer text.", "_____no_output_____" ] ], [ [ "new_model_spec = model_spec.get('mobilebert_classifier')\nnew_model_spec.seq_len = 256", "_____no_output_____" ] ], [ [ "### Customize the average word embedding model hyperparameters\n\nYou can adjust the model infrastructure like the `wordvec_dim` and the `seq_len` variables in the `AverageWordVecModelSpec` class.\n", "_____no_output_____" ], [ "For example, you can train the model with a larger value of `wordvec_dim`. Note that you must construct a new `model_spec` if you modify the model.", "_____no_output_____" ] ], [ [ "new_model_spec = model_spec.AverageWordVecModelSpec(wordvec_dim=32)", "_____no_output_____" ] ], [ [ "Get the preprocessed data.", "_____no_output_____" ] ], [ [ "new_train_data = TextClassifierDataLoader.from_csv(\n filename='train.csv',\n text_column='sentence',\n label_column='label',\n model_spec=new_model_spec,\n is_training=True)", "_____no_output_____" ] ], [ [ "Train the new model.", "_____no_output_____" ] ], [ [ "model = text_classifier.create(new_train_data, model_spec=new_model_spec)", "_____no_output_____" ] ], [ [ "### Tune the training hyperparameters\nYou can also tune the training hyperparameters like `epochs` and `batch_size` that affect the model accuracy. For instance,\n\n* `epochs`: more epochs could achieve better accuracy, but may lead to overfitting.\n* `batch_size`: the number of samples to use in one training step.\n\nFor example, you can train with more epochs.", "_____no_output_____" ] ], [ [ "model = text_classifier.create(new_train_data, model_spec=new_model_spec, epochs=20)", "_____no_output_____" ] ], [ [ "Evaluate the newly retrained model with 20 training epochs.", "_____no_output_____" ] ], [ [ "new_test_data = TextClassifierDataLoader.from_csv(\n filename='dev.csv',\n text_column='sentence',\n label_column='label',\n model_spec=new_model_spec,\n is_training=False)\n\nloss, accuracy = model.evaluate(new_test_data)", "_____no_output_____" ] ], [ [ "### Change the Model Architecture\n\nYou can change the model by changing the `model_spec`. The following shows how to change to the BERT-Base model.\n\nChange the `model_spec` to the BERT-Base model for the text classifier.", "_____no_output_____" ] ], [ [ "spec = model_spec.get('bert_classifier')", "_____no_output_____" ] ], [ [ "The remaining steps are the same.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
cb9d8400a670e3b867a238bf22f3a17703cd173a
24,271
ipynb
Jupyter Notebook
Bagian Bonus - Natural Language Processing/5. Klasifikasi Teks.ipynb
DimasWidyatama/Fundamental-Pandas
7abd48b130d90482bd30d6f2630537b00e5498d7
[ "MIT" ]
null
null
null
Bagian Bonus - Natural Language Processing/5. Klasifikasi Teks.ipynb
DimasWidyatama/Fundamental-Pandas
7abd48b130d90482bd30d6f2630537b00e5498d7
[ "MIT" ]
null
null
null
Bagian Bonus - Natural Language Processing/5. Klasifikasi Teks.ipynb
DimasWidyatama/Fundamental-Pandas
7abd48b130d90482bd30d6f2630537b00e5498d7
[ "MIT" ]
1
2020-11-22T07:45:07.000Z
2020-11-22T07:45:07.000Z
29.816953
138
0.425611
[ [ [ "Download the dataset: https://drive.google.com/file/d/1IX9cWMwzc4v8lLivk19k2LV2JrCj0KD1/view?usp=sharing", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\n\n\ndf = pd.read_csv('Amazon_Unlocked_Mobile.csv')\n\n\n# df = df.sample(frac=0.1, random_state=10)\n\ndf.head()", "_____no_output_____" ], [ "df.dropna(inplace=True)\n\ndf = df[df['Rating'] != 3]\n\ndf['Positively Rated'] = np.where(df['Rating'] > 3, 1, 0)\ndf.head(10)", "_____no_output_____" ], [ "df['Positively Rated'].mean()", "_____no_output_____" ], [ "from sklearn.model_selection import train_test_split\n\n\nX_train, X_test, y_train, y_test = train_test_split(df['Reviews'], \n df['Positively Rated'], \n random_state=0)", "_____no_output_____" ], [ "print('X_train first entry:\\n\\n', X_train.iloc[0])\nprint('\\n\\nX_train shape: ', X_train.shape)", "X_train first entry:\n\n I bought a BB Black and was deliveried a White BB.Really is not a serious provider...Next time is better to cancel the order.\n\n\nX_train shape: (231207,)\n" ] ], [ [ "# CountVectorizer", "_____no_output_____" ] ], [ [ "from sklearn.feature_extraction.text import CountVectorizer\n\n\nvect = CountVectorizer().fit(X_train)", "_____no_output_____" ], [ "vect.get_feature_names()[::2000]", "_____no_output_____" ], [ "len(vect.get_feature_names())", "_____no_output_____" ], [ "X_train_vectorized = vect.transform(X_train)\n\nX_train_vectorized", "_____no_output_____" ], [ "from sklearn.linear_model import LogisticRegression\n\n\nmodel = LogisticRegression()\nmodel.fit(X_train_vectorized, y_train)", "_____no_output_____" ], [ "from sklearn.metrics import roc_auc_score\n\n\npredictions = model.predict(vect.transform(X_test))\n\nprint('AUC: ', roc_auc_score(y_test, predictions))", "AUC: 0.9284099624537354\n" ], [ "\nfeature_names = np.array(vect.get_feature_names())\n\n\nsorted_coef_index = model.coef_[0].argsort()\n\n\nprint('Smallest Coefs:\\n{}\\n'.format(feature_names[sorted_coef_index[:10]]))\nprint('Largest Coefs: \\n{}'.format(feature_names[sorted_coef_index[:-11:-1]]))", "Smallest Coefs:\n['mony' 'worst' 'false' 'worthless' 'junk' 'garbage' 'messing' 'horribly'\n 'blacklist' 'useless']\n\nLargest Coefs: \n['excelent' 'excelente' 'exelente' 'excellent' 'loving' 'efficient'\n 'loves' 'perfecto' 'lovely' '4eeeks']\n" ] ], [ [ "# TfIdf", "_____no_output_____" ] ], [ [ "from sklearn.feature_extraction.text import TfidfVectorizer\n\n# Fit the TfidfVectorizer to the training data specifying a minimum document frequency of 5\nvect = TfidfVectorizer(min_df=5).fit(X_train)\nlen(vect.get_feature_names())", "_____no_output_____" ], [ "X_train_vectorized = vect.transform(X_train)\n\nmodel = LogisticRegression()\nmodel.fit(X_train_vectorized, y_train)\n\npredictions = model.predict(vect.transform(X_test))\n\nprint('AUC: ', roc_auc_score(y_test, predictions))", "AUC: 0.9266100666746837\n" ], [ "feature_names = np.array(vect.get_feature_names())\n\nsorted_tfidf_index = X_train_vectorized.max(0).toarray()[0].argsort()\n\nprint('Smallest tfidf:\\n{}\\n'.format(feature_names[sorted_tfidf_index[:10]]))\nprint('Largest tfidf: \\n{}'.format(feature_names[sorted_tfidf_index[:-11:-1]]))", "Smallest tfidf:\n['commenter' 'pthalo' 'warmness' 'storageso' 'aggregration' '1300'\n '625nits' 'a10' 'submarket' 'brawns']\n\nLargest tfidf: \n['defective' 'batteries' 'gooood' 'epic' 'luis' 'goood' 'basico'\n 'aceptable' 'problems' 'excellant']\n" ], [ "sorted_coef_index = model.coef_[0].argsort()\n\nprint('Smallest 
Coefs:\\n{}\\n'.format(feature_names[sorted_coef_index[:10]]))\nprint('Largest Coefs: \\n{}'.format(feature_names[sorted_coef_index[:-11:-1]]))", "Smallest Coefs:\n['not' 'worst' 'useless' 'disappointed' 'terrible' 'return' 'waste' 'poor'\n 'horrible' 'doesn']\n\nLargest Coefs: \n['love' 'great' 'excellent' 'perfect' 'amazing' 'awesome' 'perfectly'\n 'easy' 'best' 'loves']\n" ], [ "print(model.predict(vect.transform(['not an issue, phone is working',\n 'an issue, phone is not working'])))", "[0 0]\n" ], [ "vect = CountVectorizer(min_df=5, ngram_range=(1,2)).fit(X_train)\n\nX_train_vectorized = vect.transform(X_train)\n\nlen(vect.get_feature_names())", "_____no_output_____" ], [ "model = LogisticRegression()\nmodel.fit(X_train_vectorized, y_train)\n\npredictions = model.predict(vect.transform(X_test))\n\nprint('AUC: ', roc_auc_score(y_test, predictions))", "AUC: 0.9671263879424379\n" ], [ "feature_names = np.array(vect.get_feature_names())\n\nsorted_coef_index = model.coef_[0].argsort()\n\nprint('Smallest Coefs:\\n{}\\n'.format(feature_names[sorted_coef_index[:10]]))\nprint('Largest Coefs: \\n{}'.format(feature_names[sorted_coef_index[:-11:-1]]))", "Smallest Coefs:\n['no good' 'worst' 'junk' 'not good' 'not happy' 'horrible' 'garbage'\n 'terrible' 'looks ok' 'nope']\n\nLargest Coefs: \n['not bad' 'excelent' 'excelente' 'excellent' 'perfect' 'no problems'\n 'exelente' 'awesome' 'no issues' 'great']\n" ], [ "print(model.predict(vect.transform(['not an issue, phone is working',\n 'an issue, phone is not working'])))", "[1 0]\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb9d9652ad85d69aeb12716e6623e464009b0a45
419,216
ipynb
Jupyter Notebook
DoG.ipynb
gabilodeau/INF6804
126defd9397beafbf1c97ddeec6effe699da235d
[ "MIT" ]
5
2019-06-03T21:17:07.000Z
2022-01-30T19:43:56.000Z
DoG.ipynb
gabilodeau/INF6804
126defd9397beafbf1c97ddeec6effe699da235d
[ "MIT" ]
null
null
null
DoG.ipynb
gabilodeau/INF6804
126defd9397beafbf1c97ddeec6effe699da235d
[ "MIT" ]
5
2018-10-22T20:43:07.000Z
2022-01-19T03:29:15.000Z
1,768.843882
141,194
0.958818
[ [ [ "<a href=\"https://colab.research.google.com/github/gabilodeau/INF6804/blob/master/DoG.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "INF6804 Computer Vision\n\nPolytechnique Montréal\n\nExample of a difference of Gaussians", "_____no_output_____" ] ], [ [ "import cv2\nimport os\nimport matplotlib.pyplot as plt\nimport numpy as np", "_____no_output_____" ] ], [ [ "Read an image and display it.", "_____no_output_____" ] ], [ [ "image_name = 'bureau.jpg'\nif not os.path.exists(image_name):\n !gdown https://raw.githubusercontent.com/gabilodeau/INF6804/master/images/bureau.jpg", "Downloading...\nFrom: https://raw.githubusercontent.com/gabilodeau/INF6804/master/images/bureau.jpg\nTo: /content/bureau.jpg\n\r 0% 0.00/21.9k [00:00<?, ?B/s]\r100% 21.9k/21.9k [00:00<00:00, 1.47MB/s]\n" ], [ "image = cv2.imread('bureau.jpg',cv2.IMREAD_GRAYSCALE)\nplt.figure(figsize = (10,10))\nplt.imshow(image,cmap = plt.get_cmap('gray'))\nplt.show()", "_____no_output_____" ] ], [ [ "Convolve the image with two Gaussian filters of different standard deviations. The second filter makes the image blurrier.", "_____no_output_____" ] ], [ [ "convavecGauss1 = cv2.GaussianBlur(image.astype(float),(5,5),3)\nconvavecGauss2 = cv2.GaussianBlur(image.astype(float),(5,5),5)\n\nf, axarr = plt.subplots(1, 2, figsize=(18,15))\naxarr[0].imshow(convavecGauss1.astype('uint8'),cmap = plt.get_cmap('gray'))\naxarr[1].imshow(convavecGauss2.astype('uint8'),cmap = plt.get_cmap('gray'))\nplt.show()", "_____no_output_____" ] ], [ [ "The difference of Gaussians is the difference between the two images convolved with the Gaussian filters. ", "_____no_output_____" ] ], [ [ "difGauss = np.absolute(convavecGauss1.astype(float)-convavecGauss2.astype(float)) #Do the computation in float.\n\ndifGauss = difGauss * 255/np.max(difGauss) #Scale for display\nplt.figure(figsize = (10,10))\nplt.imshow(difGauss.astype('uint8'),cmap = plt.get_cmap('binary')) #Darker = higher value\nplt.show()", "_____no_output_____" ], [ "", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
cb9dc1b1b25a2441cae0f0989e69887080658919
70,214
ipynb
Jupyter Notebook
New_Zealand_Universities/Auckland_University_of_Technology/Auckland_University_of_Technology.ipynb
mohansah/Web_Scraping
3668af35f03748a21b5153c7133fdd72fb90811f
[ "MIT" ]
null
null
null
New_Zealand_Universities/Auckland_University_of_Technology/Auckland_University_of_Technology.ipynb
mohansah/Web_Scraping
3668af35f03748a21b5153c7133fdd72fb90811f
[ "MIT" ]
null
null
null
New_Zealand_Universities/Auckland_University_of_Technology/Auckland_University_of_Technology.ipynb
mohansah/Web_Scraping
3668af35f03748a21b5153c7133fdd72fb90811f
[ "MIT" ]
1
2021-02-09T18:21:57.000Z
2021-02-09T18:21:57.000Z
100.019943
213
0.729057
[ [ [ "import requests\nimport bs4\nimport csv\n\nlist=[]\nlis_h=['Course_Name','Link']\nlist.append(lis_h)", "_____no_output_____" ], [ "res1=requests.get('https://www.studyinnewzealand.govt.nz/study-options/course/provider-results?institutionid=142314&pageno=28') \nsoup1=bs4.BeautifulSoup(res1.text,'lxml')", "_____no_output_____" ], [ "s1=soup1.select('h3')\nfor i in range(len(s1)):\n if(s1[i].get('class')==['crs_tit', 'univ_tit']):\n print(s1[i].a.text)\n print(s1[i].a.get('href'))", "Postgraduate Diploma in Public Health\nhttps://www.studyinnewzealand.govt.nz/study-options/course/details?courseid=54641602&institutionid=142314&course=Postgraduate-Diploma-in-Public-Health\nPostgraduate Diploma in Science\nhttps://www.studyinnewzealand.govt.nz/study-options/course/details?courseid=53058192&institutionid=142314&course=Postgraduate-Diploma-in-Science\nPostgraduate Diploma in Sport and Exercise\nhttps://www.studyinnewzealand.govt.nz/study-options/course/details?courseid=2152580&institutionid=142314&course=Postgraduate-Diploma-in-Sport-and-Exercise\nTESOL/TKT\nhttps://www.studyinnewzealand.govt.nz/study-options/course/details?courseid=57083784&institutionid=142314&course=TESOL/TKT\n" ], [ "import requests\nimport bs4\nimport csv\n\nlist=[]\nlis_h=['Course_Name','Link']\nlist.append(lis_h)\n\nfor j in range(29):\n res1=requests.get('https://www.studyinnewzealand.govt.nz/study-options/course/provider-results?institutionid=142314&pageno='+str(j)) \n soup1=bs4.BeautifulSoup(res1.text,'lxml')\n s1=soup1.select('h3')\n for i in range(len(s1)):\n if(s1[i].get('class')==['crs_tit', 'univ_tit']):\n list.append([s1[i].a.text,s1[i].a.get('href')])\n \nlist", "_____no_output_____" ], [ "len(list)", "_____no_output_____" ], [ "with open('Auckland_University_of_Technology.csv','w',newline=\"\") as file:\n write=csv.writer(file)\n for row in list:\n write.writerow(row)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code" ] ]
cb9de0d76bc7cd9a978e040c6fd6851cb896a06d
164,537
ipynb
Jupyter Notebook
kindle_analysis/kindle_analysis.ipynb
riron1206/data_analysis
f6b7c4ce4cc80fe687ff7bac6036e8f507cc7519
[ "MIT" ]
null
null
null
kindle_analysis/kindle_analysis.ipynb
riron1206/data_analysis
f6b7c4ce4cc80fe687ff7bac6036e8f507cc7519
[ "MIT" ]
null
null
null
kindle_analysis/kindle_analysis.ipynb
riron1206/data_analysis
f6b7c4ce4cc80fe687ff7bac6036e8f507cc7519
[ "MIT" ]
null
null
null
368.91704
93,128
0.925257
[ [ [ "!pwd\nimport sys\n%reload_ext autoreload\n%autoreload 2\n%matplotlib inline\nsys.executable", "/c/Users/81908/jupyter_notebook/tf_2_work/kindle_data_analysis\n" ] ], [ [ "# Kindleの蔵書リストをGoogle Colaboratoryでデータ分析してみた \n- https://karaage.hatenadiary.jp/entry/2020/07/20/073000", "_____no_output_____" ] ], [ [ "import os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set()\nplt.rcParams[\"font.family\"] = 'Yu Gothic' # Yu Gothic指定すれば日本語出せる\n\ndf = pd.read_csv(\"Kindle.csv\", index_col=\"PurchaseDate\", parse_dates=[\"PurchaseDate\"])\n\ndf[\"year\"] = df.index.year\ndf[\"month\"] = df.index.month\ndf[\"dayofweek\"] = df.index.dayofweek\ndf[\"day\"] = df.index.day\n\n# タイトルのカッコの中にある出版社情報や巻数情報を削除\ndf['title_renamed'] = df['Title'].str.replace(r'\\s*\\([^()]*\\)','').str.replace(r'\\s*\\([^()]*\\)','').str.strip()\n\n# タイトルの文字数の列を作る\ndf['title_length'] = list(pd.Series(df['title_renamed']).apply(lambda x: len(x)))\n\ndf.head()", "_____no_output_____" ], [ "# 年、月、曜日、日での累積購入冊数\nfig, ax = plt.subplots(2, 2, figsize=(10, 20))\n\n_df = df[\"year\"].value_counts().sort_values()\n_df.plot.barh(ax=ax[0, 0], title=\"累積購入冊数(年)\")\n\n_df = df[\"month\"].value_counts().sort_values()\n_df.plot.barh(ax=ax[1, 0], title=\"累積購入冊数(月)\")\n\n_df = df[\"dayofweek\"].value_counts().sort_values()\n_df.plot.barh(ax=ax[0, 1], title=\"累積購入冊数(曜日)(0が月曜日、6が土曜日)\")\n\n_df = df[\"day\"].value_counts().sort_values()\n_df.plot.barh(ax=ax[1, 1], title=\"累積購入冊数(日)\")\n\nplt.show()\nplt.clf() # メモリ解放\nplt.close()", "_____no_output_____" ], [ "# 購入冊数が多い年月\nfig, ax = plt.subplots(1, 3, figsize=(20, 10))\n\ndf_cross = pd.crosstab(df['year'], df[\"month\"])\nsns.heatmap(df_cross, cmap='coolwarm', annot=True, ax=ax[0])\n\ndf_cross = pd.crosstab(df['year'], df[\"dayofweek\"])\nsns.heatmap(df_cross, cmap='coolwarm', annot=True, ax=ax[1])\n\ndf_cross = pd.crosstab(df['year'], df[\"day\"])\nsns.heatmap(df_cross, cmap='coolwarm', annot=True, ax=ax[2])\n\nplt.show()\nplt.clf() # メモリ解放\nplt.close()", "_____no_output_____" ], [ "# タイトルの文字数のヒストグラム\ndf['title_length'].plot(kind='hist', bins=20, figsize=(16,4), alpha=0.5)", "_____no_output_____" ], [ "# 同タイトルの冊数\ndf['title_renamed'].value_counts()", "_____no_output_____" ], [ "# 同著者の冊数\ndf['Authors'].value_counts()", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
cb9df005a2e317fb530dbda6ab42ff5656512249
40,171
ipynb
Jupyter Notebook
boston-housing.ipynb
Pankaj703/ML
fd63a70031a96538e2c1bccf00c02ccc29099856
[ "Apache-2.0" ]
null
null
null
boston-housing.ipynb
Pankaj703/ML
fd63a70031a96538e2c1bccf00c02ccc29099856
[ "Apache-2.0" ]
null
null
null
boston-housing.ipynb
Pankaj703/ML
fd63a70031a96538e2c1bccf00c02ccc29099856
[ "Apache-2.0" ]
1
2021-11-21T21:26:26.000Z
2021-11-21T21:26:26.000Z
53.06605
15,812
0.637574
[ [ [ "<h1 align=\"center\">Assignment</h1>\n<h3 align=\"center\">Faisal Akhtar</h3>\n<h3 align=\"center\">Roll No.: 17/1409</h3>\n<h3 align=\"center\">Machine Learning - B.Sc. Hons Computer Science - Vth Semester</h4>", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as plt\n\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LinearRegression\n\nfrom scipy import stats", "_____no_output_____" ], [ "column_names = [\"CRIM\", \"ZN\", \"INDUS\", \"CHAS\", \"NOX\", \"RM\", \"AGE\", \"DIS\", \"RAD\", \"TAX\", \"PTRATIO\", \"B\", \"LSTAT\", \"MEDV\"]\ndata = pd.read_csv(\"../input/boston-housing.csv\", header=None, delimiter=r\"\\s+\", names=column_names)\nprint(\"\\n\\nData loaded\\n\\n\")", "\n\nData loaded\n\n\n" ], [ "data.head()", "_____no_output_____" ], [ "data.describe()", "_____no_output_____" ] ], [ [ "<h4>Preprocessing</h4>", "_____no_output_____" ] ], [ [ "data = pd.DataFrame(np.c_[data['RM'],data['AGE'],data['MEDV']], columns = ['RM', 'AGE', 'MEDV'])\n\n# Check null values\nprint(\"\\n\\nCheck null values\\n\",data.isnull().sum())", "\n\nCheck null values\n RM 0\nAGE 0\nMEDV 0\ndtype: int64\n" ] ], [ [ "No null values found...Moving on", "_____no_output_____" ] ], [ [ "# Discovering outliers by Z-Score\nZScore = np.abs(stats.zscore(data))\nprint(\"\\n\\nChecking where outliers are less than the ZScore\")\nprint(\"ZScore > 1\\n\",np.where(ZScore > 1)[0],\"\\n\",np.where(ZScore > 1)[1],\"\\n\")\nprint(\"ZScore > 2\\n\",np.where(ZScore > 2)[0],\"\\n\",np.where(ZScore > 2)[1],\"\\n\")\nprint(\"ZScore > 3\\n\",np.where(ZScore > 3)[0],\"\\n\",np.where(ZScore > 3)[1],\"\\n\")", "\n\nChecking where outliers are less than the ZScore\nZScore > 1\n [ 2 2 3 3 4 4 8 12 16 18 18 20 20 23 30 31 32 33\n 34 38 39 40 40 40 41 42 43 44 45 46 48 52 53 55 55 55\n 56 58 64 64 65 66 67 68 69 70 71 72 73 74 79 80 82 88\n 89 93 97 97 98 98 98 99 99 105 108 123 128 130 131 132 134 135\n 137 138 138 139 141 141 142 142 143 143 144 144 144 145 146 147 148 150\n 151 151 152 153 156 156 157 157 158 159 161 161 162 162 162 163 163 166\n 166 171 172 175 179 180 180 181 182 182 183 186 186 187 188 189 189 189\n 190 190 191 192 192 192 193 194 195 195 195 196 196 196 197 197 198 198\n 198 199 199 200 200 200 201 202 202 202 203 203 203 204 204 204 205 209\n 209 211 213 214 214 224 224 225 225 226 226 227 228 228 228 229 231 232\n 232 233 233 237 238 243 246 249 250 251 252 253 253 253 254 255 256 256\n 256 257 257 258 258 258 259 260 260 261 261 262 262 263 264 264 265 266\n 267 267 268 268 271 273 273 274 274 275 276 276 277 277 278 279 279 280\n 280 281 281 282 282 283 283 283 284 284 284 285 286 287 289 290 291 291\n 291 292 293 295 298 299 299 301 302 303 303 304 304 306 306 310 310 324\n 325 326 328 329 330 331 332 333 334 335 338 341 341 344 347 348 349 351\n 352 353 354 355 356 362 364 365 366 367 367 368 368 368 369 369 370 370\n 370 371 371 372 373 373 374 374 375 375 377 377 378 379 379 380 381 381\n 382 382 382 383 383 383 384 384 385 385 385 386 386 386 387 387 388 388\n 388 389 389 389 390 392 392 392 394 395 395 396 397 397 398 398 398 399\n 400 400 401 401 402 402 403 403 404 404 405 405 406 406 406 407 408 409\n 410 411 412 412 413 413 414 414 414 415 415 416 417 417 418 418 419 420\n 424 424 425 426 427 428 429 434 436 437 437 438 439 440 441 442 443 444\n 445 447 448 449 449 451 453 453 456 474 475 475 477 477 477 478 482 483\n 488 489 489 489 490 490 490 491 495 496 499 505] \n [0 2 0 2 0 2 1 1 1 0 1 0 1 1 2 1 
2 2 1 1 1 0 1 2 1 1 1 1 1 1 0 1 1 0 1 2 1\n 1 0 2 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 1 0 2 0 1 2 0 2 1 1 1 1 1 1 1 1 1 1 1\n 2 1 0 1 0 1 0 1 0 1 2 1 1 0 0 1 0 1 0 1 0 2 1 2 1 1 0 2 0 1 2 0 2 0 2 1 0\n 1 2 0 2 2 0 2 2 0 2 2 1 0 1 2 1 2 1 0 1 2 1 1 0 1 2 0 1 2 0 1 0 1 2 1 2 0\n 1 2 1 0 1 2 0 1 2 0 1 2 1 0 1 0 1 0 1 0 2 0 2 0 2 0 0 1 2 1 0 0 2 0 2 0 1\n 1 1 1 1 1 1 0 1 2 1 1 0 1 2 0 2 0 1 2 1 0 2 0 2 0 2 0 0 2 0 0 0 2 0 2 1 0\n 2 1 2 2 0 2 1 2 1 1 2 0 2 1 2 0 2 0 1 2 0 1 2 1 1 1 1 1 0 1 2 1 1 1 1 0 1\n 1 1 1 2 0 2 0 2 0 1 1 1 1 1 1 1 1 1 1 1 1 1 0 2 1 1 1 1 1 1 1 1 1 1 0 0 0\n 0 0 1 0 1 2 1 2 0 1 2 1 2 2 0 1 0 1 0 1 1 2 2 1 2 2 1 2 0 1 2 0 1 2 0 2 0\n 1 2 0 1 2 0 2 0 1 2 0 1 2 1 0 1 2 2 1 2 2 1 2 0 1 2 2 1 2 1 2 1 2 0 2 0 2\n 1 2 0 1 2 1 1 1 1 1 0 1 0 1 0 1 2 1 2 2 0 2 1 2 2 1 0 2 2 2 2 2 2 2 2 1 2\n 2 2 2 1 1 1 2 2 2 1 1 2 1 0 1 2 0 1 2 0 1 2 1 0 1 0 0 1 2 0 1 2 1 1 0 0 2] \n\nZScore > 2\n [ 41 42 43 70 72 73 74 97 98 98 157 161 162 162 163 163 166 166\n 180 186 186 193 195 195 202 203 203 204 204 214 224 224 225 225 226 228\n 232 232 233 233 243 251 252 253 253 253 256 257 257 261 262 262 267 267\n 268 273 280 280 282 283 283 299 364 365 367 368 369 370 371 372 374 384\n 386 388 406 412 414] \n [1 1 1 1 1 1 1 0 0 2 2 2 0 2 0 2 0 2 0 0 2 1 0 2 2 0 2 0 2 1 0 2 0 2 0 2 0\n 2 0 2 1 1 1 0 1 2 2 0 2 2 0 2 0 2 2 0 0 2 2 0 2 1 0 0 0 2 2 2 2 2 0 0 0 0\n 0 0 0] \n\nZScore > 3\n [225 257 262 364 365 367 374 406] \n [0 0 0 0 0 0 0 0] \n\n" ] ], [ [ "Selecting ZScore 3 to remove outliers", "_____no_output_____" ] ], [ [ "data_o = data[(ZScore<3).all(axis=1)]\nprint (\"Shape before removing outliers : \",np.shape(data),\"\\nShape after removing outliers : \",np.shape(data_o))", "Shape before removing outliers : (506, 3) \nShape after removing outliers : (498, 3)\n" ] ], [ [ "<h4>Preparing the data for training</h4>\nwhrere X is input data and Y is output data", "_____no_output_____" ] ], [ [ "X = pd.DataFrame(np.c_[data_o['RM'],data_o['AGE']], columns = ['RM', 'AGE'])\nY = pd.DataFrame(np.c_[data_o['MEDV']], columns = ['MEDV'])\nprint(\"\\n\\nX =\\n\",X.head(5))\nprint(\"\\n\\nY =\\n\",Y.head(5))", "\n\nX =\n RM AGE\n0 6.575 65.2\n1 6.421 78.9\n2 7.185 61.1\n3 6.998 45.8\n4 7.147 54.2\n\n\nY =\n MEDV\n0 24.0\n1 21.6\n2 34.7\n3 33.4\n4 36.2\n" ] ], [ [ "<h4>Splitting dataset in Training sets and Test sets</h4>\nWhere 75% data is for training and 25% is for testing", "_____no_output_____" ] ], [ [ "X_train, X_test, Y_train, Y_test = train_test_split(X,Y,test_size=0.25)\nprint(\"X_train.shape : \", X_train.shape, \"\\tX_test.shape\", X_test.shape)\nprint(\"Y_train.shape : \", Y_train.shape, \"\\tY_train.shape\", Y_train.shape)", "X_train.shape : (373, 2) \tX_test.shape (125, 2)\nY_train.shape : (373, 1) \tY_train.shape (373, 1)\n" ] ], [ [ "<h4>Linear Regression</h4>\nFitting Linear regression model to training model", "_____no_output_____" ] ], [ [ "lin_model = LinearRegression()\nlin_model = lin_model.fit(X_train, Y_train)", "_____no_output_____" ] ], [ [ "<h4>Model Analysis</h4>", "_____no_output_____" ] ], [ [ "predictions = lin_model.predict(X_test)\n\n# Scatter Plot\nplt.scatter(Y_test, predictions)\nplt.xlabel(\"True Values\",color='red')\nplt.ylabel(\"Predictions\",color='blue')\nplt.title(\"Predicted vs Actual value\")\nplt.grid(True)\nplt.show()\n", "_____no_output_____" ] ], [ [ "<h4>The coefficient of determination R^2 of the prediction</h4>", "_____no_output_____" ] ], [ [ "print(lin_model.score(X_test,Y_test))", "0.5925746602474407\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb9df94b87e4e474ff5cf777a4720f47925e07a1
812,827
ipynb
Jupyter Notebook
Projects/quadcopter/Quadcopter_Project.ipynb
onurasiliskender/deep-learning
468e849e5ed0c359165a7396b4cc8910029f8236
[ "MIT" ]
null
null
null
Projects/quadcopter/Quadcopter_Project.ipynb
onurasiliskender/deep-learning
468e849e5ed0c359165a7396b4cc8910029f8236
[ "MIT" ]
null
null
null
Projects/quadcopter/Quadcopter_Project.ipynb
onurasiliskender/deep-learning
468e849e5ed0c359165a7396b4cc8910029f8236
[ "MIT" ]
null
null
null
262.795668
232,699
0.891848
[ [ [ "# Project: Train a Quadcopter How to Fly\n\nDesign an agent to fly a quadcopter, and then train it using a reinforcement learning algorithm of your choice! \n\nTry to apply the techniques you have learnt, but also feel free to come up with innovative ideas and test them.", "_____no_output_____" ], [ "## Instructions\n\nTake a look at the files in the directory to better understand the structure of the project. \n\n- `task.py`: Define your task (environment) in this file.\n- `agents/`: Folder containing reinforcement learning agents.\n - `policy_search.py`: A sample agent has been provided here.\n - `agent.py`: Develop your agent here.\n- `physics_sim.py`: This file contains the simulator for the quadcopter. **DO NOT MODIFY THIS FILE**.\n\nFor this project, you will define your own task in `task.py`. Although we have provided a example task to get you started, you are encouraged to change it. Later in this notebook, you will learn more about how to amend this file.\n\nYou will also design a reinforcement learning agent in `agent.py` to complete your chosen task. \n\nYou are welcome to create any additional files to help you to organize your code. For instance, you may find it useful to define a `model.py` file defining any needed neural network architectures.\n\n## Controlling the Quadcopter\n\nWe provide a sample agent in the code cell below to show you how to use the sim to control the quadcopter. This agent is even simpler than the sample agent that you'll examine (in `agents/policy_search.py`) later in this notebook!\n\nThe agent controls the quadcopter by setting the revolutions per second on each of its four rotors. The provided agent in the `Basic_Agent` class below always selects a random action for each of the four rotors. These four speeds are returned by the `act` method as a list of four floating-point numbers. \n\nFor this project, the agent that you will implement in `agents/agent.py` will have a far more intelligent method for selecting actions!", "_____no_output_____" ] ], [ [ "import random\n\nclass Basic_Agent():\n def __init__(self, task):\n self.task = task\n \n def act(self):\n new_thrust = random.gauss(450., 25.)\n return [new_thrust + random.gauss(0., 1.) for x in range(4)]", "_____no_output_____" ] ], [ [ "Run the code cell below to have the agent select actions to control the quadcopter. \n\nFeel free to change the provided values of `runtime`, `init_pose`, `init_velocities`, and `init_angle_velocities` below to change the starting conditions of the quadcopter.\n\nThe `labels` list below annotates statistics that are saved while running the simulation. All of this information is saved in a text file `data.txt` and stored in the dictionary `results`. ", "_____no_output_____" ] ], [ [ "%load_ext autoreload\n%autoreload 2\n\nimport csv\nimport numpy as np\nfrom task import Task\n\n# Modify the values below to give the quadcopter a different starting position.\nruntime = 5. 
# time limit of the episode\ninit_pose = np.array([0., 0., 10., 0., 0., 0.]) # initial pose\ninit_velocities = np.array([0., 0., 0.]) # initial velocities\ninit_angle_velocities = np.array([0., 0., 0.]) # initial angle velocities\nfile_output = 'data.txt' # file name for saved results\n\n# Setup\ntask = Task(init_pose, init_velocities, init_angle_velocities, runtime)\nagent = Basic_Agent(task)\ndone = False\nlabels = ['time', 'x', 'y', 'z', 'phi', 'theta', 'psi', 'x_velocity',\n 'y_velocity', 'z_velocity', 'phi_velocity', 'theta_velocity',\n 'psi_velocity', 'rotor_speed1', 'rotor_speed2', 'rotor_speed3', 'rotor_speed4']\nresults = {x : [] for x in labels}\n\n# Run the simulation, and save the results.\nwith open(file_output, 'w') as csvfile:\n writer = csv.writer(csvfile)\n writer.writerow(labels)\n while True:\n rotor_speeds = agent.act()\n _, _, done = task.step(rotor_speeds)\n to_write = [task.sim.time] + list(task.sim.pose) + list(task.sim.v) + list(task.sim.angular_v) + list(rotor_speeds)\n for ii in range(len(labels)):\n results[labels[ii]].append(to_write[ii])\n writer.writerow(to_write)\n if done:\n break", "_____no_output_____" ] ], [ [ "Run the code cell below to visualize how the position of the quadcopter evolved during the simulation.", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\n%matplotlib inline\n\nplt.plot(results['time'], results['x'], label='x')\nplt.plot(results['time'], results['y'], label='y')\nplt.plot(results['time'], results['z'], label='z')\nplt.legend()\n_ = plt.ylim()", "_____no_output_____" ] ], [ [ "The next code cell visualizes the velocity of the quadcopter.", "_____no_output_____" ] ], [ [ "plt.plot(results['time'], results['x_velocity'], label='x_hat')\nplt.plot(results['time'], results['y_velocity'], label='y_hat')\nplt.plot(results['time'], results['z_velocity'], label='z_hat')\nplt.legend()\n_ = plt.ylim()", "_____no_output_____" ] ], [ [ "Next, you can plot the Euler angles (the rotation of the quadcopter over the $x$-, $y$-, and $z$-axes),", "_____no_output_____" ] ], [ [ "plt.plot(results['time'], results['phi'], label='phi')\nplt.plot(results['time'], results['theta'], label='theta')\nplt.plot(results['time'], results['psi'], label='psi')\nplt.legend()\n_ = plt.ylim()", "_____no_output_____" ] ], [ [ "before plotting the velocities (in radians per second) corresponding to each of the Euler angles.", "_____no_output_____" ] ], [ [ "plt.plot(results['time'], results['phi_velocity'], label='phi_velocity')\nplt.plot(results['time'], results['theta_velocity'], label='theta_velocity')\nplt.plot(results['time'], results['psi_velocity'], label='psi_velocity')\nplt.legend()\n_ = plt.ylim()", "_____no_output_____" ] ], [ [ "Finally, you can use the code cell below to print the agent's choice of actions. ", "_____no_output_____" ] ], [ [ "plt.plot(results['time'], results['rotor_speed1'], label='Rotor 1 revolutions / second')\nplt.plot(results['time'], results['rotor_speed2'], label='Rotor 2 revolutions / second')\nplt.plot(results['time'], results['rotor_speed3'], label='Rotor 3 revolutions / second')\nplt.plot(results['time'], results['rotor_speed4'], label='Rotor 4 revolutions / second')\nplt.legend()\n_ = plt.ylim()", "_____no_output_____" ] ], [ [ "When specifying a task, you will derive the environment state from the simulator. 
Run the code cell below to print the values of the following variables at the end of the simulation:\n- `task.sim.pose` (the position of the quadcopter in ($x,y,z$) dimensions and the Euler angles),\n- `task.sim.v` (the velocity of the quadcopter in ($x,y,z$) dimensions), and\n- `task.sim.angular_v` (radians/second for each of the three Euler angles).", "_____no_output_____" ] ], [ [ "# the pose, velocity, and angular velocity of the quadcopter at the end of the episode\nprint(task.sim.pose)\nprint(task.sim.v)\nprint(task.sim.angular_v)", "[ 4.30350923 -18.97479635 29.40515312 5.69503546 5.97416093 0. ]\n[ 4.21560278 -12.6592352 2.12677842]\n[-0.12655521 -0.05231868 0. ]\n" ] ], [ [ "In the sample task in `task.py`, we use the 6-dimensional pose of the quadcopter to construct the state of the environment at each timestep. However, when amending the task for your purposes, you are welcome to expand the size of the state vector by including the velocity information. You can use any combination of the pose, velocity, and angular velocity - feel free to tinker here, and construct the state to suit your task.\n\n## The Task\n\nA sample task has been provided for you in `task.py`. Open this file in a new window now. \n\nThe `__init__()` method is used to initialize several variables that are needed to specify the task. \n- The simulator is initialized as an instance of the `PhysicsSim` class (from `physics_sim.py`). \n- Inspired by the methodology in the original DDPG paper, we make use of action repeats. For each timestep of the agent, we step the simulation `action_repeats` timesteps. If you are not familiar with action repeats, please read the **Results** section in [the DDPG paper](https://arxiv.org/abs/1509.02971).\n- We set the number of elements in the state vector. For the sample task, we only work with the 6-dimensional pose information. To set the size of the state (`state_size`), we must take action repeats into account. \n- The environment will always have a 4-dimensional action space, with one entry for each rotor (`action_size=4`). You can set the minimum (`action_low`) and maximum (`action_high`) values of each entry here.\n- The sample task in this provided file is for the agent to reach a target position. We specify that target position as a variable.\n\nThe `reset()` method resets the simulator. The agent should call this method every time the episode ends. You can see an example of this in the code cell below.\n\nThe `step()` method is perhaps the most important. It accepts the agent's choice of action `rotor_speeds`, which is used to prepare the next state to pass on to the agent. Then, the reward is computed from `get_reward()`. The episode is considered done if the time limit has been exceeded, or the quadcopter has travelled outside of the bounds of the simulation.\n\nIn the next section, you will learn how to test the performance of an agent on this task.", "_____no_output_____" ], [ "## The Agent\n\nThe sample agent given in `agents/policy_search.py` uses a very simplistic linear policy to directly compute the action vector as a dot product of the state vector and a matrix of weights. Then, it randomly perturbs the parameters by adding some Gaussian noise, to produce a different policy. 
Based on the average reward obtained in each episode (`score`), it keeps track of the best set of parameters found so far, how the score is changing, and accordingly tweaks a scaling factor to widen or tighten the noise.\n\nRun the code cell below to see how the agent performs on the sample task.", "_____no_output_____" ] ], [ [ "import sys\nimport pandas as pd\nfrom agents.policy_search import PolicySearch_Agent\nfrom task import Task\n\nnum_episodes = 1000\ntarget_pos = np.array([0., 0., 10.])\ntask = Task(target_pos=target_pos)\nagent = PolicySearch_Agent(task) \n\nfor i_episode in range(1, num_episodes+1):\n state = agent.reset_episode() # start a new episode\n while True:\n action = agent.act(state) \n next_state, reward, done = task.step(action)\n agent.step(reward, done)\n state = next_state\n if done:\n print(\"\\rEpisode = {:4d}, score = {:7.3f} (best = {:7.3f}), noise_scale = {}\".format(\n i_episode, agent.score, agent.best_score, agent.noise_scale), end=\"\") # [debug]\n break\n sys.stdout.flush()", "Episode = 1000, score = 0.838 (best = 0.899), noise_scale = 3.255" ] ], [ [ "This agent should perform very poorly on this task. And that's where you come in!", "_____no_output_____" ], [ "## Define the Task, Design the Agent, and Train Your Agent!\n\nAmend `task.py` to specify a task of your choosing. If you're unsure what kind of task to specify, you may like to teach your quadcopter to takeoff, hover in place, land softly, or reach a target pose. \n\nAfter specifying your task, use the sample agent in `agents/policy_search.py` as a template to define your own agent in `agents/agent.py`. You can borrow whatever you need from the sample agent, including ideas on how you might modularize your code (using helper methods like `act()`, `learn()`, `reset_episode()`, etc.).\n\nNote that it is **highly unlikely** that the first agent and task that you specify will learn well. You will likely have to tweak various hyperparameters and the reward function for your task until you arrive at reasonably good behavior.\n\nAs you develop your agent, it's important to keep an eye on how it's performing. Use the code above as inspiration to build in a mechanism to log/save the total rewards obtained in each episode to file. 
If the episode rewards are gradually increasing, this is an indication that your agent is learning.", "_____no_output_____" ] ], [ [ "%load_ext autoreload\n%autoreload 2\n\nimport numpy as np\nfrom agents.agent import Agent\nfrom task import Task\nimport matplotlib.pyplot as plt\n%matplotlib notebook\n\nnum_episodes = 300 \ninit_pose = np.array([0., 0., 0.1, 0., 0., 0.])\ninit_velocities = np.array([0., 0., 0.]) \ninit_angle_velocities = np.array([0., 0., 0.]) \ntarget_pos = np.array([0.,0.,0.])\n\ntask = Task(init_pose=init_pose, init_velocities=init_velocities, init_angle_velocities=init_angle_velocities,target_pos=target_pos)\nagent = Agent(task)\n\ndisplay_graph = True\ndisplay_freq = 20\n\n# generate plot function\ndef plt_dynamic(x, z,score, color_z='g', color_score='b'):\n sub1.plot(x, z, color_z)\n sub2.plot(x, score, color_score)\n fig.canvas.draw()\n\n# create plots\nfig, sub1= plt.subplots(1,1)\nsub2 = sub1.twinx()\n\ntime_limit = 5\nz_lower = 0\nz_upper = 100\nscore_lower = 0\nscore_upper = 15\n\nsub1.set_xlim(0, time_limit) # this is typically time\nsub1.set_ylim(z_lower, z_upper) # limits to your y1\nsub2.set_xlim(0, time_limit) # time, again\nsub2.set_ylim(score_lower, score_upper) # limits to your y2\n\n# set labels and colors for the axes\nsub1.set_xlabel('time (s)', color='k') \nsub1.tick_params(axis='x', colors='k')\n\nsub1.set_ylabel('z-height', color='g')\nsub1.tick_params(axis='y', colors=\"g\")\n\nsub2.set_ylabel('total reward', color='b') \nsub2.tick_params(axis='y', colors='b')\n\nbest_x, best_z, best_score = [], [0], [0]\ntotal_rewards = []\n\nfor episode in range(num_episodes + 1):\n state = agent.reset_episode()\n done = False\n \n x, z, score = [], [], []\n \n while done is False:\n \n x.append(task.sim.time) \n z.append(task.sim.pose[2]) \n score.append(agent.total_reward) \n \n action = agent.act(state)\n next_state, reward, done = task.step(action)\n agent.step(action, reward, next_state, done)\n state = next_state\n \n total_rewards.append(np.amax(score))\n \n # store if best reward\n if np.amax(score) > np.amax(best_score):\n best_x, best_z, best_score = x, z, score\n print(\"Episode {:4d}: Improved reward return {}\".format(episode, np.amax(best_score)))\n \n if (episode % display_freq == 0) and (display_graph is True):\n plt_dynamic(x, z, score)\n \n print(\"Episode = {:4d}, total reward = {:7.3f}, noise_scale = {}\".format(\n episode, agent.total_reward, agent.noise_scale))\n", "The autoreload extension is already loaded. To reload it, use:\n %reload_ext autoreload\n" ] ], [ [ "## Plot the Rewards\n\nOnce you are satisfied with your performance, plot the episode rewards, either from a single run, or averaged over multiple runs. 
", "_____no_output_____" ] ], [ [ "# create plots\nfig, sub1= plt.subplots(1,1)\nsub2 = sub1.twinx()\n\n# set plot boundaries\nepisode = len(total_rewards)\nreward_lower = np.amin(total_rewards)\nreward_upper = np.amax(total_rewards)\n\nsub1.set_xlim(0, episode)\nsub1.set_ylim(reward_lower, reward_upper)\n\n# set labels and colors for the axes\nsub1.set_xlabel('episode', color='k') \nsub1.tick_params(axis='x', colors='k')\n\nsub1.set_ylabel('total reward', color='g')\nsub1.tick_params(axis='y', colors=\"g\")\n\nsub1.plot(range(len(total_rewards)), total_rewards, 'g')\nfig.canvas.draw()", "_____no_output_____" ], [ "print(\"Best total reward = {}\".format(np.amax(best_score)))\n\n# create plots\nfig, sub1= plt.subplots(1,1)\nsub2 = sub1.twinx()\n\ntime_limit = 5\nz_lower = 0\nz_upper = np.amax(best_z) + 1.0\nscore_lower = np.amin(best_score)\nscore_upper = np.amax(best_score) + 1.0\n\nsub1.set_xlim(0, time_limit) # this is typically time\nsub1.set_ylim(z_lower, z_upper) # limits to your y1\nsub2.set_xlim(0, time_limit) # time, again\nsub2.set_ylim(score_lower, score_upper) # limits to your y2\n\n# set labels and colors for the axes\nsub1.set_xlabel('time (s)', color='k') \nsub1.tick_params(axis='x', colors='k')\n\nsub1.set_ylabel('z-height', color='g')\nsub1.tick_params(axis='y', colors=\"g\")\n\nsub2.set_ylabel('total reward', color='b') \nsub2.tick_params(axis='y', colors='b')\n\nplt_dynamic(best_x, best_z, best_score)", "Best total reward = 14.184171794965872\n" ] ], [ [ "## Reflections\n\n**Question 1**: Describe the task that you specified in `task.py`. How did you design the reward function?\n\n**Answer**: \n\nStarts from point 0 (z is 0.1) in x, y and z and reach 0, 0, 10. \n\nI tried to design the reward function to fly the quadcopter straight up(z-axis) and penalises it for significant changes to its pose (other than z axis). The sigmoid function is used to provide a distance from target score which would always be between 0 and 1 with values closer to the target closer to 0. Then took this distance score minus 1 to give a score of 1 when the quadcopter was on the target.", "_____no_output_____" ], [ "**Question 2**: Discuss your agent briefly, using the following questions as a guide:\n\n- What learning algorithm(s) did you try? What worked best for you?\n- What was your final choice of hyperparameters (such as $\\alpha$, $\\gamma$, $\\epsilon$, etc.)?\n- What neural network architecture did you use (if any)? Specify layers, sizes, activation functions, etc.\n\n**Answer**:\nThe DDPG algorithm is used in this project. After trialling multiple hyperparameters, I used the same gamma and tau values are used as 0.99 and 0.001.\n\nThe actor has 2 hidden layers with 512 and 256 nodes and relu activation layers. The critic has single hidden layers of 128 nodes likewise with relu activation. Adding extra layers had a minimal positive effect on the training, except that they were significantly slower.\n\nI have set the learning rates to 0.001 and 0.001 on the actor and critic respectively. ", "_____no_output_____" ], [ "**Question 3**: Using the episode rewards plot, discuss how the agent learned over time.\n\n- Was it an easy task to learn or hard?\n- Was there a gradual learning curve, or an aha moment?\n- How good was the final performance of the agent? (e.g. mean rewards over the last 10 episodes)\n\n**Answer**:\n\nLearning is really challenging. 
To a certain point rewards are fluctuating but around 150 episodes rewards are setteled down.\n\nI couldn't get the network to learn enough to attempt as shown in the results above, and so the final set of rewards are not perfect.\n", "_____no_output_____" ], [ "**Question 4**: Briefly summarize your experience working on this project. You can use the following prompts for ideas.\n\n- What was the hardest part of the project? (e.g. getting started, plotting, specifying the task, etc.)\n- Did you find anything interesting in how the quadcopter or your agent behaved?\n\n**Answer**:\nThe project was challenging and caused such a serious leap from the others I've worked on. There are too many different components and parameters. I've had really hard times to set the reward function correctly and tried to keep it simple. I need some extra work on RL.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ] ]
cb9dfbbf7884d4351f9c9219b7f85753b747fce4
119,463
ipynb
Jupyter Notebook
samples/shapes/train_shapes.ipynb
MirandaLv/Mask_RCNN
afc8b3fd321ba3be710fe20990a3137d606a5449
[ "MIT" ]
null
null
null
samples/shapes/train_shapes.ipynb
MirandaLv/Mask_RCNN
afc8b3fd321ba3be710fe20990a3137d606a5449
[ "MIT" ]
null
null
null
samples/shapes/train_shapes.ipynb
MirandaLv/Mask_RCNN
afc8b3fd321ba3be710fe20990a3137d606a5449
[ "MIT" ]
null
null
null
111.335508
23,858
0.794522
[ [ [ "# Mask R-CNN - Train on Shapes Dataset\n\n\nThis notebook shows how to train Mask R-CNN on your own dataset. To keep things simple we use a synthetic dataset of shapes (squares, triangles, and circles) which enables fast training. You'd still need a GPU, though, because the network backbone is a Resnet101, which would be too slow to train on a CPU. On a GPU, you can start to get okay-ish results in a few minutes, and good results in less than an hour.\n\nThe code of the *Shapes* dataset is included below. It generates images on the fly, so it doesn't require downloading any data. And it can generate images of any size, so we pick a small image size to train faster. ", "_____no_output_____" ] ], [ [ "import os\nimport sys\nimport random\nimport math\nimport re\nimport time\nimport numpy as np\nimport cv2\nimport matplotlib\nimport matplotlib.pyplot as plt\n\n# Root directory of the project\nROOT_DIR = os.path.abspath(\"../../\")\n\n# Import Mask RCNN\nsys.path.append(ROOT_DIR) # To find local version of the library\nfrom mrcnn.config import Config\nfrom mrcnn import utils\nimport mrcnn.model as modellib\nfrom mrcnn import visualize\nfrom mrcnn.model import log\n\n%matplotlib inline \n\n# Directory to save logs and trained model\nMODEL_DIR = os.path.join(ROOT_DIR, \"logs\")\n\n# Local path to trained weights file\nCOCO_MODEL_PATH = os.path.join(ROOT_DIR, \"mask_rcnn_coco.h5\")\n# Download COCO trained weights from Releases if needed\nif not os.path.exists(COCO_MODEL_PATH):\n utils.download_trained_weights(COCO_MODEL_PATH)", "C:\\Users\\zlv\\Anaconda3\\envs\\test\\lib\\site-packages\\tensorflow\\python\\framework\\dtypes.py:526: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\nC:\\Users\\zlv\\Anaconda3\\envs\\test\\lib\\site-packages\\tensorflow\\python\\framework\\dtypes.py:527: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\nC:\\Users\\zlv\\Anaconda3\\envs\\test\\lib\\site-packages\\tensorflow\\python\\framework\\dtypes.py:528: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\nC:\\Users\\zlv\\Anaconda3\\envs\\test\\lib\\site-packages\\tensorflow\\python\\framework\\dtypes.py:529: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\nC:\\Users\\zlv\\Anaconda3\\envs\\test\\lib\\site-packages\\tensorflow\\python\\framework\\dtypes.py:530: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint32 = np.dtype([(\"qint32\", np.int32, 1)])\nC:\\Users\\zlv\\Anaconda3\\envs\\test\\lib\\site-packages\\tensorflow\\python\\framework\\dtypes.py:535: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n np_resource = np.dtype([(\"resource\", np.ubyte, 1)])\nUsing TensorFlow backend.\n" ] ], [ [ "## Configurations", 
"_____no_output_____" ] ], [ [ "class ShapesConfig(Config):\n \"\"\"Configuration for training on the toy shapes dataset.\n Derives from the base Config class and overrides values specific\n to the toy shapes dataset.\n \"\"\"\n # Give the configuration a recognizable name\n NAME = \"shapes\"\n\n # Train on 1 GPU and 8 images per GPU. We can put multiple images on each\n # GPU because the images are small. Batch size is 8 (GPUs * images/GPU).\n GPU_COUNT = 1\n IMAGES_PER_GPU = 8\n\n # Number of classes (including background)\n NUM_CLASSES = 1 + 3 # background + 3 shapes\n\n # Use small images for faster training. Set the limits of the small side\n # the large side, and that determines the image shape.\n IMAGE_MIN_DIM = 128\n IMAGE_MAX_DIM = 128\n\n # Use smaller anchors because our image and objects are small\n RPN_ANCHOR_SCALES = (8, 16, 32, 64, 128) # anchor side in pixels\n\n # Reduce training ROIs per image because the images are small and have\n # few objects. Aim to allow ROI sampling to pick 33% positive ROIs.\n TRAIN_ROIS_PER_IMAGE = 32\n\n # Use a small epoch since the data is simple\n STEPS_PER_EPOCH = 100\n\n # use small validation steps since the epoch is small\n VALIDATION_STEPS = 5\n \nconfig = ShapesConfig()\nconfig.display()", "\nConfigurations:\nBACKBONE resnet101\nBACKBONE_STRIDES [4, 8, 16, 32, 64]\nBATCH_SIZE 8\nBBOX_STD_DEV [0.1 0.1 0.2 0.2]\nCOMPUTE_BACKBONE_SHAPE None\nDETECTION_MAX_INSTANCES 100\nDETECTION_MIN_CONFIDENCE 0.7\nDETECTION_NMS_THRESHOLD 0.3\nFPN_CLASSIF_FC_LAYERS_SIZE 1024\nGPU_COUNT 1\nGRADIENT_CLIP_NORM 5.0\nIMAGES_PER_GPU 8\nIMAGE_CHANNEL_COUNT 3\nIMAGE_MAX_DIM 128\nIMAGE_META_SIZE 16\nIMAGE_MIN_DIM 128\nIMAGE_MIN_SCALE 0\nIMAGE_RESIZE_MODE square\nIMAGE_SHAPE [128 128 3]\nLEARNING_MOMENTUM 0.9\nLEARNING_RATE 0.001\nLOSS_WEIGHTS {'rpn_class_loss': 1.0, 'rpn_bbox_loss': 1.0, 'mrcnn_class_loss': 1.0, 'mrcnn_bbox_loss': 1.0, 'mrcnn_mask_loss': 1.0}\nMASK_POOL_SIZE 14\nMASK_SHAPE [28, 28]\nMAX_GT_INSTANCES 100\nMEAN_PIXEL [123.7 116.8 103.9]\nMINI_MASK_SHAPE (56, 56)\nNAME shapes\nNUM_CLASSES 4\nPOOL_SIZE 7\nPOST_NMS_ROIS_INFERENCE 1000\nPOST_NMS_ROIS_TRAINING 2000\nPRE_NMS_LIMIT 6000\nROI_POSITIVE_RATIO 0.33\nRPN_ANCHOR_RATIOS [0.5, 1, 2]\nRPN_ANCHOR_SCALES (8, 16, 32, 64, 128)\nRPN_ANCHOR_STRIDE 1\nRPN_BBOX_STD_DEV [0.1 0.1 0.2 0.2]\nRPN_NMS_THRESHOLD 0.7\nRPN_TRAIN_ANCHORS_PER_IMAGE 256\nSTEPS_PER_EPOCH 100\nTOP_DOWN_PYRAMID_SIZE 256\nTRAIN_BN False\nTRAIN_ROIS_PER_IMAGE 32\nUSE_MINI_MASK True\nUSE_RPN_ROIS True\nVALIDATION_STEPS 5\nWEIGHT_DECAY 0.0001\n\n\n" ] ], [ [ "## Notebook Preferences", "_____no_output_____" ] ], [ [ "def get_ax(rows=1, cols=1, size=8):\n \"\"\"Return a Matplotlib Axes array to be used in\n all visualizations in the notebook. Provide a\n central point to control graph sizes.\n \n Change the default size attribute to control the size\n of rendered images\n \"\"\"\n _, ax = plt.subplots(rows, cols, figsize=(size*cols, size*rows))\n return ax", "_____no_output_____" ] ], [ [ "## Dataset\n\nCreate a synthetic dataset\n\nExtend the Dataset class and add a method to load the shapes dataset, `load_shapes()`, and override the following methods:\n\n* load_image()\n* load_mask()\n* image_reference()", "_____no_output_____" ] ], [ [ "class ShapesDataset(utils.Dataset):\n \"\"\"Generates the shapes synthetic dataset. The dataset consists of simple\n shapes (triangles, squares, circles) placed randomly on a blank surface.\n The images are generated on the fly. 
No file access required.\n \"\"\"\n\n def load_shapes(self, count, height, width):\n \"\"\"Generate the requested number of synthetic images.\n count: number of images to generate.\n height, width: the size of the generated images.\n \"\"\"\n # Add classes\n self.add_class(\"shapes\", 1, \"square\")\n self.add_class(\"shapes\", 2, \"circle\")\n self.add_class(\"shapes\", 3, \"triangle\")\n\n # Add images\n # Generate random specifications of images (i.e. color and\n # list of shapes sizes and locations). This is more compact than\n # actual images. Images are generated on the fly in load_image().\n for i in range(count):\n bg_color, shapes = self.random_image(height, width)\n self.add_image(\"shapes\", image_id=i, path=None,\n width=width, height=height,\n bg_color=bg_color, shapes=shapes)\n\n def load_image(self, image_id):\n \"\"\"Generate an image from the specs of the given image ID.\n Typically this function loads the image from a file, but\n in this case it generates the image on the fly from the\n specs in image_info.\n \"\"\"\n info = self.image_info[image_id]\n bg_color = np.array(info['bg_color']).reshape([1, 1, 3])\n image = np.ones([info['height'], info['width'], 3], dtype=np.uint8)\n image = image * bg_color.astype(np.uint8)\n for shape, color, dims in info['shapes']:\n image = self.draw_shape(image, shape, dims, color)\n return image\n\n def image_reference(self, image_id):\n \"\"\"Return the shapes data of the image.\"\"\"\n info = self.image_info[image_id]\n if info[\"source\"] == \"shapes\":\n return info[\"shapes\"]\n else:\n super(self.__class__).image_reference(self, image_id)\n\n def load_mask(self, image_id):\n \"\"\"Generate instance masks for shapes of the given image ID.\n \"\"\"\n info = self.image_info[image_id]\n shapes = info['shapes']\n count = len(shapes)\n mask = np.zeros([info['height'], info['width'], count], dtype=np.uint8)\n for i, (shape, _, dims) in enumerate(info['shapes']):\n mask[:, :, i:i+1] = self.draw_shape(mask[:, :, i:i+1].copy(),\n shape, dims, 1)\n # Handle occlusions\n occlusion = np.logical_not(mask[:, :, -1]).astype(np.uint8)\n for i in range(count-2, -1, -1):\n mask[:, :, i] = mask[:, :, i] * occlusion\n occlusion = np.logical_and(occlusion, np.logical_not(mask[:, :, i]))\n # Map class names to class IDs.\n class_ids = np.array([self.class_names.index(s[0]) for s in shapes])\n return mask.astype(np.bool), class_ids.astype(np.int32)\n\n def draw_shape(self, image, shape, dims, color):\n \"\"\"Draws a shape from the given specs.\"\"\"\n # Get the center x, y and the size s\n x, y, s = dims\n if shape == 'square':\n cv2.rectangle(image, (x-s, y-s), (x+s, y+s), color, -1)\n elif shape == \"circle\":\n cv2.circle(image, (x, y), s, color, -1)\n elif shape == \"triangle\":\n points = np.array([[(x, y-s),\n (x-s/math.sin(math.radians(60)), y+s),\n (x+s/math.sin(math.radians(60)), y+s),\n ]], dtype=np.int32)\n cv2.fillPoly(image, points, color)\n return image\n\n def random_shape(self, height, width):\n \"\"\"Generates specifications of a random shape that lies within\n the given height and width boundaries.\n Returns a tuple of three valus:\n * The shape name (square, circle, ...)\n * Shape color: a tuple of 3 values, RGB.\n * Shape dimensions: A tuple of values that define the shape size\n and location. 
Differs per shape type.\n \"\"\"\n # Shape\n shape = random.choice([\"square\", \"circle\", \"triangle\"])\n # Color\n color = tuple([random.randint(0, 255) for _ in range(3)])\n # Center x, y\n buffer = 20\n y = random.randint(buffer, height - buffer - 1)\n x = random.randint(buffer, width - buffer - 1)\n # Size\n s = random.randint(buffer, height//4)\n return shape, color, (x, y, s)\n\n def random_image(self, height, width):\n \"\"\"Creates random specifications of an image with multiple shapes.\n Returns the background color of the image and a list of shape\n specifications that can be used to draw the image.\n \"\"\"\n # Pick random background color\n bg_color = np.array([random.randint(0, 255) for _ in range(3)])\n # Generate a few random shapes and record their\n # bounding boxes\n shapes = []\n boxes = []\n N = random.randint(1, 4)\n for _ in range(N):\n shape, color, dims = self.random_shape(height, width)\n shapes.append((shape, color, dims))\n x, y, s = dims\n boxes.append([y-s, x-s, y+s, x+s])\n # Apply non-max suppression wit 0.3 threshold to avoid\n # shapes covering each other\n keep_ixs = utils.non_max_suppression(np.array(boxes), np.arange(N), 0.3)\n shapes = [s for i, s in enumerate(shapes) if i in keep_ixs]\n return bg_color, shapes", "_____no_output_____" ], [ "# Training dataset\ndataset_train = ShapesDataset()\ndataset_train.load_shapes(500, config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1])\ndataset_train.prepare()\n\n# Validation dataset\ndataset_val = ShapesDataset()\ndataset_val.load_shapes(50, config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1])\ndataset_val.prepare()", "_____no_output_____" ], [ "# Load and display random samples\nimage_ids = np.random.choice(dataset_train.image_ids, 4)\nfor image_id in image_ids:\n image = dataset_train.load_image(image_id)\n mask, class_ids = dataset_train.load_mask(image_id)\n visualize.display_top_masks(image, mask, class_ids, dataset_train.class_names)", "_____no_output_____" ] ], [ [ "## Create Model", "_____no_output_____" ] ], [ [ "# Create model in training mode\nmodel = modellib.MaskRCNN(mode=\"training\", config=config,\n model_dir=MODEL_DIR)", "WARNING:tensorflow:From C:\\Users\\zlv\\Anaconda3\\envs\\test\\lib\\site-packages\\tensorflow\\python\\framework\\op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\nInstructions for updating:\nColocations handled automatically by placer.\nWARNING:tensorflow:From C:\\Users\\zlv\\Anaconda3\\envs\\test\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py:1208: calling reduce_max_v1 (from tensorflow.python.ops.math_ops) with keep_dims is deprecated and will be removed in a future version.\nInstructions for updating:\nkeep_dims is deprecated, use keepdims instead\nWARNING:tensorflow:From C:\\Users\\zlv\\Anaconda3\\envs\\test\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py:1242: calling reduce_sum_v1 (from tensorflow.python.ops.math_ops) with keep_dims is deprecated and will be removed in a future version.\nInstructions for updating:\nkeep_dims is deprecated, use keepdims instead\n" ], [ "# Which weights to start with?\ninit_with = \"coco\" # imagenet, coco, or last\n\nif init_with == \"imagenet\":\n model.load_weights(model.get_imagenet_weights(), by_name=True)\nelif init_with == \"coco\":\n # Load weights trained on MS COCO, but skip layers that\n # are different due to the different number of classes\n # See README for instructions to download the COCO weights\n 
model.load_weights(COCO_MODEL_PATH, by_name=True,\n exclude=[\"mrcnn_class_logits\", \"mrcnn_bbox_fc\", \n \"mrcnn_bbox\", \"mrcnn_mask\"])\nelif init_with == \"last\":\n # Load the last model you trained and continue training\n model.load_weights(model.find_last(), by_name=True)", "_____no_output_____" ] ], [ [ "## Training\n\nTrain in two stages:\n1. Only the heads. Here we're freezing all the backbone layers and training only the randomly initialized layers (i.e. the ones that we didn't use pre-trained weights from MS COCO). To train only the head layers, pass `layers='heads'` to the `train()` function.\n\n2. Fine-tune all layers. For this simple example it's not necessary, but we're including it to show the process. Simply pass `layers=\"all` to train all layers.", "_____no_output_____" ] ], [ [ "# Train the head branches\n# Passing layers=\"heads\" freezes all layers except the head\n# layers. You can also pass a regular expression to select\n# which layers to train by name pattern.\nmodel.train(dataset_train, dataset_val, \n learning_rate=config.LEARNING_RATE, \n epochs=1, \n layers='heads')", "\nStarting at epoch 0. LR=0.001\n\nCheckpoint Path: C:\\Users\\zlv\\Documents\\GitHub\\Mask_RCNN\\logs\\shapes20210126T2307\\mask_rcnn_shapes_{epoch:04d}.h5\nSelecting layers to train\nfpn_c5p5 (Conv2D)\nfpn_c4p4 (Conv2D)\nfpn_c3p3 (Conv2D)\nfpn_c2p2 (Conv2D)\nfpn_p5 (Conv2D)\nfpn_p2 (Conv2D)\nfpn_p3 (Conv2D)\nfpn_p4 (Conv2D)\nIn model: rpn_model\n rpn_conv_shared (Conv2D)\n rpn_class_raw (Conv2D)\n rpn_bbox_pred (Conv2D)\nmrcnn_mask_conv1 (TimeDistributed)\nmrcnn_mask_bn1 (TimeDistributed)\nmrcnn_mask_conv2 (TimeDistributed)\nmrcnn_mask_bn2 (TimeDistributed)\nmrcnn_class_conv1 (TimeDistributed)\nmrcnn_class_bn1 (TimeDistributed)\nmrcnn_mask_conv3 (TimeDistributed)\nmrcnn_mask_bn3 (TimeDistributed)\nmrcnn_class_conv2 (TimeDistributed)\nmrcnn_class_bn2 (TimeDistributed)\nmrcnn_mask_conv4 (TimeDistributed)\nmrcnn_mask_bn4 (TimeDistributed)\nmrcnn_bbox_fc (TimeDistributed)\nmrcnn_mask_deconv (TimeDistributed)\nmrcnn_class_logits (TimeDistributed)\nmrcnn_mask (TimeDistributed)\nEpoch 1/1\n" ], [ "# Fine tune all layers\n# Passing layers=\"all\" trains all layers. You can also \n# pass a regular expression to select which layers to\n# train by name pattern.\nmodel.train(dataset_train, dataset_val, \n learning_rate=config.LEARNING_RATE / 10,\n epochs=2, \n layers=\"all\")", "Checkpoint Path: /deepmatter/mask_rcnn/logs/shapes2017102802/mask_rcnn_{epoch:04d}.h5\nStarting at epoch 0. 
LR=0.0002\n\nSelecting layers to train\nconv1 (Conv2D)\nbn_conv1 (BatchNorm)\nres2a_branch2a (Conv2D)\nbn2a_branch2a (BatchNorm)\nres2a_branch2b (Conv2D)\nbn2a_branch2b (BatchNorm)\nres2a_branch2c (Conv2D)\nres2a_branch1 (Conv2D)\nbn2a_branch2c (BatchNorm)\nbn2a_branch1 (BatchNorm)\nres2b_branch2a (Conv2D)\nbn2b_branch2a (BatchNorm)\nres2b_branch2b (Conv2D)\nbn2b_branch2b (BatchNorm)\nres2b_branch2c (Conv2D)\nbn2b_branch2c (BatchNorm)\nres2c_branch2a (Conv2D)\nbn2c_branch2a (BatchNorm)\nres2c_branch2b (Conv2D)\nbn2c_branch2b (BatchNorm)\nres2c_branch2c (Conv2D)\nbn2c_branch2c (BatchNorm)\nres3a_branch2a (Conv2D)\nbn3a_branch2a (BatchNorm)\nres3a_branch2b (Conv2D)\nbn3a_branch2b (BatchNorm)\nres3a_branch2c (Conv2D)\nres3a_branch1 (Conv2D)\nbn3a_branch2c (BatchNorm)\nbn3a_branch1 (BatchNorm)\nres3b_branch2a (Conv2D)\nbn3b_branch2a (BatchNorm)\nres3b_branch2b (Conv2D)\nbn3b_branch2b (BatchNorm)\nres3b_branch2c (Conv2D)\nbn3b_branch2c (BatchNorm)\nres3c_branch2a (Conv2D)\nbn3c_branch2a (BatchNorm)\nres3c_branch2b (Conv2D)\nbn3c_branch2b (BatchNorm)\nres3c_branch2c (Conv2D)\nbn3c_branch2c (BatchNorm)\nres3d_branch2a (Conv2D)\nbn3d_branch2a (BatchNorm)\nres3d_branch2b (Conv2D)\nbn3d_branch2b (BatchNorm)\nres3d_branch2c (Conv2D)\nbn3d_branch2c (BatchNorm)\nres4a_branch2a (Conv2D)\nbn4a_branch2a (BatchNorm)\nres4a_branch2b (Conv2D)\nbn4a_branch2b (BatchNorm)\nres4a_branch2c (Conv2D)\nres4a_branch1 (Conv2D)\nbn4a_branch2c (BatchNorm)\nbn4a_branch1 (BatchNorm)\nres4b_branch2a (Conv2D)\nbn4b_branch2a (BatchNorm)\nres4b_branch2b (Conv2D)\nbn4b_branch2b (BatchNorm)\nres4b_branch2c (Conv2D)\nbn4b_branch2c (BatchNorm)\nres4c_branch2a (Conv2D)\nbn4c_branch2a (BatchNorm)\nres4c_branch2b (Conv2D)\nbn4c_branch2b (BatchNorm)\nres4c_branch2c (Conv2D)\nbn4c_branch2c (BatchNorm)\nres4d_branch2a (Conv2D)\nbn4d_branch2a (BatchNorm)\nres4d_branch2b (Conv2D)\nbn4d_branch2b (BatchNorm)\nres4d_branch2c (Conv2D)\nbn4d_branch2c (BatchNorm)\nres4e_branch2a (Conv2D)\nbn4e_branch2a (BatchNorm)\nres4e_branch2b (Conv2D)\nbn4e_branch2b (BatchNorm)\nres4e_branch2c (Conv2D)\nbn4e_branch2c (BatchNorm)\nres4f_branch2a (Conv2D)\nbn4f_branch2a (BatchNorm)\nres4f_branch2b (Conv2D)\nbn4f_branch2b (BatchNorm)\nres4f_branch2c (Conv2D)\nbn4f_branch2c (BatchNorm)\nres4g_branch2a (Conv2D)\nbn4g_branch2a (BatchNorm)\nres4g_branch2b (Conv2D)\nbn4g_branch2b (BatchNorm)\nres4g_branch2c (Conv2D)\nbn4g_branch2c (BatchNorm)\nres4h_branch2a (Conv2D)\nbn4h_branch2a (BatchNorm)\nres4h_branch2b (Conv2D)\nbn4h_branch2b (BatchNorm)\nres4h_branch2c (Conv2D)\nbn4h_branch2c (BatchNorm)\nres4i_branch2a (Conv2D)\nbn4i_branch2a (BatchNorm)\nres4i_branch2b (Conv2D)\nbn4i_branch2b (BatchNorm)\nres4i_branch2c (Conv2D)\nbn4i_branch2c (BatchNorm)\nres4j_branch2a (Conv2D)\nbn4j_branch2a (BatchNorm)\nres4j_branch2b (Conv2D)\nbn4j_branch2b (BatchNorm)\nres4j_branch2c (Conv2D)\nbn4j_branch2c (BatchNorm)\nres4k_branch2a (Conv2D)\nbn4k_branch2a (BatchNorm)\nres4k_branch2b (Conv2D)\nbn4k_branch2b (BatchNorm)\nres4k_branch2c (Conv2D)\nbn4k_branch2c (BatchNorm)\nres4l_branch2a (Conv2D)\nbn4l_branch2a (BatchNorm)\nres4l_branch2b (Conv2D)\nbn4l_branch2b (BatchNorm)\nres4l_branch2c (Conv2D)\nbn4l_branch2c (BatchNorm)\nres4m_branch2a (Conv2D)\nbn4m_branch2a (BatchNorm)\nres4m_branch2b (Conv2D)\nbn4m_branch2b (BatchNorm)\nres4m_branch2c (Conv2D)\nbn4m_branch2c (BatchNorm)\nres4n_branch2a (Conv2D)\nbn4n_branch2a (BatchNorm)\nres4n_branch2b (Conv2D)\nbn4n_branch2b (BatchNorm)\nres4n_branch2c (Conv2D)\nbn4n_branch2c (BatchNorm)\nres4o_branch2a (Conv2D)\nbn4o_branch2a 
(BatchNorm)\nres4o_branch2b (Conv2D)\nbn4o_branch2b (BatchNorm)\nres4o_branch2c (Conv2D)\nbn4o_branch2c (BatchNorm)\nres4p_branch2a (Conv2D)\nbn4p_branch2a (BatchNorm)\nres4p_branch2b (Conv2D)\nbn4p_branch2b (BatchNorm)\nres4p_branch2c (Conv2D)\nbn4p_branch2c (BatchNorm)\nres4q_branch2a (Conv2D)\nbn4q_branch2a (BatchNorm)\nres4q_branch2b (Conv2D)\nbn4q_branch2b (BatchNorm)\nres4q_branch2c (Conv2D)\nbn4q_branch2c (BatchNorm)\nres4r_branch2a (Conv2D)\nbn4r_branch2a (BatchNorm)\nres4r_branch2b (Conv2D)\nbn4r_branch2b (BatchNorm)\nres4r_branch2c (Conv2D)\nbn4r_branch2c (BatchNorm)\nres4s_branch2a (Conv2D)\nbn4s_branch2a (BatchNorm)\nres4s_branch2b (Conv2D)\nbn4s_branch2b (BatchNorm)\nres4s_branch2c (Conv2D)\nbn4s_branch2c (BatchNorm)\nres4t_branch2a (Conv2D)\nbn4t_branch2a (BatchNorm)\nres4t_branch2b (Conv2D)\nbn4t_branch2b (BatchNorm)\nres4t_branch2c (Conv2D)\nbn4t_branch2c (BatchNorm)\nres4u_branch2a (Conv2D)\nbn4u_branch2a (BatchNorm)\nres4u_branch2b (Conv2D)\nbn4u_branch2b (BatchNorm)\nres4u_branch2c (Conv2D)\nbn4u_branch2c (BatchNorm)\nres4v_branch2a (Conv2D)\nbn4v_branch2a (BatchNorm)\nres4v_branch2b (Conv2D)\nbn4v_branch2b (BatchNorm)\nres4v_branch2c (Conv2D)\nbn4v_branch2c (BatchNorm)\nres4w_branch2a (Conv2D)\nbn4w_branch2a (BatchNorm)\nres4w_branch2b (Conv2D)\nbn4w_branch2b (BatchNorm)\nres4w_branch2c (Conv2D)\nbn4w_branch2c (BatchNorm)\nres5a_branch2a (Conv2D)\nbn5a_branch2a (BatchNorm)\nres5a_branch2b (Conv2D)\nbn5a_branch2b (BatchNorm)\nres5a_branch2c (Conv2D)\nres5a_branch1 (Conv2D)\nbn5a_branch2c (BatchNorm)\nbn5a_branch1 (BatchNorm)\nres5b_branch2a (Conv2D)\nbn5b_branch2a (BatchNorm)\nres5b_branch2b (Conv2D)\nbn5b_branch2b (BatchNorm)\nres5b_branch2c (Conv2D)\nbn5b_branch2c (BatchNorm)\nres5c_branch2a (Conv2D)\nbn5c_branch2a (BatchNorm)\nres5c_branch2b (Conv2D)\nbn5c_branch2b (BatchNorm)\nres5c_branch2c (Conv2D)\nbn5c_branch2c (BatchNorm)\nfpn_c5p5 (Conv2D)\nfpn_c4p4 (Conv2D)\nfpn_c3p3 (Conv2D)\nfpn_c2p2 (Conv2D)\nfpn_p5 (Conv2D)\nfpn_p2 (Conv2D)\nfpn_p3 (Conv2D)\nfpn_p4 (Conv2D)\nIn model: rpn_model\n rpn_conv_shared (Conv2D)\n rpn_class_raw (Conv2D)\n rpn_bbox_pred (Conv2D)\nmrcnn_mask_conv1 (TimeDistributed)\nmrcnn_mask_bn1 (TimeDistributed)\nmrcnn_mask_conv2 (TimeDistributed)\nmrcnn_mask_bn2 (TimeDistributed)\nmrcnn_class_conv1 (TimeDistributed)\nmrcnn_class_bn1 (TimeDistributed)\nmrcnn_mask_conv3 (TimeDistributed)\nmrcnn_mask_bn3 (TimeDistributed)\nmrcnn_class_conv2 (TimeDistributed)\nmrcnn_class_bn2 (TimeDistributed)\nmrcnn_mask_conv4 (TimeDistributed)\nmrcnn_mask_bn4 (TimeDistributed)\nmrcnn_bbox_fc (TimeDistributed)\nmrcnn_mask_deconv (TimeDistributed)\nmrcnn_class_logits (TimeDistributed)\nmrcnn_mask (TimeDistributed)\n" ], [ "# Save weights\n# Typically not needed because callbacks save after every epoch\n# Uncomment to save manually\n# model_path = os.path.join(MODEL_DIR, \"mask_rcnn_shapes.h5\")\n# model.keras_model.save_weights(model_path)", "_____no_output_____" ] ], [ [ "## Detection", "_____no_output_____" ] ], [ [ "class InferenceConfig(ShapesConfig):\n GPU_COUNT = 1\n IMAGES_PER_GPU = 1\n\ninference_config = InferenceConfig()\n\n# Recreate the model in inference mode\nmodel = modellib.MaskRCNN(mode=\"inference\", \n config=inference_config,\n model_dir=MODEL_DIR)\n\n# Get path to saved weights\n# Either set a specific path or find last trained weights\n# model_path = os.path.join(ROOT_DIR, \".h5 file name here\")\nmodel_path = model.find_last()\n\n# Load trained weights\nprint(\"Loading weights from \", model_path)\nmodel.load_weights(model_path, 
by_name=True)", "_____no_output_____" ], [ "# Test on a random image\nimage_id = random.choice(dataset_val.image_ids)\noriginal_image, image_meta, gt_class_id, gt_bbox, gt_mask =\\\n modellib.load_image_gt(dataset_val, inference_config, \n image_id, use_mini_mask=False)\n\nlog(\"original_image\", original_image)\nlog(\"image_meta\", image_meta)\nlog(\"gt_class_id\", gt_class_id)\nlog(\"gt_bbox\", gt_bbox)\nlog(\"gt_mask\", gt_mask)\n\nvisualize.display_instances(original_image, gt_bbox, gt_mask, gt_class_id, \n dataset_train.class_names, figsize=(8, 8))", "original_image shape: (128, 128, 3) min: 108.00000 max: 236.00000\nimage_meta shape: (12,) min: 0.00000 max: 128.00000\ngt_bbox shape: (2, 5) min: 2.00000 max: 102.00000\ngt_mask shape: (128, 128, 2) min: 0.00000 max: 1.00000\n" ], [ "results = model.detect([original_image], verbose=1)\n\nr = results[0]\nvisualize.display_instances(original_image, r['rois'], r['masks'], r['class_ids'], \n dataset_val.class_names, r['scores'], ax=get_ax())", "Processing 1 images\nimage shape: (128, 128, 3) min: 108.00000 max: 236.00000\nmolded_images shape: (1, 128, 128, 3) min: -15.70000 max: 132.10000\nimage_metas shape: (1, 12) min: 0.00000 max: 128.00000\n" ] ], [ [ "## Evaluation", "_____no_output_____" ] ], [ [ "# Compute VOC-Style mAP @ IoU=0.5\n# Running on 10 images. Increase for better accuracy.\nimage_ids = np.random.choice(dataset_val.image_ids, 10)\nAPs = []\nfor image_id in image_ids:\n # Load image and ground truth data\n image, image_meta, gt_class_id, gt_bbox, gt_mask =\\\n modellib.load_image_gt(dataset_val, inference_config,\n image_id, use_mini_mask=False)\n # Run object detection (model.detect prepares, i.e. molds, the images itself)\n results = model.detect([image], verbose=0)\n r = results[0]\n # Compute AP\n AP, precisions, recalls, overlaps =\\\n utils.compute_ap(gt_bbox, gt_class_id, gt_mask,\n r[\"rois\"], r[\"class_ids\"], r[\"scores\"], r['masks'])\n APs.append(AP)\n \nprint(\"mAP: \", np.mean(APs))", "mAP: 0.95\n" ] ] ]
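The evaluation cell above averages per-image average precision over ten random validation images at an IoU threshold of 0.5. A minimal, self-contained sketch of the IoU test that underlies this VOC-style matching; box_iou is an illustrative helper, not the repository's utils.compute_ap, and the boxes follow the (y1, x1, y2, x2) convention used by load_image_gt:

import numpy as np

def box_iou(box_a, box_b):
    # Intersection-over-union of two boxes given as (y1, x1, y2, x2).
    y1, x1 = max(box_a[0], box_b[0]), max(box_a[1], box_b[1])
    y2, x2 = min(box_a[2], box_b[2]), min(box_a[3], box_b[3])
    inter = max(0, y2 - y1) * max(0, x2 - x1)
    area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
    union = area_a + area_b - inter
    return inter / union if union > 0 else 0.0

# A prediction counts as a true positive at the VOC threshold when IoU >= 0.5.
print(box_iou([10, 10, 60, 60], [30, 30, 80, 80]))  # ~0.22, below the cutoff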
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
cb9e1299ced4865865e933c896b5635f2180c3fc
197,028
ipynb
Jupyter Notebook
House Prices_Train_1.ipynb
malavika8/HousePricePrediction
aacf984e80c5bddf4f0876eb9e369d2b2bfad5d9
[ "BSD-2-Clause" ]
null
null
null
House Prices_Train_1.ipynb
malavika8/HousePricePrediction
aacf984e80c5bddf4f0876eb9e369d2b2bfad5d9
[ "BSD-2-Clause" ]
null
null
null
House Prices_Train_1.ipynb
malavika8/HousePricePrediction
aacf984e80c5bddf4f0876eb9e369d2b2bfad5d9
[ "BSD-2-Clause" ]
null
null
null
64.220339
58,476
0.700408
[ [ [ "## House Prices: Advanced Regression Techniques : Kaggle Competition", "_____no_output_____" ], [ "### Import Libraries", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns", "_____no_output_____" ] ], [ [ "### Import Data", "_____no_output_____" ] ], [ [ "df=pd.read_csv('train.csv')", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ], [ "df.shape", "_____no_output_____" ] ], [ [ "### Step1: Check for missing values", "_____no_output_____" ] ], [ [ "fig, ax = plt.subplots(figsize=(20,5)) # To change fig shape for better representation\nsns.heatmap(df.isnull(),yticklabels=False,cbar=False, ax=ax)", "_____no_output_____" ], [ "def missing_zero_values_table(dataframe):\n zero_val = (dataframe == 0.00).astype(int).sum(axis=0)\n mis_val = dataframe.isnull().sum()\n mis_val_percent = 100 * dataframe.isnull().sum() / len(dataframe)\n mz_table = pd.concat([zero_val, mis_val, mis_val_percent], axis=1)\n mz_table = mz_table.rename(\n columns = {0 : 'Zero Values', 1 : 'Missing Values', 2 : '% of Total Values'})\n zero_val = (dataframe == 0.00).astype(int).sum(axis=0)\n mis_val = dataframe.isnull().sum()\n mis_val_percent = 100 * dataframe.isnull().sum() / len(dataframe)\n mz_table = pd.concat([zero_val, mis_val, mis_val_percent], axis=1)\n mz_table = mz_table.rename(\n columns = {0 : 'Zero Values', 1 : 'Missing Values', 2 : '% of Total Values'})\n mz_table['Data Type'] = dataframe.dtypes\n mz_table = mz_table[\n mz_table.iloc[:,1] != 0].sort_values(\n '% of Total Values', ascending=False).round(1)\n print (\"Your selected dataframe has \" + str(dataframe.shape[1]) + \" columns and \" + str(dataframe.shape[0]) + \" Rows.\\n\" \n \"There are \" + str(mz_table.shape[0]) +\n \" columns that have missing values.\")\n return mz_table", "_____no_output_____" ], [ "missing_zero_values_table(df)", "Your selected dataframe has 81 columns and 1460 Rows.\nThere are 19 columns that have missing values.\n" ] ], [ [ "### Step 2: Filling Missing values and droping columns whose missing >70%", "_____no_output_____" ] ], [ [ "# droping columns whose missing >70%\ndf.drop(['PoolQC','MiscFeature','Alley','Fence'],axis=1,inplace=True)", "_____no_output_____" ] ], [ [ "#### Handling Missing data : categorical data with MODE & numerical data with MEAN", "_____no_output_____" ] ], [ [ "df['FireplaceQu'].value_counts()", "_____no_output_____" ], [ "df['FireplaceQu'].fillna(value='Gd', inplace=True) ", "_____no_output_____" ], [ "df['LotFrontage'].mean()", "_____no_output_____" ], [ "df['LotFrontage'].fillna(value=70.05, inplace=True)", "_____no_output_____" ], [ "df['GarageType'].value_counts()", "_____no_output_____" ], [ "df['GarageType'].fillna(value='Attchd', inplace=True) ", "_____no_output_____" ], [ "df['GarageYrBlt'].value_counts()", "_____no_output_____" ], [ "df['GarageYrBlt'].fillna(value=2005, inplace=True) ", "_____no_output_____" ], [ "df['GarageFinish'].value_counts()", "_____no_output_____" ], [ "df['GarageFinish'].fillna(value='Unf', inplace=True) ", "_____no_output_____" ], [ "df['GarageQual'].value_counts()", "_____no_output_____" ], [ "df['GarageQual'].fillna(value='TA', inplace=True) ", "_____no_output_____" ], [ "df['GarageCond'].value_counts()", "_____no_output_____" ], [ "df['GarageCond'].fillna(value='TA', inplace=True) ", "_____no_output_____" ], [ "df['BsmtExposure'].value_counts()", "_____no_output_____" ], [ "df['BsmtExposure'].fillna(value='No', inplace=True)", "_____no_output_____" ], [ 
"df['BsmtFinType1'].value_counts()", "_____no_output_____" ], [ "df['BsmtFinType1'].fillna(value='Unf', inplace=True)", "_____no_output_____" ], [ "df['BsmtFinType2'].value_counts()", "_____no_output_____" ], [ "df['BsmtFinType2'].fillna(value='Unf', inplace=True)", "_____no_output_____" ], [ "df['BsmtCond'].value_counts()", "_____no_output_____" ], [ "df['BsmtCond'].fillna(value='TA', inplace=True)", "_____no_output_____" ], [ "df['BsmtQual'].value_counts()", "_____no_output_____" ], [ "df['BsmtQual'].fillna(value='TA', inplace=True)", "_____no_output_____" ], [ "df['MasVnrArea'].mean()", "_____no_output_____" ], [ "df['MasVnrArea'].fillna(value=103.6, inplace=True)", "_____no_output_____" ], [ "df['MasVnrType'].value_counts()", "_____no_output_____" ], [ "df['MasVnrType'].fillna(value='None', inplace=True)", "_____no_output_____" ], [ "df['Electrical'].value_counts()", "_____no_output_____" ], [ "df['Electrical'].fillna(value='SBrkr', inplace=True)", "_____no_output_____" ], [ "df.shape", "_____no_output_____" ], [ "#df.drop(['Id'],axis=1,inplace=True)", "_____no_output_____" ], [ "missing_zero_values_table(df)", "Your selected dataframe has 77 columns and 1460 Rows.\nThere are 0 columns that have missing values.\n" ], [ "fig, ax = plt.subplots(figsize=(20,5)) \nsns.heatmap(df.isnull(),yticklabels=False,cbar=False,cmap='YlGnBu',ax=ax)", "_____no_output_____" ], [ "df.dropna(inplace=True)", "_____no_output_____" ], [ "df.shape", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ] ], [ [ "### Data Engineering is done !!\n\n### Now we will handle Categorical Data ( to Numerical Data)", "_____no_output_____" ] ], [ [ "##HAndle Categorical Features\ncolumns = list(df.select_dtypes(include=['object']).columns)\ncolumns", "_____no_output_____" ], [ "len(columns)", "_____no_output_____" ], [ "main_df=df.copy() # saving original data copy", "_____no_output_____" ], [ "## Test Data \ntest_df=pd.read_csv('cleaned_test.csv')", "_____no_output_____" ], [ "test_df.shape", "_____no_output_____" ], [ "df.shape", "_____no_output_____" ] ], [ [ "#### Read!\nTrain Data has 77 features and Test Data has 76 features.\nThat extra column(feature) is SalePrice which will be seperated later before fitting in model\n\nNow we will combine both the test and train data and apply get_dummies to categorical data which will convert categorical variable into dummy/indicator variables.\nWe have combined them both together so that while conversion identity remains same. 
", "_____no_output_____" ] ], [ [ "test_df.head()", "_____no_output_____" ], [ "final_df=pd.concat([df,test_df],axis=0)", "_____no_output_____" ], [ "final_df['SalePrice'] #SalePrice of test data will get Nan values which needs to be predicted", "_____no_output_____" ], [ "final_df.shape", "_____no_output_____" ], [ "# function to apply get_dummies to all categorical data which will convert categorical variable into \n# dummy/indicator variables \ndef category_onehot_multcols(multcolumns):\n df_final=final_df\n i=0\n for fields in multcolumns:\n \n print(fields)\n df1=pd.get_dummies(final_df[fields],drop_first=True)\n \n final_df.drop([fields],axis=1,inplace=True)\n if i==0:\n df_final=df1.copy()\n else:\n \n df_final=pd.concat([df_final,df1],axis=1)\n i=i+1\n \n \n df_final=pd.concat([final_df,df_final],axis=1)\n \n return df_final", "_____no_output_____" ], [ "final_df=category_onehot_multcols(columns)", "MSZoning\nStreet\nLotShape\nLandContour\nUtilities\nLotConfig\nLandSlope\nNeighborhood\nCondition1\nCondition2\nBldgType\nHouseStyle\nRoofStyle\nRoofMatl\nExterior1st\nExterior2nd\nMasVnrType\nExterQual\nExterCond\nFoundation\nBsmtQual\nBsmtCond\nBsmtExposure\nBsmtFinType1\nBsmtFinType2\nHeating\nHeatingQC\nCentralAir\nElectrical\nKitchenQual\nFunctional\nFireplaceQu\nGarageType\nGarageFinish\nGarageQual\nGarageCond\nPavedDrive\nSaleType\nSaleCondition\n" ], [ "final_df.shape", "_____no_output_____" ], [ "# removing duplicate columns as they wont help\nfinal_df =final_df.loc[:,~final_df.columns.duplicated()] ", "_____no_output_____" ], [ "final_df.shape", "_____no_output_____" ], [ "final_df", "_____no_output_____" ], [ "final_df['SalePrice'] ", "_____no_output_____" ] ], [ [ "### Final dataset of train+test\nNow we will seperate test and train \nIn train we will further seperate features mapping to price\n#### X------------->Y\n(fetures). . . . 
(price)", "_____no_output_____" ] ], [ [ "df_Train=final_df.iloc[:1460,:] # 1460 is clculted from previous train data size\ndf_Test=final_df.iloc[1460:,:]", "_____no_output_____" ], [ "df_Train.head()", "_____no_output_____" ], [ "df_Test.head()", "_____no_output_____" ], [ "df_Train.shape", "_____no_output_____" ], [ "df_Test.drop(['SalePrice'],axis=1,inplace=True)", "/usr/local/lib/python3.7/site-packages/pandas/core/frame.py:3997: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n errors=errors,\n" ] ], [ [ "Remove ID and seperate out salesPrice as they are not fetures just label and output", "_____no_output_____" ] ], [ [ "X_train=df_Train.drop(['SalePrice','Id'],axis=1)\ny_train=df_Train['SalePrice']\n", "_____no_output_____" ], [ "X_test=df_Test.drop(['Id'],axis=1)", "_____no_output_____" ], [ "print(X_train.shape)\nprint(X_test.shape)", "(1460, 176)\n(1459, 176)\n" ] ], [ [ "## Prediciton and selecting the Algorithm", "_____no_output_____" ], [ "I am just simply using Liner Regression, we will improve on that later", "_____no_output_____" ] ], [ [ "from sklearn.linear_model import LinearRegression\nfrom sklearn.model_selection import train_test_split, cross_val_score\nfrom sklearn.metrics import mean_squared_error", "_____no_output_____" ], [ "regressor = LinearRegression() \nregressor.fit(X_train, y_train) #training the algorithm", "_____no_output_____" ], [ "y_pred = regressor.predict(X_test)\ny_pred", "_____no_output_____" ], [ "##Create Sample Submission file and Submit\npred=pd.DataFrame(y_pred)\nsub_df=pd.read_csv('sample_submission.csv')\ndatasets=pd.concat([sub_df['Id'],pred],axis=1)\ndatasets.columns=['Id','SalePrice']\ndatasets.to_csv('trial_1.csv',index=False)", "_____no_output_____" ], [ "sub_df.shape", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ] ]
cb9e22ec77c14474ad228c3044c31cbeb7fbb723
8,842
ipynb
Jupyter Notebook
all_processing_job/01_exp_on_notebook.ipynb
yito0427/experiment-pipeline
de50450227f68f9c84aab0c3d6e88c7bc9b8e93d
[ "MIT" ]
null
null
null
all_processing_job/01_exp_on_notebook.ipynb
yito0427/experiment-pipeline
de50450227f68f9c84aab0c3d6e88c7bc9b8e93d
[ "MIT" ]
null
null
null
all_processing_job/01_exp_on_notebook.ipynb
yito0427/experiment-pipeline
de50450227f68f9c84aab0c3d6e88c7bc9b8e93d
[ "MIT" ]
null
null
null
34.138996
355
0.619091
[ [ [ "# 1. データサイエンティストによるノートブックでの試行錯誤\n\nデータが蓄積され取得できるようになったら、データサイエンティストはEDA(探索的データ解析)を行い、モデルを構築し、評価します。\n本ノートブックでは、データサイエンティストによるモデル構築コードを提示します。\n以降のノートブックで、作成されたスクリプトのモジュール化を行なっていきます。\n", "_____no_output_____" ], [ "## 実験内容\n\n下記のノートブックと同様の実験を行います。\n\nhttps://github.com/aws-samples/aws-ml-jp/blob/main/mlops/step-functions-data-science-sdk/model-train-evaluate-compare/step_functions_mlworkflow_scikit_learn_data_processing_and_model_evaluation_with_experiments.ipynb\n\n>このノートブックで使用するデータは Census-Income KDD Dataset です。このデータセットから特徴量を選択し、データクレンジングを実施し、二値分類モデルの利用できる形にデータを変換し、最後にデータを学習用とテスト用に分割します。このノートブックではロジスティック回帰モデルを使って、国勢調査の回答者の収入が 5万ドル以上か 5万ドル未満かを予測します。このデータセットはクラスごとの不均衡が大きく、ほとんどのデータに 5万ドル以下というラベルが付加されています。", "_____no_output_____" ], [ "## 前提:データは事前に dataset/ に手動で格納しておく\n\nデータを以下のサイトから入手し、 dataset ディレクトリに配置してください。\n\nhttps://archive.ics.uci.edu/ml/datasets/Census-Income+%28KDD%29\n\n./dataset/census-income.csv(101.5MB)", "_____no_output_____" ] ], [ [ "import shutil", "_____no_output_____" ], [ "shutil.unpack_archive(\"./census-income.csv.zip\", extract_dir='./dataset')", "_____no_output_____" ] ], [ [ "## データサイエンティストによる、モデル構築\nデータサイエンティストがEDAを行なったあと、ノートブック上でモデルの構築、評価を行なった場合を想定します。\n\nこのスクリプトでは、以下の処理が実行されます。\n\n* 重複データやコンフリクトしているデータの削除\n* ターゲット変数 income 列をカテゴリ変数から 2つのラベルを持つ列に変換\n* age と num persons worked for employer をビニングして数値からカテゴリ変数に変換\n* 連続値であるcapital gains, capital losses, dividends from stocks を学習しやすいようスケーリング\n* education, major industry code, class of workerを学習しやすいようエンコード\n* データを学習用とテスト用に分割し特徴量とラベルの値をそれぞれ保存", "_____no_output_____" ], [ "## コードの詳細\n\n以下、69行(空行含む)\n* ライブラリ読み込み:7行\n* 空行:9行\n* コメント:11行\n* コード実行:19行\n* コード実行の改行:23行", "_____no_output_____" ] ], [ [ "# Import the latest sagemaker, stepfunctions and boto3 SDKs\nimport sys\n\n!{sys.executable} -m pip install --upgrade pip\n!{sys.executable} -m pip install -qU pandas", "_____no_output_____" ], [ "import pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler, OneHotEncoder, KBinsDiscretizer\nfrom sklearn.compose import make_column_transformer\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import classification_report, roc_auc_score, accuracy_score\n\n### データ読み込み\ncolumns = [\n \"age\",\n \"education\",\n \"major industry code\",\n \"class of worker\",\n \"num persons worked for employer\",\n \"capital gains\",\n \"capital losses\",\n \"dividends from stocks\",\n \"income\",\n]\nclass_labels = [\" - 50000.\", \" 50000+.\"]\n\ndf = pd.read_csv(\"./dataset/census-income.csv\")\ndf = df[columns]\n\n### 前処理\n#重複データやコンフリクトしているデータの削除\ndf.dropna(inplace=True)\ndf.drop_duplicates(inplace=True)\ndf.replace(class_labels, [0, 1], inplace=True)\n\n#ターゲット変数 income 列をカテゴリ変数から 2つのラベルを持つ列に変換\nnegative_examples, positive_examples = np.bincount(df[\"income\"])\n#データを学習用とテスト用に分割\nX_train, X_test, y_train, y_test = train_test_split(df.drop(\"income\", axis=1), df[\"income\"], test_size=0.2)\n\npreprocess = make_column_transformer(\n #age と num persons worked for employer をビニングして数値からカテゴリ変数に変換\n (\n KBinsDiscretizer(encode=\"onehot-dense\", n_bins=10),\n [\"age\", \"num persons worked for employer\"],\n ),\n #連続値であるcapital gains, capital losses, dividends from stocks を学習しやすいようスケーリング\n (\n StandardScaler(),\n [\"capital gains\", \"capital losses\", \"dividends from stocks\"],\n ),\n #education, major industry code, class of workerを学習しやすいようエンコード\n (\n OneHotEncoder(sparse=False, handle_unknown='ignore'),\n [\"education\", \"major industry 
code\", \"class of worker\"],\n ),\n)\nX_train = preprocess.fit_transform(X_train)\nX_test = preprocess.transform(X_test)\n\n### 学習\nmodel = LogisticRegression(class_weight=\"balanced\", solver=\"lbfgs\", C=float(1.0), verbose=1)\nmodel.fit(X_train, y_train)\n\n### 推論\npredictions = model.predict(X_test)\n\n### 評価\nreport_dict = classification_report(y_test, predictions, output_dict=True)\nreport_dict[\"accuracy\"] = accuracy_score(y_test, predictions)\nreport_dict[\"roc_auc\"] = roc_auc_score(y_test, predictions)\nprint(report_dict)", "_____no_output_____" ] ], [ [ "ノートブックのインタラクティブ性は、EDAやモデルプロトタイプなどの初期の試行錯誤には大変便利です。\n一方で、モジュール化されていないコードや記録されていないコードや、本番運用を見据えると、後のコード本番化、リファクタリングなどの工数を増加や、テストの難しさによる品質確保が難しいといった懸念もあります。\n\n試行錯誤の柔軟性を確保しつつ、モジュール化されたコードをきちんと記録していくことが、コードの品質向上と、本番導入の迅速化には重要になります。\n以降のノートブックでは、実験を支援するパイプラインを準備し、ノートブックをモジュール化していく例をみていきます。", "_____no_output_____" ], [ "## [参考] 詰め込んだ場合、以下の23行で完了", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler, OneHotEncoder, KBinsDiscretizer\nfrom sklearn.compose import make_column_transformer\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import classification_report, roc_auc_score, accuracy_score\ndf = pd.read_csv(\"./dataset/census-income.csv\")\ndf = df[[\"age\",\"education\",\"major industry code\",\"class of worker\",\"num persons worked for employer\",\"capital gains\",\"capital losses\",\"dividends from stocks\",\"income\",]]\ndf.dropna(inplace=True)\ndf.drop_duplicates(inplace=True)\ndf.replace([\" - 50000.\", \" 50000+.\"], [0, 1], inplace=True)\nX_train, X_test, y_train, y_test = train_test_split(df.drop(\"income\", axis=1), df[\"income\"], test_size=0.2)\npreprocess = make_column_transformer((KBinsDiscretizer(encode=\"onehot-dense\", n_bins=10),[\"age\", \"num persons worked for employer\"],),(StandardScaler(),[\"capital gains\", \"capital losses\", \"dividends from stocks\"],),(OneHotEncoder(sparse=False, handle_unknown='ignore'),[\"education\", \"major industry code\", \"class of worker\"],),)\nX_train = preprocess.fit_transform(X_train)\nX_test = preprocess.transform(X_test)\nmodel = LogisticRegression(class_weight=\"balanced\", solver=\"lbfgs\", C=float(1.0), verbose=1)\nmodel.fit(X_train, y_train)\npredictions = model.predict(X_test)\nreport_dict = classification_report(y_test, predictions, output_dict=True)\nreport_dict[\"accuracy\"] = accuracy_score(y_test, predictions)\nreport_dict[\"roc_auc\"] = roc_auc_score(y_test, predictions)\nprint(report_dict)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ] ]